repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
jimklo/LearningRegistry | LR/ez_setup.py | 358 | 9716 | #!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
# Version of setuptools fetched when the caller does not pin one.
DEFAULT_VERSION = "0.6c9"
# Download base; keyed by the running interpreter's major.minor version string.
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]

# Known-good md5 digests for released setuptools eggs, keyed by egg filename.
# Consulted by _validate_md5() after download; rewritten in place by
# update_md5() when the script is run with --md5update.
md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
    'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
    'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
    'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
    'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
    'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
    'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
    'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
    'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
    'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
    'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
    'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
    'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
    'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
    'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
    'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
    'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
    'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
    'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
    'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
    """Verify *data* against the built-in md5 registry; exit(2) on mismatch.

    Eggs whose filename is not listed in ``md5_data`` are passed through
    unverified.  Returns *data* unchanged on success.
    """
    if egg_name in md5_data:
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            # Abort the whole bootstrap rather than install a corrupt egg.
            print >>sys.stderr, (
                "md5 validation of %s failed! (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    # Remember whether setuptools machinery was already imported: if it was,
    # we cannot safely replace it in-process and must ask the user to upgrade.
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    def do_download():
        # Fetch the egg, put it first on sys.path, and record where the
        # bootstrap egg lives so setuptools can install from it later.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        # No setuptools at all: download unconditionally.
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:
        if was_imported:
            # Too late to swap implementations under a running script.
            print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U setuptools'."
            "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
        else:
            # Drop the stale module so the downloaded egg is imported fresh.
            del pkg_resources, sys.modules['pkg_resources'] # reload ok
            return do_download()
    except pkg_resources.DistributionNotFound:
        return do_download()
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                # Give the user a grace period (and instructions) before
                # touching the network.
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            # Close both handles even if validation or the write failed.
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        # No setuptools present: bootstrap from a freshly downloaded egg,
        # then delete the egg again once easy_install has installed it.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg]) # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        # '0.0.1' is the version reported by the ancient fake "setuptools"
        # shim; it cannot be upgraded in place.
        if setuptools.__version__ == '0.0.1':
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)

    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed setuptools is too old: upgrade it via easy_install.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re

    # Hash each named egg and record it under its basename.
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()

    # Render the registry as sorted source lines and splice them back into
    # this very script, replacing the body of the md5_data literal.
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)

    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()

    # NOTE(review): pattern is a non-raw string; '\n' is a literal newline
    # here, which is what the match needs — do not "fix" to a raw string
    # without checking.
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
if __name__=='__main__':
    # "--md5update file.egg ..." refreshes the embedded digest table;
    # anything else is forwarded to the install/upgrade entry point.
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
| apache-2.0 |
fraenkel-lab/pcst_fast | external/pybind11/tests/test_eigen.py | 6 | 5427 | import pytest
# NOTE(review): pytest.suppress is a helper injected by this test suite's
# conftest (not a stock pytest API) — numpy import failures are tolerated
# because the requires_* markers below skip the tests in that case.
with pytest.suppress(ImportError):
    import numpy as np

    # Reference 5x6 matrix shared by every test below.
    ref = np.array([[ 0,  3,  0,  0,  0, 11],
                    [22,  0,  0,  0, 17, 11],
                    [ 7,  5,  0,  1,  0, 11],
                    [ 0,  0,  0,  0,  0, 11],
                    [ 0,  0, 14,  0,  8, 11]])
def assert_equal_ref(mat):
    """Assert that *mat* equals the module-level reference matrix."""
    np.testing.assert_array_equal(mat, ref)
def assert_sparse_equal_ref(sparse_mat):
    """Assert that a scipy sparse matrix densifies to the reference matrix."""
    assert_equal_ref(sparse_mat.todense())
@pytest.requires_eigen_and_numpy
def test_fixed():
    """Fixed-size Eigen matrices round-trip in both storage orders."""
    from pybind11_tests import fixed_r, fixed_c, fixed_passthrough_r, fixed_passthrough_c

    assert_equal_ref(fixed_c())
    assert_equal_ref(fixed_r())
    # Pass-through in every row-/column-major combination.
    assert_equal_ref(fixed_passthrough_r(fixed_r()))
    assert_equal_ref(fixed_passthrough_c(fixed_c()))
    assert_equal_ref(fixed_passthrough_r(fixed_c()))
    assert_equal_ref(fixed_passthrough_c(fixed_r()))
@pytest.requires_eigen_and_numpy
def test_dense():
    """Dynamically-sized dense Eigen matrices round-trip in both orders."""
    from pybind11_tests import dense_r, dense_c, dense_passthrough_r, dense_passthrough_c

    assert_equal_ref(dense_r())
    assert_equal_ref(dense_c())
    # Pass-through in every row-/column-major combination.
    assert_equal_ref(dense_passthrough_r(dense_r()))
    assert_equal_ref(dense_passthrough_c(dense_c()))
    assert_equal_ref(dense_passthrough_r(dense_c()))
    assert_equal_ref(dense_passthrough_c(dense_r()))
@pytest.requires_eigen_and_numpy
def test_nonunit_stride_from_python():
    """Non-contiguous numpy slices (rows, cols, 3-D slices) convert to Eigen."""
    from pybind11_tests import double_row, double_col, double_mat_cm, double_mat_rm

    counting_mat = np.arange(9.0, dtype=np.float32).reshape((3, 3))
    first_row = counting_mat[0, :]   # contiguous slice
    first_col = counting_mat[:, 0]   # strided slice
    assert np.array_equal(double_row(first_row), 2.0 * first_row)
    assert np.array_equal(double_col(first_row), 2.0 * first_row)
    assert np.array_equal(double_row(first_col), 2.0 * first_col)
    assert np.array_equal(double_col(first_col), 2.0 * first_col)

    # 2-D slices of a 3-D array exercise a non-unit stride on each axis.
    counting_3d = np.arange(27.0, dtype=np.float32).reshape((3, 3, 3))
    slices = [counting_3d[0, :, :], counting_3d[:, 0, :], counting_3d[:, :, 0]]
    for slice_idx, ref_mat in enumerate(slices):
        assert np.array_equal(double_mat_cm(ref_mat), 2.0 * ref_mat)
        assert np.array_equal(double_mat_rm(ref_mat), 2.0 * ref_mat)
@pytest.requires_eigen_and_numpy
def test_nonunit_stride_to_python():
    """Eigen expressions with non-unit strides (diagonals, blocks) reach Python intact."""
    from pybind11_tests import diagonal, diagonal_1, diagonal_n, block

    assert np.all(diagonal(ref) == ref.diagonal())
    assert np.all(diagonal_1(ref) == ref.diagonal(1))
    # All valid diagonal offsets for a 5x6 matrix.
    for i in range(-5, 7):
        assert np.all(diagonal_n(ref, i) == ref.diagonal(i)), "diagonal_n({})".format(i)

    assert np.all(block(ref, 2, 1, 3, 3) == ref[2:5, 1:4])
    assert np.all(block(ref, 1, 4, 4, 2) == ref[1:, 4:])
    assert np.all(block(ref, 1, 4, 3, 2) == ref[1:4, 4:])
@pytest.requires_eigen_and_numpy
def test_eigen_ref_to_python():
    """Each cholesky variant returns the same lower-triangular result."""
    from pybind11_tests import cholesky1, cholesky2, cholesky3, cholesky4, cholesky5, cholesky6

    chols = [cholesky1, cholesky2, cholesky3, cholesky4, cholesky5, cholesky6]
    for i, chol in enumerate(chols, start=1):
        mymat = chol(np.array([[1, 2, 4], [2, 13, 23], [4, 23, 77]]))
        assert np.all(mymat == np.array([[1, 0, 0], [2, 3, 0], [4, 5, 6]])), "cholesky{}".format(i)
@pytest.requires_eigen_and_numpy
def test_special_matrix_objects():
    """Special Eigen matrix adaptors (diagonal, selfadjoint views) convert correctly."""
    from pybind11_tests import incr_diag, symmetric_upper, symmetric_lower

    assert np.all(incr_diag(7) == np.diag([1, 2, 3, 4, 5, 6, 7]))

    asymm = np.array([[ 1,  2,  3,  4],
                      [ 5,  6,  7,  8],
                      [ 9, 10, 11, 12],
                      [13, 14, 15, 16]])
    # Build the expected symmetric results by mirroring one triangle of asymm.
    symm_lower = np.array(asymm)
    symm_upper = np.array(asymm)
    for i in range(4):
        for j in range(i + 1, 4):
            symm_lower[i, j] = symm_lower[j, i]
            symm_upper[j, i] = symm_upper[i, j]

    assert np.all(symmetric_lower(asymm) == symm_lower)
    assert np.all(symmetric_upper(asymm) == symm_upper)
@pytest.requires_eigen_and_numpy
def test_dense_signature(doc):
    """Docstring signatures advertise the expected numpy.ndarray types.

    NOTE(review): the dataset this was recovered from stripped leading
    whitespace; the indentation inside the expected strings below is a
    reconstruction — the `doc` fixture normalizes whitespace, so the exact
    indent should not matter, but confirm against upstream if it fails.
    """
    from pybind11_tests import double_col, double_row, double_mat_rm

    assert doc(double_col) == """
        double_col(arg0: numpy.ndarray[float32[m, 1]]) -> numpy.ndarray[float32[m, 1]]
    """
    assert doc(double_row) == """
        double_row(arg0: numpy.ndarray[float32[1, n]]) -> numpy.ndarray[float32[1, n]]
    """
    assert doc(double_mat_rm) == """
        double_mat_rm(arg0: numpy.ndarray[float32[m, n]]) -> numpy.ndarray[float32[m, n]]
    """
@pytest.requires_eigen_and_scipy
def test_sparse():
    """Sparse Eigen matrices round-trip through scipy.sparse in both orders."""
    from pybind11_tests import sparse_r, sparse_c, sparse_passthrough_r, sparse_passthrough_c

    assert_sparse_equal_ref(sparse_r())
    assert_sparse_equal_ref(sparse_c())
    # Pass-through in every row-/column-major combination.
    assert_sparse_equal_ref(sparse_passthrough_r(sparse_r()))
    assert_sparse_equal_ref(sparse_passthrough_c(sparse_c()))
    assert_sparse_equal_ref(sparse_passthrough_r(sparse_c()))
    assert_sparse_equal_ref(sparse_passthrough_c(sparse_r()))
@pytest.requires_eigen_and_scipy
def test_sparse_signature(doc):
    """Docstring signatures advertise scipy.sparse csr/csc matrix types.

    NOTE(review): indentation inside the expected strings is reconstructed
    (source was whitespace-stripped); the `doc` fixture normalizes whitespace.
    """
    from pybind11_tests import sparse_passthrough_r, sparse_passthrough_c

    assert doc(sparse_passthrough_r) == """
        sparse_passthrough_r(arg0: scipy.sparse.csr_matrix[float32]) -> scipy.sparse.csr_matrix[float32]
    """  # noqa: E501 line too long
    assert doc(sparse_passthrough_c) == """
        sparse_passthrough_c(arg0: scipy.sparse.csc_matrix[float32]) -> scipy.sparse.csc_matrix[float32]
    """  # noqa: E501 line too long
| mit |
DefyVentures/edx-platform | lms/djangoapps/shoppingcart/migrations/0007_auto__add_field_orderitem_service_fee.py | 114 | 10407 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: add ``OrderItem.service_fee`` and index the
    ``status``, ``fulfilled_time`` and ``refund_requested_time`` columns."""

    def forwards(self, orm):
        # Adding field 'OrderItem.service_fee'
        db.add_column('shoppingcart_orderitem', 'service_fee',
                      self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=30, decimal_places=2),
                      keep_default=False)

        # Adding index on 'OrderItem', fields ['status']
        db.create_index('shoppingcart_orderitem', ['status'])

        # Adding index on 'OrderItem', fields ['fulfilled_time']
        db.create_index('shoppingcart_orderitem', ['fulfilled_time'])

        # Adding index on 'OrderItem', fields ['refund_requested_time']
        db.create_index('shoppingcart_orderitem', ['refund_requested_time'])

    def backwards(self, orm):
        # Removing index on 'OrderItem', fields ['refund_requested_time']
        db.delete_index('shoppingcart_orderitem', ['refund_requested_time'])

        # Removing index on 'OrderItem', fields ['fulfilled_time']
        db.delete_index('shoppingcart_orderitem', ['fulfilled_time'])

        # Removing index on 'OrderItem', fields ['status']
        db.delete_index('shoppingcart_orderitem', ['status'])

        # Deleting field 'OrderItem.service_fee'
        db.delete_column('shoppingcart_orderitem', 'service_fee')

    # Frozen ORM snapshot generated by South; not hand-edited.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'shoppingcart.certificateitem': {
            'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
            'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
        },
        'shoppingcart.order': {
            'Meta': {'object_name': 'Order'},
            'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
            'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
            'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'shoppingcart.orderitem': {
            'Meta': {'object_name': 'OrderItem'},
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
            'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
            'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}),
            'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'shoppingcart.paidcourseregistration': {
            'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
            'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
        },
        'shoppingcart.paidcourseregistrationannotation': {
            'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
            'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'course_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'student.courseenrollment': {
            'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['shoppingcart']
| agpl-3.0 |
doumadou/ssbc | search/management/commands/loadhash.py | 36 | 1418 | #coding: utf8
from django.core.management.base import BaseCommand
from django import db as ddb
from search.models import Hash
import pymongo
db = pymongo.MongoClient().dht
class Command(BaseCommand):
    """Management command: rebuild the SQL ``Hash`` table from the MongoDB
    ``dht.basic`` collection, bulk-inserting 10 000 rows at a time."""

    def handle(self, *args, **options):
        # Full reload: wipe the relational table before re-importing.
        Hash.objects.all().delete()
        print 'inputing ...'
        total = db.basic.count()
        ii = 0
        ready = []
        for x in db.basic.find():
            ii += 1
            if ii % 10000 == 0:
                # Python 2 integer division: prints whole-percent progress.
                print ii * 100 / total, '%', total - ii
                # Flush the accumulated batch and clear Django's query log
                # so a long import does not grow memory (DEBUG mode).
                Hash.objects.bulk_create(ready)
                ready = []
                ddb.reset_queries()
            h = Hash(info_hash = x['info_hash'])
            h.classified = x.get('classified', False)
            h.tagged = x.get('tagged', False)
            # Slices guard against values longer than the model's max_length.
            h.name = unicode(x.get('name',''))[:255]
            h.category = x.get('category','')[:20]
            h.extension = x.get('extension', '')[:20]
            h.data_hash = x.get('data_hash', '')
            h.comment = x.get('comment','')[:255]
            h.creator = x.get('creator','')[:20]
            h.length = x.get('length', 0)
            h.requests = x.get('requests', 0)
            h.source_ip = x.get('source_ip')
            h.create_time = x.get('create_time')
            h.last_seen = x.get('last_seen', h.create_time)
            ready.append(h)
        # Insert the final partial batch.
        if ready:
            Hash.objects.bulk_create(ready)
| gpl-2.0 |
arnaud-morvan/QGIS | python/plugins/processing/algs/qgis/PointsFromPolygons.py | 10 | 5832 | # -*- coding: utf-8 -*-
"""
***************************************************************************
PointsFromPolygons.py
---------------------
Date : August 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'August 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from osgeo import gdal
from qgis.core import (QgsFeatureRequest,
QgsFields,
QgsField,
QgsFeature,
QgsFeatureSink,
QgsGeometry,
QgsWkbTypes,
QgsPoint,
QgsProcessing,
QgsProcessingException,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink)
from qgis.PyQt.QtCore import QVariant
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from processing.tools import raster
class PointsFromPolygons(QgisAlgorithm):
    """Processing algorithm that emits one point per raster-pixel centroid
    falling inside the input polygons.

    Output points carry three int attributes: a global ``id``, the polygon's
    ``poly_id`` and a per-polygon ``point_id``.
    """

    # Parameter / output identifiers used in the processing framework.
    INPUT_RASTER = 'INPUT_RASTER'
    INPUT_VECTOR = 'INPUT_VECTOR'
    OUTPUT = 'OUTPUT'

    def group(self):
        return self.tr('Vector creation')

    def groupId(self):
        return 'vectorcreation'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        """Declare the raster source, polygon source and point sink."""
        self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT_RASTER,
                                                            self.tr('Raster layer')))
        self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT_VECTOR,
                                                              self.tr('Vector layer'), [QgsProcessing.TypeVectorPolygon]))
        self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Points inside polygons'), QgsProcessing.TypeVectorPoint))

    def name(self):
        return 'generatepointspixelcentroidsinsidepolygons'

    def displayName(self):
        return self.tr('Generate points (pixel centroids) inside polygons')

    def processAlgorithm(self, parameters, context, feedback):
        source = self.parameterAsSource(parameters, self.INPUT_VECTOR, context)
        if source is None:
            raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT_VECTOR))

        raster_layer = self.parameterAsRasterLayer(parameters, self.INPUT_RASTER, context)
        rasterPath = raster_layer.source()

        # GDAL is only used for the geotransform (pixel <-> map coordinates).
        rasterDS = gdal.Open(rasterPath, gdal.GA_ReadOnly)
        geoTransform = rasterDS.GetGeoTransform()

        fields = QgsFields()
        fields.append(QgsField('id', QVariant.Int, '', 10, 0))
        fields.append(QgsField('poly_id', QVariant.Int, '', 10, 0))
        fields.append(QgsField('point_id', QVariant.Int, '', 10, 0))

        (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
                                               fields, QgsWkbTypes.Point, raster_layer.crs())
        if sink is None:
            raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))

        outFeature = QgsFeature()
        outFeature.setFields(fields)

        fid = 0      # running id across all output points
        polyId = 0   # index of the current polygon
        pointId = 0  # index of the point within the current polygon

        # Reproject polygons into the raster CRS so pixel math lines up.
        features = source.getFeatures(QgsFeatureRequest().setDestinationCrs(raster_layer.crs(), context.transformContext()))
        total = 100.0 / source.featureCount() if source.featureCount() else 0
        for current, f in enumerate(features):
            if feedback.isCanceled():
                break

            if not f.hasGeometry():
                continue

            geom = f.geometry()
            bbox = geom.boundingBox()

            # Restrict the pixel scan to the polygon's bounding box.
            xMin = bbox.xMinimum()
            xMax = bbox.xMaximum()
            yMin = bbox.yMinimum()
            yMax = bbox.yMaximum()
            (startRow, startColumn) = raster.mapToPixel(xMin, yMax, geoTransform)
            (endRow, endColumn) = raster.mapToPixel(xMax, yMin, geoTransform)

            # use prepared geometries for faster intersection tests
            engine = QgsGeometry.createGeometryEngine(geom.constGet())
            engine.prepareGeometry()

            for row in range(startRow, endRow + 1):
                for col in range(startColumn, endColumn + 1):
                    if feedback.isCanceled():
                        break

                    (x, y) = raster.pixelToMap(row, col, geoTransform)
                    point = QgsPoint(x, y)
                    if engine.contains(point):
                        outFeature.setGeometry(QgsGeometry(point))
                        outFeature['id'] = fid
                        outFeature['poly_id'] = polyId
                        outFeature['point_id'] = pointId

                        fid += 1
                        pointId += 1

                        sink.addFeature(outFeature, QgsFeatureSink.FastInsert)

            # point_id restarts for each polygon.
            pointId = 0
            polyId += 1
            feedback.setProgress(int(current * total))

        return {self.OUTPUT: dest_id}
| gpl-2.0 |
TheWardoctor/Wardoctors-repo | script.module.schism.common/lib/requests/packages/chardet/big5freq.py | 3133 | 82594 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ration = 512/(5401-512)=0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
# See the derivation in the header comment above (about 25% of the ideal
# 0.74851/(1-0.74851) ratio, still well above the random-distribution ratio).
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75

#Char to FreqOrder table
# Number of entries in the Big5CharToFreqOrder table below.
BIG5_TABLE_SIZE = 5376
Big5CharToFreqOrder = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512
#Everything below is of no interest for detection purpose
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
# flake8: noqa
| apache-2.0 |
MShel/ttw | listener/packets/udpPacket.py | 1 | 1408 | from listener.packets.abstractPacket import AbstractPacket
from struct import unpack
class UdpPacket(AbstractPacket):
    ''' Parser for the UDP header and payload of a captured raw packet. '''

    # struct format for the four unsigned 16-bit UDP header fields in
    # network byte order: source port, destination port, length, checksum
    UNPACK_FORMAT = '!HHHH'
    # size of the fixed UDP header in bytes
    UDP_HEADER_LENGTH = 8
    PROTOCOL_NAME = 'UDP'

    def __init__(self, binPacket: bytes, margin: int):
        '''
        binPacket: the complete raw packet (frame) as captured
        margin: byte offset within binPacket at which the UDP header begins
        '''
        self.binPacket = binPacket
        self.headerMargin = margin
        self.parse()  # parse eagerly on construction

    def parse(self):
        ''' Unpack the UDP header fields and slice out the payload bytes. '''
        # NOTE(review): addMsg is invoked unbound, passing the class itself as
        # 'self' -- messages therefore appear to accumulate on AbstractPacket
        # rather than on this instance; confirm this is intended.
        AbstractPacket.addMsg(AbstractPacket, 'Started Parsing UDP packet')
        binUdpHeader = self.binPacket[self.headerMargin:self.headerMargin + self.UDP_HEADER_LENGTH]
        unpackedHeader = unpack(self.UNPACK_FORMAT, binUdpHeader)
        self.fromPort = str(unpackedHeader[0])
        self.toPort = str(unpackedHeader[1])
        # NOTE(review): field 2 of a UDP header is the total datagram length
        # (header + payload), not the header length alone -- the attribute name
        # and the offset arithmetic below may therefore be off; verify.
        self.udpHeaderLength = unpackedHeader[2]
        self.udpCheckSum = unpackedHeader[3]
        fullHeaderSize = self.headerMargin + self.udpHeaderLength
        self.dataSize = len(self.binPacket) - fullHeaderSize
        # get data from the packet
        self.data = self.binPacket[fullHeaderSize:]
        AbstractPacket.addMsg(AbstractPacket, 'Parsed UDP packet from port: ' + self.fromPort + ' to: ' + self.toPort)
        AbstractPacket.addMsg(AbstractPacket, 'UDP-PACKET data:\n\n\n ' + str(self.data) +'\n\n')

    def getMsg(self):
        # NOTE(review): self.msg is never assigned in this class; presumably
        # provided by AbstractPacket -- confirm.
        return self.msg

    def getName(self):
        ''' Return the protocol name for this packet type. '''
        return self.PROTOCOL_NAME

    def __del__(self):
        pass
aerler/WRF-Tools | Python/wrfavg/wrfout_average.py | 1 | 79431 | '''
Created on 2013-09-28, revised 2014-06-17, added daily output 2020-05-04
A script to average WRF output; the default settings are meant for my 'fineIO' output configuration and
process the smaller diagnostic files.
The script can run in parallel mode, with each process averaging one filetype and domain, producing
exactly one output file.
@author: Andre R. Erler, GPL v3
'''
#TODO: add time-dependent auxiliary files to file processing (use prerequisites from other files)
#TODO: add option to discard prerequisite variables
#TODO: add base variables for correlation and standard deviation (and (co-)variance).
#TODO: more variables: tropopause height, baroclinicity, PV, water flux (require full 3D fields)
#TODO: add shape-averaged output stream (shapes based on a template file)
## imports
import numpy as np
from collections import OrderedDict
#import numpy.ma as ma
import os, re, sys, shutil, gc
import netCDF4 as nc
# my own netcdf stuff
from utils.nctools import add_coord, copy_dims, copy_ncatts, copy_vars
from processing.multiprocess import asyncPoolEC
# import module providing derived variable classes
import wrfavg.derived_variables as dv
# aliases
days_per_month_365 = dv.days_per_month_365
dtype_float = dv.dtype_float
# thresholds for wet-day variables (from AMS glossary and ETCCDI Climate Change Indices)
from utils.constants import precip_thresholds
# N.B.: importing from WRF Tools to GeoPy causes a name collision
# date error class
class DateError(Exception):
  """Raised for problems with wrfout date strings, e.g. in file names."""
  pass
# date error class
class ArgumentError(Exception):
''' Exceptions related to arguments passed to the script. '''
pass
def getDateRegX(period):
  ''' Map a named averaging period to a regular expression matching its years. '''
  # lookup table: period label -> regex matching the 4-digit years of that period
  _period_regexes = {
    '1979-1980': '19(79|80)',         # 2 year historical period
    '1979-1981': '19(79|8[0-1])',     # 3 year historical period
    '1979-1983': '19(79|8[0-3])',     # 5 year historical period
    '1979-1988': '19(79|8[0-8])',     # 10 year historical period
    '1980-1994': '19(8[0-9]|9[04])',  # 15 year historical period
    '2045-2047': '204[5-7]',          # 3 year future period
    '2045-2049': '204[5-9]',          # 5 year future period
    '2045-2054': '20(4[5-9]|5[0-4])', # 10 year future period
    '2045-2059': '20(4[5-9]|5[0-9])', # 15 year future period
    '2085-2087': '208[5-7]',          # 3 year future period
    '2085-2089': '208[5-9]',          # 5 year future period
    '2085-2094': '20(8[5-9]|9[0-4])', # 10 year future period
    '2085-2099': '20(8[5-9]|9[0-9])', # 15 year future period
    '2090-2094': '209[0-4]',          # 5 year future period
  }
  prdrgx = _period_regexes.get(period) # None for unrecognized labels
  if prdrgx: print(("\nLoading regular expression for date string: '{:s}'".format(period)))
  return prdrgx
## read arguments
# number of processes NP
if 'PYAVG_THREADS' in os.environ:
NP = int(os.environ['PYAVG_THREADS'])
else: NP = None
# only compute derived variables
if 'PYAVG_DERIVEDONLY' in os.environ:
lderivedonly = os.environ['PYAVG_DERIVEDONLY'] == 'DERIVEDONLY'
else: lderivedonly = False # i.e. all
# # scale dry-day threshold
# if os.environ.has_key('PYAVG_DRYDAY') and bool(os.environ['PYAVG_DRYDAY']): # i.e. not empty and non-zero
# dryday_correction = float(os.environ['PYAVG_DRYDAY']) # relative to WMO recommendation
# dv.dryday_threshold = dv.dryday_threshold * dryday_correction # precip treshold for a dry day: 2.3e-7 mm/s
# print("\n *** The dry-day threshold was increased by a factor of {:3.2f} relative to WMO recommendation *** \n".format(dryday_correction))
# recompute last timestep and continue (useful after a crash)
if 'PYAVG_RECOVER' in os.environ:
lrecover = os.environ['PYAVG_RECOVER'] == 'RECOVER'
else: lrecover = False # i.e. normal operation
# just add new and leave old
if 'PYAVG_ADDNEW' in os.environ:
laddnew = os.environ['PYAVG_ADDNEW'] == 'ADDNEW'
else: laddnew = False # i.e. recompute all
# recompute specified variables
if 'PYAVG_RECALC' in os.environ:
if os.environ['PYAVG_RECALC'] == 'DERIVEDONLY':
# recalculate all derived variables and leave others in place
lrecalc = True; lderivedonly = True; recalcvars = []
else:
recalcvars = os.environ['PYAVG_RECALC'].split() # space separated list (other characters cause problems...)
if len(recalcvars) > 0 and len(recalcvars[0]) > 0: lrecalc = True # if there is a variable to recompute
else: lrecalc = False
# lrecalc uses the same pathway, but they can operate independently
else: lrecalc = False # i.e. recompute all
# overwrite existing data
if 'PYAVG_OVERWRITE' in os.environ:
loverwrite = os.environ['PYAVG_OVERWRITE'] == 'OVERWRITE'
if loverwrite: laddnew = False; lrecalc = False
else: loverwrite = False # i.e. append
# N.B.: when loverwrite is True and and prdarg is empty, the entire file is replaced,
# otherwise only the selected months are recomputed
# file types to process
if 'PYAVG_FILETYPES' in os.environ:
filetypes = os.environ['PYAVG_FILETYPES'].split() # space separated list (other characters cause problems...)
if len(filetypes) == 1 and len(filetypes[0]) == 0: filetypes = None # empty string, substitute default
else: filetypes = None # defaults are set below
# domains to process
if 'PYAVG_DOMAINS' in os.environ:
domains = os.environ['PYAVG_DOMAINS'].split() # space separated list (other characters cause problems...)
if len(domains) == 1: domains = [int(i) for i in domains[0]] # string of single-digit indices
else: domains = [int(i) for i in domains] # semi-colon separated list
else: domains = None # defaults are set below
# run script in debug mode
if 'PYAVG_DEBUG' in os.environ:
ldebug = os.environ['PYAVG_DEBUG'] == 'DEBUG'
lderivedonly = ldebug or lderivedonly # usually this is what we are debugging, anyway...
else: ldebug = False # operational mode
# wipe temporary storage after every month (no carry-over)
if 'PYAVG_CARRYOVER' in os.environ:
lcarryover = os.environ['PYAVG_CARRYOVER'] == 'CARRYOVER'
else: lcarryover = True # operational mode
# use simple differences or centered differences for accumulated variables
if 'PYAVG_SMPLDIFF' in os.environ:
lsmplDiff = os.environ['PYAVG_SMPLDIFF'] == 'SMPLDIFF'
else: lsmplDiff = False # default mode: centered differences
# generate formatted daily/sub-daily output files for selected variables
if 'PYAVG_DAILY' in os.environ:
lglobaldaily = os.environ['PYAVG_DAILY'] == 'DAILY'
else: lglobaldaily = False # operational mode
# working directories
exproot = os.getcwd()
exp = exproot.split('/')[-1] # root folder name
infolder = exproot + '/wrfout/' # input folder
outfolder = exproot + '/wrfavg/' # output folder
# figure out time period
# N.B.: values or regex' can be passed for year, month, and day as arguments in this order; alternatively,
# a single argument with the values/regex separated by commas (',') can be used
if len(sys.argv) == 1 or not any(sys.argv[1:]): # treat empty arguments as no argument
period = [] # means recompute everything
elif len(sys.argv) == 2:
period = sys.argv[1].split(',') # regular expression identifying
else:
period = sys.argv[1:]
# prdarg = '1979'; period = prdarg.split('-') # for tests
# default time intervals
yearstr = '\d\d\d\d'; monthstr = '\d\d'; daystr = '\d\d'
# figure out time interval
if len(period) >= 1:
# first try some common expressions
yearstr = getDateRegX(period[0])
if yearstr is None: yearstr = period[0]
if len(period) >= 2: monthstr = period[1]
if len(period) >= 3: daystr = period[2]
# N.B.: the timestr variables are interpreted as strings and support Python regex syntax
if len(period) > 0 or ldebug: print('Date string interpretation:',yearstr,monthstr,daystr)
## definitions
# input files and folders
filetypes = filetypes or ['srfc', 'plev3d', 'xtrm', 'hydro', 'lsm', 'rad', 'snow']
domains = domains or [1,2,3,4]
# filetypes and domains can also be set in a semicolon-separated environment variable (see above)
# file pattern (WRF output and averaged files)
# inputpattern = 'wrf{0:s}_d{1:02d}_{2:s}-{3:s}-{4:s}_\d\d:\d\d:\d\d.nc' # expanded with format(type,domain,year,month)
# N.B.: raw string, so that '\d' is a literal regex token and not an (invalid) Python
#       escape sequence (invalid escapes trigger a SyntaxWarning in Python 3.12+)
inputpattern = r'^wrf{0:s}_d{1:s}_{2:s}_\d\d[_:]\d\d[_:]\d\d(?:\.nc$|$)' # expanded with format(type,domain,datestring)
#inputpattern = r'^wrf{0:s}_d{1:s}_{2:s}_\d\d[_:]\d\d[_:]\d\d.*$' # expanded with format(type,domain,datestring)
# N.B.: the last section (?:\.nc$|$) matches either .nc at the end or just the end of the string;
#       ?: just means that the group defined by () can not be retrieved (it is just to hold "|")
constpattern = 'wrfconst_d{0:02d}' # expanded with format(domain), also WRF output
# N.B.: file extension is added automatically for constpattern and handled by regex for inputpattern
monthlypattern = 'wrf{0:s}_d{1:02d}_monthly.nc' # expanded with format(type,domain)
dailypattern = 'wrf{0:s}_d{1:02d}_daily.nc' # expanded with format(type,domain)
# variable attributes
wrftime = 'Time' # name of the time dimension in wrfout files
wrfxtime = 'XTIME' # elapsed time in minutes since WRF simulation start
wrfaxes = {'Time': 'tax', 'west_east': 'xax', 'south_north': 'yax', 'num_press_levels_stag': 'pax'}
wrftimestamp = 'Times' # time-stamp variable in WRF
time = 'time' # name of the time dimension in monthly mean files
dimlist = ['x', 'y'] # dimensions we just copy
dimmap = {time: wrftime} #{time:wrftime, 'x':'west_east','y':'south_north'}
midmap = {wrfdim: mondim for mondim, wrfdim in dimmap.items()} # reverse mapping of dimmap
# accumulated variables (only total accumulation since simulation start, not, e.g., daily accumulated)
# N.B.: keys are variable names and values are bucket sizes; None or 0 means no bucket
acclist = {
    # srfc vars
    'RAINNC': 100., 'RAINC': 100., 'RAINSH': None, 'SNOWNC': None, 'GRAUPELNC': None,
    'SFCEVP': None, 'POTEVP': None,
    # lsm vars
    'SFROFF': None, 'UDROFF': None, 'ACGRDFLX': None, 'ACSNOW': None, 'ACSNOM': None,
    'ACHFX': None, 'ACLHF': None,
    # rad vars (shortwave)
    'ACSWUPT': 1.e9, 'ACSWUPTC': 1.e9, 'ACSWDNT': 1.e9, 'ACSWDNTC': 1.e9,
    'ACSWUPB': 1.e9, 'ACSWUPBC': 1.e9, 'ACSWDNB': 1.e9, 'ACSWDNBC': 1.e9,
    # rad vars (longwave)
    'ACLWUPT': 1.e9, 'ACLWUPTC': 1.e9, 'ACLWDNT': 1.e9, 'ACLWDNTC': 1.e9,
    'ACLWUPB': 1.e9, 'ACLWUPBC': 1.e9, 'ACLWDNB': 1.e9, 'ACLWDNBC': 1.e9,
}
bktpfx = 'I_' # prefix for bucket variables; these are processed together with their accumulated variables
# derived variables
# N.B.: each list entry is an instance from the derived-variable module 'dv';
#       instances are listed per file type and evaluated in list order (see note below)
derived_variables = {filetype:[] for filetype in filetypes} # derived variable lists by file type
derived_variables['srfc'] = [dv.Rain(), dv.LiquidPrecipSR(), dv.SolidPrecipSR(), dv.NetPrecip(sfcevp='QFX'),
                             dv.WaterVapor(), dv.OrographicIndex(), dv.CovOIP(), dv.WindSpeed(),
                             dv.SummerDays(threshold=25., temp='T2'), dv.FrostDays(threshold=0., temp='T2')]
# N.B.: measures the fraction of 6-hourly samples above/below the threshold (day and night)
derived_variables['xtrm'] = [dv.RainMean(), dv.TimeOfConvection(),
                             dv.SummerDays(threshold=25., temp='T2MAX'), dv.FrostDays(threshold=0., temp='T2MIN')]
derived_variables['hydro'] = [dv.Rain(), dv.LiquidPrecip(), dv.SolidPrecip(),
                              dv.NetPrecip(sfcevp='SFCEVP'), dv.NetWaterFlux(), dv.WaterForcing()]
derived_variables['rad'] = [dv.NetRadiation(), dv.NetLWRadiation()]
derived_variables['lsm'] = [dv.RunOff()]
derived_variables['plev3d'] = [dv.OrographicIndexPlev(), dv.Vorticity(), dv.WindSpeed(),
                               dv.WaterDensity(), dv.WaterFlux_U(), dv.WaterFlux_V(), dv.ColumnWater(),
                               dv.WaterTransport_U(), dv.WaterTransport_V(),
                               dv.HeatFlux_U(), dv.HeatFlux_V(), dv.ColumnHeat(),
                               dv.HeatTransport_U(),dv.HeatTransport_V(),
                               dv.GHT_Var(), dv.Vorticity_Var()]
# add wet-day variables for different thresholds
# N.B.: these are classes (not instances); one instance per class and per threshold is
#       appended below, so the variable count scales with len(precip_thresholds)
wetday_variables = [dv.WetDays, dv.WetDayRain, dv.WetDayPrecip]
for threshold in precip_thresholds:
  for wetday_var in wetday_variables:
    derived_variables['srfc'].append(wetday_var(threshold=threshold, rain='RAIN'))
    derived_variables['hydro'].append(wetday_var(threshold=threshold, rain='RAIN'))
    derived_variables['xtrm'].append(wetday_var(threshold=threshold, rain='RAINMEAN'))
# N.B.: derived variables need to be listed in order of computation
# Consecutive exceedance variables
# N.B.: entries map the new variable name to a tuple of
#       (source variable, 'above'/'below', threshold, long_name)
consecutive_variables = {filetype:None for filetype in filetypes} # consecutive variable lists by file type
# skip in debug mode (only specific ones for debug)
if ldebug:
  print("Skipping 'Consecutive Days of Exceedance' Variables")
else:
  # NOTE(review): 273.14 K is -0.01C, not 0C — presumably a small offset to avoid
  #               borderline equality, but confirm this is intentional (0C = 273.15 K)
  consecutive_variables['srfc'] = {'CFD' : ('T2', 'below', 273.14, 'Consecutive Frost Days (< 0C)'),
                                   'CSD' : ('T2', 'above', 273.14+25., 'Consecutive Summer Days (>25C)'),
                                   # N.B.: night temperatures >25C will rarely happen... so this will be very short
                                   'CNWD' : ('NetPrecip', 'above', 0., 'Consecutive Net Wet Days'),
                                   'CNDD' : ('NetPrecip', 'below', 0., 'Consecutive Net Dry Days'),}
  consecutive_variables['xtrm'] = {'CFD' : ('T2MIN', 'below', 273.14, 'Consecutive Frost Days (< 0C)'),
                                   'CSD' : ('T2MAX', 'above', 273.14+25., 'Consecutive Summer Days (>25C)'),}
  consecutive_variables['hydro'] = {'CNWD' : ('NetPrecip', 'above', 0., 'Consecutive Net Wet Days'),
                                    'CNDD' : ('NetPrecip', 'below', 0., 'Consecutive Net Dry Days'),
                                    'CWGD' : ('NetWaterFlux', 'above', 0., 'Consecutive Water Gain Days'),
                                    'CWLD' : ('NetWaterFlux', 'below', 0., 'Consecutive Water Loss Days'),}
# add wet-day variables for different thresholds
for threshold in precip_thresholds:
  for filetype,rain_var in zip(['srfc','hydro','xtrm'],['RAIN','RAIN','RAINMEAN']):
    # skip file types whose dict was not initialized (e.g. in debug mode, where the
    # entries are still None); previously this raised a TypeError when indexing None
    if consecutive_variables[filetype] is None: continue
    # thresholds are labeled in mm/day; division by 86400 (s/day) converts to a per-second rate
    suffix = '_{:03d}'.format(int(10*threshold)); name_suffix = '{:3.1f} mm/day)'.format(threshold)
    consecutive_variables[filetype]['CWD'+suffix] = (rain_var, 'above', threshold/86400.,
                                                    'Consecutive Wet Days (>'+name_suffix)
    consecutive_variables[filetype]['CDD'+suffix] = (rain_var, 'below', threshold/86400. ,
                                                    'Consecutive Dry Days (<'+name_suffix)
## single- and multi-step Extrema
# N.B.: entries are lists of variable *names* (native or derived) for which
#       Extrema/MeanExtrema derived variables are instantiated later (in processFileList)
maximum_variables = {filetype:[] for filetype in filetypes} # maxima variable lists by file type
daymax_variables = {filetype:[] for filetype in filetypes} # daily maxima variable lists by file type
daymin_variables = {filetype:[] for filetype in filetypes} # daily minima variable lists by file type
weekmax_variables = {filetype:[] for filetype in filetypes} # weekly maxima variable lists by file type
minimum_variables = {filetype:[] for filetype in filetypes} # minima variable lists by file type
weekmin_variables = {filetype:[] for filetype in filetypes} # weekly minima variable lists by file type
# skip in debug mode (only specific ones for debug)
if ldebug:
  print("Skipping Single- and Multi-step Extrema")
else:
  # Maxima (just list base variables; derived variables will be created later)
  maximum_variables['srfc'] = ['T2', 'U10', 'V10', 'RAIN', 'RAINC', 'RAINNC', 'NetPrecip', 'WindSpeed']
  maximum_variables['xtrm'] = ['T2MEAN', 'T2MAX', 'SPDUV10MEAN', 'SPDUV10MAX',
                               'RAINMEAN', 'RAINNCVMAX', 'RAINCVMAX']
  maximum_variables['hydro'] = ['RAIN', 'RAINC', 'RAINNC', 'ACSNOW', 'ACSNOM', 'NetPrecip', 'NetWaterFlux', 'WaterForcing']
  maximum_variables['lsm'] = ['SFROFF', 'Runoff']
  maximum_variables['plev3d'] = ['S_PL', 'GHT_PL', 'Vorticity']
  # daily (smoothed) maxima
  daymax_variables['srfc'] = ['T2','RAIN', 'RAINC', 'RAINNC', 'NetPrecip', 'WindSpeed']
  # daily (smoothed) minima
  daymin_variables['srfc'] = ['T2']
  # weekly (smoothed) maxima
  weekmax_variables['xtrm'] = ['T2MEAN', 'T2MAX', 'SPDUV10MEAN']
  weekmax_variables['hydro'] = ['RAIN', 'RAINC', 'RAINNC', 'ACSNOW', 'ACSNOM', 'NetPrecip', 'NetWaterFlux', 'WaterForcing']
  weekmax_variables['lsm'] = ['SFROFF', 'UDROFF', 'Runoff']
  # Minima (just list base variables; derived variables will be created later)
  minimum_variables['srfc'] = ['T2']
  minimum_variables['xtrm'] = ['T2MEAN', 'T2MIN', 'SPDUV10MEAN']
  minimum_variables['hydro'] = ['RAIN', 'NetPrecip', 'NetWaterFlux', 'WaterForcing']
  minimum_variables['plev3d'] = ['GHT_PL', 'Vorticity']
  # weekly (smoothed) minima
  weekmin_variables['xtrm'] = ['T2MEAN', 'T2MIN', 'SPDUV10MEAN']
  weekmin_variables['hydro'] = ['RAIN', 'NetPrecip', 'NetWaterFlux', 'WaterForcing']
  weekmin_variables['lsm'] = ['SFROFF','UDROFF','Runoff']
# N.B.: it is important that the derived variables are listed in order of dependency!
# set of pre-requisites
# N.B.: collects prerequisites of all *non-linear* derived variables per file type;
#       per the note further below, linear derived variables are computed directly
#       from the monthly averages and need no instantaneous prerequisite fields
prereq_vars = {key:set() for key in derived_variables.keys()} # pre-requisite variable set by file type
for key in prereq_vars.keys():
  prereq_vars[key].update(*[devar.prerequisites for devar in derived_variables[key] if not devar.linear])
## daily variables (can also be 6-hourly or hourly, depending on source file)
# N.B.: only assembled when (sub-)daily output was requested via PYAVG_DAILY; listed
#       names may be native WRF variables or derived variables (resolved in processFileList)
if lglobaldaily:
  daily_variables = {filetype:[] for filetype in filetypes} # daily variable lists by file type
  daily_variables['srfc'] = ['T2', 'PSFC', 'WaterVapor', 'WindSpeed',] # surface climate
  daily_variables['xtrm'] = ['T2MIN', 'T2MAX'] # min/max T2
  daily_variables['hydro'] = ['RAIN', 'RAINC', 'LiquidPrecip', 'WaterForcing', 'SFCEVP', 'POTEVP'] # water budget
  daily_variables['rad'] = ['NetRadiation','ACSWDNB','ACLWDNB','NetLWRadiation',] # surface radiation budget
  #daily_variables['lsm'] = [] # runoff and soil temperature
## main work function
# N.B.: the loop iterations should be entirely independent, so that they can be run in parallel
def processFileList(filelist, filetype, ndom, lparallel=False, pidstr='', logger=None, ldebug=False):
''' This function is doing the main work, and is supposed to be run in a multiprocessing environment. '''
## setup files and folders
# load first file to copy some meta data
wrfoutfile = infolder+filelist[0]
logger.debug("\n{0:s} Opening first input file '{1:s}'.".format(pidstr,wrfoutfile))
wrfout = nc.Dataset(wrfoutfile, 'r', format='NETCDF4')
# timeless variables (should be empty, since all timeless variables should be in constant files!)
timeless = [varname for varname,var in wrfout.variables.items() if 'Time' not in var.dimensions]
assert len(timeless) == 0 # actually useless, since all WRF variables have a time dimension...
# time-dependent variables
varlist = [] # list of time-dependent variables to be processed
for varname,var in wrfout.variables.items():
if ('Time' in var.dimensions) and np.issubdtype(var.dtype, np.number) and varname[0:len(bktpfx)] != bktpfx:
varlist.append(varname)
varlist.sort() # alphabetical order...
## derived variables, extrema, and dependencies
# derived variable list
derived_vars = OrderedDict() # it is important that the derived variables are computed in order:
# the reason is that derived variables can depend on other derived variables, and the order in
# which they are listed, should take this into account
for devar in derived_variables[filetype]:
derived_vars[devar.name] = devar
# create consecutive extrema variables
if consecutive_variables[filetype] is not None:
for key,value in consecutive_variables[filetype].items():
if value[0] in derived_vars:
derived_vars[key] = dv.ConsecutiveExtrema(derived_vars[value[0]], value[1], threshold=value[2],
name=key, long_name=value[3])
else:
derived_vars[key] = dv.ConsecutiveExtrema(wrfout.variables[value[0]], value[1], threshold=value[2],
name=key, long_name=value[3], dimmap=midmap)
  # method to create derived variables for extrema
  def addExtrema(new_variables, mode, interval=0):
    ''' Instantiate Extrema (interval == 0) or MeanExtrema (interval > 0) derived
        variables for every variable name listed under the current filetype in
        new_variables, and register them in the enclosing derived_vars dict (closure).
        mode is 'max' or 'min' (see call sites below); interval is the smoothing
        interval passed to dv.MeanExtrema (presumably in days — see callers). '''
    for exvar in new_variables[filetype]:
      # create derived variable instance
      # (wrap the already-derived variable if one exists, otherwise the raw wrfout variable)
      if exvar in derived_vars:
        if interval == 0: devar = dv.Extrema(derived_vars[exvar],mode)
        else: devar = dv.MeanExtrema(derived_vars[exvar],mode,interval=interval)
      else:
        if interval == 0: devar = dv.Extrema(wrfout.variables[exvar],mode, dimmap=midmap)
        else: devar = dv.MeanExtrema(wrfout.variables[exvar],mode, interval=interval, dimmap=midmap)
      # append to derived variables
      derived_vars[devar.name] = devar # derived_vars is from the parent scope, not local!
# and now add them
addExtrema(maximum_variables, 'max')
addExtrema(minimum_variables, 'min')
addExtrema(daymax_variables, 'max', interval=1)
addExtrema(daymin_variables, 'min', interval=1)
addExtrema(weekmax_variables, 'max', interval=5) # 5 days is the preferred interval, according to
addExtrema(weekmin_variables, 'min', interval=5) # ETCCDI Climate Change Indices
ldaily = False
if lglobaldaily:
# get varlist (does not include dependencies)
daily_varlist_full = daily_variables[filetype]
if len(daily_varlist_full)>0:
ldaily = True
daily_varlist = []; daily_derived_vars = []
for varname in daily_varlist_full:
if varname in wrfout.variables: daily_varlist.append(varname)
elif varname in derived_vars: daily_derived_vars.append(varname)
else:
raise ArgumentError("Variable '{}' not found in wrfout or derived variables; can only output derived variables that are already being computed for monthly output.".format(varname))
else:
logger.info("\n{0:s} Skipping (sub-)daily output for filetype '{1:s}', since variable list is empty.\n".format(pidstr,filetype))
# if we are only computing derived variables, remove all non-prerequisites
prepq = set().union(*[devar.prerequisites for devar in derived_vars.values()])
if ldaily: prepq |= set(daily_varlist)
if lderivedonly: varlist = [var for var in varlist if var in prepq]
# get some meta info and construct title string (printed after file creation)
begindate = str(nc.chartostring(wrfout.variables[wrftimestamp][0,:10])) # first timestamp in first file
beginyear, beginmonth, beginday = [int(tmp) for tmp in begindate.split('-')]
# always need to begin on the first of a month (discard incomplete data of first month)
if beginday != 1:
beginmonth += 1 # move on to next month
beginday = 1 # and start at the first (always...)
begindate = '{0:04d}-{1:02d}-{2:02d}'.format(beginyear, beginmonth, beginday) # rewrite begin date
# open last file and get last date
lastoutfile = infolder+filelist[-1]
logger.debug("{0:s} Opening last input file '{1:s}'.".format(pidstr,lastoutfile))
lastout = nc.Dataset(lastoutfile, 'r', format='NETCDF4')
lstidx = lastout.variables[wrftimestamp].shape[0]-1 # netcdf library has problems with negative indexing
enddate = str(nc.chartostring(lastout.variables[wrftimestamp][lstidx,:10])) # last timestamp in last file
endyear, endmonth, endday = [int(tmp) for tmp in enddate.split('-')]; del endday # make warning go away...
# the last timestamp should be the next month (i.e. that month is not included)
if endmonth == 1:
endmonth = 12; endyear -= 1 # previous year
else: endmonth -= 1
endday = 1 # first day of last month (always 1st..)
assert 1 <= endday <= 31 and 1 <= endmonth <= 12 # this is kinda trivial...
enddate = '{0:04d}-{1:02d}-{2:02d}'.format(endyear, endmonth, endday) # rewrite begin date
## open/create monthly mean output file
monthly_file = monthlypattern.format(filetype,ndom)
if lparallel: tmppfx = 'tmp_wrfavg_{:s}_'.format(pidstr[1:-1])
else: tmppfx = 'tmp_wrfavg_'
monthly_filepath = outfolder + monthly_file
tmp_monthly_filepath = outfolder + tmppfx + monthly_file
if os.path.exists(monthly_filepath):
if loverwrite or os.path.getsize(monthly_filepath) < 1e6: os.remove(monthly_filepath)
# N.B.: NetCDF files smaller than 1MB are usually incomplete header fragments from a previous crashed job
if os.path.exists(tmp_monthly_filepath) and not lrecover: os.remove(tmp_monthly_filepath) # remove old temp files
if os.path.exists(monthly_filepath):
# make a temporary copy of the file to work on (except, if we are recovering a broken temp file)
if not ( lrecover and os.path.exists(tmp_monthly_filepath) ): shutil.copy(monthly_filepath,tmp_monthly_filepath)
# open (temporary) file
logger.debug("{0:s} Opening existing output file '{1:s}'.\n".format(pidstr,monthly_filepath))
monthly_dataset = nc.Dataset(tmp_monthly_filepath, mode='a', format='NETCDF4') # open to append data (mode='a')
# infer start index
meanbeginyear, meanbeginmonth, meanbeginday = [int(tmp) for tmp in monthly_dataset.begin_date.split('-')]
assert meanbeginday == 1, 'always have to begin on the first of a month'
t0 = (beginyear-meanbeginyear)*12 + (beginmonth-meanbeginmonth) + 1
# check time-stamps in old datasets
if monthly_dataset.end_date < begindate: assert t0 == len(monthly_dataset.dimensions[time]) + 1 # another check
else: assert t0 <= len(monthly_dataset.dimensions[time]) + 1 # get time index where we start; in month beginning 1979
##
## *** special functions like adding new and recalculating old variables could be added later for daily output ***
##
# checks for new variables
if laddnew or lrecalc:
if t0 != 1: raise DateError("Have to start at the beginning to add new or recompute old variables!") # t0 starts with 1, not 0
meanendyear, meanendmonth, meanendday = [int(tmp) for tmp in monthly_dataset.end_date.split('-')]
assert meanendday == 1
endyear, endmonth = meanendyear, meanendmonth # just adding new, not extending!
enddate = monthly_dataset.end_date # for printing...
# check base variables
if laddnew or lrecalc: newvars = []
for var in varlist:
if var not in monthly_dataset.variables:
if laddnew: newvars.append(var)
else: varlist.remove(var)
#raise IOError, "{0:s} variable '{1:s}' not found in file '{2:s}'".format(pidstr,var.name,monthly_file)
# add new variables to netcdf file
if laddnew and len(newvars) > 0:
# copy remaining dimensions to new datasets
if midmap is not None:
dimlist = [midmap.get(dim,dim) for dim in wrfout.dimensions.keys() if dim != wrftime]
else: dimlist = [dim for dim in wrfout.dimensions.keys() if dim != wrftime]
dimlist = [dim for dim in dimlist if dim not in monthly_dataset.dimensions] # only the new ones!
copy_dims(monthly_dataset, wrfout, dimlist=dimlist, namemap=dimmap, copy_coords=False) # don't have coordinate variables
# create time-dependent variable in new datasets
copy_vars(monthly_dataset, wrfout, varlist=newvars, dimmap=dimmap, copy_data=False) # do not copy data - need to average
# change units of accumulated variables (per second)
for varname in newvars: # only new vars
assert varname in monthly_dataset.variables
if varname in acclist:
meanvar = monthly_dataset.variables[varname]
meanvar.units = meanvar.units + '/s' # units per second!
# add variables that should be recalculated
if lrecalc:
for var in recalcvars:
if var in monthly_dataset.variables and var in wrfout.variables:
if var not in newvars: newvars.append(var)
#else: raise ArgumentError, "Variable '{:s}' scheduled for recalculation is not present in output file '{:s}'.".format(var,monthly_filepath)
# check derived variables
if laddnew or lrecalc: newdevars = []
for varname,var in derived_vars.items():
if varname in monthly_dataset.variables:
var.checkPrerequisites(monthly_dataset)
if not var.checked: raise ValueError("Prerequisits for derived variable '{:s}' not found.".format(varname))
if lrecalc:
if ( lderivedonly and len(recalcvars) == 0 ) or ( varname in recalcvars ):
newdevars.append(varname)
var.checkPrerequisites(monthly_dataset) # as long as they are sorted correctly...
#del monthly_dataset.variables[varname]; monthly_dataset.sync()
#var.createVariable(monthly_dataset) # this does not seem to work...
else:
if laddnew:
var.checkPrerequisites(monthly_dataset) # as long as they are sorted correctly...
var.createVariable(monthly_dataset)
newdevars.append(varname)
else: del derived_vars[varname] # don't bother
# N.B.: it is not possible that a previously computed variable depends on a missing variable,
# unless it was purposefully deleted, in which case this will crash!
#raise (dv.DerivedVariableError, "{0:s} Derived variable '{1:s}' not found in file '{2:s}'".format(pidstr,var.name,monthly_file))
# now figure out effective variable list
if laddnew or lrecalc:
varset = set(newvars)
devarset = set(newdevars)
ndv = -1
# check prerequisites
while ndv != len(devarset):
ndv = len(devarset)
for devar in list(devarset): # normal variables don't have prerequisites
for pq in derived_vars[devar].prerequisites:
if pq in derived_vars: devarset.add(pq)
else: varset.add(pq)
# N.B.: this algorithm for dependencies relies on the fact that derived_vars is already ordered correctly,
# and unused variables can simply be removed (below), without changing the order;
# a stand-alone dependency resolution would require soring the derived_vars in order of execution
# consolidate lists
for devar in derived_vars.keys():
if devar not in devarset: del derived_vars[devar] # don't bother with this one...
varlist = list(varset) # order doesnt really matter... but whatever...
varlist.sort() # ... alphabetical order...
else:
logger.debug("{0:s} Creating new output file '{1:s}'.\n".format(pidstr,monthly_filepath))
monthly_dataset = nc.Dataset(tmp_monthly_filepath, 'w', format='NETCDF4') # open to start a new file (mode='w')
t0 = 1 # time index where we start (first month)
monthly_dataset.createDimension(time, size=None) # make time dimension unlimited
add_coord(monthly_dataset, time, data=None, dtype='i4', atts=dict(units='month since '+begindate)) # unlimited time dimension
# copy remaining dimensions to new datasets
if midmap is not None:
dimlist = [midmap.get(dim,dim) for dim in wrfout.dimensions.keys() if dim != wrftime]
else: dimlist = [dim for dim in wrfout.dimensions.keys() if dim != wrftime]
copy_dims(monthly_dataset, wrfout, dimlist=dimlist, namemap=dimmap, copy_coords=False) # don't have coordinate variables
# copy time-less variable to new datasets
copy_vars(monthly_dataset, wrfout, varlist=timeless, dimmap=dimmap, copy_data=True) # copy data
# create time-dependent variable in new datasets
copy_vars(monthly_dataset, wrfout, varlist=varlist, dimmap=dimmap, copy_data=False) # do not copy data - need to average
# change units of accumulated variables (per second)
for varname in acclist:
if varname in monthly_dataset.variables:
meanvar = monthly_dataset.variables[varname]
meanvar.units = meanvar.units + '/s' # units per second!
# also create variable for time-stamps in new datasets
if wrftimestamp in wrfout.variables:
copy_vars(monthly_dataset, wrfout, varlist=[wrftimestamp], dimmap=dimmap, copy_data=False) # do nto copy data - need to average
# create derived variables
for var in derived_vars.values():
var.checkPrerequisites(monthly_dataset) # as long as they are sorted correctly...
var.createVariable(monthly_dataset) # derived variables need to be added in order of computation
# copy global attributes
copy_ncatts(monthly_dataset, wrfout, prefix='') # copy all attributes (no need for prefix; all upper case are original)
# some new attributes
monthly_dataset.acc_diff_mode = 'simple' if lsmplDiff else 'centered'
monthly_dataset.description = 'wrf{0:s}_d{1:02d} monthly means'.format(filetype,ndom)
monthly_dataset.begin_date = begindate
monthly_dataset.experiment = exp
monthly_dataset.creator = 'Andre R. Erler'
# sync with file
monthly_dataset.sync()
## open/create daily output file
if ldaily:
# get datetime
begindatetime = dv.getTimeStamp(wrfout, 0, wrftimestamp)
# figure out filename
daily_file = dailypattern.format(filetype,ndom)
if lparallel: tmppfx = 'tmp_wrfavg_{:s}_'.format(pidstr[1:-1])
else: tmppfx = 'tmp_wrfavg_'
daily_filepath = outfolder + daily_file
tmp_daily_filepath = outfolder + tmppfx + daily_file
if os.path.exists(daily_filepath):
if loverwrite or os.path.getsize(daily_filepath) < 1e6: os.remove(daily_filepath)
# N.B.: NetCDF files smaller than 1MB are usually incomplete header fragments from a previous crashed job
if os.path.exists(tmp_daily_filepath) and not lrecover: os.remove(tmp_daily_filepath) # remove old temp files
if os.path.exists(daily_filepath):
raise NotImplementedError("Currently, updating of and appending to (sub-)daily output files is not supported.")
else:
logger.debug("{0:s} Creating new (sub-)daily output file '{1:s}'.\n".format(pidstr,daily_filepath))
daily_dataset = nc.Dataset(tmp_daily_filepath, 'w', format='NETCDF4') # open to start a new file (mode='w')
timestep_start = 0 # time step where we start (first tiem step)
daily_dataset.createDimension(time, size=None) # make time dimension unlimited
add_coord(daily_dataset, time, data=None, dtype='i8', atts=dict(units='seconds since '+begindatetime)) # unlimited time dimension
# copy remaining dimensions to new datasets
if midmap is not None:
dimlist = [midmap.get(dim,dim) for dim in wrfout.dimensions.keys() if dim != wrftime]
else: dimlist = [dim for dim in wrfout.dimensions.keys() if dim != wrftime]
copy_dims(daily_dataset, wrfout, dimlist=dimlist, namemap=dimmap, copy_coords=False) # don't have coordinate variables
# copy time-less variable to new datasets
copy_vars(daily_dataset, wrfout, varlist=timeless, dimmap=dimmap, copy_data=True) # copy data
# create time-dependent variable in new datasets
copy_vars(daily_dataset, wrfout, varlist=daily_varlist, dimmap=dimmap, copy_data=False) # do not copy data - need to resolve buckets and straighten time
# change units of accumulated variables (per second)
for varname in acclist:
if varname in daily_dataset.variables:
dayvar = daily_dataset.variables[varname]
dayvar.units = dayvar.units + '/s' # units per second!
# also create variable for time-stamps in new datasets
if wrftimestamp in wrfout.variables:
copy_vars(daily_dataset, wrfout, varlist=[wrftimestamp], dimmap=dimmap, copy_data=False) # do not copy data - need to straighten out time axis
if wrfxtime in wrfout.variables:
copy_vars(daily_dataset, wrfout, varlist=[wrfxtime], dimmap=dimmap, copy_data=False) # do not copy data - need to straighten out time axis
# create derived variables
for devarname in daily_derived_vars:
# don't need to check for prerequisites, since they are already being checked and computed for monthly output
derived_vars[devarname].createVariable(daily_dataset) # derived variables need to be added in order of computation
# copy global attributes
copy_ncatts(daily_dataset, wrfout, prefix='') # copy all attributes (no need for prefix; all upper case are original)
# some new attributes
daily_dataset.acc_diff_mode = 'simple' if lsmplDiff else 'centered'
daily_dataset.description = 'wrf{0:s}_d{1:02d} post-processed timestep output'.format(filetype,ndom)
daily_dataset.begin_date = begindatetime
daily_dataset.experiment = exp
daily_dataset.creator = 'Andre R. Erler'
# sync with file
daily_dataset.sync()
## construct dependencies
# update linearity: dependencies of non-linear variables have to be treated as non-linear themselves
lagain = True
# parse through dependencies until nothing changes anymore
while lagain:
lagain = False
for dename,devar in derived_vars.items():
# variables for daily output can be treated as non-linear, so that they are computed at the native timestep
if ldaily and dename in daily_derived_vars: devar.linear = False
if not devar.linear:
# make sure all dependencies are also treated as non-linear
for pq in devar.prerequisites:
if pq in derived_vars and derived_vars[pq].linear:
lagain = True # indicate modification
derived_vars[pq].linear = False
# construct dependency set (should include extrema now)
pqset = set().union(*[devar.prerequisites for devar in derived_vars.values() if not devar.linear])
if ldaily:
# daily output variables need to be treated as prerequisites, so that full timestep fields are loaded for bucket variables
pqset |= set(daily_varlist)
cset = set().union(*[devar.constants for devar in derived_vars.values() if devar.constants is not None])
# initialize dictionary for temporary storage
tmpdata = dict() # not allocated - use sparingly
# load constants, if necessary
const = dict()
lconst = len(cset) > 0
if lconst:
constfile = infolder+constpattern.format(ndom)
if not os.path.exists(constfile): constfile += '.nc' # try with extension
if not os.path.exists(constfile): raise IOError("No constants file found! ({:s})".format(constfile))
logger.debug("\n{0:s} Opening constants file '{1:s}'.\n".format(pidstr,constfile))
wrfconst = nc.Dataset(constfile, 'r', format='NETCDF4')
# constant variables
for cvar in cset:
if cvar in wrfconst.variables: const[cvar] = wrfconst.variables[cvar][:]
elif cvar in wrfconst.ncattrs(): const[cvar] = wrfconst.getncattr(cvar)
else: raise ValueError("Constant variable/attribute '{:s}' not found in constants file '{:s}'.".format(cvar,constfile))
else: const = None
# check axes order of prerequisits and constants
for devar in derived_vars.values():
for pq in devar.prerequisites:
# get dimensions of prerequisite
if pq in varlist: pqax = wrfout.variables[pq].dimensions
elif lconst and pq in wrfconst.variables: pqax = wrfconst.variables[pq].dimensions
elif lconst and pq in const: pqax = () # a scalar value, i.e. no axes
elif pq in derived_vars: pqax = derived_vars[pq].axes
else: raise ValueError("Prerequisite '{:s} for variable '{:s}' not found!".format(pq,devar.name))
# check axes for consistent order
index = -1
for ax in devar.axes:
if ax in pqax:
idx = pqax.index(ax)
if idx > index: index = idx
else: raise IndexError("The axis order of '{:s}' and '{:s}' is inconsistent - this can lead to unexpected results!".format(devar.name,pq))
# announcement: format title string and print
varstr = ''; devarstr = '' # make variable list, also for derived variables
for var in varlist: varstr += '{}, '.format(var)
for devar in derived_vars.values(): devarstr += '%s, '%devar.name
titlestr = '\n\n{0:s} *** Processing wrf{1:s} files for domain {2:d}. ***'.format(pidstr,filetype,ndom)
titlestr += '\n (monthly means from {0:s} to {1:s}, incl.)'.format(begindate,enddate)
if varstr: titlestr += '\n Variable list: {0:s}'.format(str(varstr),)
else: titlestr += '\n Variable list: None'
if devarstr: titlestr += '\n Derived variables: {0:s}'.format(str(devarstr),)
# print meta info (print everything in one chunk, so output from different processes does not get mangled)
logger.info(titlestr)
# extend time dimension in monthly average
if (endyear < beginyear) or (endyear == beginyear and endmonth < beginmonth):
raise DateError("End date is before begin date: {:04d}-{:02d} < {:04d}-{:02d}".format(endyear,endmonth,beginyear,beginmonth))
times = np.arange(t0,t0+(endyear-beginyear)*12+endmonth-beginmonth+1)
# handling of time intervals for accumulated variables
if wrfxtime in wrfout.variables:
lxtime = True # simply compute differences from XTIME (assuming minutes)
time_desc = wrfout.variables[wrfxtime].description
assert time_desc.startswith("minutes since "), time_desc
assert "simulation start" in time_desc or begindate in time_desc or '**' in time_desc, time_desc
# N.B.: the last check (**) is for cases where the date in WRF is garbled...
if t0 == 1 and not wrfout.variables[wrfxtime][0] == 0:
raise ValueError( 'XTIME in first input file does not start with 0!\n'+
'(this can happen, when the first input file is missing)' )
elif wrftimestamp in wrfout.variables:
lxtime = False # interpret timestamp in Times using datetime module
else: raise TypeError
# check if there is a missing_value flag
if 'P_LEV_MISSING' in wrfout.ncattrs():
missing_value = wrfout.P_LEV_MISSING # usually -999.
# N.B.: this is only used in plev3d files, where pressure levels intersect the ground
else: missing_value = None
# allocate fields
data = dict() # temporary data arrays
for var in varlist:
tmpshape = list(wrfout.variables[var].shape)
del tmpshape[wrfout.variables[var].dimensions.index(wrftime)] # allocated arrays have no time dimension
assert len(tmpshape) == len(wrfout.variables[var].shape) -1
data[var] = np.zeros(tmpshape, dtype=dtype_float) # allocate
#if missing_value is not None:
# data[var] += missing_value # initialize with missing value
# allocate derived data arrays (for non-linear variables)
pqdata = {pqvar:None for pqvar in pqset} # temporary data array holding instantaneous values to compute derived variables
# N.B.: since data is only referenced from existing arrays, allocation is not necessary
dedata = dict() # non-linear derived variables
# N.B.: linear derived variables are computed directly from the monthly averages
for dename,devar in derived_vars.items():
if not devar.linear:
tmpshape = [len(wrfout.dimensions[ax]) for ax in devar.axes if ax != time] # infer shape
assert len(tmpshape) == len(devar.axes) -1 # no time dimension
dedata[dename] = np.zeros(tmpshape, dtype=dtype_float) # allocate
# prepare computation of monthly means
filecounter = 0 # number of wrfout file currently processed
i0 = t0-1 # index position we write to: i = i0 + n (zero-based, of course)
if ldaily: daily_start_idx = daily_end_idx = timestep_start # for each file cycle, the time index where to write the data
## start loop over month
if lparallel: progressstr = '' # a string printing the processed dates
else: logger.info('\n Processed dates:')
try:
# loop over month and progressively stepping through input files
for n,meantime in enumerate(times):
# meantime: (complete) month since simulation start
lasttimestamp = None # carry over start time, when moving to the next file (defined below)
# N.B.: when moving to the next file, the script auto-detects and resets this property, no need to change here!
# However (!) it is necessary to reset this for every month, because it is not consistent!
# extend time array / month counter
meanidx = i0 + n
if meanidx == len(monthly_dataset.variables[time]):
lskip = False # append next data point / time step
elif loverwrite or laddnew or lrecalc:
lskip = False # overwrite this step or add data point for new variables
elif meanidx == len(monthly_dataset.variables[time])-1:
if lrecover or monthly_dataset.variables[time][meanidx] == -1:
lskip = False # recompute last step, because it may be incomplete
else: lskip = True
else:
lskip = True # skip this step, but we still have to verify the timing
# check if we are overwriting existing data
if meanidx != len(monthly_dataset.variables[time]):
assert meanidx < len(monthly_dataset.variables[time])
assert meantime == monthly_dataset.variables[time][meanidx] or monthly_dataset.variables[time][meanidx] == -1
# N.B.: writing records is delayed to avoid incomplete records in case of a crash
# current date
currentyear, currentmonth = divmod(n+beginmonth-1,12)
currentyear += beginyear; currentmonth +=1
# sanity checks
assert meanidx + 1 == meantime
currentdate = '{0:04d}-{1:02d}'.format(currentyear,currentmonth)
# determine appropriate start index
wrfstartidx = 0
while currentdate > str(nc.chartostring(wrfout.variables[wrftimestamp][wrfstartidx,0:7])):
wrfstartidx += 1 # count forward
if wrfstartidx != 0: logger.debug('\n{0:s} {1:s}: Starting month at index {2:d}.'.format(pidstr, currentdate, wrfstartidx))
# save WRF time-stamp for beginning of month for the new file, for record
firsttimestamp_chars = wrfout.variables[wrftimestamp][wrfstartidx,:]
#logger.debug('\n{0:s}{1:s}-01_00:00:00, {2:s}'.format(pidstr, currentdate, str(nc.chartostring(wrfout.variables[wrftimestamp][wrfstartidx,:])))
if '{0:s}-01_00:00:00'.format(currentdate,) == str(nc.chartostring(wrfout.variables[wrftimestamp][wrfstartidx,:])):
pass # proper start of the month
elif meanidx == 0 and '{0:s}-01_06:00:00'.format(currentdate,) == str(nc.chartostring(wrfout.variables[wrftimestamp][wrfstartidx,:])):
pass # for some reanalysis... but only at start of simulation
else: raise DateError("{0:s} Did not find first day of month to compute monthly average.".format(pidstr) +
"file: {0:s} date: {1:s}-01_00:00:00".format(monthly_file,currentdate))
# prepare summation of output time steps
lcomplete = False #
ntime = 0 # accumulated output time steps
# time when accumulation starts (in minutes)
# N.B.: the first value is saved as negative, so that adding the last value yields a positive interval
if lxtime: xtime = -1 * wrfout.variables[wrfxtime][wrfstartidx] # minutes
monthlytimestamps = [] # list of timestamps, also used for time period calculation
# clear temporary arrays
for varname,var in data.items(): # base variables
data[varname] = np.zeros(var.shape, dtype=dtype_float) # reset to zero
for dename,devar in dedata.items(): # derived variables
dedata[dename] = np.zeros(devar.shape, dtype=dtype_float) # reset to zero
## loop over files and average
while not lcomplete:
# determine valid end index by checking dates from the end counting backwards
# N.B.: start index is determined above (if a new file was opened in the same month,
# the start index is automatically set to 0 or 1 when the file is opened, below)
wrfendidx = len(wrfout.dimensions[wrftime])-1
while wrfendidx >= 0 and currentdate < str(nc.chartostring(wrfout.variables[wrftimestamp][wrfendidx,0:7])):
if not lcomplete: lcomplete = True # break loop over file if next month is in this file (critical!)
wrfendidx -= 1 # count backwards
#if wrfendidx < len(wrfout.dimensions[wrftime])-1: # check if count-down actually happened
wrfendidx += 1 # reverse last step so that counter sits at first step of next month
# N.B.: if this is not the last file, there was no iteration and wrfendidx should be the length of the the file;
# in this case, wrfendidx is only used to define Python ranges, which are exclusive to the upper boundary;
# if the first date in the file is already the next month, wrfendidx will be 0 and this is the final step;
assert wrfendidx >= wrfstartidx # i.e. wrfendidx = wrfstartidx = 0 is an empty step to finalize accumulation
assert lcomplete or wrfendidx == len(wrfout.dimensions[wrftime])
# if this is the last file and the month is not complete, we have to forcefully terminate
if filecounter == len(filelist)-1 and not lcomplete:
lcomplete = True # end loop
lskip = True # don't write results for this month!
if not lskip:
## compute monthly averages
# loop over variables
for varname in varlist:
logger.debug('{0:s} {1:s}'.format(pidstr,varname))
if varname not in wrfout.variables:
logger.info("{:s} Variable {:s} missing in file '{:s}' - filling with NaN!".format(pidstr,varname,filelist[filecounter]))
data[varname] *= np.NaN # turn everything into NaN, if variable is missing
# N.B.: this can happen, when an output stream was reconfigured between cycle steps
else:
var = wrfout.variables[varname]
tax = var.dimensions.index(wrftime) # index of time axis
slices = [slice(None)]*len(var.shape)
# construct informative IOError message
ioerror = "An Error occcured in file '{:s}'; variable: '{:s}'\n('{:s}')".format(filelist[filecounter], varname, infolder)
# decide how to average
## Accumulated Variables
if varname in acclist:
if missing_value is not None:
raise NotImplementedError("Can't handle accumulated variables with missing values yet.")
# compute mean as difference between end points; normalize by time difference
if ntime == 0: # first time step of the month
slices[tax] = wrfstartidx # relevant time interval
try: tmp = var.__getitem__(slices) # get array
except: raise IOError(ioerror) # informative IO Error
if acclist[varname] is not None: # add bucket level, if applicable
bkt = wrfout.variables[bktpfx+varname]
tmp += bkt.__getitem__(slices) * acclist[varname]
# check that accumulated fields at the beginning of the simulation are zero
if meanidx == 0 and wrfstartidx == 0:
# note that if we are skipping the first step, there is no check
if np.max(tmp) != 0 or np.min(tmp) != 0:
raise ValueError( 'Accumulated fields were not initialized with zero!\n' +
'(this can happen, when the first input file is missing)' )
data[varname] = -1 * tmp # so we can do an in-place operation later
# N.B.: both, begin and end, can be in the same file, hence elif is not appropriate!
if lcomplete: # last step
slices[tax] = wrfendidx # relevant time interval
try: tmp = var.__getitem__(slices) # get array
except: raise IOError(ioerror) # informative IO Error
if acclist[varname] is not None: # add bucket level, if applicable
bkt = wrfout.variables[bktpfx+varname]
tmp += bkt.__getitem__(slices) * acclist[varname]
data[varname] += tmp # the starting data is already negative
# if variable is a prerequisit to others, compute instantaneous values
if varname in pqset:
# compute mean via sum over all elements; normalize by number of time steps
if lsmplDiff: slices[tax] = slice(wrfstartidx,wrfendidx+1) # load longer time interval for diff
else: slices[tax] = slice(wrfstartidx,wrfendidx) # relevant time interval
try: tmp = var.__getitem__(slices) # get array
except: raise IOError(ioerror) # informative IO Error
if acclist[varname] is not None: # add bucket level, if applicable
bkt = wrfout.variables[bktpfx+varname]
tmp = tmp + bkt.__getitem__(slices) * acclist[varname]
if lsmplDiff: pqdata[varname] = np.diff(tmp, axis=tax) # simple differences
else: pqdata[varname] = dv.ctrDiff(tmp, axis=tax, delta=1) # normalization comes later
##
## *** daily values for bucket variables are generated here, ***
## *** but should we really use *centered* differences??? ***
##
elif varname[0:len(bktpfx)] == bktpfx:
pass # do not process buckets
## Normal Variables
else:
# skip "empty" steps (only needed to difference accumulated variables)
if wrfendidx > wrfstartidx:
# compute mean via sum over all elements; normalize by number of time steps
slices[tax] = slice(wrfstartidx,wrfendidx) # relevant time interval
try: tmp = var.__getitem__(slices) # get array
except: raise IOError(ioerror) # informative IO Error
if missing_value is not None:
# N.B.: missing value handling is really only necessary when missing values are time-dependent
tmp = np.where(tmp == missing_value, np.NaN, tmp) # set missing values to NaN
#tmp = ma.masked_equal(tmp, missing_value, copy=False) # mask missing values
data[varname] = data[varname] + tmp.sum(axis=tax) # add to sum
# N.B.: in-place operations with non-masked array destroy the mask, hence need to use this
# keep data in memory if used in computation of derived variables
if varname in pqset: pqdata[varname] = tmp
## compute derived variables
# but first generate a list of timestamps
if lcomplete: tmpendidx = wrfendidx
else: tmpendidx = wrfendidx -1 # end of file
# assemble list of time stamps
currenttimestamps = [] # relevant timestamps in this file
for i in range(wrfstartidx,tmpendidx+1):
timestamp = str(nc.chartostring(wrfout.variables[wrftimestamp][i,:]))
currenttimestamps.append(timestamp)
monthlytimestamps.extend(currenttimestamps) # add to monthly collection
# write daily timestamps
if ldaily:
nsteps = wrfendidx - wrfstartidx
daily_start_idx = daily_end_idx # from previous step
daily_end_idx = daily_start_idx + nsteps
# set time values to -1, to inticate they are being worked on
daily_dataset.variables[time][daily_start_idx:daily_end_idx] = -1
ncvar = None; vardata = None # dummies, to prevent crash later on, if varlist is empty
# copy timestamp and xtime data
daily_dataset.variables[wrftimestamp][daily_start_idx:daily_end_idx,:] = wrfout.variables[wrftimestamp][wrfstartidx:wrfendidx,:]
if lxtime:
daily_dataset.variables[wrfxtime][daily_start_idx:daily_end_idx] = wrfout.variables[wrfxtime][wrfstartidx:wrfendidx]
daily_dataset.sync()
# normalize accumulated pqdata with output interval time
if wrfendidx > wrfstartidx:
assert tmpendidx > wrfstartidx, 'There should never be a single value in a file: wrfstartidx={:d}, wrfendidx={:d}, lcomplete={:s}'.format(wrfstartidx,wrfendidx,str(lcomplete))
# compute time delta
delta = dv.calcTimeDelta(currenttimestamps)
if lxtime:
xdelta = wrfout.variables[wrfxtime][tmpendidx] - wrfout.variables[wrfxtime][wrfstartidx]
xdelta *= 60. # convert minutes to seconds
if delta != xdelta: raise ValueError("Time calculation from time stamps and model time are inconsistent: {:f} != {:f}".format(delta,xdelta))
delta /= float(tmpendidx - wrfstartidx) # the average interval between output time steps
# loop over time-step data
for pqname,pqvar in pqdata.items():
if pqname in acclist: pqvar /= delta # normalize
# write to daily file
if ldaily:
# loop over variables and save data arrays
for varname in daily_varlist:
ncvar = daily_dataset.variables[varname] # destination variable in daily output
vardata = pqdata[varname] # timestep data
if missing_value is not None: # make sure the missing value flag is preserved
vardata = np.where(np.isnan(vardata), missing_value, vardata)
ncvar.missing_value = missing_value # just to make sure
if ncvar.ndim > 1: ncvar[daily_start_idx:daily_end_idx,:] = vardata # here time is always the outermost index
else: ncvar[daily_start_idx:daily_end_idx] = vardata
daily_dataset.sync()
# loop over derived variables
# special treatment for certain string variables
if 'Times' in pqset: pqdata['Times'] = currenttimestamps[:wrfendidx-wrfstartidx] # need same length as actual time dimension
logger.debug('\n{0:s} Available prerequisites: {1:s}'.format(pidstr, str(list(pqdata.keys()))))
for dename,devar in derived_vars.items():
if not devar.linear: # only non-linear ones here, linear one at the end
logger.debug('{0:s} {1:s} {2:s}'.format(pidstr, dename, str(devar.prerequisites)))
tmp = devar.computeValues(pqdata, aggax=tax, delta=delta, const=const, tmp=tmpdata) # possibly needed as pre-requisite
dedata[dename] = devar.aggregateValues(tmp, aggdata=dedata[dename], aggax=tax)
# N.B.: in-place operations with non-masked array destroy the mask, hence need to use this
if dename in pqset: pqdata[dename] = tmp
# save to daily output
if ldaily:
if dename in daily_derived_vars:
ncvar = daily_dataset.variables[dename] # destination variable in daily output
vardata = tmp
if missing_value is not None: # make sure the missing value flag is preserved
vardata = np.where(np.isnan(vardata), missing_value, vardata)
ncvar.missing_value = missing_value # just to make sure
if ncvar.ndim > 1: ncvar[daily_start_idx:daily_end_idx,:] = vardata # here time is always the outermost index
else: ncvar[daily_start_idx:daily_end_idx] = vardata
# N.B.: missing values should be handled implicitly, following missing values in pre-requisites
del tmp # memory hygiene
if ldaily:
# add time in seconds, based on index and time delta
daily_dataset.variables[time][daily_start_idx:daily_end_idx] = np.arange(daily_start_idx,daily_end_idx, dtype='i8')*int(delta)
daily_dataset.end_date = dv.getTimeStamp(wrfout, wrfendidx-1, wrftimestamp) # update current end date
# N.B.: adding the time coordinate and attributes finalized this step
# sync data and clear memory
daily_dataset.sync(); daily_dataset.close() # sync and close dataset
del daily_dataset, ncvar, vardata # remove all other references to data
gc.collect() # clean up memory
# N.B.: the netCDF4 module keeps all data written to a netcdf file in memory; there is no flush command
daily_dataset = nc.Dataset(tmp_daily_filepath, mode='a', format='NETCDF4') # re-open to append more data (mode='a')
# N.B.: flushing the mean file here prevents repeated close/re-open when no data was written (i.e.
# the month was skiped); only flush memory when data was actually written.
# increment counters
ntime += wrfendidx - wrfstartidx
if lcomplete:
# N.B.: now wrfendidx should be a valid time step
# check time steps for this month
laststamp = monthlytimestamps[0]
for timestamp in monthlytimestamps[1:]:
if laststamp >= timestamp:
raise DateError('Timestamps not in order, or repetition: {:s}'.format(timestamp))
laststamp = timestamp
# calculate time period and check against model time (if available)
timeperiod = dv.calcTimeDelta(monthlytimestamps)
if lxtime:
xtime += wrfout.variables[wrfxtime][wrfendidx] # get final time interval (in minutes)
xtime *= 60. # convert minutes to seconds
if timeperiod != xtime:
logger.info("Time calculation from time stamps and model time are inconsistent: {:f} != {:f}".format(timeperiod,xtime))
# two possible ends: month is done or reached end of file
# if we reached the end of the file, open a new one and go again
if not lcomplete:
# N.B.: here wrfendidx is not a valid time step, but the length of the file, i.e. wrfendidx-1 is the last valid time step
lasttimestamp = str(nc.chartostring(wrfout.variables[wrftimestamp][wrfendidx-1,:])) # needed to determine, if first timestep is the same as last
assert lskip or lasttimestamp == monthlytimestamps[-1]
# lasttimestep is also used for leap-year detection later on
assert len(wrfout.dimensions[wrftime]) == wrfendidx, (len(wrfout.dimensions[wrftime]),wrfendidx) # wrfendidx should be the length of the file, not the last index!
## find first timestep (compare to last of previous file) and (re-)set time step counter
# initialize search
tmptimestamp = lasttimestamp; filelen1 = len(wrfout.dimensions[wrftime]) - 1; wrfstartidx = filelen1;
while tmptimestamp <= lasttimestamp:
if wrfstartidx < filelen1:
wrfstartidx += 1 # step forward in current file
else:
# open next file, if we reach the end
wrfout.close() # close file
#del wrfout; gc.collect() # doesn't seem to work here - strange error
# N.B.: filecounter +1 < len(filelist) is already checked above
filecounter += 1 # move to next file
if filecounter < len(filelist):
logger.debug("\n{0:s} Opening input file '{1:s}'.\n".format(pidstr,filelist[filecounter]))
wrfout = nc.Dataset(infolder+filelist[filecounter], 'r', format='NETCDF4') # ... and open new one
filelen1 = len(wrfout.dimensions[wrftime]) - 1 # length of new file
wrfstartidx = 0 # reset index
# check consistency of missing value flag
assert missing_value is None or missing_value == wrfout.P_LEV_MISSING
else: break # this is not really tested...
tmptimestamp = str(nc.chartostring(wrfout.variables[wrftimestamp][wrfstartidx,:]))
# some checks
firsttimestamp = str(nc.chartostring(wrfout.variables[wrftimestamp][0,:]))
error_string = "Inconsistent time-stamps between files:\n lasttimestamp='{:s}', firsttimestamp='{:s}', wrfstartidx={:d}"
if firsttimestamp == lasttimestamp: # skip the initialization step (was already processed in last step)
if wrfstartidx != 1: raise DateError(error_string.format(lasttimestamp, firsttimestamp, wrfstartidx))
if firsttimestamp > lasttimestamp: # no duplicates: first timestep in next file was not present in previous file
if wrfstartidx != 0: raise DateError(error_string.format(lasttimestamp, firsttimestamp, wrfstartidx))
if firsttimestamp < lasttimestamp: # files overlap: count up to next timestamp in sequence
#if wrfstartidx == 2: warn(error_string.format(lasttimestamp, firsttimestamp, wrfstartidx))
if wrfstartidx == 0: raise DateError(error_string.format(lasttimestamp, firsttimestamp, wrfstartidx))
else: # month complete
# print feedback (the current month) to indicate completion
if lparallel: progressstr += '{0:s}, '.format(currentdate) # bundle output in parallel mode
else: logger.info('{0:s},'.format(currentdate)) # serial mode
# clear temporary storage
if lcarryover:
for devar in list(derived_vars.values()):
if not (devar.tmpdata is None or devar.carryover):
if devar.tmpdata in tmpdata: del tmpdata[devar.tmpdata]
else: tmpdata = dict() # reset entire temporary storage
# N.B.: now wrfendidx is a valid timestep, but indicates the first of the next month
lasttimestamp = str(nc.chartostring(wrfout.variables[wrftimestamp][wrfendidx,:])) # this should be the first timestep of the next month
assert lskip or lasttimestamp == monthlytimestamps[-1]
# open next file (if end of month and file coincide)
if wrfendidx == len(wrfout.dimensions[wrftime])-1: # reach end of file
## find first timestep (compare to last of previous file) and (re-)set time step counter
# initialize search
tmptimestamp = lasttimestamp; filelen1 = len(wrfout.dimensions[wrftime]) - 1; wrfstartidx = filelen1;
while tmptimestamp <= lasttimestamp:
if wrfstartidx < filelen1:
wrfstartidx += 1 # step forward in current file
else:
# open next file, if we reach the end
wrfout.close() # close file
#del wrfout; gc.collect() # doesn't seem to work here - strange error
# N.B.: filecounter +1 < len(filelist) is already checked above
filecounter += 1 # move to next file
if filecounter < len(filelist):
logger.debug("\n{0:s} Opening input file '{1:s}'.\n".format(pidstr,filelist[filecounter]))
wrfout = nc.Dataset(infolder+filelist[filecounter], 'r', format='NETCDF4') # ... and open new one
filelen1 = len(wrfout.dimensions[wrftime]) - 1 # length of new file
wrfstartidx = 0 # reset index
# check consistency of missing value flag
assert missing_value is None or missing_value == wrfout.P_LEV_MISSING
else: break # this is not really tested...
tmptimestamp = str(nc.chartostring(wrfout.variables[wrftimestamp][wrfstartidx,:]))
# N.B.: same code as in "not complete" section
# wrfout.close() # close file
# #del wrfout; gc.collect() # doesn't seem to work here - strange error
# filecounter += 1 # move to next file
# if filecounter < len(filelist):
# logger.debug("\n{0:s} Opening input file '{1:s}'.\n".format(pidstr,filelist[filecounter]))
# wrfout = nc.Dataset(infolder+filelist[filecounter], 'r', format='NETCDF4') # ... and open new one
# firsttimestamp = str(nc.chartostring(wrfout.variables[wrftimestamp][0,:])) # check first timestep (compare to last of previous file)
# wrfstartidx = 0 # always use initialization step (but is reset above anyway)
# if firsttimestamp != lasttimestamp:
# raise NotImplementedError, "If the first timestep of the next month is the last timestep in the file, it has to be duplicated in the next file."
## now the the loop over files has terminated and we need to normalize and save the results
if not lskip:
# extend time axis
monthly_dataset.variables[time][meanidx] = -1 # mark timestep in progress
ncvar = None; vardata = None # dummies, to prevent crash later on, if varlist is empty
# loop over variable names
for varname in varlist:
vardata = data[varname]
# decide how to normalize
if varname in acclist: vardata /= timeperiod
else: vardata /= ntime
# save variable
ncvar = monthly_dataset.variables[varname] # this time the destination variable
if missing_value is not None: # make sure the missing value flag is preserved
vardata = np.where(np.isnan(vardata), missing_value, vardata)
ncvar.missing_value = missing_value # just to make sure
if ncvar.ndim > 1: ncvar[meanidx,:] = vardata # here time is always the outermost index
else: ncvar[meanidx] = vardata
# compute derived variables
#logger.debug('\n{0:s} Derived Variable Stats: (mean/min/max)'.format(pidstr))
for dename,devar in derived_vars.items():
if devar.linear:
vardata = devar.computeValues(data) # compute derived variable now from averages
elif devar.normalize:
vardata = dedata[dename] / ntime # no accumulated variables here!
else: vardata = dedata[dename] # just the data...
# not all variables are normalized (e.g. extrema)
#if ldebug:
# mmm = (float(np.nanmean(vardata)),float(np.nanmin(vardata)),float(np.nanmax(vardata)),)
# logger.debug('{0:s} {1:s}, {2:f}, {3:f}, {4:f}'.format(pidstr,dename,*mmm))
data[dename] = vardata # add to data array, so that it can be used to compute linear variables
# save variable
ncvar = monthly_dataset.variables[dename] # this time the destination variable
if missing_value is not None: # make sure the missing value flag is preserved
vardata = np.where(np.isnan(vardata), missing_value, vardata)
ncvar.missing_value = missing_value # just to make sure
if ncvar.ndim > 1: ncvar[meanidx,:] = vardata # here time is always the outermost index
else: ncvar[meanidx] = vardata
#raise dv.DerivedVariableError, "%s Derived variable '%s' is not linear."%(pidstr,devar.name)
# update current end date
monthly_dataset.end_date = str(nc.chartostring(firsttimestamp_chars[:10])) # the date of the first day of the last included month
monthly_dataset.variables[wrftimestamp][meanidx,:] = firsttimestamp_chars
monthly_dataset.variables[time][meanidx] = meantime # update time axis (last action)
# sync data and clear memory
monthly_dataset.sync(); monthly_dataset.close() # sync and close dataset
del monthly_dataset, ncvar, vardata # remove all other references to data
gc.collect() # clean up memory
# N.B.: the netCDF4 module keeps all data written to a netcdf file in memory; there is no flush command
monthly_dataset = nc.Dataset(tmp_monthly_filepath, mode='a', format='NETCDF4') # re-open to append more data (mode='a')
# N.B.: flushing the mean file here prevents repeated close/re-open when no data was written (i.e.
# the month was skiped); only flush memory when data was actually written.
ec = 0 # set zero exit code for this operation
except Exception:
# report error
logger.exception('\n # {0:s} WARNING: an Error occured while stepping through files! '.format(pidstr)+
'\n # Last State: month={0:d}, variable={1:s}, file={2:s}'.format(meanidx,varname,filelist[filecounter])+
'\n # Saving current data and exiting\n')
wrfout.close()
#logger.exception(pidstr) # print stack trace of last exception and current process ID
ec = 1 # set non-zero exit code
# N.B.: this enables us to still close the file!
## here the loop over months finishes and we can close the output file
# print progress
# save to file
if not lparallel: logger.info('') # terminate the line (of dates)
else: logger.info('\n{0:s} Processed dates: {1:s}'.format(pidstr, progressstr))
monthly_dataset.sync()
logger.info("\n{0:s} Writing monthly output to: {1:s}\n('{2:s}')\n".format(pidstr, monthly_file, monthly_filepath))
if ldaily:
daily_dataset.sync()
logger.info("\n{0:s} Writing (sub-)daily output to: {1:s}\n('{2:s}')\n".format(pidstr, daily_file, daily_filepath))
# Finalize: close files and rename to proper names, clean up
monthly_dataset.close() # close NetCDF file
os.rename(tmp_monthly_filepath,monthly_filepath) # rename file to proper name
del monthly_dataset, data # clean up memory
if ldaily:
daily_dataset.close() # close NetCDF file
os.rename(tmp_daily_filepath,daily_filepath) # rename file to proper name
del daily_dataset # clean up memory
gc.collect()
# return exit code
return ec
## now begin execution
if __name__ == '__main__':
  # Entry point: assemble the list of WRF output files for each filetype/domain
  # combination and dispatch the averaging jobs to a (parallel) worker pool.
  # All flags and patterns referenced here are defined at module level.
  # print settings
  print('')
  print('OVERWRITE: {:s}, RECOVER: {:s}, CARRYOVER: {:s}, SMPLDIFF: {:s}'.format(
      str(loverwrite), str(lrecover), str(lcarryover), str(lsmplDiff)))
  print('DERIVEDONLY: {:s}, ADDNEW: {:s}, RECALC: {:s}'.format(
      str(lderivedonly), str(laddnew), str(recalcvars) if lrecalc else str(lrecalc)))
  print('DAILY: {:s}, FILETYPES: {:s}, DOMAINS: {:s}'.format(str(lglobaldaily),str(filetypes),str(domains)))
  print('THREADS: {:s}, DEBUG: {:s}'.format(str(NP),str(ldebug)))
  print('')
  # compile regular expression, used to infer start and end dates and month (later, during computation)
  datestr = '{0:s}-{1:s}-{2:s}'.format(yearstr,monthstr,daystr)
  datergx = re.compile(datestr)
  # get file list
  # N.B.: the domain placeholder must be a *raw* string, so that '\d' reaches
  #       the regex engine as a digit class; in a normal string literal '\d'
  #       is an invalid escape sequence (deprecated, and an error in future
  #       Python versions)
  wrfrgx = re.compile(inputpattern.format('.*',r'\d\d',datestr,)) # for initial search (all filetypes)
  # regular expression to match the name pattern of WRF timestep output files
  masterlist = [wrfrgx.match(filename) for filename in os.listdir(infolder)] # list folder and match
  masterlist = [match.group() for match in masterlist if match is not None] # assemble valid file list
  if len(masterlist) == 0:
    raise IOError('No matching WRF output files found for date: {0:s}'.format(datestr))
  ## loop over filetypes and domains to construct job list
  args = [] # list of (filelist, filetype, domain) tuples, one per job
  for filetype in filetypes:
    # make list of files
    filelist = []
    for domain in domains:
      typergx = re.compile(inputpattern.format(filetype,"{:02d}".format(domain), datestr))
      # N.B.: domain has to be inserted as string, because above it is replaced by a regex
      # regular expression to also match type and domain index
      filelist = [typergx.match(filename) for filename in masterlist] # list folder and match
      filelist = [match.group() for match in filelist if match is not None] # assemble valid file list
      filelist.sort() # now, when the list is shortest, we can sort...
      # N.B.: sort alphabetically, so that files are in temporally sequence
      # now put everything into the lists
      if len(filelist) > 0:
        args.append( (filelist, filetype, domain) )
      else:
        print("Can not process filetype '{:s}' (domain {:d}): no source files.".format(filetype,domain))
  print('\n')
  # call parallel execution function
  kwargs = dict() # no keyword arguments
  ec = asyncPoolEC(processFileList, args, kwargs, NP=NP, ldebug=ldebug, ltrialnerror=True)
  # exit with number of failures plus 10 as exit code
  exit(int(10+ec) if ec > 0 else 0)
| gpl-3.0 |
cornell-brg/pymtl | pymtl/tools/simulation/SimulationTool.py | 8 | 9013 | #=======================================================================
# SimulationTool.py
#=======================================================================
# Tool for simulating hardware models.
#
# This module contains classes which construct a model simulator for
# execution in the Python interpreter.
from __future__ import print_function
import pprint
import collections
import inspect
import warnings
import sim_utils as sim
from sys import flags
from SimulationMetrics import SimulationMetrics, DummyMetrics
#-----------------------------------------------------------------------
# SimulationTool
#-----------------------------------------------------------------------
# User visible class implementing a tool for simulating hardware models.
#
# This class takes a model instance and creates a simulator for
# execution in the Python interpreter.
class SimulationTool( object ):
#---------------------------------------------------------------------
# __init__
#---------------------------------------------------------------------
# Construct a simulator based on the provided model.
  def __init__( self, model, collect_metrics = False ):
    """Construct a simulator for the provided (already elaborated) model.

    If *collect_metrics* is True, detailed statistics are gathered via
    SimulationMetrics; otherwise a no-op DummyMetrics collector is used.
    """
    # Check that the model has been elaborated
    if not model.is_elaborated():
      raise Exception( "cannot initialize {0} tool.\n"
                       "Provided model has not been elaborated yet!!!"
                       "".format( self.__class__.__name__ ) )
    self.model = model
    self.ncycles = 0                 # number of clock cycles simulated so far
    self._event_queue = EventQueue() # combinational blocks pending evaluation
    self._sequential_blocks = []     # rising-edge triggered callables
    self._register_queue = []        # registers with shadow state to flop
    self._current_func = None
    self._nets = None # TODO: remove me
    #self._DEBUG_signal_cbs = collections.defaultdict(list)
    # Only collect metrics if they are enabled, otherwise replace
    # with a dummy collection class.
    if collect_metrics:
      self.metrics = SimulationMetrics()
    else:
      self.metrics = DummyMetrics()
    # If the -O flag was passed to Python, use the perf implementation
    # of cycle, otherwise use the dev version.
    # N.B.: _perf_eval/_dev_eval are presumably defined further down in this
    #       class (not visible here) -- verify they exist before renaming.
    if flags.optimize:
      self.cycle = self._perf_cycle
      self.eval_combinational = self._perf_eval
    else:
      self.cycle = self._dev_cycle
      self.eval_combinational = self._dev_eval
    # Construct a simulator for the provided model.
    # N.B.: the order of these registration calls appears significant
    #       (signal values must be inserted before callbacks are created).
    signals = sim.collect_signals( model )
    nets, slice_connections = sim.signals_to_nets( signals )
    sequential_blocks = sim.register_seq_blocks( model )
    sim.insert_signal_values( self, nets )
    sim.register_comb_blocks ( model, self._event_queue )
    sim.create_slice_callbacks( slice_connections, self._event_queue )
    sim.register_cffi_updates ( model )
    self._nets = nets
    self._sequential_blocks = sequential_blocks
    # Setup vcd dumping if it's configured
    if hasattr( model, 'vcd_file' ) and model.vcd_file:
      from vcd import VCDUtil
      VCDUtil( self, model.vcd_file )
#---------------------------------------------------------------------
# reset
#---------------------------------------------------------------------
# Sets the reset signal high and cycles the simulator.
def reset( self ):
self.model.reset.v = 1
self.cycle()
self.cycle()
self.model.reset.v = 0
#---------------------------------------------------------------------
# print_line_trace
#---------------------------------------------------------------------
# Print cycle number and line trace of model.
def print_line_trace( self ):
print( "{:>3}:".format( self.ncycles ), self.model.line_trace() )
#---------------------------------------------------------------------
# cycle
#---------------------------------------------------------------------
# Advances the simulator by a single clock cycle, executing all
# sequential @tick and @posedge_clk blocks defined in the design, as
# well as any @combinational blocks that have been added to the event
# queue.
#
# Note: see _debug_cycle and _perf_cycle for actual implementations.
def cycle( self ):
pass
#---------------------------------------------------------------------
# _debug_cycle
#---------------------------------------------------------------------
# Implementation of cycle() for use during develop-test-debug loops.
def _dev_cycle( self ):
# Call all events generated by input changes
self.eval_combinational()
# Clock generation needed by VCD tracing
self.model.clk.value = 0
self.model.clk.value = 1
# Distinguish between events caused by input vectors changing (above)
# and events caused by clocked logic (below).
self.metrics.start_tick()
# Call all rising edge triggered functions
for func in self._sequential_blocks:
func()
# Then flop the shadow state on all registers
while self._register_queue:
reg = self._register_queue.pop()
reg.flop()
# Call all events generated by synchronous logic
self.eval_combinational()
# Increment the simulator cycle count
self.ncycles += 1
# Tell the metrics module to prepare for the next cycle
self.metrics.incr_metrics_cycle()
#---------------------------------------------------------------------
# _perf_cycle
#---------------------------------------------------------------------
# Implementation of cycle() for use when benchmarking models.
def _perf_cycle( self ):
# Call all events generated by input changes
self.eval_combinational()
# Call all rising edge triggered functions
for func in self._sequential_blocks:
func()
# Then flop the shadow state on all registers
while self._register_queue:
reg = self._register_queue.pop()
reg.flop()
# Call all events generated by synchronous logic
self.eval_combinational()
# Increment the simulator cycle count
self.ncycles += 1
#---------------------------------------------------------------------
# eval_combinational
#---------------------------------------------------------------------
# Evaluate all combinational logic blocks currently in the eventqueue.
def eval_combinational( self ):
pass
#---------------------------------------------------------------------
# _debug_eval
#---------------------------------------------------------------------
# Implementation of eval_combinational() for use during
# develop-test-debug loops.
def _dev_eval( self ):
while self._event_queue.len():
self._current_func = func = self._event_queue.deq()
self.metrics.incr_comb_evals( func )
func()
self._current_func = None
#---------------------------------------------------------------------
# _perf_eval
#---------------------------------------------------------------------
# Implementation of eval_combinataional () for use when benchmarking
# models.
def _perf_eval( self ):
while self._event_queue.len():
self._current_func = func = self._event_queue.deq()
func()
self._current_func = None
#---------------------------------------------------------------------
# add_event
#---------------------------------------------------------------------
# Add an event to the simulator event queue for later execution.
#
# This function will check if the written SignalValue instance has any
# registered events (functions decorated with @combinational), and if
# so, adds them to the event queue.
def add_event( self, signal_value ):
# TODO: debug_event
#print(" ADDEVENT: VALUE", signal_value.v, end='')
#print(signal_value in self._DEBUG_signal_cbs, end='')
#print([x.fullname for x in signal_value._DEBUG_signal_names], end='')
#print(self._DEBUG_signal_cbs[signal_value])
self.metrics.incr_add_events()
# Place all other callbacks in the event queue for execution later
for func in signal_value._callbacks:
self.metrics.incr_add_callbk()
if func != self._current_func:
self._event_queue.enq( func.cb, func.id )
#-----------------------------------------------------------------------
# EventQueue
#-----------------------------------------------------------------------
class EventQueue( object ):
  """FIFO of pending @combinational callbacks with duplicate suppression.

  A bit-vector indexed by callback id records which callbacks are already
  waiting in the queue; a callback is never enqueued twice at once.
  """
  def __init__( self, initsize = 1000 ):
    # Oldest events are popped from the right; new events appendleft'ed.
    self.fifo     = collections.deque()
    # Presence bit per callback id; grown on demand by get_id().
    self.func_bv  = [ False ] * initsize
    # Next unassigned callback id.
    self.func_ids = 0
  def enq( self, event, id ):
    """Queue ``event`` unless the callback with this ``id`` is already queued."""
    if self.func_bv[ id ]:
      return
    self.func_bv[ id ] = True
    self.fifo.appendleft( event )
  def deq( self ):
    """Pop the oldest queued event and clear its presence bit.

    The event is expected to carry an ``id`` attribute matching the id it
    was enqueued under.
    """
    event = self.fifo.pop()
    self.func_bv[ event.id ] = False
    return event
  def len( self ):
    """Number of queued events (kept alongside __len__ for existing callers)."""
    return len( self.fifo )
  # __len__ shares the implementation of len() above.
  __len__ = len
  def get_id( self ):
    """Hand out the next callback id, growing the bit-vector as needed."""
    new_id         = self.func_ids
    self.func_ids += 1
    if len( self.func_bv ) < self.func_ids:
      self.func_bv.extend( [ False ] * 1000 )
    return new_id
| bsd-3-clause |
EricMuller/mywebmarks-backend | requirements/twisted/Twisted-17.1.0/build/lib.linux-x86_64-3.5/twisted/names/test/test_dns.py | 12 | 153274 | # test-case-name: twisted.names.test.test_dns
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for twisted.names.dns.
"""
from __future__ import division, absolute_import
from io import BytesIO
import struct
from zope.interface.verify import verifyClass
from twisted.python.failure import Failure
from twisted.python.util import FancyEqMixin, FancyStrMixin
from twisted.internet import address, task
from twisted.internet.error import CannotListenError, ConnectionDone
from twisted.trial import unittest
from twisted.names import dns
from twisted.test import proto_helpers
from twisted.test.testutils import ComparisonTestsMixin
# Every record class exercised by the generic tests below; for example,
# RoundtripDNSTests.test_hashable iterates over this list.
RECORD_TYPES = [
    dns.Record_NS, dns.Record_MD, dns.Record_MF, dns.Record_CNAME,
    dns.Record_MB, dns.Record_MG, dns.Record_MR, dns.Record_PTR,
    dns.Record_DNAME, dns.Record_A, dns.Record_SOA, dns.Record_NULL,
    dns.Record_WKS, dns.Record_SRV, dns.Record_AFSDB, dns.Record_RP,
    dns.Record_HINFO, dns.Record_MINFO, dns.Record_MX, dns.Record_TXT,
    dns.Record_AAAA, dns.Record_A6, dns.Record_NAPTR, dns.UnknownRecord,
]
class Ord2ByteTests(unittest.TestCase):
    """
    Tests for the L{dns._ord2bytes} helper.
    """
    def test_ord2byte(self):
        """
        Passing an integer to L{dns._ord2bytes} produces a byte string of
        length one whose single byte has that integer as its ordinal value.
        """
        result = dns._ord2bytes(0x10)
        self.assertEqual(result, b'\x10')
class Str2TimeTests(unittest.TestCase):
    """
    Tests for L{dns.str2time}.
    """
    def test_nonString(self):
        """
        When passed a non-string object, L{dns.str2time} returns it unmodified.
        """
        time = object()
        self.assertIs(time, dns.str2time(time))
    def test_seconds(self):
        """
        Passed a string giving a number of seconds, L{dns.str2time} returns the
        number of seconds represented. For example, C{"10S"} represents C{10}
        seconds.
        """
        self.assertEqual(10, dns.str2time("10S"))
    def test_minutes(self):
        """
        Like C{test_seconds}, but for the C{"M"} suffix which multiplies the
        time value by C{60} (the number of seconds in a minute!).
        """
        self.assertEqual(2 * 60, dns.str2time("2M"))
    def test_hours(self):
        """
        Like C{test_seconds}, but for the C{"H"} suffix which multiplies the
        time value by C{3600}, the number of seconds in an hour.
        """
        self.assertEqual(3 * 3600, dns.str2time("3H"))
    def test_days(self):
        """
        Like L{test_seconds}, but for the C{"D"} suffix which multiplies the
        time value by C{86400}, the number of seconds in a day.
        """
        self.assertEqual(4 * 86400, dns.str2time("4D"))
    def test_weeks(self):
        """
        Like L{test_seconds}, but for the C{"W"} suffix which multiplies the
        time value by C{604800}, the number of seconds in a week.
        """
        self.assertEqual(5 * 604800, dns.str2time("5W"))
    def test_years(self):
        """
        Like L{test_seconds}, but for the C{"Y"} suffix which multiplies the
        time value by C{31536000}, the number of seconds in a year.
        """
        self.assertEqual(6 * 31536000, dns.str2time("6Y"))
    def test_invalidPrefix(self):
        """
        If a non-integer prefix is given, L{dns.str2time} raises L{ValueError}.
        """
        self.assertRaises(ValueError, dns.str2time, "fooS")
class NameTests(unittest.TestCase):
    """
    Tests for L{Name}, the representation of a single domain name with support
    for encoding into and decoding from DNS message format.
    """
    def test_nonStringName(self):
        """
        When constructed with a name which is neither C{bytes} nor C{str},
        L{Name} raises L{TypeError}.
        """
        self.assertRaises(TypeError, dns.Name, 123)
        self.assertRaises(TypeError, dns.Name, object())
        self.assertRaises(TypeError, dns.Name, [])
    def test_unicodeName(self):
        """
        L{dns.Name} automatically encodes unicode domain name using C{idna}
        encoding.
        """
        name = dns.Name(u'\u00e9chec.example.org')
        self.assertIsInstance(name.name, bytes)
        self.assertEqual(b'xn--chec-9oa.example.org', name.name)
    def test_decode(self):
        """
        L{Name.decode} populates the L{Name} instance with name information read
        from the file-like object passed to it.
        """
        n = dns.Name()
        n.decode(BytesIO(b"\x07example\x03com\x00"))
        self.assertEqual(n.name, b"example.com")
    def test_encode(self):
        """
        L{Name.encode} encodes its name information and writes it to the
        file-like object passed to it.
        """
        name = dns.Name(b"foo.example.com")
        stream = BytesIO()
        name.encode(stream)
        self.assertEqual(stream.getvalue(), b"\x03foo\x07example\x03com\x00")
    def test_encodeWithCompression(self):
        """
        If a compression dictionary is passed to it, L{Name.encode} uses offset
        information from it to encode its name with references to existing
        labels in the stream instead of including another copy of them in the
        output. It also updates the compression dictionary with the location of
        the name it writes to the stream.
        """
        name = dns.Name(b"foo.example.com")
        compression = {b"example.com": 0x17}
        # Some bytes already encoded into the stream for this message
        previous = b"some prefix to change .tell()"
        stream = BytesIO()
        stream.write(previous)
        # The position at which the encoded form of this new name will appear in
        # the stream.
        expected = len(previous) + dns.Message.headerSize
        name.encode(stream, compression)
        self.assertEqual(
            b"\x03foo\xc0\x17",
            stream.getvalue()[len(previous):])
        self.assertEqual(
            {b"example.com": 0x17, b"foo.example.com": expected},
            compression)
    def test_unknown(self):
        """
        A resource record of unknown type and class is parsed into an
        L{UnknownRecord} instance with its data preserved, and an
        L{UnknownRecord} instance is serialized to a string equal to the one it
        was parsed from.
        """
        wire = (
            b'\x01\x00' # Message ID
            b'\x00' # answer bit, opCode nibble, auth bit, trunc bit, recursive
                    # bit
            b'\x00' # recursion bit, empty bit, authenticData bit,
                    # checkingDisabled bit, response code nibble
            b'\x00\x01' # number of queries
            b'\x00\x01' # number of answers
            b'\x00\x00' # number of authorities
            b'\x00\x01' # number of additionals
            # query
            b'\x03foo\x03bar\x00' # foo.bar
            b'\xde\xad' # type=0xdead
            b'\xbe\xef' # cls=0xbeef
            # 1st answer
            b'\xc0\x0c' # foo.bar - compressed
            b'\xde\xad' # type=0xdead
            b'\xbe\xef' # cls=0xbeef
            b'\x00\x00\x01\x01' # ttl=257
            b'\x00\x08somedata' # some payload data
            # 1st additional
            b'\x03baz\x03ban\x00' # baz.ban
            b'\x00\x01' # type=A
            b'\x00\x01' # cls=IN
            b'\x00\x00\x01\x01' # ttl=257
            b'\x00\x04' # len=4
            b'\x01\x02\x03\x04' # 1.2.3.4
            )
        msg = dns.Message()
        msg.fromStr(wire)
        self.assertEqual(msg.queries, [
                dns.Query(b'foo.bar', type=0xdead, cls=0xbeef),
                ])
        self.assertEqual(msg.answers, [
                dns.RRHeader(b'foo.bar', type=0xdead, cls=0xbeef, ttl=257,
                             payload=dns.UnknownRecord(b'somedata', ttl=257)),
                ])
        self.assertEqual(msg.additional, [
                dns.RRHeader(b'baz.ban', type=dns.A, cls=dns.IN, ttl=257,
                             payload=dns.Record_A('1.2.3.4', ttl=257)),
                ])
        # Round trip: re-serializing the parsed message yields the original
        # wire bytes unchanged.
        enc = msg.toStr()
        self.assertEqual(enc, wire)
    def test_decodeWithCompression(self):
        """
        If the leading byte of an encoded label (in bytes read from a stream
        passed to L{Name.decode}) has its two high bits set, the next byte is
        treated as a pointer to another label in the stream and that label is
        included in the name being decoded.
        """
        # Slightly modified version of the example from RFC 1035, section 4.1.4.
        stream = BytesIO(
            b"x" * 20 +
            b"\x01f\x03isi\x04arpa\x00"
            b"\x03foo\xc0\x14"
            b"\x03bar\xc0\x20")
        stream.seek(20)
        name = dns.Name()
        name.decode(stream)
        # Verify we found the first name in the stream and that the stream
        # position is left at the first byte after the decoded name.
        self.assertEqual(b"f.isi.arpa", name.name)
        self.assertEqual(32, stream.tell())
        # Get the second name from the stream and make the same assertions.
        name.decode(stream)
        self.assertEqual(name.name, b"foo.f.isi.arpa")
        self.assertEqual(38, stream.tell())
        # Get the third and final name
        name.decode(stream)
        self.assertEqual(name.name, b"bar.foo.f.isi.arpa")
        self.assertEqual(44, stream.tell())
    def test_rejectCompressionLoop(self):
        """
        L{Name.decode} raises L{ValueError} if the stream passed to it includes
        a compression pointer which forms a loop, causing the name to be
        undecodable.
        """
        name = dns.Name()
        # A pointer that points at itself (offset 0) can never terminate.
        stream = BytesIO(b"\xc0\x00")
        self.assertRaises(ValueError, name.decode, stream)
    def test_equality(self):
        """
        L{Name} instances are equal as long as they have the same value for
        L{Name.name}, regardless of the case.
        """
        name1 = dns.Name(b"foo.bar")
        name2 = dns.Name(b"foo.bar")
        self.assertEqual(name1, name2)
        name3 = dns.Name(b"fOO.bar")
        self.assertEqual(name1, name3)
    def test_inequality(self):
        """
        L{Name} instances are not equal as long as they have different
        L{Name.name} attributes.
        """
        name1 = dns.Name(b"foo.bar")
        name2 = dns.Name(b"bar.foo")
        self.assertNotEqual(name1, name2)
class RoundtripDNSTests(unittest.TestCase):
    """
    Encoding and then decoding various objects.
    """
    # Domain names shared by test_name, test_query and test_Charstr below.
    names = [b"example.org", b"go-away.fish.tv", b"23strikesback.net"]
    def test_name(self):
        """
        A L{dns.Name} can be encoded to a stream and decoded back from it
        without losing its value.
        """
        for n in self.names:
            # encode the name
            f = BytesIO()
            dns.Name(n).encode(f)
            # decode the name
            f.seek(0, 0)
            result = dns.Name()
            result.decode(f)
            self.assertEqual(result.name, n)
    def test_query(self):
        """
        L{dns.Query.encode} returns a byte string representing the fields of the
        query which can be decoded into a new L{dns.Query} instance using
        L{dns.Query.decode}.
        """
        for n in self.names:
            for dnstype in range(1, 17):
                for dnscls in range(1, 5):
                    # encode the query
                    f = BytesIO()
                    dns.Query(n, dnstype, dnscls).encode(f)
                    # decode the result
                    f.seek(0, 0)
                    result = dns.Query()
                    result.decode(f)
                    self.assertEqual(result.name.name, n)
                    self.assertEqual(result.type, dnstype)
                    self.assertEqual(result.cls, dnscls)
    def test_resourceRecordHeader(self):
        """
        L{dns.RRHeader.encode} encodes the record header's information and
        writes it to the file-like object passed to it and
        L{dns.RRHeader.decode} reads from a file-like object to re-construct a
        L{dns.RRHeader} instance.
        """
        # encode the RR
        f = BytesIO()
        dns.RRHeader(b"test.org", 3, 4, 17).encode(f)
        # decode the result
        f.seek(0, 0)
        result = dns.RRHeader()
        result.decode(f)
        self.assertEqual(result.name, dns.Name(b"test.org"))
        self.assertEqual(result.type, 3)
        self.assertEqual(result.cls, 4)
        self.assertEqual(result.ttl, 17)
    def test_resources(self):
        """
        L{dns.SimpleRecord.encode} encodes the record's name information and
        writes it to the file-like object passed to it and
        L{dns.SimpleRecord.decode} reads from a file-like object to re-construct
        a L{dns.SimpleRecord} instance.
        """
        names = (
            b"this.are.test.name",
            b"will.compress.will.this.will.name.will.hopefully",
            b"test.CASE.preSErVatIOn.YeAH",
            b"a.s.h.o.r.t.c.a.s.e.t.o.t.e.s.t",
            b"singleton"
        )
        for s in names:
            f = BytesIO()
            dns.SimpleRecord(s).encode(f)
            f.seek(0, 0)
            result = dns.SimpleRecord()
            result.decode(f)
            self.assertEqual(result.name, dns.Name(s))
    def test_hashable(self):
        """
        Instances of all record types are hashable.
        """
        for k in RECORD_TYPES:
            # Two default-constructed instances must hash identically.
            k1, k2 = k(), k()
            hk1 = hash(k1)
            hk2 = hash(k2)
            self.assertEqual(hk1, hk2, "%s != %s (for %s)" % (hk1,hk2,k))
    def test_Charstr(self):
        """
        Test L{dns.Charstr} encode and decode.
        """
        for n in self.names:
            # encode the name
            f = BytesIO()
            dns.Charstr(n).encode(f)
            # decode the name
            f.seek(0, 0)
            result = dns.Charstr()
            result.decode(f)
            self.assertEqual(result.string, n)
    def _recordRoundtripTest(self, record):
        """
        Assert that encoding C{record} and then decoding the resulting bytes
        creates a record which compares equal to C{record}.
        """
        stream = BytesIO()
        record.encode(stream)
        length = stream.tell()
        stream.seek(0, 0)
        replica = record.__class__()
        replica.decode(stream, length)
        self.assertEqual(record, replica)
    def test_SOA(self):
        """
        The byte stream written by L{dns.Record_SOA.encode} can be used by
        L{dns.Record_SOA.decode} to reconstruct the state of the original
        L{dns.Record_SOA} instance.
        """
        self._recordRoundtripTest(
            dns.Record_SOA(
                mname=b'foo', rname=b'bar', serial=12, refresh=34,
                retry=56, expire=78, minimum=90))
    def test_A(self):
        """
        The byte stream written by L{dns.Record_A.encode} can be used by
        L{dns.Record_A.decode} to reconstruct the state of the original
        L{dns.Record_A} instance.
        """
        self._recordRoundtripTest(dns.Record_A('1.2.3.4'))
    def test_NULL(self):
        """
        The byte stream written by L{dns.Record_NULL.encode} can be used by
        L{dns.Record_NULL.decode} to reconstruct the state of the original
        L{dns.Record_NULL} instance.
        """
        self._recordRoundtripTest(dns.Record_NULL(b'foo bar'))
    def test_WKS(self):
        """
        The byte stream written by L{dns.Record_WKS.encode} can be used by
        L{dns.Record_WKS.decode} to reconstruct the state of the original
        L{dns.Record_WKS} instance.
        """
        self._recordRoundtripTest(dns.Record_WKS('1.2.3.4', 3, b'xyz'))
    def test_AAAA(self):
        """
        The byte stream written by L{dns.Record_AAAA.encode} can be used by
        L{dns.Record_AAAA.decode} to reconstruct the state of the original
        L{dns.Record_AAAA} instance.
        """
        self._recordRoundtripTest(dns.Record_AAAA('::1'))
    def test_A6(self):
        """
        The byte stream written by L{dns.Record_A6.encode} can be used by
        L{dns.Record_A6.decode} to reconstruct the state of the original
        L{dns.Record_A6} instance.
        """
        self._recordRoundtripTest(dns.Record_A6(8, '::1:2', b'foo'))
    def test_SRV(self):
        """
        The byte stream written by L{dns.Record_SRV.encode} can be used by
        L{dns.Record_SRV.decode} to reconstruct the state of the original
        L{dns.Record_SRV} instance.
        """
        self._recordRoundtripTest(dns.Record_SRV(
                priority=1, weight=2, port=3, target=b'example.com'))
    def test_NAPTR(self):
        """
        Test L{dns.Record_NAPTR} encode and decode.
        """
        naptrs = [
            (100, 10, b"u", b"sip+E2U",
             b"!^.*$!sip:information@domain.tld!", b""),
            (100, 50, b"s", b"http+I2L+I2C+I2R",
             b"", b"_http._tcp.gatech.edu")]
        for (order, preference, flags, service, regexp, replacement) in naptrs:
            rin = dns.Record_NAPTR(order, preference, flags, service, regexp,
                                   replacement)
            e = BytesIO()
            rin.encode(e)
            e.seek(0, 0)
            rout = dns.Record_NAPTR()
            rout.decode(e)
            self.assertEqual(rin.order, rout.order)
            self.assertEqual(rin.preference, rout.preference)
            self.assertEqual(rin.flags, rout.flags)
            self.assertEqual(rin.service, rout.service)
            self.assertEqual(rin.regexp, rout.regexp)
            self.assertEqual(rin.replacement.name, rout.replacement.name)
            self.assertEqual(rin.ttl, rout.ttl)
    def test_AFSDB(self):
        """
        The byte stream written by L{dns.Record_AFSDB.encode} can be used by
        L{dns.Record_AFSDB.decode} to reconstruct the state of the original
        L{dns.Record_AFSDB} instance.
        """
        self._recordRoundtripTest(dns.Record_AFSDB(
                subtype=3, hostname=b'example.com'))
    def test_RP(self):
        """
        The byte stream written by L{dns.Record_RP.encode} can be used by
        L{dns.Record_RP.decode} to reconstruct the state of the original
        L{dns.Record_RP} instance.
        """
        self._recordRoundtripTest(dns.Record_RP(
                mbox=b'alice.example.com', txt=b'example.com'))
    def test_HINFO(self):
        """
        The byte stream written by L{dns.Record_HINFO.encode} can be used by
        L{dns.Record_HINFO.decode} to reconstruct the state of the original
        L{dns.Record_HINFO} instance.
        """
        self._recordRoundtripTest(dns.Record_HINFO(cpu=b'fast', os=b'great'))
    def test_MINFO(self):
        """
        The byte stream written by L{dns.Record_MINFO.encode} can be used by
        L{dns.Record_MINFO.decode} to reconstruct the state of the original
        L{dns.Record_MINFO} instance.
        """
        self._recordRoundtripTest(dns.Record_MINFO(
                rmailbx=b'foo', emailbx=b'bar'))
    def test_MX(self):
        """
        The byte stream written by L{dns.Record_MX.encode} can be used by
        L{dns.Record_MX.decode} to reconstruct the state of the original
        L{dns.Record_MX} instance.
        """
        self._recordRoundtripTest(dns.Record_MX(
                preference=1, name=b'example.com'))
    def test_TXT(self):
        """
        The byte stream written by L{dns.Record_TXT.encode} can be used by
        L{dns.Record_TXT.decode} to reconstruct the state of the original
        L{dns.Record_TXT} instance.
        """
        self._recordRoundtripTest(dns.Record_TXT(b'foo', b'bar'))
# Wire image of a header-only DNS message with just the AD (authentic data)
# bit set; used by MessageTests for encode/decode of that flag.
MESSAGE_AUTHENTIC_DATA_BYTES = (
    b'\x00\x00' # ID
    b'\x00' #
    b'\x20' # RA, Z, AD=1, CD, RCODE
    b'\x00\x00' # Query count
    b'\x00\x00' # Answer count
    b'\x00\x00' # Authority count
    b'\x00\x00' # Additional count
)
# Wire image of a header-only DNS message with just the CD (checking
# disabled) bit set; used by MessageTests for encode/decode of that flag.
MESSAGE_CHECKING_DISABLED_BYTES = (
    b'\x00\x00' # ID
    b'\x00' #
    b'\x10' # RA, Z, AD, CD=1, RCODE
    b'\x00\x00' # Query count
    b'\x00\x00' # Answer count
    b'\x00\x00' # Authority count
    b'\x00\x00' # Additional count
)
class MessageTests(unittest.SynchronousTestCase):
    """
    Tests for L{twisted.names.dns.Message}.
    """
    def test_authenticDataDefault(self):
        """
        L{dns.Message.authenticData} has default value 0.
        """
        self.assertEqual(dns.Message().authenticData, 0)
    def test_authenticDataOverride(self):
        """
        L{dns.Message.__init__} accepts a C{authenticData} argument which
        is assigned to L{dns.Message.authenticData}.
        """
        self.assertEqual(dns.Message(authenticData=1).authenticData, 1)
    def test_authenticDataEncode(self):
        """
        L{dns.Message.toStr} encodes L{dns.Message.authenticData} into
        byte4 of the byte string.
        """
        self.assertEqual(
            dns.Message(authenticData=1).toStr(),
            MESSAGE_AUTHENTIC_DATA_BYTES
        )
    def test_authenticDataDecode(self):
        """
        L{dns.Message.fromStr} decodes byte4 and assigns bit3 to
        L{dns.Message.authenticData}.
        """
        m = dns.Message()
        m.fromStr(MESSAGE_AUTHENTIC_DATA_BYTES)
        self.assertEqual(m.authenticData, 1)
    def test_checkingDisabledDefault(self):
        """
        L{dns.Message.checkingDisabled} has default value 0.
        """
        self.assertEqual(dns.Message().checkingDisabled, 0)
    def test_checkingDisabledOverride(self):
        """
        L{dns.Message.__init__} accepts a C{checkingDisabled} argument which
        is assigned to L{dns.Message.checkingDisabled}.
        """
        self.assertEqual(
            dns.Message(checkingDisabled=1).checkingDisabled, 1)
    def test_checkingDisabledEncode(self):
        """
        L{dns.Message.toStr} encodes L{dns.Message.checkingDisabled} into
        byte4 of the byte string.
        """
        self.assertEqual(
            dns.Message(checkingDisabled=1).toStr(),
            MESSAGE_CHECKING_DISABLED_BYTES
        )
    def test_checkingDisabledDecode(self):
        """
        L{dns.Message.fromStr} decodes byte4 and assigns bit4 to
        L{dns.Message.checkingDisabled}.
        """
        m = dns.Message()
        m.fromStr(MESSAGE_CHECKING_DISABLED_BYTES)
        self.assertEqual(m.checkingDisabled, 1)
    def test_reprDefaults(self):
        """
        L{dns.Message.__repr__} omits field values and sections which are
        identical to their defaults. The id field value is always shown.
        """
        self.assertEqual(
            '<Message id=0>',
            repr(dns.Message())
        )
    def test_reprFlagsIfSet(self):
        """
        L{dns.Message.__repr__} displays flags if they are L{True}.
        """
        m = dns.Message(answer=True, auth=True, trunc=True, recDes=True,
                        recAv=True, authenticData=True, checkingDisabled=True)
        self.assertEqual(
            '<Message '
            'id=0 '
            'flags=answer,auth,trunc,recDes,recAv,authenticData,'
            'checkingDisabled'
            '>',
            repr(m),
        )
    def test_reprNonDefautFields(self):
        """
        L{dns.Message.__repr__} displays field values if they differ from their
        defaults.
        """
        m = dns.Message(id=10, opCode=20, rCode=30, maxSize=40)
        self.assertEqual(
            '<Message '
            'id=10 '
            'opCode=20 '
            'rCode=30 '
            'maxSize=40'
            '>',
            repr(m),
        )
    def test_reprNonDefaultSections(self):
        """
        L{dns.Message.__repr__} displays sections which differ from their
        defaults.
        """
        m = dns.Message()
        m.queries = [1, 2, 3]
        m.answers = [4, 5, 6]
        m.authority = [7, 8, 9]
        m.additional = [10, 11, 12]
        self.assertEqual(
            '<Message '
            'id=0 '
            'queries=[1, 2, 3] '
            'answers=[4, 5, 6] '
            'authority=[7, 8, 9] '
            'additional=[10, 11, 12]'
            '>',
            repr(m),
        )
    def test_emptyMessage(self):
        """
        Test that a message which has been truncated causes an EOFError to
        be raised when it is parsed.
        """
        msg = dns.Message()
        self.assertRaises(EOFError, msg.fromStr, b'')
    def test_emptyQuery(self):
        """
        Test that bytes representing an empty query message can be decoded
        as such.
        """
        msg = dns.Message()
        msg.fromStr(
            b'\x01\x00' # Message ID
            b'\x00' # answer bit, opCode nibble, auth bit, trunc bit, recursive bit
            b'\x00' # recursion bit, empty bit, authenticData bit,
                    # checkingDisabled bit, response code nibble
            b'\x00\x00' # number of queries
            b'\x00\x00' # number of answers
            b'\x00\x00' # number of authorities
            b'\x00\x00' # number of additionals
            )
        # The ID bytes b'\x01\x00' decode big-endian to 256.
        self.assertEqual(msg.id, 256)
        self.assertFalse(
            msg.answer, "Message was not supposed to be an answer.")
        self.assertEqual(msg.opCode, dns.OP_QUERY)
        self.assertFalse(
            msg.auth, "Message was not supposed to be authoritative.")
        self.assertFalse(
            msg.trunc, "Message was not supposed to be truncated.")
        self.assertEqual(msg.queries, [])
        self.assertEqual(msg.answers, [])
        self.assertEqual(msg.authority, [])
        self.assertEqual(msg.additional, [])
    def test_NULL(self):
        """
        A I{NULL} record with an arbitrary payload can be encoded and decoded as
        part of a L{dns.Message}.
        """
        bytes = b''.join([dns._ord2bytes(i) for i in range(256)])
        rec = dns.Record_NULL(bytes)
        rr = dns.RRHeader(b'testname', dns.NULL, payload=rec)
        msg1 = dns.Message()
        msg1.answers.append(rr)
        s = BytesIO()
        msg1.encode(s)
        s.seek(0, 0)
        msg2 = dns.Message()
        msg2.decode(s)
        self.assertIsInstance(msg2.answers[0].payload, dns.Record_NULL)
        self.assertEqual(msg2.answers[0].payload.payload, bytes)
    def test_lookupRecordTypeDefault(self):
        """
        L{Message.lookupRecordType} returns C{dns.UnknownRecord} if it is
        called with an integer which doesn't correspond to any known record
        type.
        """
        # 65280 is the first value in the range reserved for private
        # use, so it shouldn't ever conflict with an officially
        # allocated value.
        self.assertIs(dns.Message().lookupRecordType(65280), dns.UnknownRecord)
    def test_nonAuthoritativeMessage(self):
        """
        The L{RRHeader} instances created by L{Message} from a non-authoritative
        message are marked as not authoritative.
        """
        buf = BytesIO()
        answer = dns.RRHeader(payload=dns.Record_A('1.2.3.4', ttl=0))
        answer.encode(buf)
        message = dns.Message()
        message.fromStr(
            b'\x01\x00' # Message ID
            # answer bit, opCode nibble, auth bit, trunc bit, recursive bit
            # (auth bit 0x04 is clear here).
            b'\x00'
            # recursion bit, empty bit, authenticData bit,
            # checkingDisabled bit, response code nibble
            b'\x00'
            b'\x00\x00' # number of queries
            b'\x00\x01' # number of answers
            b'\x00\x00' # number of authorities
            b'\x00\x00' # number of additionals
            + buf.getvalue()
            )
        self.assertEqual(message.answers, [answer])
        self.assertFalse(message.answers[0].auth)
    def test_authoritativeMessage(self):
        """
        The L{RRHeader} instances created by L{Message} from an authoritative
        message are marked as authoritative.
        """
        buf = BytesIO()
        answer = dns.RRHeader(payload=dns.Record_A('1.2.3.4', ttl=0))
        answer.encode(buf)
        message = dns.Message()
        message.fromStr(
            b'\x01\x00' # Message ID
            # answer bit, opCode nibble, auth bit, trunc bit, recursive bit
            # (auth bit 0x04 is set here).
            b'\x04'
            # recursion bit, empty bit, authenticData bit,
            # checkingDisabled bit, response code nibble
            b'\x00'
            b'\x00\x00' # number of queries
            b'\x00\x01' # number of answers
            b'\x00\x00' # number of authorities
            b'\x00\x00' # number of additionals
            + buf.getvalue()
            )
        # The parsed header's auth bit must propagate to each RRHeader.
        answer.auth = True
        self.assertEqual(message.answers, [answer])
        self.assertTrue(message.answers[0].auth)
class MessageComparisonTests(ComparisonTestsMixin,
                             unittest.SynchronousTestCase):
    """
    Tests for the rich comparison of L{dns.Message} instances.
    """
    def messageFactory(self, *args, **kwargs):
        """
        Create a L{dns.Message}.
        The L{dns.Message} constructor doesn't accept C{queries}, C{answers},
        C{authority}, C{additional} arguments, so we extract them from the
        kwargs supplied to this factory function and assign them to the message.
        @param args: Positional arguments.
        @param kwargs: Keyword arguments.
        @return: A L{dns.Message} instance.
        """
        queries = kwargs.pop('queries', [])
        answers = kwargs.pop('answers', [])
        authority = kwargs.pop('authority', [])
        additional = kwargs.pop('additional', [])
        m = dns.Message(**kwargs)
        # Only non-empty sections are assigned, leaving the message's own
        # (empty) defaults untouched otherwise.
        if queries:
            m.queries = queries
        if answers:
            m.answers = answers
        if authority:
            m.authority = authority
        if additional:
            m.additional = additional
        return m
    def test_id(self):
        """
        Two L{dns.Message} instances compare equal if they have the same id
        value.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(id=10),
            self.messageFactory(id=10),
            self.messageFactory(id=20),
        )
    def test_answer(self):
        """
        Two L{dns.Message} instances compare equal if they have the same answer
        flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(answer=1),
            self.messageFactory(answer=1),
            self.messageFactory(answer=0),
        )
    def test_opCode(self):
        """
        Two L{dns.Message} instances compare equal if they have the same opCode
        value.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(opCode=10),
            self.messageFactory(opCode=10),
            self.messageFactory(opCode=20),
        )
    def test_recDes(self):
        """
        Two L{dns.Message} instances compare equal if they have the same recDes
        flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(recDes=1),
            self.messageFactory(recDes=1),
            self.messageFactory(recDes=0),
        )
    def test_recAv(self):
        """
        Two L{dns.Message} instances compare equal if they have the same recAv
        flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(recAv=1),
            self.messageFactory(recAv=1),
            self.messageFactory(recAv=0),
        )
    def test_auth(self):
        """
        Two L{dns.Message} instances compare equal if they have the same auth
        flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(auth=1),
            self.messageFactory(auth=1),
            self.messageFactory(auth=0),
        )
    def test_rCode(self):
        """
        Two L{dns.Message} instances compare equal if they have the same rCode
        value.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(rCode=10),
            self.messageFactory(rCode=10),
            self.messageFactory(rCode=20),
        )
    def test_trunc(self):
        """
        Two L{dns.Message} instances compare equal if they have the same trunc
        flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(trunc=1),
            self.messageFactory(trunc=1),
            self.messageFactory(trunc=0),
        )
    def test_maxSize(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        maxSize value.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(maxSize=10),
            self.messageFactory(maxSize=10),
            self.messageFactory(maxSize=20),
        )
    def test_authenticData(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        authenticData flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(authenticData=1),
            self.messageFactory(authenticData=1),
            self.messageFactory(authenticData=0),
        )
    def test_checkingDisabled(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        checkingDisabled flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(checkingDisabled=1),
            self.messageFactory(checkingDisabled=1),
            self.messageFactory(checkingDisabled=0),
        )
    def test_queries(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        queries.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(queries=[dns.Query(b'example.com')]),
            self.messageFactory(queries=[dns.Query(b'example.com')]),
            self.messageFactory(queries=[dns.Query(b'example.org')]),
        )
    def test_answers(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        answers.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(answers=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('1.2.3.4'))]),
            self.messageFactory(answers=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('1.2.3.4'))]),
            self.messageFactory(answers=[dns.RRHeader(
                b'example.org', payload=dns.Record_A('4.3.2.1'))]),
        )
    def test_authority(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        authority records.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(authority=[dns.RRHeader(
                b'example.com',
                type=dns.SOA, payload=dns.Record_SOA())]),
            self.messageFactory(authority=[dns.RRHeader(
                b'example.com',
                type=dns.SOA, payload=dns.Record_SOA())]),
            self.messageFactory(authority=[dns.RRHeader(
                b'example.org',
                type=dns.SOA, payload=dns.Record_SOA())]),
        )
    def test_additional(self):
        """
        Two L{dns.Message} instances compare equal if they have the same
        additional records.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(additional=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('1.2.3.4'))]),
            self.messageFactory(additional=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('1.2.3.4'))]),
            self.messageFactory(additional=[dns.RRHeader(
                b'example.org', payload=dns.Record_A('1.2.3.4'))]),
        )
class TestController(object):
    """
    Pretend to be a DNS query processor for a DNSDatagramProtocol.
    @ivar messages: the list of received messages.
    @type messages: C{list} of (msg, protocol, address)
    """
    def __init__(self):
        """
        Initialize the controller: create a list of messages.
        """
        self.messages = []
    def messageReceived(self, msg, proto, addr=None):
        """
        Save the message so that it can be checked during the tests.
        """
        # Record the full delivery context, not just the message, so tests
        # can also assert which protocol and peer address were involved.
        self.messages.append((msg, proto, addr))
class DatagramProtocolTests(unittest.TestCase):
    """
    Test various aspects of L{dns.DNSDatagramProtocol}.
    """
    def setUp(self):
        """
        Create a L{dns.DNSDatagramProtocol} with a deterministic clock.
        """
        self.clock = task.Clock()
        self.controller = TestController()
        self.proto = dns.DNSDatagramProtocol(self.controller)
        transport = proto_helpers.FakeDatagramTransport()
        self.proto.makeConnection(transport)
        # Route the protocol's delayed calls through the deterministic
        # clock so tests can advance time explicitly.
        self.proto.callLater = self.clock.callLater
    def test_truncatedPacket(self):
        """
        Test that when a short datagram is received, datagramReceived does
        not raise an exception while processing it.
        """
        # An empty datagram is too short to contain a DNS header.
        self.proto.datagramReceived(
            b'', address.IPv4Address('UDP', '127.0.0.1', 12345))
        # The malformed datagram must not be delivered to the controller.
        self.assertEqual(self.controller.messages, [])
    def test_simpleQuery(self):
        """
        Test content received after a query.
        """
        d = self.proto.query(('127.0.0.1', 21345), [dns.Query(b'foo')])
        self.assertEqual(len(self.proto.liveMessages.keys()), 1)
        m = dns.Message()
        # Reply with the id of the one outstanding query so the response is
        # matched to the pending Deferred.
        m.id = next(iter(self.proto.liveMessages.keys()))
        m.answers = [dns.RRHeader(payload=dns.Record_A(address='1.2.3.4'))]
        def cb(result):
            self.assertEqual(result.answers[0].payload.dottedQuad(), '1.2.3.4')
        d.addCallback(cb)
        self.proto.datagramReceived(m.toStr(), ('127.0.0.1', 21345))
        return d
    def test_queryTimeout(self):
        """
        Test that query timeouts after some seconds.
        """
        d = self.proto.query(('127.0.0.1', 21345), [dns.Query(b'foo')])
        self.assertEqual(len(self.proto.liveMessages), 1)
        # Advance past the UDP query timeout without delivering a reply.
        self.clock.advance(10)
        self.assertFailure(d, dns.DNSQueryTimeoutError)
        # The timed-out query must no longer be tracked.
        self.assertEqual(len(self.proto.liveMessages), 0)
        return d
    def test_writeError(self):
        """
        Exceptions raised by the transport's write method should be turned into
        C{Failure}s passed to errbacks of the C{Deferred} returned by
        L{DNSDatagramProtocol.query}.
        """
        def writeError(message, addr):
            raise RuntimeError("bar")
        # Replace the transport's write so that sending the query fails.
        self.proto.transport.write = writeError
        d = self.proto.query(('127.0.0.1', 21345), [dns.Query(b'foo')])
        return self.assertFailure(d, RuntimeError)
    def test_listenError(self):
        """
        Exception L{CannotListenError} raised by C{listenUDP} should be turned
        into a C{Failure} passed to errback of the C{Deferred} returned by
        L{DNSDatagramProtocol.query}.
        """
        def startListeningError():
            raise CannotListenError(None, None, None)
        self.proto.startListening = startListeningError
        # Clean up transport so that the protocol calls startListening again
        self.proto.transport = None
        d = self.proto.query(('127.0.0.1', 21345), [dns.Query(b'foo')])
        return self.assertFailure(d, CannotListenError)
    def test_receiveMessageNotInLiveMessages(self):
        """
        When receiving a message whose id is not in
        L{DNSDatagramProtocol.liveMessages} or L{DNSDatagramProtocol.resends},
        the message will be received by L{DNSDatagramProtocol.controller}.
        """
        message = dns.Message()
        # No query with id 1 is outstanding, so this message is unsolicited.
        message.id = 1
        message.answers = [dns.RRHeader(
            payload=dns.Record_A(address='1.2.3.4'))]
        self.proto.datagramReceived(message.toStr(), ('127.0.0.1', 21345))
        # Compare serialized forms since Message identity differs.
        self.assertEqual(self.controller.messages[-1][0].toStr(),
                         message.toStr())
class TestTCPController(TestController):
    """
    Pretend to be a DNS query processor for a DNSProtocol.
    @ivar connections: A list of L{DNSProtocol} instances which have
        notified this controller that they are connected and have not
        yet notified it that their connection has been lost.
    """
    def __init__(self):
        TestController.__init__(self)
        # Currently-connected protocol instances, in connection order.
        self.connections = []
    def connectionMade(self, proto):
        # Track the protocol for the lifetime of its connection.
        self.connections.append(proto)
    def connectionLost(self, proto):
        # Forget the protocol once its connection has gone away.
        self.connections.remove(proto)
class DNSProtocolTests(unittest.TestCase):
    """
    Test various aspects of L{dns.DNSProtocol}.
    """
    def setUp(self):
        """
        Create a L{dns.DNSProtocol} with a deterministic clock.
        """
        self.clock = task.Clock()
        self.controller = TestTCPController()
        self.proto = dns.DNSProtocol(self.controller)
        self.proto.makeConnection(proto_helpers.StringTransport())
        # Route the protocol's delayed calls through the deterministic
        # clock so tests can advance time explicitly.
        self.proto.callLater = self.clock.callLater
    def test_connectionTracking(self):
        """
        L{dns.DNSProtocol} calls its controller's C{connectionMade}
        method with itself when it is connected to a transport and its
        controller's C{connectionLost} method when it is disconnected.
        """
        # makeConnection in setUp should already have registered the
        # protocol with the controller.
        self.assertEqual(self.controller.connections, [self.proto])
        self.proto.connectionLost(
            Failure(ConnectionDone("Fake Connection Done")))
        self.assertEqual(self.controller.connections, [])
    def test_queryTimeout(self):
        """
        Test that query timeouts after some seconds.
        """
        d = self.proto.query([dns.Query(b'foo')])
        self.assertEqual(len(self.proto.liveMessages), 1)
        # Advance past the TCP query timeout without delivering a reply.
        self.clock.advance(60)
        self.assertFailure(d, dns.DNSQueryTimeoutError)
        self.assertEqual(len(self.proto.liveMessages), 0)
        return d
    def test_simpleQuery(self):
        """
        Test content received after a query.
        """
        d = self.proto.query([dns.Query(b'foo')])
        self.assertEqual(len(self.proto.liveMessages.keys()), 1)
        m = dns.Message()
        # Reply with the id of the one outstanding query so the response is
        # matched to the pending Deferred.
        m.id = next(iter(self.proto.liveMessages.keys()))
        m.answers = [dns.RRHeader(payload=dns.Record_A(address='1.2.3.4'))]
        def cb(result):
            self.assertEqual(result.answers[0].payload.dottedQuad(), '1.2.3.4')
        d.addCallback(cb)
        s = m.toStr()
        # DNS-over-TCP prefixes each message with a 16-bit length field.
        s = struct.pack('!H', len(s)) + s
        self.proto.dataReceived(s)
        return d
    def test_writeError(self):
        """
        Exceptions raised by the transport's write method should be turned into
        C{Failure}s passed to errbacks of the C{Deferred} returned by
        L{DNSProtocol.query}.
        """
        def writeError(message):
            raise RuntimeError("bar")
        # Replace the transport's write so that sending the query fails.
        self.proto.transport.write = writeError
        d = self.proto.query([dns.Query(b'foo')])
        return self.assertFailure(d, RuntimeError)
    def test_receiveMessageNotInLiveMessages(self):
        """
        When receiving a message whose id is not in L{DNSProtocol.liveMessages}
        the message will be received by L{DNSProtocol.controller}.
        """
        message = dns.Message()
        # No query with id 1 is outstanding, so this message is unsolicited.
        message.id = 1
        message.answers = [dns.RRHeader(
            payload=dns.Record_A(address='1.2.3.4'))]
        string = message.toStr()
        # DNS-over-TCP prefixes each message with a 16-bit length field.
        string = struct.pack('!H', len(string)) + string
        self.proto.dataReceived(string)
        # Compare serialized forms since Message identity differs.
        self.assertEqual(self.controller.messages[-1][0].toStr(),
                         message.toStr())
class ReprTests(unittest.TestCase):
    """
    Tests for the C{__repr__} implementation of record classes.
    """
    def test_ns(self):
        """
        The repr of a L{dns.Record_NS} instance includes the name of the
        nameserver and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_NS(b'example.com', 4321)),
            "<NS name=example.com ttl=4321>")
    def test_md(self):
        """
        The repr of a L{dns.Record_MD} instance includes the name of the
        mail destination and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_MD(b'example.com', 4321)),
            "<MD name=example.com ttl=4321>")
    def test_mf(self):
        """
        The repr of a L{dns.Record_MF} instance includes the name of the
        mail forwarder and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_MF(b'example.com', 4321)),
            "<MF name=example.com ttl=4321>")
    def test_cname(self):
        """
        The repr of a L{dns.Record_CNAME} instance includes the canonical
        name and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_CNAME(b'example.com', 4321)),
            "<CNAME name=example.com ttl=4321>")
    def test_mb(self):
        """
        The repr of a L{dns.Record_MB} instance includes the name of the
        mailbox and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_MB(b'example.com', 4321)),
            "<MB name=example.com ttl=4321>")
    def test_mg(self):
        """
        The repr of a L{dns.Record_MG} instance includes the name of the
        mail group member and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_MG(b'example.com', 4321)),
            "<MG name=example.com ttl=4321>")
    def test_mr(self):
        """
        The repr of a L{dns.Record_MR} instance includes the name of the
        mail rename domain and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_MR(b'example.com', 4321)),
            "<MR name=example.com ttl=4321>")
    def test_ptr(self):
        """
        The repr of a L{dns.Record_PTR} instance includes the name of the
        pointer and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_PTR(b'example.com', 4321)),
            "<PTR name=example.com ttl=4321>")
    def test_dname(self):
        """
        The repr of a L{dns.Record_DNAME} instance includes the name of the
        non-terminal DNS name redirection and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_DNAME(b'example.com', 4321)),
            "<DNAME name=example.com ttl=4321>")
    def test_a(self):
        """
        The repr of a L{dns.Record_A} instance includes the dotted-quad
        string representation of the address it is for and the TTL of the
        record.
        """
        self.assertEqual(
            repr(dns.Record_A('1.2.3.4', 567)),
            '<A address=1.2.3.4 ttl=567>')
    def test_soa(self):
        """
        The repr of a L{dns.Record_SOA} instance includes all of the
        authority fields.
        """
        self.assertEqual(
            repr(dns.Record_SOA(mname=b'mName', rname=b'rName', serial=123,
                                refresh=456, retry=789, expire=10,
                                minimum=11, ttl=12)),
            "<SOA mname=mName rname=rName serial=123 refresh=456 "
            "retry=789 expire=10 minimum=11 ttl=12>")
    def test_null(self):
        """
        The repr of a L{dns.Record_NULL} instance includes the repr of its
        payload and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_NULL(b'abcd', 123)),
            "<NULL payload='abcd' ttl=123>")
    def test_wks(self):
        """
        The repr of a L{dns.Record_WKS} instance includes the dotted-quad
        string representation of the address it is for, the IP protocol
        number it is for, and the TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_WKS('2.3.4.5', 7, ttl=8)),
            "<WKS address=2.3.4.5 protocol=7 ttl=8>")
    def test_aaaa(self):
        """
        The repr of a L{dns.Record_AAAA} instance includes the colon-separated
        hex string representation of the address it is for and the TTL of the
        record.
        """
        self.assertEqual(
            repr(dns.Record_AAAA('8765::1234', ttl=10)),
            "<AAAA address=8765::1234 ttl=10>")
    def test_a6(self):
        """
        The repr of a L{dns.Record_A6} instance includes the colon-separated
        hex string representation of the address it is for and the TTL of the
        record.
        """
        self.assertEqual(
            repr(dns.Record_A6(0, '1234::5678', b'foo.bar', ttl=10)),
            "<A6 suffix=1234::5678 prefix=foo.bar ttl=10>")
    def test_srv(self):
        """
        The repr of a L{dns.Record_SRV} instance includes the name and port of
        the target and the priority, weight, and TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_SRV(1, 2, 3, b'example.org', 4)),
            "<SRV priority=1 weight=2 target=example.org port=3 ttl=4>")
    def test_naptr(self):
        """
        The repr of a L{dns.Record_NAPTR} instance includes the order,
        preference, flags, service, regular expression, replacement, and TTL of
        the record.
        """
        record = dns.Record_NAPTR(
            5, 9, b"S", b"http", b"/foo/bar/i", b"baz", 3)
        self.assertEqual(
            repr(record),
            "<NAPTR order=5 preference=9 flags=S service=http "
            "regexp=/foo/bar/i replacement=baz ttl=3>")
    def test_afsdb(self):
        """
        The repr of a L{dns.Record_AFSDB} instance includes the subtype,
        hostname, and TTL of the record.
        """
        self.assertEqual(
            repr(dns.Record_AFSDB(3, b'example.org', 5)),
            "<AFSDB subtype=3 hostname=example.org ttl=5>")
    def test_rp(self):
        """
        The repr of a L{dns.Record_RP} instance includes the mbox, txt, and TTL
        fields of the record.
        """
        self.assertEqual(
            repr(dns.Record_RP(b'alice.example.com', b'admin.example.com', 3)),
            "<RP mbox=alice.example.com txt=admin.example.com ttl=3>")
    def test_hinfo(self):
        """
        The repr of a L{dns.Record_HINFO} instance includes the cpu, os, and
        TTL fields of the record.
        """
        self.assertEqual(
            repr(dns.Record_HINFO(b'sparc', b'minix', 12)),
            "<HINFO cpu='sparc' os='minix' ttl=12>")
    def test_minfo(self):
        """
        The repr of a L{dns.Record_MINFO} instance includes the rmailbx,
        emailbx, and TTL fields of the record.
        """
        record = dns.Record_MINFO(
            b'alice.example.com', b'bob.example.com', 15)
        self.assertEqual(
            repr(record),
            "<MINFO responsibility=alice.example.com "
            "errors=bob.example.com ttl=15>")
    def test_mx(self):
        """
        The repr of a L{dns.Record_MX} instance includes the preference, name,
        and TTL fields of the record.
        """
        self.assertEqual(
            repr(dns.Record_MX(13, b'mx.example.com', 2)),
            "<MX preference=13 name=mx.example.com ttl=2>")
    def test_txt(self):
        """
        The repr of a L{dns.Record_TXT} instance includes the data and ttl
        fields of the record.
        """
        self.assertEqual(
            repr(dns.Record_TXT(b"foo", b"bar", ttl=15)),
            "<TXT data=['foo', 'bar'] ttl=15>")
    def test_spf(self):
        """
        The repr of a L{dns.Record_SPF} instance includes the data and ttl
        fields of the record.
        """
        self.assertEqual(
            repr(dns.Record_SPF(b"foo", b"bar", ttl=15)),
            "<SPF data=['foo', 'bar'] ttl=15>")
    def test_unknown(self):
        """
        The repr of a L{dns.UnknownRecord} instance includes the data and ttl
        fields of the record.
        """
        self.assertEqual(
            repr(dns.UnknownRecord(b"foo\x1fbar", 12)),
            "<UNKNOWN data='foo\\x1fbar' ttl=12>")
class EqualityTests(ComparisonTestsMixin, unittest.TestCase):
    """
    Tests for the equality and non-equality behavior of record classes.
    """
    def _equalityTest(self, firstValueOne, secondValueOne, valueTwo):
        return self.assertNormalEqualityImplementation(
            firstValueOne, secondValueOne, valueTwo)
    def test_charstr(self):
        """
        Two L{dns.Charstr} instances compare equal if and only if they have the
        same string value.
        """
        self._equalityTest(
            dns.Charstr(b'abc'), dns.Charstr(b'abc'), dns.Charstr(b'def'))
    def test_name(self):
        """
        Two L{dns.Name} instances compare equal if and only if they have the
        same name value.
        """
        self._equalityTest(
            dns.Name(b'abc'), dns.Name(b'abc'), dns.Name(b'def'))
    def _simpleEqualityTest(self, cls):
        """
        Assert that instances of C{cls} with the same attributes compare equal
        to each other and instances with different attributes compare as not
        equal.
        @param cls: A L{dns.SimpleRecord} subclass.
        """
        # Vary the TTL
        self._equalityTest(
            cls(b'example.com', 123),
            cls(b'example.com', 123),
            cls(b'example.com', 321))
        # Vary the name
        self._equalityTest(
            cls(b'example.com', 123),
            cls(b'example.com', 123),
            cls(b'example.org', 123))
    def test_rrheader(self):
        """
        Two L{dns.RRHeader} instances compare equal if and only if they have
        the same name, type, class, time to live, payload, and authoritative
        bit.
        """
        # Vary the name
        self._equalityTest(
            dns.RRHeader(b'example.com', payload=dns.Record_A('1.2.3.4')),
            dns.RRHeader(b'example.com', payload=dns.Record_A('1.2.3.4')),
            dns.RRHeader(b'example.org', payload=dns.Record_A('1.2.3.4')))
        # Vary the payload
        self._equalityTest(
            dns.RRHeader(b'example.com', payload=dns.Record_A('1.2.3.4')),
            dns.RRHeader(b'example.com', payload=dns.Record_A('1.2.3.4')),
            dns.RRHeader(b'example.com', payload=dns.Record_A('1.2.3.5')))
        # Vary the type.  Leave the payload as None so that we don't have to
        # provide non-equal values.
        self._equalityTest(
            dns.RRHeader(b'example.com', dns.A),
            dns.RRHeader(b'example.com', dns.A),
            dns.RRHeader(b'example.com', dns.MX))
        # Probably not likely to come up.  Most people use the internet.
        self._equalityTest(
            dns.RRHeader(b'example.com', cls=dns.IN, payload=dns.Record_A('1.2.3.4')),
            dns.RRHeader(b'example.com', cls=dns.IN, payload=dns.Record_A('1.2.3.4')),
            dns.RRHeader(b'example.com', cls=dns.CS, payload=dns.Record_A('1.2.3.4')))
        # Vary the ttl
        self._equalityTest(
            dns.RRHeader(b'example.com', ttl=60, payload=dns.Record_A('1.2.3.4')),
            dns.RRHeader(b'example.com', ttl=60, payload=dns.Record_A('1.2.3.4')),
            dns.RRHeader(b'example.com', ttl=120, payload=dns.Record_A('1.2.3.4')))
        # Vary the auth bit
        self._equalityTest(
            dns.RRHeader(b'example.com', auth=1, payload=dns.Record_A('1.2.3.4')),
            dns.RRHeader(b'example.com', auth=1, payload=dns.Record_A('1.2.3.4')),
            dns.RRHeader(b'example.com', auth=0, payload=dns.Record_A('1.2.3.4')))
    def test_ns(self):
        """
        Two L{dns.Record_NS} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_NS)
    def test_md(self):
        """
        Two L{dns.Record_MD} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MD)
    def test_mf(self):
        """
        Two L{dns.Record_MF} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MF)
    def test_cname(self):
        """
        Two L{dns.Record_CNAME} instances compare equal if and only if they
        have the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_CNAME)
    def test_mb(self):
        """
        Two L{dns.Record_MB} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MB)
    def test_mg(self):
        """
        Two L{dns.Record_MG} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MG)
    def test_mr(self):
        """
        Two L{dns.Record_MR} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_MR)
    def test_ptr(self):
        """
        Two L{dns.Record_PTR} instances compare equal if and only if they have
        the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_PTR)
    def test_dname(self):
        """
        Two L{dns.Record_DNAME} instances compare equal if and only if they
        have the same name and TTL.
        """
        self._simpleEqualityTest(dns.Record_DNAME)
    def test_a(self):
        """
        Two L{dns.Record_A} instances compare equal if and only if they have
        the same address and TTL.
        """
        # Vary the TTL
        self._equalityTest(
            dns.Record_A('1.2.3.4', 5),
            dns.Record_A('1.2.3.4', 5),
            dns.Record_A('1.2.3.4', 6))
        # Vary the address
        self._equalityTest(
            dns.Record_A('1.2.3.4', 5),
            dns.Record_A('1.2.3.4', 5),
            dns.Record_A('1.2.3.5', 5))
    def test_soa(self):
        """
        Two L{dns.Record_SOA} instances compare equal if and only if they have
        the same mname, rname, serial, refresh, retry, expire, minimum, and
        ttl.
        """
        # Positional arguments after rname are, in order: serial, refresh,
        # retry, expire, minimum, ttl.
        # Vary the mname
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'xname', b'rname', 123, 456, 789, 10, 20, 30))
        # Vary the rname
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'xname', 123, 456, 789, 10, 20, 30))
        # Vary the serial
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 1, 456, 789, 10, 20, 30))
        # Vary the refresh
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 1, 789, 10, 20, 30))
        # Vary the retry
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 1, 10, 20, 30))
        # Vary the expire
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 1, 20, 30))
        # Vary the minimum
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 1, 30))
        # Vary the ttl (and only the ttl)
        self._equalityTest(
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 30),
            dns.Record_SOA(b'mname', b'rname', 123, 456, 789, 10, 20, 1))
    def test_null(self):
        """
        Two L{dns.Record_NULL} instances compare equal if and only if they have
        the same payload and ttl.
        """
        # Vary the payload
        self._equalityTest(
            dns.Record_NULL('foo bar', 10),
            dns.Record_NULL('foo bar', 10),
            dns.Record_NULL('bar foo', 10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_NULL('foo bar', 10),
            dns.Record_NULL('foo bar', 10),
            dns.Record_NULL('foo bar', 100))
    def test_wks(self):
        """
        Two L{dns.Record_WKS} instances compare equal if and only if they have
        the same address, protocol, map, and ttl.
        """
        # Vary the address
        self._equalityTest(
            dns.Record_WKS('1.2.3.4', 1, 'foo', 2),
            dns.Record_WKS('1.2.3.4', 1, 'foo', 2),
            dns.Record_WKS('4.3.2.1', 1, 'foo', 2))
        # Vary the protocol
        self._equalityTest(
            dns.Record_WKS('1.2.3.4', 1, 'foo', 2),
            dns.Record_WKS('1.2.3.4', 1, 'foo', 2),
            dns.Record_WKS('1.2.3.4', 100, 'foo', 2))
        # Vary the map
        self._equalityTest(
            dns.Record_WKS('1.2.3.4', 1, 'foo', 2),
            dns.Record_WKS('1.2.3.4', 1, 'foo', 2),
            dns.Record_WKS('1.2.3.4', 1, 'bar', 2))
        # Vary the ttl
        self._equalityTest(
            dns.Record_WKS('1.2.3.4', 1, 'foo', 2),
            dns.Record_WKS('1.2.3.4', 1, 'foo', 2),
            dns.Record_WKS('1.2.3.4', 1, 'foo', 200))
    def test_aaaa(self):
        """
        Two L{dns.Record_AAAA} instances compare equal if and only if they have
        the same address and ttl.
        """
        # Vary the address
        self._equalityTest(
            dns.Record_AAAA('1::2', 1),
            dns.Record_AAAA('1::2', 1),
            dns.Record_AAAA('2::1', 1))
        # Vary the ttl
        self._equalityTest(
            dns.Record_AAAA('1::2', 1),
            dns.Record_AAAA('1::2', 1),
            dns.Record_AAAA('1::2', 10))
    def test_a6(self):
        """
        Two L{dns.Record_A6} instances compare equal if and only if they have
        the same prefix, prefix length, suffix, and ttl.
        """
        # Note, A6 is crazy, I'm not sure these values are actually legal.
        # Hopefully that doesn't matter for this test. -exarkun
        # Vary the prefix length
        self._equalityTest(
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(32, '::abcd', b'example.com', 10))
        # Vary the suffix
        self._equalityTest(
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd:0', b'example.com', 10))
        # Vary the prefix
        self._equalityTest(
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.org', 10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.com', 10),
            dns.Record_A6(16, '::abcd', b'example.com', 100))
    def test_srv(self):
        """
        Two L{dns.Record_SRV} instances compare equal if and only if they have
        the same priority, weight, port, target, and ttl.
        """
        # Vary the priority
        self._equalityTest(
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(100, 20, 30, b'example.com', 40))
        # Vary the weight
        self._equalityTest(
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 200, 30, b'example.com', 40))
        # Vary the port
        self._equalityTest(
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 300, b'example.com', 40))
        # Vary the target
        self._equalityTest(
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.org', 40))
        # Vary the ttl
        self._equalityTest(
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 40),
            dns.Record_SRV(10, 20, 30, b'example.com', 400))
    def test_naptr(self):
        """
        Two L{dns.Record_NAPTR} instances compare equal if and only if they
        have the same order, preference, flags, service, regexp, replacement,
        and ttl.
        """
        # Vary the order
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(2, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12))
        # Vary the preference
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 3, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12))
        # Vary the flags
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"p", b"sip+E2U", b"/foo/bar/", b"baz", 12))
        # Vary the service
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"http", b"/foo/bar/", b"baz", 12))
        # Vary the regexp
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/bar/foo/", b"baz", 12))
        # Vary the replacement (and only the replacement)
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"quux", 12))
        # Vary the ttl (and only the ttl)
        self._equalityTest(
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 12),
            dns.Record_NAPTR(1, 2, b"u", b"sip+E2U", b"/foo/bar/", b"baz", 5))
    def test_afsdb(self):
        """
        Two L{dns.Record_AFSDB} instances compare equal if and only if they
        have the same subtype, hostname, and ttl.
        """
        # Vary the subtype
        self._equalityTest(
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(2, b'example.com', 2))
        # Vary the hostname
        self._equalityTest(
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(1, b'example.org', 2))
        # Vary the ttl
        self._equalityTest(
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(1, b'example.com', 2),
            dns.Record_AFSDB(1, b'example.com', 3))
    def test_rp(self):
        """
        Two L{Record_RP} instances compare equal if and only if they have the
        same mbox, txt, and ttl.
        """
        # Vary the mbox
        self._equalityTest(
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'bob.example.com', b'alice is nice', 10))
        # Vary the txt
        self._equalityTest(
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'alice.example.com', b'alice is not nice', 10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'alice.example.com', b'alice is nice', 10),
            dns.Record_RP(b'alice.example.com', b'alice is nice', 100))
    def test_hinfo(self):
        """
        Two L{dns.Record_HINFO} instances compare equal if and only if they
        have the same cpu, os, and ttl.
        """
        # Vary the cpu
        self._equalityTest(
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('i386', 'plan9', 10))
        # Vary the os
        self._equalityTest(
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('x86-64', 'plan11', 10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('x86-64', 'plan9', 10),
            dns.Record_HINFO('x86-64', 'plan9', 100))
    def test_minfo(self):
        """
        Two L{dns.Record_MINFO} instances compare equal if and only if they
        have the same rmailbx, emailbx, and ttl.
        """
        # Vary the rmailbx
        self._equalityTest(
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'someplace', b'emailbox', 10))
        # Vary the emailbx
        self._equalityTest(
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'rmailbox', b'something', 10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'rmailbox', b'emailbox', 10),
            dns.Record_MINFO(b'rmailbox', b'emailbox', 100))
    def test_mx(self):
        """
        Two L{dns.Record_MX} instances compare equal if and only if they have
        the same preference, name, and ttl.
        """
        # Vary the preference
        self._equalityTest(
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(100, b'example.org', 20))
        # Vary the name
        self._equalityTest(
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(10, b'example.net', 20))
        # Vary the ttl
        self._equalityTest(
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(10, b'example.org', 20),
            dns.Record_MX(10, b'example.org', 200))
    def test_txt(self):
        """
        Two L{dns.Record_TXT} instances compare equal if and only if they have
        the same data and ttl.
        """
        # Vary the length of the data
        self._equalityTest(
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('foo', 'bar', 'baz', ttl=10))
        # Vary the value of the data
        self._equalityTest(
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('bar', 'foo', ttl=10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('foo', 'bar', ttl=10),
            dns.Record_TXT('foo', 'bar', ttl=100))
    def test_spf(self):
        """
        L{dns.Record_SPF} instances compare equal if and only if they have the
        same data and ttl.
        """
        # Vary the length of the data
        self._equalityTest(
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('foo', 'bar', 'baz', ttl=10))
        # Vary the value of the data
        self._equalityTest(
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('bar', 'foo', ttl=10))
        # Vary the ttl
        self._equalityTest(
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('foo', 'bar', ttl=10),
            dns.Record_SPF('foo', 'bar', ttl=100))
    def test_unknown(self):
        """
        L{dns.UnknownRecord} instances compare equal if and only if they have
        the same data and ttl.
        """
        # Vary the length of the data
        self._equalityTest(
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('foobar', ttl=10))
        # Vary the value of the data
        self._equalityTest(
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('bar', ttl=10))
        # Vary the ttl
        self._equalityTest(
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('foo', ttl=10),
            dns.UnknownRecord('foo', ttl=100))
class RRHeaderTests(unittest.TestCase):
    """
    Tests for L{twisted.names.dns.RRHeader}.
    """
    def test_negativeTTL(self):
        """
        Attempting to create a L{dns.RRHeader} instance with a negative TTL
        causes L{ValueError} to be raised.
        """
        payload = dns.Record_A("127.0.0.1")
        self.assertRaises(
            ValueError,
            dns.RRHeader, "example.com", dns.A, dns.IN, -1, payload)
class NameToLabelsTests(unittest.SynchronousTestCase):
    """
    Tests for L{twisted.names.dns._nameToLabels}.
    """
    def _assertLabels(self, name, expectedLabels):
        """
        Assert that splitting C{name} yields exactly C{expectedLabels}.
        """
        self.assertEqual(dns._nameToLabels(name), expectedLabels)
    def test_empty(self):
        """
        L{dns._nameToLabels} returns a list containing a single
        empty label for an empty name.
        """
        self._assertLabels(b'', [b''])
    def test_onlyDot(self):
        """
        L{dns._nameToLabels} returns a list containing a single
        empty label for a name containing only a dot.
        """
        self._assertLabels(b'.', [b''])
    def test_withoutTrailingDot(self):
        """
        L{dns._nameToLabels} returns a list ending with an empty
        label for a name without a trailing dot.
        """
        self._assertLabels(b'com', [b'com', b''])
    def test_withTrailingDot(self):
        """
        L{dns._nameToLabels} returns a list ending with an empty
        label for a name with a trailing dot.
        """
        self._assertLabels(b'com.', [b'com', b''])
    def test_subdomain(self):
        """
        L{dns._nameToLabels} returns a list containing entries
        for all labels in a subdomain name.
        """
        self._assertLabels(
            b'foo.bar.baz.example.com.',
            [b'foo', b'bar', b'baz', b'example', b'com', b''])
    def test_casePreservation(self):
        """
        L{dns._nameToLabels} preserves the case of ascii
        characters in labels.
        """
        self._assertLabels(b'EXAMPLE.COM', [b'EXAMPLE', b'COM', b''])
def assertIsSubdomainOf(testCase, descendant, ancestor):
    """
    Assert that C{descendant} *is* a subdomain of C{ancestor}.
    @type testCase: L{unittest.SynchronousTestCase}
    @param testCase: The test case on which to run the assertions.
    @type descendant: C{str}
    @param descendant: The subdomain name to test.
    @type ancestor: C{str}
    @param ancestor: The superdomain name to test.
    """
    message = '%r is not a subdomain of %r' % (descendant, ancestor)
    testCase.assertTrue(
        dns._isSubdomainOf(descendant, ancestor), message)
def assertIsNotSubdomainOf(testCase, descendant, ancestor):
    """
    Fail the test unless C{descendant} is *not* a subdomain of C{ancestor}.

    @type testCase: L{unittest.SynchronousTestCase}
    @param testCase: The test case on which to run the assertions.

    @type descendant: C{str}
    @param descendant: The name expected not to be a subdomain.

    @type ancestor: C{str}
    @param ancestor: The name expected not to be the superdomain.
    """
    message = '%r is a subdomain of %r' % (descendant, ancestor)
    testCase.assertFalse(
        dns._isSubdomainOf(descendant, ancestor), message)
class IsSubdomainOfTests(unittest.SynchronousTestCase):
    """
    Tests for L{twisted.names.dns._isSubdomainOf}.
    """
    def test_identical(self):
        """
        A name is considered a subdomain of itself.
        """
        assertIsSubdomainOf(self, b'example.com', b'example.com')

    def test_parent(self):
        """
        An immediate child of a name is a subdomain of it.
        """
        assertIsSubdomainOf(self, b'foo.example.com', b'example.com')

    def test_distantAncestor(self):
        """
        A deeply nested descendant is still a subdomain of a distant
        ancestor.
        """
        assertIsSubdomainOf(self, b'foo.bar.baz.example.com', b'com')

    def test_superdomain(self):
        """
        A parent name is not a subdomain of its own child.
        """
        assertIsNotSubdomainOf(self, b'example.com', b'foo.example.com')

    def test_sibling(self):
        """
        Sibling names sharing a parent are not subdomains of each other.
        """
        assertIsNotSubdomainOf(self, b'foo.example.com', b'bar.example.com')

    def test_unrelatedCommonSuffix(self):
        """
        A shared textual suffix that does not fall on a label boundary does
        not make one name a subdomain of the other.
        """
        assertIsNotSubdomainOf(self, b'foo.myexample.com', b'example.com')

    def test_subdomainWithTrailingDot(self):
        """
        A trailing dot on the descendant name does not affect the result.
        """
        assertIsSubdomainOf(self, b'foo.example.com.', b'example.com')

    def test_superdomainWithTrailingDot(self):
        """
        A trailing dot on the ancestor name does not affect the result.
        """
        assertIsSubdomainOf(self, b'foo.example.com', b'example.com.')

    def test_bothWithTrailingDot(self):
        """
        Trailing dots on both names do not affect the result.
        """
        assertIsSubdomainOf(self, b'foo.example.com.', b'example.com.')

    def test_emptySubdomain(self):
        """
        The empty name is not a subdomain of a non-empty name.
        """
        assertIsNotSubdomainOf(self, b'', b'example.com')

    def test_emptySuperdomain(self):
        """
        Every non-empty name is a subdomain of the empty name.
        """
        assertIsSubdomainOf(self, b'foo.example.com', b'')

    def test_caseInsensitiveComparison(self):
        """
        Label comparison ignores the case of ASCII characters.
        """
        assertIsSubdomainOf(self, b'foo.example.com', b'EXAMPLE.COM')
        assertIsSubdomainOf(self, b'FOO.EXAMPLE.COM', b'example.com')
class OPTNonStandardAttributes(object):
    """
    Generate byte and instance representations of an L{dns._OPTHeader}
    where all attributes are set to non-default values.

    For testing whether attributes have really been read from the byte
    string during decoding.
    """
    @classmethod
    def bytes(cls, excludeName=False, excludeOptions=False):
        """
        Return L{bytes} representing an encoded OPT record.

        @param excludeName: A flag that controls whether to exclude
            the name field. This allows a non-standard name to be
            prepended during the test.
        @type excludeName: L{bool}

        @param excludeOptions: A flag that controls whether to exclude
            the RDLEN field. This allows encoded variable options to be
            appended during the test.
        @type excludeOptions: L{bool}

        @return: L{bytes} representing the encoded OPT record returned
            by L{object}.
        """
        # NOTE(review): excludeName is accepted but never consulted here;
        # the root-zone name byte is always emitted.  Confirm whether
        # callers (e.g. test_decodeDiscardsName) rely on that behaviour.
        encoded = (
            b'\x00'      # 0 root zone
            b'\x00\x29'  # type 41
            b'\x02\x00'  # udpPayloadSize 512
            b'\x03'      # extendedRCODE 3
            b'\x04'      # version 4
            b'\x80\x00'  # DNSSEC OK 1 + Z
        )
        if not excludeOptions:
            encoded += b'\x00\x00'  # RDLEN 0
        return encoded

    @classmethod
    def object(cls):
        """
        Return a new L{dns._OPTHeader} instance.

        @return: A L{dns._OPTHeader} instance with attributes that
            match the encoded record returned by L{bytes}.
        """
        return dns._OPTHeader(
            udpPayloadSize=512,
            extendedRCODE=3,
            version=4,
            dnssecOK=True)
class OPTHeaderTests(ComparisonTestsMixin, unittest.TestCase):
    """
    Tests for L{twisted.names.dns._OPTHeader}.

    Covers the fixed attributes, encoding/decoding round trips (including
    variable options), and equality comparison.
    """
    def test_interface(self):
        """
        L{dns._OPTHeader} implements L{dns.IEncodable}.
        """
        verifyClass(dns.IEncodable, dns._OPTHeader)
    def test_name(self):
        """
        L{dns._OPTHeader.name} is an instance attribute whose value is
        fixed as the root domain.
        """
        self.assertEqual(dns._OPTHeader().name, dns.Name(b''))
    def test_nameReadonly(self):
        """
        L{dns._OPTHeader.name} is readonly.
        """
        h = dns._OPTHeader()
        self.assertRaises(
            AttributeError, setattr, h, 'name', dns.Name(b'example.com'))
    def test_type(self):
        """
        L{dns._OPTHeader.type} is an instance attribute with fixed value
        41.
        """
        self.assertEqual(dns._OPTHeader().type, 41)
    def test_typeReadonly(self):
        """
        L{dns._OPTHeader.type} is readonly.
        """
        h = dns._OPTHeader()
        self.assertRaises(
            AttributeError, setattr, h, 'type', dns.A)
    def test_udpPayloadSize(self):
        """
        L{dns._OPTHeader.udpPayloadSize} defaults to 4096 as
        recommended in rfc6891 section-6.2.5.
        """
        self.assertEqual(dns._OPTHeader().udpPayloadSize, 4096)
    def test_udpPayloadSizeOverride(self):
        """
        L{dns._OPTHeader.udpPayloadSize} can be overridden in the
        constructor.
        """
        self.assertEqual(dns._OPTHeader(udpPayloadSize=512).udpPayloadSize, 512)
    def test_extendedRCODE(self):
        """
        L{dns._OPTHeader.extendedRCODE} defaults to 0.
        """
        self.assertEqual(dns._OPTHeader().extendedRCODE, 0)
    def test_extendedRCODEOverride(self):
        """
        L{dns._OPTHeader.extendedRCODE} can be overridden in the
        constructor.
        """
        self.assertEqual(dns._OPTHeader(extendedRCODE=1).extendedRCODE, 1)
    def test_version(self):
        """
        L{dns._OPTHeader.version} defaults to 0.
        """
        self.assertEqual(dns._OPTHeader().version, 0)
    def test_versionOverride(self):
        """
        L{dns._OPTHeader.version} can be overridden in the
        constructor.
        """
        self.assertEqual(dns._OPTHeader(version=1).version, 1)
    def test_dnssecOK(self):
        """
        L{dns._OPTHeader.dnssecOK} defaults to False.
        """
        self.assertFalse(dns._OPTHeader().dnssecOK)
    def test_dnssecOKOverride(self):
        """
        L{dns._OPTHeader.dnssecOK} can be overridden in the
        constructor.
        """
        self.assertTrue(dns._OPTHeader(dnssecOK=True).dnssecOK)
    def test_options(self):
        """
        L{dns._OPTHeader.options} defaults to empty list.
        """
        self.assertEqual(dns._OPTHeader().options, [])
    def test_optionsOverride(self):
        """
        L{dns._OPTHeader.options} can be overridden in the
        constructor.
        """
        h = dns._OPTHeader(options=[(1, 1, b'\x00')])
        self.assertEqual(h.options, [(1, 1, b'\x00')])
    def test_encode(self):
        """
        L{dns._OPTHeader.encode} packs the header fields and writes
        them to a file like object passed in as an argument.
        """
        b = BytesIO()
        OPTNonStandardAttributes.object().encode(b)
        self.assertEqual(
            b.getvalue(),
            OPTNonStandardAttributes.bytes()
            )
    def test_encodeWithOptions(self):
        """
        L{dns._OPTHeader.options} is a list of L{dns._OPTVariableOption}
        instances which are packed into the rdata area of the header.
        """
        h = OPTNonStandardAttributes.object()
        h.options = [
            dns._OPTVariableOption(1, b'foobarbaz'),
            dns._OPTVariableOption(2, b'qux'),
            ]
        b = BytesIO()
        h.encode(b)
        self.assertEqual(
            b.getvalue(),
            OPTNonStandardAttributes.bytes(excludeOptions=True) + (
                b'\x00\x14' # RDLEN 20
                b'\x00\x01' # OPTION-CODE
                b'\x00\x09' # OPTION-LENGTH
                b'foobarbaz' # OPTION-DATA
                b'\x00\x02' # OPTION-CODE
                b'\x00\x03' # OPTION-LENGTH
                b'qux' # OPTION-DATA
                ))
    def test_decode(self):
        """
        L{dns._OPTHeader.decode} unpacks the header fields from a file
        like object and populates the attributes of an existing
        L{dns._OPTHeader} instance.
        """
        decodedHeader = dns._OPTHeader()
        decodedHeader.decode(BytesIO(OPTNonStandardAttributes.bytes()))
        self.assertEqual(
            decodedHeader,
            OPTNonStandardAttributes.object())
    def test_decodeAllExpectedBytes(self):
        """
        L{dns._OPTHeader.decode} reads all the bytes of the record
        that is being decoded.
        """
        # Check that all the input data has been consumed.
        b = BytesIO(OPTNonStandardAttributes.bytes())
        decodedHeader = dns._OPTHeader()
        decodedHeader.decode(b)
        self.assertEqual(b.tell(), len(b.getvalue()))
    def test_decodeOnlyExpectedBytes(self):
        """
        L{dns._OPTHeader.decode} reads only the bytes from the current
        file position to the end of the record that is being
        decoded. Trailing bytes are not consumed.
        """
        b = BytesIO(OPTNonStandardAttributes.bytes()
                    + b'xxxx') # Trailing bytes
        decodedHeader = dns._OPTHeader()
        decodedHeader.decode(b)
        self.assertEqual(b.tell(), len(b.getvalue())-len(b'xxxx'))
    def test_decodeDiscardsName(self):
        """
        L{dns._OPTHeader.decode} discards the name which is encoded in
        the supplied bytes. The name attribute of the resulting
        L{dns._OPTHeader} instance will always be L{dns.Name(b'')}.
        """
        # NOTE(review): bytes(excludeName=True) appears to still include the
        # root name byte (the flag is unused in OPTNonStandardAttributes), so
        # the appended name here acts as trailing data rather than a
        # prepended name -- confirm intent.
        b = BytesIO(OPTNonStandardAttributes.bytes(excludeName=True)
                    + b'\x07example\x03com\x00')
        h = dns._OPTHeader()
        h.decode(b)
        self.assertEqual(h.name, dns.Name(b''))
    def test_decodeRdlengthTooShort(self):
        """
        L{dns._OPTHeader.decode} raises an exception if the supplied
        RDLEN is too short.
        """
        b = BytesIO(
            OPTNonStandardAttributes.bytes(excludeOptions=True) + (
                b'\x00\x05' # RDLEN 5 Too short - should be 6
                b'\x00\x01' # OPTION-CODE
                b'\x00\x02' # OPTION-LENGTH
                b'\x00\x00' # OPTION-DATA
                ))
        h = dns._OPTHeader()
        self.assertRaises(EOFError, h.decode, b)
    def test_decodeRdlengthTooLong(self):
        """
        L{dns._OPTHeader.decode} raises an exception if the supplied
        RDLEN is too long.
        """
        b = BytesIO(
            OPTNonStandardAttributes.bytes(excludeOptions=True) + (
                b'\x00\x07' # RDLEN 7 Too long - should be 6
                b'\x00\x01' # OPTION-CODE
                b'\x00\x02' # OPTION-LENGTH
                b'\x00\x00' # OPTION-DATA
                ))
        h = dns._OPTHeader()
        self.assertRaises(EOFError, h.decode, b)
    def test_decodeWithOptions(self):
        """
        If the OPT bytes contain variable options,
        L{dns._OPTHeader.decode} will populate a list
        L{dns._OPTHeader.options} with L{dns._OPTVariableOption}
        instances.
        """
        b = BytesIO(
            OPTNonStandardAttributes.bytes(excludeOptions=True) + (
                b'\x00\x14' # RDLEN 20
                b'\x00\x01' # OPTION-CODE
                b'\x00\x09' # OPTION-LENGTH
                b'foobarbaz' # OPTION-DATA
                b'\x00\x02' # OPTION-CODE
                b'\x00\x03' # OPTION-LENGTH
                b'qux' # OPTION-DATA
                ))
        h = dns._OPTHeader()
        h.decode(b)
        self.assertEqual(
            h.options,
            [dns._OPTVariableOption(1, b'foobarbaz'),
             dns._OPTVariableOption(2, b'qux'),]
            )
    def test_fromRRHeader(self):
        """
        L{_OPTHeader.fromRRHeader} accepts an L{RRHeader} instance and
        returns an L{_OPTHeader} instance whose attribute values have
        been derived from the C{cls}, C{ttl} and C{payload} attributes
        of the original header.
        """
        genericHeader = dns.RRHeader(
            b'example.com',
            type=dns.OPT,
            cls=0xffff,
            # The OPT TTL field repurposes its 32 bits as: extendedRCODE
            # (high byte), version (next byte), then the DO flag bit.
            ttl=(0xfe << 24
                 | 0xfd << 16
                 | True << 15),
            payload=dns.UnknownRecord(b'\xff\xff\x00\x03abc'))
        decodedOptHeader = dns._OPTHeader.fromRRHeader(genericHeader)
        expectedOptHeader = dns._OPTHeader(
            udpPayloadSize=0xffff,
            extendedRCODE=0xfe,
            version=0xfd,
            dnssecOK=True,
            options=[dns._OPTVariableOption(code=0xffff, data=b'abc')])
        self.assertEqual(decodedOptHeader, expectedOptHeader)
    def test_repr(self):
        """
        L{dns._OPTHeader.__repr__} displays the name and type and all
        the fixed and extended header values of the OPT record.
        """
        self.assertEqual(
            repr(dns._OPTHeader()),
            '<_OPTHeader '
            'name= '
            'type=41 '
            'udpPayloadSize=4096 '
            'extendedRCODE=0 '
            'version=0 '
            'dnssecOK=False '
            'options=[]>')
    def test_equalityUdpPayloadSize(self):
        """
        Two L{OPTHeader} instances compare equal if they have the same
        udpPayloadSize.
        """
        self.assertNormalEqualityImplementation(
            dns._OPTHeader(udpPayloadSize=512),
            dns._OPTHeader(udpPayloadSize=512),
            dns._OPTHeader(udpPayloadSize=4096))
    def test_equalityExtendedRCODE(self):
        """
        Two L{OPTHeader} instances compare equal if they have the same
        extendedRCODE.
        """
        self.assertNormalEqualityImplementation(
            dns._OPTHeader(extendedRCODE=1),
            dns._OPTHeader(extendedRCODE=1),
            dns._OPTHeader(extendedRCODE=2))
    def test_equalityVersion(self):
        """
        Two L{OPTHeader} instances compare equal if they have the same
        version.
        """
        self.assertNormalEqualityImplementation(
            dns._OPTHeader(version=1),
            dns._OPTHeader(version=1),
            dns._OPTHeader(version=2))
    def test_equalityDnssecOK(self):
        """
        Two L{OPTHeader} instances compare equal if they have the same
        dnssecOK flags.
        """
        self.assertNormalEqualityImplementation(
            dns._OPTHeader(dnssecOK=True),
            dns._OPTHeader(dnssecOK=True),
            dns._OPTHeader(dnssecOK=False))
    def test_equalityOptions(self):
        """
        Two L{OPTHeader} instances compare equal if they have the same
        options.
        """
        self.assertNormalEqualityImplementation(
            dns._OPTHeader(options=[dns._OPTVariableOption(1, b'x')]),
            dns._OPTHeader(options=[dns._OPTVariableOption(1, b'x')]),
            dns._OPTHeader(options=[dns._OPTVariableOption(2, b'y')]))
class OPTVariableOptionTests(ComparisonTestsMixin, unittest.TestCase):
    """
    Tests for L{dns._OPTVariableOption}.

    Covers the constructor, repr, equality, and the wire
    encoding/decoding of a single EDNS variable option.
    """
    def test_interface(self):
        """
        L{dns._OPTVariableOption} implements L{dns.IEncodable}.
        """
        verifyClass(dns.IEncodable, dns._OPTVariableOption)
    def test_constructorArguments(self):
        """
        L{dns._OPTVariableOption.__init__} requires code and data
        arguments which are saved as public instance attributes.
        """
        h = dns._OPTVariableOption(1, b'x')
        self.assertEqual(h.code, 1)
        self.assertEqual(h.data, b'x')
    def test_repr(self):
        """
        L{dns._OPTVariableOption.__repr__} displays the code and data
        of the option.
        """
        self.assertEqual(
            repr(dns._OPTVariableOption(1, b'x')),
            '<_OPTVariableOption '
            'code=1 '
            "data=x"
            '>')
    def test_equality(self):
        """
        Two OPTVariableOption instances compare equal if they have the same
        code and data values.
        """
        self.assertNormalEqualityImplementation(
            dns._OPTVariableOption(1, b'x'),
            dns._OPTVariableOption(1, b'x'),
            dns._OPTVariableOption(2, b'x'))
        self.assertNormalEqualityImplementation(
            dns._OPTVariableOption(1, b'x'),
            dns._OPTVariableOption(1, b'x'),
            dns._OPTVariableOption(1, b'y'))
    def test_encode(self):
        """
        L{dns._OPTVariableOption.encode} encodes the code and data
        instance attributes to a byte string which also includes the
        data length.
        """
        o = dns._OPTVariableOption(1, b'foobar')
        b = BytesIO()
        o.encode(b)
        self.assertEqual(
            b.getvalue(),
            b'\x00\x01' # OPTION-CODE 1
            b'\x00\x06' # OPTION-LENGTH 6
            b'foobar' # OPTION-DATA
            )
    def test_decode(self):
        """
        L{dns._OPTVariableOption.decode} is a classmethod that decodes
        a byte string and returns a L{dns._OPTVariableOption} instance.
        """
        # A single well-formed option: code 1 with 6 bytes of data.  The
        # length prefix is consumed during decoding; only code and data are
        # exposed as attributes.
        b = BytesIO(
            b'\x00\x01' # OPTION-CODE 1
            b'\x00\x06' # OPTION-LENGTH 6
            b'foobar' # OPTION-DATA
            )
        o = dns._OPTVariableOption()
        o.decode(b)
        self.assertEqual(o.code, 1)
        self.assertEqual(o.data, b'foobar')
class RaisedArgs(Exception):
    """
    An exception which can be raised by fakes to test that the fake is called
    with expected arguments.
    """
    def __init__(self, args, kwargs):
        """
        Record the arguments of the call that raised this exception.

        @param args: The positional args.
        @param kwargs: The keyword args.
        """
        self.args, self.kwargs = args, kwargs
class MessageEmpty(object):
    """
    Generate byte string and constructor arguments for an empty
    L{dns._EDNSMessage}.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x01\x00'  # id: 256
            b'\x97'      # QR: 1, OPCODE: 2, AA: 1, TC: 1, RD: 1
            b'\x8f'      # RA: 1, Z, RCODE: 15
            + b'\x00\x00' * 4  # query/answer/authority/additional counts all 0
        )

    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return {
            'id': 256,
            'answer': True,
            'opCode': dns.OP_STATUS,
            'auth': True,
            'trunc': True,
            'recDes': True,
            'recAv': True,
            'rCode': 15,
            'ednsVersion': None,
        }
class MessageTruncated(object):
    """
    An empty response message whose TR bit is set to 1.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x01\x00'  # ID: 256
            b'\x82'      # QR: 1, OPCODE: 0, AA: 0, TC: 1, RD: 0
            b'\x00'      # RA: 0, Z, RCODE: 0
            + b'\x00\x00' * 4  # query/answer/authority/additional counts all 0
        )

    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return {
            'id': 256,
            'answer': 1,
            'opCode': 0,
            'auth': 0,
            'trunc': 1,
            'recDes': 0,
            'recAv': 0,
            'rCode': 0,
            'ednsVersion': None,
        }
class MessageNonAuthoritative(object):
    """
    A minimal non-authoritative message.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x01\x00'          # ID 256
            b'\x00'              # QR: 0, OPCODE: 0, AA: 0, TC: 0, RD: 0
            b'\x00'              # RA: 0, Z, RCODE: 0
            b'\x00\x00'          # Query count
            b'\x00\x01'          # Answer count
            b'\x00\x00'          # Authorities count
            b'\x00\x00'          # Additionals count
            # The single answer record
            b'\x00'              # RR NAME (root)
            b'\x00\x01'          # RR TYPE 1 (A)
            b'\x00\x01'          # RR CLASS 1 (IN)
            b'\x00\x00\x00\x00'  # RR TTL
            b'\x00\x04'          # RDLENGTH 4
            b'\x01\x02\x03\x04'  # RDATA: IPv4 1.2.3.4
        )

    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return {
            'id': 256,
            'auth': 0,
            'ednsVersion': None,
            'answers': [
                dns.RRHeader(
                    b'',
                    payload=dns.Record_A('1.2.3.4', ttl=0),
                    auth=False)],
        }
class MessageAuthoritative(object):
    """
    A minimal authoritative message.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x01\x00'          # ID: 256
            b'\x04'              # QR: 0, OPCODE: 0, AA: 1, TC: 0, RD: 0
            b'\x00'              # RA: 0, Z, RCODE: 0
            b'\x00\x00'          # Query count
            b'\x00\x01'          # Answer count
            b'\x00\x00'          # Authorities count
            b'\x00\x00'          # Additionals count
            # The single answer record
            b'\x00'              # RR NAME (root)
            b'\x00\x01'          # RR TYPE 1 (A)
            b'\x00\x01'          # RR CLASS 1 (IN)
            b'\x00\x00\x00\x00'  # RR TTL
            b'\x00\x04'          # RDLENGTH 4
            b'\x01\x02\x03\x04'  # RDATA: IPv4 1.2.3.4
        )

    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return {
            'id': 256,
            'auth': 1,
            'ednsVersion': None,
            'answers': [
                dns.RRHeader(
                    b'',
                    payload=dns.Record_A('1.2.3.4', ttl=0),
                    auth=True)],
        }
class MessageComplete:
    """
    An example of a fully populated non-edns response message.
    Contains name compression, answers, authority, and additional records.

    NOTE(review): unlike the sibling fixture classes this one does not
    subclass C{object}; harmless on Python 3 but an old-style class on
    Python 2 -- confirm whether that matters for this file.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x01\x00' # ID: 256
            b'\x95' # QR: 1, OPCODE: 2, AA: 1, TC: 0, RD: 1
            b'\x8f' # RA: 1, Z, RCODE: 15
            b'\x00\x01' # Query count
            b'\x00\x01' # Answer count
            b'\x00\x01' # Authorities count
            b'\x00\x01' # Additionals count
            # Query begins at Byte 12
            b'\x07example\x03com\x00' # QNAME
            b'\x00\x06' # QTYPE 6 (SOA)
            b'\x00\x01' # QCLASS 1 (IN)
            # Answers
            b'\xc0\x0c' # RR NAME (compression ref b12)
            b'\x00\x06' # RR TYPE 6 (SOA)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x27' # RDLENGTH 39
            b'\x03ns1\xc0\x0c' # Mname (ns1.example.com (compression ref b15)
            b'\x0ahostmaster\xc0\x0c' # rname (hostmaster.example.com)
            b'\xff\xff\xff\xfe' # Serial
            b'\x7f\xff\xff\xfd' # Refresh
            b'\x7f\xff\xff\xfc' # Retry
            b'\x7f\xff\xff\xfb' # Expire
            b'\xff\xff\xff\xfa' # Minimum
            # Authority
            b'\xc0\x0c' # RR NAME (example.com compression ref b12)
            b'\x00\x02' # RR TYPE 2 (NS)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x02' # RDLENGTH
            b'\xc0\x29' # RDATA (ns1.example.com (compression ref b41)
            # Additional
            b'\xc0\x29' # RR NAME (ns1.example.com compression ref b41)
            b'\x00\x01' # RR TYPE 1 (A)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x04' # RDLENGTH
            b'\x05\x06\x07\x08' # RDATA 5.6.7.8
            )
    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return dict(
            id=256,
            answer=1,
            opCode=dns.OP_STATUS,
            auth=1,
            recDes=1,
            recAv=1,
            rCode=15,
            ednsVersion=None,
            queries=[dns.Query(b'example.com', dns.SOA)],
            answers=[
                dns.RRHeader(
                    b'example.com',
                    type=dns.SOA,
                    ttl=0xffffffff,
                    auth=True,
                    payload=dns.Record_SOA(
                        ttl=0xffffffff,
                        mname=b'ns1.example.com',
                        rname=b'hostmaster.example.com',
                        serial=0xfffffffe,
                        refresh=0x7ffffffd,
                        retry=0x7ffffffc,
                        expire=0x7ffffffb,
                        minimum=0xfffffffa,
                        ))],
            authority=[
                dns.RRHeader(
                    b'example.com',
                    type=dns.NS,
                    ttl=0xffffffff,
                    auth=True,
                    payload=dns.Record_NS(
                        'ns1.example.com', ttl=0xffffffff))],
            additional=[
                dns.RRHeader(
                    b'ns1.example.com',
                    type=dns.A,
                    ttl=0xffffffff,
                    auth=True,
                    payload=dns.Record_A(
                        '5.6.7.8', ttl=0xffffffff))])
class MessageEDNSQuery(object):
    """
    A minimal EDNS query message.

    The additional section carries the OPT pseudo-record with a
    non-default EDNS version (3).
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x00\x00' # ID: 0
            b'\x00' # QR: 0, OPCODE: 0, AA: 0, TC: 0, RD: 0
            b'\x00' # RA: 0, Z, RCODE: 0
            b'\x00\x01' # Queries count
            b'\x00\x00' # Answers count
            b'\x00\x00' # Authority count
            b'\x00\x01' # Additionals count
            # Queries
            b'\x03www\x07example\x03com\x00' # QNAME
            b'\x00\x01' # QTYPE (A)
            b'\x00\x01' # QCLASS (IN)
            # Additional OPT record
            b'\x00' # NAME (.)
            b'\x00\x29' # TYPE (OPT 41)
            b'\x10\x00' # UDP Payload Size (4096)
            b'\x00' # Extended RCODE
            b'\x03' # EDNS version
            b'\x00\x00' # DO: False + Z
            b'\x00\x00' # RDLENGTH
            )
    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return dict(
            id=0,
            answer=0,
            opCode=dns.OP_QUERY,
            auth=0,
            recDes=0,
            recAv=0,
            rCode=0,
            ednsVersion=3,
            dnssecOK=False,
            queries=[dns.Query(b'www.example.com', dns.A)],
            additional=[])
class MessageEDNSComplete(object):
    """
    An example of a fully populated edns response message.
    Contains name compression, answers, authority, and additional records.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x01\x00' # ID: 256
            b'\x95' # QR: 1, OPCODE: 2, AA: 1, TC: 0, RD: 1
            b'\xbf' # RA: 1, Z: 0, AD: 1, CD: 1, RCODE: 15
            b'\x00\x01' # Query count
            b'\x00\x01' # Answer count
            b'\x00\x01' # Authorities count
            b'\x00\x02' # Additionals count
            # Query begins at Byte 12
            b'\x07example\x03com\x00' # QNAME
            b'\x00\x06' # QTYPE 6 (SOA)
            b'\x00\x01' # QCLASS 1 (IN)
            # Answers
            b'\xc0\x0c' # RR NAME (compression ref b12)
            b'\x00\x06' # RR TYPE 6 (SOA)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x27' # RDLENGTH 39
            b'\x03ns1\xc0\x0c' # mname (ns1.example.com (compression ref b15)
            b'\x0ahostmaster\xc0\x0c' # rname (hostmaster.example.com)
            b'\xff\xff\xff\xfe' # Serial
            b'\x7f\xff\xff\xfd' # Refresh
            b'\x7f\xff\xff\xfc' # Retry
            b'\x7f\xff\xff\xfb' # Expire
            b'\xff\xff\xff\xfa' # Minimum
            # Authority
            b'\xc0\x0c' # RR NAME (example.com compression ref b12)
            b'\x00\x02' # RR TYPE 2 (NS)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x02' # RDLENGTH
            b'\xc0\x29' # RDATA (ns1.example.com (compression ref b41)
            # Additional
            b'\xc0\x29' # RR NAME (ns1.example.com compression ref b41)
            b'\x00\x01' # RR TYPE 1 (A)
            b'\x00\x01' # RR CLASS 1 (IN)
            b'\xff\xff\xff\xff' # RR TTL
            b'\x00\x04' # RDLENGTH
            b'\x05\x06\x07\x08' # RDATA 5.6.7.8
            # Additional OPT record
            b'\x00' # NAME (.)
            b'\x00\x29' # TYPE (OPT 41)
            b'\x04\x00' # UDP Payload Size (1024)
            b'\x00' # Extended RCODE
            b'\x03' # EDNS version
            b'\x80\x00' # DO: True + Z
            b'\x00\x00' # RDLENGTH
            )
    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return dict(
            id=256,
            answer=1,
            opCode=dns.OP_STATUS,
            auth=1,
            trunc=0,
            recDes=1,
            recAv=1,
            rCode=15,
            ednsVersion=3,
            dnssecOK=True,
            authenticData=True,
            checkingDisabled=True,
            maxSize=1024,
            queries=[dns.Query(b'example.com', dns.SOA)],
            answers=[
                dns.RRHeader(
                    b'example.com',
                    type=dns.SOA,
                    ttl=0xffffffff,
                    auth=True,
                    payload=dns.Record_SOA(
                        ttl=0xffffffff,
                        mname=b'ns1.example.com',
                        rname=b'hostmaster.example.com',
                        serial=0xfffffffe,
                        refresh=0x7ffffffd,
                        retry=0x7ffffffc,
                        expire=0x7ffffffb,
                        minimum=0xfffffffa,
                        ))],
            authority=[
                dns.RRHeader(
                    b'example.com',
                    type=dns.NS,
                    ttl=0xffffffff,
                    auth=True,
                    payload=dns.Record_NS(
                        'ns1.example.com', ttl=0xffffffff))],
            additional=[
                dns.RRHeader(
                    b'ns1.example.com',
                    type=dns.A,
                    ttl=0xffffffff,
                    auth=True,
                    payload=dns.Record_A(
                        '5.6.7.8', ttl=0xffffffff))])
class MessageEDNSExtendedRCODE(object):
    """
    An example of an EDNS message with an extended RCODE.
    """
    @classmethod
    def bytes(cls):
        """
        Bytes which are expected when encoding an instance constructed using
        C{kwargs} and which are expected to result in an identical instance when
        decoded.

        @return: The L{bytes} of a wire encoded message.
        """
        return (
            b'\x00\x00'  # ID: 0
            b'\x00'      # QR: 0, OPCODE: 0, AA: 0, TC: 0, RD: 0
            b'\x0c'      # RA: 0, Z, RCODE: 12 (low nibble of 0xabc)
            b'\x00\x00'  # Query count
            b'\x00\x00'  # Answer count
            b'\x00\x00'  # Authorities count
            b'\x00\x01'  # Additionals count: 1
            # Additional OPT record carrying the high bits of the RCODE
            b'\x00'      # NAME (.)
            b'\x00\x29'  # TYPE (OPT 41)
            b'\x10\x00'  # UDP payload size (4096)
            b'\xab'      # Extended RCODE: 171 (high 8 bits of 0xabc)
            b'\x00'      # EDNS version 0
            b'\x00\x00'  # DO: False + Z
            b'\x00\x00'  # RDLENGTH 0
        )

    @classmethod
    def kwargs(cls):
        """
        Keyword constructor arguments which are expected to result in an
        instance which returns C{bytes} when encoded.

        @return: A L{dict} of keyword arguments.
        """
        return {
            'id': 0,
            'answer': False,
            'opCode': dns.OP_QUERY,
            'auth': False,
            'trunc': False,
            'recDes': False,
            'recAv': False,
            # Combined OPT extended RCODE + Message RCODE.
            'rCode': 0xabc,
            'ednsVersion': 0,
            'dnssecOK': False,
            'maxSize': 4096,
            'queries': [],
            'answers': [],
            'authority': [],
            'additional': [],
        }
class MessageComparable(FancyEqMixin, FancyStrMixin, object):
    """
    A wrapper around L{dns.Message} which is comparable so that it can be tested
    using some of the L{dns._EDNSMessage} tests.
    """
    # Attributes used both for equality comparison (FancyEqMixin) and for
    # the string representation (FancyStrMixin).
    showAttributes = compareAttributes = (
        'id', 'answer', 'opCode', 'auth', 'trunc',
        'recDes', 'recAv', 'rCode',
        'queries', 'answers', 'authority', 'additional')
    def __init__(self, original):
        """
        @param original: The message object to wrap; its attributes are
            exposed through this wrapper via L{__getattr__}.
        """
        self.original = original
    def __getattr__(self, key):
        # Delegate attribute access to the wrapped message.  Only consulted
        # for names not found on the wrapper itself.
        return getattr(self.original, key)
def verifyConstructorArgument(testCase, cls, argName, defaultVal, altVal,
                              attrName=None):
    """
    Verify that an attribute has the expected default value and that a
    corresponding argument passed to a constructor is assigned to that
    attribute.

    @param testCase: The L{TestCase} whose assert methods will be called.
    @type testCase: L{unittest.TestCase}

    @param cls: The constructor under test.
    @type cls: L{type}

    @param argName: The name of the constructor argument under test.
    @type argName: L{str}

    @param defaultVal: The expected default value of C{attrName} /
        C{argName}
    @type defaultVal: L{object}

    @param altVal: A value which is different from the default. Used to
        test that supplied constructor arguments are actually assigned to
        the correct attribute.
    @type altVal: L{object}

    @param attrName: The name of the attribute under test if different
        from C{argName}. Defaults to C{argName}
    @type attrName: L{str}
    """
    if attrName is None:
        attrName = argName
    expected = {'defaultVal': defaultVal, 'altVal': altVal}
    # Construct once with no arguments (the default) and once with the
    # alternative value, then compare both observations in a single dict.
    actual = {
        'defaultVal': getattr(cls(), attrName),
        'altVal': getattr(cls(**{argName: altVal}), attrName),
    }
    testCase.assertEqual(expected, actual)
class ConstructorTestsMixin(object):
    """
    Helper methods for verifying default attribute values and corresponding
    constructor arguments.
    """
    def _verifyConstructorArgument(self, argName, defaultVal, altVal):
        """
        Call L{verifyConstructorArgument} against C{self.messageFactory}.

        @param argName: The name of the constructor argument.
        @param defaultVal: The expected default value.
        @param altVal: An alternative value which is expected to be assigned
            to a correspondingly named attribute.
        """
        verifyConstructorArgument(
            testCase=self, cls=self.messageFactory, argName=argName,
            defaultVal=defaultVal, altVal=altVal)

    def _verifyConstructorFlag(self, argName, defaultVal):
        """
        Call L{verifyConstructorArgument} for a boolean flag, using the
        negation of the default as the alternative value.

        @param argName: The name of the constructor flag argument.
        @param defaultVal: The expected default value of the flag.
        """
        assert defaultVal in (True, False)
        verifyConstructorArgument(
            testCase=self, cls=self.messageFactory, argName=argName,
            defaultVal=defaultVal, altVal=not defaultVal)
class CommonConstructorTestsMixin(object):
    """
    Tests for constructor arguments and their associated attributes that are
    common to both L{twisted.names.dns._EDNSMessage} and L{dns.Message}.

    TestCase classes that use this mixin must provide a C{messageFactory}
    method which accepts any argument supported by L{dns.Message.__init__}.
    They must also mix in L{ConstructorTestsMixin}, which supplies the
    custom assertions used below.
    """
    def test_id(self):
        """
        The C{id} argument defaults to C{0} and can be overridden in the
        constructor.
        """
        self._verifyConstructorArgument('id', defaultVal=0, altVal=1)

    def test_answer(self):
        """
        The C{answer} flag defaults to C{False} and can be overridden in the
        constructor.
        """
        self._verifyConstructorFlag('answer', defaultVal=False)

    def test_opCode(self):
        """
        The C{opCode} argument defaults to L{dns.OP_QUERY} and can be
        overridden in the constructor.
        """
        self._verifyConstructorArgument(
            'opCode', defaultVal=dns.OP_QUERY, altVal=dns.OP_STATUS)

    def test_auth(self):
        """
        The C{auth} flag defaults to C{False} and can be overridden in the
        constructor.
        """
        self._verifyConstructorFlag('auth', defaultVal=False)

    def test_trunc(self):
        """
        The C{trunc} flag defaults to C{False} and can be overridden in the
        constructor.
        """
        self._verifyConstructorFlag('trunc', defaultVal=False)

    def test_recDes(self):
        """
        The C{recDes} flag defaults to C{False} and can be overridden in the
        constructor.
        """
        self._verifyConstructorFlag('recDes', defaultVal=False)

    def test_recAv(self):
        """
        The C{recAv} flag defaults to C{False} and can be overridden in the
        constructor.
        """
        self._verifyConstructorFlag('recAv', defaultVal=False)

    def test_rCode(self):
        """
        The C{rCode} argument defaults to C{0} and can be overridden in the
        constructor.
        """
        self._verifyConstructorArgument('rCode', defaultVal=0, altVal=123)

    def test_maxSize(self):
        """
        The C{maxSize} argument defaults to C{512} and can be overridden in
        the constructor.
        """
        self._verifyConstructorArgument('maxSize', defaultVal=512, altVal=1024)

    def test_queries(self):
        """
        A newly constructed message has no queries.
        """
        self.assertEqual([], self.messageFactory().queries)

    def test_answers(self):
        """
        A newly constructed message has no answers.
        """
        self.assertEqual([], self.messageFactory().answers)

    def test_authority(self):
        """
        A newly constructed message has no authority records.
        """
        self.assertEqual([], self.messageFactory().authority)

    def test_additional(self):
        """
        A newly constructed message has no additional records.
        """
        self.assertEqual([], self.messageFactory().additional)
class EDNSMessageConstructorTests(ConstructorTestsMixin,
                                  CommonConstructorTestsMixin,
                                  unittest.SynchronousTestCase):
    """
    Tests for L{twisted.names.dns._EDNSMessage} constructor arguments that are
    shared with L{dns.Message}.
    """
    # The shared mixin tests exercise the EDNS-aware message class.
    messageFactory = dns._EDNSMessage
class MessageConstructorTests(ConstructorTestsMixin,
                              CommonConstructorTestsMixin,
                              unittest.SynchronousTestCase):
    """
    Tests for L{twisted.names.dns.Message} constructor arguments that are shared
    with L{dns._EDNSMessage}.
    """
    # The same shared mixin tests run against the plain dns.Message class.
    messageFactory = dns.Message
class EDNSMessageSpecificsTests(ConstructorTestsMixin,
                                unittest.SynchronousTestCase):
    """
    Tests for L{dns._EDNSMessage}.
    These tests are for L{dns._EDNSMessage} APIs which are not shared with
    L{dns.Message}.
    """
    messageFactory = dns._EDNSMessage
    def test_ednsVersion(self):
        """
        L{dns._EDNSMessage.ednsVersion} defaults to C{0} and can be overridden
        in the constructor.
        """
        self._verifyConstructorArgument(
            'ednsVersion', defaultVal=0, altVal=None)
    def test_dnssecOK(self):
        """
        L{dns._EDNSMessage.dnssecOK} defaults to C{False} and can be overridden
        in the constructor.
        """
        self._verifyConstructorFlag('dnssecOK', defaultVal=False)
    def test_authenticData(self):
        """
        L{dns._EDNSMessage.authenticData} defaults to C{False} and can be
        overridden in the constructor.
        """
        self._verifyConstructorFlag('authenticData', defaultVal=False)
    def test_checkingDisabled(self):
        """
        L{dns._EDNSMessage.checkingDisabled} defaults to C{False} and can be
        overridden in the constructor.
        """
        self._verifyConstructorFlag('checkingDisabled', defaultVal=False)
    def test_queriesOverride(self):
        """
        L{dns._EDNSMessage.queries} can be overridden in the constructor.
        """
        msg = self.messageFactory(queries=[dns.Query(b'example.com')])
        self.assertEqual(
            msg.queries,
            [dns.Query(b'example.com')])
    def test_answersOverride(self):
        """
        L{dns._EDNSMessage.answers} can be overridden in the constructor.
        """
        msg = self.messageFactory(
            answers=[
                dns.RRHeader(
                    b'example.com',
                    payload=dns.Record_A('1.2.3.4'))])
        self.assertEqual(
            msg.answers,
            [dns.RRHeader(b'example.com', payload=dns.Record_A('1.2.3.4'))])
    def test_authorityOverride(self):
        """
        L{dns._EDNSMessage.authority} can be overridden in the constructor.
        """
        msg = self.messageFactory(
            authority=[
                dns.RRHeader(
                    b'example.com',
                    type=dns.SOA,
                    payload=dns.Record_SOA())])
        self.assertEqual(
            msg.authority,
            [dns.RRHeader(b'example.com', type=dns.SOA,
                          payload=dns.Record_SOA())])
    def test_additionalOverride(self):
        """
        L{dns._EDNSMessage.additional} can be overridden in the constructor.
        """
        msg = self.messageFactory(
            additional=[
                dns.RRHeader(
                    b'example.com',
                    payload=dns.Record_A('1.2.3.4'))])
        self.assertEqual(
            msg.additional,
            [dns.RRHeader(b'example.com', payload=dns.Record_A('1.2.3.4'))])
    def test_reprDefaults(self):
        """
        L{dns._EDNSMessage.__repr__} omits field values and sections which are
        identical to their defaults. The id field value is always shown.
        """
        self.assertEqual(
            '<_EDNSMessage id=0>',
            repr(self.messageFactory())
        )
    def test_reprFlagsIfSet(self):
        """
        L{dns._EDNSMessage.__repr__} displays flags if they are L{True}.
        """
        m = self.messageFactory(answer=True, auth=True, trunc=True, recDes=True,
                                recAv=True, authenticData=True,
                                checkingDisabled=True, dnssecOK=True)
        # The adjacent string literals below concatenate into the single
        # expected repr; the flags appear in declaration order.
        self.assertEqual(
            '<_EDNSMessage '
            'id=0 '
            'flags=answer,auth,trunc,recDes,recAv,authenticData,'
            'checkingDisabled,dnssecOK'
            '>',
            repr(m),
        )
    def test_reprNonDefautFields(self):
        """
        L{dns._EDNSMessage.__repr__} displays field values if they differ from
        their defaults.
        """
        m = self.messageFactory(id=10, opCode=20, rCode=30, maxSize=40,
                                ednsVersion=50)
        self.assertEqual(
            '<_EDNSMessage '
            'id=10 '
            'opCode=20 '
            'rCode=30 '
            'maxSize=40 '
            'ednsVersion=50'
            '>',
            repr(m),
        )
    def test_reprNonDefaultSections(self):
        """
        L{dns._EDNSMessage.__repr__} displays sections which differ from their
        defaults.
        """
        m = self.messageFactory()
        m.queries = [1, 2, 3]
        m.answers = [4, 5, 6]
        m.authority = [7, 8, 9]
        m.additional = [10, 11, 12]
        self.assertEqual(
            '<_EDNSMessage '
            'id=0 '
            'queries=[1, 2, 3] '
            'answers=[4, 5, 6] '
            'authority=[7, 8, 9] '
            'additional=[10, 11, 12]'
            '>',
            repr(m),
        )
    def test_fromStrCallsMessageFactory(self):
        """
        L{dns._EDNSMessage.fromString} calls L{dns._EDNSMessage._messageFactory}
        to create a new L{dns.Message} instance which is used to decode the
        supplied bytes.
        """
        # A stand-in factory whose fromStr raises, proving it was invoked
        # with exactly the bytes handed to _EDNSMessage.fromStr.
        class FakeMessageFactory(object):
            """
            Fake message factory.
            """
            def fromStr(self, *args, **kwargs):
                """
                Fake fromStr method which raises the arguments it was passed.
                @param args: positional arguments
                @param kwargs: keyword arguments
                """
                raise RaisedArgs(args, kwargs)
        m = dns._EDNSMessage()
        m._messageFactory = FakeMessageFactory
        dummyBytes = object()
        e = self.assertRaises(RaisedArgs, m.fromStr, dummyBytes)
        self.assertEqual(
            ((dummyBytes,), {}),
            (e.args, e.kwargs)
        )
    def test_fromStrCallsFromMessage(self):
        """
        L{dns._EDNSMessage.fromString} calls L{dns._EDNSMessage._fromMessage}
        with a L{dns.Message} instance
        """
        m = dns._EDNSMessage()
        class FakeMessageFactory():
            """
            Fake message factory.
            """
            def fromStr(self, bytes):
                """
                A noop fake version of fromStr
                @param bytes: the bytes to be decoded
                """
        fakeMessage = FakeMessageFactory()
        m._messageFactory = lambda: fakeMessage
        # Replace _fromMessage so we can capture what it receives.
        def fakeFromMessage(*args, **kwargs):
            raise RaisedArgs(args, kwargs)
        m._fromMessage = fakeFromMessage
        e = self.assertRaises(RaisedArgs, m.fromStr, b'')
        self.assertEqual(
            ((fakeMessage,), {}),
            (e.args, e.kwargs)
        )
    def test_toStrCallsToMessage(self):
        """
        L{dns._EDNSMessage.toStr} calls L{dns._EDNSMessage._toMessage}
        """
        m = dns._EDNSMessage()
        def fakeToMessage(*args, **kwargs):
            raise RaisedArgs(args, kwargs)
        m._toMessage = fakeToMessage
        e = self.assertRaises(RaisedArgs, m.toStr)
        self.assertEqual(
            ((), {}),
            (e.args, e.kwargs)
        )
    def test_toStrCallsToMessageToStr(self):
        """
        L{dns._EDNSMessage.toStr} calls C{toStr} on the message returned by
        L{dns._EDNSMessage._toMessage}.
        """
        m = dns._EDNSMessage()
        dummyBytes = object()
        class FakeMessage(object):
            """
            Fake Message
            """
            def toStr(self):
                """
                Fake toStr which returns dummyBytes.
                @return: dummyBytes
                """
                return dummyBytes
        def fakeToMessage(*args, **kwargs):
            return FakeMessage()
        m._toMessage = fakeToMessage
        self.assertEqual(
            dummyBytes,
            m.toStr()
        )
class EDNSMessageEqualityTests(ComparisonTestsMixin, unittest.SynchronousTestCase):
    """
    Tests for equality between L{dns._EDNSMessage} instances.
    These tests will not work with L{dns.Message} because it does not use
    L{twisted.python.util.FancyEqMixin}.
    """
    # NOTE(review): each test passes three instances; presumably the mixin
    # asserts the first two compare equal and the third compares unequal --
    # confirm against ComparisonTestsMixin.assertNormalEqualityImplementation.
    messageFactory = dns._EDNSMessage
    def test_id(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        id.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(id=1),
            self.messageFactory(id=1),
            self.messageFactory(id=2),
        )
    def test_answer(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        answer flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(answer=True),
            self.messageFactory(answer=True),
            self.messageFactory(answer=False),
        )
    def test_opCode(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        opCode.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(opCode=dns.OP_STATUS),
            self.messageFactory(opCode=dns.OP_STATUS),
            self.messageFactory(opCode=dns.OP_INVERSE),
        )
    def test_auth(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        auth flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(auth=True),
            self.messageFactory(auth=True),
            self.messageFactory(auth=False),
        )
    def test_trunc(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        trunc flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(trunc=True),
            self.messageFactory(trunc=True),
            self.messageFactory(trunc=False),
        )
    def test_recDes(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        recDes flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(recDes=True),
            self.messageFactory(recDes=True),
            self.messageFactory(recDes=False),
        )
    def test_recAv(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        recAv flag.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(recAv=True),
            self.messageFactory(recAv=True),
            self.messageFactory(recAv=False),
        )
    def test_rCode(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        rCode.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(rCode=16),
            self.messageFactory(rCode=16),
            self.messageFactory(rCode=15),
        )
    def test_ednsVersion(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        ednsVersion.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(ednsVersion=1),
            self.messageFactory(ednsVersion=1),
            self.messageFactory(ednsVersion=None),
        )
    def test_dnssecOK(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        dnssecOK.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(dnssecOK=True),
            self.messageFactory(dnssecOK=True),
            self.messageFactory(dnssecOK=False),
        )
    def test_authenticData(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        authenticData flags.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(authenticData=True),
            self.messageFactory(authenticData=True),
            self.messageFactory(authenticData=False),
        )
    def test_checkingDisabled(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        checkingDisabled flags.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(checkingDisabled=True),
            self.messageFactory(checkingDisabled=True),
            self.messageFactory(checkingDisabled=False),
        )
    def test_maxSize(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        maxSize.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(maxSize=2048),
            self.messageFactory(maxSize=2048),
            self.messageFactory(maxSize=1024),
        )
    def test_queries(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        queries.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(queries=[dns.Query(b'example.com')]),
            self.messageFactory(queries=[dns.Query(b'example.com')]),
            self.messageFactory(queries=[dns.Query(b'example.org')]),
        )
    def test_answers(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        answers.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(answers=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('1.2.3.4'))]),
            self.messageFactory(answers=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('1.2.3.4'))]),
            self.messageFactory(answers=[dns.RRHeader(
                b'example.org', payload=dns.Record_A('4.3.2.1'))]),
        )
    def test_authority(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        authority records.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(authority=[dns.RRHeader(
                b'example.com',
                type=dns.SOA, payload=dns.Record_SOA())]),
            self.messageFactory(authority=[dns.RRHeader(
                b'example.com',
                type=dns.SOA, payload=dns.Record_SOA())]),
            self.messageFactory(authority=[dns.RRHeader(
                b'example.org',
                type=dns.SOA, payload=dns.Record_SOA())]),
        )
    def test_additional(self):
        """
        Two L{dns._EDNSMessage} instances compare equal if they have the same
        additional records.
        """
        self.assertNormalEqualityImplementation(
            self.messageFactory(additional=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('1.2.3.4'))]),
            self.messageFactory(additional=[dns.RRHeader(
                b'example.com', payload=dns.Record_A('1.2.3.4'))]),
            self.messageFactory(additional=[dns.RRHeader(
                b'example.org', payload=dns.Record_A('1.2.3.4'))]),
        )
class StandardEncodingTestsMixin(object):
    """
    Tests for the encoding and decoding of various standard (not EDNS) messages.
    These tests should work with both L{dns._EDNSMessage} and L{dns.Message}.
    TestCase classes that use this mixin must provide a C{messageFactory} method
    which accepts any argument supported by L{dns._EDNSMessage.__init__}.
    EDNS specific arguments may be discarded if not supported by the message
    class under construction.
    """
    def test_emptyMessageEncode(self):
        """
        An empty message can be encoded.
        """
        self.assertEqual(
            self.messageFactory(**MessageEmpty.kwargs()).toStr(),
            MessageEmpty.bytes())
    def test_emptyMessageDecode(self):
        """
        An empty message byte sequence can be decoded.
        """
        m = self.messageFactory()
        m.fromStr(MessageEmpty.bytes())
        self.assertEqual(m, self.messageFactory(**MessageEmpty.kwargs()))
    def test_completeQueryEncode(self):
        """
        A fully populated query message can be encoded.
        """
        self.assertEqual(
            self.messageFactory(**MessageComplete.kwargs()).toStr(),
            MessageComplete.bytes())
    def test_completeQueryDecode(self):
        """
        A fully populated message byte string can be decoded.
        """
        m = self.messageFactory()
        m.fromStr(MessageComplete.bytes()),  # NOTE(review): stray trailing comma -- harmless tuple expression, but should be removed.
        self.assertEqual(m, self.messageFactory(**MessageComplete.kwargs()))
    def test_NULL(self):
        """
        A I{NULL} record with an arbitrary payload can be encoded and decoded as
        part of a message.
        """
        # A payload covering every possible byte value, to exercise the
        # opaque-data round trip.
        bytes = b''.join([dns._ord2bytes(i) for i in range(256)])
        rec = dns.Record_NULL(bytes)
        rr = dns.RRHeader(b'testname', dns.NULL, payload=rec)
        msg1 = self.messageFactory()
        msg1.answers.append(rr)
        s = msg1.toStr()
        msg2 = self.messageFactory()
        msg2.fromStr(s)
        self.assertIsInstance(msg2.answers[0].payload, dns.Record_NULL)
        self.assertEqual(msg2.answers[0].payload.payload, bytes)
    def test_nonAuthoritativeMessageEncode(self):
        """
        If the message C{authoritative} attribute is set to 0, the encoded bytes
        will have AA bit 0.
        """
        self.assertEqual(
            self.messageFactory(**MessageNonAuthoritative.kwargs()).toStr(),
            MessageNonAuthoritative.bytes())
    def test_nonAuthoritativeMessageDecode(self):
        """
        The L{dns.RRHeader} instances created by a message from a
        non-authoritative message byte string are marked as not authoritative.
        """
        m = self.messageFactory()
        m.fromStr(MessageNonAuthoritative.bytes())
        self.assertEqual(
            m, self.messageFactory(**MessageNonAuthoritative.kwargs()))
    def test_authoritativeMessageEncode(self):
        """
        If the message C{authoritative} attribute is set to 1, the encoded bytes
        will have AA bit 1.
        """
        self.assertEqual(
            self.messageFactory(**MessageAuthoritative.kwargs()).toStr(),
            MessageAuthoritative.bytes())
    def test_authoritativeMessageDecode(self):
        """
        The message and its L{dns.RRHeader} instances created by C{decode} from
        an authoritative message byte string, are marked as authoritative.
        """
        m = self.messageFactory()
        m.fromStr(MessageAuthoritative.bytes())
        self.assertEqual(
            m, self.messageFactory(**MessageAuthoritative.kwargs()))
    def test_truncatedMessageEncode(self):
        """
        If the message C{trunc} attribute is set to 1 the encoded bytes will
        have TR bit 1.
        """
        self.assertEqual(
            self.messageFactory(**MessageTruncated.kwargs()).toStr(),
            MessageTruncated.bytes())
    def test_truncatedMessageDecode(self):
        """
        The message instance created by decoding a truncated message is marked
        as truncated.
        """
        m = self.messageFactory()
        m.fromStr(MessageTruncated.bytes())
        self.assertEqual(m, self.messageFactory(**MessageTruncated.kwargs()))
class EDNSMessageStandardEncodingTests(StandardEncodingTestsMixin,
                                       unittest.SynchronousTestCase):
    """
    Tests for the encoding and decoding of various standard (non-EDNS) messages
    by L{dns._EDNSMessage}.
    """
    # The mixin's round-trip tests run against the EDNS-aware message class.
    messageFactory = dns._EDNSMessage
class MessageStandardEncodingTests(StandardEncodingTestsMixin,
                                   unittest.SynchronousTestCase):
    """
    Tests for the encoding and decoding of various standard (non-EDNS) messages
    by L{dns.Message}.
    """
    @staticmethod
    def messageFactory(**kwargs):
        """
        Adapt constructor arguments expected by L{dns._EDNSMessage.__init__}
        to arguments suitable for use with L{dns.Message.__init__}.
        Also handles the fact that unlike L{dns._EDNSMessage},
        L{dns.Message.__init__} does not accept queries, answers etc as
        arguments.
        Also removes any L{dns._EDNSMessage} specific arguments.
        @param kwargs: The keyword arguments which will be stripped of EDNS
            specific arguments before being passed to L{dns.Message.__init__}.
        @return: A L{MessageComparable} wrapping the configured
            L{dns.Message} instance.
        """
        # Sections are not constructor arguments on dns.Message; pop them
        # here and assign them after construction.
        queries = kwargs.pop('queries', [])
        answers = kwargs.pop('answers', [])
        authority = kwargs.pop('authority', [])
        additional = kwargs.pop('additional', [])
        # EDNS-only argument: silently discarded for plain dns.Message.
        kwargs.pop('ednsVersion', None)
        m = dns.Message(**kwargs)
        m.queries = queries
        m.answers = answers
        m.authority = authority
        m.additional = additional
        return MessageComparable(m)
class EDNSMessageEDNSEncodingTests(unittest.SynchronousTestCase):
    """
    Tests for the encoding and decoding of various EDNS messages.
    These tests will not work with L{dns.Message}.
    """
    messageFactory = dns._EDNSMessage
    def test_ednsMessageDecodeStripsOptRecords(self):
        """
        The L{_EDNSMessage} instance created by L{dns._EDNSMessage.decode} from
        an EDNS query never includes OPT records in the additional section.
        """
        m = self.messageFactory()
        m.fromStr(MessageEDNSQuery.bytes())
        self.assertEqual(m.additional, [])
    def test_ednsMessageDecodeMultipleOptRecords(self):
        """
        An L{_EDNSMessage} instance created from a byte string containing
        multiple I{OPT} records will discard all the C{OPT} records.
        C{ednsVersion} will be set to L{None}.
        @see: U{https://tools.ietf.org/html/rfc6891#section-6.1.1}
        """
        m = dns.Message()
        m.additional = [
            dns._OPTHeader(version=2),
            dns._OPTHeader(version=3)]
        ednsMessage = dns._EDNSMessage()
        ednsMessage.fromStr(m.toStr())
        self.assertIsNone(ednsMessage.ednsVersion)
    def test_fromMessageCopiesSections(self):
        """
        L{dns._EDNSMessage._fromMessage} returns an L{_EDNSMessage} instance
        whose queries, answers, authority and additional lists are copies (not
        references to) the original message lists.
        """
        standardMessage = dns.Message()
        standardMessage.fromStr(MessageEDNSQuery.bytes())
        ednsMessage = dns._EDNSMessage._fromMessage(standardMessage)
        # Collect every section list shared by identity (is) between the two
        # messages; any entry here is a copy-violation.
        duplicates = []
        for attrName in ('queries', 'answers', 'authority', 'additional'):
            if (getattr(standardMessage, attrName)
                    is getattr(ednsMessage, attrName)):
                duplicates.append(attrName)
        if duplicates:
            self.fail(
                'Message and _EDNSMessage shared references to the following '
                'section lists after decoding: %s' % (duplicates,))
    def test_toMessageCopiesSections(self):
        """
        L{dns._EDNSMessage.toStr} makes no in place changes to the message
        instance.
        """
        # Encoding adds a synthetic OPT record; verify it does not leak into
        # the _EDNSMessage's own additional section.
        ednsMessage = dns._EDNSMessage(ednsVersion=1)
        ednsMessage.toStr()
        self.assertEqual(ednsMessage.additional, [])
    def test_optHeaderPosition(self):
        """
        L{dns._EDNSMessage} can decode OPT records, regardless of their position
        in the additional records section.
        "The OPT RR MAY be placed anywhere within the additional data section."
        @see: U{https://tools.ietf.org/html/rfc6891#section-6.1.1}
        """
        # XXX: We need an _OPTHeader.toRRHeader method. See #6779.
        b = BytesIO()
        optRecord = dns._OPTHeader(version=1)
        optRecord.encode(b)
        optRRHeader = dns.RRHeader()
        b.seek(0)
        optRRHeader.decode(b)
        m = dns.Message()
        m.additional = [optRRHeader]
        # NOTE(review): despite the name, actualMessages collects the
        # ednsVersion decoded with the OPT record alone, last, and first.
        actualMessages = []
        actualMessages.append(dns._EDNSMessage._fromMessage(m).ednsVersion)
        m.additional.append(dns.RRHeader(type=dns.A))
        actualMessages.append(
            dns._EDNSMessage._fromMessage(m).ednsVersion)
        m.additional.insert(0, dns.RRHeader(type=dns.A))
        actualMessages.append(
            dns._EDNSMessage._fromMessage(m).ednsVersion)
        self.assertEqual(
            [1] * 3,
            actualMessages
        )
    def test_ednsDecode(self):
        """
        The L{_EDNSMessage} instance created by L{dns._EDNSMessage.fromStr}
        derives its edns specific values (C{ednsVersion}, etc) from the supplied
        OPT record.
        """
        m = self.messageFactory()
        m.fromStr(MessageEDNSComplete.bytes())
        self.assertEqual(m, self.messageFactory(**MessageEDNSComplete.kwargs()))
    def test_ednsEncode(self):
        """
        The L{_EDNSMessage} instance created by L{dns._EDNSMessage.toStr}
        encodes its edns specific values (C{ednsVersion}, etc) into an OPT
        record added to the additional section.
        """
        self.assertEqual(
            self.messageFactory(**MessageEDNSComplete.kwargs()).toStr(),
            MessageEDNSComplete.bytes())
    def test_extendedRcodeEncode(self):
        """
        The L{_EDNSMessage.toStr} encodes the extended I{RCODE} (>=16) by
        assigning the lower 4bits to the message RCODE field and the upper 4bits
        to the OPT pseudo record.
        """
        self.assertEqual(
            self.messageFactory(**MessageEDNSExtendedRCODE.kwargs()).toStr(),
            MessageEDNSExtendedRCODE.bytes())
    def test_extendedRcodeDecode(self):
        """
        The L{_EDNSMessage} instance created by L{dns._EDNSMessage.fromStr}
        derives RCODE from the supplied OPT record.
        """
        m = self.messageFactory()
        m.fromStr(MessageEDNSExtendedRCODE.bytes())
        self.assertEqual(
            m, self.messageFactory(**MessageEDNSExtendedRCODE.kwargs()))
    def test_extendedRcodeZero(self):
        """
        Note that EXTENDED-RCODE value 0 indicates that an unextended RCODE is
        in use (values 0 through 15).
        https://tools.ietf.org/html/rfc6891#section-6.1.3
        """
        ednsMessage = self.messageFactory(rCode=15, ednsVersion=0)
        standardMessage = ednsMessage._toMessage()
        self.assertEqual(
            (15, 0),
            (standardMessage.rCode, standardMessage.additional[0].extendedRCODE)
        )
class ResponseFromMessageTests(unittest.SynchronousTestCase):
    """
    Tests for L{dns._responseFromMessage}.
    """
    def test_responseFromMessageResponseType(self):
        """
        L{dns._responseFromMessage} is a constructor function which
        generates a new I{answer} message from an existing L{dns.Message} like
        instance.
        """
        request = dns.Message()
        response = dns._responseFromMessage(responseConstructor=dns.Message,
                                            message=request)
        self.assertIsNot(request, response)
    def test_responseType(self):
        """
        L{dns._responseFromMessage} returns a new instance of C{cls}
        """
        # A minimal message-like stand-in: only id and queries are read.
        class SuppliedClass(object):
            id = 1
            queries = []
        expectedClass = dns.Message
        self.assertIsInstance(
            dns._responseFromMessage(responseConstructor=expectedClass,
                                     message=SuppliedClass()),
            expectedClass
        )
    def test_responseId(self):
        """
        L{dns._responseFromMessage} copies the C{id} attribute of the original
        message.
        """
        self.assertEqual(
            1234,
            dns._responseFromMessage(responseConstructor=dns.Message,
                                     message=dns.Message(id=1234)).id
        )
    def test_responseAnswer(self):
        """
        L{dns._responseFromMessage} sets the C{answer} flag to L{True}
        """
        request = dns.Message()
        response = dns._responseFromMessage(responseConstructor=dns.Message,
                                            message=request)
        self.assertEqual(
            (False, True),
            (request.answer, response.answer)
        )
    def test_responseQueries(self):
        """
        L{dns._responseFromMessage} copies the C{queries} attribute of the
        original message.
        """
        request = dns.Message()
        expectedQueries = [object(), object(), object()]
        # Assign a copy so the assertion below also proves the response got
        # the same values rather than a shared mutated list.
        request.queries = expectedQueries[:]
        self.assertEqual(
            expectedQueries,
            dns._responseFromMessage(responseConstructor=dns.Message,
                                     message=request).queries
        )
    def test_responseKwargs(self):
        """
        L{dns._responseFromMessage} accepts other C{kwargs} which are assigned
        to the new message before it is returned.
        """
        self.assertEqual(
            123,
            dns._responseFromMessage(
                responseConstructor=dns.Message, message=dns.Message(),
                rCode=123).rCode
        )
class Foo(object):
    """
    A sample class used to exercise L{dns._compactRepr}.
    It mirrors the initialiser-settable flags, fields and sections exposed
    by L{dns.Message} and L{dns._EDNSMessage}.
    """
    def __init__(self,
                 field1=1, field2=2, alwaysShowField='AS',
                 flagTrue=True, flagFalse=False, section1=None):
        """
        Store the supplied flags, fields and sections as public attributes.
        """
        self.field1 = field1
        self.field2 = field2
        self.alwaysShowField = alwaysShowField
        self.flagTrue = flagTrue
        self.flagFalse = flagFalse
        # Avoid a shared mutable default: each instance gets its own list.
        self.section1 = [] if section1 is None else section1
    def __repr__(self):
        """
        Delegate the string representation to L{dns._compactRepr}.
        """
        return dns._compactRepr(
            self,
            alwaysShow=['alwaysShowField'],
            fieldNames=['field1', 'field2', 'alwaysShowField'],
            flagNames=['flagTrue', 'flagFalse'],
            sectionNames=['section1', 'section2'],
        )
class CompactReprTests(unittest.SynchronousTestCase):
    """
    Tests for L{dns._compactRepr}.
    """
    messageFactory = Foo
    def test_defaults(self):
        """
        L{dns._compactRepr} omits field values and sections which have the
        default value. Flags which are True are always shown.
        """
        self.assertEqual(
            "<Foo alwaysShowField='AS' flags=flagTrue>",
            repr(self.messageFactory())
        )
    def test_flagsIfSet(self):
        """
        L{dns._compactRepr} displays flags if they have a non-default value.
        """
        m = self.messageFactory(flagTrue=True, flagFalse=True)
        self.assertEqual(
            '<Foo '
            "alwaysShowField='AS' "
            'flags=flagTrue,flagFalse'
            '>',
            repr(m),
        )
    def test_nonDefautFields(self):
        """
        L{dns._compactRepr} displays field values if they differ from their
        defaults.
        """
        m = self.messageFactory(field1=10, field2=20)
        self.assertEqual(
            '<Foo '
            'field1=10 '
            'field2=20 '
            "alwaysShowField='AS' "
            'flags=flagTrue'
            '>',
            repr(m),
        )
    def test_nonDefaultSections(self):
        """
        L{dns._compactRepr} displays sections which differ from their defaults.
        """
        m = self.messageFactory()
        m.section1 = [1, 1, 1]
        # section2 is not set by Foo.__init__; assigning it here exercises
        # the "section present but not a constructor default" path.
        m.section2 = [2, 2, 2]
        self.assertEqual(
            '<Foo '
            "alwaysShowField='AS' "
            'flags=flagTrue '
            'section1=[1, 1, 1] '
            'section2=[2, 2, 2]'
            '>',
            repr(m),
        )
| mit |
jxtech/teambition-api | teambition/api/__init__.py | 3 | 1610 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from teambition.api.oauth import OAuth # NOQA
from teambition.api.projects import Projects # NOQA
from teambition.api.tasklists import Tasklists # NOQA
from teambition.api.stages import Stages # NOQA
from teambition.api.tasks import Tasks # NOQA
from teambition.api.users import Users # NOQA
from teambition.api.organizations import Organizations # NOQA
from teambition.api.stagetemplates import StageTemplates # NOQA
from teambition.api.teams import Teams # NOQA
from teambition.api.subtasks import Subtasks # NOQA
from teambition.api.messages import Messages # NOQA
from teambition.api.posts import Posts # NOQA
from teambition.api.collections import Collections # NOQA
from teambition.api.works import Works # NOQA
from teambition.api.events import Events # NOQA
from teambition.api.tags import Tags # NOQA
from teambition.api.objectlinks import ObjectLinks # NOQA
from teambition.api.activities import Activities # NOQA
from teambition.api.webhooks import Webhooks # NOQA
from teambition.api.bookkeepings import BookKeepings # NOQA
from teambition.api.entrycategories import EntryCategories # NOQA
from teambition.api.entries import Entries # NOQA
# Public API re-exported via ``from teambition.api import *``.
# Kept in the same order as the imports above. 'Webhooks' was imported
# (with # NOQA, i.e. intended for re-export) but missing from this list.
__all__ = [
    'OAuth',
    'Projects',
    'Tasklists',
    'Stages',
    'Tasks',
    'Users',
    'Organizations',
    'StageTemplates',
    'Teams',
    'Subtasks',
    'Messages',
    'Posts',
    'Collections',
    'Works',
    'Events',
    'Tags',
    'ObjectLinks',
    'Activities',
    'Webhooks',
    'BookKeepings',
    'EntryCategories',
    'Entries',
]
| mit |
thaumos/ansible | lib/ansible/modules/cloud/azure/azure_rm_mariadbdatabase_facts.py | 12 | 5998 | #!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mariadbdatabase_facts
version_added: "2.8"
short_description: Get Azure MariaDB Database facts.
description:
- Get facts of MariaDB Database.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the database.
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
- "Matti Ranta (@techknowlogick)"
'''
EXAMPLES = '''
- name: Get instance of MariaDB Database
azure_rm_mariadbdatabase_facts:
resource_group: myResourceGroup
server_name: server_name
name: database_name
- name: List instances of MariaDB Database
azure_rm_mariadbdatabase_facts:
resource_group: myResourceGroup
server_name: server_name
'''
RETURN = '''
databases:
description: A list of dictionaries containing facts for MariaDB Databases.
returned: always
type: complex
contains:
id:
description:
- Resource ID
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testser
ver/databases/db1"
resource_group:
description:
- Resource group name.
returned: always
type: str
sample: testrg
server_name:
description:
- Server name.
returned: always
type: str
sample: testserver
name:
description:
- Resource name.
returned: always
type: str
sample: db1
charset:
description:
- The charset of the database.
returned: always
type: str
sample: UTF8
collation:
description:
- The collation of the database.
returned: always
type: str
sample: English_United States.1252
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMMariaDbDatabaseFacts(AzureRMModuleBase):
    """Gather facts for one named MariaDB database, or all databases on a server."""
    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False
        )
        self.resource_group = None
        self.server_name = None
        self.name = None
        super(AzureRMMariaDbDatabaseFacts, self).__init__(self.module_arg_spec, supports_tags=False)
    def exec_module(self, **kwargs):
        """Dispatch to get() when a database name is given, else list_by_server()."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        if (self.resource_group is not None and
                self.server_name is not None and
                self.name is not None):
            self.results['databases'] = self.get()
        elif (self.resource_group is not None and
              self.server_name is not None):
            self.results['databases'] = self.list_by_server()
        return self.results
    def get(self):
        """Return a single-element list of facts for the named database (empty if not found)."""
        response = None
        results = []
        try:
            response = self.mariadb_client.databases.get(resource_group_name=self.resource_group,
                                                         server_name=self.server_name,
                                                         database_name=self.name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            # NOTE(review): `e` is unused; a lookup failure is treated as
            # "no results" rather than a module failure.
            self.log('Could not get facts for Databases.')
        if response is not None:
            results.append(self.format_item(response))
        return results
    def list_by_server(self):
        """Return facts for every database on the configured server; fail the module on error."""
        response = None
        results = []
        try:
            response = self.mariadb_client.databases.list_by_server(resource_group_name=self.resource_group,
                                                                    server_name=self.server_name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e)))
        if response is not None:
            for item in response:
                results.append(self.format_item(item))
        return results
    def format_item(self, item):
        """Flatten an SDK Database object into the documented fact dictionary."""
        d = item.as_dict()
        d = {
            'resource_group': self.resource_group,
            'server_name': self.server_name,
            'name': d['name'],
            'charset': d['charset'],
            'collation': d['collation']
        }
        return d
def main():
    """Module entry point; instantiating the facts class runs the module."""
    AzureRMMariaDbDatabaseFacts()
if __name__ == '__main__':
    main()
| gpl-3.0 |
Aloomaio/googleads-python-lib | examples/ad_manager/v201808/creative_wrapper_service/update_creative_wrappers.py | 1 | 2747 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates a creative wrapper to the 'OUTER' wrapping order.
To determine which creative wrappers exist, run get_all_creative_wrappers.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
# Set the ID of the creative wrapper to update.
CREATIVE_WRAPPER_ID = 'INSERT_CREATIVE_WRAPPER_ID_HERE'


def main(client, creative_wrapper_id):
  # Fetch the creative wrapper with the given ID, set its wrapping order
  # to 'OUTER', and push the change back to the Ad Manager server.
  # NOTE(review): this example uses Python 2-only constructs (`long`,
  # print statements) and will not run unmodified under Python 3.

  # Initialize appropriate service.
  creative_wrapper_service = client.GetService('CreativeWrapperService',
                                               version='v201808')

  # Create statement to get a creative wrapper by ID.
  statement = (ad_manager.StatementBuilder(version='v201808')
               .Where('id = :creativeWrapperId')
               .WithBindVariable('creativeWrapperId',
                                 long(creative_wrapper_id)))

  # Get creative wrappers.
  response = creative_wrapper_service.getCreativeWrappersByStatement(
      statement.ToStatement())

  if 'results' in response and len(response['results']):
    # Collect every matched wrapper with its ordering flipped to 'OUTER'.
    updated_creative_wrappers = []
    for creative_wrapper in response['results']:
      creative_wrapper['ordering'] = 'OUTER'
      updated_creative_wrappers.append(creative_wrapper)

    # Update the creative wrappers on the server.
    creative_wrappers = creative_wrapper_service.updateCreativeWrappers(
        updated_creative_wrappers)

    # Display results.
    for creative_wrapper in creative_wrappers:
      print (('Creative wrapper with ID "%s" and wrapping order "%s" '
              'was updated.') % (creative_wrapper['id'],
                                 creative_wrapper['ordering']))
  else:
    print 'No creative wrappers found to update.'


if __name__ == '__main__':
  # Initialize client object (credentials come from ~/googleads.yaml).
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client, CREATIVE_WRAPPER_ID)
| apache-2.0 |
# Unidecode transliteration table for the Unicode page 0x11 (code points
# U+1100-U+11FF).  data[n] is the ASCII replacement string for code point
# U+1100 + n; the romanizations ('g', 'gg', 'n', ...) are consistent with
# Hangul Jamo, and '[?]' marks code points with no transliteration.
data = (
'g', # 0x00
'gg', # 0x01
'n', # 0x02
'd', # 0x03
'dd', # 0x04
'r', # 0x05
'm', # 0x06
'b', # 0x07
'bb', # 0x08
's', # 0x09
'ss', # 0x0a
'', # 0x0b
'j', # 0x0c
'jj', # 0x0d
'c', # 0x0e
'k', # 0x0f
't', # 0x10
'p', # 0x11
'h', # 0x12
'ng', # 0x13
'nn', # 0x14
'nd', # 0x15
'nb', # 0x16
'dg', # 0x17
'rn', # 0x18
'rr', # 0x19
'rh', # 0x1a
'rN', # 0x1b
'mb', # 0x1c
'mN', # 0x1d
'bg', # 0x1e
'bn', # 0x1f
'', # 0x20
'bs', # 0x21
'bsg', # 0x22
'bst', # 0x23
'bsb', # 0x24
'bss', # 0x25
'bsj', # 0x26
'bj', # 0x27
'bc', # 0x28
'bt', # 0x29
'bp', # 0x2a
'bN', # 0x2b
'bbN', # 0x2c
'sg', # 0x2d
'sn', # 0x2e
'sd', # 0x2f
'sr', # 0x30
'sm', # 0x31
'sb', # 0x32
'sbg', # 0x33
'sss', # 0x34
's', # 0x35
'sj', # 0x36
'sc', # 0x37
'sk', # 0x38
'st', # 0x39
'sp', # 0x3a
'sh', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'Z', # 0x40
'g', # 0x41
'd', # 0x42
'm', # 0x43
'b', # 0x44
's', # 0x45
'Z', # 0x46
'', # 0x47
'j', # 0x48
'c', # 0x49
't', # 0x4a
'p', # 0x4b
'N', # 0x4c
'j', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'ck', # 0x52
'ch', # 0x53
'', # 0x54
'', # 0x55
'pb', # 0x56
'pN', # 0x57
'hh', # 0x58
'Q', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'', # 0x5f
'', # 0x60
'a', # 0x61
'ae', # 0x62
'ya', # 0x63
'yae', # 0x64
'eo', # 0x65
'e', # 0x66
'yeo', # 0x67
'ye', # 0x68
'o', # 0x69
'wa', # 0x6a
'wae', # 0x6b
'oe', # 0x6c
'yo', # 0x6d
'u', # 0x6e
'weo', # 0x6f
'we', # 0x70
'wi', # 0x71
'yu', # 0x72
'eu', # 0x73
'yi', # 0x74
'i', # 0x75
'a-o', # 0x76
'a-u', # 0x77
'ya-o', # 0x78
'ya-yo', # 0x79
'eo-o', # 0x7a
'eo-u', # 0x7b
'eo-eu', # 0x7c
'yeo-o', # 0x7d
'yeo-u', # 0x7e
'o-eo', # 0x7f
'o-e', # 0x80
'o-ye', # 0x81
'o-o', # 0x82
'o-u', # 0x83
'yo-ya', # 0x84
'yo-yae', # 0x85
'yo-yeo', # 0x86
'yo-o', # 0x87
'yo-i', # 0x88
'u-a', # 0x89
'u-ae', # 0x8a
'u-eo-eu', # 0x8b
'u-ye', # 0x8c
'u-u', # 0x8d
'yu-a', # 0x8e
'yu-eo', # 0x8f
'yu-e', # 0x90
'yu-yeo', # 0x91
'yu-ye', # 0x92
'yu-u', # 0x93
'yu-i', # 0x94
'eu-u', # 0x95
'eu-eu', # 0x96
'yi-u', # 0x97
'i-a', # 0x98
'i-ya', # 0x99
'i-o', # 0x9a
'i-u', # 0x9b
'i-eu', # 0x9c
'i-U', # 0x9d
'U', # 0x9e
'U-eo', # 0x9f
'U-u', # 0xa0
'U-i', # 0xa1
'UU', # 0xa2
'[?]', # 0xa3
'[?]', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'g', # 0xa8
'gg', # 0xa9
'gs', # 0xaa
'n', # 0xab
'nj', # 0xac
'nh', # 0xad
'd', # 0xae
'l', # 0xaf
'lg', # 0xb0
'lm', # 0xb1
'lb', # 0xb2
'ls', # 0xb3
'lt', # 0xb4
'lp', # 0xb5
'lh', # 0xb6
'm', # 0xb7
'b', # 0xb8
'bs', # 0xb9
's', # 0xba
'ss', # 0xbb
'ng', # 0xbc
'j', # 0xbd
'c', # 0xbe
'k', # 0xbf
't', # 0xc0
'p', # 0xc1
'h', # 0xc2
'gl', # 0xc3
'gsg', # 0xc4
'ng', # 0xc5
'nd', # 0xc6
'ns', # 0xc7
'nZ', # 0xc8
'nt', # 0xc9
'dg', # 0xca
'tl', # 0xcb
'lgs', # 0xcc
'ln', # 0xcd
'ld', # 0xce
'lth', # 0xcf
'll', # 0xd0
'lmg', # 0xd1
'lms', # 0xd2
'lbs', # 0xd3
'lbh', # 0xd4
'rNp', # 0xd5
'lss', # 0xd6
'lZ', # 0xd7
'lk', # 0xd8
'lQ', # 0xd9
'mg', # 0xda
'ml', # 0xdb
'mb', # 0xdc
'ms', # 0xdd
'mss', # 0xde
'mZ', # 0xdf
'mc', # 0xe0
'mh', # 0xe1
'mN', # 0xe2
'bl', # 0xe3
'bp', # 0xe4
'ph', # 0xe5
'pN', # 0xe6
'sg', # 0xe7
'sd', # 0xe8
'sl', # 0xe9
'sb', # 0xea
'Z', # 0xeb
'g', # 0xec
'ss', # 0xed
'', # 0xee
'kh', # 0xef
'N', # 0xf0
'Ns', # 0xf1
'NZ', # 0xf2
'pb', # 0xf3
'pN', # 0xf4
'hn', # 0xf5
'hl', # 0xf6
'hm', # 0xf7
'hb', # 0xf8
'Q', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-2.0 |
RAtechntukan/CouchPotatoServer | libs/pyutil/verlib.py | 106 | 12275 | # -*- coding: utf-8 -*-
"""
"Rational" version definition and parsing for DistutilsVersionFight
discussion at PyCon 2009.
This was written by Tarek Ziadé.
Zooko copied it from http://bitbucket.org/tarek/distutilsversion/ on 2010-07-29.
"""
import re
class IrrationalVersionError(Exception):
    """This is an irrational version (the string does not match VERSION_RE)."""
    pass

class HugeMajorVersionNumError(IrrationalVersionError):
    """An irrational version because the major version number is huge
    (often because a year or date was used).

    See `error_on_huge_major_num` option in `NormalizedVersion` for details.
    This guard can be disabled by setting that option False.
    """
    pass

class PreconditionViolationException(Exception):
    # Raised when a caller violates an argument precondition, e.g. passing
    # a non-string to NormalizedVersion._parse().
    pass
# A marker used in the second and third parts of the `parts` tuple, for
# versions that don't have those segments, to sort properly. An example
# of versions in sort order ('highest' last):
# 1.0b1 ((1,0), ('b',1), ('f',))
# 1.0.dev345 ((1,0), ('f',), ('dev', 345))
# 1.0 ((1,0), ('f',), ('f',))
# 1.0.post256.dev345 ((1,0), ('f',), ('f', 'post', 256, 'dev', 345))
# 1.0.post345 ((1,0), ('f',), ('f', 'post', 345, 'f'))
# ^ ^ ^
# 'b' < 'f' ---------------------/ | |
# | |
# 'dev' < 'f' < 'post' -------------------/ |
# |
# 'dev' < 'f' ----------------------------------------------/
# Other letters would do, but 'f' for 'final' is kind of nice.
FINAL_MARKER = ('f',)
VERSION_RE = re.compile(r'''
^
(?P<version>\d+\.\d+) # minimum 'N.N'
(?P<extraversion>(?:\.\d+)*) # any number of extra '.N' segments
(?:
(?P<prerel>[abc]|rc) # 'a'=alpha, 'b'=beta, 'c'=release candidate
# 'rc'= alias for release candidate
(?P<prerelversion>\d+(?:\.\d+)*)
)?
(?P<postdev>(\.post(?P<post>\d+)|-r(?P<oldpost>\d+))?(\.dev(?P<dev>\d+))?)?
$''', re.VERBOSE)
class NormalizedVersion(object):
    """A rational version.

    Good:
        1.2         # equivalent to "1.2.0"
        1.2.0
        1.2a1
        1.2.3a2
        1.2.3b1
        1.2.3c1
        1.2.3.4
        TODO: fill this out

    Bad:
        1           # mininum two numbers
        1.2a        # release level must have a release serial
        1.2.3b
    """
    def __init__(self, s, error_on_huge_major_num=True):
        """Create a NormalizedVersion instance from a version string.

        @param s {str} The version string.
        @param error_on_huge_major_num {bool} Whether to consider an
           apparent use of a year or full date as the major version number
           an error. Default True. One of the observed patterns on PyPI before
           the introduction of `NormalizedVersion` was version numbers like this:
              2009.01.03
              20040603
              2005.01
           This guard is here to strongly encourage the package author to
           use an alternate version, because a release deployed into PyPI
           and, e.g. downstream Linux package managers, will forever remove
           the possibility of using a version number like "1.0" (i.e.
           where the major number is less than that huge major number).
        """
        self._parse(s, error_on_huge_major_num)

    @classmethod
    def from_parts(cls, version, prerelease=FINAL_MARKER,
                   devpost=FINAL_MARKER):
        # Alternate constructor: build an instance from a `parts`-style
        # tuple instead of a string.
        return cls(cls.parts_to_str((version, prerelease, devpost)))

    def _parse(self, s, error_on_huge_major_num=True):
        """Parses a string version into parts."""
        # NOTE(review): `basestring` makes this Python 2 only.
        if not isinstance(s, basestring):
            raise PreconditionViolationException("s is required to be a string: %s :: %s" % (s, type(s)))
        match = VERSION_RE.search(s)
        if not match:
            raise IrrationalVersionError(s)

        groups = match.groupdict()
        parts = []

        # main version: 'N.N' core plus any extra '.N' segments
        block = self._parse_numdots(groups['version'], s, False, 2)
        extraversion = groups.get('extraversion')
        if extraversion not in ('', None):
            block += self._parse_numdots(extraversion[1:], s)
        parts.append(tuple(block))

        # prerelease: ('a'|'b'|'c'|'rc', serial...) or FINAL_MARKER
        prerel = groups.get('prerel')
        if prerel is not None:
            block = [prerel]
            block += self._parse_numdots(groups.get('prerelversion'), s,
                                         pad_zeros_length=1)
            parts.append(tuple(block))
        else:
            parts.append(FINAL_MARKER)

        # postdev: '.postN' and/or '.devN' suffixes, padded with 'f'
        # markers so that tuple comparison gives the documented ordering.
        if groups.get('postdev'):
            post = groups.get('post') or groups.get('oldpost')
            dev = groups.get('dev')
            postdev = []
            if post is not None:
                postdev.extend([FINAL_MARKER[0], 'post', int(post)])
                if dev is None:
                    postdev.append(FINAL_MARKER[0])
            if dev is not None:
                postdev.extend(['dev', int(dev)])
            parts.append(tuple(postdev))
        else:
            parts.append(FINAL_MARKER)
        self.parts = tuple(parts)
        # A major version > 1980 almost certainly means a year/date was used.
        if error_on_huge_major_num and self.parts[0][0] > 1980:
            raise HugeMajorVersionNumError("huge major version number, %r, "
                "which might cause future problems: %r" % (self.parts[0][0], s))

    def _parse_numdots(self, s, full_ver_str, drop_trailing_zeros=True,
                       pad_zeros_length=0):
        """Parse 'N.N.N' sequences, return a list of ints.

        @param s {str} 'N.N.N...' sequence to be parsed
        @param full_ver_str {str} The full version string from which this
           comes. Used for error strings.
        @param drop_trailing_zeros {bool} Whether to drop trailing zeros
           from the returned list. Default True.
        @param pad_zeros_length {int} The length to which to pad the
           returned list with zeros, if necessary. Default 0.
        """
        nums = []
        for n in s.split("."):
            if len(n) > 1 and n[0] == '0':
                raise IrrationalVersionError("cannot have leading zero in "
                    "version number segment: '%s' in %r" % (n, full_ver_str))
            nums.append(int(n))
        if drop_trailing_zeros:
            while nums and nums[-1] == 0:
                nums.pop()
        while len(nums) < pad_zeros_length:
            nums.append(0)
        return nums

    def __str__(self):
        return self.parts_to_str(self.parts)

    @classmethod
    def parts_to_str(cls, parts):
        """Transforms a version expressed in tuple into its string
        representation."""
        # XXX This doesn't check for invalid tuples
        main, prerel, postdev = parts
        s = '.'.join(str(v) for v in main)
        if prerel is not FINAL_MARKER:
            s += prerel[0]
            s += '.'.join(str(v) for v in prerel[1:])
        # 'f' padding markers are stripped; remaining postdev items
        # alternate between tags ('post'/'dev') and their serial numbers.
        if postdev and postdev is not FINAL_MARKER:
            if postdev[0] == 'f':
                postdev = postdev[1:]
            i = 0
            while i < len(postdev):
                if i % 2 == 0:
                    s += '.'
                s += str(postdev[i])
                i += 1
        return s

    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, self)

    def _cannot_compare(self, other):
        # Comparisons are only defined against other NormalizedVersions.
        raise TypeError("cannot compare %s and %s"
                % (type(self).__name__, type(other).__name__))

    def __eq__(self, other):
        if not isinstance(other, NormalizedVersion):
            self._cannot_compare(other)
        return self.parts == other.parts

    def __lt__(self, other):
        if not isinstance(other, NormalizedVersion):
            self._cannot_compare(other)
        return self.parts < other.parts

    # The remaining rich comparisons are derived from __eq__/__lt__.
    def __ne__(self, other):
        return not self.__eq__(other)

    def __gt__(self, other):
        return not (self.__lt__(other) or self.__eq__(other))

    def __le__(self, other):
        return self.__eq__(other) or self.__lt__(other)

    def __ge__(self, other):
        return self.__eq__(other) or self.__gt__(other)
def suggest_normalized_version(s):
    """Suggest a normalized version close to the given version string.

    If you have a version string that isn't rational (i.e. NormalizedVersion
    doesn't like it) then you might be able to get an equivalent (or close)
    rational version from this function.

    This does a number of simple normalizations to the given string, based
    on observation of versions currently in use on PyPI. Given a dump of
    those version during PyCon 2009, 4287 of them:
    - 2312 (53.93%) match NormalizedVersion without change
    - with the automatic suggestion
    - 3474 (81.04%) match when using this suggestion method

    @param s {str} An irrational version string.
    @returns A rational version string, or None, if couldn't determine one.
    """
    # NOTE: the substitutions below are order-dependent; do not reorder.
    try:
        NormalizedVersion(s)
        return s   # already rational
    except IrrationalVersionError:
        pass

    rs = s.lower()

    # part of this could use maketrans
    for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
                       ('beta', 'b'), ('rc', 'c'), ('-final', ''),
                       ('-pre', 'c'),
                       ('-release', ''), ('.release', ''), ('-stable', ''),
                       ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
                       ('final', '')):
        rs = rs.replace(orig, repl)

    # if something ends with dev or pre, we add a 0
    rs = re.sub(r"pre$", r"pre0", rs)
    rs = re.sub(r"dev$", r"dev0", rs)

    # if we have something like "b-2" or "a.2" at the end of the
    # version, that is pobably beta, alpha, etc
    # let's remove the dash or dot
    rs = re.sub(r"([abc|rc])[\-\.](\d+)$", r"\1\2", rs)

    # 1.0-dev-r371 -> 1.0.dev371
    # 0.1-dev-r79 -> 0.1.dev79
    rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)

    # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
    rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)

    # Clean: v0.3, v1.0
    if rs.startswith('v'):
        rs = rs[1:]

    # Clean leading '0's on numbers.
    #TODO: unintended side-effect on, e.g., "2003.05.09"
    # PyPI stats: 77 (~2%) better
    rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)

    # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
    # zero.
    # PyPI stats: 245 (7.56%) better
    rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)

    # the 'dev-rNNN' tag is a dev tag
    rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)

    # clean the - when used as a pre delimiter
    rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)

    # a terminal "dev" or "devel" can be changed into ".dev0"
    rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)

    # a terminal "dev" can be changed into ".dev0"
    rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)

    # a terminal "final" or "stable" can be removed
    rs = re.sub(r"(final|stable)$", "", rs)

    # The 'r' and the '-' tags are post release tags
    #   0.4a1.r10       ->  0.4a1.post10
    #   0.9.33-17222    ->  0.9.3.post17222
    #   0.9.33-r17222   ->  0.9.3.post17222
    rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)

    # Clean 'r' instead of 'dev' usage:
    #   0.9.33+r17222   ->  0.9.3.dev17222
    #   1.0dev123       ->  1.0.dev123
    #   1.0.git123      ->  1.0.dev123
    #   1.0.bzr123      ->  1.0.dev123
    #   0.1a0dev.123    ->  0.1a0.dev123
    # PyPI stats:  ~150 (~4%) better
    rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)

    # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
    #   0.2.pre1        ->  0.2c1
    #   0.2-c1          ->  0.2c1
    #   1.0preview123   ->  1.0c123
    # PyPI stats: ~21 (0.62%) better
    rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)

    # Tcl/Tk uses "px" for their post release markers
    rs = re.sub(r"p(\d+)$", r".post\1", rs)

    try:
        NormalizedVersion(rs)
        return rs   # already rational
    except IrrationalVersionError:
        pass
    return None
| gpl-3.0 |
cnsoft/kbengine-cocos2dx | kbe/res/scripts/common/Lib/idlelib/EditorWindow.py | 4 | 65343 | import sys
import os
import re
import string
import imp
from tkinter import *
import tkinter.simpledialog as tkSimpleDialog
import tkinter.messagebox as tkMessageBox
import traceback
import webbrowser
from idlelib.MultiCall import MultiCallCreator
from idlelib import idlever
from idlelib import WindowList
from idlelib import SearchDialog
from idlelib import GrepDialog
from idlelib import ReplaceDialog
from idlelib import PyParse
from idlelib.configHandler import idleConf
from idlelib import aboutDialog, textView, configDialog
from idlelib import macosxSupport
# The default tab setting for a Text widget, in average-width characters
# (Tk's own default, per the name; not pixels).
TK_TABWIDTH_DEFAULT = 8
def _sphinx_version():
"Format sys.version_info to produce the Sphinx version string used to install the chm docs"
major, minor, micro, level, serial = sys.version_info
release = '%s%s' % (major, minor)
if micro:
release += '%s' % (micro,)
if level == 'candidate':
release += 'rc%s' % (serial,)
elif level != 'final':
release += '%s%s' % (level[0], serial)
return release
def _find_module(fullname, path=None):
    """Version of imp.find_module() that handles hierarchical module names.

    Walks each dotted component, loading intermediate packages to follow
    their __path__, and returns the (file, filename, descr) triple for the
    final component's source.  NOTE: the returned `file` may be an open
    file object (or None); the caller is responsible for closing it.
    """
    file = None
    for tgt in fullname.split('.'):
        if file is not None:
            file.close()            # close intermediate files
        (file, filename, descr) = imp.find_module(tgt, path)
        if descr[2] == imp.PY_SOURCE:
            break                   # find but not load the source file
        module = imp.load_module(tgt, file, filename, descr)
        try:
            path = module.__path__
        except AttributeError:
            raise ImportError('No source for module ' + module.__name__)
    if descr[2] != imp.PY_SOURCE:
        # If all of the above fails and didn't raise an exception,fallback
        # to a straight import which can find __init__.py in a package.
        m = __import__(fullname)
        try:
            filename = m.__file__
        except AttributeError:
            pass
        else:
            file = None
            # Synthesize a PY_SOURCE-style descriptor from the module file.
            descr = os.path.splitext(filename)[1], None, imp.PY_SOURCE
    return file, filename, descr
class HelpDialog(object):
    """Modeless viewer for IDLE's help.txt; used as a singleton (below)."""

    def __init__(self):
        self.parent = None      # parent of help window
        self.dlg = None         # the help window itself

    def display(self, parent, near=None):
        """ Display the help dialog.

            parent - parent widget for the help window

            near - a Toplevel widget (e.g. EditorWindow or PyShell)
                   to use as a reference for placing the help window
        """
        if self.dlg is None:
            # First request: create the (modeless) dialog.
            self.show_dialog(parent)
        if near:
            self.nearwindow(near)

    def show_dialog(self, parent):
        self.parent = parent
        fn=os.path.join(os.path.abspath(os.path.dirname(__file__)),'help.txt')
        self.dlg = dlg = textView.view_file(parent,'Help',fn, modal=False)
        # Clear our reference when the window is destroyed, so the next
        # display() call recreates it.
        dlg.bind('<Destroy>', self.destroy, '+')

    def nearwindow(self, near):
        # Place the help dialog near the window specified by parent.
        # Note - this may not reposition the window in Metacity
        #  if "/apps/metacity/general/disable_workarounds" is enabled
        dlg = self.dlg
        geom = (near.winfo_rootx() + 10, near.winfo_rooty() + 10)
        dlg.withdraw()
        dlg.geometry("=+%d+%d" % geom)
        dlg.deiconify()
        dlg.lift()

    def destroy(self, ev=None):
        self.dlg = None
        self.parent = None

helpDialog = HelpDialog()  # singleton instance
class EditorWindow(object):
    # Collaborator classes/modules are bound as class attributes so they
    # are referenced through self (self.Percolator, self.IOBinding, ...),
    # which lets subclasses substitute their own implementations.
    from idlelib.Percolator import Percolator
    from idlelib.ColorDelegator import ColorDelegator
    from idlelib.UndoDelegator import UndoDelegator
    from idlelib.IOBinding import IOBinding, filesystemencoding, encoding
    from idlelib import Bindings
    from tkinter import Toplevel
    from idlelib.MultiStatusBar import MultiStatusBar

    # Class-wide URL of the local/online docs; resolved once in __init__.
    help_url = None
    def __init__(self, flist=None, filename=None, key=None, root=None):
        """Build one editor window.

        flist    -- the FileList tracking all open editor windows
        filename -- file to load into the new window, if any
        key      -- key identifying this window in flist.dict
        root     -- Tk root widget; defaults to flist.root
        """
        # Resolve the documentation URL once, preferring local docs.
        if EditorWindow.help_url is None:
            dochome = os.path.join(sys.prefix, 'Doc', 'index.html')
            if sys.platform.count('linux'):
                # look for html docs in a couple of standard places
                pyver = 'python-docs-' + '%s.%s.%s' % sys.version_info[:3]
                if os.path.isdir('/var/www/html/python/'):  # "python2" rpm
                    dochome = '/var/www/html/python/index.html'
                else:
                    basepath = '/usr/share/doc/'  # standard location
                    dochome = os.path.join(basepath, pyver,
                                           'Doc', 'index.html')
            elif sys.platform[:3] == 'win':
                chmfile = os.path.join(sys.prefix, 'Doc',
                                       'Python%s.chm' % _sphinx_version())
                if os.path.isfile(chmfile):
                    dochome = chmfile
            elif macosxSupport.runningAsOSXApp():
                # documentation is stored inside the python framework
                dochome = os.path.join(sys.prefix,
                        'Resources/English.lproj/Documentation/index.html')
            dochome = os.path.normpath(dochome)
            if os.path.isfile(dochome):
                EditorWindow.help_url = dochome
                if sys.platform == 'darwin':
                    # Safari requires real file:-URLs
                    EditorWindow.help_url = 'file://' + EditorWindow.help_url
            else:
                # No local docs found: fall back to the online docs.
                EditorWindow.help_url = "http://docs.python.org/%d.%d" % sys.version_info[:2]
        currentTheme=idleConf.CurrentTheme()
        self.flist = flist
        root = root or flist.root
        self.root = root
        try:
            sys.ps1
        except AttributeError:
            sys.ps1 = '>>> '
        self.menubar = Menu(root)
        self.top = top = WindowList.ListedToplevel(root, menu=self.menubar)
        if flist:
            self.tkinter_vars = flist.vars
            #self.top.instance_dict makes flist.inversedict available to
            #configDialog.py so it can access all EditorWindow instances
            self.top.instance_dict = flist.inversedict
        else:
            self.tkinter_vars = {}  # keys: Tkinter event names
                                    # values: Tkinter variable instances
            self.top.instance_dict = {}
        self.recent_files_path = os.path.join(idleConf.GetUserCfgDir(),
                'recent-files.lst')
        self.text_frame = text_frame = Frame(top)
        self.vbar = vbar = Scrollbar(text_frame, name='vbar')
        self.width = idleConf.GetOption('main', 'EditorWindow',
                                        'width', type='int')
        text_options = {
                'name': 'text',
                'padx': 5,
                'wrap': 'none',
                'width': self.width,
                'height': idleConf.GetOption('main', 'EditorWindow',
                                             'height', type='int')}
        if TkVersion >= 8.5:
            # Starting with tk 8.5 we have to set the new tabstyle option
            # to 'wordprocessor' to achieve the same display of tabs as in
            # older tk versions.
            text_options['tabstyle'] = 'wordprocessor'
        self.text = text = MultiCallCreator(Text)(text_frame, **text_options)
        self.top.focused_widget = self.text

        self.createmenubar()
        self.apply_bindings()

        self.top.protocol("WM_DELETE_WINDOW", self.close)
        self.top.bind("<<close-window>>", self.close_event)
        if macosxSupport.runningAsOSXApp():
            # Command-W on editorwindows doesn't work without this.
            text.bind('<<close-window>>', self.close_event)
            # Some OS X systems have only one mouse button,
            # so use control-click for pulldown menus there.
            # (Note, AquaTk defines <2> as the right button if
            #  present and the Tk Text widget already binds <2>.)
            text.bind("<Control-Button-1>",self.right_menu_event)
        else:
            # Elsewhere, use right-click for pulldown menus.
            text.bind("<3>",self.right_menu_event)
        # Wire every editor virtual event to its handler method.
        text.bind("<<cut>>", self.cut)
        text.bind("<<copy>>", self.copy)
        text.bind("<<paste>>", self.paste)
        text.bind("<<center-insert>>", self.center_insert_event)
        text.bind("<<help>>", self.help_dialog)
        text.bind("<<python-docs>>", self.python_docs)
        text.bind("<<about-idle>>", self.about_dialog)
        text.bind("<<open-config-dialog>>", self.config_dialog)
        text.bind("<<open-module>>", self.open_module)
        text.bind("<<do-nothing>>", lambda event: "break")
        text.bind("<<select-all>>", self.select_all)
        text.bind("<<remove-selection>>", self.remove_selection)
        text.bind("<<find>>", self.find_event)
        text.bind("<<find-again>>", self.find_again_event)
        text.bind("<<find-in-files>>", self.find_in_files_event)
        text.bind("<<find-selection>>", self.find_selection_event)
        text.bind("<<replace>>", self.replace_event)
        text.bind("<<goto-line>>", self.goto_line_event)
        text.bind("<<smart-backspace>>",self.smart_backspace_event)
        text.bind("<<newline-and-indent>>",self.newline_and_indent_event)
        text.bind("<<smart-indent>>",self.smart_indent_event)
        text.bind("<<indent-region>>",self.indent_region_event)
        text.bind("<<dedent-region>>",self.dedent_region_event)
        text.bind("<<comment-region>>",self.comment_region_event)
        text.bind("<<uncomment-region>>",self.uncomment_region_event)
        text.bind("<<tabify-region>>",self.tabify_region_event)
        text.bind("<<untabify-region>>",self.untabify_region_event)
        text.bind("<<toggle-tabs>>",self.toggle_tabs_event)
        text.bind("<<change-indentwidth>>",self.change_indentwidth_event)
        text.bind("<Left>", self.move_at_edge_if_selection(0))
        text.bind("<Right>", self.move_at_edge_if_selection(1))
        text.bind("<<del-word-left>>", self.del_word_left)
        text.bind("<<del-word-right>>", self.del_word_right)
        text.bind("<<beginning-of-line>>", self.home_callback)

        if flist:
            # Register this window with the file list.
            flist.inversedict[self] = key
            if key:
                flist.dict[key] = self
            text.bind("<<open-new-window>>", self.new_callback)
            text.bind("<<close-all-windows>>", self.flist.close_all_callback)
            text.bind("<<open-class-browser>>", self.open_class_browser)
            text.bind("<<open-path-browser>>", self.open_path_browser)

        self.set_status_bar()
        vbar['command'] = text.yview
        vbar.pack(side=RIGHT, fill=Y)
        text['yscrollcommand'] = vbar.set
        fontWeight = 'normal'
        if idleConf.GetOption('main', 'EditorWindow', 'font-bold', type='bool'):
            fontWeight='bold'
        text.config(font=(idleConf.GetOption('main', 'EditorWindow', 'font'),
                          idleConf.GetOption('main', 'EditorWindow',
                                             'font-size', type='int'),
                          fontWeight))
        text_frame.pack(side=LEFT, fill=BOTH, expand=1)
        text.pack(side=TOP, fill=BOTH, expand=1)
        text.focus_set()

        # usetabs true  -> literal tab characters are used by indent and
        #                  dedent cmds, possibly mixed with spaces if
        #                  indentwidth is not a multiple of tabwidth,
        #                  which will cause Tabnanny to nag!
        # false -> tab characters are converted to spaces by indent
        #          and dedent cmds, and ditto TAB keystrokes
        # Although use-spaces=0 can be configured manually in config-main.def,
        # configuration of tabs v. spaces is not supported in the configuration
        # dialog.  IDLE promotes the preferred Python indentation: use spaces!
        usespaces = idleConf.GetOption('main', 'Indent',
                                       'use-spaces', type='bool')
        self.usetabs = not usespaces

        # tabwidth is the display width of a literal tab character.
        # CAUTION:  telling Tk to use anything other than its default
        # tab setting causes it to use an entirely different tabbing algorithm,
        # treating tab stops as fixed distances from the left margin.
        # Nobody expects this, so for now tabwidth should never be changed.
        self.tabwidth = 8    # must remain 8 until Tk is fixed.

        # indentwidth is the number of screen characters per indent level.
        # The recommended Python indentation is four spaces.
        self.indentwidth = self.tabwidth
        self.set_notabs_indentwidth()

        # If context_use_ps1 is true, parsing searches back for a ps1 line;
        # else searches for a popular (if, def, ...) Python stmt.
        self.context_use_ps1 = False

        # When searching backwards for a reliable place to begin parsing,
        # first start num_context_lines[0] lines back, then
        # num_context_lines[1] lines back if that didn't work, and so on.
        # The last value should be huge (larger than the # of lines in a
        # conceivable file).
        # Making the initial values larger slows things down more often.
        self.num_context_lines = 50, 500, 5000000

        self.per = per = self.Percolator(text)
        self.undo = undo = self.UndoDelegator()
        per.insertfilter(undo)
        text.undo_block_start = undo.undo_block_start
        text.undo_block_stop = undo.undo_block_stop
        undo.set_saved_change_hook(self.saved_change_hook)

        # IOBinding implements file I/O and printing functionality
        self.io = io = self.IOBinding(self)
        io.set_filename_change_hook(self.filename_change_hook)

        self.good_load = False
        self.set_indentation_params(False)
        self.color = None # initialized below in self.ResetColorizer

        if filename:
            if os.path.exists(filename) and not os.path.isdir(filename):
                if io.loadfile(filename):
                    self.good_load = True
                    is_py_src = self.ispythonsource(filename)
                    self.set_indentation_params(is_py_src)
                    if is_py_src:
                        # Only Python source gets syntax coloring.
                        self.color = color = self.ColorDelegator()
                        per.insertfilter(color)
            else:
                # File doesn't exist yet: just set the name for a later save.
                io.set_filename(filename)

        self.ResetColorizer()
        self.saved_change_hook()
        self.update_recent_files_list()
        self.load_extensions()

        menu = self.menudict.get('windows')
        if menu:
            end = menu.index("end")
            if end is None:
                end = -1
            if end >= 0:
                menu.add_separator()
                end = end + 1
            # Remember where the static part of the Windows menu ends;
            # postwindowsmenu() rebuilds everything after this index.
            self.wmenu_end = end
            WindowList.register_callback(self.postwindowsmenu)

        # Some abstractions so IDLE extensions are cross-IDE
        self.askyesno = tkMessageBox.askyesno
        self.askinteger = tkSimpleDialog.askinteger
        self.showerror = tkMessageBox.showerror
def _filename_to_unicode(self, filename):
"""convert filename to unicode in order to display it in Tk"""
if isinstance(filename, str) or not filename:
return filename
else:
try:
return filename.decode(self.filesystemencoding)
except UnicodeDecodeError:
# XXX
try:
return filename.decode(self.encoding)
except UnicodeDecodeError:
# byte-to-byte conversion
return filename.decode('iso8859-1')
def new_callback(self, event):
dirname, basename = self.io.defaultfilename()
self.flist.new(dirname)
return "break"
    def home_callback(self, event):
        """Smart Home: jump to the first non-blank character (or just past
        the Shell prompt), toggling to column 0 on a second press.  With
        Shift held, extend the selection instead of clearing it.  Returns
        "break" to suppress Tk's default Home behavior.
        """
        if (event.state & 4) != 0 and event.keysym == "Home":
            # state&4==Control. If <Control-Home>, use the Tk binding.
            return
        if self.text.index("iomark") and \
           self.text.compare("iomark", "<=", "insert lineend") and \
           self.text.compare("insert linestart", "<=", "iomark"):
            # In Shell on input line, go to just after prompt
            insertpt = int(self.text.index("iomark").split(".")[1])
        else:
            # Find the first non-blank column; if the line is all blank,
            # the for-else leaves insertpt at end of line.
            line = self.text.get("insert linestart", "insert lineend")
            for insertpt in range(len(line)):
                if line[insertpt] not in (' ','\t'):
                    break
            else:
                insertpt=len(line)
        lineat = int(self.text.index("insert").split('.')[1])
        if insertpt == lineat:
            # Already at the smart-home column: toggle to column 0.
            insertpt = 0
        dest = "insert linestart+"+str(insertpt)+"c"
        if (event.state&1) == 0:
            # shift was not pressed
            self.text.tag_remove("sel", "1.0", "end")
        else:
            if not self.text.index("sel.first"):
                # there was no previous selection
                self.text.mark_set("my_anchor", "insert")
            else:
                if self.text.compare(self.text.index("sel.first"), "<",
                                     self.text.index("insert")):
                    self.text.mark_set("my_anchor", "sel.first") # extend back
                else:
                    self.text.mark_set("my_anchor", "sel.last") # extend forward
            first = self.text.index(dest)
            last = self.text.index("my_anchor")
            if self.text.compare(first,">",last):
                first,last = last,first
            self.text.tag_remove("sel", "1.0", "end")
            self.text.tag_add("sel", first, last)
        self.text.mark_set("insert", dest)
        self.text.see("insert")
        return "break"
    def set_status_bar(self):
        """Create the Ln/Col status bar and keep it updated on key/mouse
        release via the <<set-line-and-column>> virtual event."""
        self.status_bar = self.MultiStatusBar(self.top)
        if macosxSupport.runningAsOSXApp():
            # Insert some padding to avoid obscuring some of the statusbar
            # by the resize widget.
            self.status_bar.set_label('_padding1', '    ', side=RIGHT)
        self.status_bar.set_label('column', 'Col: ?', side=RIGHT)
        self.status_bar.set_label('line', 'Ln: ?', side=RIGHT)
        self.status_bar.pack(side=BOTTOM, fill=X)
        self.text.bind("<<set-line-and-column>>", self.set_line_and_column)
        self.text.event_add("<<set-line-and-column>>",
                            "<KeyRelease>", "<ButtonRelease>")
        self.text.after_idle(self.set_line_and_column)
def set_line_and_column(self, event=None):
line, column = self.text.index(INSERT).split('.')
self.status_bar.set_label('column', 'Col: %s' % column)
self.status_bar.set_label('line', 'Ln: %s' % line)
    # (internal name, display label) pairs; the underscore in each label
    # marks the keyboard-underline character (see prepstr in createmenubar).
    menu_specs = [
        ("file", "_File"),
        ("edit", "_Edit"),
        ("format", "F_ormat"),
        ("run", "_Run"),
        ("options", "_Options"),
        ("windows", "_Windows"),
        ("help", "_Help"),
    ]

    if macosxSupport.runningAsOSXApp():
        # Mac adjustments: drop the Options menu and rename
        # "Windows" to "Window".
        del menu_specs[-3]
        menu_specs[-2] = ("windows", "_Window")
    def createmenubar(self):
        """Build the menu bar from menu_specs and populate each menu.

        Fills self.menudict (name -> Menu), inserts the Recent Files
        cascade into the File menu, and seeds the Help menu entries.
        """
        mbar = self.menubar
        self.menudict = menudict = {}
        for name, label in self.menu_specs:
            underline, label = prepstr(label)
            menudict[name] = menu = Menu(mbar, name=name)
            mbar.add_cascade(label=label, menu=menu, underline=underline)

        if macosxSupport.isCarbonAquaTk(self.root):
            # Insert the application menu
            menudict['application'] = menu = Menu(mbar, name='apple')
            mbar.add_cascade(label='IDLE', menu=menu)

        self.fill_menus()
        self.recent_files_menu = Menu(self.menubar)
        self.menudict['file'].insert_cascade(3, label='Recent Files',
                                             underline=0,
                                             menu=self.recent_files_menu)
        # Remember the size of the static Help menu so extra entries can
        # be reset later (see reset_help_menu_entries).
        self.base_helpmenu_length = self.menudict['help'].index(END)
        self.reset_help_menu_entries()
def postwindowsmenu(self):
# Only called when Windows menu exists
menu = self.menudict['windows']
end = menu.index("end")
if end is None:
end = -1
if end > self.wmenu_end:
menu.delete(self.wmenu_end+1, end)
WindowList.add_windows_to_menu(menu)
    # Lazily-built right-click context menu (see make_rmenu).
    rmenu = None

    def right_menu_event(self, event):
        """Pop up the context menu at the click position, enabling or
        disabling entries via their rmenu_specs state callbacks."""
        self.text.mark_set("insert", "@%d,%d" % (event.x, event.y))
        if not self.rmenu:
            self.make_rmenu()
        rmenu = self.rmenu
        self.event = event
        iswin = sys.platform[:3] == 'win'
        if iswin:
            self.text.config(cursor="arrow")

        for label, eventname, verify_state in self.rmenu_specs:
            if verify_state is None:
                continue
            # Refresh menu entry state ('normal'/'disabled') from callback.
            state = getattr(self, verify_state)()
            rmenu.entryconfigure(label, state=state)

        rmenu.tk_popup(event.x_root, event.y_root)
        if iswin:
            self.text.config(cursor="ibeam")
    rmenu_specs = [
        # ("Label", "<<virtual-event>>", "statefuncname"), ...
        ("Close", "<<close-window>>", None), # Example
    ]

    def make_rmenu(self):
        """Build the right-click menu from rmenu_specs; a None label
        produces a separator."""
        rmenu = Menu(self.text, tearoff=0)
        for label, eventname, _ in self.rmenu_specs:
            if label is not None:
                # Default-arg binding freezes eventname for each entry.
                def command(text=self.text, eventname=eventname):
                    text.event_generate(eventname)
                rmenu.add_command(label=label, command=command)
            else:
                rmenu.add_separator()
        self.rmenu = rmenu
def rmenu_check_cut(self):
return self.rmenu_check_copy()
def rmenu_check_copy(self):
try:
indx = self.text.index('sel.first')
except TclError:
return 'disabled'
else:
return 'normal' if indx else 'disabled'
    def rmenu_check_paste(self):
        """Return 'normal' if the clipboard holds pastable data, else 'disabled'."""
        try:
            # Raises TclError when the CLIPBOARD selection is empty/unavailable.
            self.text.tk.call('tk::GetSelection', self.text, 'CLIPBOARD')
        except TclError:
            return 'disabled'
        else:
            return 'normal'
    def about_dialog(self, event=None):
        """Handle Help => About IDLE."""
        aboutDialog.AboutDialog(self.top,'About IDLE')
    def config_dialog(self, event=None):
        """Handle Options => Configure IDLE."""
        configDialog.ConfigDialog(self.top,'Settings')
    def help_dialog(self, event=None):
        """Handle Help => IDLE Help: show the help dialog near this window."""
        # Prefer the root as parent so the dialog survives this window closing.
        if self.root:
            parent = self.root
        else:
            parent = self.top
        helpDialog.display(parent, near=self.top)
    def python_docs(self, event=None):
        """Open self.help_url in a browser (os.startfile on Windows)."""
        if sys.platform[:3] == 'win':
            try:
                os.startfile(self.help_url)
            except WindowsError as why:
                tkMessageBox.showerror(title='Document Start Failure',
                    message=str(why), parent=self.text)
        else:
            webbrowser.open(self.help_url)
        return "break"
    def cut(self,event):
        """Cut the selection to the clipboard via the <<Cut>> virtual event."""
        self.text.event_generate("<<Cut>>")
        return "break"
    def copy(self,event):
        """Copy the selection; with no selection, let the keybinding propagate."""
        if not self.text.tag_ranges("sel"):
            # There is no selection, so do nothing and maybe interrupt.
            return
        self.text.event_generate("<<Copy>>")
        return "break"
    def paste(self,event):
        """Paste from the clipboard and scroll the insert point into view."""
        self.text.event_generate("<<Paste>>")
        self.text.see("insert")
        return "break"
    def select_all(self, event=None):
        """Select the whole buffer (minus the trailing newline Tk appends)."""
        self.text.tag_add("sel", "1.0", "end-1c")
        self.text.mark_set("insert", "1.0")
        self.text.see("insert")
        return "break"
    def remove_selection(self, event=None):
        """Clear any selection and scroll the insert point into view."""
        self.text.tag_remove("sel", "1.0", "end")
        self.text.see("insert")
    def move_at_edge_if_selection(self, edge_index):
        """Cursor move begins at start or end of selection

        When a left/right cursor key is pressed create and return to Tkinter a
        function which causes a cursor move from the associated edge of the
        selection.  edge_index is 0 for the left edge, 1 for the right.
        """
        # Bind widget methods to locals once; the handler runs per keypress.
        self_text_index = self.text.index
        self_text_mark_set = self.text.mark_set
        edges_table = ("sel.first+1c", "sel.last-1c")
        def move_at_edge(event):
            if (event.state & 5) == 0: # no shift(==1) or control(==4) pressed
                try:
                    # Raises TclError if there is no selection.
                    self_text_index("sel.first")
                    self_text_mark_set("insert", edges_table[edge_index])
                except TclError:
                    pass
        return move_at_edge
    def del_word_left(self, event):
        """Delete the word to the left of the cursor (Tk <Meta-Delete>)."""
        self.text.event_generate('<Meta-Delete>')
        return "break"
    def del_word_right(self, event):
        """Delete the word to the right of the cursor (Tk <Meta-d>)."""
        self.text.event_generate('<Meta-d>')
        return "break"
    def find_event(self, event):
        """Open the search dialog."""
        SearchDialog.find(self.text)
        return "break"
    def find_again_event(self, event):
        """Repeat the last search."""
        SearchDialog.find_again(self.text)
        return "break"
    def find_selection_event(self, event):
        """Search for the next occurrence of the current selection."""
        SearchDialog.find_selection(self.text)
        return "break"
    def find_in_files_event(self, event):
        """Open the grep (find-in-files) dialog."""
        GrepDialog.grep(self.text, self.io, self.flist)
        return "break"
    def replace_event(self, event):
        """Open the search-and-replace dialog."""
        ReplaceDialog.replace(self.text)
        return "break"
    def goto_line_event(self, event):
        """Prompt for a line number and move the insert cursor to that line."""
        text = self.text
        lineno = tkSimpleDialog.askinteger("Goto",
                "Go to line number:",parent=text)
        if lineno is None:
            # Dialog cancelled.
            return "break"
        if lineno <= 0:
            text.bell()
            return "break"
        text.mark_set("insert", "%d.0" % lineno)
        text.see("insert")
    def open_module(self, event=None):
        """Prompt for a module name, locate it on sys.path, and open its source.

        The current selection (if any) seeds the dialog's initial value.
        Shows an error dialog if the module cannot be found or is not
        plain Python source.
        """
        # XXX Shouldn't this be in IOBinding?
        try:
            name = self.text.get("sel.first", "sel.last")
        except TclError:
            name = ""
        else:
            name = name.strip()
        name = tkSimpleDialog.askstring("Module",
                 "Enter the name of a Python module\n"
                 "to search on sys.path and open:",
                 parent=self.text, initialvalue=name)
        if name:
            name = name.strip()
        if not name:
            return
        # XXX Ought to insert current file's directory in front of path
        try:
            # _find_module is defined elsewhere in this file; presumably an
            # imp.find_module wrapper handling dotted names -- TODO confirm.
            (f, file, (suffix, mode, type)) = _find_module(name)
        except (NameError, ImportError) as msg:
            tkMessageBox.showerror("Import error", str(msg), parent=self.text)
            return
        if type != imp.PY_SOURCE:
            tkMessageBox.showerror("Unsupported type",
                "%s is not a source module" % name, parent=self.text)
            return
        if f:
            f.close()
        if self.flist:
            self.flist.open(file)
        else:
            self.io.loadfile(file)
    def open_class_browser(self, event=None):
        """Open a class browser on this buffer's file; error if unsaved."""
        filename = self.io.filename
        if not filename:
            tkMessageBox.showerror(
                "No filename",
                "This buffer has no associated filename",
                master=self.text)
            self.text.focus_set()
            return None
        head, tail = os.path.split(filename)
        base, ext = os.path.splitext(tail)
        from idlelib import ClassBrowser
        ClassBrowser.ClassBrowser(self.flist, base, [head])
    def open_path_browser(self, event=None):
        """Open a browser on sys.path."""
        from idlelib import PathBrowser
        PathBrowser.PathBrowser(self.flist)
    def gotoline(self, lineno):
        """Move to *lineno* (1-based), select that line, and center the view."""
        if lineno is not None and lineno > 0:
            self.text.mark_set("insert", "%d.0" % lineno)
            self.text.tag_remove("sel", "1.0", "end")
            self.text.tag_add("sel", "insert", "insert +1l")
            self.center()
def ispythonsource(self, filename):
if not filename or os.path.isdir(filename):
return True
base, ext = os.path.splitext(os.path.basename(filename))
if os.path.normcase(ext) in (".py", ".pyw"):
return True
line = self.text.get('1.0', '1.0 lineend')
return line.startswith('#!') and 'python' in line
    def close_hook(self):
        """Default close hook: unregister from the file list, maybe terminate."""
        if self.flist:
            self.flist.unregister_maybe_terminate(self)
            self.flist = None
    def set_close_hook(self, close_hook):
        """Override the callable invoked at the end of _close()."""
        self.close_hook = close_hook
    def filename_change_hook(self):
        """React to a filename change: update titles, window list, colorizer."""
        if self.flist:
            self.flist.filename_changed_edit(self)
        self.saved_change_hook()
        self.top.update_windowlist_registry(self)
        # The new name may change whether the buffer counts as Python source.
        self.ResetColorizer()
    def _addcolorizer(self):
        """Install a colorizer filter if none is active and the file is Python."""
        if self.color:
            return
        if self.ispythonsource(self.io.filename):
            self.color = self.ColorDelegator()
        # can add more colorizers here...
        if self.color:
            # Re-stack so the colorizer sits between the Percolator and undo.
            self.per.removefilter(self.undo)
            self.per.insertfilter(self.color)
            self.per.insertfilter(self.undo)
    def _rmcolorizer(self):
        """Remove the active colorizer filter and its tags, if any."""
        if not self.color:
            return
        self.color.removecolors()
        self.per.removefilter(self.color)
        self.color = None
    def ResetColorizer(self):
        "Update the colour theme"
        # Called from self.filename_change_hook and from configDialog.py
        self._rmcolorizer()
        self._addcolorizer()
        theme = idleConf.GetOption('main','Theme','name')
        normal_colors = idleConf.GetHighlight(theme, 'normal')
        cursor_color = idleConf.GetHighlight(theme, 'cursor', fgBg='fg')
        select_colors = idleConf.GetHighlight(theme, 'hilite')
        # Apply theme colors directly to the text widget.
        self.text.config(
            foreground=normal_colors['foreground'],
            background=normal_colors['background'],
            insertbackground=cursor_color,
            selectforeground=select_colors['foreground'],
            selectbackground=select_colors['background'],
            )
IDENTCHARS = string.ascii_letters + string.digits + "_"
def colorize_syntax_error(self, text, pos):
text.tag_add("ERROR", pos)
char = text.get(pos)
if char and char in self.IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
if '\n' == text.get(pos): # error at line end
text.mark_set("insert", pos)
else:
text.mark_set("insert", pos + "+1c")
text.see(pos)
    def ResetFont(self):
        "Update the text widgets' font if it is changed"
        # Called from configDialog.py
        fontWeight='normal'
        if idleConf.GetOption('main','EditorWindow','font-bold',type='bool'):
            fontWeight='bold'
        # Font is a (family, size, weight) tuple read from the config.
        self.text.config(font=(idleConf.GetOption('main','EditorWindow','font'),
                idleConf.GetOption('main','EditorWindow','font-size',
                                   type='int'),
                fontWeight))
    def RemoveKeybindings(self):
        "Remove the keybindings before they are changed."
        # Called from configDialog.py
        self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
        for event, keylist in keydefs.items():
            self.text.event_delete(event, *keylist)
        # Also remove bindings contributed by extensions.
        for extensionName in self.get_standard_extension_names():
            xkeydefs = idleConf.GetExtensionBindings(extensionName)
            if xkeydefs:
                for event, keylist in xkeydefs.items():
                    self.text.event_delete(event, *keylist)
    def ApplyKeybindings(self):
        "Update the keybindings after they are changed"
        # Called from configDialog.py
        self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
        self.apply_bindings()
        for extensionName in self.get_standard_extension_names():
            xkeydefs = idleConf.GetExtensionBindings(extensionName)
            if xkeydefs:
                self.apply_bindings(xkeydefs)
        #update menu accelerators
        # Map menu name -> {item label -> virtual event} from the menu defs.
        menuEventDict = {}
        for menu in self.Bindings.menudefs:
            menuEventDict[menu[0]] = {}
            for item in menu[1]:
                if item:
                    # prepstr strips the mnemonic underscore from the label.
                    menuEventDict[menu[0]][prepstr(item[0])[1]] = item[1]
        # Walk every command entry of every menu and refresh its accelerator.
        for menubarItem in self.menudict:
            menu = self.menudict[menubarItem]
            end = menu.index(END) + 1
            for index in range(0, end):
                if menu.type(index) == 'command':
                    accel = menu.entrycget(index, 'accelerator')
                    if accel:
                        itemName = menu.entrycget(index, 'label')
                        event = ''
                        if menubarItem in menuEventDict:
                            if itemName in menuEventDict[menubarItem]:
                                event = menuEventDict[menubarItem][itemName]
                        if event:
                            accel = get_accelerator(keydefs, event)
                            menu.entryconfig(index, accelerator=accel)
    def set_notabs_indentwidth(self):
        "Update the indentwidth if changed and not using tabs in this window"
        # Called from configDialog.py
        if not self.usetabs:
            self.indentwidth = idleConf.GetOption('main', 'Indent','num-spaces',
                                                  type='int')
    def reset_help_menu_entries(self):
        "Update the additional help entries on the Help menu"
        help_list = idleConf.GetAllExtraHelpSourcesList()
        helpmenu = self.menudict['help']
        # first delete the extra help entries, if any
        helpmenu_length = helpmenu.index(END)
        if helpmenu_length > self.base_helpmenu_length:
            helpmenu.delete((self.base_helpmenu_length + 1), helpmenu_length)
        # then rebuild them
        if help_list:
            helpmenu.add_separator()
            for entry in help_list:
                # entry is (label, helpfile); freeze helpfile in the callback.
                cmd = self.__extra_help_callback(entry[1])
                helpmenu.add_command(label=entry[0], command=cmd)
        # and update the menu dictionary
        self.menudict['help'] = helpmenu
    def __extra_help_callback(self, helpfile):
        "Create a callback with the helpfile value frozen at definition time"
        def display_extra_help(helpfile=helpfile):
            if not helpfile.startswith(('www', 'http')):
                # Local file: normalize the path for the platform.
                helpfile = os.path.normpath(helpfile)
            if sys.platform[:3] == 'win':
                try:
                    os.startfile(helpfile)
                except WindowsError as why:
                    tkMessageBox.showerror(title='Document Start Failure',
                        message=str(why), parent=self.text)
            else:
                webbrowser.open(helpfile)
        return display_extra_help
    def update_recent_files_list(self, new_file=None):
        "Load and update the recent files list and menus"
        # The on-disk list holds one absolute path per line, newest first.
        rf_list = []
        if os.path.exists(self.recent_files_path):
            rf_list_file = open(self.recent_files_path,'r',
                                encoding='utf_8', errors='replace')
            try:
                rf_list = rf_list_file.readlines()
            finally:
                rf_list_file.close()
        if new_file:
            new_file = os.path.abspath(new_file) + '\n'
            if new_file in rf_list:
                rf_list.remove(new_file)  # move to top
            rf_list.insert(0, new_file)
        # clean and save the recent files list
        bad_paths = []
        for path in rf_list:
            # path[0:-1] drops the trailing newline before the existence check.
            if '\0' in path or not os.path.exists(path[0:-1]):
                bad_paths.append(path)
        rf_list = [path for path in rf_list if path not in bad_paths]
        # Menu entries are underlined with 1..0, A..K: at most 21 files.
        ulchars = "1234567890ABCDEFGHIJK"
        rf_list = rf_list[0:len(ulchars)]
        try:
            with open(self.recent_files_path, 'w',
                        encoding='utf_8', errors='replace') as rf_file:
                rf_file.writelines(rf_list)
        except IOError as err:
            # Warn only once per session to avoid dialog storms.
            if not getattr(self.root, "recentfilelist_error_displayed", False):
                self.root.recentfilelist_error_displayed = True
                tkMessageBox.showerror(title='IDLE Error',
                    message='Unable to update Recent Files list:\n%s'
                        % str(err),
                    parent=self.text)
        # for each edit window instance, construct the recent files menu
        for instance in self.top.instance_dict:
            menu = instance.recent_files_menu
            menu.delete(0, END)  # clear, and rebuild:
            for i, file_name in enumerate(rf_list):
                file_name = file_name.rstrip()  # zap \n
                # make unicode string to display non-ASCII chars correctly
                ufile_name = self._filename_to_unicode(file_name)
                callback = instance.__recent_file_callback(file_name)
                menu.add_command(label=ulchars[i] + " " + ufile_name,
                                 command=callback,
                                 underline=0)
    def __recent_file_callback(self, file_name):
        """Return a menu callback that opens *file_name* (frozen via default)."""
        def open_recent_file(fn_closure=file_name):
            self.io.open(editFile=fn_closure)
        return open_recent_file
    def saved_change_hook(self):
        """Refresh window title and icon name from filename and saved state."""
        short = self.short_title()
        long = self.long_title()
        if short and long:
            title = short + " - " + long
        elif short:
            title = short
        elif long:
            title = long
        else:
            title = "Untitled"
        icon = short or long or title
        if not self.get_saved():
            # Unsaved buffers are flagged with asterisks.
            title = "*%s*" % title
            icon = "*%s" % icon
        self.top.wm_title(title)
        self.top.wm_iconname(icon)
    def get_saved(self):
        """Return True if the buffer has no unsaved changes (per undo state)."""
        return self.undo.get_saved()
    def set_saved(self, flag):
        """Record the buffer's saved/unsaved state in the undo delegator."""
        self.undo.set_saved(flag)
    def reset_undo(self):
        """Clear the undo history."""
        self.undo.reset_undo()
    def short_title(self):
        """Return the basename of the current file, or None if unsaved."""
        filename = self.io.filename
        if filename:
            filename = os.path.basename(filename)
        # return unicode string to display non-ASCII chars correctly
        return self._filename_to_unicode(filename)
    def long_title(self):
        """Return the full path of the current file, or "" if unsaved."""
        # return unicode string to display non-ASCII chars correctly
        return self._filename_to_unicode(self.io.filename or "")
    def center_insert_event(self, event):
        """Scroll so the insert point is vertically centered."""
        self.center()
def center(self, mark="insert"):
text = self.text
top, bot = self.getwindowlines()
lineno = self.getlineno(mark)
height = bot - top
newtop = max(1, lineno - height//2)
text.yview(float(newtop))
    def getwindowlines(self):
        """Return (top, bottom) visible line numbers of the text widget."""
        text = self.text
        top = self.getlineno("@0,0")
        bot = self.getlineno("@0,65535")
        if top == bot and text.winfo_height() == 1:
            # Geometry manager hasn't run yet
            height = int(text['height'])
            bot = top + height - 1
        return top, bot
def getlineno(self, mark="insert"):
text = self.text
return int(float(text.index(mark)))
    def get_geometry(self):
        "Return (width, height, x, y)"
        geom = self.top.wm_geometry()
        # wm_geometry always yields "WxH+X+Y", so the match cannot fail here.
        m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", geom)
        return list(map(int, m.groups()))
    def close_event(self, event):
        """Event handler wrapper around close()."""
        self.close()
    def maybesave(self):
        """Offer to save unsaved changes; return the IOBinding's reply."""
        if self.io:
            if not self.get_saved():
                # Raise the window so the user sees what the prompt is about.
                if self.top.state()!='normal':
                    self.top.deiconify()
                self.top.lower()
                self.top.lift()
            return self.io.maybesave()
    def close(self):
        """Close the window unless the user cancels the save prompt."""
        reply = self.maybesave()
        if str(reply) != "cancel":
            self._close()
        return reply
    def _close(self):
        """Tear down the window: extensions, IO, colorizer, widgets, hooks."""
        if self.io.filename:
            self.update_recent_files_list(new_file=self.io.filename)
        WindowList.unregister_callback(self.postwindowsmenu)
        self.unload_extensions()
        self.io.close()
        self.io = None
        self.undo = None
        if self.color:
            self.color.close(False)
            self.color = None
        # Drop widget references so Tk objects can be collected.
        self.text = None
        self.tkinter_vars = None
        self.per.close()
        self.per = None
        self.top.destroy()
        if self.close_hook:
            # unless override: unregister from flist, terminate if last window
            self.close_hook()
    def load_extensions(self):
        """Reset the extension registry and load the standard extensions."""
        self.extensions = {}
        self.load_standard_extensions()
    def unload_extensions(self):
        """Close every loaded extension that supports it and clear the registry."""
        for ins in list(self.extensions.values()):
            if hasattr(ins, "close"):
                ins.close()
        self.extensions = {}
    def load_standard_extensions(self):
        """Load each configured extension; a failure is logged, not fatal."""
        for name in self.get_standard_extension_names():
            try:
                self.load_extension(name)
            except:
                # Deliberately broad: one broken extension must not stop the rest.
                print("Failed to load extension", repr(name))
                traceback.print_exc()
    def get_standard_extension_names(self):
        """Return the names of editor extensions enabled in the config."""
        return idleConf.GetExtensions(editor_only=True)
    def load_extension(self, name):
        """Import extension *name*, instantiate it, and wire menus/bindings.

        The module must expose a class with the same name as the module.
        """
        try:
            mod = __import__(name, globals(), locals(), [])
        except ImportError:
            print("\nFailed to import extension: ", name)
            raise
        cls = getattr(mod, name)
        keydefs = idleConf.GetExtensionBindings(name)
        if hasattr(cls, "menudefs"):
            self.fill_menus(cls.menudefs, keydefs)
        ins = cls(self)
        self.extensions[name] = ins
        if keydefs:
            self.apply_bindings(keydefs)
            for vevent in keydefs:
                # "<<some-event>>" -> "some_event_event" handler method name.
                methodname = vevent.replace("-", "_")
                while methodname[:1] == '<':
                    methodname = methodname[1:]
                while methodname[-1:] == '>':
                    methodname = methodname[:-1]
                methodname = methodname + "_event"
                if hasattr(ins, methodname):
                    self.text.bind(vevent, getattr(ins, methodname))
    def apply_bindings(self, keydefs=None):
        """Register virtual-event key bindings (defaults from self.Bindings)."""
        if keydefs is None:
            keydefs = self.Bindings.default_keydefs
        text = self.text
        text.keydefs = keydefs
        for event, keylist in keydefs.items():
            if keylist:
                text.event_add(event, *keylist)
    def fill_menus(self, menudefs=None, keydefs=None):
        """Add appropriate entries to the menus and submenus

        Menus that are absent or None in self.menudict are ignored.
        Each entry is (label, virtual-event) or None for a separator;
        a leading '!' on the label makes a checkbutton entry.
        """
        if menudefs is None:
            menudefs = self.Bindings.menudefs
        if keydefs is None:
            keydefs = self.Bindings.default_keydefs
        menudict = self.menudict
        text = self.text
        for mname, entrylist in menudefs:
            menu = menudict.get(mname)
            if not menu:
                continue
            for entry in entrylist:
                if not entry:
                    menu.add_separator()
                else:
                    label, eventname = entry
                    checkbutton = (label[:1] == '!')
                    if checkbutton:
                        label = label[1:]
                    underline, label = prepstr(label)
                    accelerator = get_accelerator(keydefs, eventname)
                    # Default arg freezes eventname for this entry's command.
                    def command(text=text, eventname=eventname):
                        text.event_generate(eventname)
                    if checkbutton:
                        var = self.get_var_obj(eventname, BooleanVar)
                        menu.add_checkbutton(label=label, underline=underline,
                            command=command, accelerator=accelerator,
                            variable=var)
                    else:
                        menu.add_command(label=label, underline=underline,
                                         command=command,
                                         accelerator=accelerator)
    def getvar(self, name):
        """Return the value of the Tkinter variable *name*; raise NameError if unset."""
        var = self.get_var_obj(name)
        if var:
            value = var.get()
            return value
        else:
            raise NameError(name)
    def setvar(self, name, value, vartype=None):
        """Set Tkinter variable *name* (creating it if *vartype* is given)."""
        var = self.get_var_obj(name, vartype)
        if var:
            var.set(value)
        else:
            raise NameError(name)
    def get_var_obj(self, name, vartype=None):
        """Return the cached Tkinter variable *name*, creating it if needed."""
        var = self.tkinter_vars.get(name)
        if not var and vartype:
            # create a Tkinter variable object with self.text as master:
            self.tkinter_vars[name] = var = vartype(self.text)
        return var
    # Tk implementations of "virtual text methods" -- each platform
    # reusing IDLE's support code needs to define these for its GUI's
    # flavor of widget.

    # Is character at text_index in a Python string? Return 0 for
    # "guaranteed no", true for anything else. This info is expensive
    # to compute ab initio, but is probably already known by the
    # platform's colorizer.

    def is_char_in_string(self, text_index):
        """Return a true value if the char at *text_index* may be in a string."""
        if self.color:
            # Return true iff colorizer hasn't (re)gotten this far
            # yet, or the character is tagged as being in a string
            return self.text.tag_prevrange("TODO", text_index) or \
                   "STRING" in self.text.tag_names(text_index)
        else:
            # The colorizer is missing: assume the worst
            return 1
    # If a selection is defined in the text widget, return (start,
    # end) as Tkinter text indices, otherwise return (None, None)
    def get_selection_indices(self):
        """Return (first, last) indices of the selection, or (None, None)."""
        try:
            first = self.text.index("sel.first")
            last = self.text.index("sel.last")
            return first, last
        except TclError:
            # No 'sel' tag: nothing is selected.
            return None, None
    # Return the text widget's current view of what a tab stop means
    # (equivalent width in spaces).
    def get_tk_tabwidth(self):
        """Return the widget's tab width in columns (falls back to the default)."""
        current = self.text['tabs'] or TK_TABWIDTH_DEFAULT
        return int(current)
    # Set the text widget's current view of what a tab stop means.
    def set_tk_tabwidth(self, newtabwidth):
        """Set the widget's tab stops to *newtabwidth* columns (no-op if same)."""
        text = self.text
        if self.get_tk_tabwidth() != newtabwidth:
            # Set text widget tab width
            # Tk tab stops are in pixels; measure n 'n' characters in this font.
            pixels = text.tk.call("font", "measure", text["font"],
                                  "-displayof", text.master,
                                  "n" * newtabwidth)
            text.configure(tabs=pixels)
    ### begin autoindent code ### (configuration was moved to beginning of class)

    def set_indentation_params(self, is_py_src, guess=True):
        """Configure indent width (and tab usage) for the buffer.

        For Python source, optionally guess the indent width from the
        file's own contents; switch off tabs when the guess differs from
        the tab width.
        """
        if is_py_src and guess:
            i = self.guess_indent()
            if 2 <= i <= 8:
                self.indentwidth = i
            if self.indentwidth != self.tabwidth:
                self.usetabs = False
        self.set_tk_tabwidth(self.tabwidth)
    def smart_backspace_event(self, event):
        """Backspace handler: delete selection, or dedent to the previous stop.

        With only whitespace to the left, deletes back to the closest
        virtual tab stop (may insert spaces when backing over a tab).
        """
        text = self.text
        first, last = self.get_selection_indices()
        if first and last:
            text.delete(first, last)
            text.mark_set("insert", first)
            return "break"
        # Delete whitespace left, until hitting a real char or closest
        # preceding virtual tab stop.
        chars = text.get("insert linestart", "insert")
        if chars == '':
            if text.compare("insert", ">", "1.0"):
                # easy: delete preceding newline
                text.delete("insert-1c")
            else:
                text.bell() # at start of buffer
            return "break"
        if chars[-1] not in " \t":
            # easy: delete preceding real char
            text.delete("insert-1c")
            return "break"
        # Ick. It may require *inserting* spaces if we back up over a
        # tab character! This is written to be clear, not fast.
        tabwidth = self.tabwidth
        have = len(chars.expandtabs(tabwidth))
        assert have > 0
        want = ((have - 1) // self.indentwidth) * self.indentwidth
        # Debug prompt is multilined....
        if self.context_use_ps1:
            last_line_of_prompt = sys.ps1.split('\n')[-1]
        else:
            last_line_of_prompt = ''
        ncharsdeleted = 0
        while 1:
            if chars == last_line_of_prompt:
                break
            chars = chars[:-1]
            ncharsdeleted = ncharsdeleted + 1
            have = len(chars.expandtabs(tabwidth))
            if have <= want or chars[-1] not in " \t":
                break
        text.undo_block_start()
        text.delete("insert-%dc" % ncharsdeleted, "insert")
        if have < want:
            text.insert("insert", ' ' * (want - have))
        text.undo_block_stop()
        return "break"
    def smart_indent_event(self, event):
        """Tab handler: delete/indent selection or indent one level."""
        # if intraline selection:
        #     delete it
        # elif multiline selection:
        #     do indent-region
        # else:
        #     indent one level
        text = self.text
        first, last = self.get_selection_indices()
        text.undo_block_start()
        try:
            if first and last:
                if index2line(first) != index2line(last):
                    return self.indent_region_event(event)
                text.delete(first, last)
                text.mark_set("insert", first)
            prefix = text.get("insert linestart", "insert")
            raw, effective = classifyws(prefix, self.tabwidth)
            if raw == len(prefix):
                # only whitespace to the left
                self.reindent_to(effective + self.indentwidth)
            else:
                # tab to the next 'stop' within or to right of line's text:
                if self.usetabs:
                    pad = '\t'
                else:
                    effective = len(prefix.expandtabs(self.tabwidth))
                    n = self.indentwidth
                    pad = ' ' * (n - effective % n)
                text.insert("insert", pad)
            text.see("insert")
            return "break"
        finally:
            text.undo_block_stop()
    def newline_and_indent_event(self, event):
        """Enter handler: insert a newline with context-appropriate indentation.

        Uses PyParse to classify the statement being continued (string,
        bracket, backslash, or a fresh statement) and indents accordingly;
        also strips whitespace around the break point.
        """
        text = self.text
        first, last = self.get_selection_indices()
        text.undo_block_start()
        try:
            if first and last:
                text.delete(first, last)
                text.mark_set("insert", first)
            line = text.get("insert linestart", "insert")
            i, n = 0, len(line)
            while i < n and line[i] in " \t":
                i = i+1
            if i == n:
                # the cursor is in or at leading indentation in a continuation
                # line; just inject an empty line at the start
                text.insert("insert linestart", '\n')
                return "break"
            indent = line[:i]
            # strip whitespace before insert point unless it's in the prompt
            i = 0
            last_line_of_prompt = sys.ps1.split('\n')[-1]
            while line and line[-1] in " \t" and line != last_line_of_prompt:
                line = line[:-1]
                i = i+1
            if i:
                text.delete("insert - %d chars" % i, "insert")
            # strip whitespace after insert point
            while text.get("insert") in " \t":
                text.delete("insert")
            # start new line
            text.insert("insert", '\n')

            # adjust indentation for continuations and block
            # open/close first need to find the last stmt
            lno = index2line(text.index('insert'))
            y = PyParse.Parser(self.indentwidth, self.tabwidth)
            if not self.context_use_ps1:
                # Editor window: scan back over progressively larger contexts
                # until a good parse start is found.
                for context in self.num_context_lines:
                    startat = max(lno - context, 1)
                    startatindex = repr(startat) + ".0"
                    rawtext = text.get(startatindex, "insert")
                    y.set_str(rawtext)
                    bod = y.find_good_parse_start(
                              self.context_use_ps1,
                              self._build_char_in_string_func(startatindex))
                    if bod is not None or startat == 1:
                        break
                y.set_lo(bod or 0)
            else:
                # Shell window: parse from the last console prompt.
                r = text.tag_prevrange("console", "insert")
                if r:
                    startatindex = r[1]
                else:
                    startatindex = "1.0"
                rawtext = text.get(startatindex, "insert")
                y.set_str(rawtext)
                y.set_lo(0)

            c = y.get_continuation_type()
            if c != PyParse.C_NONE:
                # The current stmt hasn't ended yet.
                if c == PyParse.C_STRING_FIRST_LINE:
                    # after the first line of a string; do not indent at all
                    pass
                elif c == PyParse.C_STRING_NEXT_LINES:
                    # inside a string which started before this line;
                    # just mimic the current indent
                    text.insert("insert", indent)
                elif c == PyParse.C_BRACKET:
                    # line up with the first (if any) element of the
                    # last open bracket structure; else indent one
                    # level beyond the indent of the line with the
                    # last open bracket
                    self.reindent_to(y.compute_bracket_indent())
                elif c == PyParse.C_BACKSLASH:
                    # if more than one line in this stmt already, just
                    # mimic the current indent; else if initial line
                    # has a start on an assignment stmt, indent to
                    # beyond leftmost =; else to beyond first chunk of
                    # non-whitespace on initial line
                    if y.get_num_lines_in_stmt() > 1:
                        text.insert("insert", indent)
                    else:
                        self.reindent_to(y.compute_backslash_indent())
                else:
                    assert 0, "bogus continuation type %r" % (c,)
                return "break"

            # This line starts a brand new stmt; indent relative to
            # indentation of initial line of closest preceding
            # interesting stmt.
            indent = y.get_base_indent_string()
            text.insert("insert", indent)
            if y.is_block_opener():
                self.smart_indent_event(event)
            elif indent and y.is_block_closer():
                self.smart_backspace_event(event)
            return "break"
        finally:
            text.see("insert")
            text.undo_block_stop()
    # Our editwin provides a is_char_in_string function that works
    # with a Tk text index, but PyParse only knows about offsets into
    # a string. This builds a function for PyParse that accepts an
    # offset.

    def _build_char_in_string_func(self, startindex):
        """Return an offset-based wrapper around is_char_in_string."""
        def inner(offset, _startindex=startindex,
                  _icis=self.is_char_in_string):
            # Translate the string offset into a Tk index relative to start.
            return _icis(_startindex + "+%dc" % offset)
        return inner
    def indent_region_event(self, event):
        """Indent every nonblank line in the selected region by one level."""
        head, tail, chars, lines = self.get_region()
        for pos in range(len(lines)):
            line = lines[pos]
            if line:
                raw, effective = classifyws(line, self.tabwidth)
                effective = effective + self.indentwidth
                lines[pos] = self._make_blanks(effective) + line[raw:]
        self.set_region(head, tail, chars, lines)
        return "break"
    def dedent_region_event(self, event):
        """Dedent every nonblank line in the selected region by one level."""
        head, tail, chars, lines = self.get_region()
        for pos in range(len(lines)):
            line = lines[pos]
            if line:
                raw, effective = classifyws(line, self.tabwidth)
                # Never dedent past column zero.
                effective = max(effective - self.indentwidth, 0)
                lines[pos] = self._make_blanks(effective) + line[raw:]
        self.set_region(head, tail, chars, lines)
        return "break"
    def comment_region_event(self, event):
        """Prefix region lines with '##'.

        The last element of lines is the text after the region's trailing
        newline, hence the len(lines) - 1 bound.
        """
        head, tail, chars, lines = self.get_region()
        for pos in range(len(lines) - 1):
            line = lines[pos]
            lines[pos] = '##' + line
        self.set_region(head, tail, chars, lines)
    def uncomment_region_event(self, event):
        """Strip a leading '##' or '#' from each line in the region."""
        head, tail, chars, lines = self.get_region()
        for pos in range(len(lines)):
            line = lines[pos]
            if not line:
                continue
            if line[:2] == '##':
                line = line[2:]
            elif line[:1] == '#':
                line = line[1:]
            lines[pos] = line
        self.set_region(head, tail, chars, lines)
    def tabify_region_event(self, event):
        """Convert leading whitespace in region lines to tabs (user-chosen width)."""
        head, tail, chars, lines = self.get_region()
        tabwidth = self._asktabwidth()
        for pos in range(len(lines)):
            line = lines[pos]
            if line:
                raw, effective = classifyws(line, tabwidth)
                ntabs, nspaces = divmod(effective, tabwidth)
                lines[pos] = '\t' * ntabs + ' ' * nspaces + line[raw:]
        self.set_region(head, tail, chars, lines)
    def untabify_region_event(self, event):
        """Expand all tabs in region lines to spaces (user-chosen width)."""
        head, tail, chars, lines = self.get_region()
        tabwidth = self._asktabwidth()
        for pos in range(len(lines)):
            lines[pos] = lines[pos].expandtabs(tabwidth)
        self.set_region(head, tail, chars, lines)
    def toggle_tabs_event(self, event):
        """Toggle tab/space indentation after confirmation; reset width to 8."""
        if self.askyesno(
              "Toggle tabs",
              "Turn tabs " + ("on", "off")[self.usetabs] +
              "?\nIndent width " +
              ("will be", "remains at")[self.usetabs] + " 8." +
              "\n Note: a tab is always 8 columns",
              parent=self.text):
            self.usetabs = not self.usetabs
            # Try to prevent inconsistent indentation.
            # User must change indent width manually after using tabs.
            self.indentwidth = 8
        return "break"
    # XXX this isn't bound to anything -- see tabwidth comments
##     def change_tabwidth_event(self, event):
##         new = self._asktabwidth()
##         if new != self.tabwidth:
##             self.tabwidth = new
##             self.set_indentation_params(0, guess=0)
##         return "break"

    def change_indentwidth_event(self, event):
        """Prompt for a new indent width; ignored when tabs are in use."""
        new = self.askinteger(
                  "Indent width",
                  "New indent width (2-16)\n(Always use 8 when using tabs)",
                  parent=self.text,
                  initialvalue=self.indentwidth,
                  minvalue=2,
                  maxvalue=16)
        if new and new != self.indentwidth and not self.usetabs:
            self.indentwidth = new
        return "break"
    def get_region(self):
        """Return (head, tail, chars, lines) for the selection or current line.

        head/tail are line-start/line-end Tk indices, chars the raw text,
        lines that text split on newlines.
        """
        text = self.text
        first, last = self.get_selection_indices()
        if first and last:
            head = text.index(first + " linestart")
            tail = text.index(last + "-1c lineend +1c")
        else:
            head = text.index("insert linestart")
            tail = text.index("insert lineend +1c")
        chars = text.get(head, tail)
        lines = chars.split("\n")
        return head, tail, chars, lines
    def set_region(self, head, tail, chars, lines):
        """Replace the region text with *lines*, reselecting the result.

        Bells and does nothing when the new text equals the old.
        """
        text = self.text
        newchars = "\n".join(lines)
        if newchars == chars:
            text.bell()
            return
        text.tag_remove("sel", "1.0", "end")
        text.mark_set("insert", head)
        text.undo_block_start()
        text.delete(head, tail)
        text.insert(head, newchars)
        text.undo_block_stop()
        text.tag_add("sel", head, "insert")
# Make string that displays as n leading blanks.
def _make_blanks(self, n):
if self.usetabs:
ntabs, nspaces = divmod(n, self.tabwidth)
return '\t' * ntabs + ' ' * nspaces
else:
return ' ' * n
    # Delete from beginning of line to insert point, then reinsert
    # column logical (meaning use tabs if appropriate) spaces.
    def reindent_to(self, column):
        """Rewrite the current line's leading whitespace to *column* columns."""
        text = self.text
        text.undo_block_start()
        if text.compare("insert linestart", "!=", "insert"):
            text.delete("insert linestart", "insert")
        if column:
            text.insert("insert", self._make_blanks(column))
        text.undo_block_stop()
    def _asktabwidth(self):
        """Prompt for a tab width; fall back to self.tabwidth on cancel."""
        return self.askinteger(
            "Tab width",
            "Columns per tab? (2-16)",
            parent=self.text,
            initialvalue=self.indentwidth,
            minvalue=2,
            maxvalue=16) or self.tabwidth
    # Guess indentwidth from text content.
    # Return guessed indentwidth. This should not be believed unless
    # it's in a reasonable range (e.g., it will be 0 if no indented
    # blocks are found).
    def guess_indent(self):
        """Guess this buffer's indent width from its first indented block."""
        opener, indented = IndentSearcher(self.text, self.tabwidth).run()
        if opener and indented:
            raw, indentsmall = classifyws(opener, self.tabwidth)
            raw, indentlarge = classifyws(indented, self.tabwidth)
        else:
            indentsmall = indentlarge = 0
        return indentlarge - indentsmall
# "line.col" -> line, as an int
def index2line(index):
return int(float(index))
# Look at the leading whitespace in s.
# Return pair (# of leading ws characters,
#  effective # of leading blanks after expanding
#  tabs to width tabwidth)
def classifyws(s, tabwidth):
    """Return (raw, effective) for the leading whitespace of *s*.

    raw is the count of leading blank/tab characters; effective is the
    column width they occupy once tabs expand to *tabwidth*.
    """
    raw = 0
    effective = 0
    for ch in s:
        if ch == ' ':
            effective += 1
        elif ch == '\t':
            # Advance to the next tab stop.
            effective = (effective // tabwidth + 1) * tabwidth
        else:
            break
        raw += 1
    return raw, effective
# Alias the tokenize module as _tokenize and drop the public name so the
# identifier 'tokenize' stays free for local use below.
import tokenize
_tokenize = tokenize
del tokenize
class IndentSearcher(object):
    """Scan a Text widget for the first block opener and its indented body.

    run() returns (line containing block opener, line containing stmt);
    either or both may be None.
    """

    # .run() chews over the Text widget, looking for a block opener
    # and the stmt following it. Returns a pair,
    #     (line containing block opener, line containing stmt)
    # Either or both may be None.

    def __init__(self, text, tabwidth):
        self.text = text
        self.tabwidth = tabwidth
        self.i = self.finished = 0
        self.blkopenline = self.indentedline = None

    def readline(self):
        """Feed the tokenizer one widget line at a time; "" at the end."""
        if self.finished:
            return ""
        i = self.i = self.i + 1
        mark = repr(i) + ".0"
        if self.text.compare(mark, ">=", "end"):
            return ""
        return self.text.get(mark, mark + " lineend+1c")

    def tokeneater(self, type, token, start, end, line,
                   INDENT=_tokenize.INDENT,
                   NAME=_tokenize.NAME,
                   OPENERS=('class', 'def', 'for', 'if', 'try', 'while')):
        """Remember the last block-opener line and the first INDENT after it."""
        if self.finished:
            pass
        elif type == NAME and token in OPENERS:
            self.blkopenline = line
        elif type == INDENT and self.blkopenline:
            self.indentedline = line
            self.finished = 1

    def run(self):
        """Tokenize the buffer and return (opener line, indented line)."""
        # NOTE(review): temporarily patches the tokenize module's tabsize;
        # confirm the module still exposes this attribute on this Python.
        save_tabsize = _tokenize.tabsize
        _tokenize.tabsize = self.tabwidth
        try:
            try:
                tokens = _tokenize.generate_tokens(self.readline)
                for token in tokens:
                    self.tokeneater(*token)
            except (_tokenize.TokenError, SyntaxError):
                # since we cut off the tokenizer early, we can trigger
                # spurious errors
                pass
        finally:
            _tokenize.tabsize = save_tabsize
        return self.blkopenline, self.indentedline
### end autoindent code ###
def prepstr(s):
    """Split out the menu mnemonic underscore: prepstr("Co_py") -> (2, "Copy").

    Returns (-1, s) unchanged when *s* contains no underscore.
    """
    head, sep, tail = s.partition('_')
    if sep:
        return len(head), head + tail
    return -1, s
# Tk keysym -> display character, used when formatting menu accelerators.
keynames = {
 'bracketleft': '[',
 'bracketright': ']',
 'slash': '/',
}
def get_accelerator(keydefs, eventname):
    """Return a human-readable accelerator string for *eventname*.

    Formats the first key sequence bound to the event (e.g.
    "<Control-Key-x>" -> "Ctrl+X"); returns "" when unbound or for
    events suppressed on OS X Cocoa Tk.
    """
    keylist = keydefs.get(eventname)
    # issue10940: temporary workaround to prevent hang with OS X Cocoa Tk 8.5
    # if not keylist:
    if (not keylist) or (macosxSupport.runningAsOSXApp() and eventname in {
                            "<<open-module>>",
                            "<<goto-line>>",
                            "<<change-indentwidth>>"}):
        return ""
    s = keylist[0]
    # Uppercase single-letter key names, map keysyms, then strip Tk syntax.
    s = re.sub(r"-[a-z]\b", lambda m: m.group().upper(), s)
    s = re.sub(r"\b\w+\b", lambda m: keynames.get(m.group(), m.group()), s)
    s = re.sub("Key-", "", s)
    s = re.sub("Cancel","Ctrl-Break",s)   # dscherer@cmu.edu
    s = re.sub("Control-", "Ctrl-", s)
    s = re.sub("-", "+", s)
    s = re.sub("><", " ", s)
    s = re.sub("<", "", s)
    s = re.sub(">", "", s)
    return s
def fixwordbreaks(root):
    """Make Tk's word operations treat an identifier as a single word."""
    # Make sure that Tk's double-click and next/previous word
    # operations use our definition of a word (i.e. an identifier)
    tk = root.tk
    tk.call('tcl_wordBreakAfter', 'a b', 0) # make sure word.tcl is loaded
    tk.call('set', 'tcl_wordchars', '[a-zA-Z0-9_]')
    tk.call('set', 'tcl_nonwordchars', '[^a-zA-Z0-9_]')
def test():
    """Open a bare editor window for manual testing (optional file argument)."""
    root = Tk()
    fixwordbreaks(root)
    root.withdraw()
    if sys.argv[1:]:
        filename = sys.argv[1]
    else:
        filename = None
    edit = EditorWindow(root=root, filename=filename)
    edit.set_close_hook(root.quit)
    edit.text.bind("<<close-all-windows>>", edit.close_event)
    root.mainloop()
    root.destroy()
# Manual smoke test: run this module directly to open an editor window.
if __name__ == '__main__':
    test()
| lgpl-3.0 |
icomms/wqmanager | apps/domain/models.py | 1 | 6972 | from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.db import models
from domain import Permissions
from wqm.models import WqmAuthority
from locations.models import LocationType
##############################################################################################################
#
# Originally had my own hacky global storage of content type, but it turns out that contenttype.models
# wisely caches content types! No hit to the db beyond the first call - no need for us to do our own
# custom caching.
#
# See ContentType.get_for_model() code for details.
class Domain(models.Model):
    '''Domain is the highest level collection of people/stuff
    in the system. Pretty much everything happens at the
    domain-level, including user membership, permission to
    see data, reports, charts, etc.'''
    name = models.CharField(max_length=64, unique=True)
    full_name = models.CharField(max_length = 100, null=True)
    is_active = models.BooleanField(default=False)
    #description = models.CharField(max_length=255, null=True, blank=True)
    #timezone = models.CharField(max_length=64,null=True)

    @staticmethod
    def active_for_user(user):
        """Return active domains in which *user* has an active membership.

        Note that User.is_active is not checked here - we're only concerned
        about usable domains in which the user can theoretically participate,
        not whether the user is cleared to login.
        """
        return Domain.objects.filter(
            membership__member_type = ContentType.objects.get_for_model(User),
            membership__member_id = user.id,
            membership__is_active=True,  # Looks in membership table
            is_active=True)              # Looks in domain table

    def save(self, *args, **kwargs):
        """Save the domain, keeping its companion WqmAuthority in sync."""
        # A primary key means the row already exists: update the matching
        # WqmAuthority instead of creating a second one.
        is_edit = self.pk is not None
        super(Domain, self).save(*args, **kwargs)
        if is_edit:
            wqmauthority = WqmAuthority.objects.get(domain=self)
            wqmauthority.code = self.name
            wqmauthority.name = self.full_name
            wqmauthority.save()
        else:
            # Renamed local from 'type' to avoid shadowing the builtin.
            location_type = LocationType.objects.get(name="authority")
            wqmauthority = WqmAuthority(name=self.full_name, domain=self,
                                        type=location_type, code=self.name)
            wqmauthority.save()

    def __unicode__(self):
        return self.name
##############################################################################################################
#
# Use cases:
#
# Get all members in a domain:
# Member.objects.filter(member_type = 3, domain = 1) then iterate - slow, because of one query (for User) per row
# User.objects.filter(membership__domain = 2) - fast, but requires the addition of a GenericRelation to User.
# See UserInDomain, below.
#
# Get all domains to which a member belongs:
# User.objects.get(id = 1).membership.all() and then iterate to pick out domains - slow, because of one query
# (for Domain) per row. Requires GenericRelation on User.
# Member.objects.filter(member_type = 3, member_id = 1).query.as_sql() Generate same SQL, and require same
# slow iteration
# Domain.objects.filter(membership__member_type = 3, membership__member_id = 1) - fast, and requires no new fields
# (as Domain is a FK of Member)
#
member_limits = {'model__in':('user', 'formdatagroup')}
class Membership(models.Model):
    """Link between a Domain and a member (a User or a FormDataGroup).

    The member side is a generic foreign key, limited to the models listed
    in member_limits above.
    """
    domain = models.ForeignKey(Domain)
    member_type = models.ForeignKey(ContentType, limit_choices_to=member_limits)
    member_id = models.PositiveIntegerField()
    member_object = generic.GenericForeignKey('member_type', 'member_id')
    is_active = models.BooleanField(default=False)

    def __unicode__(self):
        return str(self.member_type) + str(self.member_id) + str(self.member_object)
##############################################################################################################
class RegistrationRequest(models.Model):
    """A pending or confirmed request to register a new Domain.

    The request_* fields are filled in server-side when the request is made;
    the confirm_* fields stay blank until the activation link is clicked.
    """
    tos_confirmed = models.BooleanField(default=False)
    # No verbose name on times and IPs - filled in on server
    request_time = models.DateTimeField()
    request_ip = models.IPAddressField()
    activation_guid = models.CharField(max_length=32, unique=True)
    # confirm info is blank until a confirming click is received
    confirm_time = models.DateTimeField(null=True, blank=True)
    confirm_ip = models.IPAddressField(null=True, blank=True)
    domain = models.OneToOneField(Domain)
    new_user = models.ForeignKey(User, related_name='new_user') # Not clear if we'll always create a new user - might be many reqs to one user, thus FK
    # requesting_user is only filled in if a logged-in user requests a domain.
    requesting_user = models.ForeignKey(User, related_name='requesting_user', null=True, blank=True) # blank and null -> FK is optional.

    class Meta:
        db_table = 'domain_registration_request'

    # To be added:
    # language
    # number pref
    # currency pref
    # date pref
    # time pref
##############################################################################################################
class Settings(models.Model):
    """Per-domain configuration knobs (currently just the user cap)."""
    domain = models.OneToOneField(Domain)
    max_users = models.PositiveIntegerField()
    # To be added - all of the date, time, etc. fields that will go into RegistrationRequest
##############################################################################################################
#
# http://bolhoed.net/blog/how-to-dynamically-add-fields-to-a-django-model shows:
#
# User.add_to_class('membership', generic.GenericRelation(Membership, content_type_field='member_type', object_id_field='member_id'))
#
# Rather than that hackery, I tried to implemenet a trivial proxy model for User, containing just the
# GenericRelation field. Doesn't work, though! Django complains about a field being defined on a proxy model.
#
# Looks like we have to enable the above hackery if we want an easy means of filtering users in a domain. Makes
# life easier, too, in that views will have access to this information.
#
# Dynamically attach the reverse generic relation to User so queries like
# User.objects.filter(domain_membership__domain=d) work; see the discussion
# above for why a proxy model can't be used instead.
User.add_to_class('domain_membership',
                  generic.GenericRelation( Membership, content_type_field='member_type', object_id_field='member_id' ) )
##############################################################################################################
# Monkeypatch a function onto User to tell if user is administrator of selected domain
def _admin_p (self):
dom = getattr(self, 'selected_domain', None)
if dom is not None:
return self.has_row_perm(dom, Permissions.ADMINISTRATOR)
else:
return False
User.is_selected_dom_admin = _admin_p
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import copy
import gyp.common
import os
import os.path
import re
import shlex
import subprocess
import sys
import tempfile
from gyp.common import GypError
# Populated lazily by XcodeVersion, for efficiency, and to fix an issue when
# "xcodebuild" is called too quickly (it has been found to return incorrect
# version number).
XCODE_VERSION_CACHE = None
# Populated lazily by GetXcodeArchsDefault, to an |XcodeArchsDefault| instance
# corresponding to the installed version of Xcode.
XCODE_ARCHS_DEFAULT_CACHE = None
def XcodeArchsVariableMapping(archs, archs_including_64_bit=None):
  """Constructs a dictionary with expansion for $(ARCHS_STANDARD) variable,
  and optionally for $(ARCHS_STANDARD_INCLUDING_64_BIT)."""
  expansion = {'$(ARCHS_STANDARD)': archs}
  # Only add the 64-bit variant when a non-empty expansion was supplied.
  if archs_including_64_bit:
    expansion['$(ARCHS_STANDARD_INCLUDING_64_BIT)'] = archs_including_64_bit
  return expansion
class XcodeArchsDefault(object):
  """A class to resolve ARCHS variable from xcode_settings, resolving Xcode
  macros and implementing filtering by VALID_ARCHS. The expansion of macros
  depends on the SDKROOT used ("macosx", "iphoneos", "iphonesimulator") and
  on the version of Xcode.
  """

  # Match variable like $(ARCHS_STANDARD).
  variable_pattern = re.compile(r'\$\([a-zA-Z_][a-zA-Z0-9_]*\)$')

  def __init__(self, default, mac, iphonesimulator, iphoneos):
    # |default| is used when ARCHS is unset; the three mappings expand the
    # $(ARCHS_*) variables for each SDK family.
    self._default = (default,)
    self._archs = {'mac': mac, 'ios': iphoneos, 'iossim': iphonesimulator}

  def _VariableMapping(self, sdkroot):
    """Returns the dictionary of variable mapping depending on the SDKROOT."""
    sdkroot = sdkroot.lower()
    if 'iphoneos' in sdkroot:
      return self._archs['ios']
    elif 'iphonesimulator' in sdkroot:
      return self._archs['iossim']
    else:
      return self._archs['mac']

  def _ExpandArchs(self, archs, sdkroot):
    """Expands variables references in ARCHS, and remove duplicates."""
    variable_mapping = self._VariableMapping(sdkroot)
    expanded_archs = []
    for arch in archs:
      if self.variable_pattern.match(arch):
        try:
          for expanded in variable_mapping[arch]:
            if expanded not in expanded_archs:
              expanded_archs.append(expanded)
        except KeyError:
          # Fixed: the original Python 2 print statement is a syntax error
          # on Python 3; the parenthesized form works on both. The unused
          # 'as e' binding was dropped as well.
          print('Warning: Ignoring unsupported variable "%s".' % arch)
      elif arch not in expanded_archs:
        expanded_archs.append(arch)
    return expanded_archs

  def ActiveArchs(self, archs, valid_archs, sdkroot):
    """Expands variables references in ARCHS, and filter by VALID_ARCHS if it
    is defined (if not set, Xcode accept any value in ARCHS, otherwise, only
    values present in VALID_ARCHS are kept)."""
    expanded_archs = self._ExpandArchs(archs or self._default, sdkroot or '')
    if valid_archs:
      expanded_archs = [arch for arch in expanded_archs
                        if arch in valid_archs]
    return expanded_archs
def GetXcodeArchsDefault():
  """Returns the |XcodeArchsDefault| object to use to expand ARCHS for the
  installed version of Xcode. The default values used by Xcode for ARCHS
  and the expansion of the variables depends on the version of Xcode used.

  For all version anterior to Xcode 5.0 or posterior to Xcode 5.1 included
  uses $(ARCHS_STANDARD) if ARCHS is unset, while Xcode 5.0 to 5.0.2 uses
  $(ARCHS_STANDARD_INCLUDING_64_BIT). This variable was added to Xcode 5.0
  and deprecated with Xcode 5.1.

  For "macosx" SDKROOT, all version starting with Xcode 5.0 includes 64-bit
  architecture as part of $(ARCHS_STANDARD) and default to only building it.

  For "iphoneos" and "iphonesimulator" SDKROOT, 64-bit architectures are part
  of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0. From Xcode 5.1, they
  are also part of $(ARCHS_STANDARD).

  All those rules are coded in the construction of the |XcodeArchsDefault|
  object to use depending on the version of Xcode detected. The object is
  cached for performance reasons."""
  global XCODE_ARCHS_DEFAULT_CACHE
  if XCODE_ARCHS_DEFAULT_CACHE:
    return XCODE_ARCHS_DEFAULT_CACHE
  xcode_version, _ = XcodeVersion()
  if xcode_version < '0500':
    # Xcode 4.x: 32-bit defaults, no *_INCLUDING_64_BIT variable yet.
    XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
        '$(ARCHS_STANDARD)',
        XcodeArchsVariableMapping(['i386']),
        XcodeArchsVariableMapping(['i386']),
        XcodeArchsVariableMapping(['armv7']))
  elif xcode_version < '0510':
    # Xcode 5.0.x: $(ARCHS_STANDARD_INCLUDING_64_BIT) is the default.
    XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
        '$(ARCHS_STANDARD_INCLUDING_64_BIT)',
        XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
        XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']),
        XcodeArchsVariableMapping(
            ['armv7', 'armv7s'],
            ['armv7', 'armv7s', 'arm64']))
  else:
    # Xcode 5.1+: 64-bit is folded into $(ARCHS_STANDARD).
    XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
        '$(ARCHS_STANDARD)',
        XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
        XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']),
        XcodeArchsVariableMapping(
            ['armv7', 'armv7s', 'arm64'],
            ['armv7', 'armv7s', 'arm64']))
  return XCODE_ARCHS_DEFAULT_CACHE
class XcodeSettings(object):
  """A class that understands the gyp 'xcode_settings' object."""

  # Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
  # at class-level for efficiency.
  _sdk_path_cache = {}
  # Reverse mapping (path -> SDK name), filled in alongside _sdk_path_cache.
  _sdk_root_cache = {}

  # Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so
  # cached at class-level for efficiency.
  _plist_cache = {}

  # Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so
  # cached at class-level for efficiency.
  _codesigning_key_cache = {}
  def __init__(self, spec):
    # spec: the gyp target dictionary this object interprets.
    self.spec = spec

    self.isIOS = False

    # Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
    # This means self.xcode_settings[config] always contains all settings
    # for that config -- the per-target settings as well. Settings that are
    # the same for all configs are implicitly per-target settings.
    self.xcode_settings = {}
    configs = spec['configurations']
    for configname, config in configs.iteritems():
      self.xcode_settings[configname] = config.get('xcode_settings', {})
      self._ConvertConditionalKeys(configname)
      # Any config with an iOS deployment target marks the whole target iOS.
      if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
                                             None):
        self.isIOS = True

    # This is only non-None temporarily during the execution of some methods.
    self.configname = None

    # Used by _AdjustLibrary to match .a and .dylib entries in libraries.
    self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
  def _ConvertConditionalKeys(self, configname):
    """Converts or warns on conditional keys. Xcode supports conditional keys,
    such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. This is a partial implementation
    with some keys converted while the rest force a warning."""
    settings = self.xcode_settings[configname]
    conditional_keys = [key for key in settings if key.endswith(']')]
    for key in conditional_keys:
      # If you need more, speak up at http://crbug.com/122592
      if key.endswith("[sdk=iphoneos*]"):
        if configname.endswith("iphoneos"):
          new_key = key.split("[")[0]
          settings[new_key] = settings[key]
      else:
        print 'Warning: Conditional keys not implemented, ignoring:', \
              ' '.join(conditional_keys)
      # The conditional key itself is always dropped, converted or not.
      del settings[key]
  def _Settings(self):
    # Settings dict for the currently selected configuration. Only valid
    # while a public method has set self.configname.
    assert self.configname
    return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
  def _WarnUnimplemented(self, test_key):
    # Warn when a setting that gyp does not emulate is present in the config.
    if test_key in self._Settings():
      print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def IsBinaryOutputFormat(self, configname):
default = "binary" if self.isIOS else "xml"
format = self.xcode_settings[configname].get('INFOPLIST_OUTPUT_FORMAT',
default)
return format == "binary"
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def _IsIosAppExtension(self):
return int(self.spec.get('ios_app_extension', 0)) != 0
def _IsIosWatchKitExtension(self):
return int(self.spec.get('ios_watchkit_extension', 0)) != 0
def _IsIosWatchApp(self):
return int(self.spec.get('ios_watch_app', 0)) != 0
  def GetFrameworkVersion(self):
    """Returns the framework version of the current target. Only valid for
    bundles."""
    assert self._IsBundle()
    # Xcode's default framework version is 'A'.
    return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
  def GetWrapperExtension(self):
    """Returns the bundle extension (.app, .framework, .plugin, etc). Only
    valid for bundles."""
    assert self._IsBundle()
    if self.spec['type'] in ('loadable_module', 'shared_library'):
      default_wrapper_extension = {
        'loadable_module': 'bundle',
        'shared_library': 'framework',
      }[self.spec['type']]
      wrapper_extension = self.GetPerTargetSetting(
          'WRAPPER_EXTENSION', default=default_wrapper_extension)
      return '.' + self.spec.get('product_extension', wrapper_extension)
    elif self.spec['type'] == 'executable':
      # App extensions and WatchKit extensions use .appex; plain apps .app.
      if self._IsIosAppExtension() or self._IsIosWatchKitExtension():
        return '.' + self.spec.get('product_extension', 'appex')
      else:
        return '.' + self.spec.get('product_extension', 'app')
    else:
      assert False, "Don't know extension for '%s', target '%s'" % (
          self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
  def GetBundleContentsFolderPath(self):
    """Returns the qualified path to the bundle's contents folder. E.g.
    Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
    if self.isIOS:
      # iOS bundles are shallow: everything lives in the bundle root.
      return self.GetWrapperName()
    assert self._IsBundle()
    if self.spec['type'] == 'shared_library':
      return os.path.join(
          self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
    else:
      # loadable_modules have a 'Contents' folder like executables.
      return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
if self.isIOS:
return self.GetBundleContentsFolderPath()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
  def GetProductType(self):
    """Returns the PRODUCT_TYPE of this target."""
    # The iOS-specific target flavors all require mac_bundle and map to
    # dedicated Apple product-type identifiers.
    if self._IsIosAppExtension():
      assert self._IsBundle(), ('ios_app_extension flag requires mac_bundle '
          '(target %s)' % self.spec['target_name'])
      return 'com.apple.product-type.app-extension'
    if self._IsIosWatchKitExtension():
      assert self._IsBundle(), ('ios_watchkit_extension flag requires '
          'mac_bundle (target %s)' % self.spec['target_name'])
      return 'com.apple.product-type.watchkit-extension'
    if self._IsIosWatchApp():
      assert self._IsBundle(), ('ios_watch_app flag requires mac_bundle '
          '(target %s)' % self.spec['target_name'])
      return 'com.apple.product-type.application.watchapp'
    if self._IsBundle():
      return {
        'executable': 'com.apple.product-type.application',
        'loadable_module': 'com.apple.product-type.bundle',
        'shared_library': 'com.apple.product-type.framework',
      }[self.spec['type']]
    else:
      return {
        'executable': 'com.apple.product-type.tool',
        'loadable_module': 'com.apple.product-type.library.dynamic',
        'shared_library': 'com.apple.product-type.library.dynamic',
        'static_library': 'com.apple.product-type.library.static',
      }[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('shared_library') or self.isIOS:
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
  def _GetStandaloneExecutablePrefix(self):
    # Filename prefix for non-bundle products ('lib' for libraries), unless
    # overridden by 'product_prefix'.
    return self.spec.get('product_prefix', {
        'executable': '',
        'static_library': 'lib',
        'shared_library': 'lib',
        # Non-bundled loadable_modules are called foo.so for some reason
        # (that is, .so and no prefix) with the xcode build -- match that.
        'loadable_module': '',
    }[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def GetActiveArchs(self, configname):
"""Returns the architectures this target should be built for."""
config_settings = self.xcode_settings[configname]
xcode_archs_default = GetXcodeArchsDefault()
return xcode_archs_default.ActiveArchs(
config_settings.get('ARCHS'),
config_settings.get('VALID_ARCHS'),
config_settings.get('SDKROOT'))
def _GetSdkVersionInfoItem(self, sdk, infoitem):
# xcodebuild requires Xcode and can't run on Command Line Tools-only
# systems from 10.7 onward.
# Since the CLT has no SDK paths anyway, returning None is the
# most sensible route and should still do the right thing.
try:
return GetStdout(['xcodebuild', '-version', '-sdk', sdk, infoitem])
except:
pass
def _SdkRoot(self, configname):
if configname is None:
configname = self.configname
return self.GetPerConfigSetting('SDKROOT', configname, default='')
def _SdkPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root.startswith('/'):
return sdk_root
return self._XcodeSdkPath(sdk_root)
  def _XcodeSdkPath(self, sdk_root):
    # Resolve a named SDK ('' selects xcodebuild's default) to its path,
    # memoizing both directions in the class-level caches.
    if sdk_root not in XcodeSettings._sdk_path_cache:
      sdk_path = self._GetSdkVersionInfoItem(sdk_root, 'Path')
      XcodeSettings._sdk_path_cache[sdk_root] = sdk_path
      if sdk_root:
        XcodeSettings._sdk_root_cache[sdk_path] = sdk_root
    return XcodeSettings._sdk_path_cache[sdk_root]
  def _AppendPlatformVersionMinFlags(self, lst):
    """Append the -m*-version-min flags derived from the deployment targets."""
    self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
    if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
      # TODO: Implement this better?
      # Simulator vs. device is inferred from the SDK directory name.
      sdk_path_basename = os.path.basename(self._SdkPath())
      if sdk_path_basename.lower().startswith('iphonesimulator'):
        self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
                      '-mios-simulator-version-min=%s')
      else:
        self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
                      '-miphoneos-version-min=%s')
  def GetCflags(self, configname, arch=None):
    """Returns flags that need to be added to .c, .cc, .m, and .mm
    compilations.

    Flag order matters to downstream consumers; settings are translated in
    a fixed sequence.
    """
    # This functions (and the similar ones below) do not offer complete
    # emulation of all xcode_settings keys. They're implemented on demand.

    self.configname = configname
    cflags = []

    sdk_root = self._SdkPath()
    if 'SDKROOT' in self._Settings() and sdk_root:
      cflags.append('-isysroot %s' % sdk_root)

    if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
      cflags.append('-Wconstant-conversion')

    if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
      cflags.append('-funsigned-char')

    if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
      cflags.append('-fasm-blocks')

    if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
      if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
        cflags.append('-mdynamic-no-pic')
    else:
      pass
      # TODO: In this case, it depends on the target. xcode passes
      # mdynamic-no-pic by default for executable and possibly static lib
      # according to mento

    if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
      cflags.append('-mpascal-strings')

    self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')

    if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
      dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
      if dbg_format == 'dwarf':
        cflags.append('-gdwarf-2')
      elif dbg_format == 'stabs':
        raise NotImplementedError('stabs debug format is not supported yet.')
      elif dbg_format == 'dwarf-with-dsym':
        # TODO(thakis): this is exactly the same as 'dwarf'; the dSYM is
        # produced by a separate postbuild step.
        cflags.append('-gdwarf-2')
      else:
        raise NotImplementedError('Unknown debug format %s' % dbg_format)

    if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
      cflags.append('-fstrict-aliasing')
    elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
      cflags.append('-fno-strict-aliasing')

    if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
      cflags.append('-fvisibility=hidden')

    if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
      cflags.append('-Werror')

    if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
      cflags.append('-Wnewline-eof')

    # In Xcode, this is only activated when GCC_COMPILER_VERSION is clang or
    # llvm-gcc. It also requires a fairly recent libtool, and
    # if the system clang isn't used, DYLD_LIBRARY_PATH needs to contain the
    # path to the libLTO.dylib that matches the used clang.
    if self._Test('LLVM_LTO', 'YES', default='NO'):
      cflags.append('-flto')

    self._AppendPlatformVersionMinFlags(cflags)

    # TODO:
    if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
      self._WarnUnimplemented('COPY_PHASE_STRIP')
    self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
    self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')

    # TODO: This is exported correctly, but assigning to it is not supported.
    self._WarnUnimplemented('MACH_O_TYPE')
    self._WarnUnimplemented('PRODUCT_TYPE')

    if arch is not None:
      archs = [arch]
    else:
      assert self.configname
      archs = self.GetActiveArchs(self.configname)
    if len(archs) != 1:
      # TODO: Supporting fat binaries will be annoying.
      self._WarnUnimplemented('ARCHS')
      archs = ['i386']
    cflags.append('-arch ' + archs[0])

    # SSE flags only make sense on Intel architectures.
    if archs[0] in ('i386', 'x86_64'):
      if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse3')
      if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
                    default='NO'):
        cflags.append('-mssse3')  # Note 3rd 's'.
      if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse4.1')
      if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse4.2')

    cflags += self._Settings().get('WARNING_CFLAGS', [])

    if sdk_root:
      framework_root = sdk_root
    else:
      framework_root = ''
    config = self.spec['configurations'][self.configname]
    framework_dirs = config.get('mac_framework_dirs', [])
    for directory in framework_dirs:
      cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root))

    self.configname = None
    return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi':
cflags_c.append('-ansi')
else:
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
  def GetCflagsCC(self, configname):
    """Returns flags that need to be added to .cc, and .mm compilations."""
    self.configname = configname
    cflags_cc = []

    clang_cxx_language_standard = self._Settings().get(
        'CLANG_CXX_LANGUAGE_STANDARD')
    # Note: Don't make c++0x to c++11 so that c++0x can be used with older
    # clangs that don't understand c++11 yet (like Xcode 4.2's).
    if clang_cxx_language_standard:
      cflags_cc.append('-std=%s' % clang_cxx_language_standard)

    self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')

    if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
      cflags_cc.append('-fno-rtti')
    if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
      cflags_cc.append('-fno-exceptions')
    if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
      cflags_cc.append('-fvisibility-inlines-hidden')
    if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
      cflags_cc.append('-fno-threadsafe-statics')
    # Note: This flag is a no-op for clang, it only has an effect for gcc.
    if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
      cflags_cc.append('-Wno-invalid-offsetof')

    # Expand $(inherited) in OTHER_CPLUSPLUSFLAGS to OTHER_CFLAGS,
    # mirroring Xcode's inheritance behavior for this one variable.
    other_ccflags = []
    for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
      # TODO: More general variable expansion. Missing in many other places too.
      if flag in ('$inherited', '$(inherited)', '${inherited}'):
        flag = '$OTHER_CFLAGS'
      if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
        other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
      else:
        other_ccflags.append(flag)
    cflags_cc += other_ccflags

    self.configname = None
    return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def _AddObjectiveCARCFlags(self, flags):
if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'):
flags.append('-fobjc-arc')
def _AddObjectiveCMissingPropertySynthesisFlags(self, flags):
if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS',
'YES', default='NO'):
flags.append('-Wobjc-missing-property-synthesis')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self._AddObjectiveCARCFlags(cflags_objc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
self._AddObjectiveCARCFlags(cflags_objcc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
  def GetInstallName(self):
    """Return LD_DYLIB_INSTALL_NAME for this target."""
    # Xcode sets this for shared_libraries, and for nonbundled
    # loadable_modules.
    if (self.spec['type'] != 'shared_library' and
        (self.spec['type'] != 'loadable_module' or self._IsBundle())):
      return None

    default_install_name = \
        '$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
    install_name = self.GetPerTargetSetting(
        'LD_DYLIB_INSTALL_NAME', default=default_install_name)

    # Hardcode support for the variables used in chromium for now, to
    # unblock people using the make build.
    if '$' in install_name:
      assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
          '$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
          'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
          'yet in target \'%s\' (got \'%s\')' %
              (self.spec['target_name'], install_name))

      install_name = install_name.replace(
          '$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
          self._StandardizePath(self.GetInstallNameBase()))
      if self._IsBundle():
        # These are only valid for bundles, hence the |if|.
        install_name = install_name.replace(
            '$(WRAPPER_NAME)', self.GetWrapperName())
        install_name = install_name.replace(
            '$(PRODUCT_NAME)', self.GetProductName())
      else:
        assert '$(WRAPPER_NAME)' not in install_name
        assert '$(PRODUCT_NAME)' not in install_name

      install_name = install_name.replace(
          '$(EXECUTABLE_PATH)', self.GetExecutablePath())
    return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = r'(\S+)'
WORD = r'\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
  """Returns flags that need to be passed to the linker.

  Args:
      configname: The name of the configuration to get ld flags for.
      product_dir: The directory where products such static and dynamic
          libraries are placed. This is added to the library search path.
      gyp_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build direcotry.
      arch: Architecture to link for, or None to derive it from ARCHS.
  """
  self.configname = configname
  ldflags = []

  # The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
  # can contain entries that depend on this. Explicitly absolutify these.
  for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
    ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))

  if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
    ldflags.append('-Wl,-dead_strip')

  if self._Test('PREBINDING', 'YES', default='NO'):
    ldflags.append('-Wl,-prebind')

  self._Appendf(
      ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
  self._Appendf(
      ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')

  self._AppendPlatformVersionMinFlags(ldflags)

  if 'SDKROOT' in self._Settings() and self._SdkPath():
    ldflags.append('-isysroot ' + self._SdkPath())

  for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
    ldflags.append('-L' + gyp_to_build_path(library_path))

  if 'ORDER_FILE' in self._Settings():
    ldflags.append('-Wl,-order_file ' +
                   '-Wl,' + gyp_to_build_path(
                                self._Settings()['ORDER_FILE']))

  if arch is not None:
    archs = [arch]
  else:
    assert self.configname
    archs = self.GetActiveArchs(self.configname)
  if len(archs) != 1:
    # TODO: Supporting fat binaries will be annoying.
    self._WarnUnimplemented('ARCHS')
    archs = ['i386']
  ldflags.append('-arch ' + archs[0])

  # Xcode adds the product directory by default.
  ldflags.append('-L' + product_dir)

  install_name = self.GetInstallName()
  if install_name and self.spec['type'] != 'loadable_module':
    ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))

  for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
    ldflags.append('-Wl,-rpath,' + rpath)

  sdk_root = self._SdkPath()
  if not sdk_root:
    sdk_root = ''
  config = self.spec['configurations'][self.configname]
  framework_dirs = config.get('mac_framework_dirs', [])
  for directory in framework_dirs:
    ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))

  is_extension = self._IsIosAppExtension() or self._IsIosWatchKitExtension()
  if sdk_root and is_extension:
    # Adds the link flags for extensions. These flags are common for all
    # extensions and provide loader and main function.
    # These flags reflect the compilation options used by xcode to compile
    # extensions.
    ldflags.append('-lpkstart')
    # Fix: XcodeVersion() returns a (version, build) tuple; the previous
    # comparison `XcodeVersion() < '0900'` compared a tuple against a string
    # (never a real version check under Python 2's type-name ordering), so
    # the PlugInKit line was never appended. Compare the version component.
    if XcodeVersion()[0] < '0900':
      ldflags.append(sdk_root +
          '/System/Library/PrivateFrameworks/PlugInKit.framework/PlugInKit')
    ldflags.append('-fapplication-extension')
    ldflags.append('-Xlinker -rpath '
        '-Xlinker @executable_path/../../Frameworks')

  self._Appendf(ldflags, 'CLANG_CXX_LIBRARY', '-stdlib=%s')

  self.configname = None
  return ldflags
def GetLibtoolflags(self, configname):
  """Returns flags that need to be passed to the static linker.

  Args:
      configname: The name of the configuration to get ld flags for.
  """
  self.configname = configname
  # Return a fresh copy so callers can't mutate the settings list.
  libtoolflags = list(self._Settings().get('OTHER_LDFLAGS', []))
  # TODO(thakis): ARCHS?
  self.configname = None
  return libtoolflags
def GetPerTargetSettings(self):
  """Gets a dict of all the per-target settings. This will only fetch keys
  whose values are the same across all configurations."""
  # items() instead of the Python-2-only iteritems(): behaviorally identical
  # on Python 2 and also works on Python 3.
  first_pass = True
  result = {}
  for configname in sorted(self.xcode_settings.keys()):
    if first_pass:
      # Seed with a copy of the first configuration's settings.
      result = dict(self.xcode_settings[configname])
      first_pass = False
    else:
      # Drop any key whose value differs in a later configuration.
      for key, value in self.xcode_settings[configname].items():
        if key not in result:
          continue
        elif result[key] != value:
          del result[key]
  return result
def GetPerConfigSetting(self, setting, configname, default=None):
  """Looks up |setting| in |configname|'s settings; falls back to the
  per-target value (then |default|) when the configuration is unknown."""
  try:
    config_settings = self.xcode_settings[configname]
  except KeyError:
    return self.GetPerTargetSetting(setting, default)
  return config_settings.get(setting, default)
def GetPerTargetSetting(self, setting, default=None):
  """Tries to get xcode_settings.setting from spec. Assumes that the setting
  has the same value in all configurations and throws otherwise."""
  result = None
  seen_first = False
  for configname in sorted(self.xcode_settings.keys()):
    value = self.xcode_settings[configname].get(setting, None)
    if not seen_first:
      result = value
      seen_first = True
    else:
      # A differing value means the setting is per-config, which callers of
      # this method do not support.
      assert result == value, (
          "Expected per-target setting for '%s', got per-config setting "
          "(target %s)" % (setting, self.spec['target_name']))
  if result is None:
    return default
  return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if self.spec['type'] == 'loadable_module' and self._IsBundle():
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def _GetTargetPostbuilds(self, configname, output, output_binary,
quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _GetIOSPostbuilds(self, configname, output_binary):
  """Return a shell command to codesign the iOS output binary so it can
  be deployed to a device. This should be run as the very last step of the
  build."""
  # Only iOS executables get signed; everything else returns no commands.
  if not (self.isIOS and self.spec['type'] == 'executable'):
    return []

  settings = self.xcode_settings[configname]
  # Resolve CODE_SIGN_IDENTITY to a keychain fingerprint; no identity means
  # no signing step.
  key = self._GetIOSCodeSignIdentityKey(settings)
  if not key:
    return []

  # Warn for any unimplemented signing xcode keys.
  unimpl = ['OTHER_CODE_SIGN_FLAGS']
  unimpl = set(unimpl) & set(self.xcode_settings[configname].keys())
  if unimpl:
    # NOTE: Python 2 print statement; this file predates Python 3 support.
    print 'Warning: Some codesign keys not implemented, ignoring: %s' % (
        ', '.join(sorted(unimpl)))

  # Delegate the actual signing to gyp-mac-tool's code-sign-bundle command,
  # passing the resource-rules, entitlements and provisioning-profile
  # settings ('' when unset).
  return ['%s code-sign-bundle "%s" "%s" "%s" "%s"' % (
      os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
      settings.get('CODE_SIGN_RESOURCE_RULES_PATH', ''),
      settings.get('CODE_SIGN_ENTITLEMENTS', ''),
      settings.get('PROVISIONING_PROFILE', ''))
  ]
def _GetIOSCodeSignIdentityKey(self, settings):
  """Returns the keychain fingerprint for the CODE_SIGN_IDENTITY named in
  |settings|, or None when no identity is configured.

  Lookups shell out to `security find-identity` once per identity name and
  are memoized in the class-level _codesigning_key_cache. An identity that
  matches several different fingerprints triggers an assertion.
  """
  identity = settings.get('CODE_SIGN_IDENTITY')
  if not identity:
    return None
  if identity not in XcodeSettings._codesigning_key_cache:
    # Ask the system keychain for all valid codesigning identities; each
    # matching line looks like: '  1) <fingerprint> "<identity name>"'.
    output = subprocess.check_output(
        ['security', 'find-identity', '-p', 'codesigning', '-v'])
    for line in output.splitlines():
      if identity in line:
        fingerprint = line.split()[1]
        cache = XcodeSettings._codesigning_key_cache
        assert identity not in cache or fingerprint == cache[identity], (
            "Multiple codesigning fingerprints for identity: %s" % identity)
        XcodeSettings._codesigning_key_cache[identity] = fingerprint
  # '' (not None) when the identity was not found in the keychain.
  return XcodeSettings._codesigning_key_cache.get(identity, '')
def AddImplicitPostbuilds(self, configname, output, output_binary,
                          postbuilds=None, quiet=False):
  """Returns a list of shell commands that should run before and after
  |postbuilds|.

  Args:
      configname: Configuration to compute the implicit postbuilds for.
      output: Build-dir-relative output path (used for the dSYM bundle).
      output_binary: Build-dir-relative path to the linked binary.
      postbuilds: Explicit postbuild commands to sandwich between the
          implicit pre (dSYM/strip) and post (iOS codesign) steps.
      quiet: Suppress the echo progress commands.
  """
  assert output_binary is not None
  # Use None instead of a mutable [] default so the default list cannot be
  # shared (and accidentally mutated) across calls.
  if postbuilds is None:
    postbuilds = []
  pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
  post = self._GetIOSPostbuilds(configname, output_binary)
  return pre + postbuilds + post
def _AdjustLibrary(self, library, config_name=None):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
sdk_root = self._SdkPath(config_name)
if not sdk_root:
sdk_root = ''
# Xcode 7 started shipping with ".tbd" (text based stubs) files instead of
# ".dylib" without providing a real support for them. What it does, for
# "/usr/lib" libraries, is do "-L/usr/lib -lname" which is dependent on the
# library order and cause collision when building Chrome.
#
# Instead substitude ".tbd" to ".dylib" in the generated project when the
# following conditions are both true:
# - library is referenced in the gyp file as "$(SDKROOT)/**/*.dylib",
# - the ".dylib" file does not exists but a ".tbd" file do.
library = l.replace('$(SDKROOT)', sdk_root)
if l.startswith('$(SDKROOT)'):
basename, ext = os.path.splitext(library)
if ext == '.dylib' and not os.path.exists(library):
tbd_library = basename + '.tbd'
if os.path.exists(tbd_library):
library = tbd_library
return library
def AdjustLibraries(self, libraries, config_name=None):
  """Transforms entries like 'Cocoa.framework' in libraries into entries like
  '-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
  """
  return [self._AdjustLibrary(library, config_name)
          for library in libraries]
def _BuildMachineOSBuild(self):
  """Returns the OS build number of the build machine, via `sw_vers`
  (e.g. '13F34'); used for the BuildMachineOSBuild Info.plist key."""
  return GetStdout(['sw_vers', '-buildVersion'])
def _XcodeIOSDeviceFamily(self, configname):
family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1')
return [int(x) for x in family.split(',')]
def GetExtraPlistItems(self, configname=None):
  """Returns a dictionary with extra items to insert into Info.plist."""
  # The per-configuration part is expensive to compute (shells out to
  # sw_vers/xcodebuild), so it is memoized in the class-level _plist_cache.
  if configname not in XcodeSettings._plist_cache:
    cache = {}
    cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild()

    xcode, xcode_build = XcodeVersion()
    cache['DTXcode'] = xcode
    cache['DTXcodeBuild'] = xcode_build

    sdk_root = self._SdkRoot(configname)
    if not sdk_root:
      sdk_root = self._DefaultSdkRoot()
    cache['DTSDKName'] = sdk_root
    if xcode >= '0430':
      # Xcode >= 4.3 reports a per-SDK ProductBuildVersion; older versions
      # fall back to the build machine's OS build.
      cache['DTSDKBuild'] = self._GetSdkVersionInfoItem(
          sdk_root, 'ProductBuildVersion')
    else:
      cache['DTSDKBuild'] = cache['BuildMachineOSBuild']

    if self.isIOS:
      cache['DTPlatformName'] = cache['DTSDKName']
      # Device configurations are named '<config>-iphoneos' (see
      # _AddIOSDeviceConfigurations); everything else is the simulator.
      if configname.endswith("iphoneos"):
        cache['DTPlatformVersion'] = self._GetSdkVersionInfoItem(
            sdk_root, 'ProductVersion')
        cache['CFBundleSupportedPlatforms'] = ['iPhoneOS']
      else:
        cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator']
    XcodeSettings._plist_cache[configname] = cache

  # Include extra plist items that are per-target, not per global
  # XcodeSettings.
  items = dict(XcodeSettings._plist_cache[configname])
  if self.isIOS:
    items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname)
  return items
def _DefaultSdkRoot(self):
  """Returns the default SDKROOT to use.

  Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode
  project, then the environment variable was empty. Starting with this
  version, Xcode uses the name of the newest SDK installed.
  """
  xcode_version, _xcode_build = XcodeVersion()
  if xcode_version < '0500':
    return ''
  default_sdk_path = self._XcodeSdkPath('')
  default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path)
  if default_sdk_root:
    return default_sdk_root
  try:
    all_sdks = GetStdout(['xcodebuild', '-showsdks'])
  except Exception:
    # If xcodebuild fails (missing binary -> OSError, non-zero exit ->
    # GypError), there are no valid SDKs. Narrowed from a bare except so
    # that KeyboardInterrupt/SystemExit still propagate.
    return ''
  for line in all_sdks.splitlines():
    items = line.split()
    # SDK lines end in '... -sdk <sdkname>'.
    if len(items) >= 3 and items[-2] == '-sdk':
      sdk_root = items[-1]
      sdk_path = self._XcodeSdkPath(sdk_root)
      if sdk_path == default_sdk_path:
        return sdk_root
  return ''
class MacPrefixHeader(object):
  """Emulates Xcode's GCC_PREFIX_HEADER feature.

  Xcode's prefix-header support works like this:
  * If GCC_PREFIX_HEADER is present, all compilations in that project get an
    additional |-include path_to_prefix_header| cflag.
  * If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
    instead compiled, and all other compilations in the project get an
    additional |-include path_to_compiled_header| instead.
    + Compiled prefix headers have the extension gch. There is one gch file
      for every language used in the project (c, cc, m, mm), since gch files
      for different languages aren't compatible.
    + gch files themselves are built with the target's normal cflags, but
      they obviously don't get the |-include| flag. Instead, they need a -x
      flag that describes their language.
    + All o files in the target need to depend on the gch file, to make sure
      it's built before any o file is built.

  This class helps with some of these tasks, but it needs help from the
  build system for writing dependencies to the gch files, for writing build
  commands for the gch files, and for figuring out the location of the gch
  files.
  """

  # Maps a source-file extension to the gch language key.
  _EXT_TO_LANG = {
      '.c': 'c',
      '.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
      '.m': 'm',
      '.mm': 'mm',
  }

  def __init__(self, xcode_settings,
               gyp_path_to_build_path, gyp_path_to_build_output):
    """If xcode_settings is None, all methods on this class are no-ops.

    Args:
        gyp_path_to_build_path: A function that takes a gyp-relative path,
            and returns a path relative to the build directory.
        gyp_path_to_build_output: A function that takes a gyp-relative path
            and a language code ('c', 'cc', 'm', or 'mm'), and that returns
            a path to where the output of precompiling that path for that
            language should be placed (without the trailing '.gch').
    """
    # This doesn't support per-configuration prefix headers. Good enough
    # for now.
    self.header = None
    self.compile_headers = False
    if xcode_settings:
      self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
      self.compile_headers = xcode_settings.GetPerTargetSetting(
          'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
    self.compiled_headers = {}
    if self.header:
      if self.compile_headers:
        for lang in ('c', 'cc', 'm', 'mm'):
          self.compiled_headers[lang] = gyp_path_to_build_output(
              self.header, lang)
      self.header = gyp_path_to_build_path(self.header)

  def _CompiledHeader(self, lang, arch):
    """Build-dir path of the compiled header for |lang|, without '.gch'."""
    assert self.compile_headers
    compiled = self.compiled_headers[lang]
    if arch:
      compiled += '.' + arch
    return compiled

  def GetInclude(self, lang, arch=None):
    """Gets the cflags to include the prefix header for language |lang|."""
    if self.compile_headers and lang in self.compiled_headers:
      return '-include %s' % self._CompiledHeader(lang, arch)
    if self.header:
      return '-include %s' % self.header
    return ''

  def _Gch(self, lang, arch):
    """Returns the actual file name of the prefix header for language
    |lang|."""
    assert self.compile_headers
    return self._CompiledHeader(lang, arch) + '.gch'

  def GetObjDependencies(self, sources, objs, arch=None):
    """Given parallel lists of source and object files, returns a list of
    (source, object, gch) tuples, where |gch| is the build-directory-relative
    gch file each object depends on. Sources with unrecognized extensions
    are skipped. |sources[i]| must be the source belonging to |objs[i]|."""
    if not self.header or not self.compile_headers:
      return []
    dependencies = []
    for source, obj in zip(sources, objs):
      lang = self._EXT_TO_LANG.get(os.path.splitext(source)[1])
      if lang:
        dependencies.append((source, obj, self._Gch(lang, arch)))
    return dependencies

  def GetPchBuildCommands(self, arch=None):
    """Returns [(path_to_gch, language_flag, language, header)].
    |path_to_gch| and |header| are relative to the build directory.
    """
    if not self.header or not self.compile_headers:
      return []
    return [(self._Gch(lang, arch), '-x %s' % flag, lang, self.header)
            for lang, flag in (('c', 'c-header'),
                               ('cc', 'c++-header'),
                               ('m', 'objective-c-header'),
                               ('mm', 'objective-c++-header'))]
def XcodeVersion():
  """Returns a (version, build_version) tuple of the installed Xcode,
  e.g. ('0463', '4H1503') for Xcode 4.6.3. Falls back to the command-line
  tools version with an empty build string, since the CLT has no build
  information. The result is cached in XCODE_VERSION_CACHE."""
  # `xcodebuild -version` output looks like
  #    Xcode 4.6.3
  #    Build version 4H1503
  # or like
  #    Xcode 3.2.6
  #    Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
  #    BuildVersion: 10M2518
  # Convert that to '0463', '4H1503'.
  global XCODE_VERSION_CACHE
  if XCODE_VERSION_CACHE:
    return XCODE_VERSION_CACHE
  try:
    version_list = GetStdout(['xcodebuild', '-version']).splitlines()
    # In some circumstances xcodebuild exits 0 but doesn't return
    # the right results; for example, a user on 10.7 or 10.8 with
    # a bogus path set via xcode-select
    # In that case this may be a CLT-only install so fall back to
    # checking that version.
    if len(version_list) < 2:
      raise GypError("xcodebuild returned unexpected results")
  except Exception:
    # Narrowed from a bare except: the GypError above, GetStdout failures
    # and a missing xcodebuild (OSError) all land here, but
    # KeyboardInterrupt/SystemExit now propagate.
    version = CLTVersion()
    if version:
      version = re.match(r'(\d\.\d\.?\d*)', version).groups()[0]
    else:
      raise GypError("No Xcode or CLT version detected!")
    # The CLT has no build information, so we return an empty string.
    version_list = [version, '']
  version = version_list[0]
  build = version_list[-1]
  # Be careful to convert "4.2" to "0420":
  version = version.split()[-1].replace('.', '')
  version = (version + '0' * (3 - len(version))).zfill(4)
  if build:
    build = build.split()[-1]
  XCODE_VERSION_CACHE = (version, build)
  return XCODE_VERSION_CACHE
# This function ported from the logic in Homebrew's CLT version check
def CLTVersion():
  """Returns the version of command-line tools from pkgutil, or None when no
  known CLT package is installed."""
  # pkgutil output looks like
  #   package-id: com.apple.pkg.CLTools_Executables
  #   version: 5.0.1.0.1.1382131676
  #   volume: /
  #   location: /
  #   install-time: 1382544035
  #   groups: com.apple.FindSystemFiles.pkg-group ...
  STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
  FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
  MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables"

  regex = re.compile('version: (?P<version>.+)')
  # Try the newest package id first; the first one that pkgutil knows about
  # and that has a parseable version wins.
  for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]:
    try:
      output = GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key])
      return re.search(regex, output).groupdict()['version']
    except Exception:
      # Narrowed from a bare except: GetStdout raises GypError for an
      # unknown package, and re.search returns None (-> AttributeError) on
      # unparseable output; in either case try the next package id.
      continue
  return None  # Explicit: no CLT package found.
def GetStdout(cmdlist):
  """Returns the content of standard output returned by invoking |cmdlist|,
  with the trailing newline(s) stripped.

  Raises |GypError| if the command exits with a non-zero return code.
  """
  process = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
  output = process.communicate()[0]
  if process.returncode != 0:
    # Surface the failing command's output before raising.
    sys.stderr.write(output + '\n')
    raise GypError('Error %d running %s' % (process.returncode, cmdlist[0]))
  return output.rstrip('\n')
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
  """Merges the global xcode_settings dictionary into each configuration of the
  target represented by spec. For keys that are both in the global and the local
  xcode_settings dict, the local key gets precendence.
  """
  # The xcode generator special-cases global xcode_settings and does something
  # that amounts to merging in the global xcode_settings into each local
  # xcode_settings dict.
  global_settings = global_dict.get('xcode_settings', {})
  for config in spec['configurations'].values():
    if 'xcode_settings' not in config:
      continue
    merged = global_settings.copy()
    merged.update(config['xcode_settings'])
    config['xcode_settings'] = merged
def IsMacBundle(flavor, spec):
  """Returns whether |spec| should be treated as a bundle.

  Bundles are directories with a certain subdirectory structure, instead of
  just a single file. Bundle rules do not produce a binary but also package
  resources into that directory."""
  # int() first, matching the original evaluation order (a malformed
  # mac_bundle value raises regardless of flavor).
  if not (int(spec.get('mac_bundle', 0)) and flavor == 'mac'):
    return False
  assert spec['type'] != 'none', (
      'mac_bundle targets cannot have type none (target "%s")' %
      spec['target_name'])
  return True
def GetMacBundleResources(product_dir, xcode_settings, resources):
  """Yields (output, resource) pairs for every resource in |resources|.
  Only call this for mac bundle targets.

  Args:
      product_dir: Path to the directory containing the output bundle,
          relative to the build directory.
      xcode_settings: The XcodeSettings of the current target.
      resources: A list of bundle resources, relative to the build directory.
  """
  dest = os.path.join(product_dir,
                      xcode_settings.GetBundleResourceFolder())
  for res in resources:
    # The make generator doesn't support it, so forbid it everywhere
    # to keep the generators more interchangable.
    assert ' ' not in res, (
        "Spaces in resource filenames not supported (%s)" % res)

    # Split into (path, file), then the path into (prefix, maybe '.lproj').
    directory, basename = os.path.split(res)
    _, maybe_lproj = os.path.split(directory)

    output = dest
    if maybe_lproj.endswith('.lproj'):
      # Localized resources keep their .lproj folder inside the bundle.
      output = os.path.join(output, maybe_lproj)
    output = os.path.join(output, basename)

    if output.endswith('.xib'):
      # Compiled XIB files are referred to by .nib.
      output = os.path.splitext(output)[0] + '.nib'
    elif output.endswith('.storyboard'):
      # Compiled storyboard files are referred to by .storyboardc.
      output = os.path.splitext(output)[0] + '.storyboardc'

    yield output, res
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
  """Returns (info_plist, dest_plist, defines, extra_env), where:
  * |info_plist| is the source plist path, relative to the
    build directory,
  * |dest_plist| is the destination plist path, relative to the
    build directory,
  * |defines| is a list of preprocessor defines (empty if the plist
    shouldn't be preprocessed),
  * |extra_env| is a dict of env variables that should be exported when
    invoking |mac_tool copy-info-plist|.

  Only call this for mac bundle targets. Returns (None, None, [], {}) when
  the target has no INFOPLIST_FILE.

  Args:
      product_dir: Path to the directory containing the output bundle,
          relative to the build directory.
      xcode_settings: The XcodeSettings of the current target.
      gyp_path_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build direcotry.
  """
  info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
  if not info_plist:
    return None, None, [], {}

  # The make generator doesn't support it, so forbid it everywhere
  # to keep the generators more interchangable.
  assert ' ' not in info_plist, (
      "Spaces in Info.plist filenames not supported (%s)" % info_plist)

  info_plist = gyp_path_to_build_path(info_plist)

  # When the plist is explicitly marked for preprocessing, collect the
  # preprocessor defines to pass as -D flags.
  defines = []
  if xcode_settings.GetPerTargetSetting(
      'INFOPLIST_PREPROCESS', default='NO') == 'YES':
    defines = shlex.split(xcode_settings.GetPerTargetSetting(
        'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))

  dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
  extra_env = xcode_settings.GetPerTargetSettings()

  return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
                 additional_settings=None):
  """Return the environment variables that Xcode would set. See
  http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
  for a full list.

  Args:
      xcode_settings: An XcodeSettings object. If this is None, this function
          returns an empty dict.
      built_products_dir: Absolute path to the built products dir.
      srcroot: Absolute path to the source root.
      configuration: The build configuration name.
      additional_settings: An optional dict with more values to add to the
          result. Note: when provided, it is updated in place and returned.
  """
  if not xcode_settings: return {}

  # This function is considered a friend of XcodeSettings, so let it reach into
  # its implementation details.
  spec = xcode_settings.spec

  # These are filled in on a as-needed basis.
  env = {
    'BUILT_FRAMEWORKS_DIR' : built_products_dir,
    'BUILT_PRODUCTS_DIR' : built_products_dir,
    'CONFIGURATION' : configuration,
    'PRODUCT_NAME' : xcode_settings.GetProductName(),
    # See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
    'SRCROOT' : srcroot,
    'SOURCE_ROOT': '${SRCROOT}',
    # This is not true for static libraries, but currently the env is only
    # written for bundles:
    'TARGET_BUILD_DIR' : built_products_dir,
    'TEMP_DIR' : '${TMPDIR}',
  }
  if xcode_settings.GetPerConfigSetting('SDKROOT', configuration):
    env['SDKROOT'] = xcode_settings._SdkPath(configuration)
  else:
    env['SDKROOT'] = ''

  if spec['type'] in (
      'executable', 'static_library', 'shared_library', 'loadable_module'):
    env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
    env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
    env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
    mach_o_type = xcode_settings.GetMachOType()
    if mach_o_type:
      env['MACH_O_TYPE'] = mach_o_type
    env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
  if xcode_settings._IsBundle():
    env['CONTENTS_FOLDER_PATH'] = \
      xcode_settings.GetBundleContentsFolderPath()
    env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
        xcode_settings.GetBundleResourceFolder()
    env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
    env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()

  install_name = xcode_settings.GetInstallName()
  if install_name:
    env['LD_DYLIB_INSTALL_NAME'] = install_name
  install_name_base = xcode_settings.GetInstallNameBase()
  if install_name_base:
    env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
  # Fix: XcodeVersion() returns a (version, build) tuple; the previous
  # comparison `XcodeVersion() >= '0500'` compared a tuple against a string,
  # which under Python 2's type-name ordering was always True and so never
  # acted as a version check. Compare the version component only.
  if XcodeVersion()[0] >= '0500' and not env.get('SDKROOT'):
    sdk_root = xcode_settings._SdkRoot(configuration)
    if not sdk_root:
      sdk_root = xcode_settings._XcodeSdkPath('')
    if sdk_root is None:
      sdk_root = ''
    env['SDKROOT'] = sdk_root

  if not additional_settings:
    additional_settings = {}
  else:
    # Flatten lists to strings.
    for k in additional_settings:
      if not isinstance(additional_settings[k], str):
        additional_settings[k] = ' '.join(additional_settings[k])
  additional_settings.update(env)

  for k in additional_settings:
    additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])

  return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
def ExpandEnvVars(string, expansions):
  """Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
  expansions list. If the variable expands to something that references
  another variable, this variable is expanded as well if it's in env --
  until no variables present in env are left."""
  # Iterate in reverse so later definitions (which may reference earlier
  # ones) are substituted first.
  for name, value in reversed(expansions):
    for pattern in ('${' + name + '}', '$(' + name + ')', '$' + name):
      string = string.replace(pattern, value)
  return string
def _TopologicallySortedEnvVarKeys(env):
  """Takes a dict |env| whose values are strings that can refer to other keys,
  for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
  env such that key2 is after key1 in L if env[key2] refers to env[key1].

  Throws an Exception in case of dependency cycles.
  """
  # Since environment variables can refer to other variables, the evaluation
  # order is important. Below is the logic to compute the dependency graph
  # and sort it.
  # Only ${FOO}-style references count; _GetXcodeEnv normalizes all values
  # to that form before this function runs.
  regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
  def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_varible.
    # This happens to be easier in this case, since a variable's
    # definition contains all variables it references in a single string.
    # We can then reverse the result of the topological sort at the end.
    # Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
    matches = set([v for v in regex.findall(env[node]) if v in env])
    for dependee in matches:
      assert '${' not in dependee, 'Nested variables not supported: ' + dependee
    return matches

  try:
    # Topologically sort, and then reverse, because we used an edge definition
    # that's inverted from the expected result of this function (see comment
    # above).
    order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
    order.reverse()
    return order
  except gyp.common.CycleError, e:  # Python 2 except syntax, kept as-is.
    raise GypError(
        'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
                      configuration, additional_settings=None):
  """Returns the Xcode environment as a list of (key, value) pairs, ordered
  so that any variable whose value references another variable appears after
  the variable it references (see _TopologicallySortedEnvVarKeys)."""
  env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
                    additional_settings)
  return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
def GetSpecPostbuildCommands(spec, quiet=False):
  """Returns the list of postbuilds explicitly defined on |spec|, in a form
  executable by a shell."""
  commands = []
  for postbuild in spec.get('postbuilds', []):
    if not quiet:
      # Echo a progress line naming the target and the postbuild.
      commands.append('echo POSTBUILD\\(%s\\) %s' % (
            spec['target_name'], postbuild['postbuild_name']))
    commands.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
  return commands
def _HasIOSTarget(targets):
"""Returns true if any target contains the iOS specific key
IPHONEOS_DEPLOYMENT_TARGET."""
for target_dict in targets.values():
for config in target_dict['configurations'].values():
if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'):
return True
return False
def _AddIOSDeviceConfigurations(targets):
"""Clone all targets and append -iphoneos to the name. Configure these targets
to build for iOS devices and use correct architectures for those builds."""
for target_dict in targets.itervalues():
toolset = target_dict['toolset']
configs = target_dict['configurations']
for config_name, config_dict in dict(configs).iteritems():
iphoneos_config_dict = copy.deepcopy(config_dict)
configs[config_name + '-iphoneos'] = iphoneos_config_dict
configs[config_name + '-iphonesimulator'] = config_dict
if toolset == 'target':
iphoneos_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos'
return targets
def CloneConfigurationForDeviceAndEmulator(target_dicts):
  """If |target_dicts| contains any iOS targets, automatically create -iphoneos
  targets for iOS device builds."""
  if not _HasIOSTarget(target_dicts):
    # Nothing iOS-specific; leave the targets untouched.
    return target_dicts
  return _AddIOSDeviceConfigurations(target_dicts)
| mit |
sysadminmatmoz/pmis | project_document_page/project.py | 2 | 1589 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 (Original Author)
# Domsense s.r.l. (<http://www.domsense.com>)
#
# Copyright (C) 2014-now (OpenERP version 7 adaptation)
# Matmoz d.o.o. (<http://www.matmoz.si>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class Project(models.Model):
    # Extend the standard project model with links to knowledge-base pages.
    _inherit = 'project.project'
    # Documentation pages attached to the project, stored through the
    # 'project_docu_rel' many2many relation table.
    page_ids = fields.Many2many(
        'document.page',
        'project_docu_rel',
        'project_id',
        'page_id',
        'Document pages'
    )
class Task(models.Model):
    """Extend project tasks with links to document pages.

    Renamed from ``Project``: the original name shadowed the
    project.project extension class defined earlier in this module.
    Odoo/OpenERP resolves models via ``_inherit``, not the Python class
    name, so registry behaviour is unchanged.
    """
    _inherit = 'project.task'
    # Documentation pages attached to the task, stored through the
    # 'task_page_rel' many2many relation table.
    page_ids = fields.Many2many(
        'document.page',
        'task_page_rel',
        'task_id',
        'page_id',
        'Document pages'
    )
| agpl-3.0 |
hehongliang/tensorflow | tensorflow/compiler/tests/depthwise_conv_op_test.py | 4 | 15601 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for depthwise convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
# Reference implementation of depthwise_conv2d
def ReferenceDepthwiseConv2D(input_tensor, filter_tensor, strides, padding,
                             data_format=None):
  """Reference depthwise conv2d built from per-channel regular convolutions.

  Slices the input and the filter along the input-channel axis, runs an
  ordinary conv2d on each slice, and concatenates the per-channel results
  back together along the channel axis.
  """
  # The channel axis depends on the layout: 1 for NCHW, 3 for NHWC.
  channel_axis = 1 if data_format == "NCHW" else 3
  in_channels = filter_tensor.shape[2]
  per_channel_outputs = []
  for channel in xrange(in_channels):
    # Slice the input along its channel axis.
    if data_format == "NCHW":
      input_slice = input_tensor[:, channel:channel + 1, :, :]
    else:
      input_slice = input_tensor[:, :, :, channel:channel + 1]
    # Filters are laid out as [H, W, in_channels, depth_multiplier].
    filter_slice = filter_tensor[:, :, channel:channel + 1, :]
    per_channel_outputs.append(
        nn_ops.conv2d(input_slice, filter_slice, strides, padding,
                      data_format=data_format,
                      name="depthwise_slice_%d" % channel))
  return array_ops.concat(per_channel_outputs, channel_axis)
def ConfigsToTest():
  """Iterator for different convolution shapes, strides and paddings.

  Yields:
    Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
    convolution parameters.
  """
  # Each entry is (input_size, filter_size, out_size, stride, padding).
  configs = [
      ([4, 5, 5, 48], [1, 1, 48, 2], [4, 5, 5, 96], 1, "SAME"),
      ([4, 8, 8, 84], [1, 3, 84, 1], [4, 8, 8, 84], 1, "SAME"),
      ([4, 17, 17, 48], [3, 1, 48, 4], [4, 17, 17, 192], 1, "SAME"),
      ([4, 9, 27, 8], [3, 3, 8, 1], [4, 9, 27, 8], 1, "SAME"),
      ([4, 31, 31, 7], [3, 3, 7, 1], [4, 31, 31, 7], 1, "SAME"),
      ([4, 35, 35, 2], [5, 5, 2, 1], [4, 35, 35, 2], 1, "SAME"),
      ([4, 147, 147, 2], [3, 3, 2, 8], [4, 49, 49, 16], 3, "VALID"),
      ([3, 299, 299, 3], [2, 2, 3, 8], [3, 150, 150, 24], 2, "SAME"),
      ([5, 183, 183, 1], [5, 5, 1, 2], [5, 92, 92, 2], 2, "SAME"),
  ]
  for config in configs:
    yield config
def CheckGradConfigsToTest():
  """Iterator for different convolution shapes, strides and paddings.

  compute_gradient_error() is very expensive. So the configs should be
  relatively small.

  Yields:
    Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
    convolution parameters.
  """
  # Each entry is (input_size, filter_size, out_size, stride, padding).
  configs = [
      ([2, 5, 8, 1], [4, 4, 1, 2], [2, 5, 8, 2], 1, "SAME"),
      ([4, 5, 5, 1], [2, 2, 1, 2], [4, 2, 2, 2], 2, "VALID"),
      ([2, 4, 4, 2], [3, 1, 2, 2], [2, 4, 4, 4], 1, "SAME"),
      ([1, 15, 15, 2], [1, 3, 2, 1], [1, 15, 15, 2], 1, "SAME"),
      ([2, 15, 16, 1], [3, 3, 1, 2], [2, 5, 5, 2], 3, "VALID"),
  ]
  for config in configs:
    yield config
class DepthwiseConv2DTest(xla_test.XLATestCase):
  """Tests XLA's depthwise_conv2d_native ops against CPU reference results."""
  # This is testing that depthwise_conv2d and depthwise_conv2d_native
  # produce the same results. It also tests that NCHW and NWHC
  # formats agree, by comparing the depthwise_conv2d_native with
  # 'NCHW' format (with transposition) matches the 'NHWC' format using
  # the higher level interface.
  def _VerifyValues(self,
                    tensor_in_sizes,
                    filter_in_sizes,
                    stride,
                    padding,
                    data_type,
                    data_format="NHWC"):
    """Verifies the output values of the convolution function.

    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [filter_rows, filter_cols, input_depth, depth_multiplier].
      stride: Stride.
      padding: Padding type.
      data_type: The data type to use.
      data_format: The data_format of the input. "NHWC" or "NCHW".
    """
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
      total_size_1 *= s
    for s in filter_in_sizes:
      total_size_2 *= s
    # Initializes the input and filter tensor with numbers incrementing from 1.
    x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)],
                  dtype=data_type).reshape(tensor_in_sizes)
    x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)],
                  dtype=data_type).reshape(filter_in_sizes)
    with self.cached_session() as sess:
      # Comparison tolerance scales with the precision of the data type.
      if data_type == np.float32:
        tolerance = 1e-4
      else:
        self.assertEqual(data_type, np.float64)
        tolerance = 1e-8
      t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=data_type)
      t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=data_type)
      native_t1 = t1
      strides = [1, stride, stride, 1]
      if data_format == "NCHW":
        # Transpose from NWHC input to NCHW
        # Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
        native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
        strides = [1, 1, stride, stride]
      with self.test_scope():
        conv_native = nn_ops.depthwise_conv2d_native(
            native_t1,
            t2,
            strides=strides,
            data_format=data_format,
            padding=padding)
      if data_format == "NCHW":
        # Transpose back from NCHW to NHWC
        conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])
      # The reference implementation runs on the CPU, outside the XLA scope.
      with ops.device("CPU"):
        conv_interface = ReferenceDepthwiseConv2D(
            t1, t2, strides=[1, stride, stride, 1], padding=padding)
      native_result = sess.run(conv_native, {t1: x1, t2: x2})
      interface_result = sess.run(conv_interface, {t1: x1, t2: x2})
      print("data_type:", data_type, "max diff = ",
            np.amax(np.absolute(native_result - interface_result)))
      self.assertAllClose(
          np.ravel(native_result), np.ravel(interface_result), rtol=tolerance)
  def testDepthwiseConv2D(self):
    for index, (input_size, filter_size, _, stride,
                padding) in enumerate(ConfigsToTest()):
      print("Testing DepthwiseConv2D,", index, "th config:", input_size, "*",
            filter_size, "stride:", stride, "padding:", padding)
      for data_type in self.float_types:
        # TODO(phawkins): the reference implementation only supports float32.
        if data_type == np.float32:
          self._VerifyValues(
              input_size, filter_size, stride, padding, data_type)
  def testDepthwiseConv2DFormat(self):
    for index, (input_size, filter_size, _, stride,
                padding) in enumerate(ConfigsToTest()):
      print("Testing DepthwiseConv2DFormat,", index, "th config:", input_size,
            "*", filter_size, "stride:", stride, "padding:", padding)
      for data_type in self.float_types:
        # TODO(phawkins): the reference implementation only supports float32.
        if data_type == np.float32:
          self._VerifyValues(
              input_size,
              filter_size,
              stride,
              padding,
              data_type,
              data_format="NCHW")
  # This is testing against hand calculated results.
  def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
                        expected):
    """Verifies the output values of the depthwise convolution function.

    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [filter_rows, filter_cols, input_depth, depth_multiplier].
      stride: Stride.
      padding: Padding type.
      expected: An array containing the expected operation outputs.
    """
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
      total_size_1 *= s
    for s in filter_in_sizes:
      total_size_2 *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)],
                  dtype=np.float32).reshape(tensor_in_sizes)
    x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)],
                  dtype=np.float32).reshape(filter_in_sizes)
    with self.cached_session() as sess:
      t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=np.float32)
      t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=np.float32)
      with self.test_scope():
        conv = nn_ops.depthwise_conv2d_native(
            t1, t2, strides=[1, stride, stride, 1], padding=padding)
      value = sess.run(conv, {t1: x1, t2: x2})
    print("value = ", value)
    self.assertArrayNear(expected, np.ravel(value), 1e-4)
    self.assertShapeEqual(value, conv)
  def testConv2D2x2Filter(self):
    # The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
    #
    # [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
    # [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
    # We can view this as two inputs
    #
    # input depth 0:
    #
    # [ 1.0, 3.0, 5.0 ]
    # [ 7.0, 9.0, 11.0 ]
    #
    # input depth 1:
    #
    # [ 2.0, 4.0, 6.0 ]
    # [ 8.0, 10.0, 12.0 ]
    #
    # The filter looks like this (it has two 2 x 2 patches, each generating 2
    # depths):
    #
    # filter #0:
    #
    # [ (1.0, 3.0), ( 5.0, 7.0)]
    # [ (9.0, 11.0), (13.0, 15.0)]
    #
    # filter #1:
    #
    # [ ( 2.0, 4.0), ( 6.0, 8.0)]
    # [ (10.0, 12.0), (14.0, 16.0)]
    #
    # So the outputs are:
    #
    # (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
    # 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
    # (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
    # 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
    # (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
    # 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
    # (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
    # 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
    #
    # (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
    # 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
    # (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
    # 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
    # (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
    # 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
    # (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
    # 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
    expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
    self._VerifyHandValues(
        tensor_in_sizes=[1, 2, 3, 2],
        filter_in_sizes=[2, 2, 2, 2],
        stride=1,
        padding="VALID",
        expected=expected_output)
  def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
                            stride, padding):
    # Random filter and output-gradient values; the comparison only checks
    # that XLA and the CPU implementation agree on the same inputs.
    x1 = np.random.rand(*filter_sizes).astype(np.float32)
    x2 = np.random.rand(*output_sizes).astype(np.float32)
    def _GetVal(use_xla):
      # Runs the backprop op either inside the XLA test scope or on the
      # default (CPU) path, returning the computed gradient.
      with self.cached_session():
        t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
        t1 = array_ops.placeholder(np.float32, shape=filter_sizes)
        t2 = array_ops.placeholder(np.float32, shape=output_sizes)
        if use_xla:
          with self.test_scope():
            backprop = nn_ops.depthwise_conv2d_native_backprop_input(
                t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
        else:
          backprop = nn_ops.depthwise_conv2d_native_backprop_input(
              t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
        ret = backprop.eval({t1: x1, t2: x2})
        self.assertShapeEqual(ret, backprop)
      return ret
    gpu_value = _GetVal(use_xla=True)
    cpu_value = _GetVal(use_xla=False)
    self.assertAllClose(cpu_value, gpu_value, rtol=1e-3, atol=1e-3)
  def testDepthwiseConv2DInputGradCompare(self):
    for index, (input_size, filter_size, output_size, stride,
                padding) in enumerate(ConfigsToTest()):
      print("Testing DepthwiseConv2DInputGradCompare,", index, "th config:",
            input_size, "*", filter_size, "stride:", stride, "padding:",
            padding)
      self._CompareBackpropInput(input_size, filter_size, output_size, stride,
                                 padding)
  def _CompareBackpropFilter(self, input_sizes, filter_sizes, output_sizes,
                             stride, padding):
    # Random input and output-gradient values, as in _CompareBackpropInput.
    x0 = np.random.rand(*input_sizes).astype(np.float32)
    x2 = np.random.rand(*output_sizes).astype(np.float32)
    def _GetVal(use_xla):
      # Runs the filter-backprop op either through XLA or on the CPU path.
      with self.cached_session():
        t0 = array_ops.placeholder(np.float32, shape=input_sizes)
        t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
        t2 = array_ops.placeholder(np.float32, shape=output_sizes)
        if use_xla:
          with self.test_scope():
            backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
                t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
        else:
          backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
              t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
        ret = backprop.eval({t0: x0, t2: x2})
        self.assertShapeEqual(ret, backprop)
      return ret
    gpu_value = _GetVal(use_xla=True)
    cpu_value = _GetVal(use_xla=False)
    self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
  def testDepthwiseConv2DFilterGradCompare(self):
    for index, (input_size, filter_size, output_size, stride,
                padding) in enumerate(ConfigsToTest()):
      print("Testing DepthwiseConv2DFilterGradCompare,", index, "th config:",
            input_size, "*", filter_size, "stride:", stride, "padding:",
            padding)
      self._CompareBackpropFilter(input_size, filter_size, output_size,
                                  stride, padding)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
zaqwes8811/matlab_ext | code-miners/extern/cppclean/cpp/utils.py | 1158 | 1153 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic utilities for C++ parsing."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import sys
# Set to True to see the start/end token indices.
DEBUG = True
def ReadFile(filename, print_error=True):
    """Returns the contents of a file.

    Args:
      filename: Path of the file to read.
      print_error: If True, print a diagnostic when the file cannot be read.

    Returns:
      The file contents as a string, or None if the file could not be read.
    """
    try:
        # 'with' guarantees the file is closed even if read() raises.
        with open(filename) as fp:
            return fp.read()
    except IOError:
        if print_error:
            print('Error reading %s: %s' % (filename, sys.exc_info()[1]))
        return None
| apache-2.0 |
eRestin/Mezz | mezzanine/pages/models.py | 3 | 10080 | from django.core.urlresolvers import resolve, reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.models import Displayable, Orderable, RichText
from mezzanine.pages.fields import MenusField
from mezzanine.pages.managers import PageManager
from mezzanine.utils.urls import path_to_slug, slugify
class BasePage(Orderable, Displayable):
    """
    Exists solely to store ``PageManager`` as the main manager.
    If it's defined on ``Page``, a concrete model, then each
    ``Page`` subclass loses the custom manager.
    """
    # Custom manager; defining it on this abstract base lets every concrete
    # ``Page`` subclass inherit it as the default manager.
    objects = PageManager()
    class Meta:
        abstract = True
class Page(BasePage):
    """
    A page in the page tree. This is the base class that custom content types
    need to subclass.
    """
    # Parent page in the tree; null for primary (top-level) pages.
    parent = models.ForeignKey("Page", blank=True, null=True,
        related_name="children")
    # Which menu templates this page should appear in.
    in_menus = MenusField(_("Show in menus"), blank=True, null=True)
    # Cached " / "-joined titles of the ancestor chain; rebuilt in save().
    titles = models.CharField(editable=False, max_length=1000, null=True)
    # Lower-cased model name of the content-type subclass for this page.
    content_model = models.CharField(editable=False, max_length=50, null=True)
    login_required = models.BooleanField(_("Login required"),
        help_text=_("If checked, only logged in users can view this page"))
    class Meta:
        verbose_name = _("Page")
        verbose_name_plural = _("Pages")
        ordering = ("titles",)
        order_with_respect_to = "parent"
    def __unicode__(self):
        return self.titles
    def get_absolute_url(self):
        """
        URL for a page - for ``Link`` page types, simply return its
        slug since these don't have an actual URL pattern. Also handle
        the special case of the homepage being a page object.
        """
        slug = self.slug
        if self.content_model == "link":
            # Ensure the URL is absolute.
            if not slug.lower().startswith("http"):
                slug = "/" + self.slug.lstrip("/")
            return slug
        if slug == "/":
            return reverse("home")
        else:
            return reverse("page", kwargs={"slug": slug})
    def save(self, *args, **kwargs):
        """
        Create the titles field using the titles up the parent chain
        and set the initial value for ordering.
        """
        if self.id is None:
            # First save: record which content-type subclass this page is.
            self.content_model = self._meta.object_name.lower()
        titles = [self.title]
        parent = self.parent
        while parent is not None:
            titles.insert(0, parent.title)
            parent = parent.parent
        self.titles = " / ".join(titles)
        super(Page, self).save(*args, **kwargs)
    def description_from_content(self):
        """
        Override ``Displayable.description_from_content`` to load the
        content type subclass for when ``save`` is called directly on a
        ``Page`` instance, so that all fields defined on the subclass
        are available for generating the description.
        """
        if self.__class__ == Page:
            content_model = self.get_content_model()
            if content_model:
                return content_model.description_from_content()
        return super(Page, self).description_from_content()
    def get_ascendants(self, for_user=None):
        """
        Returns the ascendants for the page. Ascendants are cached in
        the ``_ascendants`` attribute, which is populated when the page
        is loaded via ``Page.objects.with_ascendants_for_slug``.
        """
        if not self.parent_id:
            # No parents at all, bail out.
            return []
        if not hasattr(self, "_ascendants"):
            # _ascendants has not been populated by either
            # page.get_ascendants or Page.objects.with_ascendants_for_slug,
            # so run the latter to see if we can retrieve all parents in a
            # single query, which will occur if the slugs for each of the
            # pages have not been customised.
            if self.slug:
                kwargs = {"for_user": for_user}
                pages = Page.objects.with_ascendants_for_slug(self.slug,
                                                              **kwargs)
                self._ascendants = pages[0]._ascendants
            else:
                self._ascendants = []
        if not self._ascendants:
            # Page has a parent but with_ascendants_for_slug failed to
            # find them due to custom slugs, so retrieve the parents
            # recursively.
            child = self
            while child.parent_id is not None:
                self._ascendants.append(child.parent)
                child = child.parent
        return self._ascendants
    @classmethod
    def get_content_models(cls):
        """
        Return all Page subclasses.
        """
        is_content_model = lambda m: m is not Page and issubclass(m, Page)
        return filter(is_content_model, models.get_models())
    def get_content_model(self):
        """
        Provides a generic method of retrieving the instance of the custom
        content type's model for this page.
        """
        return getattr(self, self.content_model, None)
    def get_slug(self):
        """
        Recursively build the slug from the chain of parents.
        """
        slug = slugify(self.title)
        if self.parent is not None:
            return "%s/%s" % (self.parent.slug, slug)
        return slug
    def set_slug(self, new_slug):
        """
        Changes this page's slug, and all other pages whose slugs
        start with this page's slug.
        """
        for page in Page.objects.filter(slug__startswith=self.slug):
            if not page.overridden():
                # Rewrite the descendant's slug prefix to the new slug.
                page.slug = new_slug + page.slug[len(self.slug):]
                page.save()
        self.slug = new_slug
    def set_parent(self, new_parent):
        """
        Change the parent of this page, changing this page's slug to match
        the new parent if necessary.
        """
        self_slug = self.slug
        old_parent_slug = self.parent.slug if self.parent else ""
        new_parent_slug = new_parent.slug if new_parent else ""
        # Make sure setting the new parent won't cause a cycle.
        parent = new_parent
        while parent is not None:
            if parent.pk == self.pk:
                raise AttributeError("You can't set a page or its child as"
                                     " a parent.")
            parent = parent.parent
        self.parent = new_parent
        self.save()
        if self_slug:
            if not old_parent_slug:
                self.set_slug("/".join((new_parent_slug, self.slug)))
            elif self.slug.startswith(old_parent_slug):
                new_slug = self.slug.replace(old_parent_slug,
                                             new_parent_slug, 1)
                self.set_slug(new_slug.strip("/"))
    def overridden(self):
        """
        Returns ``True`` if the page's slug has an explicitly defined
        urlpattern and is therefore considered to be overridden.
        """
        from mezzanine.pages.views import page
        page_url = reverse("page", kwargs={"slug": self.slug})
        resolved_view = resolve(page_url)[0]
        return resolved_view != page
    def can_add(self, request):
        """
        Dynamic ``add`` permission for content types to override.
        """
        return self.slug != "/"
    def can_change(self, request):
        """
        Dynamic ``change`` permission for content types to override.
        """
        return True
    def can_delete(self, request):
        """
        Dynamic ``delete`` permission for content types to override.
        """
        return True
    def set_helpers(self, context):
        """
        Called from the ``page_menu`` template tag and assigns a
        handful of properties based on the current page, that are used
        within the various types of menus.
        """
        current_page = context["_current_page"]
        current_page_id = getattr(current_page, "id", None)
        current_parent_id = getattr(current_page, "parent_id", None)
        # Am I a child of the current page?
        self.is_current_child = self.parent_id == current_page_id
        self.is_child = self.is_current_child # Backward compatibility
        # Is my parent the same as the current page's?
        self.is_current_sibling = self.parent_id == current_parent_id
        # Am I the current page?
        try:
            request = context["request"]
        except KeyError:
            # No request context, most likely when tests are run.
            self.is_current = False
        else:
            self.is_current = self.slug == path_to_slug(request.path_info)
        # Is the current page me or any page up the parent chain?
        def is_c_or_a(page_id):
            parent_id = context["_parent_page_ids"].get(page_id)
            return self.id == page_id or (parent_id and is_c_or_a(parent_id))
        self.is_current_or_ascendant = lambda: bool(is_c_or_a(current_page_id))
        # Am I a primary page?
        self.is_primary = self.parent_id is None
        # What's an ID I can use in HTML?
        self.html_id = self.slug.replace("/", "-")
        # Default branch level - gets assigned in the page_menu tag.
        self.branch_level = 0
    def in_menu_template(self, template_name):
        """
        Returns False if the page is explicitly excluded from the given
        menu template, True otherwise.
        """
        # PAGE_MENU_TEMPLATES entries are (id, label, template) triples.
        if self.in_menus is not None:
            for i, l, t in settings.PAGE_MENU_TEMPLATES:
                if not unicode(i) in self.in_menus and t == template_name:
                    return False
        return True
class RichTextPage(Page, RichText):
    """
    Implements the default type of page with a single Rich Text
    content field.
    """
    # No extra fields: content comes from the RichText mixin.
    class Meta:
        verbose_name = _("Rich text page")
        verbose_name_plural = _("Rich text pages")
class Link(Page):
    """
    A general content type for creating external links in the page
    menu.
    """
    # No extra fields: the page's slug stores the external URL
    # (see ``Page.get_absolute_url`` handling for content_model == "link").
    class Meta:
        verbose_name = _("Link")
        verbose_name_plural = _("Links")
| bsd-2-clause |
trojkat/pylama_gjslint | pylama_gjslint/closure_linter/aliaspass.py | 2 | 7576 | #!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pass that scans for goog.scope aliases and lint/usage errors."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('nnaze@google.com (Nathan Naze)')
import itertools
from . import ecmametadatapass
from . import errors
from . import javascripttokens
from . import scopeutil
from . import tokenutil
from .common import error
# TODO(nnaze): Create a Pass interface and move this class, EcmaMetaDataPass,
# and related classes onto it.
def _GetAliasForIdentifier(identifier, alias_map):
"""Returns the aliased_symbol name for an identifier.
Example usage:
>>> alias_map = {'MyClass': 'goog.foo.MyClass'}
>>> _GetAliasForIdentifier('MyClass.prototype.action', alias_map)
'goog.foo.MyClass.prototype.action'
>>> _GetAliasForIdentifier('MyClass.prototype.action', {})
None
Args:
identifier: The identifier.
alias_map: A dictionary mapping a symbol to an alias.
Returns:
The aliased symbol name or None if not found.
"""
ns = identifier.split('.', 1)[0]
aliased_symbol = alias_map.get(ns)
if aliased_symbol:
return aliased_symbol + identifier[len(ns):]
class AliasPass(object):
  """Pass to identify goog.scope() usages.

  Identifies goog.scope() usages and finds lint/usage errors. Notes any
  aliases of symbols in Closurized namespaces (that is, reassignments
  such as "var MyClass = goog.foo.MyClass;") and annotates identifiers
  when they're using an alias (so they may be expanded to the full symbol
  later -- that "MyClass.prototype.action" refers to
  "goog.foo.MyClass.prototype.action" when expanded.).
  """
  def __init__(self, closurized_namespaces=None, error_handler=None):
    """Creates a new pass.

    Args:
      closurized_namespaces: A set of Closurized namespaces (e.g. 'goog').
      error_handler: An error handler to report lint errors to.
    """
    self._error_handler = error_handler
    # If we have namespaces, freeze the set.
    if closurized_namespaces:
      closurized_namespaces = frozenset(closurized_namespaces)
    self._closurized_namespaces = closurized_namespaces
  def Process(self, start_token):
    """Runs the pass on a token stream.

    Args:
      start_token: The first token in the stream.
    """
    # TODO(nnaze): Add more goog.scope usage checks.
    self._CheckGoogScopeCalls(start_token)
    # If we have closurized namespaces, identify aliased identifiers.
    if self._closurized_namespaces:
      context = start_token.metadata.context
      root_context = context.GetRoot()
      self._ProcessRootContext(root_context)
  def _CheckGoogScopeCalls(self, start_token):
    """Check goog.scope calls for lint/usage errors."""
    def IsScopeToken(token):
      # True for identifier tokens spelling exactly 'goog.scope'.
      return (token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER and
              token.string == 'goog.scope')
    # Find all the goog.scope tokens in the file
    scope_tokens = [t for t in start_token if IsScopeToken(t)]
    for token in scope_tokens:
      scope_context = token.metadata.context
      # goog.scope must be a statement directly under the file's root.
      if not (scope_context.type == ecmametadatapass.EcmaContext.STATEMENT and
              scope_context.parent.type == ecmametadatapass.EcmaContext.ROOT):
        self._MaybeReportError(
            error.Error(errors.INVALID_USE_OF_GOOG_SCOPE,
                        'goog.scope call not in global scope', token))
    # There should be only one goog.scope reference. Register errors for
    # every instance after the first.
    for token in scope_tokens[1:]:
      self._MaybeReportError(
          error.Error(errors.EXTRA_GOOG_SCOPE_USAGE,
                      'More than one goog.scope call in file.', token))
  def _MaybeReportError(self, err):
    """Report an error to the handler (if registered)."""
    if self._error_handler:
      self._error_handler.HandleError(err)
  @classmethod
  def _YieldAllContexts(cls, context):
    """Yields all contexts that are contained by the given context."""
    yield context
    for child_context in context.children:
      for descendent_child in cls._YieldAllContexts(child_context):
        yield descendent_child
  @staticmethod
  def _IsTokenInParentBlock(token, parent_block):
    """Determines whether the given token is contained by the given block.

    Args:
      token: A token
      parent_block: An EcmaContext.

    Returns:
      Whether the token is in a context that is or is a child of the given
      parent_block context.
    """
    context = token.metadata.context
    # Walk up the context chain looking for parent_block.
    while context:
      if context is parent_block:
        return True
      context = context.parent
    return False
  def _ProcessRootContext(self, root_context):
    """Processes all goog.scope blocks under the root context."""
    assert root_context.type is ecmametadatapass.EcmaContext.ROOT
    # Identify all goog.scope blocks.
    goog_scope_blocks = itertools.ifilter(
        scopeutil.IsGoogScopeBlock,
        self._YieldAllContexts(root_context))
    # Process each block to find aliases.
    for scope_block in goog_scope_blocks:
      self._ProcessGoogScopeBlock(scope_block)
  def _ProcessGoogScopeBlock(self, scope_block):
    """Scans a goog.scope block to find aliases and mark alias tokens."""
    alias_map = dict()
    # Iterate over every token in the scope_block. Each token points to one
    # context, but multiple tokens may point to the same context. We only want
    # to check each context once, so keep track of those we've seen.
    seen_contexts = set()
    token = scope_block.start_token
    while token and self._IsTokenInParentBlock(token, scope_block):
      token_context = token.metadata.context
      # Check to see if this token is an alias.
      if token_context not in seen_contexts:
        seen_contexts.add(token_context)
        # If this is a alias statement in the goog.scope block.
        if (token_context.type == ecmametadatapass.EcmaContext.VAR and
            token_context.parent.parent is scope_block):
          match = scopeutil.MatchAlias(token_context.parent)
          # If this is an alias, remember it in the map.
          if match:
            alias, symbol = match
            # Expand aliases-of-aliases to their fully qualified symbol.
            symbol = _GetAliasForIdentifier(symbol, alias_map) or symbol
            if scopeutil.IsInClosurizedNamespace(symbol,
                                                 self._closurized_namespaces):
              alias_map[alias] = symbol
      # If this token is an identifier that matches an alias,
      # mark the token as an alias to the original symbol.
      if (token.type is javascripttokens.JavaScriptTokenType.SIMPLE_LVALUE or
          token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER):
        identifier = tokenutil.GetIdentifierForToken(token)
        if identifier:
          aliased_symbol = _GetAliasForIdentifier(identifier, alias_map)
          if aliased_symbol:
            token.metadata.aliased_symbol = aliased_symbol
      token = token.next # Get next token
| bsd-3-clause |
tuxfux-hlp-notes/python-batches | archieves/Batch-63/14-files/myenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/escsm.py | 2930 | 7839 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# Character-class and state-transition tables for the escape-sequence
# coding state machines used by chardet's escape charset prober.
#
# Layout shared by every model below:
#   *_cls          maps each byte value 0x00-0xff to a character class
#   *_st           row-major transition table, indexed by
#                  state * classFactor + char_class
#   *CharLenTable  char length associated with each state
#   *SMModel       dict bundling the tables plus the charset name
#
# HZ-GB-2312: '~' (0x7e), '{' (0x7b) and '}' (0x7d) receive their own
# classes so the "~{" / "~}" shift sequences can be recognized.
HZ_cls = (
1,0,0,0,0,0,0,0,  # 00 - 07
0,0,0,0,0,0,0,0,  # 08 - 0f
0,0,0,0,0,0,0,0,  # 10 - 17
0,0,0,1,0,0,0,0,  # 18 - 1f
0,0,0,0,0,0,0,0,  # 20 - 27
0,0,0,0,0,0,0,0,  # 28 - 2f
0,0,0,0,0,0,0,0,  # 30 - 37
0,0,0,0,0,0,0,0,  # 38 - 3f
0,0,0,0,0,0,0,0,  # 40 - 47
0,0,0,0,0,0,0,0,  # 48 - 4f
0,0,0,0,0,0,0,0,  # 50 - 57
0,0,0,0,0,0,0,0,  # 58 - 5f
0,0,0,0,0,0,0,0,  # 60 - 67
0,0,0,0,0,0,0,0,  # 68 - 6f
0,0,0,0,0,0,0,0,  # 70 - 77
0,0,0,4,0,5,2,0,  # 78 - 7f
1,1,1,1,1,1,1,1,  # 80 - 87
1,1,1,1,1,1,1,1,  # 88 - 8f
1,1,1,1,1,1,1,1,  # 90 - 97
1,1,1,1,1,1,1,1,  # 98 - 9f
1,1,1,1,1,1,1,1,  # a0 - a7
1,1,1,1,1,1,1,1,  # a8 - af
1,1,1,1,1,1,1,1,  # b0 - b7
1,1,1,1,1,1,1,1,  # b8 - bf
1,1,1,1,1,1,1,1,  # c0 - c7
1,1,1,1,1,1,1,1,  # c8 - cf
1,1,1,1,1,1,1,1,  # d0 - d7
1,1,1,1,1,1,1,1,  # d8 - df
1,1,1,1,1,1,1,1,  # e0 - e7
1,1,1,1,1,1,1,1,  # e8 - ef
1,1,1,1,1,1,1,1,  # f0 - f7
1,1,1,1,1,1,1,1,  # f8 - ff
)

HZ_st = (
eStart,eError,     3,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart,     4,eError,# 10-17
     5,eError,     6,eError,     5,     5,     4,eError,# 18-1f
     4,eError,     4,     4,     4,eError,     4,eError,# 20-27
     4,eItsMe,eStart,eStart,eStart,eStart,eStart,eStart,# 28-2f
)

# All states consume single-byte units, hence all lengths are 0.
HZCharLenTable = (0, 0, 0, 0, 0, 0)

HZSMModel = {'classTable': HZ_cls,
             'classFactor': 6,
             'stateTable': HZ_st,
             'charLenTable': HZCharLenTable,
             'name': "HZ-GB-2312"}

# ISO-2022-CN: the ESC byte (0x1b) and the '$'/')' sequence bytes are the
# interesting classes; everything >= 0x80 shares one class.
ISO2022CN_cls = (
2,0,0,0,0,0,0,0,  # 00 - 07
0,0,0,0,0,0,0,0,  # 08 - 0f
0,0,0,0,0,0,0,0,  # 10 - 17
0,0,0,1,0,0,0,0,  # 18 - 1f
0,0,0,0,0,0,0,0,  # 20 - 27
0,3,0,0,0,0,0,0,  # 28 - 2f
0,0,0,0,0,0,0,0,  # 30 - 37
0,0,0,0,0,0,0,0,  # 38 - 3f
0,0,0,4,0,0,0,0,  # 40 - 47
0,0,0,0,0,0,0,0,  # 48 - 4f
0,0,0,0,0,0,0,0,  # 50 - 57
0,0,0,0,0,0,0,0,  # 58 - 5f
0,0,0,0,0,0,0,0,  # 60 - 67
0,0,0,0,0,0,0,0,  # 68 - 6f
0,0,0,0,0,0,0,0,  # 70 - 77
0,0,0,0,0,0,0,0,  # 78 - 7f
2,2,2,2,2,2,2,2,  # 80 - 87
2,2,2,2,2,2,2,2,  # 88 - 8f
2,2,2,2,2,2,2,2,  # 90 - 97
2,2,2,2,2,2,2,2,  # 98 - 9f
2,2,2,2,2,2,2,2,  # a0 - a7
2,2,2,2,2,2,2,2,  # a8 - af
2,2,2,2,2,2,2,2,  # b0 - b7
2,2,2,2,2,2,2,2,  # b8 - bf
2,2,2,2,2,2,2,2,  # c0 - c7
2,2,2,2,2,2,2,2,  # c8 - cf
2,2,2,2,2,2,2,2,  # d0 - d7
2,2,2,2,2,2,2,2,  # d8 - df
2,2,2,2,2,2,2,2,  # e0 - e7
2,2,2,2,2,2,2,2,  # e8 - ef
2,2,2,2,2,2,2,2,  # f0 - f7
2,2,2,2,2,2,2,2,  # f8 - ff
)

ISO2022CN_st = (
eStart,     3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eError,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eError,eError,eError,     4,eError,# 18-1f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 20-27
     5,     6,eError,eError,eError,eError,eError,eError,# 28-2f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 30-37
eError,eError,eError,eError,eError,eItsMe,eError,eStart,# 38-3f
)

ISO2022CNCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0)

ISO2022CNSMModel = {'classTable': ISO2022CN_cls,
                    'classFactor': 9,
                    'stateTable': ISO2022CN_st,
                    'charLenTable': ISO2022CNCharLenTable,
                    'name': "ISO-2022-CN"}

# ISO-2022-JP: several designation bytes ('$', '(', 'B', 'D', 'I', '@', ...)
# get distinct classes, giving the largest classFactor (10) of the models.
ISO2022JP_cls = (
2,0,0,0,0,0,0,0,  # 00 - 07
0,0,0,0,0,0,2,2,  # 08 - 0f
0,0,0,0,0,0,0,0,  # 10 - 17
0,0,0,1,0,0,0,0,  # 18 - 1f
0,0,0,0,7,0,0,0,  # 20 - 27
3,0,0,0,0,0,0,0,  # 28 - 2f
0,0,0,0,0,0,0,0,  # 30 - 37
0,0,0,0,0,0,0,0,  # 38 - 3f
6,0,4,0,8,0,0,0,  # 40 - 47
0,9,5,0,0,0,0,0,  # 48 - 4f
0,0,0,0,0,0,0,0,  # 50 - 57
0,0,0,0,0,0,0,0,  # 58 - 5f
0,0,0,0,0,0,0,0,  # 60 - 67
0,0,0,0,0,0,0,0,  # 68 - 6f
0,0,0,0,0,0,0,0,  # 70 - 77
0,0,0,0,0,0,0,0,  # 78 - 7f
2,2,2,2,2,2,2,2,  # 80 - 87
2,2,2,2,2,2,2,2,  # 88 - 8f
2,2,2,2,2,2,2,2,  # 90 - 97
2,2,2,2,2,2,2,2,  # 98 - 9f
2,2,2,2,2,2,2,2,  # a0 - a7
2,2,2,2,2,2,2,2,  # a8 - af
2,2,2,2,2,2,2,2,  # b0 - b7
2,2,2,2,2,2,2,2,  # b8 - bf
2,2,2,2,2,2,2,2,  # c0 - c7
2,2,2,2,2,2,2,2,  # c8 - cf
2,2,2,2,2,2,2,2,  # d0 - d7
2,2,2,2,2,2,2,2,  # d8 - df
2,2,2,2,2,2,2,2,  # e0 - e7
2,2,2,2,2,2,2,2,  # e8 - ef
2,2,2,2,2,2,2,2,  # f0 - f7
2,2,2,2,2,2,2,2,  # f8 - ff
)

ISO2022JP_st = (
eStart,     3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eStart,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,# 18-1f
eError,     5,eError,eError,eError,     4,eError,eError,# 20-27
eError,eError,eError,     6,eItsMe,eError,eItsMe,eError,# 28-2f
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,# 30-37
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 38-3f
eError,eError,eError,eError,eItsMe,eError,eStart,eStart,# 40-47
)

ISO2022JPCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)

ISO2022JPSMModel = {'classTable': ISO2022JP_cls,
                    'classFactor': 10,
                    'stateTable': ISO2022JP_st,
                    'charLenTable': ISO2022JPCharLenTable,
                    'name': "ISO-2022-JP"}

# ISO-2022-KR: only '$' (0x24) and ')' (0x29) plus one designation byte
# matter, so this is the smallest escape model (classFactor 6).
ISO2022KR_cls = (
2,0,0,0,0,0,0,0,  # 00 - 07
0,0,0,0,0,0,0,0,  # 08 - 0f
0,0,0,0,0,0,0,0,  # 10 - 17
0,0,0,1,0,0,0,0,  # 18 - 1f
0,0,0,0,3,0,0,0,  # 20 - 27
0,4,0,0,0,0,0,0,  # 28 - 2f
0,0,0,0,0,0,0,0,  # 30 - 37
0,0,0,0,0,0,0,0,  # 38 - 3f
0,0,0,5,0,0,0,0,  # 40 - 47
0,0,0,0,0,0,0,0,  # 48 - 4f
0,0,0,0,0,0,0,0,  # 50 - 57
0,0,0,0,0,0,0,0,  # 58 - 5f
0,0,0,0,0,0,0,0,  # 60 - 67
0,0,0,0,0,0,0,0,  # 68 - 6f
0,0,0,0,0,0,0,0,  # 70 - 77
0,0,0,0,0,0,0,0,  # 78 - 7f
2,2,2,2,2,2,2,2,  # 80 - 87
2,2,2,2,2,2,2,2,  # 88 - 8f
2,2,2,2,2,2,2,2,  # 90 - 97
2,2,2,2,2,2,2,2,  # 98 - 9f
2,2,2,2,2,2,2,2,  # a0 - a7
2,2,2,2,2,2,2,2,  # a8 - af
2,2,2,2,2,2,2,2,  # b0 - b7
2,2,2,2,2,2,2,2,  # b8 - bf
2,2,2,2,2,2,2,2,  # c0 - c7
2,2,2,2,2,2,2,2,  # c8 - cf
2,2,2,2,2,2,2,2,  # d0 - d7
2,2,2,2,2,2,2,2,  # d8 - df
2,2,2,2,2,2,2,2,  # e0 - e7
2,2,2,2,2,2,2,2,  # e8 - ef
2,2,2,2,2,2,2,2,  # f0 - f7
2,2,2,2,2,2,2,2,  # f8 - ff
)

ISO2022KR_st = (
eStart,     3,eError,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eError,     4,eError,eError,# 10-17
eError,eError,eError,eError,     5,eError,eError,eError,# 18-1f
eError,eError,eError,eItsMe,eStart,eStart,eStart,eStart,# 20-27
)

ISO2022KRCharLenTable = (0, 0, 0, 0, 0, 0)

ISO2022KRSMModel = {'classTable': ISO2022KR_cls,
                    'classFactor': 6,
                    'stateTable': ISO2022KR_st,
                    'charLenTable': ISO2022KRCharLenTable,
                    'name': "ISO-2022-KR"}
# flake8: noqa
| gpl-3.0 |
pratikmallya/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/geos/tests/test_geos_mutation.py | 109 | 5305 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
from django.utils import unittest
from django.utils.unittest import skipUnless
from .. import HAS_GEOS
if HAS_GEOS:
from .. import *
from ..error import GEOSIndexError
def getItem(o, i):
    """Read ``o[i]`` — wraps item access so it can be passed as a callable."""
    return o[i]

def delItem(o, i):
    """Delete ``o[i]`` — wraps item deletion so it can be passed as a callable."""
    del o[i]

def setItem(o, i, v):
    """Assign ``o[i] = v`` — wraps item assignment so it can be passed as a callable."""
    o[i] = v
if HAS_GEOS:
    # One-argument accessors exercising the public GEOS geometry API.
    # They are collected by name below, so each must keep the
    # 'api_get_' prefix.
    def api_get_distance(x): return x.distance(Point(-200,-200))
    def api_get_buffer(x): return x.buffer(10)
    def api_get_geom_typeid(x): return x.geom_typeid
    def api_get_num_coords(x): return x.num_coords
    def api_get_centroid(x): return x.centroid
    def api_get_empty(x): return x.empty
    def api_get_valid(x): return x.valid
    def api_get_simple(x): return x.simple
    def api_get_ring(x): return x.ring
    def api_get_boundary(x): return x.boundary
    def api_get_convex_hull(x): return x.convex_hull
    def api_get_extent(x): return x.extent
    def api_get_area(x): return x.area
    def api_get_length(x): return x.length

# Every module-level callable whose name starts with 'api_get_' (found by
# scanning vars()); the test cases apply each one to two geometries and
# compare the results.
geos_function_tests = [ val for name, val in vars().items()
                        if hasattr(val, '__call__')
                        and name.startswith('api_get_') ]
@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSMutationTest(unittest.TestCase):
    """
    Tests Pythonic Mutability of Python GEOS geometry wrappers
    get/set/delitem on a slice, normal list methods
    """

    def test00_GEOSIndexException(self):
        'Testing Geometry GEOSIndexError'
        p = Point(1,2)
        # Indices -2..1 are valid for a 2D point; 2 and -3 are out of range.
        for i in range(-2,2): p._checkindex(i)
        self.assertRaises(GEOSIndexError, p._checkindex, 2)
        self.assertRaises(GEOSIndexError, p._checkindex, -3)

    def test01_PointMutations(self):
        'Testing Point mutations'
        # Exercise both construction paths: Python args and WKT parsing.
        for p in (Point(1,2,3), fromstr('POINT (1 2 3)')):
            self.assertEqual(p._get_single_external(1), 2.0, 'Point _get_single_external')

            # _set_single replaces one ordinate in place.
            p._set_single(0,100)
            self.assertEqual(p.coords, (100.0,2.0,3.0), 'Point _set_single')

            # _set_list replaces the whole coordinate sequence (here 3D -> 2D).
            p._set_list(2,(50,3141))
            self.assertEqual(p.coords, (50.0,3141.0), 'Point _set_list')

    def test02_PointExceptions(self):
        'Testing Point exceptions'
        # A point needs 2 or 3 ordinates; 1 and 4 must be rejected.
        self.assertRaises(TypeError, Point, range(1))
        self.assertRaises(TypeError, Point, range(4))

    def test03_PointApi(self):
        'Testing Point API'
        q = Point(4,5,3)
        for p in (Point(1,2,3), fromstr('POINT (1 2 3)')):
            # After the slice assignment p should equal q under every
            # api_get_* accessor.
            p[0:2] = [4,5]
            for f in geos_function_tests:
                self.assertEqual(f(q), f(p), 'Point ' + f.__name__)

    def test04_LineStringMutations(self):
        'Testing LineString mutations'
        for ls in (LineString((1,0),(4,1),(6,-1)),
                   fromstr('LINESTRING (1 0,4 1,6 -1)')):
            self.assertEqual(ls._get_single_external(1), (4.0,1.0), 'LineString _get_single_external')

            # _set_single replaces one vertex.
            ls._set_single(0,(-50,25))
            self.assertEqual(ls.coords, ((-50.0,25.0),(4.0,1.0),(6.0,-1.0)), 'LineString _set_single')

            # _set_list replaces the whole vertex list (3 points -> 2).
            ls._set_list(2, ((-50.0,25.0),(6.0,-1.0)))
            self.assertEqual(ls.coords, ((-50.0,25.0),(6.0,-1.0)), 'LineString _set_list')

            # A fresh LineString built from the mutated coords must agree
            # with the mutated original under every accessor.
            lsa = LineString(ls.coords)
            for f in geos_function_tests:
                self.assertEqual(f(lsa), f(ls), 'LineString ' + f.__name__)

    def test05_Polygon(self):
        'Testing Polygon mutations'
        for pg in (Polygon(((1,0),(4,1),(6,-1),(8,10),(1,0)),
                           ((5,4),(6,4),(6,3),(5,4))),
                   fromstr('POLYGON ((1 0,4 1,6 -1,8 10,1 0),(5 4,6 4,6 3,5 4))')):
            # Index 0 is the exterior ring, index 1 the first interior ring.
            self.assertEqual(pg._get_single_external(0),
                             LinearRing((1,0),(4,1),(6,-1),(8,10),(1,0)),
                             'Polygon _get_single_external(0)')
            self.assertEqual(pg._get_single_external(1),
                             LinearRing((5,4),(6,4),(6,3),(5,4)),
                             'Polygon _get_single_external(1)')

            # _set_list replaces both rings at once.
            pg._set_list(2, (((1,2),(10,0),(12,9),(-1,15),(1,2)),
                             ((4,2),(5,2),(5,3),(4,2))))
            self.assertEqual(pg.coords,
                             (((1.0,2.0),(10.0,0.0),(12.0,9.0),(-1.0,15.0),(1.0,2.0)),
                              ((4.0,2.0),(5.0,2.0),(5.0,3.0),(4.0,2.0))),
                             'Polygon _set_list')

            lsa = Polygon(*pg.coords)
            for f in geos_function_tests:
                self.assertEqual(f(lsa), f(pg), 'Polygon ' + f.__name__)

    def test06_Collection(self):
        'Testing Collection mutations'
        for mp in (MultiPoint(*map(Point,((3,4),(-1,2),(5,-4),(2,8)))),
                   fromstr('MULTIPOINT (3 4,-1 2,5 -4,2 8)')):
            self.assertEqual(mp._get_single_external(2), Point(5,-4), 'Collection _get_single_external')

            # _set_list replaces all member points (4 -> 3).
            mp._set_list(3, map(Point,((5,5),(3,-2),(8,1))))
            self.assertEqual(mp.coords, ((5.0,5.0),(3.0,-2.0),(8.0,1.0)), 'Collection _set_list')

            lsa = MultiPoint(*map(Point,((5,5),(3,-2),(8,1))))
            for f in geos_function_tests:
                self.assertEqual(f(lsa), f(mp), 'MultiPoint ' + f.__name__)
| apache-2.0 |
zeptonaut/catapult | third_party/Paste/paste/util/dateinterval.py | 50 | 2412 | """
DateInterval.py
Convert interval strings (in the form of 1w2d, etc) to
seconds, and back again. Is not exactly about months or
years (leap years in particular).
Accepts (y)ear, (b)month, (w)eek, (d)ay, (h)our, (m)inute, (s)econd.
Exports only timeEncode and timeDecode functions.
"""
import re
__all__ = ['interval_decode', 'interval_encode']
second = 1
minute = second*60
hour = minute*60
day = hour*24
week = day*7
month = day*30
year = day*365
timeValues = {
'y': year,
'b': month,
'w': week,
'd': day,
'h': hour,
'm': minute,
's': second,
}
timeOrdered = list(timeValues.items())
timeOrdered.sort(key=lambda x: x[1], reverse=True)
def interval_encode(seconds, include_sign=False):
"""Encodes a number of seconds (representing a time interval)
into a form like 1h2d3s.
>>> interval_encode(10)
'10s'
>>> interval_encode(493939)
'5d17h12m19s'
"""
s = ''
orig = seconds
seconds = abs(seconds)
for char, amount in timeOrdered:
if seconds >= amount:
i, seconds = divmod(seconds, amount)
s += '%i%s' % (i, char)
if orig < 0:
s = '-' + s
elif not orig:
return '0'
elif include_sign:
s = '+' + s
return s
_timeRE = re.compile(r'[0-9]+[a-zA-Z]')
def interval_decode(s):
"""Decodes a number in the format 1h4d3m (1 hour, 3 days, 3 minutes)
into a number of seconds
>>> interval_decode('40s')
40
>>> interval_decode('10000s')
10000
>>> interval_decode('3d1w45s')
864045
"""
time = 0
sign = 1
s = s.strip()
if s.startswith('-'):
s = s[1:]
sign = -1
elif s.startswith('+'):
s = s[1:]
for match in allMatches(s, _timeRE):
char = match.group(0)[-1].lower()
if char not in timeValues:
# @@: should signal error
continue
time += int(match.group(0)[:-1]) * timeValues[char]
return time
# @@-sgd 2002-12-23 - this function does not belong in this module, find a better place.
def allMatches(source, regex):
"""Return a list of matches for regex in source
"""
pos = 0
end = len(source)
rv = []
match = regex.search(source, pos)
while match:
rv.append(match)
match = regex.search(source, match.end() )
return rv
if __name__ == '__main__':
import doctest
doctest.testmod()
| bsd-3-clause |
jantman/cobbler | cobbler/modules/scm_track.py | 3 | 3276 | """
(C) 2009, Red Hat Inc.
Michael DeHaan <michael.dehaan AT gmail>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import distutils.sysconfig
import sys
import os
import traceback
from cobbler.cexceptions import *
import os
import sys
#import xmlrpclib
import cobbler.module_loader as module_loader
import cobbler.utils as utils
# Prepend <site-packages>/cobbler to sys.path so the cobbler package is
# importable when this trigger module is loaded stand-alone.
plib = distutils.sysconfig.get_python_lib()
mod_path="%s/cobbler" % plib
sys.path.insert(0, mod_path)
def register():
    """Register this module as a cobbler change trigger.

    This pure-python trigger acts as if it were a legacy shell trigger,
    but is much faster.  The returned glob identifies the trigger slot
    this module handles.
    """
    return "/var/lib/cobbler/triggers/change/*"
def run(api,args,logger):
    """Commit /var/lib/cobbler to the configured SCM after a change.

    Reads ``scm_track_enabled`` / ``scm_track_mode`` from the cobbler
    settings.  Supported modes are "git" and "hg"; any other mode raises
    CX.  Returns 0 on success or when tracking is disabled.
    """
    settings = api.settings()

    scm_track_enabled = str(settings.scm_track_enabled).lower()
    mode = str(settings.scm_track_mode).lower()

    if scm_track_enabled not in [ "y", "yes", "1", "true" ]:
        # feature disabled
        return 0

    if mode not in ("git", "hg"):
        raise CX("currently unsupported SCM type: %s" % mode)

    old_dir = os.getcwd()
    os.chdir("/var/lib/cobbler")
    if os.getcwd() != "/var/lib/cobbler":
        # Bug fix: the original raised a bare string here, which is a
        # TypeError on any modern Python; raise a real exception.
        raise CX("danger will robinson")

    try:
        if mode == "git":
            if not os.path.exists("/var/lib/cobbler/.git"):
                rc = utils.subprocess_call(logger,"git init",shell=True)

            # FIXME: if we know the remote user of an XMLRPC call
            # use them as the author
            rc = utils.subprocess_call(logger,"git add --all config",shell=True)
            rc = utils.subprocess_call(logger,"git add --all kickstarts",shell=True)
            rc = utils.subprocess_call(logger,"git add --all snippets",shell=True)
            rc = utils.subprocess_call(logger,"git commit -m 'API update' --author 'cobbler <root@localhost.localdomain>'",shell=True)
        else:
            # use mercurial
            if not os.path.exists("/var/lib/cobbler/.hg"):
                rc = utils.subprocess_call(logger,"hg init",shell=True)

            # FIXME: if we know the remote user of an XMLRPC call
            # use them as the user
            rc = utils.subprocess_call(logger,"hg add config",shell=True)
            rc = utils.subprocess_call(logger,"hg add kickstarts",shell=True)
            rc = utils.subprocess_call(logger,"hg add snippets",shell=True)
            rc = utils.subprocess_call(logger,"hg commit -m 'API update' --user 'cobbler <root@localhost.localdomain>'",shell=True)
    finally:
        # Always restore the previous working directory, even when a
        # subprocess call raises.
        os.chdir(old_dir)

    return 0
| gpl-2.0 |
yarikoptic/seaborn | seaborn/tests/test_algorithms.py | 1 | 6588 | import numpy as np
from scipy import stats
from six.moves import range
import numpy.testing as npt
from numpy.testing import assert_array_equal
import nose.tools
from nose.tools import assert_equal, raises
from .. import algorithms as algo
# Deterministic RNG shared by the tests below; seeding from the module
# name keeps every run reproducible.
rs = np.random.RandomState(sum(map(ord, "test_algorithms")))
a_norm = rs.randn(100)


def test_bootstrap():
    """Test that bootstrapping gives the right answer in dumb cases."""
    a_ones = np.ones(10)
    n_boot = 5
    out1 = algo.bootstrap(a_ones, n_boot=n_boot)
    assert_array_equal(out1, np.ones(n_boot))
    out2 = algo.bootstrap(a_ones, n_boot=n_boot, func=np.median)
    assert_array_equal(out2, np.ones(n_boot))


def test_bootstrap_length():
    """Test that we get a bootstrap array of the right shape."""
    out = algo.bootstrap(a_norm)
    assert_equal(len(out), 10000)

    n_boot = 100
    out = algo.bootstrap(a_norm, n_boot=n_boot)
    assert_equal(len(out), n_boot)


def test_bootstrap_range():
    """Test that bootstrapping a random array stays within the right range."""
    # NOTE(review): `min` and `max` shadow the builtins; harmless here but
    # worth renaming if the function grows.
    min, max = a_norm.min(), a_norm.max()
    out = algo.bootstrap(a_norm)
    nose.tools.assert_less(min, out.min())
    nose.tools.assert_greater_equal(max, out.max())


def test_bootstrap_multiarg():
    """Test that bootstrap works with multiple input arrays."""
    x = np.vstack([[1, 10] for i in range(10)])
    y = np.vstack([[5, 5] for i in range(10)])

    # Elementwise maximum over the stacked pair of rows.
    test_func = lambda x, y: np.vstack((x, y)).max(axis=0)
    out_actual = algo.bootstrap(x, y, n_boot=2, func=test_func)
    out_wanted = np.array([[5, 10], [5, 10]])
    assert_array_equal(out_actual, out_wanted)


def test_bootstrap_axis():
    """Test axis kwarg to bootstrap function."""
    x = rs.randn(10, 20)
    n_boot = 100

    # Without axis the statistic collapses the whole array ...
    out_default = algo.bootstrap(x, n_boot=n_boot)
    assert_equal(out_default.shape, (n_boot,))

    # ... with axis=0 it is computed per column.
    out_axis = algo.bootstrap(x, n_boot=n_boot, axis=0)
    assert_equal(out_axis.shape, (n_boot, 20))


def test_bootstrap_random_seed():
    """Test that we can get reproducible resamples by seeding the RNG."""
    data = rs.randn(50)
    seed = 42
    boots1 = algo.bootstrap(data, random_seed=seed)
    boots2 = algo.bootstrap(data, random_seed=seed)
    assert_array_equal(boots1, boots2)


def test_smooth_bootstrap():
    """Test smooth bootstrap."""
    x = rs.randn(15)
    n_boot = 100
    out_smooth = algo.bootstrap(x, n_boot=n_boot,
                                smooth=True, func=np.median)
    # With kernel smoothing the bootstrap median should not coincide with
    # any raw data point.
    assert(not np.median(out_smooth) in x)


def test_bootstrap_ols():
    """Test bootstrap of OLS model fit."""
    # Closed-form OLS: (X'X)^-1 X'y.
    ols_fit = lambda X, y: np.dot(np.dot(np.linalg.inv(
                                  np.dot(X.T, X)), X.T), y)
    X = np.column_stack((rs.randn(50, 4), np.ones(50)))
    w = [2, 4, 0, 3, 5]
    y_noisy = np.dot(X, w) + rs.randn(50) * 20
    y_lownoise = np.dot(X, w) + rs.randn(50)

    n_boot = 500
    w_boot_noisy = algo.bootstrap(X, y_noisy,
                                  n_boot=n_boot,
                                  func=ols_fit)
    w_boot_lownoise = algo.bootstrap(X, y_lownoise,
                                     n_boot=n_boot,
                                     func=ols_fit)

    assert_equal(w_boot_noisy.shape, (n_boot, 5))
    assert_equal(w_boot_lownoise.shape, (n_boot, 5))
    # Noisier targets must produce a wider bootstrap distribution.
    nose.tools.assert_greater(w_boot_noisy.std(),
                              w_boot_lownoise.std())


def test_bootstrap_units():
    """Test that results make sense when passing unit IDs to bootstrap."""
    data = rs.randn(50)
    ids = np.repeat(range(10), 5)
    # Add a shared between-unit error so observations within a unit are
    # correlated; unit-level resampling should then inflate the variance.
    bwerr = rs.normal(0, 2, 10)
    bwerr = bwerr[ids]
    data_rm = data + bwerr
    seed = 77

    boots_orig = algo.bootstrap(data_rm, random_seed=seed)
    boots_rm = algo.bootstrap(data_rm, units=ids, random_seed=seed)
    nose.tools.assert_greater(boots_rm.std(), boots_orig.std())


@raises(ValueError)
def test_bootstrap_arglength():
    """Test that different length args raise ValueError."""
    algo.bootstrap(range(5), range(10))


@raises(TypeError)
def test_bootstrap_noncallable():
    """Test that we get a TypeError with noncallable algo.unc."""
    non_func = "mean"
    algo.bootstrap(a_norm, 100, non_func)


def test_randomize_corrmat():
    """Test the correctness of the correlation matrix p values."""
    a = rs.randn(30)
    b = a + rs.rand(30) * 3
    c = rs.randn(30)
    d = [a, b, c]

    # b is strongly correlated with a; c is independent noise.
    p_mat, dist = algo.randomize_corrmat(d, tail="upper", corrected=False,
                                         return_dist=True)
    nose.tools.assert_greater(p_mat[2, 0], p_mat[1, 0])

    # The p value must match the permutation-distribution percentile.
    corrmat = np.corrcoef(d)
    pctile = 100 - stats.percentileofscore(dist[2, 1], corrmat[2, 1])
    nose.tools.assert_almost_equal(p_mat[2, 1] * 100, pctile)

    d[1] = -a + rs.rand(30)
    p_mat = algo.randomize_corrmat(d)
    nose.tools.assert_greater(0.05, p_mat[1, 0])


def test_randomize_corrmat_dist():
    """Test that the distribution looks right."""
    a = rs.randn(3, 20)
    for n_i in [5, 10]:
        p_mat, dist = algo.randomize_corrmat(a, n_iter=n_i, return_dist=True)
        assert_equal(n_i, dist.shape[-1])

    p_mat, dist = algo.randomize_corrmat(a, n_iter=10000, return_dist=True)

    # Diagonal correlations are always exactly 1.
    diag_mean = dist[0, 0].mean()
    assert_equal(diag_mean, 1)

    # Off-diagonal permuted correlations should average near zero.
    off_diag_mean = dist[0, 1].mean()
    nose.tools.assert_greater(0.05, off_diag_mean)


def test_randomize_corrmat_correction():
    """Test that FWE correction works."""
    a = rs.randn(3, 20)
    p_mat = algo.randomize_corrmat(a, "upper", False)
    p_mat_corr = algo.randomize_corrmat(a, "upper", True)
    triu = np.triu_indices(3, 1)
    # Corrected p values are never smaller than uncorrected ones.
    npt.assert_array_less(p_mat[triu], p_mat_corr[triu])


# NOTE(review): the function name misspells 'randomize'; kept as-is since
# this is a doc-only pass (nose still discovers it via the 'test_' prefix).
def test_randimoize_corrmat_tails():
    """Test that the tail argument works."""
    a = rs.randn(30)
    b = a + rs.rand(30) * 8
    c = rs.randn(30)
    d = [a, b, c]

    p_mat_b = algo.randomize_corrmat(d, "both", False, random_seed=0)
    p_mat_u = algo.randomize_corrmat(d, "upper", False, random_seed=0)
    p_mat_l = algo.randomize_corrmat(d, "lower", False, random_seed=0)
    assert_equal(p_mat_b[0, 1], p_mat_u[0, 1] * 2)
    assert_equal(p_mat_l[0, 1], 1 - p_mat_u[0, 1])


def test_randomise_corrmat_seed():
    """Test that we can seed the corrmat randomization."""
    a = rs.randn(3, 20)
    _, dist1 = algo.randomize_corrmat(a, random_seed=0, return_dist=True)
    _, dist2 = algo.randomize_corrmat(a, random_seed=0, return_dist=True)
    assert_array_equal(dist1, dist2)


@raises(ValueError)
def test_randomize_corrmat_tail_error():
    """Test that we are strict about the tail parameter."""
    a = rs.randn(3, 30)
    algo.randomize_corrmat(a, "hello")
| bsd-3-clause |
geerlingguy/ansible | test/support/integration/plugins/modules/selinux.py | 60 | 8292 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Derek Carter<goozbach@friocorte.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'
}
DOCUMENTATION = r'''
---
module: selinux
short_description: Change policy and state of SELinux
description:
- Configures the SELinux mode and policy.
- A reboot may be required after usage.
- Ansible will not issue this reboot but will let you know when it is required.
version_added: "0.7"
options:
policy:
description:
- The name of the SELinux policy to use (e.g. C(targeted)) will be required if state is not C(disabled).
state:
description:
- The SELinux mode.
required: true
choices: [ disabled, enforcing, permissive ]
configfile:
description:
- The path to the SELinux configuration file, if non-standard.
default: /etc/selinux/config
aliases: [ conf, file ]
requirements: [ libselinux-python ]
author:
- Derek Carter (@goozbach) <goozbach@friocorte.com>
'''
EXAMPLES = r'''
- name: Enable SELinux
selinux:
policy: targeted
state: enforcing
- name: Put SELinux in permissive mode, logging actions that would be blocked.
selinux:
policy: targeted
state: permissive
- name: Disable SELinux
selinux:
state: disabled
'''
# Return-value documentation rendered by ansible-doc; fixes the grammar
# defect "an reboot" -> "a reboot" in the reboot_required description.
RETURN = r'''
msg:
    description: Messages that describe changes that were made.
    returned: always
    type: str
    sample: Config SELinux state changed from 'disabled' to 'permissive'
configfile:
    description: Path to SELinux configuration file.
    returned: always
    type: str
    sample: /etc/selinux/config
policy:
    description: Name of the SELinux policy.
    returned: always
    type: str
    sample: targeted
state:
    description: SELinux mode.
    returned: always
    type: str
    sample: enforcing
reboot_required:
    description: Whether or not a reboot is required for the changes to take effect.
    returned: always
    type: bool
    sample: true
'''
import os
import re
import tempfile
import traceback
SELINUX_IMP_ERR = None
try:
import selinux
HAS_SELINUX = True
except ImportError:
SELINUX_IMP_ERR = traceback.format_exc()
HAS_SELINUX = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.facts.utils import get_file_lines
# getter subroutines
def _read_config_key(configfile, key):
    """Return the value of the first '<key>=VALUE' line in configfile,
    or None if no such line exists.  Shared by the two getters below,
    which previously duplicated this scan."""
    lines = get_file_lines(configfile, strip=False)

    for line in lines:
        if re.match(r'^%s=.*$' % key, line):
            return line.split('=')[1].strip()
    return None


def get_config_state(configfile):
    """Return the configured SELinux state (e.g. 'enforcing') or None."""
    # 'SELINUX=' does not match 'SELINUXTYPE=' lines because the regex is
    # anchored and requires '=' right after the key.
    return _read_config_key(configfile, 'SELINUX')


def get_config_policy(configfile):
    """Return the configured SELinux policy type (e.g. 'targeted') or None."""
    return _read_config_key(configfile, 'SELINUXTYPE')
# setter subroutines
def set_config_state(module, state, configfile):
    """Persist 'SELINUX=<state>' into configfile (e.g. /etc/selinux/config)
    by rewriting it through a temp file and an atomic move."""
    # SELINUX=permissive
    # edit config file with state value
    stateline = 'SELINUX=%s' % state
    lines = get_file_lines(configfile, strip=False)

    tmpfd, tmpfile = tempfile.mkstemp()
    # Bug fix: mkstemp() hands back an already-open OS-level descriptor;
    # the original code re-opened the path by name and leaked tmpfd.
    os.close(tmpfd)

    with open(tmpfile, "w") as write_file:
        for line in lines:
            write_file.write(re.sub(r'^SELINUX=.*', stateline, line) + '\n')

    module.atomic_move(tmpfile, configfile)
def set_state(module, state):
    """Apply the requested SELinux runtime state via libselinux.

    'enforcing' and 'permissive' map onto security_setenforce(1/0);
    'disabled' cannot be applied at runtime and is a no-op here (it is
    handled through the config file plus a reboot); anything else fails
    the module.
    """
    enforce_map = {'enforcing': 1, 'permissive': 0}
    if state in enforce_map:
        selinux.security_setenforce(enforce_map[state])
    elif state != 'disabled':
        module.fail_json(msg='trying to set invalid runtime state %s' % state)
def set_config_policy(module, policy, configfile):
    """Persist 'SELINUXTYPE=<policy>' into configfile after checking that
    the policy actually exists under /etc/selinux/."""
    if not os.path.exists('/etc/selinux/%s/policy' % policy):
        module.fail_json(msg='Policy %s does not exist in /etc/selinux/' % policy)

    # edit config file with state value
    # SELINUXTYPE=targeted
    policyline = 'SELINUXTYPE=%s' % policy
    lines = get_file_lines(configfile, strip=False)

    tmpfd, tmpfile = tempfile.mkstemp()
    # Bug fix: close the descriptor returned by mkstemp(); the original
    # code leaked it by re-opening the path by name.
    os.close(tmpfd)

    with open(tmpfile, "w") as write_file:
        for line in lines:
            write_file.write(re.sub(r'^SELINUXTYPE=.*', policyline, line) + '\n')

    module.atomic_move(tmpfile, configfile)
def main():
    """Entry point: reconcile requested SELinux state/policy with the
    runtime state (libselinux) and the persisted config file, reporting
    changes and whether a reboot is needed."""
    module = AnsibleModule(
        argument_spec=dict(
            policy=dict(type='str'),
            # NOTE(review): required='True' is a string, not the bool True;
            # it behaves the same (truthy) but should be cleaned up.
            state=dict(type='str', required='True', choices=['enforcing', 'permissive', 'disabled']),
            configfile=dict(type='str', default='/etc/selinux/config', aliases=['conf', 'file']),
        ),
        supports_check_mode=True,
    )

    if not HAS_SELINUX:
        module.fail_json(msg=missing_required_lib('libselinux-python'), exception=SELINUX_IMP_ERR)

    # global vars
    changed = False
    msgs = []
    configfile = module.params['configfile']
    policy = module.params['policy']
    state = module.params['state']

    # Snapshot the current runtime situation before making any change.
    runtime_enabled = selinux.is_selinux_enabled()
    runtime_policy = selinux.selinux_getpolicytype()[1]
    runtime_state = 'disabled'
    reboot_required = False

    if runtime_enabled:
        # enabled means 'enforcing' or 'permissive'
        if selinux.security_getenforce():
            runtime_state = 'enforcing'
        else:
            runtime_state = 'permissive'

    if not os.path.isfile(configfile):
        module.fail_json(msg="Unable to find file {0}".format(configfile),
                         details="Please install SELinux-policy package, "
                                 "if this package is not installed previously.")

    config_policy = get_config_policy(configfile)
    config_state = get_config_state(configfile)

    # check to see if policy is set if state is not 'disabled'
    if state != 'disabled':
        if not policy:
            module.fail_json(msg="Policy is required if state is not 'disabled'")
    else:
        if not policy:
            # Fall back to whatever the config file already declares.
            policy = config_policy

    # check changed values and run changes
    if policy != runtime_policy:
        if module.check_mode:
            module.exit_json(changed=True)
        # cannot change runtime policy
        msgs.append("Running SELinux policy changed from '%s' to '%s'" % (runtime_policy, policy))
        changed = True

    if policy != config_policy:
        if module.check_mode:
            module.exit_json(changed=True)
        set_config_policy(module, policy, configfile)
        msgs.append("SELinux policy configuration in '%s' changed from '%s' to '%s'" % (configfile, config_policy, policy))
        changed = True

    if state != runtime_state:
        if runtime_enabled:
            if state == 'disabled':
                if runtime_state != 'permissive':
                    # Temporarily set state to permissive
                    if not module.check_mode:
                        set_state(module, 'permissive')
                        module.warn("SELinux state temporarily changed from '%s' to 'permissive'. State change will take effect next reboot." % (runtime_state))
                        changed = True
                else:
                    module.warn('SELinux state change will take effect next reboot')
                reboot_required = True
            else:
                if not module.check_mode:
                    set_state(module, state)
                    msgs.append("SELinux state changed from '%s' to '%s'" % (runtime_state, state))

                    # Only report changes if the file is changed.
                    # This prevents the task from reporting changes every time the task is run.
                    changed = True
        else:
            # SELinux is fully disabled at runtime; only a reboot can
            # bring it up in the requested mode.
            module.warn("Reboot is required to set SELinux state to '%s'" % state)
            reboot_required = True

    if state != config_state:
        if not module.check_mode:
            set_config_state(module, state, configfile)
            msgs.append("Config SELinux state changed from '%s' to '%s'" % (config_state, state))
            changed = True

    module.exit_json(changed=changed, msg=', '.join(msgs), configfile=configfile, policy=policy, state=state, reboot_required=reboot_required)


if __name__ == '__main__':
    main()
| gpl-3.0 |
flakerimi/ajenti | plugins/hosts/backend.py | 16 | 3343 | import re
import os
from ajenti.api import *
from ajenti.utils import *
from ajenti.com import *
from ajenti import apis
class Host:
    """Value object describing one /etc/hosts entry."""

    def __init__(self):
        # IP address, canonical hostname and space-separated aliases;
        # all start out as empty strings and are filled in by Config.read().
        self.ip = ''
        self.name = ''
        self.aliases = ''
class Config(Plugin):
    """Ajenti configurable exposing /etc/hosts as a list of Host objects."""
    implements(IConfigurable)
    name = 'Hosts'
    icon = '/dl/hosts/icon.png'
    id = 'hosts'

    def list_files(self):
        # Files managed by this configurable (used by the config manager).
        return ['/etc/hosts']

    def read(self):
        """Parse /etc/hosts into a list of Host objects, skipping blank
        lines and comments."""
        ss = ConfManager.get().load('hosts', '/etc/hosts').split('\n')
        r = []

        for s in ss:
            if s != '' and s[0] != '#':
                try:
                    s = s.split()
                    h = Host()
                    try:
                        h.ip = s[0]
                        h.name = s[1]
                        # Everything after the hostname is an alias.
                        for i in range(2, len(s)):
                            h.aliases += '%s ' % s[i]
                        h.aliases = h.aliases.rstrip();
                    except:
                        # NOTE(review): bare except silently tolerates
                        # malformed lines (e.g. IP with no hostname).
                        pass
                    r.append(h)
                except:
                    pass
        return r

    def save(self, hh):
        """Serialize the Host list back to /etc/hosts (tab-separated) and
        commit through the config manager."""
        d = ''
        for h in hh:
            d += '%s\t%s\t%s\n' % (h.ip, h.name, h.aliases)
        ConfManager.get().save('hosts', '/etc/hosts', d)
        ConfManager.get().commit('hosts')

    def gethostname(self):
        # Delegate to the platform-specific hostname backend.
        return self.app.get_backend(IHostnameManager).gethostname()

    def sethostname(self, hn):
        self.app.get_backend(IHostnameManager).sethostname(hn)
class IHostnameManager(Interface):
    """Ajenti interface for reading and writing the system hostname;
    implemented per-distro by the manager classes below."""
    def gethostname(self):
        # Return the current system hostname as a string.
        pass

    def sethostname(self, hn):
        # Persist hn as the new system hostname.
        pass
class LinuxGenericHostnameManager(Plugin):
    """Hostname backend for distros that keep the name in /etc/hostname."""
    implements(IHostnameManager)
    platform = ['debian']

    def gethostname(self):
        # Use a context manager so the handle is closed promptly (the
        # original left it open until GC).  NOTE(review): as before, the
        # returned value may carry a trailing newline from the file.
        with open('/etc/hostname') as f:
            return f.read()

    def sethostname(self, hn):
        with open('/etc/hostname', 'w') as f:
            f.write(hn)
class ArchHostnameManager(Plugin):
    """Hostname backend for Arch: HOSTNAME variable in rc.conf."""
    implements(IHostnameManager)
    platform = ['arch']

    def gethostname(self):
        return apis.rcconf.RCConf(self.app).get_param('HOSTNAME')

    def sethostname(self, hn):
        apis.rcconf.RCConf(self.app).set_param('HOSTNAME', hn, near='HOSTNAME')


class BSDHostnameManager(Plugin):
    """Hostname backend for FreeBSD: hostname variable in rc.conf."""
    implements(IHostnameManager)
    platform = ['freebsd']

    def gethostname(self):
        return apis.rcconf.RCConf(self.app).get_param('hostname')

    def sethostname(self, hn):
        apis.rcconf.RCConf(self.app).set_param('hostname', hn, near='hostname')


class CentOSHostnameManager(Plugin):
    """Hostname backend for RHEL-family distros: HOSTNAME in
    /etc/sysconfig/network."""
    implements(IHostnameManager)
    platform = ['centos', 'fedora', 'mandriva']

    def gethostname(self):
        rc = apis.rcconf.RCConf(self.app)
        rc.file = '/etc/sysconfig/network'
        return rc.get_param('HOSTNAME')

    def sethostname(self, hn):
        rc = apis.rcconf.RCConf(self.app)
        rc.file = '/etc/sysconfig/network'
        rc.set_param('HOSTNAME', hn, near='HOSTNAME')


class GentooHostnameManager(Plugin):
    """Hostname backend for Gentoo: hostname in /etc/conf.d/hostname."""
    implements(IHostnameManager)
    platform = ['gentoo']

    def gethostname(self):
        rc = apis.rcconf.RCConf(self.app)
        rc.file = '/etc/conf.d/hostname'
        return rc.get_param('hostname')

    def sethostname(self, hn):
        rc = apis.rcconf.RCConf(self.app)
        rc.file = '/etc/conf.d/hostname'
        rc.set_param('hostname', hn, near='hostname')
| lgpl-3.0 |
pilou-/ansible | test/units/modules/network/f5/test_bigip_device_syslog.py | 16 | 3296 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_device_syslog import ApiParameters
from library.modules.bigip_device_syslog import ModuleParameters
from library.modules.bigip_device_syslog import ModuleManager
from library.modules.bigip_device_syslog import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_device_syslog import ApiParameters
from ansible.modules.network.f5.bigip_device_syslog import ModuleParameters
from ansible.modules.network.f5.bigip_device_syslog import ModuleManager
from ansible.modules.network.f5.bigip_device_syslog import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
# Directory holding the JSON fixtures used by these tests, resolved relative
# to this test module.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
# Process-wide memoization cache for load_fixture(): path -> parsed content.
fixture_data = {}
def load_fixture(name):
    """Load (and memoize) a fixture file by name.

    The file is parsed as JSON when possible; otherwise its raw text is
    returned. Results are cached in the module-level ``fixture_data`` dict.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as f:
        raw = f.read()
    try:
        parsed = json.loads(raw)
    except Exception:
        # Not JSON -- fall back to the raw file contents.
        parsed = raw
    fixture_data[path] = parsed
    return parsed
class TestParameters(unittest.TestCase):
    """Unit tests for the module's parameter adapter classes."""

    def test_module_parameters(self):
        # User-supplied arguments should be exposed verbatim.
        params = dict(
            kern_from='err',
            kern_to='info',
        )
        mp = ModuleParameters(params=params)
        assert mp.kern_from == 'err'
        assert mp.kern_to == 'info'

    def test_api_parameters(self):
        # Values read back from the device fixture should round-trip.
        ap = ApiParameters(params=load_fixture('load_sys_syslog_1.json'))
        assert ap.kern_from == 'debug'
        assert ap.kern_to == 'emerg'
class TestUntypedManager(unittest.TestCase):
    """Exercise ModuleManager.exec_module() with device I/O stubbed out."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_update(self, *args):
        module_args = dict(
            kern_from='emerg',
            kern_to='debug',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        )
        set_module_args(module_args)
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        existing = ApiParameters(params=load_fixture('load_sys_syslog_1.json'))
        # Override methods to force specific logic in the module to happen.
        manager = ModuleManager(module=module)
        manager.update_on_device = Mock(return_value=True)
        manager.read_current_from_device = Mock(return_value=existing)
        results = manager.exec_module()
        assert results['changed'] is True
| gpl-3.0 |
CasparLi/calibre | src/calibre/ebooks/rtf2xml/paragraph_def.py | 24 | 29763 | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import sys, os
from calibre.ebooks.rtf2xml import copy, border_parse
from calibre.ptempfile import better_mktemp
class ParagraphDef:
    """
    =================
    Purpose
    =================
    Write paragraph definition tags.
    States:
    1. before_1st_para_def.
    Before any para_def token is found. This means all the text in the preamble.
    Look for the token 'cw<pf<par-def___'. This will changet the state to collect_tokens.
    2. collect_tokens.
    Found a paragraph_def. Need to get all tokens.
    Change with start of a paragrph ('mi<mk<para-start'). State then becomes
    in_paragraphs
    If another paragraph definition is found, the state does not change.
    But the dictionary is reset.
    3. in_paragraphs
    State changes when 'mi<mk<para-end__', or end of paragraph is found.
    State then becomes 'self.__state = 'after_para_end'
    4. after_para_end
    If 'mi<mk<para-start' (the start of a paragraph) or 'mi<mk<para-end__' (the end of a paragraph--must be empty paragraph?) are found:
    state changes to 'in_paragraphs'
    If 'cw<pf<par-def___' (paragraph_definition) is found:
    state changes to collect_tokens
    if 'mi<mk<body-close', 'mi<mk<par-in-fld', 'cw<tb<cell______','cw<tb<row-def___','cw<tb<row_______', 'mi<mk<sect-close', 'mi<mk<header-beg', 'mi<mk<header-end'
    are found. (All these tokens mark the start of a bigger element. para_def must
    be closed:
    state changes to 'after_para_def'
    5. after_para_def
    'mi<mk<para-start' changes state to in_paragraphs
    if another paragraph_def is found, the state changes to collect_tokens.
    """
    # NOTE: this module targets Python 2 (old-style ``raise handler, msg``
    # statements and list-returning ``dict.keys()`` below); it is not
    # importable under Python 3 without conversion.
    def __init__(self,
            in_file,
            bug_handler,
            default_font,
            copy = None,
            run_level = 1,):
        """
        Required:
        'file'--file to parse
        'default_font' --document default font
        Optional:
        'copy'-- whether to make a copy of result for debugging
        'temp_dir' --where to output temporary results (default is
        directory from which the script is run.)
        Returns:
        nothing
        """
        # NOTE(review): the 'copy' parameter shadows the imported 'copy'
        # module inside this method; harmless here, but easy to trip over.
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__default_font = default_font
        self.__copy = copy
        self.__run_level = run_level
        self.__write_to = better_mktemp()
    def __initiate_values(self):
        """
        Initiate all values.
        """
        # Dictionary needed to convert shortened style names to readable names
        # NOTE(review): several keys below are listed twice ('list-text_',
        # 'list-level', 'bor-par-bo'); in a dict literal the later entry
        # silently wins. In particular 'bor-par-bo' maps to
        # 'border-paragraph-box', discarding 'border-paragraph-bottom' --
        # presumably unintended, verify against the RTF spec before changing.
        self.__token_dict={
        # paragraph formatting => pf
        'par-end___' : 'para',
        'par-def___' : 'paragraph-definition',
        'keep-w-nex' : 'keep-with-next',
        'widow-cntl' : 'widow-control',
        'adjust-rgt' : 'adjust-right',
        'language__' : 'language',
        'right-inde' : 'right-indent',
        'fir-ln-ind' : 'first-line-indent',
        'left-inden' : 'left-indent',
        'space-befo' : 'space-before',
        'space-afte' : 'space-after',
        'line-space' : 'line-spacing',
        'default-ta' : 'default-tab',
        'align_____' : 'align',
        'widow-cntr' : 'widow-control',
        # stylesheet = > ss
        'style-shet' : 'stylesheet',
        'based-on__' : 'based-on-style',
        'next-style' : 'next-style',
        'char-style' : 'character-style',
        # this is changed to get a nice attribute
        'para-style' : 'name',
        # graphics => gr
        'picture___' : 'pict',
        'obj-class_' : 'obj_class',
        'mac-pic___' : 'mac-pict',
        # section => sc
        'section___' : 'section-new',
        'sect-defin' : 'section-reset',
        'sect-note_' : 'endnotes-in-section',
        # list=> ls
        'list-text_' : 'list-text',
        # this line must be wrong because it duplicates an earlier one
        'list-text_' : 'list-text',
        'list______' : 'list',
        'list-lev-d' : 'list-level-definition',
        'list-cardi' : 'list-cardinal-numbering',
        'list-decim' : 'list-decimal-numbering',
        'list-up-al' : 'list-uppercase-alphabetic-numbering',
        'list-up-ro' : 'list-uppercae-roman-numbering',
        'list-ord__' : 'list-ordinal-numbering',
        'list-ordte' : 'list-ordinal-text-numbering',
        'list-bulli' : 'list-bullet',
        'list-simpi' : 'list-simple',
        'list-conti' : 'list-continue',
        'list-hang_' : 'list-hang',
        # 'list-tebef' : 'list-text-before',
        'list-level' : 'level',
        'list-id___' : 'list-id',
        'list-start' : 'list-start',
        'nest-level' : 'nest-level',
        # duplicate
        'list-level' : 'list-level',
        # notes => nt
        'footnote__' : 'footnote',
        'type______' : 'type',
        # anchor => an
        'toc_______' : 'anchor-toc',
        'book-mk-st' : 'bookmark-start',
        'book-mk-en' : 'bookmark-end',
        'index-mark' : 'anchor-index',
        'place_____' : 'place',
        # field => fd
        'field_____' : 'field',
        'field-inst' : 'field-instruction',
        'field-rslt' : 'field-result',
        'datafield_' : 'data-field',
        # info-tables => it
        'font-table' : 'font-table',
        'colr-table' : 'color-table',
        'lovr-table' : 'list-override-table',
        'listtable_' : 'list-table',
        'revi-table' : 'revision-table',
        # character info => ci
        'hidden____' : 'hidden',
        'italics___' : 'italics',
        'bold______' : 'bold',
        'strike-thr' : 'strike-through',
        'shadow____' : 'shadow',
        'outline___' : 'outline',
        'small-caps' : 'small-caps',
        'caps______' : 'caps',
        'dbl-strike' : 'double-strike-through',
        'emboss____' : 'emboss',
        'engrave___' : 'engrave',
        'subscript_' : 'subscript',
        'superscrip' : 'superscipt',
        'font-style' : 'font-style',
        'font-color' : 'font-color',
        'font-size_' : 'font-size',
        'font-up___' : 'superscript',
        'font-down_' : 'subscript',
        'red_______' : 'red',
        'blue______' : 'blue',
        'green_____' : 'green',
        # table => tb
        'row-def___' : 'row-definition',
        'cell______' : 'cell',
        'row_______' : 'row',
        'in-table__' : 'in-table',
        'columns___' : 'columns',
        'row-pos-le' : 'row-position-left',
        'cell-posit' : 'cell-position',
        # preamble => pr
        # underline
        'underlined' : 'underlined',
        # border => bd
        'bor-t-r-hi' : 'border-table-row-horizontal-inside',
        'bor-t-r-vi' : 'border-table-row-vertical-inside',
        'bor-t-r-to' : 'border-table-row-top',
        'bor-t-r-le' : 'border-table-row-left',
        'bor-t-r-bo' : 'border-table-row-bottom',
        'bor-t-r-ri' : 'border-table-row-right',
        'bor-cel-bo' : 'border-cell-bottom',
        'bor-cel-to' : 'border-cell-top',
        'bor-cel-le' : 'border-cell-left',
        'bor-cel-ri' : 'border-cell-right',
        'bor-par-bo' : 'border-paragraph-bottom',
        'bor-par-to' : 'border-paragraph-top',
        'bor-par-le' : 'border-paragraph-left',
        'bor-par-ri' : 'border-paragraph-right',
        'bor-par-bo' : 'border-paragraph-box',
        'bor-for-ev' : 'border-for-every-paragraph',
        'bor-outsid' : 'border-outisde',
        'bor-none__' : 'border',
        # border type => bt
        'bdr-single' : 'single',
        'bdr-doubtb' : 'double-thickness-border',
        'bdr-shadow' : 'shadowed-border',
        'bdr-double' : 'double-border',
        'bdr-dotted' : 'dotted-border',
        'bdr-dashed' : 'dashed',
        'bdr-hair__' : 'hairline',
        'bdr-inset_' : 'inset',
        'bdr-das-sm' : 'dash-small',
        'bdr-dot-sm' : 'dot-dash',
        'bdr-dot-do' : 'dot-dot-dash',
        'bdr-outset' : 'outset',
        'bdr-trippl' : 'tripple',
        'bdr-thsm__' : 'thick-thin-small',
        'bdr-htsm__' : 'thin-thick-small',
        'bdr-hthsm_' : 'thin-thick-thin-small',
        'bdr-thm__' : 'thick-thin-medium',
        'bdr-htm__' : 'thin-thick-medium',
        'bdr-hthm_' : 'thin-thick-thin-medium',
        'bdr-thl__' : 'thick-thin-large',
        'bdr-hthl_' : 'think-thick-think-large',
        'bdr-wavy_' : 'wavy',
        'bdr-d-wav' : 'double-wavy',
        'bdr-strip' : 'striped',
        'bdr-embos' : 'emboss',
        'bdr-engra' : 'engrave',
        'bdr-frame' : 'frame',
        'bdr-li-wid' : 'line-width',
        }
        # Dispatch table: tab-related tokens -> handler methods.
        self.__tabs_dict = {
        'cw<pf<tab-stop__' : self.__tab_stop_func,
        'cw<pf<tab-center' : self.__tab_type_func,
        'cw<pf<tab-right_' : self.__tab_type_func,
        'cw<pf<tab-dec___' : self.__tab_type_func,
        'cw<pf<leader-dot' : self.__tab_leader_func,
        'cw<pf<leader-hyp' : self.__tab_leader_func,
        'cw<pf<leader-und' : self.__tab_leader_func,
        'cw<pf<tab-bar-st' : self.__tab_bar_func,
        }
        # Token -> human-readable tab-type / leader name.
        self.__tab_type_dict = {
        'cw<pf<tab-center' : 'center',
        'cw<pf<tab-right_' : 'right',
        'cw<pf<tab-dec___' : 'decimal',
        'cw<pf<leader-dot' : 'leader-dot',
        'cw<pf<leader-hyp' : 'leader-hyphen',
        'cw<pf<leader-und' : 'leader-underline',
        }
        self.__border_obj = border_parse.BorderParse()
        # Strings used to deduplicate styles and collect body-style output.
        self.__style_num_strings = []
        self.__body_style_strings = []
        self.__state = 'before_1st_para_def'
        self.__att_val_dict = {}
        # Marker tokens emitted around paragraph definitions for later passes.
        self.__start_marker = 'mi<mk<pard-start\n' # outside para tags
        self.__start2_marker = 'mi<mk<pardstart_\n' # inside para tags
        self.__end2_marker = 'mi<mk<pardend___\n' # inside para tags
        self.__end_marker = 'mi<mk<pard-end__\n' # outside para tags
        self.__text_string = ''
        # State machine: current state name -> handler for the next line.
        self.__state_dict = {
        'before_1st_para_def' : self.__before_1st_para_def_func,
        'collect_tokens' : self.__collect_tokens_func,
        'after_para_def' : self.__after_para_def_func,
        'in_paragraphs' : self.__in_paragraphs_func,
        'after_para_end' : self.__after_para_end_func,
        }
        self.__collect_tokens_dict = {
        'mi<mk<para-start' : self.__end_para_def_func,
        'cw<pf<par-def___' : self.__para_def_in_para_def_func,
        'cw<tb<cell______' : self.__empty_table_element_func,
        'cw<tb<row_______' : self.__empty_table_element_func,
        }
        self.__after_para_def_dict = {
        'mi<mk<para-start' : self.__start_para_after_def_func,
        'cw<pf<par-def___' : self.__found_para_def_func,
        'cw<tb<cell______' : self.__empty_table_element_func,
        'cw<tb<row_______' : self.__empty_table_element_func,
        }
        self.__in_paragraphs_dict = {
        'mi<mk<para-end__' : self.__found_para_end_func,
        }
        self.__after_para_end_dict = {
        'mi<mk<para-start' : self.__continue_block_func,
        'mi<mk<para-end__' : self.__continue_block_func,
        'cw<pf<par-def___' : self.__new_para_def_func,
        'mi<mk<body-close' : self.__stop_block_func,
        'mi<mk<par-in-fld' : self.__stop_block_func,
        'cw<tb<cell______' : self.__stop_block_func,
        'cw<tb<row-def___' : self.__stop_block_func,
        'cw<tb<row_______' : self.__stop_block_func,
        'mi<mk<sect-close' : self.__stop_block_func,
        'mi<mk<sect-start' : self.__stop_block_func,
        'mi<mk<header-beg' : self.__stop_block_func,
        'mi<mk<header-end' : self.__stop_block_func,
        'mi<mk<head___clo' : self.__stop_block_func,
        'mi<mk<fldbk-end_' : self.__stop_block_func,
        'mi<mk<lst-txbeg_' : self.__stop_block_func,
        }
    def __before_1st_para_def_func(self, line):
        """
        Required:
        line -- line to parse
        Returns:
        nothing
        Logic:
        Look for the beginning of a paragaraph definition
        """
        ##cw<pf<par-def___<nu<true
        if self.__token_info == 'cw<pf<par-def___':
            self.__found_para_def_func()
        else:
            self.__write_obj.write(line)
    def __found_para_def_func(self):
        # Enter token-collection mode and start from a clean attribute dict.
        self.__state = 'collect_tokens'
        # not exactly right--have to reset the dictionary--give it default
        # values
        self.__reset_dict()
    def __collect_tokens_func(self, line):
        """
        Required:
        line --line to parse
        Returns:
        nothing
        Logic:
        Check the collect_tokens_dict for either the beginning of a
        paragraph or a new paragraph definition. Take the actions
        according to the value in the dict.
        Otherwise, check if the token is not a control word. If it is not,
        change the state to after_para_def.
        Otherwise, check if the token is a paragraph definition word; if
        so, add it to the attributes and values dictionary.
        """
        action = self.__collect_tokens_dict.get(self.__token_info)
        if action:
            action(line)
        elif line[0:2] != 'cw':
            self.__write_obj.write(line)
            self.__state = 'after_para_def'
        elif line[0:5] == 'cw<bd':
            self.__parse_border(line)
        else:
            action = self.__tabs_dict.get(self.__token_info)
            if action:
                action(line)
            else:
                # line[6:16] is the 10-character shortened token name;
                # line[20:-1] is its value (trailing newline stripped).
                token = self.__token_dict.get(line[6:16])
                if token:
                    self.__att_val_dict[token] = line[20:-1]
    def __tab_stop_func(self, line):
        """
        Record one tab stop as 'type:position;' and reset the pending
        tab type back to the default ('left').
        """
        self.__att_val_dict['tabs'] += '%s:' % self.__tab_type
        self.__att_val_dict['tabs'] += '%s;' % line[20:-1]
        self.__tab_type = 'left'
    def __tab_type_func(self, line):
        """
        Remember the type (center/right/decimal) of the next tab stop.
        """
        type = self.__tab_type_dict.get(self.__token_info)
        if type != None:
            self.__tab_type = type
        else:
            if self.__run_level > 3:
                msg = 'no entry for %s\n' % self.__token_info
                # Python 2 raise syntax: raises bug_handler with msg as arg.
                raise self.__bug_handler, msg
    def __tab_leader_func(self, line):
        """
        Append a tab leader (dot/hyphen/underline) to the tabs attribute.
        """
        leader = self.__tab_type_dict.get(self.__token_info)
        if leader != None:
            self.__att_val_dict['tabs'] += '%s^' % leader
        else:
            if self.__run_level > 3:
                msg = 'no entry for %s\n' % self.__token_info
                raise self.__bug_handler, msg
    def __tab_bar_func(self, line):
        """
        Record a bar tab stop and reset the pending tab type.
        """
        # self.__att_val_dict['tabs-bar'] += '%s:' % line[20:-1]
        self.__att_val_dict['tabs'] += 'bar:%s;' % (line[20:-1])
        self.__tab_type = 'left'
    def __parse_border(self, line):
        """
        Requires:
        line --line to parse
        Returns:
        nothing (updates dictionary)
        Logic:
        Uses the border_parse module to return a dictionary of attribute
        value pairs for a border line.
        """
        border_dict = self.__border_obj.parse_border(line)
        self.__att_val_dict.update(border_dict)
    def __para_def_in_para_def_func(self, line):
        """
        Requires:
        line --line to parse
        Returns:
        nothing
        Logic:
        I have found a \pard while I am collecting tokens. I want to reset
        the dectionary and do nothing else.
        """
        # Change this
        self.__state = 'collect_tokens'
        self.__reset_dict()
    def __end_para_def_func(self, line):
        """
        Requires:
        Nothing
        Returns:
        Nothing
        Logic:
        The previous state was collect tokens, and I have found the start
        of a paragraph. I want to outut the defintion tag; output the line
        itself (telling me of the beginning of a paragraph);change the
        state to 'in_paragraphs';
        """
        self.__write_para_def_beg()
        self.__write_obj.write(line)
        self.__state = 'in_paragraphs'
    def __start_para_after_def_func(self, line):
        """
        Requires:
        Nothing
        Returns:
        Nothing
        Logic:
        The state was is after_para_def. and I have found the start of a
        paragraph. I want to outut the defintion tag; output the line
        itself (telling me of the beginning of a paragraph);change the
        state to 'in_paragraphs'.
        (I now realize that this is absolutely identical to the function above!)
        """
        self.__write_para_def_beg()
        self.__write_obj.write(line)
        self.__state = 'in_paragraphs'
    def __after_para_def_func(self, line):
        """
        Requires:
        line -- line to parse
        Returns:
        nothing
        Logic:
        Check if the token info is the start of a paragraph. If so, call
        on the function found in the value of the dictionary.
        """
        action = self.__after_para_def_dict.get(self.__token_info)
        if self.__token_info == 'cw<pf<par-def___':
            self.__found_para_def_func()
        elif action:
            action(line)
        else:
            self.__write_obj.write(line)
    def __in_paragraphs_func(self, line):
        """
        Requires:
        line --current line
        Returns:
        nothing
        Logic:
        Look for the end of a paragraph, the start of a cell or row.
        """
        action = self.__in_paragraphs_dict.get(self.__token_info)
        if action:
            action(line)
        else:
            self.__write_obj.write(line)
    def __found_para_end_func(self,line):
        """
        Requires:
        line -- line to print out
        Returns:
        Nothing
        Logic:
        State is in paragraphs. You have found the end of a paragraph. You
        need to print out the line and change the state to after
        paragraphs.
        """
        self.__state = 'after_para_end'
        self.__write_obj.write(line)
    def __after_para_end_func(self, line):
        """
        Requires:
        line -- line to output
        Returns:
        nothing
        Logic:
        The state is after the end of a paragraph. You are collecting all
        the lines in a string and waiting to see if you need to write
        out the paragraph definition. If you find another paragraph
        definition, then you write out the old paragraph dictionary and
        print out the string. You change the state to collect tokens.
        If you find any larger block elemens, such as cell, row,
        field-block, or section, you write out the paragraph defintion and
        then the text string.
        If you find the beginning of a paragraph, then you don't need to
        write out the paragraph definition. Write out the string, and
        change the state to in paragraphs.
        """
        self.__text_string += line
        action = self.__after_para_end_dict.get(self.__token_info)
        if action:
            action(line)
    def __continue_block_func(self, line):
        """
        Requires:
        line --line to print out
        Returns:
        Nothing
        Logic:
        The state is after the end of a paragraph. You have found the
        start of a paragaph, so you don't need to print out the paragaph
        definition. Print out the string, the line, and change the state
        to in paragraphs.
        """
        self.__state = 'in_paragraphs'
        self.__write_obj.write(self.__text_string)
        self.__text_string = ''
    # found a new paragraph definition after an end of a paragraph
    def __new_para_def_func(self, line):
        """
        Requires:
        line -- line to output
        Returns:
        Nothing
        Logic:
        You have found a new paragraph defintion at the end of a
        paragraph. Output the end of the old paragraph defintion. Output
        the text string. Output the line. Change the state to collect
        tokens. (And don't forget to set the text string to ''!)
        """
        self.__write_para_def_end_func()
        self.__found_para_def_func()
    # after a paragraph and found reason to stop this block
    def __stop_block_func(self, line):
        """
        Requires:
        line --(shouldn't be here?)
        Returns:
        nothing
        Logic:
        The state is after a paragraph, and you have found a larger block
        than paragraph-definition. You want to write the end tag of the
        old defintion and reset the text string (handled by other
        methods).
        """
        self.__write_para_def_end_func()
        self.__state = 'after_para_def'
    def __write_para_def_end_func(self):
        """
        Requires:
        nothing
        Returns:
        nothing
        Logic:
        Print out the end of the pargraph definition tag, and the markers
        that let me know when I have reached this tag. (These markers are
        used for later parsing.)
        """
        self.__write_obj.write(self.__end2_marker)
        self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
        self.__write_obj.write(self.__end_marker)
        self.__write_obj.write(self.__text_string)
        self.__text_string = ''
        # Python 2: keys() returns a list here.
        keys = self.__att_val_dict.keys()
        if 'font-style' in keys:
            self.__write_obj.write('mi<mk<font-end__\n')
        if 'caps' in keys:
            self.__write_obj.write('mi<mk<caps-end__\n')
    def __get_num_of_style(self):
        """
        Requires:
        nothing
        Returns:
        nothing
        Logic:
        Get a unique value for each style.
        """
        my_string = ''
        new_style = 0
        # when determining uniqueness for a style, ingorne these values, since
        # they don't tell us if the style is unique
        ignore_values = ['style-num', 'nest-level', 'in-table']
        keys = self.__att_val_dict.keys()
        keys.sort()
        for key in keys:
            if key in ignore_values:
                continue
            my_string += '%s:%s' % (key, self.__att_val_dict[key])
        if my_string in self.__style_num_strings:
            num = self.__style_num_strings.index(my_string)
            num += 1 # since indexing starts at zero, rather than 1
        else:
            self.__style_num_strings.append(my_string)
            num = len(self.__style_num_strings)
            new_style = 1
        num = '%04d' % num
        self.__att_val_dict['style-num'] = 's' + str(num)
        if new_style:
            self.__write_body_styles()
    def __write_body_styles(self):
        # Build one 'paragraph-style-in-body' tag line for this new style
        # and stash it; the collected list is returned by make_paragraph_def.
        style_string = ''
        style_string += 'mi<tg<empty-att_<paragraph-style-in-body'
        style_string += '<name>%s' % self.__att_val_dict['name']
        style_string += '<style-number>%s' % self.__att_val_dict['style-num']
        tabs_list = ['tabs-left', 'tabs-right', 'tabs-decimal', 'tabs-center',
        'tabs-bar', 'tabs']
        if self.__att_val_dict['tabs'] != '':
            the_value = self.__att_val_dict['tabs']
            # the_value = the_value[:-1]
            style_string += ('<%s>%s' % ('tabs', the_value))
        keys = self.__att_val_dict.keys()
        keys.sort()
        for key in keys:
            if key != 'name' and key !='style-num' and key != 'in-table'\
                and key not in tabs_list:
                style_string += ('<%s>%s' % (key, self.__att_val_dict[key]))
        style_string += '\n'
        self.__body_style_strings.append(style_string)
    def __write_para_def_beg(self):
        """
        Requires:
        nothing
        Returns:
        nothing
        Logic:
        Print out the beginning of the pargraph definition tag, and the markers
        that let me know when I have reached this tag. (These markers are
        used for later parsing.)
        """
        self.__get_num_of_style()
        table = self.__att_val_dict.get('in-table')
        if table:
            # del self.__att_val_dict['in-table']
            self.__write_obj.write('mi<mk<in-table__\n')
        else:
            self.__write_obj.write('mi<mk<not-in-tbl\n')
        left_indent = self.__att_val_dict.get('left-indent')
        if left_indent:
            self.__write_obj.write('mi<mk<left_inden<%s\n' % left_indent)
        is_list = self.__att_val_dict.get('list-id')
        if is_list:
            self.__write_obj.write('mi<mk<list-id___<%s\n' % is_list)
        else:
            self.__write_obj.write('mi<mk<no-list___\n')
        self.__write_obj.write('mi<mk<style-name<%s\n' % self.__att_val_dict['name'])
        self.__write_obj.write(self.__start_marker)
        self.__write_obj.write('mi<tg<open-att__<paragraph-definition')
        self.__write_obj.write('<name>%s' % self.__att_val_dict['name'])
        self.__write_obj.write('<style-number>%s' % self.__att_val_dict['style-num'])
        tabs_list = ['tabs-left', 'tabs-right', 'tabs-decimal', 'tabs-center',
        'tabs-bar', 'tabs']
        """
        for tab_item in tabs_list:
            if self.__att_val_dict[tab_item] != '':
                the_value = self.__att_val_dict[tab_item]
                the_value = the_value[:-1]
                self.__write_obj.write('<%s>%s' % (tab_item, the_value))
        """
        if self.__att_val_dict['tabs'] != '':
            the_value = self.__att_val_dict['tabs']
            # the_value = the_value[:-1]
            self.__write_obj.write('<%s>%s' % ('tabs', the_value))
        keys = self.__att_val_dict.keys()
        keys.sort()
        for key in keys:
            if key != 'name' and key !='style-num' and key != 'in-table'\
                and key not in tabs_list:
                self.__write_obj.write('<%s>%s' % (key, self.__att_val_dict[key]))
        self.__write_obj.write('\n')
        self.__write_obj.write(self.__start2_marker)
        if 'font-style' in keys:
            face = self.__att_val_dict['font-style']
            self.__write_obj.write('mi<mk<font______<%s\n' % face)
        if 'caps' in keys:
            value = self.__att_val_dict['caps']
            self.__write_obj.write('mi<mk<caps______<%s\n' % value)
    def __empty_table_element_func(self, line):
        # A cell/row token arrived while no paragraph was open: mark that we
        # are in a table, pass the line through, and close the definition.
        self.__write_obj.write('mi<mk<in-table__\n')
        self.__write_obj.write(line)
        self.__state = 'after_para_def'
    def __reset_dict(self):
        """
        Requires:
        nothing
        Returns:
        nothing
        Logic:
        The dictionary containing values and attributes must be reset each
        time a new paragraphs definition is found.
        """
        self.__att_val_dict.clear()
        self.__att_val_dict['name'] = 'Normal'
        self.__att_val_dict['font-style'] = self.__default_font
        self.__tab_type = 'left'
        self.__att_val_dict['tabs-left'] = ''
        self.__att_val_dict['tabs-right'] = ''
        self.__att_val_dict['tabs-center'] = ''
        self.__att_val_dict['tabs-decimal'] = ''
        self.__att_val_dict['tabs-bar'] = ''
        self.__att_val_dict['tabs'] = ''
    def make_paragraph_def(self):
        """
        Requires:
        nothing
        Returns:
        nothing (changes the original file)
        Logic:
        Read one line in at a time. Determine what action to take based on
        the state.
        """
        self.__initiate_values()
        read_obj = open(self.__file, 'r')
        self.__write_obj = open(self.__write_to, 'w')
        line_to_read = 1
        while line_to_read:
            line_to_read = read_obj.readline()
            line = line_to_read
            # The first 16 characters of each line are the token id.
            self.__token_info = line[:16]
            action = self.__state_dict.get(self.__state)
            if action == None:
                sys.stderr.write('no no matching state in module sections.py\n')
                sys.stderr.write(self.__state + '\n')
            action(line)
        read_obj.close()
        self.__write_obj.close()
        copy_obj = copy.Copy(bug_handler = self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "paragraphs_def.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
        return self.__body_style_strings
| gpl-3.0 |
dimagol/trex-core | scripts/external_libs/scapy-2.3.1/python2/scapy/error.py | 21 | 1846 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Logging subsystem and basic exception class.
"""
#############################
##### Logging subsystem #####
#############################
class Scapy_Exception(Exception):
    """Base class for exceptions raised by Scapy itself."""
    pass
import logging,traceback,time
class ScapyFreqFilter(logging.Filter):
    """Logging filter that rate-limits repeated warnings per call site.

    Each warning is keyed by the line number of its caller; at most two
    messages per caller are let through within ``conf.warning_threshold``
    seconds (the second one is prefixed with "more ").
    """
    def __init__(self):
        logging.Filter.__init__(self)
        # caller line number -> (timestamp of window start, count in window)
        self.warning_table = {}
    def filter(self, record):
        # Python 2 style implicit-relative import; resolved lazily to avoid
        # an import cycle with scapy's config module.
        from config import conf
        wt = conf.warning_threshold
        if wt > 0:
            # Walk the stack to find the line that invoked warning().
            stk = traceback.extract_stack()
            caller=None
            for f,l,n,c in stk:
                if n == 'warning':
                    break
                caller = l
            tm,nb = self.warning_table.get(caller, (0,0))
            ltm = time.time()
            if ltm-tm > wt:
                # Window expired: start a new one for this caller.
                tm = ltm
                nb = 0
            else:
                if nb < 2:
                    nb += 1
                    if nb == 2:
                        record.msg = "more "+record.msg
                else:
                    # Already emitted two messages in this window: drop.
                    return 0
            self.warning_table[caller] = (tm,nb)
        return 1
# Root scapy logger; everything below hangs off this one.
log_scapy = logging.getLogger("scapy")
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
log_scapy.addHandler(console_handler)
log_runtime = logging.getLogger("scapy.runtime") # logs at runtime
# Throttle repeated runtime warnings (see ScapyFreqFilter above).
log_runtime.addFilter(ScapyFreqFilter())
log_interactive = logging.getLogger("scapy.interactive") # logs in interactive functions
log_loading = logging.getLogger("scapy.loading") # logs when loading scapy
def warning(x):
    """Emit *x* through the runtime logger (rate-limited by ScapyFreqFilter)."""
    log_runtime.warning(x)
| apache-2.0 |
chiviak/headphones | lib/unidecode/x08c.py | 251 | 4630 | data = (
'Yu ', # 0x00
'Shui ', # 0x01
'Shen ', # 0x02
'Diao ', # 0x03
'Chan ', # 0x04
'Liang ', # 0x05
'Zhun ', # 0x06
'Sui ', # 0x07
'Tan ', # 0x08
'Shen ', # 0x09
'Yi ', # 0x0a
'Mou ', # 0x0b
'Chen ', # 0x0c
'Die ', # 0x0d
'Huang ', # 0x0e
'Jian ', # 0x0f
'Xie ', # 0x10
'Nue ', # 0x11
'Ye ', # 0x12
'Wei ', # 0x13
'E ', # 0x14
'Yu ', # 0x15
'Xuan ', # 0x16
'Chan ', # 0x17
'Zi ', # 0x18
'An ', # 0x19
'Yan ', # 0x1a
'Di ', # 0x1b
'Mi ', # 0x1c
'Pian ', # 0x1d
'Xu ', # 0x1e
'Mo ', # 0x1f
'Dang ', # 0x20
'Su ', # 0x21
'Xie ', # 0x22
'Yao ', # 0x23
'Bang ', # 0x24
'Shi ', # 0x25
'Qian ', # 0x26
'Mi ', # 0x27
'Jin ', # 0x28
'Man ', # 0x29
'Zhe ', # 0x2a
'Jian ', # 0x2b
'Miu ', # 0x2c
'Tan ', # 0x2d
'Zen ', # 0x2e
'Qiao ', # 0x2f
'Lan ', # 0x30
'Pu ', # 0x31
'Jue ', # 0x32
'Yan ', # 0x33
'Qian ', # 0x34
'Zhan ', # 0x35
'Chen ', # 0x36
'Gu ', # 0x37
'Qian ', # 0x38
'Hong ', # 0x39
'Xia ', # 0x3a
'Jue ', # 0x3b
'Hong ', # 0x3c
'Han ', # 0x3d
'Hong ', # 0x3e
'Xi ', # 0x3f
'Xi ', # 0x40
'Huo ', # 0x41
'Liao ', # 0x42
'Han ', # 0x43
'Du ', # 0x44
'Long ', # 0x45
'Dou ', # 0x46
'Jiang ', # 0x47
'Qi ', # 0x48
'Shi ', # 0x49
'Li ', # 0x4a
'Deng ', # 0x4b
'Wan ', # 0x4c
'Bi ', # 0x4d
'Shu ', # 0x4e
'Xian ', # 0x4f
'Feng ', # 0x50
'Zhi ', # 0x51
'Zhi ', # 0x52
'Yan ', # 0x53
'Yan ', # 0x54
'Shi ', # 0x55
'Chu ', # 0x56
'Hui ', # 0x57
'Tun ', # 0x58
'Yi ', # 0x59
'Tun ', # 0x5a
'Yi ', # 0x5b
'Jian ', # 0x5c
'Ba ', # 0x5d
'Hou ', # 0x5e
'E ', # 0x5f
'Cu ', # 0x60
'Xiang ', # 0x61
'Huan ', # 0x62
'Jian ', # 0x63
'Ken ', # 0x64
'Gai ', # 0x65
'Qu ', # 0x66
'Fu ', # 0x67
'Xi ', # 0x68
'Bin ', # 0x69
'Hao ', # 0x6a
'Yu ', # 0x6b
'Zhu ', # 0x6c
'Jia ', # 0x6d
'[?] ', # 0x6e
'Xi ', # 0x6f
'Bo ', # 0x70
'Wen ', # 0x71
'Huan ', # 0x72
'Bin ', # 0x73
'Di ', # 0x74
'Zong ', # 0x75
'Fen ', # 0x76
'Yi ', # 0x77
'Zhi ', # 0x78
'Bao ', # 0x79
'Chai ', # 0x7a
'Han ', # 0x7b
'Pi ', # 0x7c
'Na ', # 0x7d
'Pi ', # 0x7e
'Gou ', # 0x7f
'Na ', # 0x80
'You ', # 0x81
'Diao ', # 0x82
'Mo ', # 0x83
'Si ', # 0x84
'Xiu ', # 0x85
'Huan ', # 0x86
'Kun ', # 0x87
'He ', # 0x88
'He ', # 0x89
'Mo ', # 0x8a
'Han ', # 0x8b
'Mao ', # 0x8c
'Li ', # 0x8d
'Ni ', # 0x8e
'Bi ', # 0x8f
'Yu ', # 0x90
'Jia ', # 0x91
'Tuan ', # 0x92
'Mao ', # 0x93
'Pi ', # 0x94
'Xi ', # 0x95
'E ', # 0x96
'Ju ', # 0x97
'Mo ', # 0x98
'Chu ', # 0x99
'Tan ', # 0x9a
'Huan ', # 0x9b
'Jue ', # 0x9c
'Bei ', # 0x9d
'Zhen ', # 0x9e
'Yuan ', # 0x9f
'Fu ', # 0xa0
'Cai ', # 0xa1
'Gong ', # 0xa2
'Te ', # 0xa3
'Yi ', # 0xa4
'Hang ', # 0xa5
'Wan ', # 0xa6
'Pin ', # 0xa7
'Huo ', # 0xa8
'Fan ', # 0xa9
'Tan ', # 0xaa
'Guan ', # 0xab
'Ze ', # 0xac
'Zhi ', # 0xad
'Er ', # 0xae
'Zhu ', # 0xaf
'Shi ', # 0xb0
'Bi ', # 0xb1
'Zi ', # 0xb2
'Er ', # 0xb3
'Gui ', # 0xb4
'Pian ', # 0xb5
'Bian ', # 0xb6
'Mai ', # 0xb7
'Dai ', # 0xb8
'Sheng ', # 0xb9
'Kuang ', # 0xba
'Fei ', # 0xbb
'Tie ', # 0xbc
'Yi ', # 0xbd
'Chi ', # 0xbe
'Mao ', # 0xbf
'He ', # 0xc0
'Bi ', # 0xc1
'Lu ', # 0xc2
'Ren ', # 0xc3
'Hui ', # 0xc4
'Gai ', # 0xc5
'Pian ', # 0xc6
'Zi ', # 0xc7
'Jia ', # 0xc8
'Xu ', # 0xc9
'Zei ', # 0xca
'Jiao ', # 0xcb
'Gai ', # 0xcc
'Zang ', # 0xcd
'Jian ', # 0xce
'Ying ', # 0xcf
'Xun ', # 0xd0
'Zhen ', # 0xd1
'She ', # 0xd2
'Bin ', # 0xd3
'Bin ', # 0xd4
'Qiu ', # 0xd5
'She ', # 0xd6
'Chuan ', # 0xd7
'Zang ', # 0xd8
'Zhou ', # 0xd9
'Lai ', # 0xda
'Zan ', # 0xdb
'Si ', # 0xdc
'Chen ', # 0xdd
'Shang ', # 0xde
'Tian ', # 0xdf
'Pei ', # 0xe0
'Geng ', # 0xe1
'Xian ', # 0xe2
'Mai ', # 0xe3
'Jian ', # 0xe4
'Sui ', # 0xe5
'Fu ', # 0xe6
'Tan ', # 0xe7
'Cong ', # 0xe8
'Cong ', # 0xe9
'Zhi ', # 0xea
'Ji ', # 0xeb
'Zhang ', # 0xec
'Du ', # 0xed
'Jin ', # 0xee
'Xiong ', # 0xef
'Shun ', # 0xf0
'Yun ', # 0xf1
'Bao ', # 0xf2
'Zai ', # 0xf3
'Lai ', # 0xf4
'Feng ', # 0xf5
'Cang ', # 0xf6
'Ji ', # 0xf7
'Sheng ', # 0xf8
'Ai ', # 0xf9
'Zhuan ', # 0xfa
'Fu ', # 0xfb
'Gou ', # 0xfc
'Sai ', # 0xfd
'Ze ', # 0xfe
'Liao ', # 0xff
)
| gpl-3.0 |
ArcherSys/ArcherSys | Lib/opcode.py | 1 | 16466 | <<<<<<< HEAD
<<<<<<< HEAD
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"]
# It's a chicken-and-egg I'm afraid:
# We're imported before _opcode's made.
# With exception unheeded
# (stack_effect is not needed)
# Both our chickens and eggs are allayed.
# --Larry Hastings, 2013/11/23
try:
from _opcode import stack_effect
__all__.append('stack_effect')
except ImportError:
pass
# Operand names for the COMPARE_OP opcode, indexed by its argument.
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
        'is not', 'exception match', 'BAD')
# Category lists: each holds the opcodes whose argument has that meaning
# (constant index, name index, relative jump target, and so on).
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []
hasnargs = []
# opmap: mnemonic -> opcode number; opname: opcode number -> mnemonic.
opmap = {}
opname = [''] * 256
# Fill every slot with a '<N>' placeholder so unassigned opcodes still
# render something meaningful in disassembly output.
for op in range(256): opname[op] = '<%r>' % (op,)
del op
def def_op(name, op):
    """Register mnemonic *name* for opcode *op* in both lookup tables."""
    opmap[name] = op
    opname[op] = name
def name_op(name, op):
    """Define an opcode whose argument indexes the co_names list."""
    hasname.append(op)
    def_op(name, op)
def jrel_op(name, op):
    """Define an opcode whose argument is a relative jump offset."""
    hasjrel.append(op)
    def_op(name, op)
def jabs_op(name, op):
def_op(name, op)
hasjabs.append(op)
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('DUP_TOP_TWO', 5)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_INVERT', 15)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('STORE_MAP', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
def_op('WITH_CLEANUP', 81)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('END_FINALLY', 88)
def_op('POP_EXCEPT', 89)
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
name_op('STORE_NAME', 90) # Index in name list
name_op('DELETE_NAME', 91) # ""
def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
jrel_op('FOR_ITER', 93)
def_op('UNPACK_EX', 94)
name_op('STORE_ATTR', 95) # Index in name list
name_op('DELETE_ATTR', 96) # ""
name_op('STORE_GLOBAL', 97) # ""
name_op('DELETE_GLOBAL', 98) # ""
def_op('LOAD_CONST', 100) # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101) # Index in name list
def_op('BUILD_TUPLE', 102) # Number of tuple items
def_op('BUILD_LIST', 103) # Number of list items
def_op('BUILD_SET', 104) # Number of set items
def_op('BUILD_MAP', 105) # Number of dict entries (upto 255)
name_op('LOAD_ATTR', 106) # Index in name list
def_op('COMPARE_OP', 107) # Comparison operator
hascompare.append(107)
name_op('IMPORT_NAME', 108) # Index in name list
name_op('IMPORT_FROM', 109) # Index in name list
jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code
jabs_op('JUMP_IF_TRUE_OR_POP', 112) # ""
jabs_op('JUMP_ABSOLUTE', 113) # ""
jabs_op('POP_JUMP_IF_FALSE', 114) # ""
jabs_op('POP_JUMP_IF_TRUE', 115) # ""
name_op('LOAD_GLOBAL', 116) # Index in name list
jabs_op('CONTINUE_LOOP', 119) # Target address
jrel_op('SETUP_LOOP', 120) # Distance to target address
jrel_op('SETUP_EXCEPT', 121) # ""
jrel_op('SETUP_FINALLY', 122) # ""
def_op('LOAD_FAST', 124) # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125) # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126) # Local variable number
haslocal.append(126)
def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131) # #args + (#kwargs << 8)
hasnargs.append(131)
def_op('MAKE_FUNCTION', 132) # Number of args with default values
def_op('BUILD_SLICE', 133) # Number of items
def_op('MAKE_CLOSURE', 134)
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)
def_op('DELETE_DEREF', 138)
hasfree.append(138)
def_op('CALL_FUNCTION_VAR', 140) # #args + (#kwargs << 8)
hasnargs.append(140)
def_op('CALL_FUNCTION_KW', 141) # #args + (#kwargs << 8)
hasnargs.append(141)
def_op('CALL_FUNCTION_VAR_KW', 142) # #args + (#kwargs << 8)
hasnargs.append(142)
jrel_op('SETUP_WITH', 143)
def_op('LIST_APPEND', 145)
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
def_op('LOAD_CLASSDEREF', 148)
hasfree.append(148)
def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144
del def_op, name_op, jrel_op, jabs_op
=======
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"]
# It's a chicken-and-egg I'm afraid:
# We're imported before _opcode's made.
# With exception unheeded
# (stack_effect is not needed)
# Both our chickens and eggs are allayed.
# --Larry Hastings, 2013/11/23
try:
from _opcode import stack_effect
__all__.append('stack_effect')
except ImportError:
pass
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
'is not', 'exception match', 'BAD')
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []
hasnargs = []
opmap = {}
opname = [''] * 256
for op in range(256): opname[op] = '<%r>' % (op,)
del op
def def_op(name, op):
opname[op] = name
opmap[name] = op
def name_op(name, op):
def_op(name, op)
hasname.append(op)
def jrel_op(name, op):
def_op(name, op)
hasjrel.append(op)
def jabs_op(name, op):
def_op(name, op)
hasjabs.append(op)
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('DUP_TOP_TWO', 5)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_INVERT', 15)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('STORE_MAP', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
def_op('WITH_CLEANUP', 81)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('END_FINALLY', 88)
def_op('POP_EXCEPT', 89)
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
name_op('STORE_NAME', 90) # Index in name list
name_op('DELETE_NAME', 91) # ""
def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
jrel_op('FOR_ITER', 93)
def_op('UNPACK_EX', 94)
name_op('STORE_ATTR', 95) # Index in name list
name_op('DELETE_ATTR', 96) # ""
name_op('STORE_GLOBAL', 97) # ""
name_op('DELETE_GLOBAL', 98) # ""
def_op('LOAD_CONST', 100) # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101) # Index in name list
def_op('BUILD_TUPLE', 102) # Number of tuple items
def_op('BUILD_LIST', 103) # Number of list items
def_op('BUILD_SET', 104) # Number of set items
def_op('BUILD_MAP', 105) # Number of dict entries (upto 255)
name_op('LOAD_ATTR', 106) # Index in name list
def_op('COMPARE_OP', 107) # Comparison operator
hascompare.append(107)
name_op('IMPORT_NAME', 108) # Index in name list
name_op('IMPORT_FROM', 109) # Index in name list
jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code
jabs_op('JUMP_IF_TRUE_OR_POP', 112) # ""
jabs_op('JUMP_ABSOLUTE', 113) # ""
jabs_op('POP_JUMP_IF_FALSE', 114) # ""
jabs_op('POP_JUMP_IF_TRUE', 115) # ""
name_op('LOAD_GLOBAL', 116) # Index in name list
jabs_op('CONTINUE_LOOP', 119) # Target address
jrel_op('SETUP_LOOP', 120) # Distance to target address
jrel_op('SETUP_EXCEPT', 121) # ""
jrel_op('SETUP_FINALLY', 122) # ""
def_op('LOAD_FAST', 124) # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125) # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126) # Local variable number
haslocal.append(126)
def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131) # #args + (#kwargs << 8)
hasnargs.append(131)
def_op('MAKE_FUNCTION', 132) # Number of args with default values
def_op('BUILD_SLICE', 133) # Number of items
def_op('MAKE_CLOSURE', 134)
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)
def_op('DELETE_DEREF', 138)
hasfree.append(138)
def_op('CALL_FUNCTION_VAR', 140) # #args + (#kwargs << 8)
hasnargs.append(140)
def_op('CALL_FUNCTION_KW', 141) # #args + (#kwargs << 8)
hasnargs.append(141)
def_op('CALL_FUNCTION_VAR_KW', 142) # #args + (#kwargs << 8)
hasnargs.append(142)
jrel_op('SETUP_WITH', 143)
def_op('LIST_APPEND', 145)
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
def_op('LOAD_CLASSDEREF', 148)
hasfree.append(148)
def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144
del def_op, name_op, jrel_op, jabs_op
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"]
# It's a chicken-and-egg I'm afraid:
# We're imported before _opcode's made.
# With exception unheeded
# (stack_effect is not needed)
# Both our chickens and eggs are allayed.
# --Larry Hastings, 2013/11/23
try:
from _opcode import stack_effect
__all__.append('stack_effect')
except ImportError:
pass
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
'is not', 'exception match', 'BAD')
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []
hasnargs = []
opmap = {}
opname = [''] * 256
for op in range(256): opname[op] = '<%r>' % (op,)
del op
def def_op(name, op):
opname[op] = name
opmap[name] = op
def name_op(name, op):
def_op(name, op)
hasname.append(op)
def jrel_op(name, op):
def_op(name, op)
hasjrel.append(op)
def jabs_op(name, op):
def_op(name, op)
hasjabs.append(op)
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('DUP_TOP_TWO', 5)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_INVERT', 15)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('STORE_MAP', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
def_op('WITH_CLEANUP', 81)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('END_FINALLY', 88)
def_op('POP_EXCEPT', 89)
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
name_op('STORE_NAME', 90) # Index in name list
name_op('DELETE_NAME', 91) # ""
def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
jrel_op('FOR_ITER', 93)
def_op('UNPACK_EX', 94)
name_op('STORE_ATTR', 95) # Index in name list
name_op('DELETE_ATTR', 96) # ""
name_op('STORE_GLOBAL', 97) # ""
name_op('DELETE_GLOBAL', 98) # ""
def_op('LOAD_CONST', 100) # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101) # Index in name list
def_op('BUILD_TUPLE', 102) # Number of tuple items
def_op('BUILD_LIST', 103) # Number of list items
def_op('BUILD_SET', 104) # Number of set items
def_op('BUILD_MAP', 105) # Number of dict entries (upto 255)
name_op('LOAD_ATTR', 106) # Index in name list
def_op('COMPARE_OP', 107) # Comparison operator
hascompare.append(107)
name_op('IMPORT_NAME', 108) # Index in name list
name_op('IMPORT_FROM', 109) # Index in name list
jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code
jabs_op('JUMP_IF_TRUE_OR_POP', 112) # ""
jabs_op('JUMP_ABSOLUTE', 113) # ""
jabs_op('POP_JUMP_IF_FALSE', 114) # ""
jabs_op('POP_JUMP_IF_TRUE', 115) # ""
name_op('LOAD_GLOBAL', 116) # Index in name list
jabs_op('CONTINUE_LOOP', 119) # Target address
jrel_op('SETUP_LOOP', 120) # Distance to target address
jrel_op('SETUP_EXCEPT', 121) # ""
jrel_op('SETUP_FINALLY', 122) # ""
def_op('LOAD_FAST', 124) # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125) # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126) # Local variable number
haslocal.append(126)
def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131) # #args + (#kwargs << 8)
hasnargs.append(131)
def_op('MAKE_FUNCTION', 132) # Number of args with default values
def_op('BUILD_SLICE', 133) # Number of items
def_op('MAKE_CLOSURE', 134)
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)
def_op('DELETE_DEREF', 138)
hasfree.append(138)
def_op('CALL_FUNCTION_VAR', 140) # #args + (#kwargs << 8)
hasnargs.append(140)
def_op('CALL_FUNCTION_KW', 141) # #args + (#kwargs << 8)
hasnargs.append(141)
def_op('CALL_FUNCTION_VAR_KW', 142) # #args + (#kwargs << 8)
hasnargs.append(142)
jrel_op('SETUP_WITH', 143)
def_op('LIST_APPEND', 145)
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
def_op('LOAD_CLASSDEREF', 148)
hasfree.append(148)
def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144
del def_op, name_op, jrel_op, jabs_op
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| mit |
Edraak/edx-platform | cms/djangoapps/contentstore/features/course-settings.py | 33 | 7145 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from selenium.webdriver.common.keys import Keys
from common import type_in_codemirror, upload_file
from django.conf import settings
from nose.tools import assert_true, assert_false
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
COURSE_START_DATE_CSS = "#course-start-date"
COURSE_END_DATE_CSS = "#course-end-date"
ENROLLMENT_START_DATE_CSS = "#course-enrollment-start-date"
ENROLLMENT_END_DATE_CSS = "#course-enrollment-end-date"
COURSE_START_TIME_CSS = "#course-start-time"
COURSE_END_TIME_CSS = "#course-end-time"
ENROLLMENT_START_TIME_CSS = "#course-enrollment-start-time"
ENROLLMENT_END_TIME_CSS = "#course-enrollment-end-time"
DUMMY_TIME = "15:30"
DEFAULT_TIME = "00:00"
############### ACTIONS ####################
@step('I select Schedule and Details$')
def test_i_select_schedule_and_details(step):
    """Open the Schedule & Details page from the course settings menu."""
    world.click_course_settings()
    link_css = 'li.nav-course-settings-schedule a'
    world.css_click(link_css)
    # Wait for the page's RequireJS modules so later steps interact with a
    # fully-initialized settings view.
    world.wait_for_requirejs(
        ["jquery", "js/models/course",
         "js/models/settings/course_details", "js/views/settings/main"])


@step('I have set course dates$')
def test_i_have_set_course_dates(step):
    """Composite step: create a course, open Schedule & Details, fill dates."""
    step.given('I have opened a new course in Studio')
    step.given('I select Schedule and Details')
    step.given('And I set course dates')


@step('And I set course dates$')
def test_and_i_set_course_dates(step):
    """Populate course/enrollment start and end dates plus two time fields."""
    set_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')
    set_date_or_time(COURSE_END_DATE_CSS, '12/26/2013')
    set_date_or_time(ENROLLMENT_START_DATE_CSS, '12/1/2013')
    set_date_or_time(ENROLLMENT_END_DATE_CSS, '12/10/2013')
    set_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)
    set_date_or_time(ENROLLMENT_END_TIME_CSS, DUMMY_TIME)


@step('And I clear all the dates except start$')
def test_and_i_clear_all_the_dates_except_start(step):
    """Blank every optional date field; course start (required) is kept."""
    set_date_or_time(COURSE_END_DATE_CSS, '')
    set_date_or_time(ENROLLMENT_START_DATE_CSS, '')
    set_date_or_time(ENROLLMENT_END_DATE_CSS, '')


@step('Then I see cleared dates$')
def test_then_i_see_cleared_dates(step):
    """Cleared date fields also clear their associated time fields."""
    verify_date_or_time(COURSE_END_DATE_CSS, '')
    verify_date_or_time(ENROLLMENT_START_DATE_CSS, '')
    verify_date_or_time(ENROLLMENT_END_DATE_CSS, '')
    verify_date_or_time(COURSE_END_TIME_CSS, '')
    verify_date_or_time(ENROLLMENT_START_TIME_CSS, '')
    verify_date_or_time(ENROLLMENT_END_TIME_CSS, '')
    # Verify course start date (required) and time still there
    verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')
    verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)
@step('I clear the course start date$')
def test_i_clear_the_course_start_date(step):
    """Blank out the (required) course start date field."""
    set_date_or_time(COURSE_START_DATE_CSS, '')


@step('I receive a warning about course start date$')
def test_i_receive_a_warning_about_course_start_date(step):
    """Expect the validation message plus error styling on both start fields."""
    assert_true(world.css_has_text('.message-error', 'The course must have an assigned start date.'))
    assert_true('error' in world.css_find(COURSE_START_DATE_CSS).first._element.get_attribute('class'))
    assert_true('error' in world.css_find(COURSE_START_TIME_CSS).first._element.get_attribute('class'))


@step('the previously set start date is shown$')
def test_the_previously_set_start_date_is_shown(step):
    """The empty value is rejected; the previous start date/time remain shown."""
    verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')
    verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)


@step('Given I have tried to clear the course start$')
def test_i_have_tried_to_clear_the_course_start(step):
    """Composite step: set dates, clear the start date, expect the warning."""
    step.given("I have set course dates")
    step.given("I clear the course start date")
    step.given("I receive a warning about course start date")


@step('I have entered a new course start date$')
def test_i_have_entered_a_new_course_start_date(step):
    """Replace the course start date with a new valid value."""
    set_date_or_time(COURSE_START_DATE_CSS, '12/22/2013')


@step('The warning about course start date goes away$')
def test_the_warning_about_course_start_date_goes_away(step):
    """A valid date removes both the message and the error styling."""
    assert world.is_css_not_present('.message-error')
    assert_false('error' in world.css_find(COURSE_START_DATE_CSS).first._element.get_attribute('class'))
    assert_false('error' in world.css_find(COURSE_START_TIME_CSS).first._element.get_attribute('class'))


@step('my new course start date is shown$')
def new_course_start_date_is_shown(step):
    """Verify the replacement start date took effect."""
    verify_date_or_time(COURSE_START_DATE_CSS, '12/22/2013')
    # Time should have stayed from before attempt to clear date.
    verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)
@step('I change fields$')
def test_i_change_fields(step):
    """Set every date field to a recognisable far-future placeholder."""
    set_date_or_time(COURSE_START_DATE_CSS, '7/7/7777')
    set_date_or_time(COURSE_END_DATE_CSS, '7/7/7777')
    set_date_or_time(ENROLLMENT_START_DATE_CSS, '7/7/7777')
    set_date_or_time(ENROLLMENT_END_DATE_CSS, '7/7/7777')


@step('I change the course overview')
def test_change_course_overview(_step):
    """Type new overview HTML into the first CodeMirror editor on the page."""
    type_in_codemirror(0, "<h1>Overview</h1>")


@step('I click the "Upload Course Image" button')
def click_upload_button(_step):
    """Open the course-image upload dialog."""
    button_css = '.action-upload-image'
    world.css_click(button_css)


@step('I upload a new course image$')
def upload_new_course_image(_step):
    """Upload the image fixture through the open dialog."""
    # assumes 'image.jpg' exists under the common test data 'uploads'
    # directory -- TODO confirm against upload_file()'s search path
    upload_file('image.jpg', sub_path="uploads")


@step('I should see the new course image$')
def i_see_new_course_image(_step):
    """Wait until the course image element points at the uploaded file."""
    img_css = '#course-image'
    images = world.css_find(img_css)
    assert len(images) == 1
    img = images[0]
    expected_src = 'image.jpg'
    # Don't worry about the domain in the URL
    success_func = lambda _: img['src'].endswith(expected_src)
    world.wait_for(success_func)


@step('the image URL should be present in the field')
def image_url_present(_step):
    """The image URL text field should end with the uploaded filename."""
    field_css = '#course-image-url'
    expected_value = 'image.jpg'
    assert world.css_value(field_css).endswith(expected_value)
############### HELPER METHODS ####################
def set_date_or_time(css, date_or_time):
    """Fill the date/time input matched by *css* and press Enter to commit it."""
    world.css_fill(css, date_or_time)
    field = world.css_find(css).first
    # The page only applies the new value once the field receives Enter.
    field._element.send_keys(Keys.ENTER)


def verify_date_or_time(css, date_or_time):
    """Assert that the field matched by *css* eventually holds *date_or_time*."""
    # css_has_value() first waits for the field to be non-blank, which gives
    # the page's JavaScript time to populate it before we compare.
    assert_true(world.css_has_value(css, date_or_time))
@step('I do not see the changes')
@step('I see the set dates')
def i_see_the_set_dates(_step):
    """
    Ensure that each field has the value set in `test_and_i_set_course_dates`.
    """
    verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')
    verify_date_or_time(COURSE_END_DATE_CSS, '12/26/2013')
    # Entered as '12/1/2013' -- presumably the UI redisplays it zero-padded;
    # verify against the setter step if this assertion ever flakes.
    verify_date_or_time(ENROLLMENT_START_DATE_CSS, '12/01/2013')
    verify_date_or_time(ENROLLMENT_END_DATE_CSS, '12/10/2013')
    verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)
    # Unset times get set to 12 AM once the corresponding date has been set.
    verify_date_or_time(COURSE_END_TIME_CSS, DEFAULT_TIME)
    verify_date_or_time(ENROLLMENT_START_TIME_CSS, DEFAULT_TIME)
    verify_date_or_time(ENROLLMENT_END_TIME_CSS, DUMMY_TIME)
| agpl-3.0 |
benagricola/exabgp | lib/exabgp/bgp/message/update/nlri/evpn/multicast.py | 1 | 2426 | """
multicast.py
Created by Thomas Morin on 2014-06-23.
Copyright (c) 2014-2015 Orange. All rights reserved.
"""
from exabgp.protocol.ip import IP
from exabgp.bgp.message.update.nlri.qualifier import RouteDistinguisher
from exabgp.bgp.message.update.nlri.qualifier import EthernetTag
from exabgp.bgp.message.update.nlri.evpn.nlri import EVPN
# +---------------------------------------+
# | RD (8 octets) |
# +---------------------------------------+
# | Ethernet Tag ID (4 octets) |
# +---------------------------------------+
# | IP Address Length (1 octet) |
# +---------------------------------------+
# | Originating Router's IP Addr |
# | (4 or 16 octets) |
# +---------------------------------------+
# ===================================================================== EVPNNLRI
@EVPN.register
class Multicast (EVPN):
    """EVPN route type 3: Inclusive Multicast Ethernet Tag route.

    Wire layout (see the diagram above): RD (8 octets), Ethernet Tag
    (4 octets), IP address length in *bits* (1 octet), then the
    originating router's IPv4/IPv6 address (4 or 16 octets).

    NOTE(review): this module uses Python 2 byte-string idioms
    (``chr``/``ord``, str concatenation, integer ``/`` division).
    """
    CODE = 3
    NAME = "Inclusive Multicast Ethernet Tag"
    SHORT_NAME = "Multicast"

    def __init__ (self, rd, etag, ip, packed=None,nexthop=None,action=None,addpath=None):
        # rd: RouteDistinguisher, etag: EthernetTag, ip: IP address object.
        EVPN.__init__(self,action,addpath)
        self.nexthop = nexthop
        self.rd = rd
        self.etag = etag
        self.ip = ip
        # Cache (or compute) the wire encoding immediately.
        self._pack(packed)

    def __ne__ (self, other):
        # Python 2 does not derive __ne__ from __eq__; __eq__ itself is
        # presumably inherited from EVPN -- confirm in the base class.
        return not self.__eq__(other)

    def __str__ (self):
        return "%s:%s:%s:%s" % (
            self._prefix(),
            self.rd._str(),
            self.etag,
            self.ip,
        )

    def __hash__ (self):
        # Hash on everything that identifies the route on the wire.
        return hash((self.afi,self.safi,self.CODE,self.rd,self.etag,self.ip))

    def _pack (self, packed=None):
        # Return the cached wire encoding, storing `packed` when supplied.
        # NOTE(review): relies on self._packed existing before the first
        # call -- presumably initialised by EVPN.__init__; confirm.
        if self._packed:
            return self._packed
        if packed:
            self._packed = packed
            return packed
        # chr() encodes the IP length in bits (32 for IPv4, 128 for IPv6).
        self._packed = '%s%s%s%s' % (
            self.rd.pack(),
            self.etag.pack(),
            chr(len(self.ip)*8),
            self.ip.pack()
        )
        return self._packed

    @classmethod
    def unpack (cls, data):
        """Build a Multicast route from its raw NLRI bytes."""
        rd = RouteDistinguisher.unpack(data[:8])
        etag = EthernetTag.unpack(data[8:12])
        iplen = ord(data[12])  # address length in bits
        if iplen not in (4*8,16*8):
            raise Exception("IP len is %d, but EVPN route currently support only IPv4" % iplen)
        ip = IP.unpack(data[13:13+iplen/8])  # Python 2 integer division
        return cls(rd,etag,ip,data)

    def json (self, compact=None):
        # Hand-built JSON fragment matching the other EVPN route types.
        content = ' "code": %d, ' % self.CODE
        content += '"parsed": true, '
        content += '"raw": "%s", ' % self._raw()
        content += '"name": "%s", ' % self.NAME
        content += '%s, ' % self.rd.json()
        content += self.etag.json()
        if self.ip:
            content += ', "ip": "%s"' % str(self.ip)
        return '{%s }' % content
| bsd-3-clause |
ACJTeam/enigma2 | lib/python/Screens/DVD.py | 7 | 24101 | import os
from enigma import eTimer, iPlayableService, iServiceInformation, eServiceReference, iServiceKeys, getDesktop
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
from Screens.HelpMenu import HelpableScreen
from Screens.InfoBarGenerics import InfoBarSeek, InfoBarPVRState, InfoBarCueSheetSupport, InfoBarShowHide, InfoBarNotifications, InfoBarAudioSelection, InfoBarSubtitleSupport
from Components.ActionMap import ActionMap, NumberActionMap, HelpableActionMap
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.ServiceEventTracker import ServiceEventTracker, InfoBarBase
from Components.config import config
from Tools.Directories import pathExists, fileExists
from Components.Harddisk import harddiskmanager
lastpath = ""
class DVDSummary(Screen):
    """Summary screen for the DVD player (e.g. shown on the front-panel LCD).

    Mirrors the player's current title, time and chapter information.
    """
    def __init__(self, session, parent):
        Screen.__init__(self, session, parent)
        self["Title"] = Label("")
        self["Time"] = Label("")
        self["Chapter"] = Label("")

    def updateChapter(self, chapter):
        # Called by the player whenever the current chapter text changes.
        self["Chapter"].setText(chapter)

    def setTitle(self, title):
        # NOTE(review): this overrides any setTitle provided by Screen --
        # here it only updates the "Title" label; confirm that is intended.
        self["Title"].setText(title)
class DVDOverlay(Screen):
    """Borderless, transparent screen covering the whole display.

    Serves as the drawing surface for the DVD subtitle/menu overlay.  The
    skin is generated at runtime so its size always matches the current
    desktop (or an explicitly requested height).
    """
    def __init__(self, session, args = None, height = None):
        desktop_size = getDesktop(0).size()
        w = desktop_size.width()
        h = desktop_size.height()
        if height is not None:
            h = height
        # Deliberately assigned to the *class* attribute: the skin string
        # must be in place before Screen.__init__ parses it below.
        DVDOverlay.skin = """<screen name="DVDOverlay" position="0,0" size="%d,%d" flags="wfNoBorder" zPosition="-1" backgroundColor="transparent" />""" %(w, h)
        Screen.__init__(self, session)
class ChapterZap(Screen):
    """Numeric input dialog used to jump directly to a DVD chapter.

    Digits are collected until either 4 digits were entered, OK is
    pressed, or 3 seconds pass without input; the dialog then closes with
    the entered chapter number.  Cancel closes with 0.
    """
    skin = """
    <screen name="ChapterZap" position="235,255" size="250,60" title="Chapter" >
        <widget name="chapter" position="35,15" size="110,25" font="Regular;23" />
        <widget name="number" position="145,15" size="80,25" halign="right" font="Regular;23" />
    </screen>"""

    def quit(self):
        # Cancelled: close with 0 (callers treat a falsy result as "no zap").
        self.Timer.stop()
        self.close(0)

    def keyOK(self):
        self.Timer.stop()
        # NOTE(review): with no digits entered this closes with "" (empty
        # string) rather than 0 -- both are falsy, but the types differ.
        self.close(self.field and int(self.field))

    def keyNumberGlobal(self, number):
        # Every keypress restarts the 3-second auto-confirm timeout.
        self.Timer.start(3000, True)
        self.field += str(number)
        self["number"].setText(self.field)
        # Chapter numbers are capped at 4 digits; confirm immediately.
        if len(self.field) >= 4:
            self.keyOK()

    def __init__(self, session):
        Screen.__init__(self, session)
        self.field = ""  # digits entered so far, as a string
        self["chapter"] = Label(_("Chapter:"))
        self["number"] = Label(self.field)
        self["actions"] = NumberActionMap( [ "SetupActions" ],
        {
            "cancel": self.quit,
            "ok": self.keyOK,
            "1": self.keyNumberGlobal,
            "2": self.keyNumberGlobal,
            "3": self.keyNumberGlobal,
            "4": self.keyNumberGlobal,
            "5": self.keyNumberGlobal,
            "6": self.keyNumberGlobal,
            "7": self.keyNumberGlobal,
            "8": self.keyNumberGlobal,
            "9": self.keyNumberGlobal,
            "0": self.keyNumberGlobal
        })
        # Auto-confirm whatever was typed after 3 seconds of inactivity.
        self.Timer = eTimer()
        self.Timer.callback.append(self.keyOK)
        self.Timer.start(3000, True)
class DVDPlayer(Screen, InfoBarBase, InfoBarNotifications, InfoBarSeek, InfoBarPVRState, InfoBarShowHide, HelpableScreen, InfoBarCueSheetSupport, InfoBarAudioSelection, InfoBarSubtitleSupport):
ALLOW_SUSPEND = Screen.SUSPEND_PAUSES
ENABLE_RESUME_SUPPORT = True
skin = """
<screen name="DVDPlayer" flags="wfNoBorder" position="0,380" size="720,160" title="Info bar" backgroundColor="transparent" >
<!-- Background -->
<ePixmap position="0,0" zPosition="-2" size="720,160" pixmap="skin_default/info-bg_mp.png" alphatest="off" />
<ePixmap position="29,40" zPosition="0" size="665,104" pixmap="skin_default/screws_mp.png" alphatest="on" transparent="1" />
<!-- colorbuttons -->
<ePixmap position="48,70" zPosition="0" size="108,13" pixmap="skin_default/icons/mp_buttons.png" alphatest="on" />
<!-- Servicename -->
<ePixmap pixmap="skin_default/icons/icon_event.png" position="207,78" zPosition="1" size="15,10" alphatest="on" />
<widget source="session.CurrentService" render="Label" position="230,73" size="300,22" font="Regular;20" backgroundColor="#263c59" shadowColor="#1d354c" shadowOffset="-1,-1" transparent="1" noWrap="1">
<convert type="ServiceName">Name</convert>
</widget>
<!-- Chapter info -->
<widget name="chapterLabel" position="230,96" size="360,22" font="Regular;20" foregroundColor="#c3c3c9" backgroundColor="#263c59" transparent="1" />
<!-- Audio track info -->
<ePixmap pixmap="skin_default/icons/icon_dolby.png" position="540,60" zPosition="1" size="26,16" alphatest="on"/>
<widget name="audioLabel" position="570,60" size="130,22" font="Regular;18" backgroundColor="#263c59" shadowColor="#1d354c" shadowOffset="-1,-1" transparent="1" />
<!-- Subtitle track info -->
<widget source="session.CurrentService" render="Pixmap" pixmap="skin_default/icons/icon_txt.png" position="540,83" zPosition="1" size="26,16" alphatest="on" >
<convert type="ServiceInfo">HasTelext</convert>
<convert type="ConditionalShowHide" />
</widget>
<widget name="subtitleLabel" position="570,83" size="130,22" font="Regular;18" backgroundColor="#263c59" shadowColor="#1d354c" shadowOffset="-1,-1" transparent="1" />
<!-- Angle info -->
<widget name="anglePix" pixmap="skin_default/icons/icon_view.png" position="540,106" size="26,16" alphatest="on" />
<widget name="angleLabel" position="570,106" size="130,22" font="Regular;18" backgroundColor="#263c59" shadowColor="#1d354c" shadowOffset="-1,-1" transparent="1" />
<!-- Elapsed time -->
<widget source="session.CurrentService" render="Label" position="205,129" size="100,20" font="Regular;18" halign="center" valign="center" backgroundColor="#06224f" shadowColor="#1d354c" shadowOffset="-1,-1" transparent="1" >
<convert type="ServicePosition">Position,ShowHours</convert>
</widget>
<!-- Progressbar (movie position)-->
<widget source="session.CurrentService" render="PositionGauge" position="300,133" size="270,10" zPosition="2" pointer="skin_default/position_pointer.png:540,0" transparent="1" >
<convert type="ServicePosition">Gauge</convert>
</widget>
<!-- Remaining time -->
<widget source="session.CurrentService" render="Label" position="576,129" size="100,20" font="Regular;18" halign="center" valign="center" backgroundColor="#06224f" shadowColor="#1d354c" shadowOffset="-1,-1" transparent="1" >
<convert type="ServicePosition">Remaining,Negate,ShowHours</convert>
</widget>
</screen>"""
def __init__(self, session, dvd_device = None, dvd_filelist = [ ], args = None):
Screen.__init__(self, session)
InfoBarBase.__init__(self)
InfoBarNotifications.__init__(self)
InfoBarCueSheetSupport.__init__(self, actionmap = "MediaPlayerCueSheetActions")
InfoBarShowHide.__init__(self)
InfoBarAudioSelection.__init__(self)
InfoBarSubtitleSupport.__init__(self)
HelpableScreen.__init__(self)
self.save_infobar_seek_config()
self.change_infobar_seek_config()
InfoBarSeek.__init__(self)
InfoBarPVRState.__init__(self)
self.oldService = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.dvdScreen = None
self.session.nav.stopService()
self["audioLabel"] = Label("n/a")
self["subtitleLabel"] = Label("")
self["angleLabel"] = Label("")
self["chapterLabel"] = Label("")
self["anglePix"] = Pixmap()
self["anglePix"].hide()
self.last_audioTuple = None
self.last_subtitleTuple = None
self.last_angleTuple = None
self.totalChapters = 0
self.currentChapter = 0
self.totalTitles = 0
self.currentTitle = 0
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evEnd: self.__serviceStopped,
iPlayableService.evStopped: self.__serviceStopped,
iPlayableService.evUser: self.__timeUpdated,
iPlayableService.evUser+1: self.__statePlay,
iPlayableService.evUser+2: self.__statePause,
iPlayableService.evUser+3: self.__osdFFwdInfoAvail,
iPlayableService.evUser+4: self.__osdFBwdInfoAvail,
iPlayableService.evUser+5: self.__osdStringAvail,
iPlayableService.evUser+6: self.__osdAudioInfoAvail,
iPlayableService.evUser+7: self.__osdSubtitleInfoAvail,
iPlayableService.evUser+8: self.__chapterUpdated,
iPlayableService.evUser+9: self.__titleUpdated,
iPlayableService.evUser+11: self.__menuOpened,
iPlayableService.evUser+12: self.__menuClosed,
iPlayableService.evUser+13: self.__osdAngleInfoAvail
})
self["DVDPlayerDirectionActions"] = ActionMap(["DirectionActions"],
{
#MENU KEY DOWN ACTIONS
"left": self.keyLeft,
"right": self.keyRight,
"up": self.keyUp,
"down": self.keyDown,
#MENU KEY REPEATED ACTIONS
"leftRepeated": self.doNothing,
"rightRepeated": self.doNothing,
"upRepeated": self.doNothing,
"downRepeated": self.doNothing,
#MENU KEY UP ACTIONS
"leftUp": self.doNothing,
"rightUp": self.doNothing,
"upUp": self.doNothing,
"downUp": self.doNothing,
})
self["OkCancelActions"] = ActionMap(["OkCancelActions"],
{
"ok": self.keyOk,
"cancel": self.keyCancel,
}, -2)
self["DVDPlayerPlaybackActions"] = HelpableActionMap(self, "DVDPlayerActions",
{
#PLAYER ACTIONS
"dvdMenu": (self.enterDVDMenu, _("show DVD main menu")),
"toggleInfo": (self.toggleInfo, _("toggle time, chapter, audio, subtitle info")),
"nextChapter": (self.nextChapter, _("forward to the next chapter")),
"prevChapter": (self.prevChapter, _("rewind to the previous chapter")),
"nextTitle": (self.nextTitle, _("jump forward to the next title")),
"prevTitle": (self.prevTitle, _("jump back to the previous title")),
"tv": (self.askLeavePlayer, _("exit DVD player or return to file browser")),
"dvdAudioMenu": (self.enterDVDAudioMenu, _("(show optional DVD audio menu)")),
"AudioSelection": (self.enterAudioSelection, _("Select audio track")),
"nextAudioTrack": (self.nextAudioTrack, _("switch to the next audio track")),
"nextSubtitleTrack": (self.nextSubtitleTrack, _("switch to the next subtitle language")),
"nextAngle": (self.nextAngle, _("switch to the next angle")),
"seekBeginning": self.seekBeginning,
}, -2)
self["DVDPlayerColorActions"] = HelpableActionMap(self, "ColorActions",
{
"blue": (self.chapterZap, _("jump to chapter by number")),
}, -2)
self.onClose.append(self.__onClose)
try:
from Plugins.SystemPlugins.Hotplug.plugin import hotplugNotifier
hotplugNotifier.append(self.hotplugCB)
except:
pass
self.autoplay = dvd_device or dvd_filelist
if dvd_device:
self.physicalDVD = True
else:
self.scanHotplug()
self.dvd_filelist = dvd_filelist
self.onFirstExecBegin.append(self.opened)
self.service = None
self.in_menu = False
if fileExists("/proc/stb/fb/dst_left"):
self.left = open("/proc/stb/fb/dst_left", "r").read()
self.width = open("/proc/stb/fb/dst_width", "r").read()
self.top = open("/proc/stb/fb/dst_top", "r").read()
self.height = open("/proc/stb/fb/dst_height", "r").read()
if self.left != "00000000" or self.top != "00000000" or self.width != "000002d0" or self.height != "0000000240":
open("/proc/stb/fb/dst_left", "w").write("00000000")
open("/proc/stb/fb/dst_width", "w").write("000002d0")
open("/proc/stb/fb/dst_top", "w").write("00000000")
open("/proc/stb/fb/dst_height", "w").write("0000000240")
self.onClose.append(self.__restoreOSDSize)
def save_infobar_seek_config(self):
self.saved_config_speeds_forward = config.seek.speeds_forward.value
self.saved_config_speeds_backward = config.seek.speeds_backward.value
self.saved_config_enter_forward = config.seek.enter_forward.value
self.saved_config_enter_backward = config.seek.enter_backward.value
self.saved_config_seek_on_pause = config.seek.on_pause.value
self.saved_config_seek_speeds_slowmotion = config.seek.speeds_slowmotion.value
def change_infobar_seek_config(self):
config.seek.speeds_forward.value = [2, 4, 6, 8, 16, 32, 64]
config.seek.speeds_backward.value = [2, 4, 6, 8, 16, 32, 64]
config.seek.speeds_slowmotion.value = [ 2, 3, 4, 6 ]
config.seek.enter_forward.value = "2"
config.seek.enter_backward.value = "2"
config.seek.on_pause.value = "play"
def restore_infobar_seek_config(self):
config.seek.speeds_forward.value = self.saved_config_speeds_forward
config.seek.speeds_backward.value = self.saved_config_speeds_backward
config.seek.speeds_slowmotion.value = self.saved_config_seek_speeds_slowmotion
config.seek.enter_forward.value = self.saved_config_enter_forward
config.seek.enter_backward.value = self.saved_config_enter_backward
config.seek.on_pause.value = self.saved_config_seek_on_pause
def __restoreOSDSize(self):
open("/proc/stb/fb/dst_left", "w").write(self.left)
open("/proc/stb/fb/dst_width", "w").write(self.width)
open("/proc/stb/fb/dst_top", "w").write(self.top)
open("/proc/stb/fb/dst_height", "w").write(self.height)
	def chapterZap(self):
		# Open the chapter-number input dialog, unless a DVD menu is on screen.
		if not self.in_menu:
			self.session.openWithCallback(self.numberEntered, ChapterZap)
	def numberEntered(self, retval):
		# ChapterZap callback: retval is the chosen chapter number, falsy on cancel.
		if retval:
			self.zapToNumber(retval)
def getServiceInterface(self, iface):
service = self.service
if service:
attr = getattr(service, iface, None)
if callable(attr):
return attr()
return None
	def __serviceStopped(self):
		# Service ended: leave menu mode, hide the subtitle overlay and
		# disable subtitle rendering on the current dialog.
		if self.in_menu:
			self.in_menu = False
		self.dvdScreen and self.dvdScreen.hide()
		subs = self.getServiceInterface("subtitle")
		if subs:
			subs.disableSubtitles(self.session.current_dialog.instance)
	def serviceStarted(self): #override InfoBarShowHide function
		# Show the DVD overlay screen (if already instantiated) when playback starts.
		self.dvdScreen and self.dvdScreen.show()
	def doEofInternal(self, playing):
		# End-of-stream while inside a menu: only hide the infobar.
		if self.in_menu:
			self.hide()
	def __menuOpened(self):
		# DVD menu appeared: hide the infobar and remember we are in menu mode.
		self.hide()
		self.in_menu = True
	def __menuClosed(self):
		# DVD menu dismissed: show the infobar again and leave menu mode.
		self.show()
		self.in_menu = False
	def setChapterLabel(self):
		# Build the chapter/title display strings for the OSD label and the
		# (optional) front-panel LCD summary screen.
		chapterLCD = _("Menu")
		chapterOSD = _("DVD Menu")
		if self.currentTitle > 0:
			chapterLCD = "%s %d" % (_("Chap."), self.currentChapter)
			chapterOSD = "DVD %s %d/%d" % (_("Chapter"), self.currentChapter, self.totalChapters)
			chapterOSD += " (%s %d/%d)" % (_("Title"), self.currentTitle, self.totalTitles)
		self["chapterLabel"].setText(chapterOSD)
		try:
			# The summary screen may not exist (yet); ignore any failure here.
			self.session.summary and self.session.summary.updateChapter(chapterLCD)
		except:
			pass
	def doNothing(self):
		# Sink for repeated/key-up events that are deliberately ignored.
		pass
	def toggleInfo(self):
		# Toggle the infobar visibility (no-op while a DVD menu is shown).
		if not self.in_menu:
			self.toggleShow()
			print "[DVD] toggleInfo"
	# Debug-only handlers for service events that need no UI reaction.
	def __timeUpdated(self):
		print "[DVD] timeUpdated"
	def __statePlay(self):
		print "[DVD] statePlay"
	def __statePause(self):
		print "[DVD] statePause"
	def __osdFFwdInfoAvail(self):
		# Fast-forward info arrived from the service: refresh the chapter display.
		self.setChapterLabel()
		print "[DVD] FFwdInfoAvail"
	def __osdFBwdInfoAvail(self):
		# Rewind info arrived from the service: refresh the chapter display.
		self.setChapterLabel()
		print "[DVD] FBwdInfoAvail"
	def __osdStringAvail(self):
		print "[DVD] StringAvail"
	def __osdAudioInfoAvail(self):
		# Service reported audio-track info via sUser+6; the tuple appears to be
		# (id, description, language) -- TODO confirm against the DVD service.
		info = self.getServiceInterface("info")
		audioTuple = info and info.getInfoObject(iServiceInformation.sUser+6)
		print "[DVD] AudioInfoAvail ", repr(audioTuple)
		if audioTuple:
			audioString = "%s (%s)" % (audioTuple[1],audioTuple[2])
			self["audioLabel"].setText(audioString)
			if audioTuple != self.last_audioTuple and not self.in_menu:
				# Flash the infobar on a track change, but not inside a DVD menu.
				self.doShow()
			self.last_audioTuple = audioTuple
def __osdSubtitleInfoAvail(self):
info = self.getServiceInterface("info")
subtitleTuple = info and info.getInfoObject(iServiceInformation.sUser+7)
print "[DVD] SubtitleInfoAvail ", repr(subtitleTuple)
if subtitleTuple:
subtitleString = ""
if subtitleTuple[0] is not 0:
subtitleString = "%s" % subtitleTuple[1]
self["subtitleLabel"].setText(subtitleString)
if subtitleTuple != self.last_subtitleTuple and not self.in_menu:
self.doShow()
self.last_subtitleTuple = subtitleTuple
	def __osdAngleInfoAvail(self):
		# Service reported angle info via sUser+8; the tuple appears to be
		# (current_angle, number_of_angles) -- TODO confirm. The angle pixmap is
		# only shown for multi-angle titles.
		info = self.getServiceInterface("info")
		angleTuple = info and info.getInfoObject(iServiceInformation.sUser+8)
		print "[DVD] AngleInfoAvail ", repr(angleTuple)
		if angleTuple:
			angleString = ""
			if angleTuple[1] > 1:
				angleString = "%d / %d" % (angleTuple[0],angleTuple[1])
				self["anglePix"].show()
			else:
				self["anglePix"].hide()
			self["angleLabel"].setText(angleString)
			if angleTuple != self.last_angleTuple and not self.in_menu:
				self.doShow()
			self.last_angleTuple = angleTuple
	def __chapterUpdated(self):
		# Pull current/total chapter numbers from the service and refresh the label.
		info = self.getServiceInterface("info")
		if info:
			self.currentChapter = info.getInfo(iServiceInformation.sCurrentChapter)
			self.totalChapters = info.getInfo(iServiceInformation.sTotalChapters)
			self.setChapterLabel()
			print "[DVD] __chapterUpdated: %d/%d" % (self.currentChapter, self.totalChapters)
	def __titleUpdated(self):
		# Pull current/total title numbers from the service and refresh the label;
		# a title change also flashes the infobar (unless a DVD menu is shown).
		info = self.getServiceInterface("info")
		if info:
			self.currentTitle = info.getInfo(iServiceInformation.sCurrentTitle)
			self.totalTitles = info.getInfo(iServiceInformation.sTotalTitles)
			self.setChapterLabel()
			print "[DVD] __titleUpdated: %d/%d" % (self.currentTitle, self.totalTitles)
			if not self.in_menu:
				self.doShow()
	def askLeavePlayer(self):
		# In autoplay mode leave immediately; otherwise offer exit/continue,
		# plus "Play DVD" when a physical disc is present and not already playing.
		if self.autoplay:
			self.exitCB((None, "exit"))
			return
		choices = [(_("Exit"), "exit"), (_("Continue playing"), "play")]
		if self.physicalDVD:
			cur = self.session.nav.getCurrentlyPlayingServiceOrGroup()
			# Only offer the physical disc if it is not what is currently playing.
			if cur and not cur.toString().endswith(harddiskmanager.getAutofsMountpoint(harddiskmanager.getCD())):
				choices.insert(0,(_("Play DVD"), "playPhysical"))
		self.session.openWithCallback(self.exitCB, ChoiceBox, title=_("Leave DVD player?"), list = choices)
def sendKey(self, key):
keys = self.getServiceInterface("keys")
if keys:
keys.keyPressed(key)
return keys
	# Thin wrappers mapping remote-control actions onto the DVD service's
	# user-key protocol (the evUser handlers above receive the answers).
	def enterAudioSelection(self):
		self.audioSelection()
	def nextAudioTrack(self):
		self.sendKey(iServiceKeys.keyUser)
	def nextSubtitleTrack(self):
		self.sendKey(iServiceKeys.keyUser+1)
	def enterDVDAudioMenu(self):
		self.sendKey(iServiceKeys.keyUser+2)
	def nextChapter(self):
		self.sendKey(iServiceKeys.keyUser+3)
	def prevChapter(self):
		self.sendKey(iServiceKeys.keyUser+4)
	def nextTitle(self):
		self.sendKey(iServiceKeys.keyUser+5)
	def prevTitle(self):
		self.sendKey(iServiceKeys.keyUser+6)
	def enterDVDMenu(self):
		self.sendKey(iServiceKeys.keyUser+7)
	def nextAngle(self):
		self.sendKey(iServiceKeys.keyUser+8)
	def resumeDvd(self):
		self.sendKey(iServiceKeys.keyUser+21)
	def seekBeginning(self):
		# Jump to position 0 of the current title (if the service is seekable).
		if self.service:
			seekable = self.getSeek()
			if seekable:
				seekable.seekTo(0)
	def zapToNumber(self, number):
		# Jump directly to the given chapter number (if the service is seekable).
		if self.service:
			seekable = self.getSeek()
			if seekable:
				print "[DVD] seek to chapter %d" % number
				seekable.seekChapter(number)
	# MENU ACTIONS
	# Arrow keys are forwarded verbatim so DVD menu highlights can be navigated.
	def keyRight(self):
		self.sendKey(iServiceKeys.keyRight)
	def keyLeft(self):
		self.sendKey(iServiceKeys.keyLeft)
	def keyUp(self):
		self.sendKey(iServiceKeys.keyUp)
	def keyDown(self):
		self.sendKey(iServiceKeys.keyDown)
	def keyOk(self):
		# OK confirms inside a DVD menu; during normal playback it toggles the infobar.
		if self.sendKey(iServiceKeys.keyOk) and not self.in_menu:
			self.okButton()
			print "[DVD] keyOk"
			self.toggleInfo()
	def keyCancel(self):
		# EXIT/cancel asks whether to leave the player.
		self.askLeavePlayer()
	def opened(self):
		# First-exec hook: decide what to play depending on how we were launched.
		if self.autoplay and self.dvd_filelist:
			# opened via autoplay
			self.FileBrowserClosed(self.dvd_filelist[0])
		elif self.autoplay and self.physicalDVD:
			self.playPhysicalCB(True)
		elif self.physicalDVD:
			# opened from menu with dvd in drive
			self.session.openWithCallback(self.playPhysicalCB, MessageBox, text=_("Do you want to play DVD in drive?"), timeout=5 )
def playPhysicalCB(self, answer):
if answer:
harddiskmanager.setDVDSpeed(harddiskmanager.getCD(), 1)
self.FileBrowserClosed(harddiskmanager.getAutofsMountpoint(harddiskmanager.getCD()))
	def FileBrowserClosed(self, val):
		# Start playback of the selected path/ISO: build the DVD service
		# reference, derive a display name, probe the IFO header for PAL/NTSC
		# and resolution, size the subtitle overlay accordingly and enable
		# subtitles on it.
		curref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		print "[DVD] FileBrowserClosed", val
		if val is None:
			self.askLeavePlayer()
		else:
			isopathname = "/VIDEO_TS.ISO"
			if os.path.exists(val + isopathname):
				val += isopathname
			# 4369 (0x1111) is the DVD service type id -- see the DVD service plugin.
			newref = eServiceReference(4369, 0, val)
			print "[DVD] play", newref.toString()
			if curref is None or curref != newref:
				self.service = None
				if newref.toString().endswith("/VIDEO_TS") or newref.toString().endswith("/"):
					# Derive a human-readable name from the directory layout,
					# e.g. "<title> - Disk 1" for multi-disc rips.
					names = newref.toString().rsplit("/",3)
					if names[2].startswith("Disk ") or names[2].startswith("DVD "):
						name = str(names[1]) + " - " + str(names[2])
					else:
						name = names[2]
					newref.setName(str(name))
				# Construct a path for the IFO header assuming it exists
				ifofilename = val
				if not ifofilename.upper().endswith("/VIDEO_TS"):
					ifofilename += "/VIDEO_TS"
				files = [("/VIDEO_TS.IFO", 0x100), ("/VTS_01_0.IFO", 0x100), ("/VTS_01_0.IFO", 0x200)] # ( filename, offset )
				for name in files:
					(status, isNTSC, isLowResolution) = self.readVideoAtributes(ifofilename, name)
					if status:
						break
				height = getDesktop(0).size().height()
				print "[DVD] height:", height
				if isNTSC:
					# Scale the overlay height from PAL's 576 lines to NTSC's 480.
					height = height * 576 / 480
					print "[DVD] NTSC height:", height
				if isLowResolution:
					height *= 2
					print "[DVD] LowResolution:", height
				if self.dvdScreen is None:
					self.dvdScreen = self.session.instantiateDialog(DVDOverlay, height=height)
			self.session.nav.playService(newref)
			self.service = self.session.nav.getCurrentService()
			print "[DVD] cur_dlg", self.session.current_dialog
			subs = self.getServiceInterface("subtitle")
			if subs and self.dvdScreen:
				subs.enableSubtitles(self.dvdScreen.instance, None)
	def readVideoAtributes(self, isofilename, checked_file):
		# Read the two video-attribute bytes of an IFO file at the given offset.
		# checked_file is a (filename, offset) pair; returns
		# (status, isNTSC, isLowResolution), where status is False when the
		# header could not be read or the attribute byte is zero.
		(name, offset) = checked_file
		isofilename += name
		print "[DVD] file", name
		status = False
		isNTSC = False
		isLowResolution = False
		ifofile = None
		try:
			# Try to read the IFO header to determine PAL/NTSC format and the resolution
			ifofile = open(isofilename, "r")
			ifofile.seek(offset)
			video_attr_high = ord(ifofile.read(1))
			if video_attr_high != 0:
				status = True
			video_attr_low = ord(ifofile.read(1))
			print "[DVD] %s: video_attr_high = %x" % ( name, video_attr_high ), "video_attr_low = %x" % ( video_attr_low )
			# Bit 4 of the high byte clear appears to mean NTSC, and both bits
			# 3+4 of the low byte set appear to mean half resolution -- TODO
			# confirm against the DVD-Video IFO specification.
			isNTSC = (video_attr_high & 0x10 == 0)
			isLowResolution = (video_attr_low & 0x18 == 0x18)
		except:
			# If the service is an .iso or .img or .nrg file we assume it is PAL
			# Sorry we cannot open image files here.
			print "[DVD] Cannot read file or is ISO/IMG/NRG"
		finally:
			if ifofile is not None:
				ifofile.close()
		return status, isNTSC, isLowResolution
def exitCB(self, answer):
if answer is not None:
if answer[1] == "exit":
if self.service:
self.service = None
self.close()
elif answer[1] == "playPhysical":
if self.service:
self.service = None
self.playPhysicalCB(True)
else:
pass
	def __onClose(self):
		# Undo what __init__ changed: seek settings, running service, hotplug hook.
		self.restore_infobar_seek_config()
		self.session.nav.playService(self.oldService)
		try:
			from Plugins.SystemPlugins.Hotplug.plugin import hotplugNotifier
			hotplugNotifier.remove(self.hotplugCB)
		except:
			# Hotplug plugin not installed -- nothing was registered in __init__ either.
			pass
	def playLastCB(self, answer): # overwrite infobar cuesheet function
		# Cuesheet resume callback: optionally seek to the stored resume point,
		# then make sure playback is running and hide the infobar.
		print "[DVD] playLastCB", answer, self.resume_point
		if self.service:
			if answer:
				self.resumeDvd()
				seekable = self.getSeek()
				if seekable:
					seekable.seekTo(self.resume_point)
			pause = self.service.pause()
			pause.unpause()
		self.hideAfterResume()
	def showAfterCuesheetOperation(self):
		# Re-show the infobar after a cuesheet action, unless a DVD menu is open.
		if not self.in_menu:
			self.show()
	def createSummary(self):
		# LCD summary screen class used by the session for this screen.
		return DVDSummary
	#override some InfoBarSeek functions
	def doEof(self):
		# At end of stream keep the seek state machine in PLAY (DVD menus/titles loop).
		self.setSeekState(self.SEEK_STATE_PLAY)
	def calcRemainingTime(self):
		# Remaining time is not meaningful for DVD menus/titles; report none.
		return 0
	def hotplugCB(self, dev, media_state):
		# Hotplug notification: track insertion/removal of a disc in the CD/DVD drive.
		print "[DVD] hotplugCB", dev, media_state
		if dev == harddiskmanager.getCD():
			if media_state == "1":
				# Medium inserted: rescan to see whether it is a DVD.
				self.scanHotplug()
			else:
				self.physicalDVD = False
def scanHotplug(self):
devicepath = harddiskmanager.getAutofsMountpoint(harddiskmanager.getCD())
if pathExists(devicepath):
from Components.Scanner import scanDevice
res = scanDevice(devicepath)
list = [ (r.description, r, res[r], self.session) for r in res ]
if list:
(desc, scanner, files, session) = list[0]
for file in files:
if file.mimetype == "video/x-dvd":
print "[DVD] physical dvd found:", devicepath
self.physicalDVD = True
return
self.physicalDVD = False
| gpl-2.0 |
robhudson/django | tests/admin_widgets/widgetadmin.py | 368 | 1345 | from django.contrib import admin
from . import models
class WidgetAdmin(admin.AdminSite):
    """Custom admin site on which the test models below are registered."""
    pass
class CarAdmin(admin.ModelAdmin):
    """Car admin with the owner editable directly in the changelist."""
    list_display = ['make', 'model', 'owner']
    list_editable = ['owner']
class CarTireAdmin(admin.ModelAdmin):
    """Admin that narrows the ``car`` FK choices to cars owned by the requesting user."""
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        if db_field.name != "car":
            # Any other foreign key keeps the default admin behavior.
            return super(CarTireAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
        kwargs["queryset"] = models.Car.objects.filter(owner=request.user)
        return db_field.formfield(**kwargs)
class EventAdmin(admin.ModelAdmin):
    # Use raw-ID widgets for the band relations.
    raw_id_fields = ['main_band', 'supporting_bands']
class AlbumAdmin(admin.ModelAdmin):
    # cover_art is shown but read-only on the change form.
    fields = ('name', 'cover_art',)
    readonly_fields = ('cover_art',)
class SchoolAdmin(admin.ModelAdmin):
    # Exercise both orientations of the filter widget.
    filter_vertical = ('students',)
    filter_horizontal = ('alumni',)
# Instantiate the custom admin site and register all test models on it.
site = WidgetAdmin(name='widget-admin')
site.register(models.User)
site.register(models.Car, CarAdmin)
site.register(models.CarTire, CarTireAdmin)
site.register(models.Member)
site.register(models.Band)
site.register(models.Event, EventAdmin)
site.register(models.Album, AlbumAdmin)
site.register(models.Inventory)
site.register(models.Bee)
site.register(models.Advisor)
site.register(models.School, SchoolAdmin)
site.register(models.Profile)
| bsd-3-clause |
yakky/django | tests/ordering/models.py | 261 | 1379 | """
Specifying ordering
Specify default ordering for a model using the ``ordering`` attribute, which
should be a list or tuple of field names. This tells Django how to order
``QuerySet`` results.
If a field name in ``ordering`` starts with a hyphen, that field will be
ordered in descending order. Otherwise, it'll be ordered in ascending order.
The special-case field name ``"?"`` specifies random order.
The ordering attribute is not required. If you leave it off, ordering will be
undefined -- not random, just undefined.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Author(models.Model):
    # Default ordering: newest (highest pk) first.
    class Meta:
        ordering = ('-pk',)
@python_2_unicode_compatible
class Article(models.Model):
    # Default ordering: newest pub_date first, headline as tie-breaker.
    author = models.ForeignKey(Author, models.SET_NULL, null=True)
    second_author = models.ForeignKey(Author, models.SET_NULL, null=True)
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()
    class Meta:
        ordering = ('-pub_date', 'headline')
    def __str__(self):
        return self.headline
class OrderedByAuthorArticle(Article):
    # Proxy model that replaces the default ordering with author-based ordering.
    class Meta:
        proxy = True
        ordering = ('author', 'second_author')
class Reference(models.Model):
    # References sort by their related article (which has its own ordering).
    article = models.ForeignKey(OrderedByAuthorArticle, models.CASCADE)
    class Meta:
        ordering = ('article',)
| bsd-3-clause |
RHavar/bitcoin | test/functional/rpc_users.py | 18 | 7885 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
get_datadir_path,
str_to_b64str,
)
import os
import http.client
import urllib.parse
import subprocess
from random import SystemRandom
import string
import configparser
import sys
class HTTPBasicsTest(BitcoinTestFramework):
    """Check RPC authentication via rpcauth entries and rpcuser/rpcpassword options."""

    def set_test_params(self):
        self.num_nodes = 2

    def setup_chain(self):
        """Write credentials into each node's bitcoin.conf before the nodes start.

        Node 0 gets three rpcauth entries (two pregenerated, one produced by
        the share/rpcauth helper); node 1 gets legacy rpcuser/rpcpassword.
        """
        super().setup_chain()
        # Append rpcauth to bitcoin.conf before initialization
        rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
        rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
        rpcuser = "rpcuser=rpcuser💻"
        rpcpassword = "rpcpassword=rpcpassword🔑"
        self.user = ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
        config = configparser.ConfigParser()
        # Close the config file deterministically (the original leaked the handle).
        with open(self.options.configfile) as f:
            config.read_file(f)
        gen_rpcauth = config['environment']['RPCAUTH']
        p = subprocess.Popen([sys.executable, gen_rpcauth, self.user], stdout=subprocess.PIPE, universal_newlines=True)
        lines = p.stdout.read().splitlines()
        rpcauth3 = lines[1]
        self.password = lines[3]
        with open(os.path.join(get_datadir_path(self.options.tmpdir, 0), "bitcoin.conf"), 'a', encoding='utf8') as f:
            f.write(rpcauth+"\n")
            f.write(rpcauth2+"\n")
            f.write(rpcauth3+"\n")
        with open(os.path.join(get_datadir_path(self.options.tmpdir, 1), "bitcoin.conf"), 'a', encoding='utf8') as f:
            f.write(rpcuser+"\n")
            f.write(rpcpassword+"\n")

    def get_auth_status(self, url, authpair):
        """POST a minimal RPC request using HTTP basic auth *authpair* ("user:password")
        against *url* (a parsed node URL) and return the HTTP status code."""
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        status = conn.getresponse().status
        conn.close()
        return status

    def run_test(self):
        ##################################################
        # Check correctness of the rpcauth config option #
        ##################################################
        url = urllib.parse.urlparse(self.nodes[0].url)
        # Old-style authpair taken from the node URL
        authpair = url.username + ':' + url.password
        # Cleartext passwords matching the pregenerated rpcauth entries above
        password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
        password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
        cases = [
            (authpair, 200),                                   # URL-derived credentials
            ("rt:" + password, 200),                           # first rpcauth entry
            ("rtwrong:" + password, 401),                      # wrong user with rt's password
            ("rt:" + password + "wrong", 401),                 # wrong password for rt
            ("rt2:" + password2, 200),                         # second rpcauth entry
            ("rt2:" + password2 + "wrong", 401),               # wrong password for rt2
            (self.user + ":" + self.password, 200),            # randomly generated user
            (self.user + ":" + self.password + "Wrong", 401),  # wrong password for generated user
        ]
        for authpairnew, expected in cases:
            self.log.info('Correct...' if expected == 200 else 'Wrong...')
            assert_equal(self.get_auth_status(url, authpairnew), expected)

        ###############################################################
        # Check correctness of the rpcuser/rpcpassword config options #
        ###############################################################
        url = urllib.parse.urlparse(self.nodes[1].url)
        for rpcuserauthpair, expected in [
                ("rpcuser💻:rpcpassword🔑", 200),     # exact rpcuser/rpcpassword pair
                ("rpcuserwrong:rpcpassword", 401),    # wrong login name
                ("rpcuser:rpcpasswordwrong", 401)]:   # wrong password
            self.log.info('Correct...' if expected == 200 else 'Wrong...')
            assert_equal(self.get_auth_status(url, rpcuserauthpair), expected)
if __name__ == '__main__':
    # Standard bitcoin test-framework entry point.
    HTTPBasicsTest ().main ()
| mit |
emoronayuso/beeton | asterisk-bee/asteriskbee/api_status/scripts_graficas/recoge_marcas_graficas.py | 1 | 2307 | #!/usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
#import calendar
from datetime import datetime
from django.conf import settings
settings.configure()
import os
# Connection to the beeton (asteriskbee) statistics database.
import sqlite3 as dbapi
## Application directory.
### STATIC_ROOT = '/var/www/asterisk-bee/asteriskbee/'
#directorio = settings.STATIC_ROOT+"api_status/"
directorio = "/var/www/asterisk-bee/asteriskbee/api_status/"
## Maximum number of rows kept per graph.
num_cpu_dia = 20
def recoge_marcas():
	"""Sample total CPU usage and store it in the api_status_marcas_graficas table.

	Keeps at most num_cpu_dia rows of type 'cpu_dia': while under the limit a
	new row is inserted; once at the limit the oldest row is recycled (its
	timestamp and value are overwritten).
	"""
	# Connect to the statistics database.
	bbdd = dbapi.connect(directorio+"bbdd/estadisticas.db")
	cursor = bbdd.cursor()
	# Dump per-process CPU percentages into a temp file, stripping idle
	# processes, the ps header and this script itself.
	os.system("ps -e -o pcpu,cpu,nice,state,cputime,args --sort pcpu | sed '/^ 0.0 /d' > "+directorio+"scripts_graficas/temp/temp_cpu_dia; cat "+directorio+"scripts_graficas/temp/temp_cpu_dia | sed 's/^[ \t]*//;s/[ \t]*$//' | grep -v 'recoge_marcas_graficas.py' | cut -d ' ' -f 1 > "+directorio+"scripts_graficas/temp/temp_cpu_dia2")
	total = 0.0
	# Use a context manager so the temp file handle is always closed
	# (the original leaked it on exceptions).
	with open(directorio+'scripts_graficas/temp/temp_cpu_dia2', 'r') as f:
		# The first line is the column header; skip it.
		f.readline()
		for linea in f:
			total = total + float(linea)
	res = total
	# Existing 'cpu_dia' rows, oldest first.
	con_ordenada = """select * from api_status_marcas_graficas where tipo='cpu_dia' order by fecha_hora;"""
	cursor.execute(con_ordenada)
	p = cursor.fetchall()
	if len(p) < num_cpu_dia:
		# Still below the cap: insert a new sample (parameterized query).
		insert = "insert into api_status_marcas_graficas (tipo,valor) values ('cpu_dia',?);"
		cursor.execute(insert ,(res,))
		bbdd.commit()
	else:
		# At the cap: recycle the oldest row with the current timestamp/value.
		hora_actual = datetime.now()
		con_update = " update api_status_marcas_graficas set fecha_hora=datetime(?),valor=? where id=?; "
		cursor.execute(con_update ,(hora_actual,res,p[0][0]))
		bbdd.commit()
	# Close the database connection.
	cursor.close()
	bbdd.close()
if __name__ == "__main__":
	# Run one sampling pass when invoked directly (e.g. from cron).
	recoge_marcas()
| gpl-3.0 |
zmike/servo | tests/wpt/harness/wptrunner/wptcommandline.py | 32 | 19030 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import ast
import os
import sys
from collections import OrderedDict
from distutils.spawn import find_executable
import config
import wpttest
def abs_path(path):
    """Expand a leading ~ in *path* and return its absolute form."""
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)
def url_or_path(path):
    """Return *path* unchanged when it looks like a URL, otherwise absolutize it.

    A scheme longer than two characters distinguishes real URLs from Windows
    drive letters such as ``c:``.
    """
    import urlparse
    scheme = urlparse.urlparse(path).scheme
    if len(scheme) > 2:
        return path
    return abs_path(path)
def require_arg(kwargs, name, value_func=None):
    # Exit the program with an error message unless kwargs[name] exists and
    # satisfies value_func (default predicate: "is not None").
    if value_func is None:
        value_func = lambda x: x is not None
    if not name in kwargs or not value_func(kwargs[name]):
        print >> sys.stderr, "Missing required argument %s" % name
        sys.exit(1)
def create_parser(product_choices=None):
    """Build the argparse parser for the wptrunner command line.

    product_choices limits the --product option; when None it is derived from
    the config via products.products_enabled().
    """
    from mozlog import commandline
    import products
    if product_choices is None:
        config_data = config.load()
        product_choices = products.products_enabled(config_data)
    parser = argparse.ArgumentParser(description="Runner for web-platform-tests tests.")
    parser.add_argument("--metadata", action="store", type=abs_path, dest="metadata_root",
                        help="Path to the folder containing test metadata"),
    parser.add_argument("--tests", action="store", type=abs_path, dest="tests_root",
                        help="Path to test files"),
    parser.add_argument("--run-info", action="store", type=abs_path,
                        help="Path to directory containing extra json files to add to run info")
    parser.add_argument("--config", action="store", type=abs_path, dest="config",
                        help="Path to config file")
    parser.add_argument("--manifest-update", action="store_true", default=False,
                        help="Force regeneration of the test manifest")
    parser.add_argument("--binary", action="store",
                        type=abs_path, help="Binary to run tests against")
    parser.add_argument('--binary-arg',
                        default=[], action="append", dest="binary_args",
                        help="Extra argument for the binary (servo)")
    parser.add_argument("--webdriver-binary", action="store", metavar="BINARY",
                        type=abs_path, help="WebDriver server binary to use")
    parser.add_argument("--processes", action="store", type=int, default=None,
                        help="Number of simultaneous processes to use")
    parser.add_argument("--run-by-dir", type=int, nargs="?", default=False,
                        help="Split run into groups by directories. With a parameter,"
                        "limit the depth of splits e.g. --run-by-dir=1 to split by top-level"
                        "directory")
    parser.add_argument("--timeout-multiplier", action="store", type=float, default=None,
                        help="Multiplier relative to standard test timeout to use")
    parser.add_argument("--repeat", action="store", type=int, default=1,
                        help="Number of times to run the tests")
    parser.add_argument("--repeat-until-unexpected", action="store_true", default=None,
                        help="Run tests in a loop until one returns an unexpected result")
    parser.add_argument("--no-capture-stdio", action="store_true", default=False,
                        help="Don't capture stdio and write to logging")
    parser.add_argument("--product", action="store", choices=product_choices,
                        default=None, help="Browser against which to run tests")
    parser.add_argument("--list-test-groups", action="store_true",
                        default=False,
                        help="List the top level directories containing tests that will run.")
    parser.add_argument("--list-disabled", action="store_true",
                        default=False,
                        help="List the tests that are disabled on the current platform")
    # --debug-build/--release-build both write into "debug" and so are exclusive.
    build_type = parser.add_mutually_exclusive_group()
    build_type.add_argument("--debug-build", dest="debug", action="store_true",
                            default=None,
                            help="Build is a debug build (overrides any mozinfo file)")
    build_type.add_argument("--release-build", dest="debug", action="store_false",
                            default=None,
                            help="Build is a release (overrides any mozinfo file)")
    # Options selecting which tests run.
    test_selection_group = parser.add_argument_group("Test Selection")
    test_selection_group.add_argument("--test-types", action="store",
                                      nargs="*", default=wpttest.enabled_tests,
                                      choices=wpttest.enabled_tests,
                                      help="Test types to run")
    test_selection_group.add_argument("--include", action="append",
                                      help="URL prefix to include")
    test_selection_group.add_argument("--exclude", action="append",
                                      help="URL prefix to exclude")
    test_selection_group.add_argument("--include-manifest", type=abs_path,
                                      help="Path to manifest listing tests to include")
    test_selection_group.add_argument("--tag", action="append", dest="tags",
                                      help="Labels applied to tests to include in the run. Labels starting dir: are equivalent to top-level directories.")
    # Options for running under a debugger / analysing crashes.
    debugging_group = parser.add_argument_group("Debugging")
    debugging_group.add_argument('--debugger', const="__default__", nargs="?",
                                 help="run under a debugger, e.g. gdb or valgrind")
    debugging_group.add_argument('--debugger-args', help="arguments to the debugger")
    debugging_group.add_argument('--pause-after-test', action="store_true", default=None,
                                 help="Halt the test runner after each test (this happens by default if only a single test is run)")
    debugging_group.add_argument('--no-pause-after-test', dest="pause_after_test", action="store_false",
                                 help="Don't halt the test runner irrespective of the number of tests run")
    debugging_group.add_argument('--pause-on-unexpected', action="store_true",
                                 help="Halt the test runner when an unexpected result is encountered")
    debugging_group.add_argument("--symbols-path", action="store", type=url_or_path,
                                 help="Path or url to symbols file used to analyse crash minidumps.")
    debugging_group.add_argument("--stackwalk-binary", action="store", type=abs_path,
                                 help="Path to stackwalker program used to analyse minidumps.")
    # Options for splitting the run across chunks.
    chunking_group = parser.add_argument_group("Test Chunking")
    chunking_group.add_argument("--total-chunks", action="store", type=int, default=1,
                                help="Total number of chunks to use")
    chunking_group.add_argument("--this-chunk", action="store", type=int, default=1,
                                help="Chunk number to run")
    chunking_group.add_argument("--chunk-type", action="store", choices=["none", "equal_time", "hash"],
                                default=None, help="Chunking type to use")
    ssl_group = parser.add_argument_group("SSL/TLS")
    ssl_group.add_argument("--ssl-type", action="store", default=None,
                           choices=["openssl", "pregenerated", "none"],
                           help="Type of ssl support to enable (running without ssl may lead to spurious errors)")
    ssl_group.add_argument("--openssl-binary", action="store",
                           help="Path to openssl binary", default="openssl")
    ssl_group.add_argument("--certutil-binary", action="store",
                           help="Path to certutil binary for use with Firefox + ssl")
    ssl_group.add_argument("--ca-cert-path", action="store", type=abs_path,
                           help="Path to ca certificate when using pregenerated ssl certificates")
    ssl_group.add_argument("--host-key-path", action="store", type=abs_path,
                           help="Path to host private key when using pregenerated ssl certificates")
    ssl_group.add_argument("--host-cert-path", action="store", type=abs_path,
                           help="Path to host certificate when using pregenerated ssl certificates")
    gecko_group = parser.add_argument_group("Gecko-specific")
    gecko_group.add_argument("--prefs-root", dest="prefs_root", action="store", type=abs_path,
                             help="Path to the folder containing browser prefs")
    gecko_group.add_argument("--disable-e10s", dest="gecko_e10s", action="store_false", default=True,
                             help="Run tests without electrolysis preferences")
    b2g_group = parser.add_argument_group("B2G-specific")
    b2g_group.add_argument("--b2g-no-backup", action="store_true", default=False,
                           help="Don't backup device before testrun with --product=b2g")
    servo_group = parser.add_argument_group("Servo-specific")
    servo_group.add_argument("--user-stylesheet",
                             default=[], action="append", dest="user_stylesheets",
                             help="Inject a user CSS stylesheet into every test.")
    servo_group.add_argument("--servo-backend",
                             default="cpu", choices=["cpu", "webrender"],
                             help="Rendering backend to use with Servo.")
    parser.add_argument("test_list", nargs="*",
                        help="List of URLs for tests to run, or paths including tests to run. "
                             "(equivalent to --include)")
    commandline.add_logging_group(parser)
    return parser
def set_from_config(kwargs):
    """Merge values from the configuration file into kwargs still set to None.

    Also resolves the config path, loads the config object, builds
    kwargs["test_paths"] and fills in kwargs["suite_name"].
    """
    config_path = kwargs["config"] if kwargs["config"] is not None else config.path()
    kwargs["config_path"] = config_path
    kwargs["config"] = config.read(config_path)

    # (config key, kwargs key, treat value as a filesystem path) per section
    option_map = {
        "paths": [("prefs", "prefs_root", True),
                  ("run_info", "run_info", True)],
        "web-platform-tests": [("remote_url", "remote_url", False),
                               ("branch", "branch", False),
                               ("sync_path", "sync_path", True)],
        "SSL": [("openssl_binary", "openssl_binary", True),
                ("certutil_binary", "certutil_binary", True),
                ("ca_cert_path", "ca_cert_path", True),
                ("host_cert_path", "host_cert_path", True),
                ("host_key_path", "host_key_path", True)]}

    for section, triples in option_map.iteritems():
        for config_value, kw_value, is_path in triples:
            if kw_value in kwargs and kwargs[kw_value] is None:
                section_data = kwargs["config"].get(section, config.ConfigDict({}))
                if is_path:
                    kwargs[kw_value] = section_data.get_path(config_value)
                else:
                    kwargs[kw_value] = section_data.get(config_value)

    kwargs["test_paths"] = get_test_paths(kwargs["config"])

    # Explicit --tests / --metadata roots override (or create) the "/" entry.
    for root_key, dest_key in (("tests_root", "tests_path"),
                               ("metadata_root", "metadata_path")):
        if kwargs[root_key]:
            kwargs["test_paths"].setdefault("/", {})[dest_key] = kwargs[root_key]

    kwargs["suite_name"] = kwargs["config"].get("web-platform-tests", {}).get("name", "web-platform-tests")
def get_test_paths(config):
    """Map each "manifest:*" config section to its tests/metadata paths.

    Returns an OrderedDict keyed by the section's url_base (default "/"),
    with each value holding the resolved "tests_path" and "metadata_path".
    """
    test_paths = OrderedDict()
    for section in config.iterkeys():
        if not section.startswith("manifest:"):
            continue
        manifest_opts = config.get(section)
        url_base = manifest_opts.get("url_base", "/")
        test_paths[url_base] = {
            "tests_path": manifest_opts.get_path("tests"),
            "metadata_path": manifest_opts.get_path("metadata")}
    return test_paths
def exe_path(name):
    """Return the path to executable `name`, or None.

    None is returned when `name` is None, when no matching executable is
    found on the search path, or when the found file is not executable.
    """
    if name is None:
        return None
    path = find_executable(name)
    # find_executable returns None when nothing matches; guard before
    # os.access, which raises TypeError on a None path.
    if path is None:
        return None
    if os.access(path, os.X_OK):
        return path
    return None
def check_args(kwargs):
    """Validate and normalise parsed command-line arguments in-place.

    Fills in defaults (product, chunking, processes, ssl type), resolves
    binary paths, and terminates the process with a fatal message when a
    required path or executable is missing. Returns the mutated kwargs.
    """
    set_from_config(kwargs)

    # Every configured test tree must provide both paths, and both must
    # be existing directories.
    for test_paths in kwargs["test_paths"].itervalues():
        if not ("tests_path" in test_paths and
                "metadata_path" in test_paths):
            print "Fatal: must specify both a test path and metadata path"
            sys.exit(1)
        for key, path in test_paths.iteritems():
            # "tests_path" -> "tests", "metadata_path" -> "metadata"
            name = key.split("_", 1)[0]
            if not os.path.exists(path):
                print "Fatal: %s path %s does not exist" % (name, path)
                sys.exit(1)
            if not os.path.isdir(path):
                print "Fatal: %s path %s is not a directory" % (name, path)
                sys.exit(1)

    if kwargs["product"] is None:
        kwargs["product"] = "firefox"

    # Positional test ids are equivalent to --include entries.
    if kwargs["test_list"]:
        if kwargs["include"] is not None:
            kwargs["include"].extend(kwargs["test_list"])
        else:
            kwargs["include"] = kwargs["test_list"]

    if kwargs["run_info"] is None:
        kwargs["run_info"] = kwargs["config_path"]

    # Selecting a chunk other than the first only makes sense when the
    # total number of chunks is at least that large.
    if kwargs["this_chunk"] > 1:
        require_arg(kwargs, "total_chunks", lambda x: x >= kwargs["this_chunk"])

    if kwargs["chunk_type"] is None:
        if kwargs["total_chunks"] > 1:
            kwargs["chunk_type"] = "equal_time"
        else:
            kwargs["chunk_type"] = "none"

    if kwargs["processes"] is None:
        kwargs["processes"] = 1

    if kwargs["debugger"] is not None:
        import mozdebug
        if kwargs["debugger"] == "__default__":
            kwargs["debugger"] = mozdebug.get_default_debugger_name()
        debug_info = mozdebug.get_debugger_info(kwargs["debugger"],
                                                kwargs["debugger_args"])
        # An interactive debugger needs a single process and direct stdio.
        if debug_info and debug_info.interactive:
            if kwargs["processes"] != 1:
                kwargs["processes"] = 1
            kwargs["no_capture_stdio"] = True
        kwargs["debug_info"] = debug_info
    else:
        kwargs["debug_info"] = None

    if kwargs["binary"] is not None:
        if not os.path.exists(kwargs["binary"]):
            print >> sys.stderr, "Binary path %s does not exist" % kwargs["binary"]
            sys.exit(1)

    # Auto-detect SSL support: prefer pregenerated certificates, then an
    # openssl binary, otherwise run without SSL.
    if kwargs["ssl_type"] is None:
        if None not in (kwargs["ca_cert_path"], kwargs["host_cert_path"], kwargs["host_key_path"]):
            kwargs["ssl_type"] = "pregenerated"
        elif exe_path(kwargs["openssl_binary"]) is not None:
            kwargs["ssl_type"] = "openssl"
        else:
            kwargs["ssl_type"] = "none"

    if kwargs["ssl_type"] == "pregenerated":
        require_arg(kwargs, "ca_cert_path", lambda x:os.path.exists(x))
        require_arg(kwargs, "host_cert_path", lambda x:os.path.exists(x))
        require_arg(kwargs, "host_key_path", lambda x:os.path.exists(x))
    elif kwargs["ssl_type"] == "openssl":
        path = exe_path(kwargs["openssl_binary"])
        if path is None:
            print >> sys.stderr, "openssl-binary argument missing or not a valid executable"
            sys.exit(1)
        kwargs["openssl_binary"] = path

    # Firefox additionally needs certutil to install certificates.
    if kwargs["ssl_type"] != "none" and kwargs["product"] == "firefox":
        path = exe_path(kwargs["certutil_binary"])
        if path is None:
            print >> sys.stderr, "certutil-binary argument missing or not a valid executable"
            sys.exit(1)
        kwargs["certutil_binary"] = path

    return kwargs
def check_args_update(kwargs):
    """Validate and normalise parsed arguments for the update script."""
    set_from_config(kwargs)
    product = kwargs["product"]
    kwargs["product"] = "firefox" if product is None else product
def create_parser_update(product_choices=None):
    """Build the argument parser for the metadata update script.

    :arg product_choices: iterable of product names allowed for --product;
        defaults to the products enabled in the local configuration.
    """
    from mozlog.structured import commandline

    import products

    if product_choices is None:
        config_data = config.load()
        product_choices = products.products_enabled(config_data)

    parser = argparse.ArgumentParser("web-platform-tests-update",
                                     description="Update script for web-platform-tests tests.")
    parser.add_argument("--product", action="store", choices=product_choices,
                        default=None, help="Browser for which metadata is being updated")
    parser.add_argument("--config", action="store", type=abs_path, help="Path to config file")
    parser.add_argument("--metadata", action="store", type=abs_path, dest="metadata_root",
                        help="Path to the folder containing test metadata")
    parser.add_argument("--tests", action="store", type=abs_path, dest="tests_root",
                        help="Path to web-platform-tests")
    parser.add_argument("--sync-path", action="store", type=abs_path,
                        help="Path to store git checkout of web-platform-tests during update")
    parser.add_argument("--remote_url", action="store",
                        help="URL of web-platform-tests repository to sync against")
    # A branch name is not a filesystem path, so no type=abs_path here
    # (the config reader also treats "branch" as a plain value).
    parser.add_argument("--branch", action="store",
                        help="Remote branch to sync against")
    parser.add_argument("--rev", action="store", help="Revision to sync to")
    parser.add_argument("--no-patch", action="store_true",
                        help="Don't create an mq patch or git commit containing the changes.")
    parser.add_argument("--sync", dest="sync", action="store_true", default=False,
                        help="Sync the tests with the latest from upstream")
    parser.add_argument("--ignore-existing", action="store_true", help="When updating test results only consider results from the logfiles provided, not existing expectations.")
    parser.add_argument("--continue", action="store_true", help="Continue a previously started run of the update script")
    parser.add_argument("--abort", action="store_true", help="Clear state from a previous incomplete run of the update script")
    # Should make this required iff run=logfile
    parser.add_argument("run_log", nargs="*", type=abs_path,
                        help="Log file from run of tests")
    commandline.add_logging_group(parser)
    return parser
def create_parser_reduce(product_choices=None):
    """Build the parser for the unstable-test reduction script.

    Extends the main test-run parser with a positional "target" test id.
    """
    reduce_parser = create_parser(product_choices)
    reduce_parser.add_argument("target", action="store", help="Test id that is unstable")
    return reduce_parser
def parse_args():
    """Parse and validate command-line arguments for a test run."""
    args = vars(create_parser().parse_args())
    check_args(args)
    return args
def parse_args_update():
    """Parse and validate command-line arguments for the update script."""
    args = vars(create_parser_update().parse_args())
    check_args_update(args)
    return args
def parse_args_reduce():
    """Parse and validate command-line arguments for the reduction script."""
    args = vars(create_parser_reduce().parse_args())
    check_args(args)
    return args
| mpl-2.0 |
Turlough/keyczar | cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/Tool/ifort.py | 19 | 3334 | """SCons.Tool.ifort
Tool-specific initialization for newer versions of the Intel Fortran Compiler
for Linux/Windows (and possibly Mac OS X).
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ifort.py 4043 2009/02/23 09:06:45 scons"
import string
import SCons.Defaults
from SCons.Scanner.Fortran import FortranScan
from FortranCommon import add_all_to_env
def generate(env):
    """Add Builders and construction variables for ifort to an Environment."""
    # ifort supports Fortran 90 and Fortran 95
    # Additionally, ifort recognizes more file extensions.
    fscan = FortranScan("FORTRANPATH")
    # NOTE(review): SCons.Tool and SCons.Util are referenced below but only
    # SCons.Defaults is imported at the top of this module; this relies on
    # SCons.Defaults pulling them in as a side effect -- confirm.
    SCons.Tool.SourceFileScanner.add_scanner('.i', fscan)
    SCons.Tool.SourceFileScanner.add_scanner('.i90', fscan)

    # Register ifort's extra preprocessed-source suffixes (.i for fixed
    # form, .i90 for free form), preserving any user-provided lists.
    if not env.has_key('FORTRANFILESUFFIXES'):
        env['FORTRANFILESUFFIXES'] = ['.i']
    else:
        env['FORTRANFILESUFFIXES'].append('.i')

    if not env.has_key('F90FILESUFFIXES'):
        env['F90FILESUFFIXES'] = ['.i90']
    else:
        env['F90FILESUFFIXES'].append('.i90')

    add_all_to_env(env)

    fc = 'ifort'

    # Every dialect uses the same ifort driver; shared-object builds get
    # the dialect's flags plus -fPIC.
    for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
        env['%s' % dialect] = fc
        env['SH%s' % dialect] = '$%s' % dialect
        env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS -fPIC' % dialect)

    if env['PLATFORM'] == 'win32':
        # On Windows, the ifort compiler specifies the object on the
        # command line with -object:, not -o. Massage the necessary
        # command-line construction variables.
        for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
            for var in ['%sCOM' % dialect, '%sPPCOM' % dialect,
                        'SH%sCOM' % dialect, 'SH%sPPCOM' % dialect]:
                env[var] = string.replace(env[var], '-o $TARGET', '-object:$TARGET')
        env['FORTRANMODDIRPREFIX'] = "/module:"
    else:
        env['FORTRANMODDIRPREFIX'] = "-module "
def exists(env):
    """Return a true value if the ifort compiler can be detected."""
    return env.Detect('ifort')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
LaMi-/pmatic | ccu_pkg/python/local/lib/python2.7/dist-packages/requests/auth.py | 174 | 7550 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
import threading
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string
from .status_codes import codes
# MIME types used to recognise form-encoded request bodies.
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
    """Returns a Basic Auth string."""
    credentials = ('%s:%s' % (username, password)).encode('latin1')
    return 'Basic ' + to_native_string(b64encode(credentials).strip())
class AuthBase(object):
    """Base class that all auth implementations derive from"""

    def __call__(self, r):
        # Subclasses must attach their authentication to the prepared
        # request `r` and return it.
        raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
    """Attaches HTTP Basic Authentication to the given Request object."""

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __call__(self, r):
        # Build the Basic credentials once and attach them to the request.
        basic = _basic_auth_str(self.username, self.password)
        r.headers['Authorization'] = basic
        return r
class HTTPProxyAuth(HTTPBasicAuth):
    """Attaches HTTP Proxy Authentication to a given Request object."""

    def __call__(self, r):
        # Same Basic credentials as the parent class, but sent in the
        # Proxy-Authorization header instead.
        proxy_credentials = _basic_auth_str(self.username, self.password)
        r.headers['Proxy-Authorization'] = proxy_credentials
        return r
class HTTPDigestAuth(AuthBase):
    """Attaches HTTP Digest Authentication to the given Request object."""

    def __init__(self, username, password):
        self.username = username
        self.password = password
        # Keep state in per-thread local storage
        self._thread_local = threading.local()

    def init_per_thread_state(self):
        """Initialise the mutable digest state for the calling thread."""
        # Ensure state is initialized just once per-thread
        if not hasattr(self._thread_local, 'init'):
            self._thread_local.init = True
            self._thread_local.last_nonce = ''
            self._thread_local.nonce_count = 0
            self._thread_local.chal = {}
            self._thread_local.pos = None
            self._thread_local.num_401_calls = None

    def build_digest_header(self, method, url):
        """Build the Digest ``Authorization`` header value for a request.

        Uses the challenge previously parsed into ``self._thread_local.chal``.
        Returns None when the challenge requests an unsupported algorithm
        or qop, meaning no digest header can be produced.
        """
        realm = self._thread_local.chal['realm']
        nonce = self._thread_local.chal['nonce']
        qop = self._thread_local.chal.get('qop')
        algorithm = self._thread_local.chal.get('algorithm')
        opaque = self._thread_local.chal.get('opaque')
        # Default to None so that an unsupported algorithm falls through to
        # the "if hash_utf8 is None" guard below instead of raising
        # UnboundLocalError.
        hash_utf8 = None

        if algorithm is None:
            _algorithm = 'MD5'
        else:
            _algorithm = algorithm.upper()
        # lambdas assume digest modules are imported at the top level
        if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
            def md5_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.md5(x).hexdigest()
            hash_utf8 = md5_utf8
        elif _algorithm == 'SHA':
            def sha_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.sha1(x).hexdigest()
            hash_utf8 = sha_utf8

        KD = lambda s, d: hash_utf8("%s:%s" % (s, d))

        if hash_utf8 is None:
            return None

        # XXX not implemented yet
        entdig = None
        p_parsed = urlparse(url)
        #: path is request-uri defined in RFC 2616 which should not be empty
        path = p_parsed.path or "/"
        if p_parsed.query:
            path += '?' + p_parsed.query

        A1 = '%s:%s:%s' % (self.username, realm, self.password)
        A2 = '%s:%s' % (method, path)

        HA1 = hash_utf8(A1)
        HA2 = hash_utf8(A2)

        # The nonce count increments while the server keeps issuing the
        # same nonce, and resets when a fresh nonce arrives.
        if nonce == self._thread_local.last_nonce:
            self._thread_local.nonce_count += 1
        else:
            self._thread_local.nonce_count = 1
        ncvalue = '%08x' % self._thread_local.nonce_count
        # Client nonce: hash of the count, the server nonce, the current
        # time and some OS randomness, truncated to 16 hex digits.
        s = str(self._thread_local.nonce_count).encode('utf-8')
        s += nonce.encode('utf-8')
        s += time.ctime().encode('utf-8')
        s += os.urandom(8)

        cnonce = (hashlib.sha1(s).hexdigest()[:16])
        if _algorithm == 'MD5-SESS':
            HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))

        if not qop:
            respdig = KD(HA1, "%s:%s" % (nonce, HA2))
        elif qop == 'auth' or 'auth' in qop.split(','):
            noncebit = "%s:%s:%s:%s:%s" % (
                nonce, ncvalue, cnonce, 'auth', HA2
            )
            respdig = KD(HA1, noncebit)
        else:
            # XXX handle auth-int.
            return None

        self._thread_local.last_nonce = nonce

        # XXX should the partial digests be encoded too?
        base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
               'response="%s"' % (self.username, realm, nonce, path, respdig)
        if opaque:
            base += ', opaque="%s"' % opaque
        if algorithm:
            base += ', algorithm="%s"' % algorithm
        if entdig:
            base += ', digest="%s"' % entdig
        if qop:
            base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)

        return 'Digest %s' % (base)

    def handle_redirect(self, r, **kwargs):
        """Reset num_401_calls counter on redirects."""
        if r.is_redirect:
            self._thread_local.num_401_calls = 1

    def handle_401(self, r, **kwargs):
        """Takes the given response and tries digest-auth, if needed."""
        if self._thread_local.pos is not None:
            # Rewind the file position indicator of the body to where
            # it was to resend the request.
            r.request.body.seek(self._thread_local.pos)
        s_auth = r.headers.get('www-authenticate', '')

        # Retry at most once per request (num_401_calls < 2) and only when
        # the server actually asked for digest auth.
        if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2:

            self._thread_local.num_401_calls += 1
            pat = re.compile(r'digest ', flags=re.IGNORECASE)
            self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1))

            # Consume content and release the original connection
            # to allow our new request to reuse the same one.
            r.content
            r.close()
            prep = r.request.copy()
            extract_cookies_to_jar(prep._cookies, r.request, r.raw)
            prep.prepare_cookies(prep._cookies)

            prep.headers['Authorization'] = self.build_digest_header(
                prep.method, prep.url)
            _r = r.connection.send(prep, **kwargs)
            _r.history.append(r)
            _r.request = prep

            return _r

        self._thread_local.num_401_calls = 1
        return r

    def __call__(self, r):
        # Initialize per-thread state, if needed
        self.init_per_thread_state()
        # If we have a saved nonce, skip the 401
        if self._thread_local.last_nonce:
            r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
        try:
            self._thread_local.pos = r.body.tell()
        except AttributeError:
            # In the case of HTTPDigestAuth being reused and the body of
            # the previous request was a file-like object, pos has the
            # file position of the previous body. Ensure it's set to
            # None.
            self._thread_local.pos = None
        r.register_hook('response', self.handle_401)
        r.register_hook('response', self.handle_redirect)
        self._thread_local.num_401_calls = 1
        return r
| gpl-2.0 |
niktre/espressopp | src/integrator/unittest/PTestVelocityVerlet.py | 7 | 4251 | # Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import espressopp
import espressopp.esutil
import espressopp.unittest
import espressopp.storage
import espressopp.integrator
import espressopp.interaction
import espressopp.analysis
import espressopp.bc
import mpi4py.MPI as MPI
import math
import logging
from espressopp import Real3D
# Input values for system
N = 10        # particles per box edge; the test places N**3 particles
cutoff = 2.5  # Lennard-Jones interaction cutoff
skin = 0.3    # Verlet-list skin added on top of the cutoff
def calcNumberCells(size, nodes, cutoff):
    """Return the largest cell count per node keeping each cell >= cutoff wide."""
    candidate = 1
    # Advance until a cell would become narrower than the cutoff; the
    # previous candidate is then the answer (0 if even one cell is too small).
    while size / (candidate * nodes) >= cutoff:
        candidate += 1
    return candidate - 1
class TestVerletList(espressopp.unittest.TestCase) :
    """Integration test: build a LJ system with a Verlet list and integrate it."""

    def test0Build(self) :
        """Set up an N**3-particle Lennard-Jones system and run the integrator.

        Prints total/potential/kinetic energy and temperature at the start
        and after every 10 steps; passes if the whole pipeline runs without
        raising.
        """
        system = espressopp.System()
        rng = espressopp.esutil.RNG()
        SIZE = float(N)
        box = Real3D(SIZE)
        bc = espressopp.bc.OrthorhombicBC(None, box)
        system.bc = bc
        system.rng = rng
        system.skin = skin
        comm = espressopp.MPI.COMM_WORLD
        # Decompose the box only along z, one cell grid entry per axis.
        nodeGrid = (1, 1, comm.size)
        cellGrid = [1, 1, 1]
        for i in xrange(3):
            cellGrid[i] = calcNumberCells(SIZE, nodeGrid[i], cutoff)
        print 'NodeGrid = %s'%(nodeGrid,)
        print 'CellGrid = %s'%cellGrid
        dd = espressopp.storage.DomainDecomposition(system, comm, nodeGrid, cellGrid)
        system.storage = dd
        # Place particles on a perturbed cubic lattice; the offset r is a
        # deterministic pseudo-random jitter derived from the lattice index.
        id = 0
        for i in xrange(N):
            for j in xrange(N):
                for k in xrange(N):
                    m = (i + 2*j + 3*k) % 11
                    r = 0.45 + m * 0.01
                    x = (i + r) / N * SIZE
                    y = (j + r) / N * SIZE
                    z = (k + r) / N * SIZE
                    dd.addParticle(id, Real3D(x, y, z))
                    # not yet: dd.setVelocity(id, (1.0, 0.0, 0.0))
                    id = id + 1
        dd.decompose()
        integrator = espressopp.integrator.VelocityVerlet(system)
        print 'integrator.dt = %g, will be set to 0.005'%integrator.dt
        integrator.dt = 0.005
        print 'integrator.dt = %g, is now '%integrator.dt
        # now build Verlet List
        # ATTENTION: you have to add the skin explicitly here
        vl = espressopp.VerletList(system, cutoff = cutoff + system.skin)
        potLJ = espressopp.interaction.LennardJones(1.0, 1.0, cutoff = cutoff)
        # ATTENTION: auto shift was enabled
        print "potLJ, shift = %g"%potLJ.shift
        interLJ = espressopp.interaction.VerletListLennardJones(vl)
        interLJ.setPotential(type1 = 0, type2 = 0, potential = potLJ)
        # Todo
        system.addInteraction(interLJ)
        temp = espressopp.analysis.Temperature(system)
        temperature = temp.compute()
        # Equipartition with kB = 1: E_kin = 0.5 * T * dof, dof = 3 * N**3.
        kineticEnergy = 0.5 * temperature * (3 * N * N * N)
        potentialEnergy = interLJ.computeEnergy()
        print 'Start: tot energy = %10.6f pot = %10.6f kin = %10.f temp = %10.6f'%(kineticEnergy + potentialEnergy,
                                                                                  potentialEnergy, kineticEnergy, temperature)
        nsteps = 10
        # logging.getLogger("MDIntegrator").setLevel(logging.DEBUG)
        for i in xrange(20):
            integrator.run(nsteps)
            temperature = temp.compute()
            kineticEnergy = 0.5 * temperature * (3 * N * N * N)
            potentialEnergy = interLJ.computeEnergy()
            print 'Step %6d: tot energy = %10.6f pot = %10.6f kin = %10.6f temp = %f'%(nsteps*(i+1),
                  kineticEnergy + potentialEnergy, potentialEnergy, kineticEnergy, temperature)
if __name__ == "__main__":
    # Run the test case through the standard unittest runner.
    unittest.main()
| gpl-3.0 |
suutari-ai/shoop | shuup_tests/core/test_rounding.py | 3 | 5376 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from decimal import Decimal
from shuup.core.models import OrderLine
from shuup.core.models import OrderLineType
from shuup.core.models import Shop
from shuup.core.models import ShopStatus
from shuup.testing.factories import (
add_product_to_order, create_empty_order, create_product,
get_default_shop, get_default_supplier
)
from shuup.utils.numbers import bankers_round
from shuup_tests.utils.basketish_order_source import BasketishOrderSource
# Price lists used to parametrize the rounding tests below; the last entry
# deliberately carries more decimals than the database precision keeps.
PRICE_SPEC = [
    ([1,2,3,4]),
    ([1,2,3,6]),
    ([1,2,3,8]),
    ([1.23223, 12.24442, 42.26233]),
    ([1223.46636, 13.24655, 411.234554]),
    ([101.74363, 12.99346, 4222.57422]),
    ([112.93549, 199.2446, 422.29234]),
    ([1994.49654, 940.23452, 425.24566]),
    ([1994.496541234566, 940.2345298765, 425.2456612334]), # Those prices that will be cut when put in DB
]
@pytest.mark.parametrize("prices", PRICE_SPEC)
@pytest.mark.django_db
def test_rounding(prices):
    """Order lines keep full price precision; taxless prices and the order
    total are bankers-rounded to two decimals."""
    expected = 0
    for p in prices:
        expected += bankers_round(p, 2)

    order = create_empty_order(prices_include_tax=False)
    order.save()
    for x, price in enumerate(prices):
        ol = OrderLine(
            order=order,
            type=OrderLineType.OTHER,
            quantity=1,
            text="Thing",
            ordering=x,
            base_unit_price=order.shop.create_price(price)
        )
        ol.save()
    order.cache_prices()
    for x, order_line in enumerate(order.lines.all().order_by("ordering")):
        # Quantize the input to the database precision (9 decimal places).
        price = Decimal(prices[x]).quantize(Decimal(".1") ** 9)
        # make sure prices are in database with original precision
        assert order_line.base_unit_price == order.shop.create_price(price)
        # make sure the line taxless price is rounded
        assert order_line.taxless_price == order.shop.create_price(bankers_round(price, 2))
        # Check that total prices calculated from priceful parts still matches
        assert _get_taxless_price(order_line) == order_line.taxless_price
        assert _get_taxful_price(order_line) == order_line.taxful_price
        # make sure the line price is rounded
        assert order_line.price == order.shop.create_price(price)
    # make sure order total is rounded
    assert order.taxless_total_price == order.shop.create_price(bankers_round(expected, 2))
@pytest.mark.parametrize("prices", PRICE_SPEC)
@pytest.mark.django_db
def test_order_source_rounding(prices):
    """The same rounding guarantees as test_rounding, but for an order
    source (lines not yet persisted as an Order)."""
    shop = Shop.objects.create(
        name="test",
        identifier="test",
        status=ShopStatus.ENABLED,
        public_name="test",
        prices_include_tax=False
    )
    expected = 0
    for p in prices:
        expected += bankers_round(p, 2)
    source = BasketishOrderSource(shop)
    for x, price in enumerate(prices):
        source.add_line(
            type=OrderLineType.OTHER,
            quantity=1,
            text=x,
            base_unit_price=source.create_price(price),
            ordering=x,
        )
    for x, order_source in enumerate(source.get_lines()):
        # Quantize the input to the database precision (9 decimal places).
        price = Decimal(prices[x]).quantize(Decimal(".1") ** 9)
        # make sure prices are in database with original precision
        assert order_source.base_unit_price == source.shop.create_price(price)
        # make sure the line taxless price is rounded
        assert order_source.taxless_price == source.shop.create_price(bankers_round(price, 2))
        # Check that total prices calculated from priceful parts still matches
        assert _get_taxless_price(order_source) == order_source.taxless_price
        assert _get_taxful_price(order_source) == order_source.taxful_price
        # make sure the line price is rounded
        assert order_source.price == source.shop.create_price(price)
    # make sure order total is rounded
    assert source.taxless_total_price == source.shop.create_price(bankers_round(expected, 2))
@pytest.mark.parametrize("prices", PRICE_SPEC)
@pytest.mark.django_db
def test_rounding_with_taxes(prices):
    """Rounded taxless/taxful line totals stay consistent with the line's
    own prices when a non-trivial tax rate and quantity are involved."""
    shop = get_default_shop()
    supplier = get_default_supplier()
    order = create_empty_order(shop=shop)
    order.save()
    product = create_product("test_sku", shop=shop, supplier=supplier)
    # Deliberately awkward tax rate and quantity to stress the rounding.
    tax_rate = Decimal("0.22222")
    for x, price in enumerate(prices):
        add_product_to_order(
            order, supplier, product, quantity=Decimal("2.22"),
            taxless_base_unit_price=Decimal(price), tax_rate=tax_rate)
    order.cache_prices()
    for x, order_line in enumerate(order.lines.all().order_by("ordering")):
        # Check that total prices calculated from priceful parts still matches
        assert _get_taxless_price(order_line) == order_line.taxless_price
        assert _get_taxful_price(order_line) == order_line.taxful_price
        assert order_line.price == (order_line.base_unit_price * order_line.quantity - order_line.discount_amount)
def _get_taxless_price(line):
    """Taxless line total (unit price * quantity minus discount), bankers-rounded to 2 decimals."""
    total = line.taxless_base_unit_price * line.quantity - line.taxless_discount_amount
    return bankers_round(total, 2)
def _get_taxful_price(line):
    """Taxful line total (unit price * quantity minus discount), bankers-rounded to 2 decimals."""
    total = line.taxful_base_unit_price * line.quantity - line.taxful_discount_amount
    return bankers_round(total, 2)
| agpl-3.0 |
mattcaldwell/boto | boto/cacerts/__init__.py | 260 | 1097 | # Copyright 2010 Google Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| mit |
kaplun/invenio | modules/webjournal/lib/webjournal_templates.py | 25 | 36303 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebJournal templates - Defines the look of various parts of the
WebJournal modules. Most customizations will however be done through
BibFormat format templates files.
"""
import os
from invenio.config import \
CFG_SITE_SUPPORT_EMAIL, \
CFG_ETCDIR, \
CFG_SITE_URL, \
CFG_SITE_LANG, \
CFG_SITE_RECORD
from invenio.messages import gettext_set_language
from invenio.webpage import page
from invenio.webjournal_utils import \
get_number_of_articles_for_issue, \
get_release_datetime, \
get_announcement_datetime, \
get_issue_number_display
class Template:
"""Templating class, refer to bibformat.py for examples of call"""
def tmpl_webjournal_missing_info_box(self, req, ln, title, msg_title, msg):
"""
returns a box indicating that the given journal was not found on the
server, leaving the opportunity to select an existing journal from a list.
"""
_ = gettext_set_language(ln)
box_title = msg_title
box_text = msg
box_list_title = _("Available Journals")
# todo: move to DB call
find_journals = lambda path: [entry for entry in os.listdir(str(path)) \
if os.path.isdir(str(path)+str(entry))]
try:
all_journals = find_journals('%s/webjournal/' % CFG_ETCDIR)
except:
all_journals = []
mail_msg = _("Contact %(x_url_open)sthe administrator%(x_url_close)s") % \
{'x_url_open' :
'<a href="mailto:%s">' % CFG_SITE_SUPPORT_EMAIL,
'x_url_close' : '</a>'}
box = '''
<div style="text-align: center;">
<fieldset style="width:400px; margin-left: auto; margin-right:auto">
<legend style="color:#a70509;background-color:#fff;">
<i>%s</i>
</legend>
<p style="text-align:center;">%s</p>
<h2 style="color:#0D2B88;">%s</h2>
<ul class="webjournalBoxList">
%s
</ul>
<br/>
<div style="text-align:right;">
%s
</div>
</fieldset>
</div>
''' % (box_title,
box_text,
box_list_title,
"".join(['<li><a href="%s/journal/?name=%s">%s</a></li>'
% (CFG_SITE_URL,
journal,
journal) for journal in all_journals]),
mail_msg)
return page(req=req, title=title, body=box)
    def tmpl_webjournal_error_box(self, req, ln, title, title_msg, msg):
        """
        returns an error box for webjournal errors.

        @param req: the request object
        @param ln: language code for the displayed texts
        @param title: HTML page title
        @param title_msg: heading shown inside the error box
        @param msg: error description shown inside the box
        """
        _ = gettext_set_language(ln)
        # NOTE(review): _() is applied to variable messages here, which
        # only translates them if the exact strings appear in the message
        # catalogue -- confirm this is intended.
        title = _(title)
        title_msg = _(title_msg)
        msg = _(msg)
        mail_msg = _("Contact %(x_url_open)sthe administrator%(x_url_close)s") % \
                   {'x_url_open' :
                    '<a href="mailto:%s">' % CFG_SITE_SUPPORT_EMAIL,
                    'x_url_close' : '</a>'}
        box = '''
        <div style="text-align: center;">
            <fieldset style="width:400px; margin-left: auto; margin-right: auto;">
                <legend style="color:#a70509;background-color:#fff;">
                    <i>%s</i>
                </legend>
                <p style="text-align:center;">%s</p>
                <br/>
                <div style="text-align:right;">
                    %s
                </div>
            </fieldset>
        </div>
        ''' % (title_msg, msg, mail_msg)
        return page(req=req, title=title, body=box)
    def tmpl_admin_regenerate_confirm(self,
                                      ln,
                                      journal_name,
                                      issue,
                                      issue_released_p):
        """
        Ask user confirmation about regenerating the issue, as well as if
        we should move all the drafts to the public collection.

        Parameters:

             journal_name  -  the journal for which the cache should
                              be delete

                    issue  -  the issue for which the cache should be
                              deleted

                       ln  -  language

         issue_released_p  -  is issue already released?

        Returns the confirmation form as an HTML string.
        """
        # The "publish drafts" checkbox only makes sense once the issue
        # has been released, hence it is disabled otherwise.
        out = '''
        <form action="/admin/webjournal/webjournaladmin.py/regenerate" name="regenerate" method="post">
        You are going to refresh the cache for issue %(issue)s. Do you want to continue? <br/>
        <input type="hidden" name="confirmed_p" value="confirmed"/>
        <input type="hidden" name="journal_name" value="%(journal_name)s">
        <input type="hidden" name="issue" value="%(issue)s">
        <input type="hidden" name="ln" value="%(ln)s">
        <input type="checkbox" name="publish_draft_articles_p" value="move" id="publish_draft_articles_p" %(disabled)s/><label for="publish_draft_articles_p">Also switch all "<em>Offline</em>" articles to "<em>Online</em>"</label>[<a target="_blank" href="/help/admin/webjournal-editor-guide#cache-online">?</a>]<br/></br>
        <input class="adminbutton" type="submit" value="Regenerate"/>
        </form>
        ''' % {'issue': issue,
               'journal_name': journal_name,
               'ln': ln,
               'disabled': not issue_released_p and 'disabled="disabled"' or ""}

        return out
    def tmpl_admin_regenerate_success(self, ln, journal_name, issue):
        """
        Success message if a user applied the "regenerate" link. Links back to
        the regenerated journal.

        @param ln: language code (currently only used to set up gettext)
        @param journal_name: name of the regenerated journal
        @param issue: issue in "number/year" form
        """
        _ = gettext_set_language(ln)
        # NOTE(review): _ is assigned but the message below is not
        # localized -- confirm whether that is intended.
        out = '''
        The issue number %(issue)s for the %(journal_name)s journal has been successfully
        regenerated. <br/>
        Look at your changes: &raquo; <a href="%(CFG_SITE_URL)s/journal/%(journal_name)s/%(issue_year)s/%(issue_number)s"> %(journal_name)s </a> <br/> or go back to this journal <a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/administrate?journal_name=%(journal_name)s">administration interface</a>.
        ''' % {'issue': issue,
               'journal_name': journal_name,
               'CFG_SITE_URL': CFG_SITE_URL,
               'issue_year': issue.split('/')[1],
               'issue_number': issue.split('/')[0]}

        return out
def tmpl_admin_regenerate_error(self, ln, journal_name, issue):
"""
Failure message for a regeneration try.
"""
_ = gettext_set_language(ln)
return page(
title=_("Regeneration Error"),
body = _("The issue could not be correctly regenerated. "
"Please contact your administrator."))
def tmpl_admin_feature_record(self, journal_name,
featured_records=[],
ln=CFG_SITE_LANG,
msg=None):
"""
Display an interface form to feature a specific record from Invenio.
"""
_ = gettext_set_language(ln)
out = ''
out += '''<table class="admin_wvar">
<tr><th colspan="5" class="adminheaderleft" cellspacing="0">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="administrate?journal_name=%(journal_name)s">Administrate</a></small> </td>
<td>1. <small>Feature a Record</small> </td>
<td>2. <small><a href="configure?action=edit&journal_name=%(journal_name)s">Edit Configuration</a></small> </td>
<td>3. <small><a href="%(CFG_SITE_URL)s/journal/%(journal_name)s">Go to the Journal</a></small> </td>
</tr>
</table><br/>''' % {'journal_name': journal_name,
'menu': _("Menu"),
'CFG_SITE_URL': CFG_SITE_URL}
if msg is not None:
out += msg
out += '<br/><br/>'
out += '''<table class="admin_wvar" cellspacing="0" width="400px">
<tr>
<th colspan="3" class="adminheader">Featured records</th>
</tr>'''
color = "fff"
for (recid, img_url) in featured_records:
out += '''<tr style="background-color:#%(color)s">
<td class="admintd"><img src="%(img_url)s" alt="" height="40px"/></td>
<td class="admintdleft"><a href="%(CFG_SITE_URL)s/%(CFG_SITE_RECORD)s/%(recid)s">Record %(recid)s</a></td>
<td class="admintdright"><a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/feature_record?journal_name=%(journal_name)s&action=askremove&recid=%(recid)s">remove</a></td>
</tr>''' % {'color': color,
'journal_name': journal_name,
'recid': recid,
'img_url': img_url,
'CFG_SITE_URL': CFG_SITE_URL,
'CFG_SITE_RECORD': CFG_SITE_RECORD}
if color == 'fff':
color = 'EBF7FF'
else:
color = 'fff'
if len(featured_records) == 0:
out += '<tr><td colspan="3" class="admintd"><em>No record featured for the moment. Add one using the form below.</em></td></tr>'
out += '</table>'
out += '''
<br/><br/><br/>
<form action="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/feature_record" method="post">
<input type="hidden" name="action" value="add" />
<input type="hidden" name="journal_name" value="%(journal_name)s"/>
<table class="admin_wvar" cellspacing="0">
<tr>
<th colspan="2" class="adminheaderleft">Add a new featured record:</th>
</tr>
<tr>
<td class="admintdright"><label for="recordid"><span style="white-space: nowrap;">Featured Record ID</span></label>: </td>
<td><input tabindex="1" type="text" name="recid" value="" id="recordid"/></td>
</tr>
<tr>
<td class="admintdright"><label for="image_url"><span style="white-space: nowrap;">Icon URL</span></label>: </td>
<td><input tabindex="2" type="text" name="img_url" value="" id="image_url" size="60"/><em><br/><small>Image displayed along the featured record</small></em></td>
</tr>
<tr>
<td colspan="2" align="right"><input tabindex="3" class="adminbutton" type="submit" value="Add"/></td>
</tr>
</table>
</form>
''' % {'CFG_SITE_URL': CFG_SITE_URL,
'journal_name': journal_name}
return out
    def tmpl_admin_alert_plain_text(self, journal_name, ln, issue):
        """
        Default plain text message for email alert of journal updates.

        This will be used to pre-fill the content of the mail alert, which
        can still be modified by the admin before sending. The default body
        contains both an English and a French section.

        Customize this function to return different default texts
        based on journal name and language.

        @param journal_name: the journal for which the alert is prepared
        @param ln: the current language
        @param issue: the issue being announced
        """
        # Human-readable display form of the issue number.
        current_publication = get_issue_number_display(issue, journal_name, ln)
        plain_text = u'''Dear Subscriber,
The latest issue of %(journal_name)s, no. %(current_publication)s, has been released.
You can access it at the following URL:
%(CFG_SITE_URL)s/journal/%(journal_name)s/
Best Wishes,
%(journal_name)s team
----
Cher Abonné,
Le nouveau numéro de %(journal_name)s, no. %(current_publication)s, vient de paraître.
Vous pouvez y accéder à cette adresse :
%(CFG_SITE_URL)s/journal/%(journal_name)s/?ln=fr
Bonne lecture,
L'équipe de %(journal_name)s
''' % {'journal_name': journal_name,
       'current_publication': current_publication,
       'CFG_SITE_URL': CFG_SITE_URL}
        return plain_text
    # ' (comment only balances the apostrophe above for some editors)
def tmpl_admin_alert_header_html(self, journal_name, ln, issue):
"""
Returns HTML header to be inserted into the HTML alert
@param journal_name: the journal name
@param ln: the current language
@param issue: the issue for wich the alert is sent
"""
_ = gettext_set_language(ln)
journal_url = '%(CFG_SITE_URL)s/journal/%(journal_name)s/%(year)s/%(number)s' % \
{'CFG_SITE_URL': CFG_SITE_URL,
'journal_name': journal_name,
'year': issue.split('/')[1],
'number': issue.split('/')[0]}
journal_link = '<a href="%(journal_url)s">%(journal_url)s</a>' % \
{'journal_url': journal_url}
return '<p class="htmlalertheader">' + \
_('If you cannot read this email please go to %(x_journal_link)s') % {'x_journal_link': journal_link} + \
'</p>'
def tmpl_admin_alert_subject(self, journal_name, ln, issue):
"""
Default subject for email alert of journal updates.
Customize this function to return different default texts
based on journal name and language,
"""
return "%s %s released" % (journal_name, \
get_issue_number_display(issue,
journal_name,
ln))
    def tmpl_admin_alert_interface(self, ln, journal_name, default_subject,
                                   default_msg, default_recipients, alert_ln):
        """
        Render the alert email composition interface.

        Shows a form pre-filled with the default recipients, subject and
        plain-text body, plus a preview of the HTML newsletter (the journal
        front page) loaded in an iframe next to the form.

        @param ln: the interface language
        @param journal_name: the journal for which the alert is sent
        @param default_subject: pre-filled subject line
        @param default_msg: pre-filled plain-text body
        @param default_recipients: pre-filled recipients field
        @param alert_ln: language of the previewed journal front page
        """
        _ = gettext_set_language(ln)
        # NOTE(review): the second <td> is never closed before </tr> in the
        # template below -- browsers tolerate it, but confirm it is intended.
        interface = '''
        <table>
        <tr>
        <td valign="top">
        <form action="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/alert" name="alert" method="post">
        <input type="hidden" name="journal_name" value="%(journal_name)s"/>
        <p>Recipients:</p>
        <input type="text" name="recipients" value="%(default_recipients)s" size="60" />
        <p>Subject:</p>
        <input type="text" name="subject" value="%(subject)s" size="60" />
        <p>Plain Text Message:</p>
        <textarea name="plainText" wrap="soft" rows="25" cols="80">%(plain_text)s</textarea>
        <p> <input type="checkbox" name="htmlMail" id="htmlMail" value="html" checked="checked" />
        <label for="htmlMail">Send journal front-page <small>(<em>HTML newsletter</em>)</small></label>
        </p>
        <br/>
        <input class="formbutton" type="submit" value="Send Alert" name="sent"/>
        </form>
        </td><td valign="top">
        <p>HTML newsletter preview:</p>
        <iframe id="htmlMailPreview" src="%(CFG_SITE_URL)s/journal/%(journal_name)s?ln=%(alert_ln)s" height="600" width="600"></iframe>
        </tr>
        </table>
        ''' % {'CFG_SITE_URL': CFG_SITE_URL,
               'journal_name': journal_name,
               'subject': default_subject,
               'plain_text': default_msg,
               'default_recipients': default_recipients,
               'alert_ln': alert_ln}
        return interface
def tmpl_admin_alert_was_already_sent(self, ln, journal_name,
subject, plain_text, recipients,
html_mail, issue):
"""
"""
_ = gettext_set_language(ln)
out = '''
<form action="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/alert" name="alert" method="post">
<input type="hidden" name="journal_name" value="%(journal_name)s"/>
<input type="hidden" name="recipients" value="%(recipients)s" />
<input type="hidden" name="subject" value="%(subject)s" />
<input type="hidden" name="plainText" value="%(plain_text)s" />
<input type="hidden" name="htmlMail" value="%(html_mail)s" />
<input type="hidden" name="force" value="True" />
<p><em>WARNING! </em>The email alert for the issue %(issue)s has already been
sent. Are you absolutely sure you want to send it again?</p>
<p>Maybe you forgot to release an update issue? If so, please do this
first <a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/issue_control?journal_name=%(journal_name)s&issue=%(issue)s">here</a>.</p>
<p>Proceed with caution, or your subscribers will receive the alert a second time.</p>
<br/>
<input class="formbutton" type="submit" value="I really want this!" name="sent"/>
</form>
''' % {'CFG_SITE_URL': CFG_SITE_URL,
'journal_name': journal_name,
'recipients': recipients,
'subject': subject,
'plain_text': plain_text,
'html_mail': html_mail,
'issue': issue}
return out
def tmpl_admin_alert_unreleased_issue(self, ln, journal_name):
"""
Tried to announce an unreleased issue
"""
_ = gettext_set_language(ln)
out = '''<p style="color:#f00">An alert cannot be send for this issue!</p>
You tried to send an alert for an issue that has not yet been released.
Release it first and retry.<br/>
Go back to the <a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/administrate?journal_name=%(journal_name)s">administration interface</a>.
''' % {'CFG_SITE_URL': CFG_SITE_URL,
'journal_name': journal_name}
return out
    def tmpl_admin_alert_success_msg(self, ln, journal_name):
        """
        Success message for the alert system, shown after the alert email
        was sent, with links back to the journal and to the administration
        interface.

        @param ln: the interface language
        @param journal_name: the journal for which the alert was sent
        """
        _ = gettext_set_language(ln)
        out = '''<p style="color:#0f0">Alert sent successfully!</p>
        Return to your journal here: » \
        <a href="%(CFG_SITE_URL)s/journal/%(journal_name)s">%(journal_name)s</a> <br/>
        or go back to the <a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/administrate?journal_name=%(journal_name)s">administration interface</a>''' % {'CFG_SITE_URL': CFG_SITE_URL,
        'journal_name': journal_name}
        return out
    def tmpl_admin_control_issue(self, ln, journal_name,
                                 active_issues):
        """
        Display the interface allowing to set the current issue.

        Renders one pre-checked checkbox per issue in ``active_issues`` and
        lets the admin add further issue numbers (either the next one
        automatically, or a custom "ww/YYYY" value typed by hand) before
        publishing the selection.

        @param ln: the interface language
        @param journal_name: the journal to administrate
        @param active_issues: issue numbers currently part of the publication
        """
        _ = gettext_set_language(ln)
        # NOTE: both the checkboxes and the free-text input share the field
        # name "issue", so the handler receives all values together.
        out = '''
        <p>This interface gives you the possibility to create your
        current webjournal publication. Every checked issue number
        will be in the current publication. Once you have made your
        selection you can publish the new issue by clicking the %(publish)s
        button at the end.
        </p>
        <form action="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/issue_control" name="publish">
        <input type="hidden" name="journal_name" value="%(journal_name)s"/>
        Issue Numbers to publish:
        <ul>
        %(issues_list)s
        </ul>
        <br/>
        <p>Add a higher issue number by clicking "%(add)s"</p>
        <input class="formbutton" type="submit" value="%(add)s" name="action"/>
        <p>.. or add a custom issue number by typing it here and pressing "%(refresh)s"</p>
        <input type="text" value="ww/YYYY" name="issue"/>
        <input class="formbutton" type="submit" value="%(refresh)s" name="action"/>
        <br/>
        <br/>
        <p>If all issues you want to publish are correctly checked, proceed \
        by clicking "%(publish)s".</p>
        <input class="formbutton" type="submit" value="%(publish)s" name="action"/>
        </form>
        ''' % {'CFG_SITE_URL': CFG_SITE_URL,
               'journal_name': journal_name,
               'issues_list': "".join(['<li><input type="checkbox" name="issue" value="%s" CHECKED> %s</input></li>'
                                       % (issue, issue) for issue in active_issues]),
               'add' : _("Add"),
               'publish' : _("Publish"),
               'refresh' : _("Refresh")
               }
        return out
def tmpl_admin_control_issue_success_msg(self, ln,
active_issues, journal_name):
"""
An issue was successfully published
"""
_ = gettext_set_language(ln)
issue_string = "".join([" - %s" % issue for issue in active_issues])
title = '<h2>Issue(s) %s created successfully!</h2>' % issue_string
body = '''<p>Now you can:</p>
<p>Return to your journal here: »
<a href="%s/journal/%s"> %s </a>
</p>
<p>Make additional publications here: »
<a href="%s/admin/webjournal/webjournaladmin.py/administrate?journal_name=%s">Publishing Interface</a>
</p>
<p>Send an alert email here: »
<a href="%s/admin/webjournal/webjournaladmin.py/alert?journal_name=%s"> Send an alert</a>
</p>''' % (CFG_SITE_URL, journal_name,
journal_name, CFG_SITE_URL,
journal_name, CFG_SITE_URL, journal_name)
return title + body
    def tmpl_admin_update_issue(self, ln, journal_name, next_issue,
                                current_issue):
        """
        A form that lets a user make an update to an issue number.

        Asks for confirmation before releasing the pending update
        ``next_issue`` of the currently released ``current_issue``,
        showing the per-category article counts of both issues.

        @param ln: the interface language
        @param journal_name: the journal to administrate
        @param next_issue: the next scheduled update issue
        @param current_issue: the currently released issue
        """
        _ = gettext_set_language(ln)
        # Per-category article counts (mapping-like objects; iterated with
        # iteritems() below -- this file is Python 2).
        current_articles = get_number_of_articles_for_issue(current_issue,
                                                            journal_name,
                                                            ln)
        next_articles = get_number_of_articles_for_issue(next_issue,
                                                         journal_name,
                                                         ln)
        html = '''
        <p>The Issue that was released on week %(current_issue)s has pending updates scheduled. The
        next update for this issue is %(next_issue)s.</p>
        <p><em>Note: If you want to make a new release, please click through all the
        pending updates first.</em></p>
        <p>Do you want to release the update from issue: <br/>
        <em>%(current_issue)s</em> (%(current_articles)s) <br/>
        to issue: <br/>
        <em>%(next_issue)s</em> (%(next_articles)s) <br/>
        now?</p>
        <form action="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/issue_control" name="publish">
        <input type="hidden" name="journal_name" value="%(journal_name)s"/>
        <input type="hidden" name="issue" value="%(next_issue)s"/>
        <input class="formbutton" type="submit" value="%(update)s" name="action"/>
        </form>
        ''' % {'current_issue': current_issue,
               'next_issue' : next_issue,
               # Render the counts as comma-separated "category : count".
               'current_articles': ",".join(["%s : %s" % (item[0], item[1]) \
                                             for item in current_articles.iteritems()]),
               'next_articles': ",".join(["%s : %s" % (item[0], item[1]) \
                                          for item in next_articles.iteritems()]),
               'CFG_SITE_URL' : CFG_SITE_URL,
               'journal_name': journal_name,
               'update': _("Update")}
        return html
def tmpl_admin_updated_issue_msg(self, ln, update_issue, journal_name):
"""
Prints a success message for the Update release of a journal.
"""
_ = gettext_set_language(ln)
title = '<h2>Journal update %s published successfully!</h2>' % update_issue
body = '''<p>Now you can:</p>
<p>Return to your journal here: »
<a href="%s/journal/%s"> %s </a>
</p>
<p>Go back to the publishing interface: »
<a href="%s/admin/webjournal/webjournaladmin.py/administrate?journal_name=%s">Issue Interface</a>
</p>
<p>Send an alert email here: »
<a href="%s/journal/alert?name=%s"> Send an alert</a>
</p>''' % (CFG_SITE_URL, journal_name, journal_name,
CFG_SITE_URL, journal_name, CFG_SITE_URL, journal_name)
return title + body
def tmpl_admin_administrate(self, journal_name, current_issue,
current_publication, issue_list,
next_issue_number, ln=CFG_SITE_LANG,
as_editor=True):
"""
Returns an administration interface that shows the current publication and
supports links to all important actions.
@param as_editor: True if can make changes to the configuration. Else read-only mode.
"""
_ = gettext_set_language(ln)
out = ''
if as_editor:
admin_menu = '''<table class="admin_wvar">
<tr><th colspan="5" class="adminheaderleft" cellspacing="0">%(menu)s</th></tr>
<tr>
<td>0. <small>Administrate</small> </td>
<td>1. <small><a href="feature_record?journal_name=%(journal_name)s">Feature a Record</a></small> </td>
<td>2. <small><a href="configure?action=edit&journal_name=%(journal_name)s">Edit Configuration</a></small> </td>
<td>3. <small><a href="%(CFG_SITE_URL)s/journal/%(journal_name)s">Go to the Journal</a></small> </td>
</tr>
</table><br/>'''
else:
admin_menu = '''<table class="admin_wvar">
<tr><th colspan="5" class="adminheaderleft" cellspacing="0">%(menu)s</th></tr>
<tr>
<td>0. <small>Administrate</small> </td>
<td>1. <small><a href="%(CFG_SITE_URL)s/journal/%(journal_name)s">Go to the Journal</a></small> </td>
</tr>
</table><br/>'''
out += admin_menu % {'journal_name': journal_name,
'menu': _("Menu"),
'CFG_SITE_URL': CFG_SITE_URL}
# format the issues
issue_boxes = []
issue_list.append(next_issue_number)
for issue in issue_list:
articles = get_number_of_articles_for_issue(issue,
journal_name,
ln)
released_on = get_release_datetime(issue, journal_name, ln)
announced_on = get_announcement_datetime(issue, journal_name, ln)
issue_box = '''
<tr style="%s">
<td class="admintdright" style="vertical-align: middle;"></td>
<td class="admintdleft" style="white-space: nowrap; vertical-align: middle;">
<p>Issue: %s</p>
<p>Publication: %s</p>
</td>
<td class="admintdright" style="vertical-align: middle;">
%s
</td>
<td class="admintdright" style="vertical-align: middle;">
<p>%s</p>
<p>%s</p>
</td>
<td class="admintdright" style="vertical-align: middle;">
<p><a href="%s/admin/webjournal/webjournaladmin.py/regenerate?journal_name=%s&issue=%s&ln=%s">>regenerate</a></p>
</td>
<tr>
''' % ((issue==current_issue) and "background:#00FF00;" or "background:#F1F1F1;",
issue, (issue==next_issue_number) and "?" or current_publication,
"\n".join(['<p>%s : %s <a href="%s/journal/%s/%s/%s/%s">>view</a></p>' %
(item[0], item[1],
CFG_SITE_URL, journal_name,
issue.split('/')[1], issue.split('/')[0], item[0]) \
for item in articles.iteritems()]),
(not released_on) and
('<em>not released</em>' + (as_editor and '<br/><a href="%s/admin/webjournal/webjournaladmin.py/issue_control?journal_name=%s">>release now</a>' % (CFG_SITE_URL, journal_name) or '')) or
'released on: %s' % released_on.strftime("%d.%m.%Y"),
(not announced_on)
and ('<em>not announced</em>' + (as_editor and '<br/><a href="%s/admin/webjournal/webjournaladmin.py/alert?journal_name=%s&issue=%s">>announce now</a>' % (CFG_SITE_URL, journal_name, issue) or '')) or
'announced on: %s <br/><a href="%s/admin/webjournal/webjournaladmin.py/alert?journal_name=%s&issue=%s">>re-announce</a>' % (announced_on.strftime("%d.%m.%Y"), CFG_SITE_URL, journal_name, issue),
CFG_SITE_URL, journal_name, issue, ln
)
issue_boxes.append(issue_box)
out += '''
<table class="admin_wvar" width="80%%" cellspacing="0">
<tbody>
<tr>
<th class="adminheaderleft"></th>
<th class="adminheaderleft">Issue / Publication</th>
<th class="adminheader">Articles</th>
<th class="adminheaderleft">Release / Announcement</th>
<th class="adminheaderleft">Cache Status</th>
<tr>
%s
</tbody>
</table>
''' % ("\n".join([issue_box for issue_box in issue_boxes]))
return out
def tmpl_admin_index(self, ln, journals, msg=None):
"""
Returns the admin index page content.
Lists the journals, and offers options to edit them, delete them
or add new journal
params:
ln - ln
journals - list of tuples (journal_info dict, as_editor)
msg - message to be displayed
"""
out = ""
if msg is not None:
out += msg
out += '''
<p>Choose the journal you want to administrate.</p>
<table class="admin_wvar" cellspacing="0">
<tr>
<th class="adminheader">Journals</th>
<th colspan="2" class="adminheader"> </th>
</tr>
'''
color = "fff"
for journal_info, as_editor in journals:
row = '''<tr style="background-color:#%(color)s">
<td class="admintdleft"><a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/administrate?journal_name=%(journal_name)s">%(journal_name)s</a></td>
<td class="admintdright"><a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/administrate?journal_name=%(journal_name)s">edit</a></td>'''
if as_editor:
row += '<td class="admintdright"><a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/index?journal_name=%(journal_name)s&action=askDelete">delete</a></td>'
row += '</tr>'
out += row % {'color': color,
'journal_name': journal_info['journal_name'],
'journal_id': journal_info['journal_id'],
'CFG_SITE_URL': CFG_SITE_URL}
if color == 'fff':
color = 'EBF7FF'
else:
color = 'fff'
out += '''<tr style="background-color:#%(color)s">
<td class="admintdleft" colspan="3" style="padding: 5px 10px;"><a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/configure?action=add">Add new journal</a></td>
</tr>''' % {'color': color,
'CFG_SITE_URL': CFG_SITE_URL}
out += '</table>'
return out
    def tmpl_admin_configure_journal(self, ln, journal_name='', xml_config=None,
                                     action='edit', msg=None):
        """
        Display a page to change the settings of a journal. Also used to
        add a new journal.

        @param ln: the interface language
        @param journal_name: name of the journal (empty when adding one)
        @param xml_config: the journal XML configuration (unicode; encoded
            to UTF-8 before being placed in the textarea)
        @param action: one of 'add', 'edit', 'editDone'
        @param msg: optional feedback message displayed above the form
        """
        out = ''
        _ = gettext_set_language(ln)
        # The journal name cannot be changed after creation: the field is
        # read-only unless we are adding a brand new journal.
        journal_name_readonly = 'readonly="readonly" disabled="disabled"'
        journal_name_note = ''
        submit_button_label = _('Apply')
        if action == 'add':
            journal_name = ''
            journal_name_readonly = ''
            journal_name_note = 'Used in URLs. Choose it short and meaningful. This cannot be changed later'
            submit_button_label = _('Add')
        elif action in ['edit', 'editDone']:
            # Display navigation menu
            out += '''<table class="admin_wvar">
        <tr><th colspan="5" class="adminheaderleft" cellspacing="0">%(menu)s</th></tr>
        <tr>
        <td>0. <small><a href="administrate?journal_name=%(journal_name)s">Administrate</a></small> </td>
        <td>1. <small><a href="feature_record?journal_name=%(journal_name)s">Feature a Record</a></small> </td>
        <td>2. <small>Edit Configuration</small> </td>
        <td>3. <small><a href="%(CFG_SITE_URL)s/journal/%(journal_name)s">Go to the Journal</a></small> </td>
        </tr>
        </table><br/>''' % {'journal_name': journal_name,
                            'menu': _("Menu"),
                            'CFG_SITE_URL': CFG_SITE_URL}
        if msg is not None:
            out += msg
        out += '<br/><br/>'
        # NOTE(review): the hidden "action" field is always "addDone", even
        # when this form edits an existing journal -- confirm the handler
        # treats both cases identically.
        out += '''
        <form action="configure" method="post">
        <input type="hidden" name="ln" value="%(ln)s" />
        <input type="hidden" name="action" value="addDone" />
        <table class="admin_wvar" cellspacing="0" style="width:90%%">
        <tr>
        <th colspan="2" class="adminheaderleft">
        Journal settings</th>
        </tr>
        <tr>
        <td class="admintdright" width="100px"><label for="journal_name">Name</label>: </td>
        <td><input tabindex="0" name="journal_name" type="text" id="journal_name" maxlength="50" size="15" value="%(journal_name)s" %(readonly)s %(journal_name_readonly)s /><small>%(journal_name_note)s</small></td>
        </tr>
        <tr>
        <td class="admintdright"><label for="xml_config">Config</label>: </td>
        <td><textarea wrap="soft" rows="25" style="width:100%%" tabindex="3" name="xml_config" id="xml_config" size="25" %(readonly)s>%(xml_config)s</textarea></td>
        </tr>
        <td colspan="2" align="right"><input type="submit" class="adminbutton" value="%(submit_button_label)s"></td>
        </tr>
        </table>
        </form>
        ''' % {'journal_name': journal_name,
               'ln': ln,
               'readonly': '',
               # 'disabled' is not referenced by the template above.
               'disabled': '',
               'xml_config': xml_config.encode('utf-8'),
               'journal_name_note': journal_name_note,
               'submit_button_label': submit_button_label,
               'journal_name_readonly': journal_name_readonly}
        return out
| gpl-2.0 |
ramitalat/odoo | addons/hr_payroll_account/__init__.py | 433 | 1116 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll_account
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/skia/tools/build_command_buffer.py | 18 | 6222 | #!/usr/bin/python
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Script to build the command buffer shared library and copy it to Skia tree
"""
import argparse
import os
import shlex
import shutil
import subprocess
import sys
def _check_call(cmd, cwd):
  """Runs *cmd* in *cwd* and exits the script with a diagnostic message if
  the command fails with a non-zero return code."""
  try:
    subprocess.check_call(cmd, cwd=cwd)
  except subprocess.CalledProcessError as error:
    sys.exit('Error (ret code: %s) calling "%s" in %s' % (error.returncode,
        error.cmd, cwd))


def main():
  """Parses arguments, optionally syncs the Chromium checkout, builds the
  command_buffer_gles2 shared library with gn + ninja and copies it into the
  requested Skia output directory."""
  parser = argparse.ArgumentParser(description=('Builds command_buffer_gles2 '
                                                'library and copies it'))
  parser.add_argument('-c', '--chrome-dir', required=True, help=
      'path to Chromium checkout (directory containing .gclient)')
  parser.add_argument('-o', '--output-dir', required=True,
      help='path to copy the command buffer shared library to. Typically this '
           'is out/Debug or out/Release in a Skia repository')
  parser.add_argument('--make-output-dir', default=False, action='store_true',
      help='Makes the output directory if it does not already exist.')
  parser.add_argument('--chrome-out-dir', default='CommandBufferForSkia',
      help='Type of name of the gn output directory (e.g. Debug or Release). '
           'This is relative to the Chromium src/out directory. Note that this '
           'script will reset the gn args in this directory on each run.')
  parser.add_argument('--extra-gn-args', default='',
      help=('Extra GN arguments to use for the output directory used to build'
            'the command buffer'))
  parser.add_argument('--extra-ninja-args', default='',
      help=('Extra arguments to pass to ninja when building the command '
            'buffer shared library'))
  parser.add_argument('--chrome-revision', default='origin/lkgr',
      help='Revision (hash, branch, tag) of Chromium to use.')
  parser.add_argument('--no-sync', action='store_true', default=False,
      help='Don\'t run git fetch or gclient sync in the Chromium tree')
  parser.add_argument('--no-hooks', action='store_true', default=False,
      help='Don\'t run gclient runhooks in the Chromium tree. Implies '
           '--no-sync')
  args = parser.parse_args()
  args.chrome_dir = os.path.abspath(args.chrome_dir)
  args.output_dir = os.path.abspath(args.output_dir)

  # Skipping the hooks implies skipping the sync as well.
  if args.no_hooks:
    args.no_sync = True

  # Validate the input/output locations.
  if os.path.isfile(args.chrome_dir):
    sys.exit(args.chrome_dir + ' exists but is a file.')

  if os.path.isfile(args.output_dir):
    sys.exit(args.output_dir + ' exists but is a file.')

  chrome_src_dir = os.path.join(args.chrome_dir, 'src')
  if not os.path.isdir(chrome_src_dir):
    sys.exit(chrome_src_dir + ' is not a directory.')

  # Fix: the output dir was previously re-checked with isfile() here even
  # though that case already exited above; only the isdir() check remains.
  if not os.path.isdir(args.output_dir):
    if args.make_output_dir:
      os.makedirs(args.output_dir)
    else:
      sys.exit(args.output_dir + ' does not exist (specify --make-output-dir '
               'to create).')

  chrome_target_dir_rel = os.path.join('out', args.chrome_out_dir)

  # The command buffer shared library will have a different name on Linux,
  # Mac, and Windows. Also, the name of the gclient executable we call out to
  # has a .bat file extension on Windows.
  platform = sys.platform
  if platform == 'cygwin':
    platform = 'win32'

  shared_lib_name = 'libcommand_buffer_gles2.so'
  gclient = 'gclient'
  if platform == 'darwin':
    shared_lib_name = 'libcommand_buffer_gles2.dylib'
  elif platform == 'win32':
    shared_lib_name = 'command_buffer_gles2.dll'
    gclient = 'gclient.bat'

  if not args.no_sync:
    _check_call(['git', 'fetch'], chrome_src_dir)
    _check_call(['git', 'checkout', args.chrome_revision], chrome_src_dir)
    # gclient sync expects the ninja generator to be selected via the env.
    os.environ['GYP_GENERATORS'] = 'ninja'
    _check_call([gclient, 'sync', '--reset', '--force', '--nohooks'],
                chrome_src_dir)

  if not args.no_hooks:
    _check_call([gclient, 'runhooks'], chrome_src_dir)

  # Locate the gn binary bundled with the Chromium checkout.
  gn = 'gn'
  platform = 'linux64'
  if sys.platform == 'darwin':
    platform = 'mac'
  elif sys.platform == 'win32':
    platform = 'win'
    gn = 'gn.exe'
  gn = os.path.join(chrome_src_dir, 'buildtools', platform, gn)

  # Generate the build files and build the shared library.
  gnargs = 'is_component_build=false is_debug=false ' + args.extra_gn_args
  _check_call([gn, 'gen', chrome_target_dir_rel, '--args=' + gnargs],
              chrome_src_dir)
  _check_call(['ninja'] + shlex.split(args.extra_ninja_args) +
              ['-C', chrome_target_dir_rel, 'command_buffer_gles2'],
              chrome_src_dir)

  # Copy the built library into the Skia output directory.
  shared_lib_src_dir = os.path.join(chrome_src_dir, chrome_target_dir_rel)
  shared_lib_src = os.path.join(shared_lib_src_dir, shared_lib_name)
  shared_lib_dst = os.path.join(args.output_dir, shared_lib_name)

  if not os.path.isfile(shared_lib_src):
    sys.exit('Command buffer shared library not at expected location: ' +
             shared_lib_src)

  shutil.copy2(shared_lib_src, shared_lib_dst)

  if not os.path.isfile(shared_lib_dst):
    sys.exit('Command buffer library not copied to ' + shared_lib_dst)

  print('Command buffer library copied to ' + shared_lib_dst)
# Script entry point.
if __name__ == '__main__':
  main()
| gpl-3.0 |
lonetwin/opencore | opencore/views/tests/test_contentfeeds.py | 4 | 11838 | # Copyright (C) 2008-2009 Open Society Institute
# Thomas Moroz: tmoroz.org
# 2010-2011 Large Blue
# Fergus Doyle: fergus.doyle@largeblue.com
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License Version 2 as published
# by the Free Software Foundation. You may not use, modify or distribute
# this program under any other version of the GNU General Public License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# stdlib
import unittest
from datetime import datetime
# Zope
from zope.interface import implements
from zope.interface import Interface
from zope.interface import taggedValue
# Repoze
from repoze.bfg import testing
from repoze.bfg.testing import cleanUp
# testfixtures
from testfixtures import Replacer
# opencore
from opencore.models.adapters import ProfileDict
from opencore.models.interfaces import IProfileDict
from opencore.views.tests import DummyAPI, DummyContext
def _checkCookie(request_or_response, filterby):
    """Assert that the content-feed filter cookie was set to *filterby*.

    Accepts either a response-like object exposing ``headerlist`` or a
    request-like object exposing ``response_headerlist``.
    """
    from opencore.views.contentfeeds import _FILTER_COOKIE
    cookie_value = '%s=%s; Path=/' % (_FILTER_COOKIE, filterby)
    expected = ('Set-Cookie', cookie_value)
    headers = getattr(request_or_response, 'headerlist', None)
    if headers is None:
        headers = getattr(request_or_response, 'response_headerlist')
    assert expected in headers
class ProfileFeedViewTestCase(unittest.TestCase):
    """Tests for ``opencore.views.contentfeeds.profile_feed_view``."""

    def setUp(self):
        # Reset the repoze.bfg component registry before each test.
        cleanUp()

    def tearDown(self):
        cleanUp()

    def _callFUT(self, context, request):
        # Call the function under test; imported late so the registry set
        # up in setUp is in effect.
        from opencore.views.contentfeeds import profile_feed_view
        return profile_feed_view(context, request)

    def test_profile_feed_view_returns_profile_and_actions(self):
        # Stand-in for helpers expected to return an iterable (thumb_url,
        # get_preferred_communities).
        def _dummy(*args_ignored, **kwargs_ignored):
            return []
        # Stand-in for helpers expected to return an object with minimal
        # attributes/lookup methods (find_users, find_root,
        # comments_to_display).
        def _dummy2(*args_ignored, **kwargs_ignored):
            class _Dummy(object):
                __name__ = 'dummy'
                communities_name = __name__
                def get_by_id(self, *args_ignored, **kwargs_ignored):
                    return {'groups': []}
                get = get_by_id
            return _Dummy()
        # Patch every external helper at each module path where the view
        # (or its collaborators) reference it.
        with Replacer() as r:
            r.replace('opencore.utilities.image.thumb_url', _dummy)
            r.replace('opencore.views.people.thumb_url', _dummy)
            r.replace('opencore.models.adapters.thumb_url', _dummy)
            r.replace('opencore.utils.find_users', _dummy2)
            r.replace('opencore.views.people.find_users', _dummy2)
            r.replace('opencore.utils.find_root', _dummy2)
            r.replace('opencore.views.utils.comments_to_display', _dummy2)
            r.replace('opencore.views.communities.get_preferred_communities', _dummy)
            r.replace('opencore.views.people.get_preferred_communities', _dummy)
            r.replace('opencore.views.people.comments_to_display', _dummy2)
            api = DummyAPI()
            # Register the named adapter the view looks up.
            testing.registerUtility(ProfileDict(), IProfileDict, 'profile-details')
            context = DummyContext()
            request = testing.DummyRequest()
            request.api = api
            result = self._callFUT(context, request)
            # The view must expose the viewed profile and a non-empty list
            # of actions.
            profile = result.get('profile_currently_viewed', False)
            self.assertTrue(profile)
            self.assertEquals(profile['title'], context.title)
            self.assertTrue(len(result.get('actions', [])) > 0)
class NewestFeedItemsViewTests(unittest.TestCase):
    def setUp(self):
        # Reset the repoze.bfg component registry before each test.
        cleanUp()

    def tearDown(self):
        cleanUp()
    def _callFUT(self, context, request):
        # Call the function under test; imported late so the registry set
        # up in setUp is in effect.
        from opencore.views.contentfeeds import newest_feed_items
        return newest_feed_items(context, request)
def test_without_parameter(self):
context = testing.DummyModel()
request = testing.DummyRequest()
context.events = DummyEvents()
(last_gen, last_index, earliest_gen, earliest_index,
feed_items) = self._callFUT(context, request)
self.assertEqual(last_gen, 1)
self.assertEqual(last_index, 2)
self.assertEqual(earliest_gen, 1)
self.assertEqual(earliest_index, 2)
self.assertEqual(len(feed_items), 1)
self.assertEqual(feed_items[0]['content_type'], 'Blog Entry')
self.assertTrue('allowed' not in feed_items[0])
self.assertEqual(feed_items[0]['timeago'], '2010-07-14T12:47:12Z')
def test_filter_cookie_empty_no_param(self):
context = testing.DummyModel()
request = testing.DummyRequest()
context.events = DummyEvents()
(last_gen, last_index, earliest_gen, earliest_index,
feed_items) = self._callFUT(context, request)
_checkCookie(request, '')
def test_filter_cookie_empty(self):
context = testing.DummyModel()
request = testing.DummyRequest()
context.events = DummyEvents()
request.params = {
'filter': '',
}
(last_gen, last_index, earliest_gen, earliest_index,
feed_items) = self._callFUT(context, request)
_checkCookie(request, '')
def test_filter_cookie_mycommunities(self):
context = testing.DummyModel()
request = testing.DummyRequest()
context.events = DummyEvents()
request.params = {
'filter': 'mycommunities',
}
(last_gen, last_index, earliest_gen, earliest_index,
feed_items) = self._callFUT(context, request)
_checkCookie(request, 'mycommunities')
def test_filter_cookie_mycontent(self):
context = testing.DummyModel()
request = testing.DummyRequest()
context.events = DummyEvents()
request.params = {
'filter': 'mycontent',
}
(last_gen, last_index, earliest_gen, earliest_index,
feed_items) = self._callFUT(context, request)
_checkCookie(request, 'mycontent')
def test_filter_cookie_profile(self):
context = testing.DummyModel()
request = testing.DummyRequest()
context.events = DummyEvents()
request.params = {
'filter': 'profile:phred',
}
(last_gen, last_index, earliest_gen, earliest_index,
feed_items) = self._callFUT(context, request)
_checkCookie(request, 'profile:phred')
def test_filter_cookie_community(self):
context = testing.DummyModel()
request = testing.DummyRequest()
context.events = DummyEvents()
request.params = {
'filter': 'community:bhedrock',
}
(last_gen, last_index, earliest_gen, earliest_index,
feed_items) = self._callFUT(context, request)
_checkCookie(request, 'community:bhedrock')
def test_with_parameter_results(self):
context = testing.DummyModel()
request = testing.DummyRequest()
context.events = DummyEvents()
request.params = {
'newer_than': '0:0',
}
(last_gen, last_index, earliest_gen, earliest_index,
feed_items) = self._callFUT(context, request)
self.assertEqual(last_gen, 0)
self.assertEqual(last_index, 1)
self.assertEqual(earliest_gen, 0)
self.assertEqual(earliest_index, 0)
self.assertEqual(len(feed_items), 2)
self.assertEqual(feed_items[0]['content_type'], 'Blog Entry')
self.assertTrue('allowed' not in feed_items[0])
self.assertEqual(feed_items[0]['timeago'], '2010-07-13T12:47:12Z')
def test_with_parameter_noresults(self):
context = testing.DummyModel()
request = testing.DummyRequest()
context.events = DummyEventsEmpty()
request.params = {
'newer_than': '0:0',
}
(last_gen, last_index, earliest_gen, earliest_index,
feed_items) = self._callFUT(context, request)
self.assertEqual(last_gen, 0)
self.assertEqual(last_index, 0)
self.assertEqual(earliest_gen, 0)
self.assertEqual(earliest_index, 0)
self.assertEqual(len(feed_items), 0)
class OlderFeedItemsViewTests(unittest.TestCase):
    """Tests for opencore.views.contentfeeds.older_feed_items."""

    def setUp(self):
        cleanUp()

    def tearDown(self):
        cleanUp()

    def _callFUT(self, context, request):
        from opencore.views.contentfeeds import older_feed_items
        return older_feed_items(context, request)

    def _invoke(self, params=None, empty=False):
        # Build a dummy context/request pair, attach the canned events
        # source, and run the view.
        context = testing.DummyModel()
        request = testing.DummyRequest()
        context.events = DummyEventsEmpty() if empty else DummyEvents()
        if params is not None:
            request.params = params
        return self._callFUT(context, request)

    def test_without_parameter(self):
        earliest_gen, earliest_index, feed_items = self._invoke()
        self.assertEqual(earliest_gen, -1)
        self.assertEqual(earliest_index, -1)
        self.assertEqual(len(feed_items), 0)

    def test_with_parameter_results(self):
        earliest_gen, earliest_index, feed_items = self._invoke(
            params={'older_than': '0:5'})
        self.assertEqual(earliest_gen, 2)
        self.assertEqual(earliest_index, 3)
        self.assertEqual(len(feed_items), 1)
        self.assertEqual(feed_items[0]['content_type'], 'Community')
        self.assertTrue('allowed' not in feed_items[0])
        self.assertEqual(feed_items[0]['timeago'], '2010-07-15T13:47:12Z')

    def test_with_parameter_noresults(self):
        earliest_gen, earliest_index, feed_items = self._invoke(
            params={'older_than': '0:5'}, empty=True)
        self.assertEqual(earliest_gen, 0)
        self.assertEqual(earliest_index, 5)
        self.assertEqual(len(feed_items), 0)
class DummyEvents:
    """Canned site-events source: every query returns a fixed result set.

    Each result is a (generation, index, mapping) tuple, mirroring the
    shape the content-feed views expect.
    """

    def checked(self, principals, created_by):
        # Single 'Blog Entry' event at generation 1, index 2.
        return [
            (1, 2, {'foo': 'bam',
                    'allowed': ['phred', 'bharney'],
                    'content_creator': 'phred',
                    'content_type': 'Blog Entry',
                    'timestamp': datetime(2010, 7, 14, 12, 47, 12),
                    'context_url': None}),
        ]

    def newer(self, gen, index, principals, created_by):
        # Two events newer than the given (gen, index).
        return [
            (0, 1, {'foo': 'bam',
                    'allowed': ['phred', 'bharney'],
                    'content_creator': 'phred',
                    'content_type': 'Blog Entry',
                    'timestamp': datetime(2010, 7, 13, 12, 47, 12)}),
            (0, 0, {'foo': 'bar',
                    'allowed': ['phred', 'bharney'],
                    'userid': 'phred',
                    'content_type': 'Community',
                    'timestamp': datetime(2010, 7, 13, 13, 47, 12)}),
        ]

    def older(self, gen, index, principals, created_by):
        # Single 'Community' event older than the given (gen, index).
        return [
            (2, 3, {'foo': 'bar',
                    'allowed': ['phred', 'bharney'],
                    'userid': 'phred',
                    'content_type': 'Community',
                    'timestamp': datetime(2010, 7, 15, 13, 47, 12),
                    'context_url': '/foo'}),
        ]
class DummyEventsEmpty:
    """Events source with no events: every query yields an empty list."""

    def checked(self, principals, created_by):
        return []

    def newer(self, gen, index, principals, created_by):
        return []

    def older(self, gen, index, principals, created_by):
        return []
| gpl-2.0 |
ol-loginov/intellij-community | python/helpers/docutils/parsers/rst/states.py | 41 | 129110 | # $Id: states.py 6314 2010-04-26 10:04:17Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This is the ``docutils.parsers.restructuredtext.states`` module, the core of
the reStructuredText parser. It defines the following:
:Classes:
- `RSTStateMachine`: reStructuredText parser's entry point.
- `NestedStateMachine`: recursive StateMachine.
- `RSTState`: reStructuredText State superclass.
- `Inliner`: For parsing inline markup.
- `Body`: Generic classifier of the first line of a block.
- `SpecializedBody`: Superclass for compound element members.
- `BulletList`: Second and subsequent bullet_list list_items
- `DefinitionList`: Second+ definition_list_items.
- `EnumeratedList`: Second+ enumerated_list list_items.
- `FieldList`: Second+ fields.
- `OptionList`: Second+ option_list_items.
- `RFC2822List`: Second+ RFC2822-style fields.
- `ExtensionOptions`: Parses directive option fields.
- `Explicit`: Second+ explicit markup constructs.
- `SubstitutionDef`: For embedded directives in substitution definitions.
- `Text`: Classifier of second line of a text block.
- `SpecializedText`: Superclass for continuation lines of Text-variants.
- `Definition`: Second line of potential definition_list_item.
- `Line`: Second line of overlined section title or transition marker.
- `Struct`: An auxiliary collection class.
:Exception classes:
- `MarkupError`
- `ParserError`
- `MarkupMismatch`
:Functions:
- `escape2null()`: Return a string, escape-backslashes converted to nulls.
- `unescape()`: Return a string, nulls removed or restored to backslashes.
:Attributes:
- `state_classes`: set of State classes used with `RSTStateMachine`.
Parser Overview
===============
The reStructuredText parser is implemented as a recursive state machine,
examining its input one line at a time. To understand how the parser works,
please first become familiar with the `docutils.statemachine` module. In the
description below, references are made to classes defined in this module;
please see the individual classes for details.
Parsing proceeds as follows:
1. The state machine examines each line of input, checking each of the
transition patterns of the state `Body`, in order, looking for a match.
The implicit transitions (blank lines and indentation) are checked before
any others. The 'text' transition is a catch-all (matches anything).
2. The method associated with the matched transition pattern is called.
A. Some transition methods are self-contained, appending elements to the
document tree (`Body.doctest` parses a doctest block). The parser's
current line index is advanced to the end of the element, and parsing
continues with step 1.
B. Other transition methods trigger the creation of a nested state machine,
whose job is to parse a compound construct ('indent' does a block quote,
'bullet' does a bullet list, 'overline' does a section [first checking
for a valid section header], etc.).
- In the case of lists and explicit markup, a one-off state machine is
created and run to parse contents of the first item.
- A new state machine is created and its initial state is set to the
appropriate specialized state (`BulletList` in the case of the
'bullet' transition; see `SpecializedBody` for more detail). This
state machine is run to parse the compound element (or series of
explicit markup elements), and returns as soon as a non-member element
is encountered. For example, the `BulletList` state machine ends as
soon as it encounters an element which is not a list item of that
bullet list. The optional omission of inter-element blank lines is
enabled by this nested state machine.
- The current line index is advanced to the end of the elements parsed,
and parsing continues with step 1.
C. The result of the 'text' transition depends on the next line of text.
The current state is changed to `Text`, under which the second line is
examined. If the second line is:
- Indented: The element is a definition list item, and parsing proceeds
similarly to step 2.B, using the `DefinitionList` state.
- A line of uniform punctuation characters: The element is a section
header; again, parsing proceeds as in step 2.B, and `Body` is still
used.
- Anything else: The element is a paragraph, which is examined for
inline markup and appended to the parent element. Processing
continues with step 1.
"""
__docformat__ = 'reStructuredText'
import sys
import re
import roman
from types import FunctionType, MethodType
from docutils import nodes, statemachine, utils, urischemes
from docutils import ApplicationError, DataError
from docutils.statemachine import StateMachineWS, StateWS
from docutils.nodes import fully_normalize_name as normalize_name
from docutils.nodes import whitespace_normalize_name
from docutils.utils import escape2null, unescape, column_width
import docutils.parsers.rst
from docutils.parsers.rst import directives, languages, tableparser, roles
from docutils.parsers.rst.languages import en as _fallback_language_module
# Exception types raised by the reStructuredText parser:
class MarkupError(DataError): pass                       # malformed construct in input
class UnknownInterpretedRoleError(DataError): pass       # interpreted-text role not registered
class InterpretedRoleNotImplementedError(DataError): pass  # role recognized but unsupported
class ParserError(ApplicationError): pass                # internal parser failure
class MarkupMismatch(Exception): pass                    # tentative match abandoned
class Struct:
    """Simple record type: keyword arguments become instance attributes."""

    def __init__(self, **keywordargs):
        for name, value in keywordargs.items():
            setattr(self, name, value)
class RSTStateMachine(StateMachineWS):
    """
    reStructuredText's master StateMachine.
    The entry point to reStructuredText parsing is the `run()` method.
    """
    def run(self, input_lines, document, input_offset=0, match_titles=1,
            inliner=None):
        """
        Parse `input_lines` and modify the `document` node in place.
        Extend `StateMachineWS.run()`: set up parse-global data and
        run the StateMachine.
        """
        self.language = languages.get_language(
            document.settings.language_code)
        self.match_titles = match_titles
        if inliner is None:
            # No inliner supplied: build a default one and customize it
            # from the document settings (PEP/RFC recognition, etc.).
            inliner = Inliner()
        inliner.init_customizations(document.settings)
        # `memo` carries parse-global state; it is shared with every
        # nested state machine spawned during this parse.
        self.memo = Struct(document=document,
                           reporter=document.reporter,
                           language=self.language,
                           title_styles=[],
                           section_level=0,
                           section_bubble_up_kludge=0,
                           inliner=inliner)
        self.document = document
        self.attach_observer(document.note_source)
        self.reporter = self.memo.reporter
        self.node = document
        results = StateMachineWS.run(self, input_lines, input_offset,
                                     input_source=document['source'])
        # The top-level run consumes everything into `document` directly.
        assert results == [], 'RSTStateMachine.run() results should be empty!'
        self.node = self.memo = None # remove unneeded references
class NestedStateMachine(StateMachineWS):
    """
    StateMachine run from within other StateMachine runs, to parse nested
    document structures.
    """
    def run(self, input_lines, input_offset, memo, node, match_titles=1):
        """
        Parse `input_lines` and populate a `docutils.nodes.document` instance.
        Extend `StateMachineWS.run()`: set up document-wide data.
        """
        self.match_titles = match_titles
        # Reuse the parse-global memo created by the master state machine
        # instead of building new document/reporter/language references.
        self.memo = memo
        self.document = memo.document
        self.attach_observer(self.document.note_source)
        self.reporter = memo.reporter
        self.language = memo.language
        self.node = node
        results = StateMachineWS.run(self, input_lines, input_offset)
        assert results == [], ('NestedStateMachine.run() results should be '
                               'empty!')
        return results
class RSTState(StateWS):
    """
    reStructuredText State superclass.
    Contains methods used by all State subclasses.
    """
    nested_sm = NestedStateMachine
    # Class-level cache of idle nested state machines; reused by
    # `nested_parse` to avoid re-creating transition tables.
    nested_sm_cache = []
    def __init__(self, state_machine, debug=0):
        self.nested_sm_kwargs = {'state_classes': state_classes,
                                 'initial_state': 'Body'}
        StateWS.__init__(self, state_machine, debug)
    def runtime_init(self):
        StateWS.runtime_init(self)
        memo = self.state_machine.memo
        self.memo = memo
        self.reporter = memo.reporter
        self.inliner = memo.inliner
        self.document = memo.document
        self.parent = self.state_machine.node
        # enable the reporter to determine source and source-line
        if not hasattr(self.reporter, 'locator'):
            self.reporter.locator = self.state_machine.get_source_and_line
            # print "adding locator to reporter", self.state_machine.input_offset
    def goto_line(self, abs_line_offset):
        """
        Jump to input line `abs_line_offset`, ignoring jumps past the end.
        """
        try:
            self.state_machine.goto_line(abs_line_offset)
        except EOFError:
            pass
    def no_match(self, context, transitions):
        """
        Override `StateWS.no_match` to generate a system message.
        This code should never be run.
        """
        src, srcline = self.state_machine.get_source_and_line()
        self.reporter.severe(
            'Internal error: no transition pattern match. State: "%s"; '
            'transitions: %s; context: %s; current line: %r.'
            % (self.__class__.__name__, transitions, context,
               self.state_machine.line),
            source=src, line=srcline)
        return context, None, []
    def bof(self, context):
        """Called at beginning of file."""
        return [], []
    def nested_parse(self, block, input_offset, node, match_titles=0,
                     state_machine_class=None, state_machine_kwargs=None):
        """
        Create a new StateMachine rooted at `node` and run it over the input
        `block`.
        """
        use_default = 0
        if state_machine_class is None:
            state_machine_class = self.nested_sm
            use_default += 1
        if state_machine_kwargs is None:
            state_machine_kwargs = self.nested_sm_kwargs
            use_default += 1
        block_length = len(block)
        state_machine = None
        # Only fully-default machines are interchangeable, so the cache is
        # consulted only when both class and kwargs were defaulted.
        if use_default == 2:
            try:
                state_machine = self.nested_sm_cache.pop()
            except IndexError:
                pass
        if not state_machine:
            state_machine = state_machine_class(debug=self.debug,
                                                **state_machine_kwargs)
        state_machine.run(block, input_offset, memo=self.memo,
                          node=node, match_titles=match_titles)
        if use_default == 2:
            self.nested_sm_cache.append(state_machine)
        else:
            state_machine.unlink()
        new_offset = state_machine.abs_line_offset()
        # No `block.parent` implies disconnected -- lines aren't in sync:
        if block.parent and (len(block) - block_length) != 0:
            # Adjustment for block if modified in nested parse:
            self.state_machine.next_line(len(block) - block_length)
        return new_offset
    def nested_list_parse(self, block, input_offset, node, initial_state,
                          blank_finish,
                          blank_finish_state=None,
                          extra_settings={},
                          match_titles=0,
                          state_machine_class=None,
                          state_machine_kwargs=None):
        """
        Create a new StateMachine rooted at `node` and run it over the input
        `block`. Also keep track of optional intermediate blank lines and the
        required final one.
        """
        if state_machine_class is None:
            state_machine_class = self.nested_sm
        if state_machine_kwargs is None:
            state_machine_kwargs = self.nested_sm_kwargs.copy()
        state_machine_kwargs['initial_state'] = initial_state
        state_machine = state_machine_class(debug=self.debug,
                                            **state_machine_kwargs)
        if blank_finish_state is None:
            blank_finish_state = initial_state
        state_machine.states[blank_finish_state].blank_finish = blank_finish
        # `extra_settings` is only read here, so the mutable default is safe.
        for key, value in extra_settings.items():
            setattr(state_machine.states[initial_state], key, value)
        state_machine.run(block, input_offset, memo=self.memo,
                          node=node, match_titles=match_titles)
        blank_finish = state_machine.states[blank_finish_state].blank_finish
        state_machine.unlink()
        return state_machine.abs_line_offset(), blank_finish
    def section(self, title, source, style, lineno, messages):
        """Check for a valid subsection and create one if it checks out."""
        if self.check_subsection(source, style, lineno):
            self.new_subsection(title, lineno, messages)
    def check_subsection(self, source, style, lineno):
        """
        Check for a valid subsection header. Return 1 (true) or None (false).
        When a new section is reached that isn't a subsection of the current
        section, back up the line count (use ``previous_line(-x)``), then
        ``raise EOFError``. The current StateMachine will finish, then the
        calling StateMachine can re-examine the title. This will work its way
        back up the calling chain until the correct section level is reached.
        @@@ Alternative: Evaluate the title, store the title info & level, and
        back up the chain until that level is reached. Store in memo? Or
        return in results?
        :Exception: `EOFError` when a sibling or supersection encountered.
        """
        memo = self.memo
        title_styles = memo.title_styles
        mylevel = memo.section_level
        try: # check for existing title style
            level = title_styles.index(style) + 1
        except ValueError: # new title style
            if len(title_styles) == memo.section_level: # new subsection
                title_styles.append(style)
                return 1
            else: # not at lowest level
                self.parent += self.title_inconsistent(source, lineno)
                return None
        if level <= mylevel: # sibling or supersection
            memo.section_level = level # bubble up to parent section
            if len(style) == 2:
                memo.section_bubble_up_kludge = 1
            # back up 2 lines for underline title, 3 for overline title
            self.state_machine.previous_line(len(style) + 1)
            raise EOFError # let parent section re-evaluate
        if level == mylevel + 1: # immediate subsection
            return 1
        else: # invalid subsection
            self.parent += self.title_inconsistent(source, lineno)
            return None
    def title_inconsistent(self, sourcetext, lineno):
        # Build a severe system message quoting the offending title text.
        src, srcline = self.state_machine.get_source_and_line(lineno)
        error = self.reporter.severe(
            'Title level inconsistent:', nodes.literal_block('', sourcetext),
            source=src, line=srcline)
        return error
    def new_subsection(self, title, lineno, messages):
        """Append new subsection to document tree. On return, check level."""
        memo = self.memo
        mylevel = memo.section_level
        memo.section_level += 1
        section_node = nodes.section()
        self.parent += section_node
        textnodes, title_messages = self.inline_text(title, lineno)
        titlenode = nodes.title(title, '', *textnodes)
        name = normalize_name(titlenode.astext())
        section_node['names'].append(name)
        section_node += titlenode
        section_node += messages
        section_node += title_messages
        self.document.note_implicit_target(section_node, section_node)
        offset = self.state_machine.line_offset + 1
        absoffset = self.state_machine.abs_line_offset() + 1
        # Recursively parse the section body into the new section node.
        newabsoffset = self.nested_parse(
            self.state_machine.input_lines[offset:], input_offset=absoffset,
            node=section_node, match_titles=1)
        self.goto_line(newabsoffset)
        if memo.section_level <= mylevel: # can't handle next section?
            raise EOFError # bubble up to supersection
        # reset section_level; next pass will detect it properly
        memo.section_level = mylevel
    def paragraph(self, lines, lineno):
        """
        Return a list (paragraph & messages) & a boolean: literal_block next?
        """
        data = '\n'.join(lines).rstrip()
        # An unescaped '::' at the end announces a literal block to follow.
        if re.search(r'(?<!\\)(\\\\)*::$', data):
            if len(data) == 2:
                # Paragraph is nothing but '::': no paragraph is emitted.
                return [], 1
            elif data[-3] in ' \n':
                # ' ::' form: strip the marker (and trailing whitespace).
                text = data[:-3].rstrip()
            else:
                # 'text::' form: keep a single trailing colon.
                text = data[:-1]
            literalnext = 1
        else:
            text = data
            literalnext = 0
        textnodes, messages = self.inline_text(text, lineno)
        p = nodes.paragraph(data, '', *textnodes)
        p.source, p.line = self.state_machine.get_source_and_line(lineno)
        return [p] + messages, literalnext
    def inline_text(self, text, lineno):
        """
        Return 2 lists: nodes (text and inline elements), and system_messages.
        """
        return self.inliner.parse(text, lineno, self.memo, self.parent)
    def unindent_warning(self, node_name):
        # the actual problem is one line below the current line
        src, srcline = self.state_machine.get_source_and_line()
        return self.reporter.warning('%s ends without a blank line; '
                                     'unexpected unindent.' % node_name,
                                     source=src, line=srcline+1)
def build_regexp(definition, compile=1):
    """
    Build, compile and return a regular expression based on `definition`.

    :Parameter: `definition`: a 4-tuple (group name, prefix, suffix, parts),
        where "parts" is a list of regular expressions and/or regular
        expression definitions to be joined into an or-group.

    If `compile` is true, return a compiled pattern (with re.UNICODE);
    otherwise return the pattern source string.
    """
    name, prefix, suffix, parts = definition
    # Nested 4-tuple definitions expand recursively (uncompiled); plain
    # strings pass through untouched.
    part_strings = [build_regexp(part, None) if type(part) is tuple else part
                    for part in parts]
    or_group = '|'.join(part_strings)
    regexp = '%(prefix)s(?P<%(name)s>%(or_group)s)%(suffix)s' % locals()
    if compile:
        return re.compile(regexp, re.UNICODE)
    return regexp
class Inliner:
"""
Parse inline markup; call the `parse()` method.
"""
    def __init__(self):
        # Standalone-URI recognition is always active; PEP/RFC recognizers
        # may be appended later by `init_customizations`.
        self.implicit_dispatch = [(self.patterns.uri, self.standalone_uri),]
        """List of (pattern, bound method) tuples, used by
        `self.implicit_inline`."""
def init_customizations(self, settings):
"""Setting-based customizations; run when parsing begins."""
if settings.pep_references:
self.implicit_dispatch.append((self.patterns.pep,
self.pep_reference))
if settings.rfc_references:
self.implicit_dispatch.append((self.patterns.rfc,
self.rfc_reference))
    def parse(self, text, lineno, memo, parent):
        # Needs to be refactored for nested inline markup.
        # Add nested_parse() method?
        """
        Return 2 lists: nodes (text and inline elements), and system_messages.
        Using `self.patterns.initial`, a pattern which matches start-strings
        (emphasis, strong, interpreted, phrase reference, literal,
        substitution reference, and inline target) and complete constructs
        (simple reference, footnote reference), search for a candidate. When
        one is found, check for validity (e.g., not a quoted '*' character).
        If valid, search for the corresponding end string if applicable, and
        check it for validity. If not found or invalid, generate a warning
        and ignore the start-string. Implicit inline markup (e.g. standalone
        URIs) is found last.
        """
        self.reporter = memo.reporter
        self.document = memo.document
        self.language = memo.language
        self.parent = parent
        pattern_search = self.patterns.initial.search
        dispatch = self.dispatch
        # Null-prefix backslash-escaped characters so the markup regexes
        # below won't match them.
        remaining = escape2null(text)
        processed = []
        unprocessed = []
        messages = []
        while remaining:
            match = pattern_search(remaining)
            if match:
                groups = match.groupdict()
                # Exactly one of these named groups matched; it selects the
                # handler method for the recognized construct.
                method = dispatch[groups['start'] or groups['backquote']
                                  or groups['refend'] or groups['fnend']]
                before, inlines, remaining, sysmessages = method(self, match,
                                                                 lineno)
                unprocessed.append(before)
                messages += sysmessages
                if inlines:
                    # Text preceding the construct gets implicit-markup
                    # treatment before the new inline nodes are appended.
                    processed += self.implicit_inline(''.join(unprocessed),
                                                      lineno)
                    processed += inlines
                    unprocessed = []
            else:
                break
        remaining = ''.join(unprocessed) + remaining
        if remaining:
            # Leftover text: implicit inline markup (standalone URIs etc.)
            # only.
            processed += self.implicit_inline(remaining, lineno)
        return processed, messages
openers = u'\'"([{<\u2018\u201c\xab\u00a1\u00bf' # see quoted_start below
closers = u'\'")]}>\u2019\u201d\xbb!?'
unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0'
start_string_prefix = (u'((?<=^)|(?<=[-/: \\n\u2019%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(openers)))
end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(closers)))
non_whitespace_before = r'(?<![ \n])'
non_whitespace_escape_before = r'(?<![ \n\x00])'
non_whitespace_after = r'(?![ \n])'
# Alphanumerics with isolated internal [-._+:] chars (i.e. not 2 together):
simplename = r'(?:(?!_)\w)+(?:[-._+:](?:(?!_)\w)+)*'
# Valid URI characters (see RFC 2396 & RFC 2732);
# final \x00 allows backslash escapes in URIs:
uric = r"""[-_.!~*'()[\];/:@&=+$,%a-zA-Z0-9\x00]"""
# Delimiter indicating the end of a URI (not part of the URI):
uri_end_delim = r"""[>]"""
# Last URI character; same as uric but no punctuation:
urilast = r"""[_~*/=+a-zA-Z0-9]"""
# End of a URI (either 'urilast' or 'uric followed by a
# uri_end_delim'):
uri_end = r"""(?:%(urilast)s|%(uric)s(?=%(uri_end_delim)s))""" % locals()
emailc = r"""[-_!~*'{|}/#?^`&=+$%a-zA-Z0-9\x00]"""
email_pattern = r"""
%(emailc)s+(?:\.%(emailc)s+)* # name
(?<!\x00)@ # at
%(emailc)s+(?:\.%(emailc)s*)* # host
%(uri_end)s # final URI char
"""
parts = ('initial_inline', start_string_prefix, '',
[('start', '', non_whitespace_after, # simple start-strings
[r'\*\*', # strong
r'\*(?!\*)', # emphasis but not strong
r'``', # literal
r'_`', # inline internal target
r'\|(?!\|)'] # substitution reference
),
('whole', '', end_string_suffix, # whole constructs
[# reference name & end-string
r'(?P<refname>%s)(?P<refend>__?)' % simplename,
('footnotelabel', r'\[', r'(?P<fnend>\]_)',
[r'[0-9]+', # manually numbered
r'\#(%s)?' % simplename, # auto-numbered (w/ label?)
r'\*', # auto-symbol
r'(?P<citationlabel>%s)' % simplename] # citation reference
)
]
),
('backquote', # interpreted text or phrase reference
'(?P<role>(:%s:)?)' % simplename, # optional role
non_whitespace_after,
['`(?!`)'] # but not literal
)
]
)
patterns = Struct(
initial=build_regexp(parts),
emphasis=re.compile(non_whitespace_escape_before
+ r'(\*)' + end_string_suffix),
strong=re.compile(non_whitespace_escape_before
+ r'(\*\*)' + end_string_suffix),
interpreted_or_phrase_ref=re.compile(
r"""
%(non_whitespace_escape_before)s
(
`
(?P<suffix>
(?P<role>:%(simplename)s:)?
(?P<refend>__?)?
)
)
%(end_string_suffix)s
""" % locals(), re.VERBOSE | re.UNICODE),
embedded_uri=re.compile(
r"""
(
(?:[ \n]+|^) # spaces or beginning of line/string
< # open bracket
%(non_whitespace_after)s
([^<>\x00]+) # anything but angle brackets & nulls
%(non_whitespace_before)s
> # close bracket w/o whitespace before
)
$ # end of string
""" % locals(), re.VERBOSE),
literal=re.compile(non_whitespace_before + '(``)'
+ end_string_suffix),
target=re.compile(non_whitespace_escape_before
+ r'(`)' + end_string_suffix),
substitution_ref=re.compile(non_whitespace_escape_before
+ r'(\|_{0,2})'
+ end_string_suffix),
email=re.compile(email_pattern % locals() + '$', re.VERBOSE),
uri=re.compile(
(r"""
%(start_string_prefix)s
(?P<whole>
(?P<absolute> # absolute URI
(?P<scheme> # scheme (http, ftp, mailto)
[a-zA-Z][a-zA-Z0-9.+-]*
)
:
(
( # either:
(//?)? # hierarchical URI
%(uric)s* # URI characters
%(uri_end)s # final URI char
)
( # optional query
\?%(uric)s*
%(uri_end)s
)?
( # optional fragment
\#%(uric)s*
%(uri_end)s
)?
)
)
| # *OR*
(?P<email> # email address
""" + email_pattern + r"""
)
)
%(end_string_suffix)s
""") % locals(), re.VERBOSE),
pep=re.compile(
r"""
%(start_string_prefix)s
(
(pep-(?P<pepnum1>\d+)(.txt)?) # reference to source file
|
(PEP\s+(?P<pepnum2>\d+)) # reference by name
)
%(end_string_suffix)s""" % locals(), re.VERBOSE),
rfc=re.compile(
r"""
%(start_string_prefix)s
(RFC(-|\s+)?(?P<rfcnum>\d+))
%(end_string_suffix)s""" % locals(), re.VERBOSE))
def quoted_start(self, match):
"""Return 1 if inline markup start-string is 'quoted', 0 if not."""
string = match.string
start = match.start()
end = match.end()
if start == 0: # start-string at beginning of text
return 0
prestart = string[start - 1]
try:
poststart = string[end]
if self.openers.index(prestart) \
== self.closers.index(poststart): # quoted
return 1
except IndexError: # start-string at end of text
return 1
except ValueError: # not quoted
pass
return 0
    def inline_obj(self, match, lineno, end_pattern, nodeclass,
                   restore_backslashes=0):
        # Generic handler for simple start-string/end-string constructs.
        # Returns a 5-tuple: (text before, [node], text after, [messages],
        # end-string matched).
        string = match.string
        matchstart = match.start('start')
        matchend = match.end('start')
        if self.quoted_start(match):
            # Quoted start-string: not markup; pass it through unchanged.
            return (string[:matchend], [], string[matchend:], [], '')
        endmatch = end_pattern.search(string[matchend:])
        if endmatch and endmatch.start(1): # 1 or more chars
            text = unescape(endmatch.string[:endmatch.start(1)],
                            restore_backslashes)
            textend = matchend + endmatch.end(1)
            rawsource = unescape(string[matchstart:textend], 1)
            return (string[:matchstart], [nodeclass(rawsource, text)],
                    string[textend:], [], endmatch.group(1))
        # No valid end-string found: warn and emit a `problematic` node.
        msg = self.reporter.warning(
            'Inline %s start-string without end-string.'
            % nodeclass.__name__, line=lineno)
        text = unescape(string[matchstart:matchend], 1)
        rawsource = unescape(string[matchstart:matchend], 1)
        prb = self.problematic(text, rawsource, msg)
        return string[:matchstart], [prb], string[matchend:], [msg], ''
def problematic(self, text, rawsource, message):
msgid = self.document.set_id(message, self.parent)
problematic = nodes.problematic(rawsource, text, refid=msgid)
prbid = self.document.set_id(problematic)
message.add_backref(prbid)
return problematic
def emphasis(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.emphasis, nodes.emphasis)
return before, inlines, remaining, sysmessages
def strong(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.strong, nodes.strong)
return before, inlines, remaining, sysmessages
    def interpreted_or_phrase_ref(self, match, lineno):
        # Backquoted text: either interpreted text (role prefix or suffix)
        # or a phrase reference (trailing '_' / '__').
        end_pattern = self.patterns.interpreted_or_phrase_ref
        string = match.string
        matchstart = match.start('backquote')
        matchend = match.end('backquote')
        rolestart = match.start('role')
        role = match.group('role')
        position = ''
        if role:
            role = role[1:-1] # strip the surrounding colons
            position = 'prefix'
        elif self.quoted_start(match):
            # Quoted backquote: not markup.
            return (string[:matchend], [], string[matchend:], [])
        endmatch = end_pattern.search(string[matchend:])
        if endmatch and endmatch.start(1): # 1 or more chars
            textend = matchend + endmatch.end()
            if endmatch.group('role'):
                if role:
                    msg = self.reporter.warning(
                        'Multiple roles in interpreted text (both '
                        'prefix and suffix present; only one allowed).',
                        line=lineno)
                    text = unescape(string[rolestart:textend], 1)
                    prb = self.problematic(text, text, msg)
                    return string[:rolestart], [prb], string[textend:], [msg]
                role = endmatch.group('suffix')[1:-1]
                position = 'suffix'
            escaped = endmatch.string[:endmatch.start(1)]
            rawsource = unescape(string[matchstart:textend], 1)
            if rawsource[-1:] == '_':
                # Trailing underscore: this is a phrase reference, which
                # cannot also carry a role.
                if role:
                    msg = self.reporter.warning(
                        'Mismatch: both interpreted text role %s and '
                        'reference suffix.' % position, line=lineno)
                    text = unescape(string[rolestart:textend], 1)
                    prb = self.problematic(text, text, msg)
                    return string[:rolestart], [prb], string[textend:], [msg]
                return self.phrase_ref(string[:matchstart], string[textend:],
                                       rawsource, escaped, unescape(escaped))
            else:
                rawsource = unescape(string[rolestart:textend], 1)
                nodelist, messages = self.interpreted(rawsource, escaped, role,
                                                      lineno)
                return (string[:rolestart], nodelist,
                        string[textend:], messages)
        # No end-string found: warn and emit a `problematic` node.
        msg = self.reporter.warning(
            'Inline interpreted text or phrase reference start-string '
            'without end-string.', line=lineno)
        text = unescape(string[matchstart:matchend], 1)
        prb = self.problematic(text, text, msg)
        return string[:matchstart], [prb], string[matchend:], [msg]
    def phrase_ref(self, before, after, rawsource, escaped, text):
        """
        Build a `nodes.reference` (and possibly an inline `nodes.target`
        for an embedded URI) for a phrase reference.  A ``__`` suffix in
        `rawsource` marks the reference as anonymous.
        """
        match = self.patterns.embedded_uri.search(escaped)
        if match:
            text = unescape(escaped[:match.start(0)])
            uri_text = match.group(2)
            uri = ''.join(uri_text.split())   # remove all whitespace from URI
            uri = self.adjust_uri(uri)
            if uri:
                target = nodes.target(match.group(1), refuri=uri)
            else:
                raise ApplicationError('problem with URI: %r' % uri_text)
            if not text:
                # no explicit link text: the URI doubles as the text
                text = uri
        else:
            target = None
        refname = normalize_name(text)
        reference = nodes.reference(rawsource, text,
                                    name=whitespace_normalize_name(text))
        node_list = [reference]
        if rawsource[-2:] == '__':
            # anonymous reference
            if target:
                reference['refuri'] = uri
            else:
                reference['anonymous'] = 1
        else:
            if target:
                # named reference with embedded URI: register the target too
                reference['refuri'] = uri
                target['names'].append(refname)
                self.document.note_explicit_target(target, self.parent)
                node_list.append(target)
            else:
                reference['refname'] = refname
                self.document.note_refname(reference)
        return before, node_list, after, []
def adjust_uri(self, uri):
match = self.patterns.email.match(uri)
if match:
return 'mailto:' + uri
else:
return uri
    def interpreted(self, rawsource, text, role, lineno):
        """
        Look up `role` via the `roles` registry and delegate to its role
        function; on an unknown role, emit an error and a problematic node.
        """
        role_fn, messages = roles.role(role, self.language, lineno,
                                       self.reporter)
        if role_fn:
            # NOTE(review): local name `nodes` shadows the `nodes` module here
            nodes, messages2 = role_fn(role, rawsource, text, lineno, self)
            return nodes, messages + messages2
        else:
            msg = self.reporter.error(
                'Unknown interpreted text role "%s".' % role,
                line=lineno)
            return ([self.problematic(rawsource, rawsource, msg)],
                    messages + [msg])
    def literal(self, match, lineno):
        """Handle ``literal`` inline markup (double backquotes); backslashes
        inside literals are restored verbatim."""
        before, inlines, remaining, sysmessages, endstring = self.inline_obj(
            match, lineno, self.patterns.literal, nodes.literal,
            restore_backslashes=1)
        return before, inlines, remaining, sysmessages
    def inline_internal_target(self, match, lineno):
        """Handle an inline internal target (_`name`); registers the
        normalized target name with the document."""
        before, inlines, remaining, sysmessages, endstring = self.inline_obj(
            match, lineno, self.patterns.target, nodes.target)
        if inlines and isinstance(inlines[0], nodes.target):
            assert len(inlines) == 1
            target = inlines[0]
            name = normalize_name(target.astext())
            target['names'].append(name)
            self.document.note_explicit_target(target, self.parent)
        return before, inlines, remaining, sysmessages
    def substitution_reference(self, match, lineno):
        """
        Handle a substitution reference (``|name|``).  A trailing ``_`` or
        ``__`` additionally wraps the substitution in a (named or
        anonymous) hyperlink reference node.
        """
        before, inlines, remaining, sysmessages, endstring = self.inline_obj(
            match, lineno, self.patterns.substitution_ref,
            nodes.substitution_reference)
        if len(inlines) == 1:
            subref_node = inlines[0]
            if isinstance(subref_node, nodes.substitution_reference):
                subref_text = subref_node.astext()
                self.document.note_substitution_ref(subref_node, subref_text)
                if endstring[-1:] == '_':
                    # reference form: wrap the substitution in a reference
                    reference_node = nodes.reference(
                        '|%s%s' % (subref_text, endstring), '')
                    if endstring[-2:] == '__':
                        reference_node['anonymous'] = 1
                    else:
                        reference_node['refname'] = normalize_name(subref_text)
                        self.document.note_refname(reference_node)
                    reference_node += subref_node
                    inlines = [reference_node]
        return before, inlines, remaining, sysmessages
    def footnote_reference(self, match, lineno):
        """
        Handles `nodes.footnote_reference` and `nodes.citation_reference`
        elements.

        Labels starting with ``#`` are auto-numbered; a ``*`` label is an
        auto-symbol footnote.
        """
        label = match.group('footnotelabel')
        refname = normalize_name(label)
        string = match.string
        before = string[:match.start('whole')]
        remaining = string[match.end('whole'):]
        if match.group('citationlabel'):
            refnode = nodes.citation_reference('[%s]_' % label,
                                               refname=refname)
            refnode += nodes.Text(label)
            self.document.note_citation_ref(refnode)
        else:
            refnode = nodes.footnote_reference('[%s]_' % label)
            if refname[0] == '#':
                # auto-numbered footnote: "#" alone or "#label"
                refname = refname[1:]
                refnode['auto'] = 1
                self.document.note_autofootnote_ref(refnode)
            elif refname == '*':
                # auto-symbol footnote
                refname = ''
                refnode['auto'] = '*'
                self.document.note_symbol_footnote_ref(
                    refnode)
            else:
                refnode += nodes.Text(label)
            if refname:
                refnode['refname'] = refname
                self.document.note_footnote_ref(refnode)
            if utils.get_trim_footnote_ref_space(self.document.settings):
                # settings may request stripping the space before the ref
                before = before.rstrip()
        return (before, [refnode], remaining, [])
    def reference(self, match, lineno, anonymous=None):
        """Handle a simple (single-word) hyperlink reference (``name_``);
        with `anonymous` true, an anonymous reference (``name__``)."""
        referencename = match.group('refname')
        refname = normalize_name(referencename)
        referencenode = nodes.reference(
            referencename + match.group('refend'), referencename,
            name=whitespace_normalize_name(referencename))
        if anonymous:
            referencenode['anonymous'] = 1
        else:
            referencenode['refname'] = refname
            self.document.note_refname(referencenode)
        string = match.string
        matchstart = match.start('whole')
        matchend = match.end('whole')
        return (string[:matchstart], [referencenode], string[matchend:], [])
    def anonymous_reference(self, match, lineno):
        """Handle an anonymous simple reference (``name__``)."""
        return self.reference(match, lineno, anonymous=1)
    def standalone_uri(self, match, lineno):
        """Implicit markup: turn a standalone URI or email address into a
        reference node; raise `MarkupMismatch` for unrecognized schemes."""
        if (not match.group('scheme')
            or match.group('scheme').lower() in urischemes.schemes):
            if match.group('email'):
                addscheme = 'mailto:'
            else:
                addscheme = ''
            text = match.group('whole')
            unescaped = unescape(text, 0)
            return [nodes.reference(unescape(text, 1), unescaped,
                                    refuri=addscheme + unescaped)]
        else:                   # not a valid scheme
            raise MarkupMismatch
    def pep_reference(self, match, lineno):
        """Implicit markup: turn "pep-NNN" / "PEP NNN" text into a reference
        to the PEP, using the configured base URL and file template."""
        text = match.group(0)
        if text.startswith('pep-'):
            pepnum = int(match.group('pepnum1'))
        elif text.startswith('PEP'):
            pepnum = int(match.group('pepnum2'))
        else:
            raise MarkupMismatch
        ref = (self.document.settings.pep_base_url
               + self.document.settings.pep_file_url_template % pepnum)
        unescaped = unescape(text, 0)
        return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
    # File-name template for RFC links, filled in with the RFC number.
    rfc_url = 'rfc%d.html'
    def rfc_reference(self, match, lineno):
        """Implicit markup: turn "RFC NNN" text into a reference using the
        configured RFC base URL; raise `MarkupMismatch` otherwise."""
        text = match.group(0)
        if text.startswith('RFC'):
            rfcnum = int(match.group('rfcnum'))
            ref = self.document.settings.rfc_base_url + self.rfc_url % rfcnum
        else:
            raise MarkupMismatch
        unescaped = unescape(text, 0)
        return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
    def implicit_inline(self, text, lineno):
        """
        Check each of the patterns in `self.implicit_dispatch` for a match,
        and dispatch to the stored method for the pattern.  Recursively check
        the text before and after the match.  Return a list of `nodes.Text`
        and inline element nodes.
        """
        if not text:
            return []
        for pattern, method in self.implicit_dispatch:
            match = pattern.search(text)
            if match:
                try:
                    # Must recurse on strings before *and* after the match;
                    # there may be multiple patterns.
                    return (self.implicit_inline(text[:match.start()], lineno)
                            + method(match, lineno) +
                            self.implicit_inline(text[match.end():], lineno))
                except MarkupMismatch:
                    # handler rejected the match; try the next pattern
                    pass
        # no implicit markup found: plain text node
        return [nodes.Text(unescape(text), rawsource=unescape(text, 1))]
dispatch = {'*': emphasis,
'**': strong,
'`': interpreted_or_phrase_ref,
'``': literal,
'_`': inline_internal_target,
']_': footnote_reference,
'|': substitution_reference,
'_': reference,
'__': anonymous_reference}
def _loweralpha_to_int(s, _zero=(ord('a')-1)):
return ord(s) - _zero
def _upperalpha_to_int(s, _zero=(ord('A')-1)):
return ord(s) - _zero
def _lowerroman_to_int(s):
    """Convert a lowercase Roman numeral string to an int via the `roman`
    module (`fromRoman` expects uppercase input)."""
    return roman.fromRoman(s.upper())
class Body(RSTState):
    """
    Generic classifier of the first line of a block.
    """
    double_width_pad_char = tableparser.TableParser.double_width_pad_char
    """Padding character for East Asian double-width text."""
    enum = Struct()
    """Enumerated list parsing information."""
    # Per-format parsing info: prefix/suffix strings and the slice
    # (start:end) that extracts the bare enumerator text.
    enum.formatinfo = {
        'parens': Struct(prefix='(', suffix=')', start=1, end=-1),
        'rparen': Struct(prefix='', suffix=')', start=0, end=-1),
        'period': Struct(prefix='', suffix='.', start=0, end=-1)}
    enum.formats = enum.formatinfo.keys()
    enum.sequences = ['arabic', 'loweralpha', 'upperalpha',
                      'lowerroman', 'upperroman'] # ORDERED!
    enum.sequencepats = {'arabic': '[0-9]+',
                         'loweralpha': '[a-z]',
                         'upperalpha': '[A-Z]',
                         'lowerroman': '[ivxlcdm]+',
                         'upperroman': '[IVXLCDM]+',}
    # Functions converting enumerator text to its ordinal value.
    enum.converters = {'arabic': int,
                       'loweralpha': _loweralpha_to_int,
                       'upperalpha': _upperalpha_to_int,
                       'lowerroman': _lowerroman_to_int,
                       'upperroman': roman.fromRoman}
    # Compiled, end-anchored regexp per sequence (built below).
    enum.sequenceregexps = {}
    for sequence in enum.sequences:
        enum.sequenceregexps[sequence] = re.compile(
            enum.sequencepats[sequence] + '$')
    grid_table_top_pat = re.compile(r'\+-[-+]+-\+ *$')
    """Matches the top (& bottom) of a full table)."""
    simple_table_top_pat = re.compile('=+( +=+)+ *$')
    """Matches the top of a simple table."""
    simple_table_border_pat = re.compile('=+[ =]*$')
    """Matches the bottom & header bottom of a simple table."""
    pats = {}
    """Fragments of patterns used by transitions."""
    pats['nonalphanum7bit'] = '[!-/:-@[-`{-~]'
    pats['alpha'] = '[a-zA-Z]'
    pats['alphanum'] = '[a-zA-Z0-9]'
    pats['alphanumplus'] = '[a-zA-Z0-9_-]'
    pats['enum'] = ('(%(arabic)s|%(loweralpha)s|%(upperalpha)s|%(lowerroman)s'
                    '|%(upperroman)s|#)' % enum.sequencepats)
    pats['optname'] = '%(alphanum)s%(alphanumplus)s*' % pats
    # @@@ Loosen up the pattern? Allow Unicode?
    pats['optarg'] = '(%(alpha)s%(alphanumplus)s*|<[^<>]+>)' % pats
    pats['shortopt'] = r'(-|\+)%(alphanum)s( ?%(optarg)s)?' % pats
    pats['longopt'] = r'(--|/)%(optname)s([ =]%(optarg)s)?' % pats
    pats['option'] = r'(%(shortopt)s|%(longopt)s)' % pats
    # One named-group pattern per enumerator format (parens/rparen/period).
    for format in enum.formats:
        pats[format] = '(?P<%s>%s%s%s)' % (
            format, re.escape(enum.formatinfo[format].prefix),
            pats['enum'], re.escape(enum.formatinfo[format].suffix))
    # State-machine transition patterns; keys correspond to the method
    # names listed in `initial_transitions` below.
    patterns = {
        'bullet': u'[-+*\u2022\u2023\u2043]( +|$)',
        'enumerator': r'(%(parens)s|%(rparen)s|%(period)s)( +|$)' % pats,
        'field_marker': r':(?![: ])([^:\\]|\\.)*(?<! ):( +|$)',
        'option_marker': r'%(option)s(, %(option)s)*(  +| ?$)' % pats,
        'doctest': r'>>>( +|$)',
        'line_block': r'\|( +|$)',
        'grid_table_top': grid_table_top_pat,
        'simple_table_top': simple_table_top_pat,
        'explicit_markup': r'\.\.( +|$)',
        'anonymous': r'__( +|$)',
        'line': r'(%(nonalphanum7bit)s)\1* *$' % pats,
        'text': r''}
    initial_transitions = (
        'bullet',
        'enumerator',
        'field_marker',
        'option_marker',
        'doctest',
        'line_block',
        'grid_table_top',
        'simple_table_top',
        'explicit_markup',
        'anonymous',
        'line',
        'text')
    def indent(self, match, context, next_state):
        """Block quote."""
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_indented()
        elements = self.block_quote(indented, line_offset)
        self.parent += elements
        if not blank_finish:
            # unexpected unindent without a blank line: append a warning
            self.parent += self.unindent_warning('Block quote')
        return context, next_state, []
    def block_quote(self, indented, line_offset):
        """Parse `indented` lines into one or more block_quote nodes,
        splitting off attributions; return the list of resulting nodes."""
        elements = []
        while indented:
            (blockquote_lines,
             attribution_lines,
             attribution_offset,
             indented,
             new_line_offset) = self.split_attribution(indented, line_offset)
            blockquote = nodes.block_quote()
            self.nested_parse(blockquote_lines, line_offset, blockquote)
            elements.append(blockquote)
            if attribution_lines:
                attribution, messages = self.parse_attribution(
                    attribution_lines, attribution_offset)
                blockquote += attribution
                elements += messages
            line_offset = new_line_offset
            # skip blank lines before any following block quote
            while indented and not indented[0]:
                indented = indented[1:]
                line_offset += 1
        return elements
    # U+2014 is an em-dash:
    attribution_pattern = re.compile(u'(---?(?!-)|\u2014) *(?=[^ \\n])')
    def split_attribution(self, indented, line_offset):
        """
        Check for a block quote attribution and split it off:
        * First line after a blank line must begin with a dash ("--", "---",
          em-dash; matches `self.attribution_pattern`).
        * Every line after that must have consistent indentation.
        * Attributions must be preceded by block quote content.
        Return a tuple of: (block quote content lines, attribution lines,
        attribution offset within `indented`, remaining indented lines,
        offset of the remaining lines).  The last four elements are ``None``
        when no attribution is found.
        """
        blank = None
        nonblank_seen = False
        for i in range(len(indented)):
            line = indented[i].rstrip()
            if line:
                if nonblank_seen and blank == i - 1: # last line blank
                    match = self.attribution_pattern.match(line)
                    if match:
                        attribution_end, indent = self.check_attribution(
                            indented, i)
                        if attribution_end:
                            a_lines = indented[i:attribution_end]
                            # strip the dash from the first line, then the
                            # common indent from the continuation lines
                            a_lines.trim_left(match.end(), end=1)
                            a_lines.trim_left(indent, start=1)
                            return (indented[:i], a_lines,
                                    i, indented[attribution_end:],
                                    line_offset + attribution_end)
                nonblank_seen = True
            else:
                blank = i
        else:
            # no attribution found: everything is block quote content
            return (indented, None, None, None, None)
    def check_attribution(self, indented, attribution_start):
        """
        Check attribution shape.
        Return the index past the end of the attribution, and the indent.
        Returns (None, None) if the continuation lines have inconsistent
        indentation (i.e. this is not an attribution).
        """
        indent = None
        # initialize `i` for the case where the for-loop range is empty
        i = attribution_start + 1
        for i in range(attribution_start + 1, len(indented)):
            line = indented[i].rstrip()
            if not line:
                # blank line terminates the attribution; `i` is past its end
                break
            if indent is None:
                indent = len(line) - len(line.lstrip())
            elif len(line) - len(line.lstrip()) != indent:
                return None, None # bad shape; not an attribution
        else:
            # return index of line after last attribution line:
            i += 1
        return i, (indent or 0)
    def parse_attribution(self, indented, line_offset):
        """Parse attribution text into a `nodes.attribution` node; return
        (node, system messages from inline parsing)."""
        text = '\n'.join(indented).rstrip()
        lineno = self.state_machine.abs_line_number() + line_offset
        textnodes, messages = self.inline_text(text, lineno)
        node = nodes.attribution(text, '', *textnodes)
        node.line = lineno
        # report with source and source-line results in
        # ``IndexError: list index out of range``
        # node.source, node.line = self.state_machine.get_source_and_line(lineno)
        return node, messages
    def bullet(self, match, context, next_state):
        """Bullet list item."""
        bulletlist = nodes.bullet_list()
        self.parent += bulletlist
        bulletlist['bullet'] = match.string[0]
        i, blank_finish = self.list_item(match.end())
        bulletlist += i
        offset = self.state_machine.line_offset + 1   # next line
        # parse subsequent items of the same list in a nested state machine
        new_line_offset, blank_finish = self.nested_list_parse(
              self.state_machine.input_lines[offset:],
              input_offset=self.state_machine.abs_line_offset() + 1,
              node=bulletlist, initial_state='BulletList',
              blank_finish=blank_finish)
        self.goto_line(new_line_offset)
        if not blank_finish:
            self.parent += self.unindent_warning('Bullet list')
        return [], next_state, []
    def list_item(self, indent):
        """Parse one list item body; return (list_item node, blank_finish)."""
        if self.state_machine.line[indent:]:
            # text on the same line as the marker: indent is known
            indented, line_offset, blank_finish = (
                self.state_machine.get_known_indented(indent))
        else:
            # marker alone on its line: find the item's real indent
            indented, indent, line_offset, blank_finish = (
                self.state_machine.get_first_known_indented(indent))
        listitem = nodes.list_item('\n'.join(indented))
        if indented:
            self.nested_parse(indented, input_offset=line_offset,
                              node=listitem)
        return listitem, blank_finish
    def enumerator(self, match, context, next_state):
        """Enumerated List Item"""
        format, sequence, text, ordinal = self.parse_enumerator(match)
        if not self.is_enumerated_list_item(ordinal, sequence, format):
            # not actually a list item; re-dispatch as plain text
            raise statemachine.TransitionCorrection('text')
        enumlist = nodes.enumerated_list()
        self.parent += enumlist
        if sequence == '#':
            enumlist['enumtype'] = 'arabic'
        else:
            enumlist['enumtype'] = sequence
        enumlist['prefix'] = self.enum.formatinfo[format].prefix
        enumlist['suffix'] = self.enum.formatinfo[format].suffix
        if ordinal != 1:
            enumlist['start'] = ordinal
            src, srcline = self.state_machine.get_source_and_line()
            msg = self.reporter.info(
                'Enumerated list start value not ordinal-1: "%s" (ordinal %s)'
                % (text, ordinal), source=src, line=srcline)
            self.parent += msg
        listitem, blank_finish = self.list_item(match.end())
        enumlist += listitem
        offset = self.state_machine.line_offset + 1   # next line
        # parse the remaining items; extra settings enforce a consistent
        # sequence/format and ordinal progression across items
        newline_offset, blank_finish = self.nested_list_parse(
              self.state_machine.input_lines[offset:],
              input_offset=self.state_machine.abs_line_offset() + 1,
              node=enumlist, initial_state='EnumeratedList',
              blank_finish=blank_finish,
              extra_settings={'lastordinal': ordinal,
                              'format': format,
                              'auto': sequence == '#'})
        self.goto_line(newline_offset)
        if not blank_finish:
            self.parent += self.unindent_warning('Enumerated list')
        return [], next_state, []
    def parse_enumerator(self, match, expected_sequence=None):
        """
        Analyze an enumerator and return the results.
        :Return:
            - the enumerator format ('period', 'parens', or 'rparen'),
            - the sequence used ('arabic', 'loweralpha', 'upperroman', etc.),
            - the text of the enumerator, stripped of formatting, and
            - the ordinal value of the enumerator ('a' -> 1, 'ii' -> 2, etc.;
              ``None`` is returned for invalid enumerator text).
        The enumerator format has already been determined by the regular
        expression match. If `expected_sequence` is given, that sequence is
        tried first. If not, we check for Roman numeral 1. This way,
        single-character Roman numerals (which are also alphabetical) can be
        matched. If no sequence has been matched, all sequences are checked in
        order.
        """
        groupdict = match.groupdict()
        sequence = ''
        # identify which format's named group actually matched
        for format in self.enum.formats:
            if groupdict[format]: # was this the format matched?
                break # yes; keep `format`
        else: # shouldn't happen
            raise ParserError('enumerator format not matched')
        # strip the format's prefix/suffix to get the bare enumerator text
        text = groupdict[format][self.enum.formatinfo[format].start
                                 :self.enum.formatinfo[format].end]
        if text == '#':
            sequence = '#'
        elif expected_sequence:
            try:
                if self.enum.sequenceregexps[expected_sequence].match(text):
                    sequence = expected_sequence
            except KeyError: # shouldn't happen
                raise ParserError('unknown enumerator sequence: %s'
                                  % sequence)
        elif text == 'i':
            sequence = 'lowerroman'
        elif text == 'I':
            sequence = 'upperroman'
        if not sequence:
            for sequence in self.enum.sequences:
                if self.enum.sequenceregexps[sequence].match(text):
                    break
            else: # shouldn't happen
                raise ParserError('enumerator sequence not matched')
        if sequence == '#':
            # auto-enumerator: always ordinal 1
            ordinal = 1
        else:
            try:
                ordinal = self.enum.converters[sequence](text)
            except roman.InvalidRomanNumeralError:
                ordinal = None
        return format, sequence, text, ordinal
    def is_enumerated_list_item(self, ordinal, sequence, format):
        """
        Check validity based on the ordinal value and the second line.
        Return true if the ordinal is valid and the second line is blank,
        indented, or starts with the next enumerator or an auto-enumerator.
        """
        if ordinal is None:
            return None
        try:
            next_line = self.state_machine.next_line()
        except EOFError: # end of input lines
            self.state_machine.previous_line()
            return 1
        else:
            # restore the state machine's position after peeking
            self.state_machine.previous_line()
        if not next_line[:1].strip(): # blank or indented
            return 1
        result = self.make_enumerator(ordinal + 1, sequence, format)
        if result:
            next_enumerator, auto_enumerator = result
            try:
                if ( next_line.startswith(next_enumerator) or
                     next_line.startswith(auto_enumerator) ):
                    return 1
            except TypeError:
                pass
        return None
    def make_enumerator(self, ordinal, sequence, format):
        """
        Construct and return the next enumerated list item marker, and an
        auto-enumerator ("#" instead of the regular enumerator).
        Return ``None`` for invalid (out of range) ordinals.
        """ #"
        if sequence == '#':
            enumerator = '#'
        elif sequence == 'arabic':
            enumerator = str(ordinal)
        else:
            if sequence.endswith('alpha'):
                if ordinal > 26:
                    # out of range for a single letter
                    return None
                enumerator = chr(ordinal + ord('a') - 1)
            elif sequence.endswith('roman'):
                try:
                    enumerator = roman.toRoman(ordinal)
                except roman.RomanError:
                    return None
            else: # shouldn't happen
                raise ParserError('unknown enumerator sequence: "%s"'
                                  % sequence)
            # adjust case to match the sequence variant
            if sequence.startswith('lower'):
                enumerator = enumerator.lower()
            elif sequence.startswith('upper'):
                enumerator = enumerator.upper()
            else: # shouldn't happen
                raise ParserError('unknown enumerator sequence: "%s"'
                                  % sequence)
        formatinfo = self.enum.formatinfo[format]
        next_enumerator = (formatinfo.prefix + enumerator + formatinfo.suffix
                           + ' ')
        auto_enumerator = formatinfo.prefix + '#' + formatinfo.suffix + ' '
        return next_enumerator, auto_enumerator
    def field_marker(self, match, context, next_state):
        """Field list item."""
        field_list = nodes.field_list()
        self.parent += field_list
        field, blank_finish = self.field(match)
        field_list += field
        offset = self.state_machine.line_offset + 1   # next line
        # gather subsequent fields of the same list
        newline_offset, blank_finish = self.nested_list_parse(
              self.state_machine.input_lines[offset:],
              input_offset=self.state_machine.abs_line_offset() + 1,
              node=field_list, initial_state='FieldList',
              blank_finish=blank_finish)
        self.goto_line(newline_offset)
        if not blank_finish:
            self.parent += self.unindent_warning('Field list')
        return [], next_state, []
    def field(self, match):
        """Parse one field (name + body); return (field node, blank_finish)."""
        name = self.parse_field_marker(match)
        src, srcline = self.state_machine.get_source_and_line()
        lineno = self.state_machine.abs_line_number()
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end())
        field_node = nodes.field()
        field_node.source = src
        field_node.line = srcline
        # field names may themselves contain inline markup
        name_nodes, name_messages = self.inline_text(name, lineno)
        field_node += nodes.field_name(name, '', *name_nodes)
        field_body = nodes.field_body('\n'.join(indented), *name_messages)
        field_node += field_body
        if indented:
            self.parse_field_body(indented, line_offset, field_body)
        return field_node, blank_finish
def parse_field_marker(self, match):
"""Extract & return field name from a field marker match."""
field = match.group()[1:] # strip off leading ':'
field = field[:field.rfind(':')] # strip off trailing ':' etc.
return field
    def parse_field_body(self, indented, offset, node):
        """Parse a field body's indented block into `node`."""
        self.nested_parse(indented, input_offset=offset, node=node)
    def option_marker(self, match, context, next_state):
        """Option list item."""
        optionlist = nodes.option_list()
        try:
            listitem, blank_finish = self.option_list_item(match)
        except MarkupError, error:
            # This shouldn't happen; pattern won't match.
            src, srcline = self.state_machine.get_source_and_line()
            msg = self.reporter.error('Invalid option list marker: %s' %
                                      str(error), source=src, line=srcline)
            self.parent += msg
            # fall back: treat the indented block as a block quote
            indented, indent, line_offset, blank_finish = \
                  self.state_machine.get_first_known_indented(match.end())
            elements = self.block_quote(indented, line_offset)
            self.parent += elements
            if not blank_finish:
                self.parent += self.unindent_warning('Option list')
            return [], next_state, []
        self.parent += optionlist
        optionlist += listitem
        offset = self.state_machine.line_offset + 1   # next line
        newline_offset, blank_finish = self.nested_list_parse(
              self.state_machine.input_lines[offset:],
              input_offset=self.state_machine.abs_line_offset() + 1,
              node=optionlist, initial_state='OptionList',
              blank_finish=blank_finish)
        self.goto_line(newline_offset)
        if not blank_finish:
            self.parent += self.unindent_warning('Option list')
        return [], next_state, []
    def option_list_item(self, match):
        """Parse one option list item; return (option_list_item node,
        blank_finish).  Raises TransitionCorrection if there is no
        description (not actually an option list item)."""
        offset = self.state_machine.abs_line_offset()
        options = self.parse_option_marker(match)
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end())
        if not indented:                    # not an option list item
            # rewind and reprocess the line as plain text
            self.goto_line(offset)
            raise statemachine.TransitionCorrection('text')
        option_group = nodes.option_group('', *options)
        description = nodes.description('\n'.join(indented))
        option_list_item = nodes.option_list_item('', option_group,
                                                  description)
        if indented:
            self.nested_parse(indented, input_offset=line_offset,
                              node=description)
        return option_list_item, blank_finish
    def parse_option_marker(self, match):
        """
        Return a list of `node.option` and `node.option_argument` objects,
        parsed from an option marker match.
        :Exception: `MarkupError` for invalid option markers.
        """
        optlist = []
        # multiple synonymous options are separated by ", "
        optionstrings = match.group().rstrip().split(', ')
        for optionstring in optionstrings:
            tokens = optionstring.split()
            delimiter = ' '
            firstopt = tokens[0].split('=')
            if len(firstopt) > 1:
                # "--opt=value" form
                tokens[:1] = firstopt
                delimiter = '='
            elif (len(tokens[0]) > 2
                  and ((tokens[0].startswith('-')
                        and not tokens[0].startswith('--'))
                       or tokens[0].startswith('+'))):
                # "-ovalue" form
                tokens[:1] = [tokens[0][:2], tokens[0][2:]]
                delimiter = ''
            if len(tokens) > 1 and (tokens[1].startswith('<')
                                    and tokens[-1].endswith('>')):
                # "-o <value1 value2>" form; join all values into one token
                tokens[1:] = [' '.join(tokens[1:])]
            if 0 < len(tokens) <= 2:
                option = nodes.option(optionstring)
                option += nodes.option_string(tokens[0], tokens[0])
                if len(tokens) > 1:
                    option += nodes.option_argument(tokens[1], tokens[1],
                                                    delimiter=delimiter)
                optlist.append(option)
            else:
                raise MarkupError(
                    'wrong number of option tokens (=%s), should be 1 or 2: '
                    '"%s"' % (len(tokens), optionstring))
        return optlist
    def doctest(self, match, context, next_state):
        """Doctest block: consume the whole text block verbatim."""
        data = '\n'.join(self.state_machine.get_text_block())
        self.parent += nodes.doctest_block(data, data)
        return [], next_state, []
    def line_block(self, match, context, next_state):
        """First line of a line block."""
        block = nodes.line_block()
        self.parent += block
        lineno = self.state_machine.abs_line_number()
        line, messages, blank_finish = self.line_block_line(match, lineno)
        block += line
        self.parent += messages
        if not blank_finish:
            offset = self.state_machine.line_offset + 1   # next line
            # gather the remaining "|" lines of the same block
            new_line_offset, blank_finish = self.nested_list_parse(
                  self.state_machine.input_lines[offset:],
                  input_offset=self.state_machine.abs_line_offset() + 1,
                  node=block, initial_state='LineBlock',
                  blank_finish=0)
            self.goto_line(new_line_offset)
        if not blank_finish:
            src, srcline = self.state_machine.get_source_and_line()
            self.parent += self.reporter.warning(
                'Line block ends without a blank line.',
                source=src, line=srcline+1)
        if len(block):
            if block[0].indent is None:
                block[0].indent = 0
            # nest lines into sub-blocks according to their indents
            self.nest_line_block_lines(block)
        return [], next_state, []
    def line_block_line(self, match, lineno):
        """Return one line element of a line_block."""
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end(),
                                                          until_blank=1)
        text = u'\n'.join(indented)
        text_nodes, messages = self.inline_text(text, lineno)
        line = nodes.line(text, '', *text_nodes)
        if match.string.rstrip() != '|': # not empty
            # record the indent after the "|" marker
            line.indent = len(match.group(1)) - 1
        return line, messages, blank_finish
    def nest_line_block_lines(self, block):
        """Fill in missing indents (inherit from the previous line) and
        nest the block's lines by indentation."""
        for index in range(1, len(block)):
            if block[index].indent is None:
                block[index].indent = block[index - 1].indent
        self.nest_line_block_segment(block)
    def nest_line_block_segment(self, block):
        """Recursively wrap runs of more-indented lines in nested
        line_block nodes, relative to the segment's minimum indent."""
        indents = [item.indent for item in block]
        least = min(indents)
        new_items = []
        new_block = nodes.line_block()
        for item in block:
            if item.indent > least:
                # deeper-indented line: accumulate into the nested block
                new_block.append(item)
            else:
                if len(new_block):
                    self.nest_line_block_segment(new_block)
                    new_items.append(new_block)
                    new_block = nodes.line_block()
                new_items.append(item)
        if len(new_block):
            self.nest_line_block_segment(new_block)
            new_items.append(new_block)
        block[:] = new_items
    def grid_table_top(self, match, context, next_state):
        """Top border of a full table."""
        return self.table_top(match, context, next_state,
                              self.isolate_grid_table,
                              tableparser.GridTableParser)
    def simple_table_top(self, match, context, next_state):
        """Top border of a simple table."""
        return self.table_top(match, context, next_state,
                              self.isolate_simple_table,
                              tableparser.SimpleTableParser)
    def table_top(self, match, context, next_state,
                  isolate_function, parser_class):
        """Top border of a generic table."""
        nodelist, blank_finish = self.table(isolate_function, parser_class)
        self.parent += nodelist
        if not blank_finish:
            src, srcline = self.state_machine.get_source_and_line()
            msg = self.reporter.warning(
                'Blank line required after table.',
                source=src, line=srcline+1)
            self.parent += msg
        return [], next_state, []
    def table(self, isolate_function, parser_class):
        """Parse a table."""
        # isolate_function extracts the table's text block (or messages)
        block, messages, blank_finish = isolate_function()
        if block:
            try:
                parser = parser_class()
                tabledata = parser.parse(block)
                tableline = (self.state_machine.abs_line_number() - len(block)
                             + 1)
                table = self.build_table(tabledata, tableline)
                nodelist = [table] + messages
            except tableparser.TableMarkupError, detail:
                nodelist = self.malformed_table(
                    block, ' '.join(detail.args)) + messages
        else:
            nodelist = messages
        return nodelist, blank_finish
    def isolate_grid_table(self):
        """Extract a grid table's text block, validating its left/right
        edges and bottom border; return (block, messages, blank_finish)."""
        messages = []
        blank_finish = 1
        try:
            block = self.state_machine.get_text_block(flush_left=1)
        except statemachine.UnexpectedIndentationError, instance:
            block, src, srcline = instance.args
            messages.append(self.reporter.error('Unexpected indentation.',
                                                source=src, line=srcline))
            blank_finish = 0
        block.disconnect()
        # for East Asian chars:
        block.pad_double_width(self.double_width_pad_char)
        width = len(block[0].strip())
        for i in range(len(block)):
            block[i] = block[i].strip()
            if block[i][0] not in '+|': # check left edge
                # table ends where the left edge breaks; rewind past it
                blank_finish = 0
                self.state_machine.previous_line(len(block) - i)
                del block[i:]
                break
        if not self.grid_table_top_pat.match(block[-1]): # find bottom
            blank_finish = 0
            # from second-last to third line of table:
            for i in range(len(block) - 2, 1, -1):
                if self.grid_table_top_pat.match(block[i]):
                    self.state_machine.previous_line(len(block) - i + 1)
                    del block[i+1:]
                    break
            else:
                messages.extend(self.malformed_table(block))
                return [], messages, blank_finish
        for i in range(len(block)): # check right edge
            if len(block[i]) != width or block[i][-1] not in '+|':
                messages.extend(self.malformed_table(block))
                return [], messages, blank_finish
        return block, messages, blank_finish
    def isolate_simple_table(self):
        """Extract a simple table's text block by scanning for its border
        lines; return (block, messages, blank_finish)."""
        start = self.state_machine.line_offset
        lines = self.state_machine.input_lines
        limit = len(lines) - 1
        toplen = len(lines[start].strip())
        pattern_match = self.simple_table_border_pat.match
        found = 0
        found_at = None
        i = start + 1
        while i <= limit:
            line = lines[i]
            match = pattern_match(line)
            if match:
                if len(line.strip()) != toplen:
                    # border width must equal the top border's width
                    self.state_machine.next_line(i - start)
                    messages = self.malformed_table(
                        lines[start:i+1], 'Bottom/header table border does '
                        'not match top border.')
                    return [], messages, i == limit or not lines[i+1].strip()
                found += 1
                found_at = i
                # second border (or end of input) terminates the table
                if found == 2 or i == limit or not lines[i+1].strip():
                    end = i
                    break
            i += 1
        else: # reached end of input_lines
            if found:
                extra = ' or no blank line after table bottom'
                self.state_machine.next_line(found_at - start)
                block = lines[start:found_at+1]
            else:
                extra = ''
                self.state_machine.next_line(i - start - 1)
                block = lines[start:]
            messages = self.malformed_table(
                block, 'No bottom table border found%s.' % extra)
            return [], messages, not extra
        self.state_machine.next_line(end - start)
        block = lines[start:end+1]
        # for East Asian chars:
        block.pad_double_width(self.double_width_pad_char)
        return block, [], end == limit or not lines[end+1].strip()
    def malformed_table(self, block, detail=''):
        """Return a list with one error system_message node containing the
        table text as a literal block."""
        # undo the double-width padding before reporting the raw text
        block.replace(self.double_width_pad_char, '')
        data = '\n'.join(block)
        message = 'Malformed table.'
        startline = self.state_machine.abs_line_number() - len(block) + 1
        src, srcline = self.state_machine.get_source_and_line(startline)
        if detail:
            message += '\n' + detail
        error = self.reporter.error(message, nodes.literal_block(data, data),
                                    source=src, line=srcline)
        return [error]
    def build_table(self, tabledata, tableline, stub_columns=0):
        """Build a `nodes.table` tree from parsed `tabledata`
        (colwidths, header rows, body rows)."""
        colwidths, headrows, bodyrows = tabledata
        table = nodes.table()
        tgroup = nodes.tgroup(cols=len(colwidths))
        table += tgroup
        for colwidth in colwidths:
            colspec = nodes.colspec(colwidth=colwidth)
            if stub_columns:
                # leading columns may be marked as row stubs
                colspec.attributes['stub'] = 1
                stub_columns -= 1
            tgroup += colspec
        if headrows:
            thead = nodes.thead()
            tgroup += thead
            for row in headrows:
                thead += self.build_table_row(row, tableline)
        tbody = nodes.tbody()
        tgroup += tbody
        for row in bodyrows:
            tbody += self.build_table_row(row, tableline)
        return table
    def build_table_row(self, rowdata, tableline):
        """Build one `nodes.row` from parsed cell data; each cell is
        (morerows, morecols, line offset, text block) or None for a cell
        spanned by a previous entry."""
        row = nodes.row()
        for cell in rowdata:
            if cell is None:
                continue
            morerows, morecols, offset, cellblock = cell
            attributes = {}
            if morerows:
                attributes['morerows'] = morerows
            if morecols:
                attributes['morecols'] = morecols
            entry = nodes.entry(**attributes)
            row += entry
            if ''.join(cellblock):
                self.nested_parse(cellblock, input_offset=tableline+offset,
                                  node=entry)
        return row
explicit = Struct()
"""Patterns and constants used for explicit markup recognition."""
explicit.patterns = Struct(
target=re.compile(r"""
(
_ # anonymous target
| # *OR*
(?!_) # no underscore at the beginning
(?P<quote>`?) # optional open quote
(?![ `]) # first char. not space or
# backquote
(?P<name> # reference name
.+?
)
%(non_whitespace_escape_before)s
(?P=quote) # close quote if open quote used
)
(?<!(?<!\x00):) # no unescaped colon at end
%(non_whitespace_escape_before)s
[ ]? # optional space
: # end of reference name
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),
reference=re.compile(r"""
(
(?P<simple>%(simplename)s)_
| # *OR*
` # open backquote
(?![ ]) # not space
(?P<phrase>.+?) # hyperlink phrase
%(non_whitespace_escape_before)s
`_ # close backquote,
# reference mark
)
$ # end of string
""" % vars(Inliner), re.VERBOSE | re.UNICODE),
substitution=re.compile(r"""
(
(?![ ]) # first char. not space
(?P<name>.+?) # substitution text
%(non_whitespace_escape_before)s
\| # close delimiter
)
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),)
    def footnote(self, match):
        """Parse a footnote (``.. [label]``).
        Returns ([footnote_node], blank_finish).  The label decides the
        kind: "#" / "#name" auto-numbered, "*" auto-symbol, digits manual.
        """
        src, srcline = self.state_machine.get_source_and_line()
        indented, indent, offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end())
        label = match.group(1)
        name = normalize_name(label)
        footnote = nodes.footnote('\n'.join(indented))
        footnote.source = src
        footnote.line = srcline
        if name[0] == '#':              # auto-numbered
            name = name[1:]             # autonumber label
            footnote['auto'] = 1
            if name:
                footnote['names'].append(name)
            self.document.note_autofootnote(footnote)
        elif name == '*':               # auto-symbol
            name = ''
            footnote['auto'] = '*'
            self.document.note_symbol_footnote(footnote)
        else:                           # manually numbered
            footnote += nodes.label('', label)
            footnote['names'].append(name)
            self.document.note_footnote(footnote)
        if name:
            # Named footnotes are link targets; unnamed ones only get an id.
            self.document.note_explicit_target(footnote, footnote)
        else:
            self.document.set_id(footnote, footnote)
        if indented:
            self.nested_parse(indented, input_offset=offset, node=footnote)
        return [footnote], blank_finish
def citation(self, match):
src, srcline = self.state_machine.get_source_and_line()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
citation = nodes.citation('\n'.join(indented))
citation.source = src
citation.line = srcline
citation += nodes.label('', label)
citation['names'].append(name)
self.document.note_citation(citation)
self.document.note_explicit_target(citation, citation)
if indented:
self.nested_parse(indented, input_offset=offset, node=citation)
return [citation], blank_finish
    def hyperlink_target(self, match):
        """Parse an explicit hyperlink target (``.. _name: URI``)."""
        pattern = self.explicit.patterns.target
        lineno = self.state_machine.abs_line_number()
        src, srcline = self.state_machine.get_source_and_line()
        # until_blank: the target block ends at the first blank line;
        # strip_indent=0 preserves the block's own indentation.
        block, indent, offset, blank_finish = \
              self.state_machine.get_first_known_indented(
              match.end(), until_blank=1, strip_indent=0)
        blocktext = match.string[:match.end()] + '\n'.join(block)
        block = [escape2null(line) for line in block]
        escaped = block[0]
        blockindex = 0
        # The target name may wrap over several lines: keep appending lines
        # until the target pattern matches (or the block runs out).
        while 1:
            targetmatch = pattern.match(escaped)
            if targetmatch:
                break
            blockindex += 1
            try:
                escaped += block[blockindex]
            except IndexError:
                raise MarkupError('malformed hyperlink target.')
        del block[:blockindex]
        # Strip the matched "name:" prefix from the first remaining line.
        block[0] = (block[0] + ' ')[targetmatch.end()-len(escaped)-1:].strip()
        target = self.make_target(block, blocktext, lineno,
                                  targetmatch.group('name'))
        return [target], blank_finish
def make_target(self, block, block_text, lineno, target_name):
target_type, data = self.parse_target(block, block_text, lineno)
if target_type == 'refname':
target = nodes.target(block_text, '', refname=normalize_name(data))
target.indirect_reference_name = data
self.add_target(target_name, '', target, lineno)
self.document.note_indirect_target(target)
return target
elif target_type == 'refuri':
target = nodes.target(block_text, '')
self.add_target(target_name, data, target, lineno)
return target
else:
return data
def parse_target(self, block, block_text, lineno):
"""
Determine the type of reference of a target.
:Return: A 2-tuple, one of:
- 'refname' and the indirect reference name
- 'refuri' and the URI
- 'malformed' and a system_message node
"""
if block and block[-1].strip()[-1:] == '_': # possible indirect target
reference = ' '.join([line.strip() for line in block])
refname = self.is_reference(reference)
if refname:
return 'refname', refname
reference = ''.join([''.join(line.split()) for line in block])
return 'refuri', unescape(reference)
def is_reference(self, reference):
match = self.explicit.patterns.reference.match(
whitespace_normalize_name(reference))
if not match:
return None
return unescape(match.group('simple') or match.group('phrase'))
    def add_target(self, targetname, refuri, target, lineno):
        """Register *target* (named or anonymous) with the document,
        attaching *refuri* if given."""
        target.line = lineno
        if targetname:
            name = normalize_name(unescape(targetname))
            target['names'].append(name)
            if refuri:
                uri = self.inliner.adjust_uri(refuri)
                if uri:
                    target['refuri'] = uri
                else:
                    raise ApplicationError('problem with URI: %r' % refuri)
            self.document.note_explicit_target(target, self.parent)
        else:                       # anonymous target
            if refuri:
                target['refuri'] = refuri
            target['anonymous'] = 1
            self.document.note_anonymous_target(target)
    def substitution_def(self, match):
        """Parse a substitution definition (``.. |name| directive:: ...``).
        Returns ([substitution_definition_or_message], blank_finish).
        """
        pattern = self.explicit.patterns.substitution
        src, srcline = self.state_machine.get_source_and_line()
        block, indent, offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end(),
                                                          strip_indent=0)
        blocktext = (match.string[:match.end()] + '\n'.join(block))
        block.disconnect()
        escaped = escape2null(block[0].rstrip())
        blockindex = 0
        # The "|name|" marker may wrap: keep appending lines until the
        # substitution pattern matches (or the block runs out).
        while 1:
            subdefmatch = pattern.match(escaped)
            if subdefmatch:
                break
            blockindex += 1
            try:
                escaped = escaped + ' ' + escape2null(block[blockindex].strip())
            except IndexError:
                raise MarkupError('malformed substitution definition.')
        del block[:blockindex]  # strip out the substitution marker
        block[0] = (block[0].strip() + ' ')[subdefmatch.end()-len(escaped)-1:-1]
        if not block[0]:
            del block[0]
            offset += 1
        while block and not block[-1].strip():
            block.pop()
        subname = subdefmatch.group('name')
        substitution_node = nodes.substitution_definition(blocktext)
        substitution_node.source = src
        substitution_node.line = srcline
        if not block:
            msg = self.reporter.warning(
                'Substitution definition "%s" missing contents.' % subname,
                nodes.literal_block(blocktext, blocktext),
                source=src, line=srcline)
            return [msg], blank_finish
        block[0] = block[0].strip()
        substitution_node['names'].append(
            nodes.whitespace_normalize_name(subname))
        new_abs_offset, blank_finish = self.nested_list_parse(
            block, input_offset=offset, node=substitution_node,
            initial_state='SubstitutionDef', blank_finish=blank_finish)
        i = 0
        # A substitution definition may only contain inline and Text nodes;
        # move any other children out to the parent.
        for node in substitution_node[:]:
            if not (isinstance(node, nodes.Inline) or
                    isinstance(node, nodes.Text)):
                self.parent += substitution_node[i]
                del substitution_node[i]
            else:
                i += 1
        for node in substitution_node.traverse(nodes.Element):
            if self.disallowed_inside_substitution_definitions(node):
                pformat = nodes.literal_block('', node.pformat().rstrip())
                msg = self.reporter.error(
                    'Substitution definition contains illegal element:',
                    pformat, nodes.literal_block(blocktext, blocktext),
                    source=src, line=srcline)
                return [msg], blank_finish
        if len(substitution_node) == 0:
            msg = self.reporter.warning(
                'Substitution definition "%s" empty or invalid.' % subname,
                nodes.literal_block(blocktext, blocktext),
                source=src, line=srcline)
            return [msg], blank_finish
        self.document.note_substitution_def(
            substitution_node, subname, self.parent)
        return [substitution_node], blank_finish
def disallowed_inside_substitution_definitions(self, node):
if (node['ids'] or
isinstance(node, nodes.reference) and node.get('anonymous') or
isinstance(node, nodes.footnote_reference) and node.get('auto')):
return 1
else:
return 0
def directive(self, match, **option_presets):
"""Returns a 2-tuple: list of nodes, and a "blank finish" boolean."""
type_name = match.group(1)
directive_class, messages = directives.directive(
type_name, self.memo.language, self.document)
self.parent += messages
if directive_class:
return self.run_directive(
directive_class, match, type_name, option_presets)
else:
return self.unknown_directive(type_name)
    def run_directive(self, directive, match, type_name, option_presets):
        """
        Parse a directive then run its directive function.
        Parameters:
        - `directive`: The class implementing the directive. Must be
          a subclass of `rst.Directive`.
        - `match`: A regular expression match object which matched the first
          line of the directive.
        - `type_name`: The directive name, as used in the source text.
        - `option_presets`: A dictionary of preset options, defaults for the
          directive options. Currently, only an "alt" option is passed by
          substitution definitions (value: the substitution name), which may
          be used by an embedded image directive.
        Returns a 2-tuple: list of nodes, and a "blank finish" boolean.
        """
        # Legacy support: plain directive functions are wrapped into
        # Directive classes.
        if isinstance(directive, (FunctionType, MethodType)):
            from docutils.parsers.rst import convert_directive_function
            directive = convert_directive_function(directive)
        lineno = self.state_machine.abs_line_number()
        src, srcline = self.state_machine.get_source_and_line()
        initial_line_offset = self.state_machine.line_offset
        indented, indent, line_offset, blank_finish \
                  = self.state_machine.get_first_known_indented(match.end(),
                                                                strip_top=0)
        block_text = '\n'.join(self.state_machine.input_lines[
            initial_line_offset : self.state_machine.line_offset + 1])
        try:
            arguments, options, content, content_offset = (
                self.parse_directive_block(indented, line_offset,
                                           directive, option_presets))
        except MarkupError, detail:
            error = self.reporter.error(
                'Error in "%s" directive:\n%s.' % (type_name,
                                                   ' '.join(detail.args)),
                nodes.literal_block(block_text, block_text),
                source=src, line=srcline)
            return [error], blank_finish
        directive_instance = directive(
            type_name, arguments, options, content, lineno,
            content_offset, block_text, self, self.state_machine)
        try:
            result = directive_instance.run()
        except docutils.parsers.rst.DirectiveError, error:
            # A directive signals failure via DirectiveError; convert it to
            # a system message at the requested severity level.
            msg_node = self.reporter.system_message(error.level, error.msg,
                                                    source=src, line=srcline)
            msg_node += nodes.literal_block(block_text, block_text)
            result = [msg_node]
        assert isinstance(result, list), \
               'Directive "%s" must return a list of nodes.' % type_name
        for i in range(len(result)):
            assert isinstance(result[i], nodes.Node), \
                   ('Directive "%s" returned non-Node object (index %s): %r'
                    % (type_name, i, result[i]))
        return (result,
                blank_finish or self.state_machine.is_next_line_blank())
    def parse_directive_block(self, indented, line_offset, directive,
                              option_presets):
        """Split a directive's indented block into arguments, options, and
        content; return (arguments, options, content, content_offset).
        Raises MarkupError on malformed input.
        """
        option_spec = directive.option_spec
        has_content = directive.has_content
        if indented and not indented[0].strip():
            indented.trim_start()
            line_offset += 1
        while indented and not indented[-1].strip():
            indented.trim_end()
        if indented and (directive.required_arguments
                         or directive.optional_arguments
                         or option_spec):
            # The directive takes arguments/options: everything up to the
            # first blank line is the argument (and option) block; the rest
            # is content.
            for i in range(len(indented)):
                if not indented[i].strip():
                    break
            else:
                # No blank line found; i must cover the whole block.
                i += 1
            arg_block = indented[:i]
            content = indented[i+1:]
            content_offset = line_offset + i + 1
        else:
            content = indented
            content_offset = line_offset
            arg_block = []
        while content and not content[0].strip():
            content.trim_start()
            content_offset += 1
        if option_spec:
            options, arg_block = self.parse_directive_options(
                option_presets, option_spec, arg_block)
            if arg_block and not (directive.required_arguments
                                  or directive.optional_arguments):
                raise MarkupError('no arguments permitted; blank line '
                                  'required before content block')
        else:
            options = {}
        if directive.required_arguments or directive.optional_arguments:
            arguments = self.parse_directive_arguments(
                directive, arg_block)
        else:
            arguments = []
        if content and not has_content:
            raise MarkupError('no content permitted')
        return (arguments, options, content, content_offset)
def parse_directive_options(self, option_presets, option_spec, arg_block):
options = option_presets.copy()
for i in range(len(arg_block)):
if arg_block[i][:1] == ':':
opt_block = arg_block[i:]
arg_block = arg_block[:i]
break
else:
opt_block = []
if opt_block:
success, data = self.parse_extension_options(option_spec,
opt_block)
if success: # data is a dict of options
options.update(data)
else: # data is an error string
raise MarkupError(data)
return options, arg_block
def parse_directive_arguments(self, directive, arg_block):
required = directive.required_arguments
optional = directive.optional_arguments
arg_text = '\n'.join(arg_block)
arguments = arg_text.split()
if len(arguments) < required:
raise MarkupError('%s argument(s) required, %s supplied'
% (required, len(arguments)))
elif len(arguments) > required + optional:
if directive.final_argument_whitespace:
arguments = arg_text.split(None, required + optional - 1)
else:
raise MarkupError(
'maximum %s argument(s) allowed, %s supplied'
% (required + optional, len(arguments)))
return arguments
    def parse_extension_options(self, option_spec, datalines):
        """
        Parse `datalines` for a field list containing extension options
        matching `option_spec`.
        :Parameters:
            - `option_spec`: a mapping of option name to conversion
              function, which should raise an exception on bad input.
            - `datalines`: a list of input strings.
        :Return:
            - Success value, 1 or 0.
            - An option dictionary on success, an error string on failure.
        """
        # Parse the option text as a field list, then convert each field
        # through option_spec.
        node = nodes.field_list()
        newline_offset, blank_finish = self.nested_list_parse(
            datalines, 0, node, initial_state='ExtensionOptions',
            blank_finish=1)
        if newline_offset != len(datalines): # incomplete parse of block
            return 0, 'invalid option block'
        try:
            options = utils.extract_extension_options(node, option_spec)
        except KeyError, detail:
            return 0, ('unknown option: "%s"' % detail.args[0])
        except (ValueError, TypeError), detail:
            return 0, ('invalid option value: %s' % ' '.join(detail.args))
        except utils.ExtensionOptionError, detail:
            return 0, ('invalid option data: %s' % ' '.join(detail.args))
        if blank_finish:
            return 1, options
        else:
            return 0, 'option data incompletely parsed'
def unknown_directive(self, type_name):
src, srcline = self.state_machine.get_source_and_line()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(0, strip_indent=0)
text = '\n'.join(indented)
error = self.reporter.error(
'Unknown directive type "%s".' % type_name,
nodes.literal_block(text, text), source=src, line=srcline)
return [error], blank_finish
def comment(self, match):
if not match.string[match.end():].strip() \
and self.state_machine.is_next_line_blank(): # an empty comment?
return [nodes.comment()], 1 # "A tiny but practical wart."
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
while indented and not indented[-1].strip():
indented.trim_end()
text = '\n'.join(indented)
return [nodes.comment(text, text)], blank_finish
explicit.constructs = [
(footnote,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[
( # footnote label:
[0-9]+ # manually numbered footnote
| # *OR*
\# # anonymous auto-numbered footnote
| # *OR*
\#%s # auto-number ed?) footnote label
| # *OR*
\* # auto-symbol footnote
)
\]
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(citation,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[(%s)\] # citation label
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(hyperlink_target,
re.compile(r"""
\.\.[ ]+ # explicit markup start
_ # target indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(substitution_def,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\| # substitution indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(directive,
re.compile(r"""
\.\.[ ]+ # explicit markup start
(%s) # directive name
[ ]? # optional space
:: # directive delimiter
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE))]
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
    def explicit_construct(self, match):
        """Determine which explicit construct this is, parse & return it."""
        errors = []
        for method, pattern in self.explicit.constructs:
            expmatch = pattern.match(match.string)
            if expmatch:
                try:
                    # `method` is an unbound function stored in
                    # `explicit.constructs`; pass `self` explicitly.
                    return method(self, expmatch)
                except MarkupError, error: # never reached?
                    message = ' '.join(error.args)
                    src, srcline = self.state_machine.get_source_and_line()
                    errors.append(self.reporter.warning(
                        message, source=src, line=srcline))
                    break
        # No construct pattern matched: treat the block as a comment.
        nodelist, blank_finish = self.comment(match)
        return nodelist + errors, blank_finish
    def explicit_list(self, blank_finish):
        """
        Create a nested state machine for a series of explicit markup
        constructs (including anonymous hyperlink targets).
        """
        offset = self.state_machine.line_offset + 1 # next line
        newline_offset, blank_finish = self.nested_list_parse(
            self.state_machine.input_lines[offset:],
            input_offset=self.state_machine.abs_line_offset() + 1,
            node=self.parent, initial_state='Explicit',
            blank_finish=blank_finish,
            match_titles=self.state_machine.match_titles)
        self.goto_line(newline_offset)
        if not blank_finish:
            # The series ended by unindenting rather than at a blank line.
            self.parent += self.unindent_warning('Explicit markup')
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def anonymous_target(self, match):
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
target = self.make_target(block, blocktext, lineno, '')
return [target], blank_finish
    def line(self, match, context, next_state):
        """Section title overline or transition marker."""
        if self.state_machine.match_titles:
            # Could be a title overline; let the `Line` state classify it.
            return [match.string], 'Line', []
        elif match.string.strip() == '::':
            # A bare literal-block marker, not a line: reprocess as text.
            raise statemachine.TransitionCorrection('text')
        elif len(match.string.strip()) < 4:
            # Too short to be intentional; reprocess as ordinary text.
            msg = self.reporter.info(
                'Unexpected possible title overline or transition.\n'
                "Treating it as ordinary text because it's so short.",
                line=self.state_machine.abs_line_number())
            self.parent += msg
            raise statemachine.TransitionCorrection('text')
        else:
            # Titles are not allowed here (match_titles is false).
            blocktext = self.state_machine.line
            msg = self.reporter.severe(
                'Unexpected section title or transition.',
                nodes.literal_block(blocktext, blocktext),
                line=self.state_machine.abs_line_number())
            self.parent += msg
            return [], next_state, []
    def text(self, match, context, next_state):
        """Titles, definition lists, paragraphs."""
        # Classification is deferred to the `Text` state, which examines
        # the *next* line before deciding.
        return [match.string], 'Text', []
class RFC2822Body(Body):
    """
    RFC2822 headers are only valid as the first constructs in documents. As
    soon as anything else appears, the `Body` state should take over.
    """
    patterns = Body.patterns.copy() # can't modify the original
    # "Name: value" header line; the name may not contain ':' or spaces.
    patterns['rfc2822'] = r'[!-9;-~]+:( +|$)'
    initial_transitions = [(name, 'Body')
                           for name in Body.initial_transitions]
    initial_transitions.insert(-1, ('rfc2822', 'Body')) # just before 'text'
    def rfc2822(self, match, context, next_state):
        """RFC2822-style field list item."""
        fieldlist = nodes.field_list(classes=['rfc2822'])
        self.parent += fieldlist
        field, blank_finish = self.rfc2822_field(match)
        fieldlist += field
        offset = self.state_machine.line_offset + 1 # next line
        # Parse any further header fields into the same field list.
        newline_offset, blank_finish = self.nested_list_parse(
            self.state_machine.input_lines[offset:],
            input_offset=self.state_machine.abs_line_offset() + 1,
            node=fieldlist, initial_state='RFC2822List',
            blank_finish=blank_finish)
        self.goto_line(newline_offset)
        if not blank_finish:
            self.parent += self.unindent_warning(
                'RFC2822-style field list')
        return [], next_state, []
    def rfc2822_field(self, match):
        """Parse one "Name: value" field; return (field_node, blank_finish)."""
        name = match.string[:match.string.find(':')]
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end(),
                                                          until_blank=1)
        fieldnode = nodes.field()
        fieldnode += nodes.field_name(name, name)
        fieldbody = nodes.field_body('\n'.join(indented))
        fieldnode += fieldbody
        if indented:
            self.nested_parse(indented, input_offset=line_offset,
                              node=fieldbody)
        return fieldnode, blank_finish
class SpecializedBody(Body):
    """
    Superclass for second and subsequent compound element members. Compound
    elements are lists and list-like constructs.
    All transition methods are disabled (redefined as `invalid_input`).
    Override individual methods in subclasses to re-enable.
    For example, once an initial bullet list item, say, is recognized, the
    `BulletList` subclass takes over, with a "bullet_list" node as its
    container. Upon encountering the initial bullet list item, `Body.bullet`
    calls its ``self.nested_list_parse`` (`RSTState.nested_list_parse`), which
    starts up a nested parsing session with `BulletList` as the initial state.
    Only the ``bullet`` transition method is enabled in `BulletList`; as long
    as only bullet list items are encountered, they are parsed and inserted
    into the container. The first construct which is *not* a bullet list item
    triggers the `invalid_input` method, which ends the nested parse and
    closes the container. `BulletList` needs to recognize input that is
    invalid in the context of a bullet list, which means everything *other
    than* bullet list items, so it inherits the transition list created in
    `Body`.
    """
    def invalid_input(self, match=None, context=None, next_state=None):
        """Not a compound element member. Abort this state machine."""
        self.state_machine.previous_line() # back up so parent SM can reassess
        raise EOFError
    # Disable every inherited transition; subclasses re-enable only the one
    # that continues their compound element.
    indent = invalid_input
    bullet = invalid_input
    enumerator = invalid_input
    field_marker = invalid_input
    option_marker = invalid_input
    doctest = invalid_input
    line_block = invalid_input
    grid_table_top = invalid_input
    simple_table_top = invalid_input
    explicit_markup = invalid_input
    anonymous = invalid_input
    line = invalid_input
    text = invalid_input
class BulletList(SpecializedBody):
    """Second and subsequent bullet_list list_items."""
    def bullet(self, match, context, next_state):
        """Parse another item of the current bullet list."""
        if match.string[0] != self.parent['bullet']:
            # A different bullet character starts a new list: bail out so
            # the parent state machine can reassess this line.
            self.invalid_input()
        item, blank_finish = self.list_item(match.end())
        self.parent += item
        self.blank_finish = blank_finish
        return [], next_state, []
class DefinitionList(SpecializedBody):
    """Second and subsequent definition_list_items."""
    def text(self, match, context, next_state):
        """A text line: the term line of another definition-list item."""
        return [match.string], 'Definition', []
class EnumeratedList(SpecializedBody):
    """Second and subsequent enumerated_list list_items."""
    def enumerator(self, match, context, next_state):
        """Enumerated list item."""
        format, sequence, text, ordinal = self.parse_enumerator(
            match, self.parent['enumtype'])
        # The item continues the current list only if its format and
        # sequence match and its ordinal is the expected successor; '#'
        # continues any list once auto-enumeration has started.
        if ( format != self.format
             or (sequence != '#' and (sequence != self.parent['enumtype']
                                      or self.auto
                                      or ordinal != (self.lastordinal + 1)))
             or not self.is_enumerated_list_item(ordinal, sequence, format)):
            # different enumeration: new list
            self.invalid_input()
        if sequence == '#':
            self.auto = 1
        listitem, blank_finish = self.list_item(match.end())
        self.parent += listitem
        self.blank_finish = blank_finish
        self.lastordinal = ordinal
        return [], next_state, []
class FieldList(SpecializedBody):
    """Second and subsequent field_list fields."""
    def field_marker(self, match, context, next_state):
        """Parse another field of the current field list."""
        field_node, blank_finish = self.field(match)
        self.parent += field_node
        self.blank_finish = blank_finish
        return [], next_state, []
class OptionList(SpecializedBody):
    """Second and subsequent option_list option_list_items."""
    def option_marker(self, match, context, next_state):
        """Option list item."""
        try:
            option_list_item, blank_finish = self.option_list_item(match)
        except MarkupError:
            # Not a valid option-list item after all: end this list.
            # (invalid_input raises EOFError, so we never fall through.)
            self.invalid_input()
        self.parent += option_list_item
        self.blank_finish = blank_finish
        return [], next_state, []
class RFC2822List(SpecializedBody, RFC2822Body):
    """Second and subsequent RFC2822-style field_list fields."""
    patterns = RFC2822Body.patterns
    initial_transitions = RFC2822Body.initial_transitions
    def rfc2822(self, match, context, next_state):
        """RFC2822-style field list item."""
        field, blank_finish = self.rfc2822_field(match)
        self.parent += field
        self.blank_finish = blank_finish
        return [], 'RFC2822List', []
    # A blank line terminates the header block.
    blank = SpecializedBody.invalid_input
class ExtensionOptions(FieldList):
    """
    Parse field_list fields for extension options.
    No nested parsing is done (including inline markup parsing).
    """
    def parse_field_body(self, indented, offset, node):
        """Simplified override of `Body.parse_field_body`: runs of
        non-blank lines become plain paragraphs, with no inline parsing."""
        paragraph_lines = []
        # The trailing '' sentinel flushes the final paragraph.
        for line in list(indented) + ['']:
            if not line.strip():
                if paragraph_lines:
                    text = '\n'.join(paragraph_lines)
                    node += nodes.paragraph(text, text)
                    paragraph_lines = []
            else:
                paragraph_lines.append(line)
class LineBlock(SpecializedBody):
    """Second and subsequent lines of a line_block."""
    # A blank line terminates the line block.
    blank = SpecializedBody.invalid_input
    def line_block(self, match, context, next_state):
        """New line of line block."""
        lineno = self.state_machine.abs_line_number()
        line, messages, blank_finish = self.line_block_line(match, lineno)
        self.parent += line
        # System messages go beside the line_block, not inside it.
        self.parent.parent += messages
        self.blank_finish = blank_finish
        return [], next_state, []
class Explicit(SpecializedBody):
    """Second and subsequent explicit markup construct."""
    def explicit_markup(self, match, context, next_state):
        """Footnotes, hyperlink targets, directives, comments."""
        nodelist, blank_finish = self.explicit_construct(match)
        self.parent += nodelist
        self.blank_finish = blank_finish
        return [], next_state, []
    def anonymous(self, match, context, next_state):
        """Anonymous hyperlink targets."""
        nodelist, blank_finish = self.anonymous_target(match)
        self.parent += nodelist
        self.blank_finish = blank_finish
        return [], next_state, []
    # A blank line ends the series of explicit constructs.
    blank = SpecializedBody.invalid_input
class SubstitutionDef(Body):
    """
    Parser for the contents of a substitution_definition element.
    """
    patterns = {
          'embedded_directive': re.compile(r'(%s)::( +|$)'
                                           % Inliner.simplename, re.UNICODE),
          'text': r''}
    initial_transitions = ['embedded_directive', 'text']
    def embedded_directive(self, match, context, next_state):
        # Run the directive, presetting "alt" to the substitution name
        # (used e.g. by an embedded image directive).
        nodelist, blank_finish = self.directive(match,
                                                alt=self.parent['names'][0])
        self.parent += nodelist
        if not self.state_machine.at_eof():
            self.blank_finish = blank_finish
        # A substitution definition holds exactly one construct: stop here.
        raise EOFError
    def text(self, match, context, next_state):
        # Plain text: just record whether the block ends cleanly and stop.
        if not self.state_machine.at_eof():
            self.blank_finish = self.state_machine.is_next_line_blank()
        raise EOFError
class Text(RSTState):
    """
    Classifier of second line of a text block.
    Could be a paragraph, a definition list item, or a title.
    """
    patterns = {'underline': Body.patterns['line'],
                'text': r''}
    initial_transitions = [('underline', 'Body'), ('text', 'Body')]
    def blank(self, match, context, next_state):
        """End of paragraph."""
        paragraph, literalnext = self.paragraph(
            context, self.state_machine.abs_line_number() - 1)
        self.parent += paragraph
        if literalnext:
            # Paragraph ended with "::": a literal block follows.
            self.parent += self.literal_block()
        return [], 'Body', []
    def eof(self, context):
        # Flush any pending paragraph text at end of input.
        if context:
            self.blank(None, context, None)
        return []
    def indent(self, match, context, next_state):
        """Definition list item."""
        definitionlist = nodes.definition_list()
        definitionlistitem, blank_finish = self.definition_list_item(context)
        definitionlist += definitionlistitem
        self.parent += definitionlist
        offset = self.state_machine.line_offset + 1 # next line
        # Parse subsequent items of the same list in a nested session.
        newline_offset, blank_finish = self.nested_list_parse(
            self.state_machine.input_lines[offset:],
            input_offset=self.state_machine.abs_line_offset() + 1,
            node=definitionlist, initial_state='DefinitionList',
            blank_finish=blank_finish, blank_finish_state='Definition')
        self.goto_line(newline_offset)
        if not blank_finish:
            self.parent += self.unindent_warning('Definition list')
        return [], 'Body', []
    def underline(self, match, context, next_state):
        """Section title."""
        lineno = self.state_machine.abs_line_number()
        src, srcline = self.state_machine.get_source_and_line()
        title = context[0].rstrip()
        underline = match.string.rstrip()
        source = title + '\n' + underline
        messages = []
        if column_width(title) > len(underline):
            # Underline shorter than the title text.
            if len(underline) < 4:
                if self.state_machine.match_titles:
                    msg = self.reporter.info(
                        'Possible title underline, too short for the title.\n'
                        "Treating it as ordinary text because it's so short.",
                        source=src, line=srcline)
                    self.parent += msg
                    raise statemachine.TransitionCorrection('text')
            else:
                blocktext = context[0] + '\n' + self.state_machine.line
                msg = self.reporter.warning(
                    'Title underline too short.',
                    nodes.literal_block(blocktext, blocktext),
                    source=src, line=srcline)
                messages.append(msg)
        if not self.state_machine.match_titles:
            # Titles are not permitted in this context (match_titles false):
            # report and drop the would-be title.
            blocktext = context[0] + '\n' + self.state_machine.line
            msg = self.reporter.severe(
                'Unexpected section title.',
                nodes.literal_block(blocktext, blocktext),
                source=src, line=srcline)
            self.parent += messages
            self.parent += msg
            return [], next_state, []
        style = underline[0]
        context[:] = []
        self.section(title, source, style, lineno - 1, messages)
        return [], next_state, []
    def text(self, match, context, next_state):
        """Paragraph."""
        startline = self.state_machine.abs_line_number() - 1
        msg = None
        try:
            block = self.state_machine.get_text_block(flush_left=1)
        except statemachine.UnexpectedIndentationError, instance:
            block, src, srcline = instance.args
            msg = self.reporter.error('Unexpected indentation.',
                                      source=src, line=srcline)
        lines = context + list(block)
        paragraph, literalnext = self.paragraph(lines, startline)
        self.parent += paragraph
        self.parent += msg
        if literalnext:
            try:
                self.state_machine.next_line()
            except EOFError:
                pass
            self.parent += self.literal_block()
        return [], next_state, []
    def literal_block(self):
        """Return a list of nodes."""
        indented, indent, offset, blank_finish = \
              self.state_machine.get_indented()
        while indented and not indented[-1].strip():
            indented.trim_end()
        if not indented:
            # No indented block follows "::": try a quoted literal block.
            return self.quoted_literal_block()
        data = '\n'.join(indented)
        literal_block = nodes.literal_block(data, data)
        literal_block.line = offset + 1
        nodelist = [literal_block]
        if not blank_finish:
            nodelist.append(self.unindent_warning('Literal block'))
        return nodelist
    def quoted_literal_block(self):
        """Parse an unindented, quote-prefixed literal block."""
        abs_line_offset = self.state_machine.abs_line_offset()
        offset = self.state_machine.line_offset
        parent_node = nodes.Element()
        new_abs_offset = self.nested_parse(
            self.state_machine.input_lines[offset:],
            input_offset=abs_line_offset, node=parent_node, match_titles=0,
            state_machine_kwargs={'state_classes': (QuotedLiteralBlock,),
                                  'initial_state': 'QuotedLiteralBlock'})
        self.goto_line(new_abs_offset)
        return parent_node.children
    def definition_list_item(self, termline):
        """Parse one term line plus its indented definition block."""
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_indented()
        definitionlistitem = nodes.definition_list_item(
            '\n'.join(termline + list(indented)))
        lineno = self.state_machine.abs_line_number() - 1
        src, srcline = self.state_machine.get_source_and_line()
        definitionlistitem.source = src
        definitionlistitem.line = srcline - 1
        termlist, messages = self.term(termline, lineno)
        definitionlistitem += termlist
        definition = nodes.definition('', *messages)
        definitionlistitem += definition
        if termline[0][-2:] == '::':
            # "term::" probably meant a literal block, not a definition.
            definition += self.reporter.info(
                'Blank line missing before literal block (after the "::")? '
                'Interpreted as a definition list item.',
                source=src, line=srcline)
        self.nested_parse(indented, input_offset=line_offset, node=definition)
        return definitionlistitem, blank_finish
    # " : " (space-colon-space) separates the term from its classifiers.
    classifier_delimiter = re.compile(' +: +')
    def term(self, lines, lineno):
        """Return a definition_list's term and optional classifiers."""
        assert len(lines) == 1
        text_nodes, messages = self.inline_text(lines[0], lineno)
        term_node = nodes.term()
        node_list = [term_node]
        for i in range(len(text_nodes)):
            node = text_nodes[i]
            if isinstance(node, nodes.Text):
                parts = self.classifier_delimiter.split(node.rawsource)
                if len(parts) == 1:
                    node_list[-1] += node
                else:
                    # Split text: first part stays in the term, the rest
                    # become classifier nodes.
                    node_list[-1] += nodes.Text(parts[0].rstrip())
                    for part in parts[1:]:
                        classifier_node = nodes.classifier('', part)
                        node_list.append(classifier_node)
            else:
                node_list[-1] += node
        return node_list, messages
class SpecializedText(Text):
    """
    Superclass for second and subsequent lines of Text-variants.
    All transition methods are disabled. Override individual methods in
    subclasses to re-enable.
    """
    def eof(self, context):
        """Incomplete construct."""
        return []
    def invalid_input(self, match=None, context=None, next_state=None):
        """Not a compound element member. Abort this state machine."""
        raise EOFError
    # Disable all transitions; subclasses selectively re-enable them.
    blank = invalid_input
    indent = invalid_input
    underline = invalid_input
    text = invalid_input
class Definition(SpecializedText):
    """Second line of a potential definition_list_item."""
    def eof(self, context):
        """EOF right after the term line: it was not a definition."""
        # Back up two lines so the parent state machine can reassess them.
        self.state_machine.previous_line(2)
        return []
    def indent(self, match, context, next_state):
        """Indented text: the definition of a definition-list item."""
        item, blank_finish = self.definition_list_item(context)
        self.parent += item
        self.blank_finish = blank_finish
        return [], 'DefinitionList', []
class Line(SpecializedText):
    """
    Second line of over- & underlined section title or transition marker.
    """
    # Consulted by `eof`: a transition node is only emitted at EOF while this
    # is true; `text` clears it around nested section parsing.
    eofcheck = 1 # @@@ ???
    """Set to 0 while parsing sections, so that we don't catch the EOF."""
    def eof(self, context):
        """Transition marker at end of section or document."""
        marker = context[0].strip()
        if self.memo.section_bubble_up_kludge:
            self.memo.section_bubble_up_kludge = 0
        elif len(marker) < 4:
            # Too short for a transition: reparse as ordinary text.
            self.state_correction(context)
        if self.eofcheck: # ignore EOFError with sections
            lineno = self.state_machine.abs_line_number() - 1
            transition = nodes.transition(rawsource=context[0])
            transition.line = lineno
            self.parent += transition
        self.eofcheck = 1
        return []
    def blank(self, match, context, next_state):
        """Transition marker."""
        src, srcline = self.state_machine.get_source_and_line()
        marker = context[0].strip()
        if len(marker) < 4:
            # Short markers are reparsed as text via a StateCorrection.
            self.state_correction(context)
        transition = nodes.transition(rawsource=marker)
        transition.source = src
        transition.line = srcline - 1
        self.parent += transition
        return [], 'Body', []
    def text(self, match, context, next_state):
        """Potential over- & underlined title."""
        lineno = self.state_machine.abs_line_number() - 1
        src, srcline = self.state_machine.get_source_and_line()
        overline = context[0]
        title = match.string
        underline = ''
        try:
            underline = self.state_machine.next_line()
        except EOFError:
            # Overline + title but no underline: incomplete section title.
            blocktext = overline + '\n' + title
            if len(overline.rstrip()) < 4:
                self.short_overline(context, blocktext, lineno, 2)
            else:
                msg = self.reporter.severe(
                    'Incomplete section title.',
                    nodes.literal_block(blocktext, blocktext),
                    source=src, line=srcline-1)
                self.parent += msg
                return [], 'Body', []
        source = '%s\n%s\n%s' % (overline, title, underline)
        overline = overline.rstrip()
        underline = underline.rstrip()
        if not self.transitions['underline'][0].match(underline):
            # Third line is not a valid underline at all.
            blocktext = overline + '\n' + title + '\n' + underline
            if len(overline.rstrip()) < 4:
                self.short_overline(context, blocktext, lineno, 2)
            else:
                msg = self.reporter.severe(
                    'Missing matching underline for section title overline.',
                    nodes.literal_block(source, source),
                    source=src, line=srcline-1)
                self.parent += msg
                return [], 'Body', []
        elif overline != underline:
            # Overline and underline must match exactly (same char & length).
            blocktext = overline + '\n' + title + '\n' + underline
            if len(overline.rstrip()) < 4:
                self.short_overline(context, blocktext, lineno, 2)
            else:
                msg = self.reporter.severe(
                    'Title overline & underline mismatch.',
                    nodes.literal_block(source, source),
                    source=src, line=srcline-1)
                self.parent += msg
                return [], 'Body', []
        title = title.rstrip()
        messages = []
        if column_width(title) > len(overline):
            # Title wider than its overline: warn, but still build a section.
            blocktext = overline + '\n' + title + '\n' + underline
            if len(overline.rstrip()) < 4:
                self.short_overline(context, blocktext, lineno, 2)
            else:
                msg = self.reporter.warning(
                    'Title overline too short.',
                    nodes.literal_block(source, source),
                    source=src, line=srcline-1)
                messages.append(msg)
        style = (overline[0], underline[0])
        self.eofcheck = 0 # @@@ not sure this is correct
        self.section(title.lstrip(), source, style, lineno + 1, messages)
        self.eofcheck = 1
        return [], 'Body', []
    indent = text # indented title
    def underline(self, match, context, next_state):
        # Two consecutive punctuation lines with no title text in between.
        overline = context[0]
        blocktext = overline + '\n' + self.state_machine.line
        lineno = self.state_machine.abs_line_number() - 1
        src, srcline = self.state_machine.get_source_and_line()
        if len(overline.rstrip()) < 4:
            self.short_overline(context, blocktext, lineno, 1)
        msg = self.reporter.error(
            'Invalid section title or transition marker.',
            nodes.literal_block(blocktext, blocktext),
            source=src, line=srcline-1)
        self.parent += msg
        return [], 'Body', []
    def short_overline(self, context, blocktext, lineno, lines=1):
        """Report a too-short overline and reparse it as ordinary text."""
        src, srcline = self.state_machine.get_source_and_line(lineno)
        msg = self.reporter.info(
            'Possible incomplete section title.\nTreating the overline as '
            "ordinary text because it's so short.",
            source=src, line=srcline)
        self.parent += msg
        self.state_correction(context, lines)
    def state_correction(self, context, lines=1):
        """Back up `lines` lines and restart in the Body state's 'text' branch."""
        self.state_machine.previous_line(lines)
        context[:] = []
        raise statemachine.StateCorrection('Body', 'text')
class QuotedLiteralBlock(RSTState):
    """
    Nested parse handler for quoted (unindented) literal blocks.
    Special-purpose. Not for inclusion in `state_classes`.
    """
    patterns = {'initial_quoted': r'(%(nonalphanum7bit)s)' % Body.pats,
                'text': r''}
    initial_transitions = ('initial_quoted', 'text')
    def __init__(self, state_machine, debug=0):
        RSTState.__init__(self, state_machine, debug)
        # Deferred system messages, flushed to the parent in `eof`.
        self.messages = []
        # Absolute line number of the block's first line; set by
        # `initial_quoted`, used for the literal_block's source position.
        self.initial_lineno = None
    def blank(self, match, context, next_state):
        # A blank line ends the quoted block once it has started.
        if context:
            raise EOFError
        else:
            return context, next_state, []
    def eof(self, context):
        # Assemble the accumulated lines into a literal_block node, or warn
        # if no quoted line was ever collected.
        if context:
            src, srcline = self.state_machine.get_source_and_line(
                self.initial_lineno)
            text = '\n'.join(context)
            literal_block = nodes.literal_block(text, text)
            literal_block.source = src
            literal_block.line = srcline
            self.parent += literal_block
        else:
            self.parent += self.reporter.warning(
                'Literal block expected; none found.',
                line=self.state_machine.abs_line_number())
            # src not available, because statemachine.input_lines is empty
            self.state_machine.previous_line()
        self.parent += self.messages
        return []
    def indent(self, match, context, next_state):
        # Indentation is not allowed inside a quoted literal block.
        assert context, ('QuotedLiteralBlock.indent: context should not '
                         'be empty!')
        self.messages.append(
            self.reporter.error('Unexpected indentation.',
                                line=self.state_machine.abs_line_number()))
        self.state_machine.previous_line()
        raise EOFError
    def initial_quoted(self, match, context, next_state):
        """Match arbitrary quote character on the first line only."""
        self.remove_transition('initial_quoted')
        quote = match.string[0]
        pattern = re.compile(re.escape(quote))
        # New transition matches consistent quotes only:
        self.add_transition('quoted',
                            (pattern, self.quoted, self.__class__.__name__))
        self.initial_lineno = self.state_machine.abs_line_number()
        return [match.string], next_state, []
    def quoted(self, match, context, next_state):
        """Match consistent quotes on subsequent lines."""
        context.append(match.string)
        return context, next_state, []
    def text(self, match, context, next_state):
        # A line starting with a different character ends the block.
        if context:
            src, srcline = self.state_machine.get_source_and_line()
            self.messages.append(
                self.reporter.error('Inconsistent literal block quoting.',
                                    source=src, line=srcline))
        self.state_machine.previous_line()
        raise EOFError
# Order matters: this tuple seeds the RST state machine's transition table.
state_classes = (Body, BulletList, DefinitionList, EnumeratedList, FieldList,
                 OptionList, LineBlock, ExtensionOptions, Explicit, Text,
                 Definition, Line, SubstitutionDef, RFC2822Body, RFC2822List)
"""Standard set of State classes used to start `RSTStateMachine`."""
| apache-2.0 |
jxs/servo | tests/wpt/css-tests/tools/html5lib/html5lib/utils.py | 982 | 2545 | from __future__ import absolute_import, division, unicode_literals
from types import ModuleType
try:
import xml.etree.cElementTree as default_etree
except ImportError:
import xml.etree.ElementTree as default_etree
__all__ = ["default_etree", "MethodDispatcher", "isSurrogatePair",
"surrogatePairToCodepoint", "moduleFactoryFactory"]
class MethodDispatcher(dict):
    """Dict with 2 special properties:

    On initiation, keys that are lists, sets or tuples are converted to
    multiple keys so accessing any one of the items in the original
    list-like object returns the matching value

    md = MethodDispatcher({("foo", "bar"):"baz"})
    md["foo"] == "baz"

    A default value which can be set through the default attribute.
    """
    def __init__(self, items=()):
        # Building a flat entry list and handing it to dict.__init__ in one
        # call is markedly faster than assigning keys onto self one by one.
        entries = []
        for key, value in items:
            if type(key) in (list, tuple, frozenset, set):
                entries.extend((member, value) for member in key)
            else:
                entries.append((key, value))
        dict.__init__(self, entries)
        self.default = None

    def __getitem__(self, key):
        # Missing keys fall back to self.default instead of raising.
        return dict.get(self, key, self.default)
# Some utility functions to deal with weirdness around UCS2 vs UCS4
# python builds
def isSurrogatePair(data):
    """Return True iff *data* is exactly a UTF-16 high surrogate followed
    by a low surrogate."""
    if len(data) != 2:
        return False
    high, low = ord(data[0]), ord(data[1])
    return 0xD800 <= high <= 0xDBFF and 0xDC00 <= low <= 0xDFFF
def surrogatePairToCodepoint(data):
    """Combine a UTF-16 surrogate pair into the single code point it encodes."""
    high = ord(data[0]) - 0xD800
    low = ord(data[1]) - 0xDC00
    return 0x10000 + high * 0x400 + low
# Module Factory Factory (no, this isn't Java, I know)
# Here to stop this being duplicated all over the place.
def moduleFactoryFactory(factory):
    """Wrap *factory* so the module synthesized for a given base module is
    built once and reused on subsequent calls."""
    moduleCache = {}

    def moduleFactory(baseModule, *args, **kwargs):
        # Pick a str vs bytes cache key depending on whether this
        # interpreter's module names are unicode (Py3) or bytes (Py2).
        if isinstance(ModuleType.__name__, type("")):
            name = "_%s_factory" % baseModule.__name__
        else:
            name = b"_%s_factory" % baseModule.__name__
        if name not in moduleCache:
            mod = ModuleType(name)
            mod.__dict__.update(factory(baseModule, *args, **kwargs))
            moduleCache[name] = mod
        return moduleCache[name]

    return moduleFactory
| mpl-2.0 |
mszewczy/odoo | addons/hr_timesheet_sheet/hr_timesheet_sheet.py | 72 | 35950 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from pytz import timezone
import pytz
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
class hr_timesheet_sheet(osv.osv):
    """Employee timesheet sheet: groups analytic timesheet lines and
    attendances over a period and carries them through an approval
    workflow (states: new -> draft -> confirm -> done)."""
    _name = "hr_timesheet_sheet.sheet"
    _inherit = "mail.thread"
    _table = 'hr_timesheet_sheet_sheet'
    _order = "id desc"
    _description="Timesheet"
    def _total(self, cr, uid, ids, name, args, context=None):
        """ Compute the attendances, analytic lines timesheets and differences between them
            for all the days of a timesheet and the current day
        """
        res = {}
        for sheet in self.browse(cr, uid, ids, context=context or {}):
            res.setdefault(sheet.id, {
                'total_attendance': 0.0,
                'total_timesheet': 0.0,
                'total_difference': 0.0,
            })
            # Daily figures come from the hr_timesheet_sheet.sheet.day view.
            for period in sheet.period_ids:
                res[sheet.id]['total_attendance'] += period.total_attendance
                res[sheet.id]['total_timesheet'] += period.total_timesheet
                res[sheet.id]['total_difference'] += period.total_attendance - period.total_timesheet
        return res
    def check_employee_attendance_state(self, cr, uid, sheet_id, context=None):
        """Ensure the sheet has as many sign-ins as sign-outs before validation."""
        ids_signin = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_in')])
        ids_signout = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_out')])
        if len(ids_signin) != len(ids_signout):
            raise osv.except_osv(('Warning!'),_('The timesheet cannot be validated as it does not contain an equal number of sign ins and sign outs.'))
        return True
    def copy(self, cr, uid, ids, *args, **argv):
        """Duplicating a timesheet is forbidden."""
        raise osv.except_osv(_('Error!'), _('You cannot duplicate a timesheet.'))
    def create(self, cr, uid, vals, context=None):
        """Create a sheet; the employee must be linked to a user, a product
        and an analytic journal."""
        if 'employee_id' in vals:
            if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id:
                raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link him/her to a user.'))
            if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
                raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product, like \'Consultant\'.'))
            if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
                raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
        if vals.get('attendances_ids'):
            # If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
            vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
        return super(hr_timesheet_sheet, self).create(cr, uid, vals, context=context)
    def write(self, cr, uid, ids, vals, context=None):
        """Write on sheets; revalidates the employee's user/product/journal
        links, the no-overlap constraint, and the sign-in/out alternation."""
        if 'employee_id' in vals:
            new_user_id = self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id.id or False
            if not new_user_id:
                raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link him/her to a user.'))
            if not self._sheet_date(cr, uid, ids, forced_user_id=new_user_id, context=context):
                raise osv.except_osv(_('Error!'), _('You cannot have 2 timesheets that overlap!\nYou should use the menu \'My Timesheet\' to avoid this problem.'))
            if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
                raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product.'))
            if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
                raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
        if vals.get('attendances_ids'):
            # If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
            # In addition to the date order, deleting attendances are done before inserting attendances
            vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
        res = super(hr_timesheet_sheet, self).write(cr, uid, ids, vals, context=context)
        if vals.get('attendances_ids'):
            for timesheet in self.browse(cr, uid, ids):
                if not self.pool['hr.attendance']._altern_si_so(cr, uid, [att.id for att in timesheet.attendances_ids]):
                    raise osv.except_osv(_('Warning !'), _('Error ! Sign in (resp. Sign out) must follow Sign out (resp. Sign in)'))
        return res
    def sort_attendances(self, cr, uid, attendance_tuples, context=None):
        """Order one2many command tuples so deletions (commands 2/3) come
        first, then create/update/link commands (0/1/4) sorted by attendance
        date ascending, to satisfy the sign-in/sign-out alternation check."""
        date_attendances = []
        for att_tuple in attendance_tuples:
            if att_tuple[0] in [0,1,4]:
                if att_tuple[0] in [0,1]:
                    if att_tuple[2] and att_tuple[2].has_key('name'):
                        name = att_tuple[2]['name']
                    else:
                        name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
                else:
                    name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
                date_attendances.append((1, name, att_tuple))
            elif att_tuple[0] in [2,3]:
                date_attendances.append((0, self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name, att_tuple))
            else:
                date_attendances.append((0, False, att_tuple))
        # Sorting on (priority, date): 0-priority deletions sort first.
        date_attendances.sort()
        return [att[2] for att in date_attendances]
    def button_confirm(self, cr, uid, ids, context=None):
        """Confirm the sheet: subscribe the employee's manager, check the
        sign-in/out balance, and require the attendance/timesheet difference
        to stay below the company's configured maximum."""
        for sheet in self.browse(cr, uid, ids, context=context):
            if sheet.employee_id and sheet.employee_id.parent_id and sheet.employee_id.parent_id.user_id:
                self.message_subscribe_users(cr, uid, [sheet.id], user_ids=[sheet.employee_id.parent_id.user_id.id], context=context)
            self.check_employee_attendance_state(cr, uid, sheet.id, context=context)
            di = sheet.user_id.company_id.timesheet_max_difference
            if (abs(sheet.total_difference) < di) or not di:
                sheet.signal_workflow('confirm')
            else:
                raise osv.except_osv(_('Warning!'), _('Please verify that the total difference of the sheet is lower than %.2f.') %(di,))
        return True
    def attendance_action_change(self, cr, uid, ids, context=None):
        """Toggle sign in/out for the (distinct) employees of these sheets."""
        hr_employee = self.pool.get('hr.employee')
        employee_ids = []
        for sheet in self.browse(cr, uid, ids, context=context):
            if sheet.employee_id.id not in employee_ids: employee_ids.append(sheet.employee_id.id)
        return hr_employee.attendance_action_change(cr, uid, employee_ids, context=context)
    def _count_all(self, cr, uid, ids, field_name, arg, context=None):
        """Counts shown on the form's stat buttons: timesheet activities and
        attendances linked to each sheet."""
        Timesheet = self.pool['hr.analytic.timesheet']
        Attendance = self.pool['hr.attendance']
        return {
            sheet_id: {
                'timesheet_activity_count': Timesheet.search_count(cr,uid, [('sheet_id','=', sheet_id)], context=context),
                'attendance_count': Attendance.search_count(cr,uid, [('sheet_id', '=', sheet_id)], context=context)
            }
            for sheet_id in ids
        }
    _columns = {
        'name': fields.char('Note', select=1,
                            states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
        'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
        'user_id': fields.related('employee_id', 'user_id', type="many2one", relation="res.users", store=True, string="User", required=False, readonly=True),#fields.many2one('res.users', 'User', required=True, select=1, states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
        'date_from': fields.date('Date from', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
        'date_to': fields.date('Date to', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
        'timesheet_ids' : fields.one2many('hr.analytic.timesheet', 'sheet_id',
            'Timesheet lines',
            readonly=True, states={
                'draft': [('readonly', False)],
                'new': [('readonly', False)]}
            ),
        'attendances_ids' : fields.one2many('hr.attendance', 'sheet_id', 'Attendances'),
        'state' : fields.selection([
            ('new', 'New'),
            ('draft','Open'),
            ('confirm','Waiting Approval'),
            ('done','Approved')], 'Status', select=True, required=True, readonly=True,
            help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed timesheet. \
                \n* The \'Confirmed\' status is used for to confirm the timesheet by user. \
                \n* The \'Done\' status is used when users timesheet is accepted by his/her senior.'),
        'state_attendance' : fields.related('employee_id', 'state', type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Current Status', readonly=True),
        'total_attendance': fields.function(_total, method=True, string='Total Attendance', multi="_total"),
        'total_timesheet': fields.function(_total, method=True, string='Total Timesheet', multi="_total"),
        'total_difference': fields.function(_total, method=True, string='Difference', multi="_total"),
        'period_ids': fields.one2many('hr_timesheet_sheet.sheet.day', 'sheet_id', 'Period', readonly=True),
        'account_ids': fields.one2many('hr_timesheet_sheet.sheet.account', 'sheet_id', 'Analytic accounts', readonly=True),
        'company_id': fields.many2one('res.company', 'Company'),
        'department_id':fields.many2one('hr.department','Department'),
        'timesheet_activity_count': fields.function(_count_all, type='integer', string='Timesheet Activities', multi=True),
        'attendance_count': fields.function(_count_all, type='integer', string="Attendances", multi=True),
    }
    def _default_date_from(self, cr, uid, context=None):
        """Start of the current period, per the company's timesheet_range
        setting (month/week/year); falls back to today."""
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        r = user.company_id and user.company_id.timesheet_range or 'month'
        if r=='month':
            return time.strftime('%Y-%m-01')
        elif r=='week':
            return (datetime.today() + relativedelta(weekday=0, days=-6)).strftime('%Y-%m-%d')
        elif r=='year':
            return time.strftime('%Y-01-01')
        return fields.date.context_today(self, cr, uid, context)
    def _default_date_to(self, cr, uid, context=None):
        """End of the current period, per the company's timesheet_range
        setting (month/week/year); falls back to today."""
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        r = user.company_id and user.company_id.timesheet_range or 'month'
        if r=='month':
            return (datetime.today() + relativedelta(months=+1,day=1,days=-1)).strftime('%Y-%m-%d')
        elif r=='week':
            return (datetime.today() + relativedelta(weekday=6)).strftime('%Y-%m-%d')
        elif r=='year':
            return time.strftime('%Y-12-31')
        return fields.date.context_today(self, cr, uid, context)
    def _default_employee(self, cr, uid, context=None):
        """Employee record linked to the current user, if any."""
        emp_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id','=',uid)], context=context)
        return emp_ids and emp_ids[0] or False
    _defaults = {
        'date_from' : _default_date_from,
        'date_to' : _default_date_to,
        'state': 'new',
        'employee_id': _default_employee,
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'hr_timesheet_sheet.sheet', context=c)
    }
    def _sheet_date(self, cr, uid, ids, forced_user_id=False, context=None):
        """Constraint helper: a user may not have two sheets whose date
        ranges overlap. Returns False when an overlapping sheet exists."""
        for sheet in self.browse(cr, uid, ids, context=context):
            new_user_id = forced_user_id or sheet.employee_id.user_id and sheet.employee_id.user_id.id
            if new_user_id:
                cr.execute('SELECT id \
                    FROM hr_timesheet_sheet_sheet \
                    WHERE (date_from <= %s and %s <= date_to) \
                        AND user_id=%s \
                        AND id <> %s',(sheet.date_to, sheet.date_from, new_user_id, sheet.id))
                if cr.fetchall():
                    return False
        return True
    _constraints = [
        (_sheet_date, 'You cannot have 2 timesheets that overlap!\nPlease use the menu \'My Current Timesheet\' to avoid this problem.', ['date_from','date_to']),
    ]
    def action_set_to_draft(self, cr, uid, ids, *args):
        """Reset sheets to draft and restart their workflow instance."""
        self.write(cr, uid, ids, {'state': 'draft'})
        self.create_workflow(cr, uid, ids)
        return True
    def name_get(self, cr, uid, ids, context=None):
        """Display sheets as 'Week NN' based on their start date."""
        if not ids:
            return []
        if isinstance(ids, (long, int)):
            ids = [ids]
        return [(r['id'], _('Week ')+datetime.strptime(r['date_from'], '%Y-%m-%d').strftime('%U')) \
                for r in self.read(cr, uid, ids, ['date_from'],
                                   context=context, load='_classic_write')]
    def unlink(self, cr, uid, ids, context=None):
        """Refuse deletion of confirmed/approved sheets or sheets that still
        have attendance; delete the linked timesheet lines first."""
        sheets = self.read(cr, uid, ids, ['state','total_attendance'], context=context)
        for sheet in sheets:
            if sheet['state'] in ('confirm', 'done'):
                raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which is already confirmed.'))
            elif sheet['total_attendance'] <> 0.00:
                raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which have attendance entries.'))
        toremove = []
        analytic_timesheet = self.pool.get('hr.analytic.timesheet')
        for sheet in self.browse(cr, uid, ids, context=context):
            for timesheet in sheet.timesheet_ids:
                toremove.append(timesheet.id)
        analytic_timesheet.unlink(cr, uid, toremove, context=context)
        return super(hr_timesheet_sheet, self).unlink(cr, uid, ids, context=context)
    def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
        """Propagate department and user from the selected employee."""
        department_id = False
        user_id = False
        if employee_id:
            empl_id = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
            department_id = empl_id.department_id.id
            user_id = empl_id.user_id.id
        return {'value': {'department_id': department_id, 'user_id': user_id,}}
    # ------------------------------------------------
    # OpenChatter methods and notifications
    # ------------------------------------------------
    def _needaction_domain_get(self, cr, uid, context=None):
        """Needaction badge for managers: confirmed sheets of subordinates."""
        emp_obj = self.pool.get('hr.employee')
        empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
        if not empids:
            return False
        dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
        return dom
class account_analytic_line(osv.osv):
    _inherit = "account.analytic.line"

    def _get_default_date(self, cr, uid, context=None):
        """Default line date: the standard default (today), clamped into
        the timesheet period carried by the context, if any."""
        if context is None:
            context = {}
        default = super(account_analytic_line, self)._get_default_date(cr, uid, context=context)
        date_from = context.get('timesheet_date_from')
        date_to = context.get('timesheet_date_to')
        if not (date_from and date_to):
            # No timesheet boundaries in context: keep the standard default.
            return default
        if date_from <= default <= date_to:
            return default
        # Today falls outside the timesheet: use the period start instead.
        return date_from
class account_analytic_account(osv.osv):
    _inherit = "account.analytic.account"

    def name_create(self, cr, uid, name, context=None):
        """Quick-create an analytic account from just its name.

        Bypass the standard name_create only for timesheet accounts when
        no contract template is enforced for the current user.
        """
        if context is None:
            context = {}
        template_required = self.pool['res.users'].has_group(
            cr, uid, 'account_analytic_analysis.group_template_required')
        if context.get('default_use_timesheets') and not template_required:
            new_id = self.create(cr, uid, {self._rec_name: name}, context)
            return self.name_get(cr, uid, [new_id], context)[0]
        return super(account_analytic_account, self).name_create(cr, uid, name, context=context)
class hr_timesheet_line(osv.osv):
    _inherit = "hr.analytic.timesheet"
    def _sheet(self, cursor, user, ids, name, args, context=None):
        """Function field: the open (draft/new) sheet whose period covers
        each timesheet line's date for the line's user."""
        sheet_obj = self.pool.get('hr_timesheet_sheet.sheet')
        res = {}.fromkeys(ids, False)
        for ts_line in self.browse(cursor, user, ids, context=context):
            sheet_ids = sheet_obj.search(cursor, user,
                [('date_to', '>=', ts_line.date), ('date_from', '<=', ts_line.date),
                 ('employee_id.user_id', '=', ts_line.user_id.id),
                 ('state', 'in', ['draft', 'new'])],
                context=context)
            if sheet_ids:
            # [0] because only one sheet possible for an employee between 2 dates
                res[ts_line.id] = sheet_obj.name_get(cursor, user, sheet_ids, context=context)[0]
        return res
    def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
        """Store trigger (on sheets): timesheet line ids whose date/user fall
        inside the given sheets' periods, so their sheet_id is recomputed."""
        ts_line_ids = []
        for ts in self.browse(cr, uid, ids, context=context):
            cr.execute("""
                    SELECT l.id
                        FROM hr_analytic_timesheet l
                    INNER JOIN account_analytic_line al
                        ON (l.line_id = al.id)
                    WHERE %(date_to)s >= al.date
                        AND %(date_from)s <= al.date
                        AND %(user_id)s = al.user_id
                    GROUP BY l.id""", {'date_from': ts.date_from,
                                       'date_to': ts.date_to,
                                       'user_id': ts.employee_id.user_id.id,})
            ts_line_ids.extend([row[0] for row in cr.fetchall()])
        return ts_line_ids
    def _get_account_analytic_line(self, cr, uid, ids, context=None):
        """Store trigger (on analytic lines): matching timesheet line ids."""
        ts_line_ids = self.pool.get('hr.analytic.timesheet').search(cr, uid, [('line_id', 'in', ids)])
        return ts_line_ids
    _columns = {
        'sheet_id': fields.function(_sheet, string='Sheet', select="1",
            type='many2one', relation='hr_timesheet_sheet.sheet', ondelete="cascade",
            store={
                    'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
                    'account.analytic.line': (_get_account_analytic_line, ['user_id', 'date'], 10),
                    'hr.analytic.timesheet': (lambda self,cr,uid,ids,context=None: ids, None, 10),
                  },
            ),
    }
    def write(self, cr, uid, ids, values, context=None):
        """Block writes on lines belonging to a confirmed/approved sheet."""
        if isinstance(ids, (int, long)):
            ids = [ids]
        self._check(cr, uid, ids)
        return super(hr_timesheet_line, self).write(cr, uid, ids, values, context=context)
    def unlink(self, cr, uid, ids, *args, **kwargs):
        """Block deletions of lines belonging to a confirmed/approved sheet."""
        if isinstance(ids, (int, long)):
            ids = [ids]
        self._check(cr, uid, ids)
        return super(hr_timesheet_line,self).unlink(cr, uid, ids,*args, **kwargs)
    def _check(self, cr, uid, ids):
        """Raise if any line's sheet is no longer editable (not draft/new)."""
        for att in self.browse(cr, uid, ids):
            if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
                raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet.'))
        return True
    def multi_on_change_account_id(self, cr, uid, ids, account_ids, context=None):
        """Run on_change_account_id for each account id; returns a dict
        mapping account id -> onchange result."""
        return dict([(el, self.on_change_account_id(cr, uid, ids, el, context.get('user_id', uid))) for el in account_ids])
class hr_attendance(osv.osv):
    _inherit = "hr.attendance"
    def _get_default_date(self, cr, uid, context=None):
        """Default attendance datetime: context date (plus current time) if
        provided, otherwise now."""
        if context is None:
            context = {}
        if 'name' in context:
            return context['name'] + time.strftime(' %H:%M:%S')
        return time.strftime('%Y-%m-%d %H:%M:%S')
    def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
        """Store trigger (on sheets): attendance ids whose date (converted to
        the employee's timezone) falls inside the given sheets' periods."""
        attendance_ids = []
        for ts in self.browse(cr, uid, ids, context=context):
            cr.execute("""
                        SELECT a.id
                          FROM hr_attendance a
                         INNER JOIN hr_employee e
                               INNER JOIN resource_resource r
                                       ON (e.resource_id = r.id)
                            ON (a.employee_id = e.id)
                          LEFT JOIN res_users u
                            ON r.user_id = u.id
                          LEFT JOIN res_partner p
                            ON u.partner_id = p.id
                         WHERE %(date_to)s >= date_trunc('day', a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))
                              AND %(date_from)s <= date_trunc('day', a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))
                              AND %(user_id)s = r.user_id
                         GROUP BY a.id""", {'date_from': ts.date_from,
                                            'date_to': ts.date_to,
                                            'user_id': ts.employee_id.user_id.id,})
            attendance_ids.extend([row[0] for row in cr.fetchall()])
        return attendance_ids
    def _get_attendance_employee_tz(self, cr, uid, employee_id, date, context=None):
        """ Simulate timesheet in employee timezone

        Return the attendance date in string format in the employee
        tz converted from utc timezone as we consider date of employee
        timesheet is in employee timezone
        """
        employee_obj = self.pool['hr.employee']
        tz = False
        if employee_id:
            employee = employee_obj.browse(cr, uid, employee_id, context=context)
            tz = employee.user_id.partner_id.tz
        if not date:
            date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        att_tz = timezone(tz or 'utc')
        attendance_dt = datetime.strptime(date, DEFAULT_SERVER_DATETIME_FORMAT)
        att_tz_dt = pytz.utc.localize(attendance_dt)
        att_tz_dt = att_tz_dt.astimezone(att_tz)
        # We take only the date omitting the hours as we compare with timesheet
        # date_from which is a date format thus using hours would lead to
        # be out of scope of timesheet
        att_tz_date_str = datetime.strftime(att_tz_dt, DEFAULT_SERVER_DATE_FORMAT)
        return att_tz_date_str
    def _get_current_sheet(self, cr, uid, employee_id, date=False, context=None):
        """Return the id of the sheet covering `date` (in the employee's
        timezone) for this employee, or False if none exists."""
        sheet_obj = self.pool['hr_timesheet_sheet.sheet']
        if not date:
            date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        att_tz_date_str = self._get_attendance_employee_tz(
            cr, uid, employee_id,
            date=date, context=context)
        sheet_ids = sheet_obj.search(cr, uid,
            [('date_from', '<=', att_tz_date_str),
             ('date_to', '>=', att_tz_date_str),
             ('employee_id', '=', employee_id)],
            limit=1, context=context)
        return sheet_ids and sheet_ids[0] or False
    def _sheet(self, cursor, user, ids, name, args, context=None):
        """Function field: the sheet each attendance belongs to."""
        res = {}.fromkeys(ids, False)
        for attendance in self.browse(cursor, user, ids, context=context):
            res[attendance.id] = self._get_current_sheet(
                cursor, user, attendance.employee_id.id, attendance.name,
                context=context)
        return res
    _columns = {
        'sheet_id': fields.function(_sheet, string='Sheet',
            type='many2one', relation='hr_timesheet_sheet.sheet',
            store={
                      'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
                      'hr.attendance': (lambda self,cr,uid,ids,context=None: ids, ['employee_id', 'name', 'day'], 10),
                  },
            )
    }
    _defaults = {
        'name': _get_default_date,
    }
    def create(self, cr, uid, vals, context=None):
        """Create an attendance; refuse dates outside the matching sheet's
        period or inside a sheet that is no longer editable."""
        if context is None:
            context = {}
        sheet_id = context.get('sheet_id') or self._get_current_sheet(cr, uid, vals.get('employee_id'), vals.get('name'), context=context)
        if sheet_id:
            att_tz_date_str = self._get_attendance_employee_tz(
                    cr, uid, vals.get('employee_id'),
                    date=vals.get('name'), context=context)
            ts = self.pool.get('hr_timesheet_sheet.sheet').browse(cr, uid, sheet_id, context=context)
            if ts.state not in ('draft', 'new'):
                raise osv.except_osv(_('Error!'), _('You can not enter an attendance in a submitted timesheet. Ask your manager to reset it before adding attendance.'))
            elif ts.date_from > att_tz_date_str or ts.date_to < att_tz_date_str:
                raise osv.except_osv(_('User Error!'), _('You can not enter an attendance date outside the current timesheet dates.'))
        return super(hr_attendance,self).create(cr, uid, vals, context=context)
    def unlink(self, cr, uid, ids, *args, **kwargs):
        """Block deletion of attendances on non-editable sheets."""
        if isinstance(ids, (int, long)):
            ids = [ids]
        self._check(cr, uid, ids)
        return super(hr_attendance,self).unlink(cr, uid, ids,*args, **kwargs)
    def write(self, cr, uid, ids, vals, context=None):
        """Block writes on non-editable sheets; when called from a sheet
        (context['sheet_id']), ensure the attendance stays on that sheet."""
        if context is None:
            context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        self._check(cr, uid, ids)
        res = super(hr_attendance,self).write(cr, uid, ids, vals, context=context)
        if 'sheet_id' in context:
            for attendance in self.browse(cr, uid, ids, context=context):
                if context['sheet_id'] != attendance.sheet_id.id:
                    raise osv.except_osv(_('User Error!'), _('You cannot enter an attendance ' \
                            'date outside the current timesheet dates.'))
        return res
    def _check(self, cr, uid, ids):
        """Raise if any attendance's sheet is no longer editable."""
        for att in self.browse(cr, uid, ids):
            if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
                raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet'))
        return True
class hr_timesheet_sheet_sheet_day(osv.osv):
    # Read-only reporting model backed by a SQL view (_auto = False): one row
    # per (day, sheet) with timesheet vs. attendance totals and their delta.
    _name = "hr_timesheet_sheet.sheet.day"
    _description = "Timesheets by Period"
    _auto = False
    _order='name'
    _columns = {
        'name': fields.date('Date', readonly=True),
        'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True, select="1"),
        'total_timesheet': fields.float('Total Timesheet', readonly=True),
        'total_attendance': fields.float('Attendance', readonly=True),
        'total_difference': fields.float('Difference', readonly=True),
    }
    _depends = {
        'account.analytic.line': ['date', 'unit_amount'],
        'hr.analytic.timesheet': ['line_id', 'sheet_id'],
        'hr.attendance': ['action', 'name', 'sheet_id'],
    }
    def init(self, cr):
        # (Re)create the backing view.  The inner UNION merges:
        #  * timesheet lines grouped per day/sheet (positive ids), and
        #  * attendances, converted to the employee's timezone and signed
        #    -/+ for sign_in/sign_out (negated ids to avoid collisions).
        # An unbalanced sign_in/sign_out count ("orphan" attendance) is closed
        # at "now" for the current day, or at midnight (1440 min) otherwise.
        cr.execute("""create or replace view hr_timesheet_sheet_sheet_day as
            SELECT
                id,
                name,
                sheet_id,
                total_timesheet,
                total_attendance,
                cast(round(cast(total_attendance - total_timesheet as Numeric),2) as Double Precision) AS total_difference
            FROM
                ((
                    SELECT
                        MAX(id) as id,
                        name,
                        sheet_id,
                        timezone,
                        SUM(total_timesheet) as total_timesheet,
                        CASE WHEN SUM(orphan_attendances) != 0
                            THEN (SUM(total_attendance) +
                                CASE WHEN current_date <> name
                                    THEN 1440
                                    ELSE (EXTRACT(hour FROM current_time AT TIME ZONE 'UTC' AT TIME ZONE coalesce(timezone, 'UTC')) * 60) + EXTRACT(minute FROM current_time AT TIME ZONE 'UTC' AT TIME ZONE coalesce(timezone, 'UTC'))
                                END
                                )
                            ELSE SUM(total_attendance)
                        END /60 as total_attendance
                    FROM
                        ((
                            select
                                min(hrt.id) as id,
                                p.tz as timezone,
                                l.date::date as name,
                                s.id as sheet_id,
                                sum(l.unit_amount) as total_timesheet,
                                0 as orphan_attendances,
                                0.0 as total_attendance
                            from
                                hr_analytic_timesheet hrt
                                JOIN account_analytic_line l ON l.id = hrt.line_id
                                LEFT JOIN hr_timesheet_sheet_sheet s ON s.id = hrt.sheet_id
                                JOIN hr_employee e ON s.employee_id = e.id
                                JOIN resource_resource r ON e.resource_id = r.id
                                LEFT JOIN res_users u ON r.user_id = u.id
                                LEFT JOIN res_partner p ON u.partner_id = p.id
                            group by l.date::date, s.id, timezone
                        ) union (
                            select
                                -min(a.id) as id,
                                p.tz as timezone,
                                (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))::date as name,
                                s.id as sheet_id,
                                0.0 as total_timesheet,
                                SUM(CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END) as orphan_attendances,
                                SUM(((EXTRACT(hour FROM (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))) * 60) + EXTRACT(minute FROM (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC')))) * (CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END)) as total_attendance
                            from
                                hr_attendance a
                                LEFT JOIN hr_timesheet_sheet_sheet s
                                ON s.id = a.sheet_id
                                JOIN hr_employee e
                                ON a.employee_id = e.id
                                JOIN resource_resource r
                                ON e.resource_id = r.id
                                LEFT JOIN res_users u
                                ON r.user_id = u.id
                                LEFT JOIN res_partner p
                                ON u.partner_id = p.id
                            WHERE action in ('sign_in', 'sign_out')
                            group by (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))::date, s.id, timezone
                        )) AS foo
                        GROUP BY name, sheet_id, timezone
                )) AS bar""")
class hr_timesheet_sheet_sheet_account(osv.osv):
    # Read-only reporting model backed by a SQL view (_auto = False):
    # timesheet hours grouped by analytic account, sheet and invoice rate.
    _name = "hr_timesheet_sheet.sheet.account"
    _description = "Timesheets by Period"
    _auto = False
    _order='name'
    _columns = {
        'name': fields.many2one('account.analytic.account', 'Project / Analytic Account', readonly=True),
        'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True),
        'total': fields.float('Total Time', digits=(16,2), readonly=True),
        'invoice_rate': fields.many2one('hr_timesheet_invoice.factor', 'Invoice rate', readonly=True),
    }
    _depends = {
        'account.analytic.line': ['account_id', 'date', 'to_invoice', 'unit_amount', 'user_id'],
        'hr.analytic.timesheet': ['line_id'],
        'hr_timesheet_sheet.sheet': ['date_from', 'date_to', 'user_id'],
    }
    def init(self, cr):
        # A sheet is matched to each analytic line by user and by the line's
        # date falling inside the sheet period; LEFT JOINs keep lines without
        # a matching sheet (sheet_id NULL) in the result.
        cr.execute("""create or replace view hr_timesheet_sheet_sheet_account as (
            select
                min(hrt.id) as id,
                l.account_id as name,
                s.id as sheet_id,
                sum(l.unit_amount) as total,
                l.to_invoice as invoice_rate
            from
                hr_analytic_timesheet hrt
                    left join (account_analytic_line l
                        LEFT JOIN hr_timesheet_sheet_sheet s
                            ON (s.date_to >= l.date
                                AND s.date_from <= l.date
                                AND s.user_id = l.user_id))
                        on (l.id = hrt.line_id)
            group by l.account_id, s.id, l.to_invoice
        )""")
class res_company(osv.osv):
    """Company settings controlling timesheet validation."""
    _inherit = 'res.company'

    _columns = {
        'timesheet_range': fields.selection(
            [('day', 'Day'), ('week', 'Week'), ('month', 'Month')],
            'Timesheet range',
            help="Periodicity on which you validate your timesheets."),
        'timesheet_max_difference': fields.float(
            'Timesheet allowed difference(Hours)',
            help="Allowed difference in hours between the sign in/out and the timesheet "
                 "computation for one sheet. Set this to 0 if you do not want any control."),
    }
    _defaults = {
        'timesheet_range': lambda *a: 'week',
        'timesheet_max_difference': lambda *a: 0.0,
    }
class hr_employee(osv.osv):
    """Employee, extended with a timesheet counter."""
    _inherit = 'hr.employee'
    _description = 'Employee'

    def _timesheet_count(self, cr, uid, ids, field_name, arg, context=None):
        # One search_count per employee: number of timesheets linked to it.
        sheet_obj = self.pool['hr_timesheet_sheet.sheet']
        counts = {}
        for employee_id in ids:
            counts[employee_id] = sheet_obj.search_count(
                cr, uid, [('employee_id', '=', employee_id)], context=context)
        return counts

    _columns = {
        'timesheet_count': fields.function(_timesheet_count, type='integer', string='Timesheets'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
skycucumber/restful | python/venv/lib/python2.7/site-packages/sqlalchemy/ext/instrumentation.py | 56 | 14646 | """Extensible class instrumentation.
The :mod:`sqlalchemy.ext.instrumentation` package provides for alternate
systems of class instrumentation within the ORM. Class instrumentation
refers to how the ORM places attributes on the class which maintain
data and track changes to that data, as well as event hooks installed
on the class.
.. note::
The extension package is provided for the benefit of integration
with other object management packages, which already perform
their own instrumentation. It is not intended for general use.
For examples of how the instrumentation extension is used,
see the example :ref:`examples_instrumentation`.
.. versionchanged:: 0.8
The :mod:`sqlalchemy.orm.instrumentation` was split out so
that all functionality having to do with non-standard
instrumentation was moved out to :mod:`sqlalchemy.ext.instrumentation`.
When imported, the module installs itself within
:mod:`sqlalchemy.orm.instrumentation` so that it
takes effect, including recognition of
``__sa_instrumentation_manager__`` on mapped classes, as
well :data:`.instrumentation_finders`
being used to determine class instrumentation resolution.
"""
from ..orm import instrumentation as orm_instrumentation
from ..orm.instrumentation import (
ClassManager, InstrumentationFactory, _default_state_getter,
_default_dict_getter, _default_manager_getter
)
from ..orm import attributes, collections, base as orm_base
from .. import util
from ..orm import exc as orm_exc
import weakref
INSTRUMENTATION_MANAGER = '__sa_instrumentation_manager__'
"""Attribute, elects custom instrumentation when present on a mapped class.
Allows a class to specify a slightly or wildly different technique for
tracking changes made to mapped attributes and collections.
Only one instrumentation implementation is allowed in a given object
inheritance hierarchy.
The value of this attribute must be a callable and will be passed a class
object. The callable must return one of:
- An instance of an InstrumentationManager or subclass
- An object implementing all or some of InstrumentationManager (TODO)
- A dictionary of callables, implementing all or some of the above (TODO)
- An instance of a ClassManager or subclass
This attribute is consulted by SQLAlchemy instrumentation
resolution, once the :mod:`sqlalchemy.ext.instrumentation` module
has been imported. If custom finders are installed in the global
instrumentation_finders list, they may or may not choose to honor this
attribute.
"""
def find_native_user_instrumentation_hook(cls):
    """Return the user-specified instrumentation factory for *cls*, if any.

    Looks up the ``__sa_instrumentation_manager__`` attribute (including
    inherited ones) and returns ``None`` when it is absent.
    """
    try:
        return getattr(cls, INSTRUMENTATION_MANAGER)
    except AttributeError:
        return None
instrumentation_finders = [find_native_user_instrumentation_hook]
"""An extensible sequence of callables which return instrumentation
implementations
When a class is registered, each callable will be passed a class object.
If None is returned, the
next finder in the sequence is consulted. Otherwise the return must be an
instrumentation factory that follows the same guidelines as
sqlalchemy.ext.instrumentation.INSTRUMENTATION_MANAGER.
By default, the only finder is find_native_user_instrumentation_hook, which
searches for INSTRUMENTATION_MANAGER. If all finders return None, standard
ClassManager instrumentation is used.
"""
class ExtendedInstrumentationRegistry(InstrumentationFactory):
    """Extends :class:`.InstrumentationFactory` with additional
    bookkeeping, to accommodate multiple types of
    class managers.

    """

    # Weak-keyed maps: entries vanish automatically when the mapped class is
    # itself garbage collected.
    _manager_finders = weakref.WeakKeyDictionary()
    _state_finders = weakref.WeakKeyDictionary()
    _dict_finders = weakref.WeakKeyDictionary()
    # Flips to True once any custom (non-ClassManager) factory is installed.
    _extended = False

    def _locate_extended_factory(self, class_):
        """Return (manager, factory) elected by ``instrumentation_finders``
        for *class_*, or (None, None) when no finder claims it."""
        for finder in instrumentation_finders:
            factory = finder(class_)
            if factory is not None:
                manager = self._extended_class_manager(class_, factory)
                return manager, factory
        else:
            # for/else: reached only when every finder returned None
            return None, None

    def _check_conflicts(self, class_, factory):
        """Raise TypeError when *class_*'s hierarchy already uses an
        instrumentation factory other than *factory*."""
        existing_factories = self._collect_management_factories_for(class_).\
            difference([factory])
        if existing_factories:
            raise TypeError(
                "multiple instrumentation implementations specified "
                "in %s inheritance hierarchy: %r" % (
                    class_.__name__, list(existing_factories)))

    def _extended_class_manager(self, class_, factory):
        """Build a manager for *class_* via *factory*, adapting
        non-ClassManager results and recording its getter callables."""
        manager = factory(class_)
        if not isinstance(manager, ClassManager):
            manager = _ClassInstrumentationAdapter(class_, manager)

        if factory != ClassManager and not self._extended:
            # somebody invoked a custom ClassManager.
            # reinstall global "getter" functions with the more
            # expensive ones.
            self._extended = True
            _install_instrumented_lookups()

        self._manager_finders[class_] = manager.manager_getter()
        self._state_finders[class_] = manager.state_getter()
        self._dict_finders[class_] = manager.dict_getter()
        return manager

    def _collect_management_factories_for(self, cls):
        """Return a collection of factories in play or specified for a
        hierarchy.

        Traverses the entire inheritance graph of a cls and returns a
        collection of instrumentation factories for those classes. Factories
        are extracted from active ClassManagers, if available, otherwise
        instrumentation_finders is consulted.

        """
        hierarchy = util.class_hierarchy(cls)
        factories = set()
        for member in hierarchy:
            manager = self.manager_of_class(member)
            if manager is not None:
                factories.add(manager.factory)
            else:
                for finder in instrumentation_finders:
                    factory = finder(member)
                    if factory is not None:
                        break
                else:
                    factory = None
                factories.add(factory)
        factories.discard(None)
        return factories

    def unregister(self, class_):
        """Drop per-class finder entries, then delegate to the base class."""
        if class_ in self._manager_finders:
            del self._manager_finders[class_]
            del self._state_finders[class_]
            del self._dict_finders[class_]
        super(ExtendedInstrumentationRegistry, self).unregister(class_)

    def manager_of_class(self, cls):
        """Return the class manager for *cls*, using the default getter for
        classes that were never registered with a custom finder."""
        if cls is None:
            return None
        return self._manager_finders.get(cls, _default_manager_getter)(cls)

    def state_of(self, instance):
        """Return the persistence state of *instance* (raises on None)."""
        if instance is None:
            raise AttributeError("None has no persistent state.")
        return self._state_finders.get(
            instance.__class__, _default_state_getter)(instance)

    def dict_of(self, instance):
        """Return the instance dictionary of *instance* (raises on None)."""
        if instance is None:
            raise AttributeError("None has no persistent state.")
        return self._dict_finders.get(
            instance.__class__, _default_dict_getter)(instance)
# Importing this module swaps the ORM's default factory for the extended
# registry and shares the finder list, globally activating extended lookup.
orm_instrumentation._instrumentation_factory = \
    _instrumentation_factory = ExtendedInstrumentationRegistry()
orm_instrumentation.instrumentation_finders = instrumentation_finders
class InstrumentationManager(object):
    """User-defined class instrumentation extension.

    Subclass this to change how class instrumentation proceeds.  It exists
    for integration with external object-management frameworks that want to
    replace the ORM's instrumentation methodology wholesale, and is not
    intended for everyday use.  For merely intercepting instrumentation
    events, see :class:`.InstrumentationEvents`.

    The API should be considered semi-stable and may change slightly with
    new releases.

    .. versionchanged:: 0.8
        :class:`.InstrumentationManager` was moved from
        :mod:`sqlalchemy.orm.instrumentation` to
        :mod:`sqlalchemy.ext.instrumentation`.

    """

    # r4361 added a mandatory (cls) constructor to this interface.
    # given that, perhaps class_ should be dropped from all of these
    # signatures.

    def __init__(self, class_):
        pass

    def manage(self, class_, manager):
        # Stash the manager on the class under a well-known attribute.
        class_._default_class_manager = manager

    def dispose(self, class_, manager):
        del class_._default_class_manager

    def manager_getter(self, class_):
        def _get_manager(cls):
            return cls._default_class_manager
        return _get_manager

    def instrument_attribute(self, class_, key, inst):
        pass

    def post_configure_attribute(self, class_, key, inst):
        pass

    def install_descriptor(self, class_, key, inst):
        setattr(class_, key, inst)

    def uninstall_descriptor(self, class_, key):
        delattr(class_, key)

    def install_member(self, class_, key, implementation):
        setattr(class_, key, implementation)

    def uninstall_member(self, class_, key):
        delattr(class_, key)

    def instrument_collection_class(self, class_, key, collection_class):
        return collections.prepare_instrumentation(collection_class)

    def get_instance_dict(self, class_, instance):
        return vars(instance)

    def initialize_instance_dict(self, class_, instance):
        pass

    def install_state(self, class_, instance, state):
        instance._default_state = state

    def remove_state(self, class_, instance):
        del instance._default_state

    def state_getter(self, class_):
        def _get_state(instance):
            return instance._default_state
        return _get_state

    def dict_getter(self, class_):
        def _get_dict(inst):
            return self.get_instance_dict(class_, inst)
        return _get_dict
class _ClassInstrumentationAdapter(ClassManager):
    """Adapts a user-defined InstrumentationManager to a ClassManager."""

    def __init__(self, class_, override):
        # Capture the adapted manager and its getters *before* ClassManager
        # initialization, which may already need them.
        self._adapted = override
        self._get_state = self._adapted.state_getter(class_)
        self._get_dict = self._adapted.dict_getter(class_)
        ClassManager.__init__(self, class_)

    def manage(self):
        self._adapted.manage(self.class_, self)

    def dispose(self):
        # NOTE(review): only class_ is forwarded here although
        # InstrumentationManager.dispose accepts (class_, manager) —
        # confirm custom managers tolerate the single-argument call.
        self._adapted.dispose(self.class_)

    def manager_getter(self):
        return self._adapted.manager_getter(self.class_)

    def instrument_attribute(self, key, inst, propagated=False):
        """Instrument via the base, delegating to the adapted manager only
        for directly-declared (non-propagated) attributes."""
        ClassManager.instrument_attribute(self, key, inst, propagated)
        if not propagated:
            self._adapted.instrument_attribute(self.class_, key, inst)

    def post_configure_attribute(self, key):
        super(_ClassInstrumentationAdapter, self).post_configure_attribute(key)
        self._adapted.post_configure_attribute(self.class_, key, self[key])

    def install_descriptor(self, key, inst):
        self._adapted.install_descriptor(self.class_, key, inst)

    def uninstall_descriptor(self, key):
        self._adapted.uninstall_descriptor(self.class_, key)

    def install_member(self, key, implementation):
        self._adapted.install_member(self.class_, key, implementation)

    def uninstall_member(self, key):
        self._adapted.uninstall_member(self.class_, key)

    def instrument_collection_class(self, key, collection_class):
        return self._adapted.instrument_collection_class(
            self.class_, key, collection_class)

    def initialize_collection(self, key, state, factory):
        # Optional hook: fall back to the stock implementation when the
        # adapted manager does not provide initialize_collection.
        delegate = getattr(self._adapted, 'initialize_collection', None)
        if delegate:
            return delegate(key, state, factory)
        else:
            return ClassManager.initialize_collection(self, key,
                                                      state, factory)

    def new_instance(self, state=None):
        instance = self.class_.__new__(self.class_)
        self.setup_instance(instance, state)
        return instance

    def _new_state_if_none(self, instance):
        """Install a default InstanceState if none is present.

        A private convenience method used by the __init__ decorator.
        """
        if self.has_state(instance):
            return False
        else:
            return self.setup_instance(instance)

    def setup_instance(self, instance, state=None):
        self._adapted.initialize_instance_dict(self.class_, instance)

        if state is None:
            state = self._state_constructor(instance, self)

        # the given instance is assumed to have no state
        self._adapted.install_state(self.class_, instance, state)
        return state

    def teardown_instance(self, instance):
        self._adapted.remove_state(self.class_, instance)

    def has_state(self, instance):
        # EAFP: the adapted state getter raises one of orm_exc.NO_STATE
        # when no state has been installed yet.
        try:
            self._get_state(instance)
        except orm_exc.NO_STATE:
            return False
        else:
            return True

    def state_getter(self):
        return self._get_state

    def dict_getter(self):
        return self._get_dict
def _install_instrumented_lookups():
    """Replace the global class/object management functions with the
    ExtendedInstrumentationRegistry implementations.

    These allow multiple types of class managers to be present, at the cost
    of performance.  Called only by ExtendedInstrumentationRegistry and by
    unit tests specific to this behavior; _reinstall_default_lookups() can
    be called afterwards to re-establish the default functions.
    """
    _install_lookups({
        'instance_state': _instrumentation_factory.state_of,
        'instance_dict': _instrumentation_factory.dict_of,
        'manager_of_class': _instrumentation_factory.manager_of_class,
    })
def _reinstall_default_lookups():
    """Restore the fast, single-implementation lookup functions."""
    _install_lookups({
        'instance_state': _default_state_getter,
        'instance_dict': _default_dict_getter,
        'manager_of_class': _default_manager_getter,
    })
def _install_lookups(lookups):
    """Rebind the instance_state/instance_dict/manager_of_class lookups.

    Updates this module's globals as well as orm.base, orm.attributes and
    orm.instrumentation so every consumer sees the same implementations.
    """
    global instance_state, instance_dict, manager_of_class
    instance_state = lookups['instance_state']
    instance_dict = lookups['instance_dict']
    manager_of_class = lookups['manager_of_class']
    for module in (orm_base, attributes, orm_instrumentation):
        module.instance_state = instance_state
        module.instance_dict = instance_dict
        module.manager_of_class = manager_of_class
| gpl-2.0 |
diedthreetimes/VCrash | pybindgen-0.15.0.795/pybindgen/typehandlers/inttype.py | 1 | 29684 | # docstrings not needed here (the type handler interfaces are fully
# documented in base.py)
# pylint: disable-msg=C0111
import struct
assert struct.calcsize('i') == 4 # assumption is made that sizeof(int) == 4 for all platforms pybindgen runs on
from base import ReturnValue, Parameter, PointerParameter, PointerReturnValue, \
ReverseWrapperBase, ForwardWrapperBase, TypeConfigurationError, NotSupportedError
class IntParam(Parameter):
    """Type handler for plain C ``int`` / ``int32_t`` input parameters."""

    DIRECTIONS = [Parameter.DIRECTION_IN]
    CTYPES = ['int', 'int32_t']

    def convert_c_to_python(self, wrapper):
        # Reverse direction: hand the C value straight to Python via "i".
        assert isinstance(wrapper, ReverseWrapperBase)
        wrapper.build_params.add_parameter('i', [self.value])

    def convert_python_to_c(self, wrapper):
        # Forward direction: parse a Python int into a fresh C variable.
        assert isinstance(wrapper, ForwardWrapperBase)
        c_var = wrapper.declarations.declare_variable(
            self.ctype_no_const, self.name, self.default_value)
        wrapper.parse_params.add_parameter(
            'i', ['&' + c_var], self.name, optional=bool(self.default_value))
        wrapper.call_params.append(c_var)
class UnsignedIntParam(Parameter):
    """Type handler for C ``unsigned int`` / ``uint32_t`` input parameters."""

    DIRECTIONS = [Parameter.DIRECTION_IN]
    CTYPES = ['unsigned int', 'uint32_t']

    def convert_c_to_python(self, wrapper):
        assert isinstance(wrapper, ReverseWrapperBase)
        # Build a PyLong explicitly so values above INT_MAX survive the trip.
        wrapper.build_params.add_parameter('N', ["PyLong_FromUnsignedLong(%s)" % self.value])

    def convert_python_to_c(self, wrapper):
        assert isinstance(wrapper, ForwardWrapperBase)
        c_var = wrapper.declarations.declare_variable(
            'unsigned int', self.name, self.default_value)
        wrapper.parse_params.add_parameter(
            'I', ['&' + c_var], self.name, optional=bool(self.default_value))
        wrapper.call_params.append(c_var)
class UnsignedIntPtrParam(PointerParameter):
    """Type handler for ``unsigned int*`` / ``uint32_t*`` parameters.

    Handles scalar in/out pointers as well as fixed-size arrays (via
    *array_length*), which are mapped to/from Python lists of ints.
    """

    DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_OUT, Parameter.DIRECTION_INOUT]
    CTYPES = ['unsigned int*', 'uint32_t*']

    def __init__(self, ctype, name, direction=Parameter.DIRECTION_IN, is_const=False,
                 default_value=None, transfer_ownership=False, array_length=None):
        """:param array_length: if given, the pointer is treated as a
        fixed-size array of that many elements.
        :raise NotSupportedError: transfer_ownership=True is unimplemented.
        """
        super(UnsignedIntPtrParam, self).__init__(ctype, name, direction, is_const, default_value, transfer_ownership)
        self.array_length = array_length
        if transfer_ownership:
            raise NotSupportedError("%s: transfer_ownership=True not yet implemented." % ctype)

    def convert_c_to_python(self, wrapper):
        if self.direction & self.DIRECTION_IN:
            wrapper.build_params.add_parameter('I', ['*'+self.value])
        if self.direction & self.DIRECTION_OUT:
            wrapper.parse_params.add_parameter('I', [self.value], self.name)

    def convert_python_to_c(self, wrapper):
        if self.array_length is None:
            # Scalar pointer: declare one C variable and pass its address.
            name = wrapper.declarations.declare_variable(str(self.type_traits.target), self.name)
            wrapper.call_params.append('&'+name)
            if self.direction & self.DIRECTION_IN:
                wrapper.parse_params.add_parameter('I', ['&'+name], self.name)
            if self.direction & self.DIRECTION_OUT:
                wrapper.build_params.add_parameter('I', [name])

        else: # complicated code path to deal with arrays...
            name = wrapper.declarations.declare_variable(str(self.type_traits.target), self.name, array="[%i]" % self.array_length)
            py_list = wrapper.declarations.declare_variable("PyObject*", "py_list")
            idx = wrapper.declarations.declare_variable("int", "idx")
            wrapper.call_params.append(name)
            if self.direction & self.DIRECTION_IN:
                elem = wrapper.declarations.declare_variable("PyObject*", "element")
                wrapper.parse_params.add_parameter('O!', ['&PyList_Type', '&'+py_list], self.name)
                wrapper.before_call.write_error_check(
                    'PyList_Size(%s) != %i' % (py_list, self.array_length),
                    'PyErr_SetString(PyExc_TypeError, "Parameter `%s\' must be a list of %i ints/longs");'
                    % (self.name, self.array_length))

                wrapper.before_call.write_code(
                    "for (%s = 0; %s < %i; %s++) {" % (idx, idx, self.array_length, idx))
                wrapper.before_call.indent()
                wrapper.before_call.write_code("%(elem)s = PyList_GET_ITEM(%(py_list)s, %(idx)s);" % vars())
                # BUG FIX: the check expression was emitted without expanding
                # %(elem)s, leaving the literal placeholder in the generated
                # C code; interpolate it like the write_code() calls do.
                wrapper.before_call.write_error_check(
                    '!(PyInt_Check(%(elem)s) || PyLong_Check(%(elem)s))' % vars(),
                    'PyErr_SetString(PyExc_TypeError, "Parameter `%s\' must be a list of %i ints / longs");'
                    % (self.name, self.array_length))
                # BUG FIX: PyLong_AsUnsignedInt does not exist in the CPython
                # C API; the correct conversion is PyLong_AsUnsignedLong.
                wrapper.before_call.write_code("%(name)s[%(idx)s] = PyLong_AsUnsignedLong(%(elem)s);" % vars())
                wrapper.before_call.unindent()
                wrapper.before_call.write_code('}')

            if self.direction & self.DIRECTION_OUT:
                # Marshal the C array back out as a new Python list.
                wrapper.after_call.write_code("%s = PyList_New(%i);" % (py_list, self.array_length))
                wrapper.after_call.write_code(
                    "for (%s = 0; %s < %i; %s++) {" % (idx, idx, self.array_length, idx))
                wrapper.after_call.indent()
                wrapper.after_call.write_code("PyList_SET_ITEM(%(py_list)s, %(idx)s, PyLong_FromUnsignedLong(%(name)s[%(idx)s]));"
                                              % vars())
                wrapper.after_call.unindent()
                wrapper.after_call.write_code('}')
                wrapper.build_params.add_parameter("N", [py_list])
class IntReturn(ReturnValue):
    """Type handler for functions returning C ``int`` / ``int32_t``."""

    CTYPES = ['int', 'int32_t']

    def get_c_error_return(self):
        # Sentinel value the generated wrapper returns on Python exception.
        return "return INT_MIN;"

    def convert_python_to_c(self, wrapper):
        wrapper.parse_params.add_parameter("i", ["&" + self.value], prepend=True)

    def convert_c_to_python(self, wrapper):
        wrapper.build_params.add_parameter("i", [self.value], prepend=True)
class UnsignedIntReturn(ReturnValue):
    """Type handler for functions returning ``unsigned int`` / ``uint32_t``."""

    CTYPES = ['unsigned int', 'uint32_t']

    def get_c_error_return(self):
        return "return 0;"

    def convert_python_to_c(self, wrapper):
        wrapper.parse_params.add_parameter("I", ["&" + self.value], prepend=True)

    def convert_c_to_python(self, wrapper):
        # Build a PyLong explicitly so values above INT_MAX are preserved.
        wrapper.build_params.add_parameter(
            'N', ["PyLong_FromUnsignedLong(%s)" % self.value], prepend=True)
class IntPtrParam(PointerParameter):
    """Type handler for ``int*`` parameters; the direction must be given
    explicitly unless the pointer is const (then it is input-only)."""

    DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_OUT,
                  Parameter.DIRECTION_IN|Parameter.DIRECTION_OUT]
    CTYPES = ['int*']

    def __init__(self, ctype, name, direction=None, is_const=None, transfer_ownership=None):
        # A const int* can only flow in; anything else needs an explicit
        # direction from the caller.
        if direction is None:
            if not is_const:
                raise TypeConfigurationError("direction not given")
            direction = Parameter.DIRECTION_IN
        super(IntPtrParam, self).__init__(ctype, name, direction, is_const, transfer_ownership)

    def convert_c_to_python(self, wrapper):
        if self.direction & self.DIRECTION_IN:
            wrapper.build_params.add_parameter('i', ['*' + self.value])
        if self.direction & self.DIRECTION_OUT:
            wrapper.parse_params.add_parameter('i', [self.value], self.name)

    def convert_python_to_c(self, wrapper):
        # Strip the trailing '*' from the C type to declare a by-value temp,
        # then pass its address to the wrapped function.
        c_var = wrapper.declarations.declare_variable(self.ctype_no_const[:-1], self.name)
        wrapper.call_params.append('&' + c_var)
        if self.direction & self.DIRECTION_IN:
            wrapper.parse_params.add_parameter('i', ['&' + c_var], self.name)
        if self.direction & self.DIRECTION_OUT:
            wrapper.build_params.add_parameter('i', [c_var])
class IntRefParam(Parameter):
    """Type handler for ``int&`` parameters (in, out, or inout)."""

    DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_OUT,
                  Parameter.DIRECTION_IN|Parameter.DIRECTION_OUT]
    CTYPES = ['int&']

    def convert_c_to_python(self, wrapper):
        if self.direction & self.DIRECTION_IN:
            wrapper.build_params.add_parameter('i', [self.value])
        if self.direction & self.DIRECTION_OUT:
            wrapper.parse_params.add_parameter('i', [self.value], self.name)

    def convert_python_to_c(self, wrapper):
        # Strip the trailing '&' from the C type to declare a by-value temp.
        c_var = wrapper.declarations.declare_variable(self.ctype_no_const[:-1], self.name)
        wrapper.call_params.append(c_var)
        if self.direction & self.DIRECTION_IN:
            wrapper.parse_params.add_parameter('i', ['&' + c_var], self.name)
        if self.direction & self.DIRECTION_OUT:
            wrapper.build_params.add_parameter('i', [c_var])
class UnsignedIntRefParam(Parameter):
    """Type handler for ``unsigned int&`` parameters (in, out, or inout)."""

    DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_OUT,
                  Parameter.DIRECTION_IN|Parameter.DIRECTION_OUT]
    CTYPES = ['unsigned int&', 'unsigned &']

    def convert_c_to_python(self, wrapper):
        if self.direction & self.DIRECTION_IN:
            wrapper.build_params.add_parameter('I', [self.value])
        if self.direction & self.DIRECTION_OUT:
            wrapper.parse_params.add_parameter('I', [self.value], self.name)

    def convert_python_to_c(self, wrapper):
        # Strip the trailing '&' from the C type to declare a by-value temp.
        c_var = wrapper.declarations.declare_variable(self.ctype_no_const[:-1], self.name)
        wrapper.call_params.append(c_var)
        if self.direction & self.DIRECTION_IN:
            wrapper.parse_params.add_parameter('I', ['&' + c_var], self.name)
        if self.direction & self.DIRECTION_OUT:
            wrapper.build_params.add_parameter('I', [c_var])
class UInt16Return(ReturnValue):
    """Type handler for functions returning ``uint16_t`` / ``unsigned short``."""

    CTYPES = ['uint16_t', 'unsigned short', 'unsigned short int', 'short unsigned int']

    def get_c_error_return(self):
        # Sentinel value the generated wrapper returns on Python exception.
        return "return 0;"

    def convert_python_to_c(self, wrapper):
        # Parse into a full int first, then range-check before narrowing.
        tmp_var = wrapper.declarations.declare_variable("int", "tmp")
        wrapper.parse_params.add_parameter("i", ["&"+tmp_var], prepend=True)
        # BUG FIX: also reject negative values; only the upper bound was
        # checked before, so negatives silently wrapped on assignment.
        wrapper.after_call.write_error_check('%s > 0xffff || %s < 0' % (tmp_var, tmp_var),
                                             'PyErr_SetString(PyExc_ValueError, "Out of range");')
        wrapper.after_call.write_code(
            "%s = %s;" % (self.value, tmp_var))

    def convert_c_to_python(self, wrapper):
        wrapper.build_params.add_parameter("i", [self.value], prepend=True)
class Int16Return(ReturnValue):
    """Type handler for functions returning C ``int16_t`` / ``short``."""

    CTYPES = ['int16_t', 'short', 'short int']

    def get_c_error_return(self):
        return "return 0;"

    def convert_python_to_c(self, wrapper):
        # Parse into a full int first, then range-check before narrowing.
        tmp = wrapper.declarations.declare_variable("int", "tmp")
        wrapper.parse_params.add_parameter("i", ["&" + tmp], prepend=True)
        wrapper.after_call.write_error_check(
            '%s > 32767 || %s < -32768' % (tmp, tmp),
            'PyErr_SetString(PyExc_ValueError, "Out of range");')
        wrapper.after_call.write_code("%s = %s;" % (self.value, tmp))

    def convert_c_to_python(self, wrapper):
        wrapper.build_params.add_parameter("i", [self.value], prepend=True)
class UInt16Param(Parameter):
    """Type handler for C ``uint16_t`` / ``unsigned short`` input parameters."""

    DIRECTIONS = [Parameter.DIRECTION_IN]
    CTYPES = ['uint16_t', 'unsigned short', 'unsigned short int']

    def convert_c_to_python(self, wrapper):
        assert isinstance(wrapper, ReverseWrapperBase)
        wrapper.build_params.add_parameter('i', ["(int) "+self.value])

    def convert_python_to_c(self, wrapper):
        assert isinstance(wrapper, ForwardWrapperBase)
        name = wrapper.declarations.declare_variable("int", self.name, self.default_value)
        wrapper.parse_params.add_parameter('i', ['&'+name], self.name, optional=bool(self.default_value))
        # BUG FIX: also reject negative values; only the upper bound was
        # checked before, so e.g. -1 silently wrapped to 65535.
        wrapper.before_call.write_error_check('%s > 0xffff || %s < 0' % (name, name),
                                              'PyErr_SetString(PyExc_ValueError, "Out of range");')
        wrapper.call_params.append(name)
class UInt16RefParam(Parameter):
    """Type handler for ``uint16_t&`` / ``unsigned short&`` parameters."""

    DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_INOUT, Parameter.DIRECTION_OUT]
    CTYPES = ['uint16_t&', 'unsigned short&', 'unsigned short int&', 'short unsigned&', 'short unsigned int&']

    def convert_c_to_python(self, wrapper):
        assert isinstance(wrapper, ReverseWrapperBase)
        if self.direction & self.DIRECTION_IN:
            wrapper.build_params.add_parameter('H', [self.value])
        if self.direction & self.DIRECTION_OUT:
            wrapper.parse_params.add_parameter('H', [self.value], self.name)

    def convert_python_to_c(self, wrapper):
        # Strip the trailing '&' from the C type to declare a by-value temp.
        c_var = wrapper.declarations.declare_variable(self.ctype_no_const[:-1], self.name)
        wrapper.call_params.append(c_var)
        if self.direction & self.DIRECTION_IN:
            wrapper.parse_params.add_parameter('H', ['&' + c_var], self.name)
        if self.direction & self.DIRECTION_OUT:
            wrapper.build_params.add_parameter('H', [c_var])
class Int16Param(Parameter):
    """Type handler for C ``int16_t`` / ``short`` input parameters."""

    DIRECTIONS = [Parameter.DIRECTION_IN]
    CTYPES = ['int16_t', 'short', 'short int']

    def convert_c_to_python(self, wrapper):
        assert isinstance(wrapper, ReverseWrapperBase)
        wrapper.build_params.add_parameter('i', ["(int) "+self.value])

    def convert_python_to_c(self, wrapper):
        assert isinstance(wrapper, ForwardWrapperBase)
        name = wrapper.declarations.declare_variable("int", self.name, self.default_value)
        wrapper.parse_params.add_parameter('i', ['&'+name], self.name, optional=bool(self.default_value))
        # BUG FIX: also reject values below the int16 minimum; only the upper
        # bound was checked before (compare Int16Return, which checks both).
        wrapper.before_call.write_error_check('%s > 0x7fff || %s < -0x8000' % (name, name),
                                              'PyErr_SetString(PyExc_ValueError, "Out of range");')
        wrapper.call_params.append(name)
class Int16RefParam(Parameter):
    """Type handler for ``int16_t&`` / ``short&`` parameters."""

    DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_INOUT, Parameter.DIRECTION_OUT]
    CTYPES = ['int16_t&', 'short&', 'short int&']

    def convert_c_to_python(self, wrapper):
        assert isinstance(wrapper, ReverseWrapperBase)
        if self.direction & self.DIRECTION_IN:
            wrapper.build_params.add_parameter('h', [self.value])
        if self.direction & self.DIRECTION_OUT:
            wrapper.parse_params.add_parameter('h', [self.value], self.name)

    def convert_python_to_c(self, wrapper):
        # Strip the trailing '&' from the C type to declare a by-value temp.
        c_var = wrapper.declarations.declare_variable(self.ctype_no_const[:-1], self.name)
        wrapper.call_params.append(c_var)
        if self.direction & self.DIRECTION_IN:
            wrapper.parse_params.add_parameter('h', ['&' + c_var], self.name)
        if self.direction & self.DIRECTION_OUT:
            wrapper.build_params.add_parameter('h', [c_var])
class UInt8Param(Parameter):
    """Type handler for C ``uint8_t`` / ``unsigned char`` input parameters."""

    DIRECTIONS = [Parameter.DIRECTION_IN]
    CTYPES = ['uint8_t', 'unsigned char', 'char unsigned']

    def convert_c_to_python(self, wrapper):
        assert isinstance(wrapper, ReverseWrapperBase)
        wrapper.build_params.add_parameter('i', ["(int) "+self.value])

    def convert_python_to_c(self, wrapper):
        assert isinstance(wrapper, ForwardWrapperBase)
        name = wrapper.declarations.declare_variable("int", self.name, self.default_value)
        wrapper.parse_params.add_parameter('i', ['&'+name], self.name, optional=bool(self.default_value))
        # BUG FIX: also reject negative values; only the upper bound was
        # checked before, so e.g. -1 silently wrapped to 255.
        wrapper.before_call.write_error_check('%s > 0xff || %s < 0' % (name, name),
                                              'PyErr_SetString(PyExc_ValueError, "Out of range");')
        wrapper.call_params.append(name)
class UInt8RefParam(Parameter):
    """Type handler for ``uint8_t&`` / ``unsigned char&`` parameters."""

    DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_INOUT, Parameter.DIRECTION_OUT]
    CTYPES = ['uint8_t&', 'unsigned char&', 'char unsigned&']

    def convert_c_to_python(self, wrapper):
        assert isinstance(wrapper, ReverseWrapperBase)
        if self.direction & self.DIRECTION_IN:
            wrapper.build_params.add_parameter('B', [self.value])
        if self.direction & self.DIRECTION_OUT:
            wrapper.parse_params.add_parameter('B', [self.value], self.name)

    def convert_python_to_c(self, wrapper):
        # Strip the trailing '&' from the C type to declare a by-value temp.
        c_var = wrapper.declarations.declare_variable(self.ctype_no_const[:-1], self.name)
        wrapper.call_params.append(c_var)
        if self.direction & self.DIRECTION_IN:
            wrapper.parse_params.add_parameter('B', ['&' + c_var], self.name)
        if self.direction & self.DIRECTION_OUT:
            wrapper.build_params.add_parameter('B', [c_var])
class UInt8Return(ReturnValue):
    """Type handler for functions returning ``uint8_t`` / ``unsigned char``."""

    CTYPES = ['uint8_t', 'unsigned char', 'char unsigned']

    def get_c_error_return(self):
        # Sentinel value the generated wrapper returns on Python exception.
        return "return 0;"

    def convert_python_to_c(self, wrapper):
        # Parse into a full int first, then range-check before narrowing.
        tmp_var = wrapper.declarations.declare_variable("int", "tmp")
        wrapper.parse_params.add_parameter("i", ["&"+tmp_var], prepend=True)
        # BUG FIX: also reject negative values; only the upper bound was
        # checked before, so negatives silently wrapped on assignment.
        wrapper.after_call.write_error_check('%s > 0xff || %s < 0' % (tmp_var, tmp_var),
                                             'PyErr_SetString(PyExc_ValueError, "Out of range");')
        wrapper.after_call.write_code(
            "%s = %s;" % (self.value, tmp_var))

    def convert_c_to_python(self, wrapper):
        # Widen to int so the value is built as a plain Python int.
        wrapper.build_params.add_parameter("i", ['(int)' + self.value], prepend=True)
class Int8Param(Parameter):
    """Pass-by-value int8_t parameter, parsed via a C int and range-checked."""
    DIRECTIONS = [Parameter.DIRECTION_IN]
    CTYPES = ['int8_t', 'signed char', 'char signed']

    def convert_c_to_python(self, wrapper):
        assert isinstance(wrapper, ReverseWrapperBase)
        wrapper.build_params.add_parameter('i', ["(int) "+self.value])

    def convert_python_to_c(self, wrapper):
        assert isinstance(wrapper, ForwardWrapperBase)
        name = wrapper.declarations.declare_variable("int", self.name, self.default_value)
        wrapper.parse_params.add_parameter('i', ['&'+name], self.name, optional=bool(self.default_value))
        # Enforce the full int8 range [-0x80, 0x7f]; the previous check
        # tested only the upper bound.
        wrapper.before_call.write_error_check('%s > 0x7f || %s < -0x80' % (name, name),
                                              'PyErr_SetString(PyExc_ValueError, "Out of range");')
        wrapper.call_params.append(name)
class Int8RefParam(Parameter):
    """int8_t& parameter; supports in, out and inout directions ('b' format)."""
    DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_INOUT, Parameter.DIRECTION_OUT]
    CTYPES = ['int8_t&', 'signed char &', 'char signed&']

    def convert_c_to_python(self, wrapper):
        """Reverse wrapper: hand the value to Python; read it back if 'out'."""
        assert isinstance(wrapper, ReverseWrapperBase)
        direction = self.direction
        if direction & self.DIRECTION_IN:
            wrapper.build_params.add_parameter('b', [self.value])
        if direction & self.DIRECTION_OUT:
            wrapper.parse_params.add_parameter('b', [self.value], self.name)

    def convert_python_to_c(self, wrapper):
        """Forward wrapper: bind the reference to a local int8_t temporary."""
        # Drop the trailing '&' so the temporary has the referenced value type.
        tmp = wrapper.declarations.declare_variable(self.ctype_no_const[:-1], self.name)
        wrapper.call_params.append(tmp)
        if self.direction & self.DIRECTION_IN:
            wrapper.parse_params.add_parameter('b', ['&' + tmp], self.name)
        if self.direction & self.DIRECTION_OUT:
            wrapper.build_params.add_parameter('b', [tmp])
class Int8Return(ReturnValue):
    """int8_t return value; parsed through an int with range validation."""
    CTYPES = ['int8_t', 'signed char']

    def get_c_error_return(self):
        return "return 0;"

    def convert_python_to_c(self, wrapper):
        tmp_var = wrapper.declarations.declare_variable("int", "tmp")
        wrapper.parse_params.add_parameter("i", ["&"+tmp_var], prepend=True)
        # Bug fix: the valid int8 range is [-128, 127].  The old check
        # ('> 128 || < -127') accepted 128 and rejected -128.
        wrapper.after_call.write_error_check('%s > 0x7f || %s < -0x80' % (tmp_var, tmp_var),
                                             'PyErr_SetString(PyExc_ValueError, "Out of range");')
        wrapper.after_call.write_code(
            "%s = %s;" % (self.value, tmp_var))

    def convert_c_to_python(self, wrapper):
        wrapper.build_params.add_parameter("i", [self.value], prepend=True)
class UnsignedLongLongParam(Parameter):
    """Pass-by-value unsigned 64-bit integer parameter ('K' format)."""
    DIRECTIONS = [Parameter.DIRECTION_IN]
    CTYPES = ['unsigned long long', 'uint64_t', 'unsigned long long int', 'long long unsigned int', 'long long unsigned']

    def get_ctype_without_ref(self):
        """Return the C type to declare locals with (no const qualifier)."""
        return str(self.type_traits.ctype_no_const)

    def convert_c_to_python(self, wrapper):
        assert isinstance(wrapper, ReverseWrapperBase)
        wrapper.build_params.add_parameter('K', [self.value])

    def convert_python_to_c(self, wrapper):
        assert isinstance(wrapper, ForwardWrapperBase)
        ctype = self.get_ctype_without_ref()
        var = wrapper.declarations.declare_variable(ctype, self.name, self.default_value)
        is_optional = bool(self.default_value)
        wrapper.parse_params.add_parameter('K', ['&' + var], self.name, optional=is_optional)
        wrapper.call_params.append(var)
class UnsignedLongLongRefParam(UnsignedLongLongParam):
    """unsigned long long& parameter; only the 'in' direction is supported."""
    DIRECTIONS = [Parameter.DIRECTION_IN]
    CTYPES = ['unsigned long long&', 'uint64_t&', 'long long unsigned int&']
    def get_ctype_without_ref(self):
        # For a reference type the local temporary must be declared with the
        # referenced (target) type, not with the reference type itself.
        assert self.type_traits.target is not None
        return str(self.type_traits.target)
class UnsignedLongLongReturn(ReturnValue):
    """unsigned 64-bit integer return value ('K' format)."""
    CTYPES = ['unsigned long long', 'uint64_t', 'long long unsigned int']
    def get_c_error_return(self):
        # C statement emitted on the error path of the generated wrapper.
        return "return 0;"
    def convert_python_to_c(self, wrapper):
        wrapper.parse_params.add_parameter("K", ["&"+self.value], prepend=True)
    def convert_c_to_python(self, wrapper):
        wrapper.build_params.add_parameter("K", [self.value], prepend=True)
class UnsignedLongParam(Parameter):
    """Pass-by-value unsigned long parameter ('k' format)."""
    DIRECTIONS = [Parameter.DIRECTION_IN]
    CTYPES = ['unsigned long', 'unsigned long int', 'long unsigned', 'long unsigned int']

    def get_ctype_without_ref(self):
        """Return the C type to declare locals with (no const qualifier)."""
        return str(self.type_traits.ctype_no_const)

    def convert_c_to_python(self, wrapper):
        assert isinstance(wrapper, ReverseWrapperBase)
        wrapper.build_params.add_parameter('k', [self.value])

    def convert_python_to_c(self, wrapper):
        assert isinstance(wrapper, ForwardWrapperBase)
        ctype = self.get_ctype_without_ref()
        var = wrapper.declarations.declare_variable(ctype, self.name, self.default_value)
        is_optional = bool(self.default_value)
        wrapper.parse_params.add_parameter('k', ['&' + var], self.name, optional=is_optional)
        wrapper.call_params.append(var)
class UnsignedLongRefParam(UnsignedLongParam):
    """unsigned long& parameter; only the 'in' direction is supported."""
    DIRECTIONS = [Parameter.DIRECTION_IN]
    CTYPES = ['unsigned long&', 'long unsigned&', 'long unsigned int&', 'unsigned long int&']
    def get_ctype_without_ref(self):
        # Declare the temporary with the referenced (target) type.
        assert self.type_traits.target is not None
        return str(self.type_traits.target)
class UnsignedLongReturn(ReturnValue):
    """unsigned long return value ('k' format)."""
    CTYPES = ['unsigned long', 'long unsigned int']
    def get_c_error_return(self):
        # C statement emitted on the error path of the generated wrapper.
        return "return 0;"
    def convert_python_to_c(self, wrapper):
        wrapper.parse_params.add_parameter("k", ["&"+self.value], prepend=True)
    def convert_c_to_python(self, wrapper):
        wrapper.build_params.add_parameter("k", [self.value], prepend=True)
class LongParam(Parameter):
    """Pass-by-value signed long parameter ('l' format)."""
    DIRECTIONS = [Parameter.DIRECTION_IN]
    CTYPES = ['signed long', 'signed long int', 'long', 'long int', 'long signed', 'long signed int']

    def get_ctype_without_ref(self):
        """Return the C type to declare locals with (no const qualifier)."""
        return str(self.type_traits.ctype_no_const)

    def convert_c_to_python(self, wrapper):
        assert isinstance(wrapper, ReverseWrapperBase)
        wrapper.build_params.add_parameter('l', [self.value])

    def convert_python_to_c(self, wrapper):
        assert isinstance(wrapper, ForwardWrapperBase)
        ctype = self.get_ctype_without_ref()
        var = wrapper.declarations.declare_variable(ctype, self.name, self.default_value)
        is_optional = bool(self.default_value)
        wrapper.parse_params.add_parameter('l', ['&' + var], self.name, optional=is_optional)
        wrapper.call_params.append(var)
class LongRefParam(LongParam):
    """long& parameter; only the 'in' direction is supported."""
    DIRECTIONS = [Parameter.DIRECTION_IN]
    CTYPES = ['signed long&', 'long signed&', 'long&', 'long int&', 'long signed int&', 'signed long int&']
    def get_ctype_without_ref(self):
        # Declare the temporary with the referenced (target) type.
        assert self.type_traits.target is not None
        return str(self.type_traits.target)
class LongReturn(ReturnValue):
    """signed long return value ('l' format)."""
    CTYPES = ['signed long', 'long signed int', 'long', 'long int']
    def get_c_error_return(self):
        # C statement emitted on the error path of the generated wrapper.
        return "return 0;"
    def convert_python_to_c(self, wrapper):
        wrapper.parse_params.add_parameter("l", ["&"+self.value], prepend=True)
    def convert_c_to_python(self, wrapper):
        wrapper.build_params.add_parameter("l", [self.value], prepend=True)
class SizeTReturn(ReturnValue):
    """size_t return value, marshalled through unsigned PY_LONG_LONG ('K')."""
    CTYPES = ['size_t',]
    def get_c_error_return(self):
        # C statement emitted on the error path of the generated wrapper.
        return "return 0;"
    def convert_python_to_c(self, wrapper):
        # using the intermediate variable is not always necessary but
        # it's safer this way in case of weird platforms where
        # sizeof(size_t) != sizeof(unsigned PY_LONG_LONG).
        name = wrapper.declarations.declare_variable("unsigned PY_LONG_LONG", "retval_tmp", self.value)
        wrapper.parse_params.add_parameter("K", ["&"+name], prepend=True)
        # Copy back into the real size_t return slot after parsing.
        wrapper.after_call.write_code("retval = %s;" % (name))
    def convert_c_to_python(self, wrapper):
        # Explicit cast keeps Py_BuildValue's 'K' happy regardless of
        # the platform's size_t width.
        wrapper.build_params.add_parameter("K", ["((unsigned PY_LONG_LONG) %s)" % self.value], prepend=True)
class SizeTParam(Parameter):
    """Pass-by-value size_t parameter, marshalled as unsigned PY_LONG_LONG."""
    DIRECTIONS = [Parameter.DIRECTION_IN]
    CTYPES = ['size_t']

    def get_ctype_without_ref(self):
        """Return the C type to declare locals with (no const qualifier)."""
        return str(self.type_traits.ctype_no_const)

    def convert_c_to_python(self, wrapper):
        assert isinstance(wrapper, ReverseWrapperBase)
        # Cast up so 'K' works whatever the platform's size_t width is.
        wrapper.build_params.add_parameter('K', ["((unsigned PY_LONG_LONG) %s)" % self.value])

    def convert_python_to_c(self, wrapper):
        assert isinstance(wrapper, ForwardWrapperBase)
        var = wrapper.declarations.declare_variable("unsigned PY_LONG_LONG", self.name, self.default_value)
        is_optional = bool(self.default_value)
        wrapper.parse_params.add_parameter('K', ['&' + var], self.name, optional=is_optional)
        wrapper.call_params.append(var)
class LongLongParam(Parameter):
    """Pass-by-value signed 64-bit integer parameter ('L' format)."""
    DIRECTIONS = [Parameter.DIRECTION_IN]
    CTYPES = ['long long', 'int64_t', 'long long int']

    def get_ctype_without_ref(self):
        """Return the C type to declare locals with (no const qualifier)."""
        return str(self.type_traits.ctype_no_const)

    def convert_c_to_python(self, wrapper):
        assert isinstance(wrapper, ReverseWrapperBase)
        wrapper.build_params.add_parameter('L', [self.value])

    def convert_python_to_c(self, wrapper):
        assert isinstance(wrapper, ForwardWrapperBase)
        ctype = self.get_ctype_without_ref()
        var = wrapper.declarations.declare_variable(ctype, self.name, self.default_value)
        is_optional = bool(self.default_value)
        wrapper.parse_params.add_parameter('L', ['&' + var], self.name, optional=is_optional)
        wrapper.call_params.append(var)
class LongLongRefParam(LongLongParam):
    """long long& parameter."""
    DIRECTIONS = [Parameter.DIRECTION_IN] # other directions not yet implemented
    CTYPES = ['long long&', 'int64_t&', 'long long int&']
    def get_ctype_without_ref(self):
        # Declare the temporary with the referenced (target) type.
        assert self.type_traits.target is not None
        return str(self.type_traits.target)
class LongLongReturn(ReturnValue):
    """signed 64-bit integer return value ('L' format)."""
    CTYPES = ['long long', 'int64_t', 'long long int']
    def get_c_error_return(self):
        # C statement emitted on the error path of the generated wrapper.
        return "return 0;"
    def convert_python_to_c(self, wrapper):
        wrapper.parse_params.add_parameter("L", ["&"+self.value], prepend=True)
    def convert_c_to_python(self, wrapper):
        wrapper.build_params.add_parameter("L", [self.value], prepend=True)
class Int8PtrParam(PointerParameter):
    """int8_t* parameter; the direction must be given unless the pointer is const."""
    DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_OUT,
                  Parameter.DIRECTION_IN|Parameter.DIRECTION_OUT]
    CTYPES = ['int8_t*']

    def __init__(self, ctype, name, direction=None, is_const=None, default_value=None, transfer_ownership=None):
        # A const pointer can only be an input; anything else needs an
        # explicit direction from the caller.
        if direction is None:
            if not is_const:
                raise TypeConfigurationError("direction not given")
            direction = Parameter.DIRECTION_IN
        super(Int8PtrParam, self).__init__(ctype, name, direction, is_const,
                                           default_value, transfer_ownership)

    def convert_c_to_python(self, wrapper):
        if self.direction & self.DIRECTION_IN:
            wrapper.build_params.add_parameter('b', ['*' + self.value])
        if self.direction & self.DIRECTION_OUT:
            wrapper.parse_params.add_parameter('b', [self.value], self.name)

    def convert_python_to_c(self, wrapper):
        # Declare a value temporary and pass its address to the wrapped call.
        tmp = wrapper.declarations.declare_variable('int8_t', self.name)
        wrapper.call_params.append('&' + tmp)
        if self.direction & self.DIRECTION_IN:
            wrapper.parse_params.add_parameter('b', ['&' + tmp], self.name)
        if self.direction & self.DIRECTION_OUT:
            wrapper.build_params.add_parameter('b', [tmp])
class UInt8PtrParam(PointerParameter):
    """uint8_t* parameter; the direction must be given unless the pointer is const."""
    DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_OUT,
                  Parameter.DIRECTION_IN|Parameter.DIRECTION_OUT]
    CTYPES = ['uint8_t*']

    def __init__(self, ctype, name, direction=None, is_const=None, default_value=None, transfer_ownership=None):
        # A const pointer can only be an input; anything else needs an
        # explicit direction from the caller.
        if direction is None:
            if not is_const:
                raise TypeConfigurationError("direction not given")
            direction = Parameter.DIRECTION_IN
        super(UInt8PtrParam, self).__init__(ctype, name, direction, is_const,
                                            default_value, transfer_ownership)

    def convert_c_to_python(self, wrapper):
        if self.direction & self.DIRECTION_IN:
            wrapper.build_params.add_parameter('B', ['*' + self.value])
        if self.direction & self.DIRECTION_OUT:
            wrapper.parse_params.add_parameter('B', [self.value], self.name)

    def convert_python_to_c(self, wrapper):
        # Declare a value temporary and pass its address to the wrapped call.
        tmp = wrapper.declarations.declare_variable('uint8_t', self.name)
        wrapper.call_params.append('&' + tmp)
        if self.direction & self.DIRECTION_IN:
            wrapper.parse_params.add_parameter('B', ['&' + tmp], self.name)
        if self.direction & self.DIRECTION_OUT:
            wrapper.build_params.add_parameter('B', [tmp])
| gpl-2.0 |
maartenq/ansible | lib/ansible/parsing/mod_args.py | 46 | 12036 | # (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleParserError, AnsibleError, AnsibleAssertionError
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils._text import to_text
from ansible.parsing.splitter import parse_kv, split_args
from ansible.plugins.loader import module_loader, action_loader
from ansible.template import Templar
# For filtering out modules correctly below
# Actions that take a free-form command string rather than k=v arguments.
FREEFORM_ACTIONS = frozenset((
    'command',
    'win_command',
    'shell',
    'win_shell',
    'script',
    'raw'
))
# Actions for which trailing text is kept as a single raw parameter
# instead of being split into a k=v dictionary.
RAW_PARAM_MODULES = FREEFORM_ACTIONS.union((
    'include',
    'include_vars',
    'include_tasks',
    'include_role',
    'import_tasks',
    'import_role',
    'add_host',
    'group_by',
    'set_fact',
    'meta',
))
# Task keywords handled by the engine itself, not looked up as
# action/module plugins.
BUILTIN_TASKS = frozenset((
    'meta',
    'include',
    'include_tasks',
    'include_role',
    'import_tasks',
    'import_role'
))
class ModuleArgsParser:
    """
    There are several ways a module and argument set can be expressed:

    # legacy form (for a shell command)
    - action: shell echo hi

    # common shorthand for local actions vs delegate_to
    - local_action: shell echo hi

    # most commonly:
    - copy: src=a dest=b

    # legacy form
    - action: copy src=a dest=b

    # complex args form, for passing structured data
    - copy:
        src: a
        dest: b

    # gross, but technically legal
    - action:
        module: copy
        args:
          src: a
          dest: b

    # Standard YAML form for command-type modules. In this case, the args specified
    # will act as 'defaults' and will be overridden by any args specified
    # in one of the other formats (complex args under the action, or
    # parsed from the k=v string

    - command: 'pwd'
      args:
        chdir: '/tmp'

    This class has some of the logic to canonicalize these into the form

    - module: <module_name>
      delegate_to: <optional>
      args: <args>

    Args may also be munged for certain shell command parameters.
    """
    def __init__(self, task_ds=None):
        # task_ds is the raw task datastructure from the YAML parser.
        task_ds = {} if task_ds is None else task_ds
        if not isinstance(task_ds, dict):
            raise AnsibleAssertionError("the type of 'task_ds' should be a dict, but is a %s" % type(task_ds))
        self._task_ds = task_ds
    def _split_module_string(self, module_string):
        '''
        when module names are expressed like:
        action: copy src=a dest=b
        the first part of the string is the name of the module
        and the rest are strings pertaining to the arguments.

        Returns a (module_name, argument_string) tuple; the argument
        string is empty when no arguments follow the module name.
        '''
        tokens = split_args(module_string)
        if len(tokens) > 1:
            return (tokens[0], " ".join(tokens[1:]))
        else:
            return (tokens[0], "")
    def _normalize_parameters(self, thing, action=None, additional_args=None):
        '''
        arguments can be fuzzy. Deal with all the forms.

        Returns an (action, final_args) tuple.
        '''
        additional_args = {} if additional_args is None else additional_args
        # final args are the ones we'll eventually return, so first update
        # them with any additional args specified, which have lower priority
        # than those which may be parsed/normalized next
        final_args = dict()
        if additional_args:
            if isinstance(additional_args, string_types):
                # A bare string under 'args:' is only allowed when it is a
                # template expression that resolves to a dict later.
                templar = Templar(loader=None)
                if templar._contains_vars(additional_args):
                    final_args['_variable_params'] = additional_args
                else:
                    raise AnsibleParserError("Complex args containing variables cannot use bare variables, and must use the full variable style "
                                             "('{{var_name}}')")
            elif isinstance(additional_args, dict):
                final_args.update(additional_args)
            else:
                raise AnsibleParserError('Complex args must be a dictionary or variable string ("{{var}}").')
        # how we normalize depends if we figured out what the module name is
        # yet. If we have already figured it out, it's a 'new style' invocation.
        # otherwise, it's not
        if action is not None:
            args = self._normalize_new_style_args(thing, action)
        else:
            (action, args) = self._normalize_old_style_args(thing)
            # this can occasionally happen, simplify
            if args and 'args' in args:
                tmp_args = args.pop('args')
                if isinstance(tmp_args, string_types):
                    tmp_args = parse_kv(tmp_args)
                args.update(tmp_args)
        # only internal variables can start with an underscore, so
        # we don't allow users to set them directly in arguments
        if args and action not in FREEFORM_ACTIONS:
            for arg in args:
                arg = to_text(arg)
                if arg.startswith('_ansible_'):
                    raise AnsibleError("invalid parameter specified for action '%s': '%s'" % (action, arg))
        # finally, update the args we're going to return with the ones
        # which were normalized above
        if args:
            final_args.update(args)
        return (action, final_args)
    def _normalize_new_style_args(self, thing, action):
        '''
        deals with fuzziness in new style module invocations
        accepting key=value pairs and dictionaries, and returns
        a dictionary of arguments

        possible example inputs:
            'echo hi', 'shell'
            {'region': 'xyz'}, 'ec2'
        standardized outputs like:
            { _raw_params: 'echo hi', _uses_shell: True }
        '''
        if isinstance(thing, dict):
            # form is like: { xyz: { x: 2, y: 3 } }
            args = thing
        elif isinstance(thing, string_types):
            # form is like: copy: src=a dest=b
            check_raw = action in FREEFORM_ACTIONS
            args = parse_kv(thing, check_raw=check_raw)
        elif thing is None:
            # this can happen with modules which take no params, like ping:
            args = None
        else:
            raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
        return args
    def _normalize_old_style_args(self, thing):
        '''
        deals with fuzziness in old-style (action/local_action) module invocations
        returns tuple of (module_name, dictionary_args)

        possible example inputs:
            { 'shell' : 'echo hi' }
            'shell echo hi'
            {'module': 'ec2', 'x': 1 }
        standardized outputs like:
            ('ec2', { 'x': 1} )
        '''
        action = None
        args = None
        if isinstance(thing, dict):
            # form is like: action: { module: 'copy', src: 'a', dest: 'b' }
            thing = thing.copy()
            if 'module' in thing:
                action, module_args = self._split_module_string(thing['module'])
                args = thing.copy()
                check_raw = action in FREEFORM_ACTIONS
                args.update(parse_kv(module_args, check_raw=check_raw))
                # 'module' itself is not an argument of the module
                del args['module']
        elif isinstance(thing, string_types):
            # form is like: action: copy src=a dest=b
            (action, args) = self._split_module_string(thing)
            check_raw = action in FREEFORM_ACTIONS
            args = parse_kv(args, check_raw=check_raw)
        else:
            # need a dict or a string, so giving up
            raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
        return (action, args)
    def parse(self):
        '''
        Given a task in one of the supported forms, parses and returns
        returns the action, arguments, and delegate_to values for the
        task, dealing with all sorts of levels of fuzziness.
        '''
        thing = None
        action = None
        delegate_to = self._task_ds.get('delegate_to', None)
        args = dict()
        # This is the standard YAML form for command-type modules. We grab
        # the args and pass them in as additional arguments, which can/will
        # be overwritten via dict updates from the other arg sources below
        additional_args = self._task_ds.get('args', dict())
        # We can have one of action, local_action, or module specified
        # action
        if 'action' in self._task_ds:
            # an old school 'action' statement
            thing = self._task_ds['action']
            action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
        # local_action
        if 'local_action' in self._task_ds:
            # local_action is similar but also implies a delegate_to
            if action is not None:
                raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task_ds)
            thing = self._task_ds.get('local_action', '')
            delegate_to = 'localhost'
            action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
        # module: <stuff> is the more new-style invocation
        # walk the input dictionary to see we recognize a module name
        for (item, value) in iteritems(self._task_ds):
            if item in BUILTIN_TASKS or item in action_loader or item in module_loader:
                # finding more than one module name is a problem
                if action is not None:
                    raise AnsibleParserError("conflicting action statements: %s, %s" % (action, item), obj=self._task_ds)
                action = item
                thing = value
                action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
        # if we didn't see any module in the task at all, it's not a task really
        if action is None:
            if 'ping' not in module_loader:
                # 'ping' is a core module; its absence indicates a broken install
                raise AnsibleParserError("The requested action was not found in configured module paths. "
                                         "Additionally, core modules are missing. If this is a checkout, "
                                         "run 'git pull --rebase' to correct this problem.",
                                         obj=self._task_ds)
            else:
                raise AnsibleParserError("no action detected in task. This often indicates a misspelled module name, or incorrect module path.",
                                         obj=self._task_ds)
        elif args.get('_raw_params', '') != '' and action not in RAW_PARAM_MODULES:
            # Leftover free-form text is only valid for RAW_PARAM_MODULES,
            # unless it is a template that may resolve to args later.
            templar = Templar(loader=None)
            raw_params = args.pop('_raw_params')
            if templar._contains_vars(raw_params):
                args['_variable_params'] = raw_params
            else:
                raise AnsibleParserError("this task '%s' has extra params, which is only allowed in the following modules: %s" % (action,
                                                                                                                                 ", ".join(RAW_PARAM_MODULES)),
                                         obj=self._task_ds)
        return (action, args, delegate_to)
| gpl-3.0 |
baylee-d/osf.io | addons/figshare/messages.py | 25 | 2467 | # MODEL MESSAGES :model.py
# Messages shown before page load when OSF and figshare visibility differ.
# Placeholders are filled with str.format() by the callers.
BEFORE_PAGE_LOAD_PRIVATE_NODE_MIXED_FS = 'Warning: This OSF {category} is private but figshare project {project_id} may contain some public files or filesets.'
BEFORE_PAGE_LOAD_PUBLIC_NODE_MIXED_FS = 'Warning: This OSF {category} is public but figshare project {project_id} may contain some private files or filesets.'
BEFORE_PAGE_LOAD_PERM_MISMATCH = 'Warning: This OSF {category} is {node_perm}, but the figshare {folder_type} {figshare_id} is {figshare_perm}. '
BEFORE_PAGE_LOAD_PUBLIC_NODE_PRIVATE_FS = 'Users can view the contents of this private figshare {folder_type}. '
# END MODEL MESSAGES
# MFR MESSAGES :views/crud.py
# Shown when a file cannot be rendered inline.
FIGSHARE_VIEW_FILE_PRIVATE = 'Since this figshare file is unpublished we cannot render it. In order to access this content you will need to log into the <u><a href="{url}">figshare page</a></u> and view it there. '
FIGSHARE_VIEW_FILE_OVERSIZED = 'This figshare file is too large to render; <u><a href="{url}">download file</a></u> to view it. '
# TODO: Language not associated with any string; trace intent before deleting. See [#OSF-6101]
# '''
# Publishing this article is an irreversible operation. Once a figshare article is published it can never be deleted. Proceed with caution.
# <br /><br />
# Also, figshare requires some additional info before this article can be published: <br />
# <form id='figsharePublishForm' action='${nodeApiUrl}figshare/publish/article/${parent_id}/'>
#     <h3><label><Title></label></h3>
#     <input name='title' type='text' value='${figshare_title}'>
#     <h3><label>Category:</label></h3>
#     <select name='cat' id='figshareCategory' value='${figshare_category}'>${figshare_categories}</select><br />
#     <h3><label>Tag(s):</label></h3>
#     <input name='tags' type='text' value='${figshare_tags}' placeholder='e.g. neuroscience, cognition'><br />
#     <h3><label>Description</label></h3>
#     <textarea name='description' placeholder='Please type a description of this file here'>${figshare_desc}</textarea>
# </form>
# '''
# Error messages surfaced to the user for figshare API failures.
OAUTH_INVALID = 'Your OAuth key for figshare is no longer valid. Please re-authenticate. '
FIGSHARE_INTERNAL_SERVER_ERROR = 'Figshare is experiencing technical problems when connecting to the OSF. Please wait while they resolve the problem or contact them at https://support.figshare.com.'
FIGSHARE_UNSPECIFIED_ERROR = 'Figshare was contacted and returned with the following error message: {error_message}.'
# END MFR MESSAGES
| apache-2.0 |
igemsoftware/SYSU-Software2013 | project/Python27/Lib/site-packages/pythonwin/pywin/framework/intpydde.py | 17 | 1334 | # DDE support for Pythonwin
#
# Seems to work fine (in the context that IE4 seems to have broken
# DDE on _all_ NT4 machines I have tried, but only when a "Command Prompt" window
# is open. Strange, but true. If you have problems with this, close all Command Prompts!
import win32ui
import win32api, win32con
from pywin.mfc import object
from dde import *
import sys, traceback
class DDESystemTopic(object.Object):
    """DDE "System" topic: forwards DDE Execute commands to the application."""
    def __init__(self, app):
        self.app = app
        object.Object.__init__(self, CreateServerSystemTopic())
    def Exec(self, data):
        """Handle a DDE Execute request by passing the command to the app."""
        try:
            # print "Executing", cmd
            self.app.OnDDECommand(data)
        except:
            t,v,tb = sys.exc_info()
            # The DDE Execution failed.
            print "Error executing DDE command."
            traceback.print_exception(t,v,tb)
            # Returning 0 tells the DDE layer the command failed.
            return 0
class DDEServer(object.Object):
    """Wraps the Pythonwin DDE server and owns its topic/item lifetimes."""
    def __init__(self, app):
        self.app = app
        object.Object.__init__(self, CreateServer())
        self.topic = self.item = None
    def CreateSystemTopic(self):
        # Called by the framework to create the "System" topic handler.
        return DDESystemTopic(self.app)
    def Shutdown(self):
        # Tear down the underlying server first, then any topic/item
        # objects we still hold, clearing references so this is idempotent.
        self._obj_.Shutdown()
        self._obj_.Destroy()
        if self.topic is not None:
            self.topic.Destroy()
            self.topic = None
        if self.item is not None:
            self.item.Destroy()
            self.item = None
    def OnCreate(self):
        # Non-zero return signals successful creation to the framework.
        return 1
    def Status(self, msg):
        try:
            win32ui.SetStatusText(msg)
        except win32ui.error:
            # No status bar yet (e.g. during startup); ignore.
            pass
| mit |
SSSD/sssd | src/tests/intg/test_files_provider.py | 1 | 37497 | #
# SSSD files domain tests
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import stat
import time
import config
import signal
import subprocess
import pwd
import grp
import pytest
import tempfile
import ent
import sssd_id
from sssd_nss import NssReturnCode
from sssd_passwd import (call_sssd_getpwnam,
call_sssd_getpwuid)
from sssd_group import call_sssd_getgrnam, call_sssd_getgrgid
from files_ops import passwd_ops_setup, group_ops_setup, PasswdOps, GroupOps
from util import unindent
# Sync this with files_ops.c
FILES_REALLOC_CHUNK = 64
# Canary accounts are always present; a successful canary lookup tells the
# tests that sssd has finished re-reading the files.
CANARY = dict(name='canary', passwd='x', uid=100001, gid=200001,
              gecos='Used to check if passwd is resolvable',
              dir='/home/canary',
              shell='/bin/bash')
USER1 = dict(name='user1', passwd='x', uid=10001, gid=20001,
             gecos='User for tests',
             dir='/home/user1',
             shell='/bin/bash')
USER2 = dict(name='user2', passwd='x', uid=10002, gid=20001,
             gecos='User2 for tests',
             dir='/home/user2',
             shell='/bin/bash')
OV_USER1 = dict(name='ov_user1', passwd='x', uid=10010, gid=20010,
                gecos='Overriden User 1',
                dir='/home/ov/user1',
                shell='/bin/ov_user1_shell')
# Lives in the alternative passwd file used by the multiple-sources tests.
ALT_USER1 = dict(name='alt_user1', passwd='x', uid=60001, gid=70001,
                 gecos='User for tests from alt files',
                 dir='/home/altuser1',
                 shell='/bin/bash')
ALL_USERS = [CANARY, USER1, USER2, OV_USER1, ALT_USER1]
CANARY_GR = dict(name='canary',
                 gid=300001,
                 mem=[])
GROUP1 = dict(name='group1',
              gid=30001,
              mem=['user1'])
OV_GROUP1 = dict(name='ov_group1',
                 gid=30002,
                 mem=['user1'])
GROUP12 = dict(name='group12',
               gid=30012,
               mem=['user1', 'user2'])
GROUP_NOMEM = dict(name='group_nomem',
                   gid=40000,
                   mem=[])
# Lives in the alternative group file used by the multiple-sources tests.
ALT_GROUP1 = dict(name='alt_group1',
                  gid=80001,
                  mem=['alt_user1'])
def start_sssd():
    """Start sssd and add teardown for stopping it and removing state"""
    # Point the files provider at the nss_wrapper passwd/group files so the
    # test never touches the real /etc/passwd and /etc/group.
    os.environ["SSS_FILES_PASSWD"] = os.environ["NSS_WRAPPER_PASSWD"]
    os.environ["SSS_FILES_GROUP"] = os.environ["NSS_WRAPPER_GROUP"]
    if subprocess.call(["sssd", "-D", "--logger=files"]) != 0:
        raise Exception("sssd start failed")
def stop_sssd():
    """Stop sssd: SIGTERM the daemon, then wait until the process is gone."""
    # Use a context manager so the pidfile handle is always closed
    # (the original leaked the open file object).
    with open(config.PIDFILE_PATH, "r") as pid_file:
        pid = int(pid_file.read())
    os.kill(pid, signal.SIGTERM)
    while True:
        try:
            # Probe the process; os.kill raises OSError (ProcessLookupError)
            # once the PID no longer exists.
            os.kill(pid, signal.SIGCONT)
        except OSError:
            break
        time.sleep(1)
def restart_sssd():
    """Stop and immediately restart sssd (state files are kept)."""
    stop_sssd()
    start_sssd()
def create_conf_fixture(request, contents):
    """Generate sssd.conf and add teardown for removing it"""
    # Write via a context manager so the handle is closed even if write()
    # fails (the original left the file object to the garbage collector).
    with open(config.CONF_PATH, "w") as conf:
        conf.write(contents)
    # sssd refuses to start if the config is readable by others.
    os.chmod(config.CONF_PATH, stat.S_IRUSR | stat.S_IWUSR)
    request.addfinalizer(lambda: os.unlink(config.CONF_PATH))
def create_sssd_fixture(request):
    """Start sssd and register a teardown that stops it and wipes its state."""
    start_sssd()

    def teardown():
        try:
            stop_sssd()
        except Exception:
            # Best effort: sssd may already be down; still clean the caches.
            # (Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are not swallowed.)
            pass
        for path in os.listdir(config.DB_PATH):
            os.unlink(os.path.join(config.DB_PATH, path))
        for path in os.listdir(config.MCACHE_PATH):
            os.unlink(os.path.join(config.MCACHE_PATH, path))
    request.addfinalizer(teardown)
# Fixtures
@pytest.fixture
def files_domain_only(request):
    """sssd with a single files domain serving nss."""
    conf = unindent("""\
[sssd]
domains = files
services = nss
[domain/files]
id_provider = files
""").format(**locals())
    create_conf_fixture(request, conf)
    create_sssd_fixture(request)
    return None
@pytest.fixture
def files_multiple_sources(request):
    """files domain reading passwd/group from the default plus an alternative file.

    Returns (alt_pwops, alt_grops) operating on the alternative files.
    """
    _, alt_passwd_path = tempfile.mkstemp(prefix='altpasswd')
    request.addfinalizer(lambda: os.unlink(alt_passwd_path))
    alt_pwops = PasswdOps(alt_passwd_path)
    _, alt_group_path = tempfile.mkstemp(prefix='altgroup')
    request.addfinalizer(lambda: os.unlink(alt_group_path))
    alt_grops = GroupOps(alt_group_path)
    # The files provider accepts comma-separated lists of source files.
    passwd_list = ",".join([os.environ["NSS_WRAPPER_PASSWD"], alt_passwd_path])
    group_list = ",".join([os.environ["NSS_WRAPPER_GROUP"], alt_group_path])
    conf = unindent("""\
[sssd]
domains = files
services = nss
[nss]
debug_level = 10
[domain/files]
id_provider = files
passwd_files = {passwd_list}
group_files = {group_list}
debug_level = 10
""").format(**locals())
    create_conf_fixture(request, conf)
    create_sssd_fixture(request)
    return alt_pwops, alt_grops
@pytest.fixture
def files_multiple_sources_nocreate(request):
    """
    Sets up SSSD with multiple sources, but does not actually create
    the files.

    Returns the (passwd_path, group_path) of the not-yet-created files.
    """
    # mktemp (not mkstemp): only reserve names, do not create the files.
    alt_passwd_path = tempfile.mktemp(prefix='altpasswd')
    request.addfinalizer(lambda: os.unlink(alt_passwd_path))
    alt_group_path = tempfile.mktemp(prefix='altgroup')
    request.addfinalizer(lambda: os.unlink(alt_group_path))
    passwd_list = ",".join([os.environ["NSS_WRAPPER_PASSWD"], alt_passwd_path])
    group_list = ",".join([os.environ["NSS_WRAPPER_GROUP"], alt_group_path])
    conf = unindent("""\
[sssd]
domains = files
services = nss
[nss]
debug_level = 10
[domain/files]
id_provider = files
passwd_files = {passwd_list}
group_files = {group_list}
debug_level = 10
""").format(**locals())
    create_conf_fixture(request, conf)
    create_sssd_fixture(request)
    return alt_passwd_path, alt_group_path
@pytest.fixture
def proxy_to_files_domain_only(request):
    """sssd with a proxy domain backed by the files proxy library."""
    conf = unindent("""\
[sssd]
domains = proxy, local
services = nss
[domain/local]
id_provider = local
[domain/proxy]
id_provider = proxy
proxy_lib_name = files
auth_provider = none
resolver_provider = none
""").format(**locals())
    create_conf_fixture(request, conf)
    create_sssd_fixture(request)
    return None
@pytest.fixture
def no_sssd_domain(request):
    """No explicit domains; the files domain is enabled implicitly."""
    conf = unindent("""\
[sssd]
services = nss
enable_files_domain = true
""").format(**locals())
    create_conf_fixture(request, conf)
    create_sssd_fixture(request)
    return None
@pytest.fixture
def no_files_domain(request):
    """A files domain exists in the config but is not listed in 'domains'."""
    conf = unindent("""\
[sssd]
domains = local
services = nss
enable_files_domain = true
[domain/local]
id_provider = local
[domain/disabled.files]
id_provider = files
""").format(**locals())
    create_conf_fixture(request, conf)
    create_sssd_fixture(request)
    return None
@pytest.fixture
def disabled_files_domain(request):
    """The implicit files domain is explicitly switched off."""
    conf = unindent("""\
[sssd]
domains = local
services = nss
enable_files_domain = false
[domain/local]
id_provider = local
""").format(**locals())
    create_conf_fixture(request, conf)
    create_sssd_fixture(request)
    return None
@pytest.fixture
def no_sssd_conf(request):
    """Start sssd without writing any sssd.conf at all."""
    create_sssd_fixture(request)
    return None
@pytest.fixture
def domain_resolution_order(request):
    """files domain with a (nonexistent) domain_resolution_order entry."""
    conf = unindent("""\
[sssd]
domains = files
services = nss
domain_resolution_order = foo
[domain/files]
id_provider = files
""").format(**locals())
    create_conf_fixture(request, conf)
    create_sssd_fixture(request)
    return None
@pytest.fixture
def default_domain_suffix(request):
    """files domain with a default_domain_suffix set."""
    conf = unindent("""\
[sssd]
domains = files
services = nss
default_domain_suffix = foo
[domain/files]
id_provider = files
""").format(**locals())
    create_conf_fixture(request, conf)
    create_sssd_fixture(request)
    return None
@pytest.fixture
def override_homedir_and_shell(request):
conf = unindent("""\
[sssd]
domains = files
services = nss
[domain/files]
id_provider = files
override_homedir = /test/bar
override_shell = /bin/bar
[nss]
override_homedir = /test/foo
override_shell = /bin/foo
""").format(**locals())
create_conf_fixture(request, conf)
create_sssd_fixture(request)
return None
def setup_pw_with_list(pwd_ops, user_list):
for user in user_list:
pwd_ops.useradd(**user)
ent.assert_passwd_by_name(CANARY['name'], CANARY)
return pwd_ops
@pytest.fixture
def add_user_with_canary(passwd_ops_setup):
return setup_pw_with_list(passwd_ops_setup, [CANARY, USER1])
@pytest.fixture
def setup_pw_with_canary(passwd_ops_setup):
return setup_pw_with_list(passwd_ops_setup, [CANARY])
def add_group_members(pwd_ops, group):
members = {x['name']: x for x in ALL_USERS}
for member in group['mem']:
if pwd_ops.userexist(member):
continue
pwd_ops.useradd(**members[member])
def setup_gr_with_list(pwd_ops, grp_ops, group_list):
for group in group_list:
add_group_members(pwd_ops, group)
grp_ops.groupadd(**group)
ent.assert_group_by_name(CANARY_GR['name'], CANARY_GR)
return grp_ops
@pytest.fixture
def add_group_with_canary(passwd_ops_setup, group_ops_setup):
return setup_gr_with_list(
passwd_ops_setup, group_ops_setup, [GROUP1, CANARY_GR]
)
@pytest.fixture
def setup_gr_with_canary(passwd_ops_setup, group_ops_setup):
return setup_gr_with_list(passwd_ops_setup, group_ops_setup, [CANARY_GR])
def poll_canary(fn, name, threshold=20):
"""
If we query SSSD while it's updating its cache, it would return NOTFOUND
rather than a result from potentially outdated or incomplete cache. In
reality this doesn't hurt because the order of the modules is normally
"sss files" so the user lookup would fall back to files. But in tests
we use this loop to wait until the canary user who is always there is
resolved.
"""
for _ in range(0, threshold):
res, _ = fn(name)
if res == NssReturnCode.SUCCESS:
return True
elif res == NssReturnCode.NOTFOUND:
time.sleep(0.1)
continue
else:
return False
return False
def sssd_getpwnam_sync(name):
ret = poll_canary(call_sssd_getpwnam, CANARY["name"])
if ret is False:
return NssReturnCode.NOTFOUND, None
return call_sssd_getpwnam(name)
def sssd_getpwuid_sync(uid):
ret = poll_canary(call_sssd_getpwnam, CANARY["name"])
if ret is False:
return NssReturnCode.NOTFOUND, None
return call_sssd_getpwuid(uid)
def sssd_getgrnam_sync(name):
ret = poll_canary(call_sssd_getgrnam, CANARY_GR["name"])
if ret is False:
return NssReturnCode.NOTFOUND, None
return call_sssd_getgrnam(name)
def sssd_getgrgid_sync(name):
ret = poll_canary(call_sssd_getgrnam, CANARY_GR["name"])
if ret is False:
return NssReturnCode.NOTFOUND, None
return call_sssd_getgrgid(name)
def sssd_id_sync(name):
sssd_getpwnam_sync(CANARY["name"])
res, _, groups = sssd_id.get_user_groups(name)
return res, groups
# Helper functions
def user_generator(seqnum):
return dict(name='user%d' % seqnum,
passwd='x',
uid=10000 + seqnum,
gid=20000 + seqnum,
gecos='User for tests',
dir='/home/user%d' % seqnum,
shell='/bin/bash')
def check_user(exp_user, delay=1.0):
if delay > 0:
time.sleep(delay)
res, found_user = sssd_getpwnam_sync(exp_user["name"])
assert res == NssReturnCode.SUCCESS
assert found_user == exp_user
def group_generator(seqnum):
return dict(name='group%d' % seqnum,
gid=30000 + seqnum,
mem=[])
def check_group(exp_group, delay=1.0):
if delay > 0:
time.sleep(delay)
res, found_group = sssd_getgrnam_sync(exp_group["name"])
assert res == NssReturnCode.SUCCESS
assert found_group == exp_group
def check_group_by_gid(exp_group, delay=1.0):
if delay > 0:
time.sleep(delay)
res, found_group = sssd_getgrgid_sync(exp_group["gid"])
assert res == NssReturnCode.SUCCESS
assert found_group == exp_group
def check_group_list(exp_groups_list):
for exp_group in exp_groups_list:
check_group(exp_group)
def assert_user_overriden():
# There is an issue in nss_wrapper [0] and nss_wrapper always looks into
# the files first before using the NSS module. This lets this check fail
# because the user is found in the file and hence will be returned
# without overridden values.
# In order to work this around while there's no fix for nss_wrapper, let's
# use the fully-qualified name when looking up the USER1
#
# https://bugzilla.samba.org/show_bug.cgi?id=12883)
ent.assert_passwd_by_name(USER1["name"]+"@files", OV_USER1)
ent.assert_passwd_by_name(OV_USER1["name"], OV_USER1)
def assert_group_overriden():
# There is an issue in nss_wrapper [0] and nss_wrapper always looks into
# the files first before using the NSS module. This lets this check fail
# because the user is found in the file and hence will be returned
# without overridden values.
# In order to work this around while there's no fix for nss_wrapper, let's
# use the fully-qualified name when looking up the GROUP1
#
# https://bugzilla.samba.org/show_bug.cgi?id=12883)
ent.assert_group_by_name(GROUP1["name"]+"@files", OV_GROUP1)
ent.assert_group_by_name(OV_GROUP1["name"], OV_GROUP1)
# User tests
def test_getpwnam_after_start(add_user_with_canary, files_domain_only):
"""
Test that after startup without any additional operations, a user
can be resolved through sssd
"""
res, user = sssd_getpwnam_sync(USER1["name"])
assert res == NssReturnCode.SUCCESS
assert user == USER1
def test_getpwuid_after_start(add_user_with_canary, files_domain_only):
"""
Test that after startup without any additional operations, a user
can be resolved through sssd
"""
res, user = sssd_getpwuid_sync(USER1["uid"])
assert res == NssReturnCode.SUCCESS
assert user == USER1
def test_user_overriden(add_user_with_canary, files_domain_only):
"""
Test that user override works with files domain only
"""
# Override
subprocess.check_call(["sss_override", "user-add", USER1["name"],
"-u", str(OV_USER1["uid"]),
"-g", str(OV_USER1["gid"]),
"-n", OV_USER1["name"],
"-c", OV_USER1["gecos"],
"-h", OV_USER1["dir"],
"-s", OV_USER1["shell"]])
restart_sssd()
assert_user_overriden()
def test_group_overriden(add_group_with_canary, files_domain_only):
"""
Test that user override works with files domain only
"""
# Override
subprocess.check_call(["sss_override", "group-add", GROUP1["name"],
"-n", OV_GROUP1["name"],
"-g", str(OV_GROUP1["gid"])])
restart_sssd()
assert_group_overriden()
def test_getpwnam_neg(files_domain_only):
"""
Test that a nonexistent user cannot be resolved by name
"""
res, _ = call_sssd_getpwnam("nosuchuser")
assert res == NssReturnCode.NOTFOUND
def test_getpwuid_neg(files_domain_only):
"""
Test that a nonexistent user cannot be resolved by UID
"""
res, _ = call_sssd_getpwuid(12345)
assert res == NssReturnCode.NOTFOUND
def test_root_does_not_resolve(files_domain_only):
"""
SSSD currently does not resolve the root user even though it can
be resolved through the NSS interface
"""
nss_root = pwd.getpwnam("root")
assert nss_root is not None
res, _ = call_sssd_getpwnam("root")
assert res == NssReturnCode.NOTFOUND
def test_uid_zero_does_not_resolve(files_domain_only):
"""
SSSD currently does not resolve the UID 0 even though it can
be resolved through the NSS interface
"""
nss_root = pwd.getpwuid(0)
assert nss_root is not None
res, _ = call_sssd_getpwuid(0)
assert res == NssReturnCode.NOTFOUND
def test_add_remove_add_file_user(setup_pw_with_canary, files_domain_only):
"""
Test that removing a user is detected and the user
is removed from the sssd database. Similarly, an add
should be detected. Do this several times to test retaining
the inotify watch for moved and unlinked files.
"""
res, _ = call_sssd_getpwnam(USER1["name"])
assert res == NssReturnCode.NOTFOUND
setup_pw_with_canary.useradd(**USER1)
check_user(USER1)
setup_pw_with_canary.userdel(USER1["name"])
time.sleep(1.0)
res, _ = sssd_getpwnam_sync(USER1["name"])
assert res == NssReturnCode.NOTFOUND
setup_pw_with_canary.useradd(**USER1)
check_user(USER1)
def test_mod_user_shell(add_user_with_canary, files_domain_only):
"""
Test that modifying a user shell is detected and the user
is modified in the sssd database
"""
res, user = sssd_getpwnam_sync(USER1["name"])
assert res == NssReturnCode.SUCCESS
assert user == USER1
moduser = dict(USER1)
moduser['shell'] = '/bin/zsh'
add_user_with_canary.usermod(**moduser)
check_user(moduser)
def incomplete_user_setup(pwd_ops, del_field, exp_field):
adduser = dict(USER1)
del adduser[del_field]
exp_user = dict(USER1)
exp_user[del_field] = exp_field
pwd_ops.useradd(**adduser)
return exp_user
def test_user_no_shell(setup_pw_with_canary, files_domain_only):
"""
Test that resolving a user without a shell defined works and returns
a fallback value
"""
check_user(incomplete_user_setup(setup_pw_with_canary, 'shell', ''))
def test_user_no_dir(setup_pw_with_canary, files_domain_only):
"""
Test that resolving a user without a homedir defined works and returns
a fallback value
"""
check_user(incomplete_user_setup(setup_pw_with_canary, 'dir', ''))
def test_user_no_gecos(setup_pw_with_canary, files_domain_only):
"""
Test that resolving a user without a gecos defined works and returns
a fallback value
"""
check_user(incomplete_user_setup(setup_pw_with_canary, 'gecos', ''))
def test_user_no_passwd(setup_pw_with_canary, files_domain_only):
"""
Test that resolving a user without a password defined works and returns
a fallback value
"""
check_user(incomplete_user_setup(setup_pw_with_canary, 'passwd', 'x'))
def bad_incomplete_user_setup(pwd_ops, del_field):
adduser = dict(USER1)
adduser[del_field] = ''
pwd_ops.useradd(**adduser)
def test_incomplete_user_fail(setup_pw_with_canary, files_domain_only):
"""
Test resolving an incomplete user where the missing field is required
to be present in the user record and thus the user shouldn't resolve.
We cannot test UID and GID missing because nss_wrapper doesn't even
load the malformed passwd file, then.
"""
bad_incomplete_user_setup(setup_pw_with_canary, 'name')
res, user = sssd_getpwnam_sync(USER1["name"])
assert res == NssReturnCode.NOTFOUND
def test_getgrnam_after_start(add_group_with_canary, files_domain_only):
"""
Test that after startup without any additional operations, a group
can be resolved through sssd by name
"""
check_group(GROUP1)
def test_getgrgid_after_start(add_group_with_canary, files_domain_only):
"""
Test that after startup without any additional operations, a group
can be resolved through sssd by GID
"""
check_group_by_gid(GROUP1)
def test_getgrnam_neg(files_domain_only):
"""
Test that a nonexistent group cannot be resolved
"""
res, user = sssd_getgrnam_sync("nosuchgroup")
assert res == NssReturnCode.NOTFOUND
def test_getgrgid_neg(files_domain_only):
"""
Test that a nonexistent group cannot be resolved
"""
res, user = sssd_getgrgid_sync(123456)
assert res == NssReturnCode.NOTFOUND
def test_root_group_does_not_resolve(files_domain_only):
"""
SSSD currently does not resolve the root group even though it can
be resolved through the NSS interface
"""
nss_root = grp.getgrnam("root")
assert nss_root is not None
res, user = call_sssd_getgrnam("root")
assert res == NssReturnCode.NOTFOUND
def test_gid_zero_does_not_resolve(files_domain_only):
"""
SSSD currently does not resolve the group with GID 0 even though it
can be resolved through the NSS interface
"""
nss_root = grp.getgrgid(0)
assert nss_root is not None
res, user = call_sssd_getgrgid(0)
assert res == NssReturnCode.NOTFOUND
def test_add_remove_add_file_group(
setup_pw_with_canary, setup_gr_with_canary, files_domain_only
):
"""
Test that removing a group is detected and the group
is removed from the sssd database. Similarly, an add
should be detected. Do this several times to test retaining
the inotify watch for moved and unlinked files.
"""
res, group = call_sssd_getgrnam(GROUP1["name"])
assert res == NssReturnCode.NOTFOUND
add_group_members(setup_pw_with_canary, GROUP1)
setup_gr_with_canary.groupadd(**GROUP1)
check_group(GROUP1)
setup_gr_with_canary.groupdel(GROUP1["name"])
time.sleep(1)
res, group = call_sssd_getgrnam(GROUP1["name"])
assert res == NssReturnCode.NOTFOUND
setup_gr_with_canary.groupadd(**GROUP1)
check_group(GROUP1)
def test_mod_group_name(add_group_with_canary, files_domain_only):
"""
Test that modifying a group name is detected and the group
is modified in the sssd database
"""
check_group(GROUP1)
modgroup = dict(GROUP1)
modgroup['name'] = 'group1_mod'
add_group_with_canary.groupmod(old_name=GROUP1["name"], **modgroup)
check_group(modgroup)
def test_mod_group_gid(add_group_with_canary, files_domain_only):
"""
Test that modifying a group name is detected and the group
is modified in the sssd database
"""
check_group(GROUP1)
modgroup = dict(GROUP1)
modgroup['gid'] = 30002
add_group_with_canary.groupmod(old_name=GROUP1["name"], **modgroup)
check_group(modgroup)
@pytest.fixture
def add_group_nomem_with_canary(passwd_ops_setup, group_ops_setup):
return setup_gr_with_list(
passwd_ops_setup, group_ops_setup, [GROUP_NOMEM, CANARY_GR]
)
def test_getgrnam_no_members(add_group_nomem_with_canary, files_domain_only):
"""
Test that after startup without any additional operations, a group
can be resolved through sssd
"""
check_group(GROUP_NOMEM)
def groupadd_list(grp_ops, groups):
for grp in groups:
grp_ops.groupadd(**grp)
def useradd_list(pwd_ops, users):
for usr in users:
pwd_ops.useradd(**usr)
def user_and_group_setup(pwd_ops, grp_ops, users, groups, reverse):
"""
The reverse is added so that we test cases where a group is added first,
then a user for this group is created -- in that case, we need to properly
link the group after the user is added.
"""
if reverse is False:
useradd_list(pwd_ops, users)
groupadd_list(grp_ops, groups)
else:
groupadd_list(grp_ops, groups)
useradd_list(pwd_ops, users)
def members_check(added_groups):
# Test that users are members as per getgrnam
check_group_list(added_groups)
# Test that users are members as per initgroups
for group in added_groups:
for member in group['mem']:
res, groups = sssd_id_sync(member)
assert res == sssd_id.NssReturnCode.SUCCESS
assert group['name'] in groups
def test_getgrnam_members_users_first(setup_pw_with_canary,
setup_gr_with_canary,
files_domain_only):
"""
A user is linked with a group
"""
user_and_group_setup(setup_pw_with_canary,
setup_gr_with_canary,
[USER1],
[GROUP1],
False)
members_check([GROUP1])
def test_getgrnam_members_users_multiple(setup_pw_with_canary,
setup_gr_with_canary,
files_domain_only):
"""
Multiple users are linked with a group
"""
user_and_group_setup(setup_pw_with_canary,
setup_gr_with_canary,
[USER1, USER2],
[GROUP12],
False)
members_check([GROUP12])
def test_getgrnam_members_groups_first(setup_pw_with_canary,
setup_gr_with_canary,
files_domain_only):
"""
A group is linked with a user
"""
user_and_group_setup(setup_pw_with_canary,
setup_gr_with_canary,
[USER1],
[GROUP1],
True)
members_check([GROUP1])
def test_getgrnam_ghost(setup_pw_with_canary,
setup_gr_with_canary,
files_domain_only):
"""
Test that group if not found (and will be handled by nss_files) if there
are any ghost members.
"""
user_and_group_setup(setup_pw_with_canary,
setup_gr_with_canary,
[],
[GROUP12],
False)
time.sleep(1)
res, group = call_sssd_getgrnam(GROUP12["name"])
assert res == NssReturnCode.NOTFOUND
for member in GROUP12['mem']:
res, _ = call_sssd_getpwnam(member)
assert res == NssReturnCode.NOTFOUND
def ghost_and_member_test(pw_ops, grp_ops, reverse):
user_and_group_setup(pw_ops,
grp_ops,
[USER1],
[GROUP12],
reverse)
time.sleep(1)
res, group = call_sssd_getgrnam(GROUP12["name"])
assert res == NssReturnCode.NOTFOUND
# We checked that the group added has the same members as group12,
# so both user1 and user2. Now check that user1 is a member of
# group12 and its own primary GID but user2 doesn't exist, it's
# just a ghost entry
res, groups = sssd_id_sync('user1')
assert res == sssd_id.NssReturnCode.SUCCESS
assert len(groups) == 2
assert 'group12' in groups
res, _ = call_sssd_getpwnam('user2')
assert res == NssReturnCode.NOTFOUND
def test_getgrnam_user_ghost_and_member(setup_pw_with_canary,
setup_gr_with_canary,
files_domain_only):
"""
Test that a group with one member and one ghost.
"""
ghost_and_member_test(setup_pw_with_canary,
setup_gr_with_canary,
False)
def test_getgrnam_user_member_and_ghost(setup_pw_with_canary,
setup_gr_with_canary,
files_domain_only):
"""
Test that a group with one member and one ghost, adding the group
first and then linking the member
"""
ghost_and_member_test(setup_pw_with_canary,
setup_gr_with_canary,
True)
def test_getgrnam_add_remove_members(setup_pw_with_canary,
add_group_nomem_with_canary,
files_domain_only):
"""
Test that a user is linked with a group
"""
pwd_ops = setup_pw_with_canary
check_group(GROUP_NOMEM)
for usr in [USER1, USER2]:
pwd_ops.useradd(**usr)
modgroup = dict(GROUP_NOMEM)
modgroup['mem'] = ['user1', 'user2']
add_group_nomem_with_canary.groupmod(old_name=modgroup['name'], **modgroup)
check_group(modgroup)
res, groups = sssd_id_sync('user1')
assert res == sssd_id.NssReturnCode.SUCCESS
assert len(groups) == 2
assert 'group_nomem' in groups
res, groups = sssd_id_sync('user2')
assert res == sssd_id.NssReturnCode.SUCCESS
assert 'group_nomem' in groups
modgroup['mem'] = ['user2']
add_group_nomem_with_canary.groupmod(old_name=modgroup['name'], **modgroup)
check_group(modgroup)
# User1 exists, but is not a member of any supplementary group anymore
res, _ = call_sssd_getpwnam('user1')
assert res == sssd_id.NssReturnCode.SUCCESS
res, groups = sssd_id_sync('user1')
assert res == sssd_id.NssReturnCode.NOTFOUND
# user2 still is
res, groups = sssd_id_sync('user2')
assert res == sssd_id.NssReturnCode.SUCCESS
assert len(groups) == 2
assert 'group_nomem' in groups
def test_getgrnam_add_remove_ghosts(setup_pw_with_canary,
add_group_nomem_with_canary,
files_domain_only):
"""
Test that a user is linked with a group
"""
pwd_ops = setup_pw_with_canary
check_group(GROUP_NOMEM)
modgroup = dict(GROUP_NOMEM)
modgroup['mem'] = ['user1', 'user2']
add_group_nomem_with_canary.groupmod(old_name=modgroup['name'], **modgroup)
time.sleep(1)
res, group = call_sssd_getgrnam(modgroup['name'])
assert res == sssd_id.NssReturnCode.NOTFOUND
modgroup['mem'] = ['user2']
add_group_nomem_with_canary.groupmod(old_name=modgroup['name'], **modgroup)
time.sleep(1)
res, group = call_sssd_getgrnam(modgroup['name'])
assert res == sssd_id.NssReturnCode.NOTFOUND
res, _ = call_sssd_getpwnam('user1')
assert res == NssReturnCode.NOTFOUND
res, _ = call_sssd_getpwnam('user2')
assert res == NssReturnCode.NOTFOUND
def realloc_users(pwd_ops, num):
# Intentionally not including the last one because
# canary is added first
for i in range(1, num):
user = user_generator(i)
pwd_ops.useradd(**user)
user = user_generator(num-1)
check_user(user)
def test_realloc_users_exact(setup_pw_with_canary, files_domain_only):
"""
Test that returning exactly FILES_REALLOC_CHUNK users (see files_ops.c)
works fine to test reallocation logic. Test exact number of users to
check for off-by-one errors.
"""
realloc_users(setup_pw_with_canary, FILES_REALLOC_CHUNK)
def test_realloc_users(setup_pw_with_canary, files_domain_only):
"""
Test that returning exactly FILES_REALLOC_CHUNK users (see files_ops.c)
works fine to test reallocation logic.
"""
realloc_users(setup_pw_with_canary, FILES_REALLOC_CHUNK*3)
def realloc_groups(grp_ops, num):
for i in range(1, num):
group = group_generator(i)
grp_ops.groupadd(**group)
group = group_generator(num-1)
check_group(group)
def test_realloc_groups_exact(setup_gr_with_canary, files_domain_only):
"""
Test that returning exactly FILES_REALLOC_CHUNK groups (see files_ops.c)
works fine to test reallocation logic. Test exact number of groups to
check for off-by-one errors.
"""
realloc_groups(setup_gr_with_canary, FILES_REALLOC_CHUNK*3)
def test_realloc_groups(setup_gr_with_canary, files_domain_only):
"""
Test that returning exactly FILES_REALLOC_CHUNK groups (see files_ops.c)
works fine to test reallocation logic. Test exact number of groups to
check for off-by-one errors.
"""
realloc_groups(setup_gr_with_canary, FILES_REALLOC_CHUNK*3)
# Files domain autoconfiguration tests
def test_no_sssd_domain(add_user_with_canary, no_sssd_domain):
"""
Test that if no sssd domain is configured, sssd will add the implicit one
"""
res, user = sssd_getpwnam_sync(USER1["name"])
assert res == NssReturnCode.SUCCESS
assert user == USER1
def test_proxy_to_files_domain_only(add_user_with_canary,
proxy_to_files_domain_only):
"""
Test that implicit_files domain is not started together with proxy to files
"""
local_user1 = dict(name='user1', passwd='*', uid=10009, gid=10009,
gecos='user1', dir='/home/user1', shell='/bin/bash')
# Add a user with a different UID than the one in files
subprocess.check_call(
["sss_useradd", "-u", "10009", "-M", USER1["name"]])
res, user = call_sssd_getpwnam(USER1["name"])
assert res == NssReturnCode.SUCCESS
assert user == local_user1
res, _ = call_sssd_getpwnam("{0}@implicit_files".format(USER1["name"]))
assert res == NssReturnCode.NOTFOUND
def test_no_files_domain(add_user_with_canary, no_files_domain):
"""
Test that if no files domain is configured, sssd will add the implicit one
before any explicitly configured domains
"""
# Add a user with a different UID than the one in files
subprocess.check_call(
["sss_useradd", "-u", "10009", "-M", USER1["name"]])
# Even though the local domain is the only one configured,
# files will be resolved first
res, user = sssd_getpwnam_sync(USER1["name"])
assert res == NssReturnCode.SUCCESS
assert user == USER1
def test_disable_files_domain(add_user_with_canary, disabled_files_domain):
"""
Test that if no files domain is configured, sssd will add the implicit one
before any explicitly configured domains
"""
# The local user will not be resolvable through nss_sss now
res, user = sssd_getpwnam_sync(USER1["name"])
assert res != NssReturnCode.SUCCESS
def test_no_sssd_conf(add_user_with_canary, no_sssd_conf):
"""
Test that running without sssd.conf implicitly configures one with
id_provider=files
"""
res, user = sssd_getpwnam_sync(USER1["name"])
assert res == NssReturnCode.SUCCESS
assert user == USER1
def test_multiple_passwd_group_files(add_user_with_canary,
add_group_with_canary,
files_multiple_sources):
"""
Test that users and groups can be mirrored from multiple files
"""
alt_pwops, alt_grops = files_multiple_sources
alt_pwops.useradd(**ALT_USER1)
alt_grops.groupadd(**ALT_GROUP1)
check_user(USER1)
check_user(ALT_USER1)
check_group(GROUP1)
check_group(ALT_GROUP1)
def test_multiple_files_created_after_startup(add_user_with_canary,
add_group_with_canary,
files_multiple_sources_nocreate):
"""
Test that users and groups can be mirrored from multiple files,
but those files are not created when SSSD starts, only afterwards.
"""
alt_passwd_path, alt_group_path = files_multiple_sources_nocreate
check_user(USER1)
check_group(GROUP1)
# touch the files
for fpath in (alt_passwd_path, alt_group_path):
with open(fpath, "w") as f:
pass
alt_pwops = PasswdOps(alt_passwd_path)
alt_grops = GroupOps(alt_group_path)
alt_pwops.useradd(**ALT_USER1)
alt_grops.groupadd(**ALT_GROUP1)
check_user(ALT_USER1)
check_group(ALT_GROUP1)
def test_files_with_domain_resolution_order(add_user_with_canary,
domain_resolution_order):
"""
Test that when using domain_resolution_order the user won't be using
its fully-qualified name.
"""
check_user(USER1)
def test_files_with_default_domain_suffix(add_user_with_canary,
default_domain_suffix):
"""
Test that when using domain_resolution_order the user won't be using
its fully-qualified name.
"""
ret = poll_canary(call_sssd_getpwuid, CANARY["uid"])
if ret is False:
return NssReturnCode.NOTFOUND, None
res, found_user = call_sssd_getpwuid(USER1["uid"])
assert res == NssReturnCode.SUCCESS
assert found_user == USER1
def test_files_with_override_homedir(add_user_with_canary,
override_homedir_and_shell):
res, user = sssd_getpwnam_sync(USER1["name"])
assert res == NssReturnCode.SUCCESS
assert user["dir"] == USER1["dir"]
def test_files_with_override_shell(add_user_with_canary,
override_homedir_and_shell):
res, user = sssd_getpwnam_sync(USER1["name"])
assert res == NssReturnCode.SUCCESS
assert user["shell"] == USER1["shell"]
| gpl-3.0 |
boris-savic/swampdragon | tests/test_sessions.py | 13 | 2677 | from swampdragon.connections.sockjs_connection import SubscriberConnection
from swampdragon.route_handler import BaseRouter
from swampdragon.testing.dragon_testcase import DragonTestCase
from swampdragon.connections.mock_connection import TestConnection
from swampdragon.sessions.session_store import BaseSessionStore
from swampdragon.sessions import sessions
from .test_subscriber_connection import TestSession
import json
class FooRouter(BaseRouter):
route_name = 'foo-router'
valid_verbs = ['write_session', 'read_session']
def write_session(self, **kwargs):
self.connection.session_store.set('key', kwargs['value'])
def read_session(self):
val = self.connection.session_store.get('key')
self.send(val)
class TestSessionStore(BaseSessionStore):
data = {}
def set(self, key, val):
self.data[key] = val
def get(self, key):
return self.data[key]
class TestSessions(DragonTestCase):
def setUp(self):
self.session = TestSession()
self.connection = SubscriberConnection(self.session)
self.session_store = self.connection.session_store
def test_read_from_session(self):
val = 'test val'
self.session_store.set('a_key', val)
self.assertEqual(val, self.session_store.get('a_key'))
def test_overwrite_value(self):
val = 'test val'
self.session_store.set('a_key', val)
self.session_store.set('a_key', 'updated val')
self.assertEqual('updated val', self.session_store.get('a_key'))
def test_write_dict(self):
data = {'a': 'val', 'b': 1}
key = 'key'
self.session_store.set(key, data)
data_from_session = json.loads(self.session_store.get(key))
self.assertDictEqual(data, data_from_session)
def test_session_from_router(self):
"""
Ensure that sessions are unique per connection
"""
connection_a = TestConnection()
connection_b = TestConnection()
foo_router = FooRouter(connection_a)
foo_router.write_session(**{'value': 'a value'})
foo_router.read_session()
self.assertEqual(connection_a.get_last_message()['data'], 'a value')
foo_router = FooRouter(connection_b)
foo_router.read_session()
self.assertNotEqual(connection_b.get_last_message()['data'], 'a value')
class TestCustomSessionStore(DragonTestCase):
def test_custom_session_store(self):
sessions.session_store = TestSessionStore
session_store = sessions.get_session_store()(self.connection)
session_store.set('key', 'val')
self.assertEqual(session_store.get('key'), 'val')
| bsd-3-clause |
persandstrom/home-assistant | homeassistant/components/sensor/netatmo_public.py | 1 | 4390 | """
Support for Sensors using public Netatmo data.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.netatmo_public/.
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_NAME, CONF_TYPE)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['netatmo']
CONF_AREAS = 'areas'
CONF_LAT_NE = 'lat_ne'
CONF_LON_NE = 'lon_ne'
CONF_LAT_SW = 'lat_sw'
CONF_LON_SW = 'lon_sw'
DEFAULT_NAME = 'Netatmo Public Data'
DEFAULT_TYPE = 'max'
SENSOR_TYPES = {'max', 'avg'}
# NetAtmo Data is uploaded to server every 10 minutes
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=600)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_AREAS): vol.All(cv.ensure_list, [
{
vol.Required(CONF_LAT_NE): cv.latitude,
vol.Required(CONF_LAT_SW): cv.latitude,
vol.Required(CONF_LON_NE): cv.longitude,
vol.Required(CONF_LON_SW): cv.longitude,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TYPE, default=DEFAULT_TYPE):
vol.In(SENSOR_TYPES)
}
]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the access to Netatmo binary sensor."""
netatmo = hass.components.netatmo
sensors = []
areas = config.get(CONF_AREAS)
for area_conf in areas:
data = NetatmoPublicData(netatmo.NETATMO_AUTH,
lat_ne=area_conf.get(CONF_LAT_NE),
lon_ne=area_conf.get(CONF_LON_NE),
lat_sw=area_conf.get(CONF_LAT_SW),
lon_sw=area_conf.get(CONF_LON_SW),
calculation=area_conf.get(CONF_TYPE))
sensors.append(NetatmoPublicSensor(area_conf.get(CONF_NAME), data))
add_entities(sensors)
class NetatmoPublicSensor(Entity):
"""Represent a single sensor in a Netatmo."""
def __init__(self, name, data):
"""Initialize the sensor."""
self.netatmo_data = data
self._name = name
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend."""
return 'mdi:weather-rainy'
@property
def device_class(self):
"""Return the device class of the sensor."""
return None
@property
def state(self):
"""Return true if binary sensor is on."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return 'mm'
def update(self):
"""Get the latest data from NetAtmo API and updates the states."""
self.netatmo_data.update()
self._state = self.netatmo_data.data
class NetatmoPublicData:
"""Get the latest data from NetAtmo."""
def __init__(self, auth, lat_ne, lon_ne, lat_sw, lon_sw, calculation):
"""Initialize the data object."""
self.auth = auth
self.data = None
self.lat_ne = lat_ne
self.lon_ne = lon_ne
self.lat_sw = lat_sw
self.lon_sw = lon_sw
self.calculation = calculation
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Request an update from the Netatmo API."""
import pyatmo
raindata = pyatmo.PublicData(self.auth,
LAT_NE=self.lat_ne,
LON_NE=self.lon_ne,
LAT_SW=self.lat_sw,
LON_SW=self.lon_sw,
required_data_type="rain")
if raindata.CountStationInArea() == 0:
_LOGGER.warning('No Rain Station available in this area.')
return
raindata_live = raindata.getLive()
if self.calculation == 'avg':
self.data = sum(raindata_live.values()) / len(raindata_live)
else:
self.data = max(raindata_live.values())
| apache-2.0 |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sqlalchemy/dialects/mysql/gaerdbms.py | 33 | 3387 | # mysql/gaerdbms.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+gaerdbms
:name: Google Cloud SQL
:dbapi: rdbms
:connectstring: mysql+gaerdbms:///<dbname>?instance=<instancename>
:url: https://developers.google.com/appengine/docs/python/cloud-sql/\
developers-guide
This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with
minimal changes.
.. versionadded:: 0.7.8
.. deprecated:: 1.0 This dialect is **no longer necessary** for
Google Cloud SQL; the MySQLdb dialect can be used directly.
Cloud SQL now recommends creating connections via the
mysql dialect using the URL format
``mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>``
Pooling
-------
Google App Engine connections appear to be randomly recycled,
so the dialect does not pool connections. The :class:`.NullPool`
implementation is installed within the :class:`.Engine` by
default.
"""
import os
from .mysqldb import MySQLDialect_mysqldb
from ...pool import NullPool
import re
from sqlalchemy.util import warn_deprecated
def _is_dev_environment():
return os.environ.get('SERVER_SOFTWARE', '').startswith('Development/')
class MySQLDialect_gaerdbms(MySQLDialect_mysqldb):
    """MySQL dialect speaking Google App Engine's legacy ``rdbms`` DBAPI.

    Deprecated; see the module docstring for the recommended
    ``mysql+mysqldb`` URL format.
    """

    @classmethod
    def dbapi(cls):
        # Select the DBAPI module appropriate for the current runtime:
        # dev-appserver stub, API-proxy wrapper, or the remote API client.
        warn_deprecated(
            "Google Cloud SQL now recommends creating connections via the "
            "MySQLdb dialect directly, using the URL format "
            "mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/"
            "<projectid>:<instancename>"
        )
        # from django:
        # http://code.google.com/p/googleappengine/source/
        # browse/trunk/python/google/storage/speckle/
        # python/django/backend/base.py#118
        # see also [ticket:2649]
        # see also http://stackoverflow.com/q/14224679/34549
        from google.appengine.api import apiproxy_stub_map
        if _is_dev_environment():
            from google.appengine.api import rdbms_mysqldb
            return rdbms_mysqldb
        elif apiproxy_stub_map.apiproxy.GetStub('rdbms'):
            from google.storage.speckle.python.api import rdbms_apiproxy
            return rdbms_apiproxy
        else:
            from google.storage.speckle.python.api import rdbms_googleapi
            return rdbms_googleapi

    @classmethod
    def get_pool_class(cls, url):
        # Cloud SQL connections die at any moment
        return NullPool

    def create_connect_args(self, url):
        # Translate the SQLAlchemy URL into DBAPI connect() kwargs.
        opts = url.translate_connect_args()
        if not _is_dev_environment():
            # 'dsn' and 'instance' are because we are skipping
            # the traditional google.api.rdbms wrapper
            opts['dsn'] = ''
            opts['instance'] = url.query['instance']
        return [], opts

    def _extract_error_code(self, exception):
        # Error text looks like "1062: ..." or "(1062, ...)"; pull the
        # numeric code out of either form (optional 'L' suffix is a
        # Python 2 long literal).
        match = re.compile(r"^(\d+)L?:|^\((\d+)L?,").match(str(exception))
        # The rdbms api will wrap then re-raise some types of errors
        # making this regex return no matches.
        code = match.group(1) or match.group(2) if match else None
        if code:
            return int(code)
# Module-level `dialect` name; presumably what SQLAlchemy's dialect loader
# imports for "mysql+gaerdbms" URLs — TODO confirm against the registry.
dialect = MySQLDialect_gaerdbms
| mit |
sencha/chromium-spacewalk | tools/valgrind/asan/third_party/asan_symbolize.py | 8 | 13087 | #!/usr/bin/env python
#===- lib/asan/scripts/asan_symbolize.py -----------------------------------===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
import bisect
import getopt
import os
import pty
import re
import subprocess
import sys
import termios
symbolizers = {}
DEBUG = False
demangle = False;
# FIXME: merge the code that calls fix_filename().
def fix_filename(file_name):
  """Normalize a source path reported by a symbolizer.

  Strips any path prefixes given on the command line, then collapses
  ASan-runtime frames to '_asan_rtl_' and crtstuff frames to '???:0'.
  """
  for prefix in sys.argv[1:]:
    file_name = re.sub('.*' + prefix, '', file_name)
  file_name = re.sub('.*asan_[a-z_]*.cc:[0-9]*', '_asan_rtl_', file_name)
  return re.sub('.*crtstuff.c:0', '???:0', file_name)
def GuessArch(addr):
  """Guess the target architecture from the width of a hex address.

  "0x" plus eight hex digits (10 characters) fits a 32-bit address;
  anything longer is assumed to be x86_64.
  """
  return 'x86_64' if len(addr) > 10 else 'i386'
class Symbolizer(object):
  """Abstract base class for the individual symbolizer backends."""

  def __init__(self):
    pass

  def symbolize(self, addr, binary, offset):
    """Symbolize the given address (pair of binary and offset).

    Overridden in subclasses; the base implementation knows nothing
    and always declines.

    Args:
      addr: virtual address of an instruction.
      binary: path to executable/shared object containing this instruction.
      offset: instruction offset in the @binary.
    Returns:
      list of strings (one per inlined frame) describing the code
      locations for this instruction, or None when unknown.
    """
    return None
class LLVMSymbolizer(Symbolizer):
  """Symbolizer that drives a long-running llvm-symbolizer subprocess.

  NOTE: this file uses Python 2 print-statement syntax.
  """
  def __init__(self, symbolizer_path, addr):
    super(LLVMSymbolizer, self).__init__()
    self.symbolizer_path = symbolizer_path
    # The sample address only determines the default architecture.
    self.default_arch = GuessArch(addr)
    self.pipe = self.open_llvm_symbolizer()
  def open_llvm_symbolizer(self):
    # Spawn llvm-symbolizer with stdin/stdout pipes; returns None when
    # the binary cannot be executed (symbolize() then declines).
    cmd = [self.symbolizer_path,
           '--use-symbol-table=true',
           '--demangle=%s' % demangle,
           '--functions=short',
           '--inlining=true',
           '--default-arch=%s' % self.default_arch]
    if DEBUG:
      print ' '.join(cmd)
    try:
      result = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE)
    except OSError:
      result = None
    return result
  def symbolize(self, addr, binary, offset):
    """Overrides Symbolizer.symbolize."""
    if not self.pipe:
      return None
    result = []
    try:
      # One query line is "<binary> <offset>"; llvm-symbolizer answers
      # with (function, file:line) line pairs, terminated by a blank line.
      symbolizer_input = '%s %s' % (binary, offset)
      if DEBUG:
        print symbolizer_input
      print >> self.pipe.stdin, symbolizer_input
      while True:
        function_name = self.pipe.stdout.readline().rstrip()
        if not function_name:
          break
        file_name = self.pipe.stdout.readline().rstrip()
        file_name = fix_filename(file_name)
        if (not function_name.startswith('??') or
            not file_name.startswith('??')):
          # Append only non-trivial frames.
          result.append('%s in %s %s' % (addr, function_name,
                                         file_name))
    except Exception:
      # Any pipe failure degrades to "no answer" so the chain can fall
      # back to another symbolizer.
      result = []
    if not result:
      result = None
    return result
def LLVMSymbolizerFactory(system, addr):
  """Build an LLVMSymbolizer, resolving the tool path from the environment.

  LLVM_SYMBOLIZER_PATH takes precedence over ASAN_SYMBOLIZER_PATH; with
  neither set, llvm-symbolizer is expected to be found on PATH.
  """
  tool_path = (os.getenv('LLVM_SYMBOLIZER_PATH') or
               os.getenv('ASAN_SYMBOLIZER_PATH') or
               'llvm-symbolizer')
  return LLVMSymbolizer(tool_path, addr)
class Addr2LineSymbolizer(Symbolizer):
  """Symbolizer that drives a long-running `addr2line -f` subprocess.

  Bound to a single binary; NOTE: Python 2 print syntax in this file.
  """
  def __init__(self, binary):
    super(Addr2LineSymbolizer, self).__init__()
    self.binary = binary
    self.pipe = self.open_addr2line()
  def open_addr2line(self):
    # -f makes addr2line print the function name line before file:line.
    cmd = ['addr2line', '-f']
    if demangle:
      cmd += ['--demangle']
    cmd += ['-e', self.binary]
    if DEBUG:
      print ' '.join(cmd)
    return subprocess.Popen(cmd,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  def symbolize(self, addr, binary, offset):
    """Overrides Symbolizer.symbolize."""
    if self.binary != binary:
      # This instance only knows about one binary; decline others.
      return None
    try:
      # addr2line replies with two lines per query: function, file:line.
      print >> self.pipe.stdin, offset
      function_name = self.pipe.stdout.readline().rstrip()
      file_name = self.pipe.stdout.readline().rstrip()
    except Exception:
      function_name = ''
      file_name = ''
    file_name = fix_filename(file_name)
    return ['%s in %s %s' % (addr, function_name, file_name)]
class UnbufferedLineConverter(object):
  """
  Wrap a child process that responds to each line of input with one line of
  output. Uses pty to trick the child into providing unbuffered output.
  """
  def __init__(self, args, close_stderr=False):
    # Fork attached to a pseudo-terminal so the child believes it writes
    # to a tty and therefore line-buffers its output.
    pid, fd = pty.fork()
    if pid == 0:
      # We're the child. Transfer control to command.
      if close_stderr:
        dev_null = os.open('/dev/null', 0)
        os.dup2(dev_null, 2)
      os.execvp(args[0], args)
    else:
      # Disable echoing so our own input doesn't come back as "output".
      attr = termios.tcgetattr(fd)
      attr[3] = attr[3] & ~termios.ECHO
      termios.tcsetattr(fd, termios.TCSANOW, attr)
      # Set up a file()-like interface to the child process
      # (line-buffered; the write side dups the fd so both can be closed
      # independently).
      self.r = os.fdopen(fd, "r", 1)
      self.w = os.fdopen(os.dup(fd), "w", 1)
  def convert(self, line):
    # Send one request line and return the child's next output line.
    self.w.write(line + "\n")
    return self.readline()
  def readline(self):
    return self.r.readline().rstrip()
class DarwinSymbolizer(Symbolizer):
  """Symbolizer backed by the OS X `atos` tool (one instance per binary)."""
  def __init__(self, addr, binary):
    super(DarwinSymbolizer, self).__init__()
    self.binary = binary
    self.arch = GuessArch(addr)
    self.open_atos()
  def open_atos(self):
    if DEBUG:
      print 'atos -o %s -arch %s' % (self.binary, self.arch)
    cmdline = ['atos', '-o', self.binary, '-arch', self.arch]
    # Run atos through a pty wrapper so its replies arrive line by line;
    # its stderr chatter is discarded.
    self.atos = UnbufferedLineConverter(cmdline, close_stderr=True)
  def symbolize(self, addr, binary, offset):
    """Overrides Symbolizer.symbolize."""
    if self.binary != binary:
      # Bound to a single binary; decline queries for other modules.
      return None
    atos_line = self.atos.convert('0x%x' % int(offset, 16))
    # Skip informational lines atos sometimes emits before the answer.
    while "got symbolicator for" in atos_line:
      atos_line = self.atos.readline()
    # A well-formed atos response looks like this:
    # foo(type1, type2) (in object.name) (filename.cc:80)
    match = re.match('^(.*) \(in (.*)\) \((.*:\d*)\)$', atos_line)
    if DEBUG:
      print 'atos_line: ', atos_line
    if match:
      function_name = match.group(1)
      # Drop the parenthesized argument list from the function name.
      function_name = re.sub('\(.*?\)', '', function_name)
      file_name = fix_filename(match.group(3))
      return ['%s in %s %s' % (addr, function_name, file_name)]
    else:
      # Unparseable reply: pass it through verbatim.
      return ['%s in %s' % (addr, atos_line)]
# Chain several symbolizers so that if one symbolizer fails, we fall back
# to the next symbolizer in chain.
class ChainSymbolizer(Symbolizer):
  """Delegates to an ordered list of symbolizers; first non-empty answer wins."""

  def __init__(self, symbolizer_list):
    super(ChainSymbolizer, self).__init__()
    self.symbolizer_list = symbolizer_list

  def symbolize(self, addr, binary, offset):
    """Overrides Symbolizer.symbolize."""
    for backend in self.symbolizer_list:
      if not backend:
        continue  # placeholder entries (e.g. None) are skipped
      frames = backend.symbolize(addr, binary, offset)
      if frames:
        return frames
    return None

  def append_symbolizer(self, symbolizer):
    """Add a fallback symbolizer to the end of the chain."""
    self.symbolizer_list.append(symbolizer)
def BreakpadSymbolizerFactory(binary):
  """Return a BreakpadSymbolizer when a breakpad symbol file exists.

  The symbol file is looked up as binary + $BREAKPAD_SUFFIX; with no
  suffix configured, or no such file, returns None.
  """
  suffix = os.getenv('BREAKPAD_SUFFIX')
  if not suffix:
    return None
  candidate = binary + suffix
  if os.access(candidate, os.F_OK):
    return BreakpadSymbolizer(candidate)
  return None
def SystemSymbolizerFactory(system, addr, binary):
  """Create the OS-native symbolizer: atos on Darwin, addr2line on Linux.

  Returns None for any other system name.
  """
  if system == 'Darwin':
    return DarwinSymbolizer(addr, binary)
  if system == 'Linux':
    return Addr2LineSymbolizer(binary)
  return None
class BreakpadSymbolizer(Symbolizer):
  """Symbolizer that reads a Breakpad .sym text file instead of a tool.

  NOTE: Python 2 (`file()` builtin, print statement).
  """
  def __init__(self, filename):
    super(BreakpadSymbolizer, self).__init__()
    self.filename = filename
    lines = file(filename).readlines()
    self.files = []        # FILE records, indexed by file number
    self.symbols = {}      # function/PUBLIC address -> symbol name
    self.address_list = [] # sorted addresses of line records
    self.addresses = {}    # address -> (func addr, size, line, file no)
    # MODULE mac x86_64 A7001116478B33F18FF9BEDE9F615F190 t
    fragments = lines[0].rstrip().split()
    self.arch = fragments[2]
    self.debug_id = fragments[3]
    self.binary = ' '.join(fragments[4:])
    self.parse_lines(lines[1:])
  def parse_lines(self, lines):
    # Populate the tables from the breakpad record types: FILE, PUBLIC,
    # FUNC, CFI/STACK (ignored), and bare address lines.
    cur_function_addr = ''
    for line in lines:
      fragments = line.split()
      if fragments[0] == 'FILE':
        assert int(fragments[1]) == len(self.files)
        self.files.append(' '.join(fragments[2:]))
      elif fragments[0] == 'PUBLIC':
        self.symbols[int(fragments[1], 16)] = ' '.join(fragments[3:])
      elif fragments[0] in ['CFI', 'STACK']:
        pass
      elif fragments[0] == 'FUNC':
        cur_function_addr = int(fragments[1], 16)
        if not cur_function_addr in self.symbols.keys():
          self.symbols[cur_function_addr] = ' '.join(fragments[4:])
      else:
        # Line starting with an address.
        addr = int(fragments[0], 16)
        self.address_list.append(addr)
        # Tuple of symbol address, size, line, file number.
        self.addresses[addr] = (cur_function_addr,
                                int(fragments[1], 16),
                                int(fragments[2]),
                                int(fragments[3]))
    self.address_list.sort()
  def get_sym_file_line(self, addr):
    # Map addr to (symbol, filename, line) via binary search over the
    # sorted line records; None when addr lies outside any known range.
    key = None
    if addr in self.addresses.keys():
      key = addr
    else:
      index = bisect.bisect_left(self.address_list, addr)
      if index == 0:
        return None
      else:
        key = self.address_list[index - 1]
    sym_id, size, line_no, file_no = self.addresses[key]
    symbol = self.symbols[sym_id]
    filename = self.files[file_no]
    if addr < key + size:
      return symbol, filename, line_no
    else:
      return None
  def symbolize(self, addr, binary, offset):
    # Only answers for the binary this .sym file describes.
    if self.binary != binary:
      return None
    res = self.get_sym_file_line(int(offset, 16))
    if res:
      function_name, file_name, line_no = res
      result = ['%s in %s %s:%d' % (
          addr, function_name, file_name, line_no)]
      print result
      return result
    else:
      return None
class SymbolizationLoop(object):
  """Reads an ASan report from stdin, rewriting stack frames symbolized.

  NOTE: Python 2 print statements in this class.
  """
  def __init__(self, binary_name_filter=None):
    # Used by clients who may want to supply a different binary name.
    # E.g. in Chrome several binaries may share a single .dSYM.
    self.binary_name_filter = binary_name_filter
    self.system = os.uname()[0]
    if self.system not in ['Linux', 'Darwin']:
      raise Exception('Unknown system')
    self.llvm_symbolizer = None
    self.frame_no = 0
  def symbolize_address(self, addr, binary, offset):
    # Initialize llvm-symbolizer lazily.
    if not self.llvm_symbolizer:
      self.llvm_symbolizer = LLVMSymbolizerFactory(self.system, addr)
    # Use the chain of symbolizers:
    # Breakpad symbolizer -> LLVM symbolizer -> addr2line/atos
    # (fall back to next symbolizer if the previous one fails).
    if not binary in symbolizers:
      symbolizers[binary] = ChainSymbolizer(
          [BreakpadSymbolizerFactory(binary), self.llvm_symbolizer])
    result = symbolizers[binary].symbolize(addr, binary, offset)
    if result is None:
      # Initialize system symbolizer only if other symbolizers failed.
      symbolizers[binary].append_symbolizer(
          SystemSymbolizerFactory(self.system, addr, binary))
      result = symbolizers[binary].symbolize(addr, binary, offset)
    # The system symbolizer must produce some result.
    assert result
    return result
  def get_symbolized_lines(self, symbolized_lines):
    # Re-emit the frame(s) with a running frame counter; pass the input
    # line through untouched when symbolization produced nothing.
    if not symbolized_lines:
      return [self.current_line]
    else:
      result = []
      for symbolized_frame in symbolized_lines:
        result.append(' #%s %s' % (str(self.frame_no), symbolized_frame.rstrip()))
        self.frame_no += 1
      return result
  def process_stdin(self):
    self.frame_no = 0
    while True:
      line = sys.stdin.readline()
      if not line:
        break
      processed = self.process_line(line)
      print ''.join(processed)
  def process_line(self, line):
    self.current_line = line.rstrip()
    # Frame lines look like:
    #0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45)
    stack_trace_line_format = (
        '^( *#([0-9]+) *)(0x[0-9a-f]+) *\((.*)\+(0x[0-9a-f]+)\)')
    match = re.match(stack_trace_line_format, line)
    if not match:
      return [self.current_line]
    if DEBUG:
      print line
    _, frameno_str, addr, binary, offset = match.groups()
    if frameno_str == '0':
      # Assume that frame #0 is the first frame of new stack trace.
      self.frame_no = 0
    original_binary = binary
    if self.binary_name_filter:
      binary = self.binary_name_filter(binary)
    symbolized_line = self.symbolize_address(addr, binary, offset)
    if not symbolized_line:
      # Retry with the filtered name if the filter changed it.
      if original_binary != binary:
        symbolized_line = self.symbolize_address(addr, binary, offset)
    return self.get_symbolized_lines(symbolized_line)
if __name__ == '__main__':
  # Entry point: -d/--demangle enables symbol demangling; the remaining
  # positional arguments are path prefixes stripped by fix_filename().
  opts, args = getopt.getopt(sys.argv[1:], "d", ["demangle"])
  for o, a in opts:
    if o in ("-d", "--demangle"):
      demangle = True;
  loop = SymbolizationLoop()
  loop.process_stdin()
| bsd-3-clause |
ryfeus/lambda-packs | pytorch/source/torch/nn/parallel/deprecated/distributed_cpu.py | 1 | 4290 | import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
import torch.distributed.deprecated as dist
from torch.nn.modules import Module
from collections import defaultdict
from torch.autograd import Variable
import torch.utils.hooks
class DistributedDataParallelCPU(Module):
    r"""Implements distributed data parallelism for CPU at the module level.
    This module support the ``mpi``, ``gloo``, ``tcp`` backends.
    This container parallelizes the application of the given module by
    splitting the input across the specified devices by chunking in the batch
    dimension. The module is replicated on each machine, and each such replica
    handles a portion of the input. During the backwards pass, gradients from
    each node are averaged.
    This module could be used in conjunction with the DistributedSampler,
    (see :class `torch.utils.data.distributed.DistributedSampler`)
    which will load a subset of the original datset for each node with the same
    batch size. So strong scaling should be configured like this:
    n = 1, batch size = 128
    n = 2, batch size = 64
    n = 4, batch size = 32
    n = 8, batch size = 16
    Creation of this class requires the distributed package to be already
    initialized in the process group mode
    (see :func:`torch.distributed.deprecated.init_process_group`).
    .. warning::
        Constructor, forward method, and differentiation of the output (or a
        function of the output of this module) is a distributed synchronization
        point. Take that into account in case different node might be
        executing different code.
    .. warning::
        This module assumes all parameters are registered in the model by the
        time it is created. No parameters should be added nor removed later.
    .. warning::
        This module assumes all gradients are dense.
    .. warning::
        This module doesn't work with :func:`torch.autograd.grad` (i.e. it will
        only work if gradients are to be accumulated in ``.grad`` attributes of
        parameters).
    .. note::
        Parameters are broadcast between nodes in the __init__() function. The
        module performs an all-reduce step on gradients and assumes that they
        will be modified by the optimizer in all nodes in the same way.
    .. warning::
        Forward and backward hooks defined on :attr:`module` and its submodules
        won't be invoked anymore, unless the hooks are initialized in the
        :meth:`forward` method.
    Args:
        module: module to be parallelized
    Example::
        >>> torch.distributed.deprecated.init_process_group(world_size=4, init_method='...')
        >>> net = torch.nn.DistributedDataParallelCPU(model)
    """

    def __init__(self, module):
        super(DistributedDataParallelCPU, self).__init__()
        self.module = module
        # Broadcast rank 0's parameters so every node starts identical.
        self.sync_parameters()

        def allreduce_params():
            # Runs at most once per backward pass (guarded by
            # needs_reduction): coalesce grads per tensor type, all-reduce,
            # average by world size, then copy back into the original
            # gradient tensors.
            if self.needs_reduction:
                self.needs_reduction = False
                buckets = defaultdict(list)
                for param in self.module.parameters():
                    if param.requires_grad and param.grad is not None:
                        tp = type(param.data)
                        buckets[tp].append(param)
                for bucket in buckets.values():
                    grads = [param.grad.data for param in bucket]
                    coalesced = _flatten_dense_tensors(grads)
                    dist.all_reduce(coalesced)
                    coalesced /= dist.get_world_size()
                    for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
                        buf.copy_(synced)

        # Register a gradient hook on every parameter; the hook defers the
        # actual all-reduce until the autograd engine finishes backward.
        for param in list(self.module.parameters()):
            @torch.utils.hooks.unserializable_hook
            def allreduce_hook(*unused):
                Variable._execution_engine.queue_callback(allreduce_params)
            if param.requires_grad:
                param.register_hook(allreduce_hook)

    def sync_parameters(self):
        # Rank 0 is the source of truth for the initial parameter values.
        for param in self.module.parameters():
            dist.broadcast(param.data, 0)

    def forward(self, *inputs, **kwargs):
        # Arm the flag so the first backward after this forward triggers
        # exactly one gradient all-reduce.
        self.needs_reduction = True
        return self.module(*inputs, **kwargs)
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/OpenGL/raw/GL/ARB/texture_compression.py | 9 | 2731 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ARB_texture_compression'
def _f( function ):
    # Decorator binding a stub below to the real GL entry point via
    # _p.createFunction, with the standard GL error checker attached.
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_ARB_texture_compression',error_checker=_errors._error_checker)
# GLenum constants defined by the ARB_texture_compression extension.
GL_COMPRESSED_ALPHA_ARB=_C('GL_COMPRESSED_ALPHA_ARB',0x84E9)
GL_COMPRESSED_INTENSITY_ARB=_C('GL_COMPRESSED_INTENSITY_ARB',0x84EC)
GL_COMPRESSED_LUMINANCE_ALPHA_ARB=_C('GL_COMPRESSED_LUMINANCE_ALPHA_ARB',0x84EB)
GL_COMPRESSED_LUMINANCE_ARB=_C('GL_COMPRESSED_LUMINANCE_ARB',0x84EA)
GL_COMPRESSED_RGBA_ARB=_C('GL_COMPRESSED_RGBA_ARB',0x84EE)
GL_COMPRESSED_RGB_ARB=_C('GL_COMPRESSED_RGB_ARB',0x84ED)
GL_COMPRESSED_TEXTURE_FORMATS_ARB=_C('GL_COMPRESSED_TEXTURE_FORMATS_ARB',0x86A3)
GL_NUM_COMPRESSED_TEXTURE_FORMATS_ARB=_C('GL_NUM_COMPRESSED_TEXTURE_FORMATS_ARB',0x86A2)
GL_TEXTURE_COMPRESSED_ARB=_C('GL_TEXTURE_COMPRESSED_ARB',0x86A1)
GL_TEXTURE_COMPRESSED_IMAGE_SIZE_ARB=_C('GL_TEXTURE_COMPRESSED_IMAGE_SIZE_ARB',0x86A0)
GL_TEXTURE_COMPRESSION_HINT_ARB=_C('GL_TEXTURE_COMPRESSION_HINT_ARB',0x84EF)
# ctypes entry points for the extension.  The `pass` bodies are
# placeholders: the @_f / @_p.types decorators replace each stub with a
# typed wrapper around the actual GL driver call at import time.
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLenum,_cs.GLsizei,_cs.GLint,_cs.GLsizei,ctypes.c_void_p)
def glCompressedTexImage1DARB(target,level,internalformat,width,border,imageSize,data):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLenum,_cs.GLsizei,_cs.GLsizei,_cs.GLint,_cs.GLsizei,ctypes.c_void_p)
def glCompressedTexImage2DARB(target,level,internalformat,width,height,border,imageSize,data):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLenum,_cs.GLsizei,_cs.GLsizei,_cs.GLsizei,_cs.GLint,_cs.GLsizei,ctypes.c_void_p)
def glCompressedTexImage3DARB(target,level,internalformat,width,height,depth,border,imageSize,data):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p)
def glCompressedTexSubImage1DARB(target,level,xoffset,width,format,imageSize,data):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p)
def glCompressedTexSubImage2DARB(target,level,xoffset,yoffset,width,height,format,imageSize,data):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei,_cs.GLsizei,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p)
def glCompressedTexSubImage3DARB(target,level,xoffset,yoffset,zoffset,width,height,depth,format,imageSize,data):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,ctypes.c_void_p)
def glGetCompressedTexImageARB(target,level,img):pass
| gpl-3.0 |
tragiclifestories/django | django/contrib/auth/context_processors.py | 514 | 1938 | # PermWrapper and PermLookupDict proxy the permissions system into objects that
# the template system can understand.
class PermLookupDict(object):
    """Template-friendly view of one app's permissions for a user."""

    def __init__(self, user, app_label):
        self.user, self.app_label = user, app_label

    def __repr__(self):
        return str(self.user.get_all_permissions())

    def __getitem__(self, perm_name):
        full_name = "%s.%s" % (self.app_label, perm_name)
        return self.user.has_perm(full_name)

    def __iter__(self):
        # Without an explicit __iter__, `item in perms.someapp` would fall
        # back to __getitem__ with integer indexes; refuse iteration
        # outright instead (see #18979 for details).
        raise TypeError("PermLookupDict is not iterable.")

    def __bool__(self):
        return self.user.has_module_perms(self.app_label)

    def __nonzero__(self):  # Python 2 compatibility
        return type(self).__bool__(self)
class PermWrapper(object):
    """Lazy mapping from app label to a PermLookupDict for one user."""

    def __init__(self, user):
        self.user = user

    def __getitem__(self, app_label):
        return PermLookupDict(self.user, app_label)

    def __iter__(self):
        # I am large, I contain multitudes.
        raise TypeError("PermWrapper is not iterable.")

    def __contains__(self, perm_name):
        """
        Support both "someapp" and "someapp.someperm" lookups in perms.
        """
        app_label, dot, perm = perm_name.partition('.')
        if not dot:
            # Bare app label: truthy when the user has any module perms.
            return bool(self[app_label])
        return self[app_label][perm]
def auth(request):
    """
    Returns context variables required by apps that use Django's
    authentication system.

    If there is no 'user' attribute in the request, uses AnonymousUser
    (from django.contrib.auth).
    """
    try:
        user = request.user
    except AttributeError:
        from django.contrib.auth.models import AnonymousUser
        user = AnonymousUser()
    return {
        'user': user,
        'perms': PermWrapper(user),
    }
| bsd-3-clause |
Leonnash21/flask_heroku | Tools/scripts/svneol.py | 97 | 3493 | #! /usr/bin/env python3
"""
SVN helper script.
Try to set the svn:eol-style property to "native" on every .py, .txt, .c and
.h file in the directory tree rooted at the current directory.
Files with the svn:eol-style property already set (to anything) are skipped.
svn will itself refuse to set this property on a file that's not under SVN
control, or that has a binary mime-type property set. This script inherits
that behavior, and passes on whatever warning message the failing "svn
propset" command produces.
In the Python project, it's safe to invoke this script from the root of
a checkout.
No output is produced for files that are ignored. For a file that gets
svn:eol-style set, output looks like:
property 'svn:eol-style' set on 'Lib\ctypes\__init__.py'
For a file not under version control:
svn: warning: 'patch-finalizer.txt' is not under version control
and for a file with a binary mime-type property:
svn: File 'Lib\test\test_pep263.py' has binary mime type property
"""
import re
import os
import sys
import subprocess
def propfiles(root, fn):
    """Return the SVN property file paths for file *fn* under *root*.

    Reads ``.svn/format`` to determine the working-copy layout.  Returns
    an empty list when the format file is unreadable (i.e. *root* is not
    a working copy); raises ValueError for formats other than 8 and 9.
    """
    try:
        # `with` guarantees the handle is closed even on a parse error
        # (the original leaked it until GC). The unused `default` local
        # from the original has been dropped.
        with open(os.path.join(root, ".svn", "format")) as f:
            fmt = int(f.read().strip())
    except IOError:
        return []
    if fmt in (8, 9):
        # In version 8 and 9, committed props are stored in prop-base, local
        # modifications in props
        return [os.path.join(root, ".svn", "prop-base", fn + ".svn-base"),
                os.path.join(root, ".svn", "props", fn + ".svn-work")]
    raise ValueError("Unknown repository format")
def proplist(root, fn):
    """Return a list of property names for file fn in directory root."""
    names = []
    for path in propfiles(root, fn):
        try:
            f = open(path)
        except IOError:
            # no properties file: not under version control,
            # or no properties set
            continue
        with f:
            while True:
                # Entries are key-value pairs of the form:
                #   K <length>\n<keyname>\n
                #   V <length>\n<value>\n
                # terminated by a line starting with END.
                header = f.readline()
                if header.startswith("END"):
                    break
                assert header.startswith("K ")
                key_len = int(header.split()[1])
                names.append(f.read(key_len))
                f.readline()  # consume the newline after the key
                header = f.readline()
                assert header.startswith("V ")
                val_len = int(header.split()[1])
                f.read(val_len)  # the value itself is not needed here
                f.readline()  # consume the newline after the value
    return names
def set_eol_native(path):
    """Run ``svn propset svn:eol-style native`` on *path*.

    Any warning output from svn (file not under version control, binary
    mime-type) is passed through; the exit status is ignored.
    """
    import shlex  # stdlib; imported locally to keep this fix self-contained
    # Quote the path so filenames containing spaces, quotes or shell
    # metacharacters can neither break nor inject into the shell command
    # (the original interpolated the raw path inside double quotes).
    cmd = 'svn propset svn:eol-style native {}'.format(shlex.quote(path))
    propset = subprocess.Popen(cmd, shell=True)
    propset.wait()
# Matcher for extensions of files that may contain text (C/C++ headers and
# sources, Python, txt, and Visual Studio project files).
possible_text_file = re.compile(r"\.([hc]|py|txt|sln|vcproj)$").search
def main():
    """Apply svn:eol-style=native to text files named on the command line.

    Each argument may be a file or a directory (searched recursively,
    skipping .svn administrative directories); with no arguments the
    current directory is processed.
    """
    for arg in sys.argv[1:] or [os.curdir]:
        if os.path.isfile(arg):
            root, fn = os.path.split(arg)
            if 'svn:eol-style' not in proplist(root, fn):
                set_eol_native(arg)
        elif os.path.isdir(arg):
            for root, dirs, files in os.walk(arg):
                if '.svn' in dirs:
                    dirs.remove('.svn')  # don't descend into svn metadata
                for fn in files:
                    if not possible_text_file(fn):
                        continue
                    if 'svn:eol-style' in proplist(root, fn):
                        continue
                    set_eol_native(os.path.join(root, fn))
# Allow use both as a script and as an importable helper module.
if __name__ == '__main__':
    main()
| mit |
ppasq/geonode | geonode/invitations/forms.py | 9 | 3321 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2018 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django import forms
from django.utils.translation import ugettext as _
from django.contrib.auth import get_user_model
from invitations.adapters import get_invitations_adapter
from invitations.exceptions import AlreadyInvited, AlreadyAccepted, UserRegisteredEmail
from invitations.utils import get_invitation_model
Invitation = get_invitation_model()
class CleanEmailMixin(object):
    """Form mixin that validates invite e-mail addresses are usable."""

    def validate_invitation(self, email):
        """Raise if *email* was already invited, accepted, or registered."""
        if Invitation.objects.all_valid().filter(
                email__iexact=email, accepted=False):
            raise AlreadyInvited
        if Invitation.objects.filter(
                email__iexact=email, accepted=True):
            raise AlreadyAccepted
        if get_user_model().objects.filter(email__iexact=email):
            raise UserRegisteredEmail
        return True

    def clean_email(self):
        """Split the submitted address list (',' or ';') and validate each."""
        emails = self.cleaned_data["email"].replace(";", ",").split(",")
        for raw in emails:
            email = get_invitations_adapter().clean_email(raw.strip())
            try:
                self.validate_invitation(email)
            except AlreadyInvited:
                raise forms.ValidationError(
                    _("The e-mail address '%(email)s' has already been"
                      " invited." % {"email": email}))
            except AlreadyAccepted:
                raise forms.ValidationError(
                    _("The e-mail address '%(email)s' has already"
                      " accepted an invite." % {"email": email}))
            except UserRegisteredEmail:
                raise forms.ValidationError(
                    _("An active user is already using the"
                      " e-mail address '%(email)s'" % {"email": email}))
        return emails
class GeoNodeInviteForm(forms.Form, CleanEmailMixin):
    """Invite form accepting one or more e-mail addresses in a single field."""

    email = forms.CharField(
        label=_("E-mail"),
        required=True,
        widget=forms.TextInput(
            attrs={"type": "text", "size": "1200"}), initial="")

    def save(self, emails):
        """Create and return an Invitation for each given address."""
        return [Invitation.create(email=address.strip()) for address in emails]
| gpl-3.0 |
ypu/tp-qemu | qemu/tests/rv_input.py | 3 | 10997 | """
rv_input.py - test keyboard inputs through spice
Requires: Two VMs - client and guest and remote-viewer session
from client VM to guest VM created by rv_connect test.
Deployed PyGTK on guest VM.
Presumes the numlock state at startup is 'OFF'.
"""
import logging
import os
from autotest.client.shared import error
from virttest.aexpect import ShellCmdError
from virttest import utils_misc, utils_spice, aexpect
def install_pygtk(guest_session, params):
    """
    Install PyGTK to a VM with yum package manager, unless the pygtk2
    package is already present.

    :param guest_session - ssh session to guest VM
    :param params
    """
    try:
        # Probe first; a zero exit status means the package is installed.
        guest_session.cmd("rpm -q pygtk2")
    except ShellCmdError:
        logging.info("Installing pygtk2 package to %s",
                     params.get("guest_vm"))
        guest_session.cmd("yum -y install pygtk2 --nogpgcheck > /dev/null",
                          timeout=60)
def deploy_test_form(test, guest_vm, params):
    """
    Copy the wxPython test form script into the guest VM's /tmp directory.

    :param test
    :param guest_vm - vm object
    :param params
    """
    script = params.get("guest_script")
    source_path = utils_misc.get_path(test.virtdir,
                                      os.path.join("scripts", script))
    guest_vm.copy_files_to(source_path, "/tmp/%s" % script, timeout=60)
def run_test_form(guest_session, params):
    """
    Start the key-event test form on the guest VM in the background and
    detach it from the session (so it keeps running after we return).

    :param guest_session - ssh session to guest VM
    :param params
    """
    logging.info("Starting test form for catching key events on guest")
    guest_session.cmd("python /tmp/%s &> /dev/null &"
                      % params.get("guest_script"))
    guest_session.cmd("disown -ar")
def get_test_results(guest_vm):
    """
    Fetch the key-event log from the guest VM to the same local path.

    :param guest_vm - vm object
    :return local path of the copied results file
    """
    results_path = "/tmp/autotest-rv_input"
    guest_vm.copy_files_from(results_path, results_path, timeout=60)
    return results_path
def test_type_and_func_keys(client_vm, guest_session, params):
    """
    Test typewriter and functional keys.
    Function sends various keys through qemu monitor to client VM.

    :param client_vm - vm object
    :param guest_session - ssh session to guest VM
    :param params
    """
    run_test_form(guest_session, params)
    utils_spice.wait_timeout(3)
    # Scancodes of modifier/lock keys that must not be sent:
    # 29=Ctrl, 42/54=Shift, 55=PtScr, 56=Alt, 58=CapsLock.
    # (frozenset: O(1) membership instead of a list scan per iteration)
    skipped = frozenset((29, 42, 54, 55, 56, 58))
    # Send typewriter and functional keys to client machine based on scancodes
    logging.info("Sending typewriter and functional keys to client machine")
    for scancode in range(1, 69):
        if scancode in skipped:
            continue
        # hex() already returns a str; the original wrapped it in str().
        client_vm.send_key(hex(scancode))
        utils_spice.wait_timeout(0.3)
def test_leds_and_esc_keys(client_vm, guest_session, params):
    """
    Test LED-toggling and escaped keys.
    Function sends various keys through qemu monitor to client VM.

    :param client_vm - vm object
    :param guest_session - ssh session to guest VM
    :param params
    """
    # Run PyGTK form catching KeyEvents on guest.
    run_test_form(guest_session, params)
    utils_spice.wait_timeout(3)
    # One combined sequence: LED toggles, then shortcuts, then escaped keys.
    test_keys = (
        ['a', 'caps_lock', 'a', 'caps_lock', 'num_lock', 'kp_1', 'num_lock',
         'kp_1'] +
        ['a', 'shift-a', 'shift_r-a', 'ctrl-a', 'ctrl-c', 'ctrl-v', 'alt-x'] +
        ['insert', 'delete', 'home', 'end', 'pgup', 'pgdn', 'up',
         'down', 'right', 'left']
    )
    logging.info("Sending leds and escaped keys to client machine")
    for key in test_keys:
        client_vm.send_key(key)
        utils_spice.wait_timeout(0.3)
def test_nonus_layout(client_vm, guest_session, params):
    """
    Test some keys of non-us keyboard layouts (de, cz).
    Function sends various keys through qemu monitor to client VM.

    :param client_vm - vm object
    :param guest_session - ssh session to guest VM
    :param params
    """
    # Run PyGTK form catching KeyEvents on guest.
    run_test_form(guest_session, params)
    utils_spice.wait_timeout(3)
    # (layout switch command, keys to send, log message) per layout.
    keysets = (
        ("setxkbmap cz",
         ['7', '8', '9', '0', 'alt_r-x', 'alt_r-c', 'alt_r-v'],
         "Sending czech keys to client machine"),
        ("setxkbmap de",
         ['minus', '0x1a', 'alt_r-q', 'alt_r-m'],
         "Sending german keys to client machine"),
    )
    for switch_cmd, keys, log_msg in keysets:
        guest_session.cmd(switch_cmd)
        logging.info(log_msg)
        for key in keys:
            client_vm.send_key(key)
            utils_spice.wait_timeout(0.3)
    # Restore the default US layout.
    guest_session.cmd("setxkbmap us")
def test_leds_migration(client_vm, guest_vm, guest_session, params):
    """
    Check LEDs after migration.
    Sets LEDs (caps, num) to ON and sends scancodes of "a" and "1 (num)";
    expects to get keycodes of "A" and "1" after migration.

    :param client_vm - vm object
    :param guest_vm - vm object
    :param guest_session - ssh session to guest VM
    :param params
    """
    # Turn numlock on RHEL6 on before the test begins:
    rhel_ver = guest_session.cmd(
        "grep -o 'release [[:digit:]]' /etc/redhat-release").strip()
    logging.info("RHEL version: #{0}#".format(rhel_ver))
    if rhel_ver == "release 6":
        client_vm.send_key('num_lock')
    # Run PyGTK form catching KeyEvents on guest.
    run_test_form(guest_session, params)
    utils_spice.wait_timeout(3)
    logging.info("Sending leds keys to client machine before migration")
    for key in ('a', 'kp_1', 'caps_lock', 'num_lock', 'a', 'kp_1'):
        client_vm.send_key(key)
        utils_spice.wait_timeout(0.3)
    guest_vm.migrate()
    utils_spice.wait_timeout(8)
    logging.info("Sending leds keys to client machine after migration")
    for key in ('a', 'kp_1', 'caps_lock', 'num_lock'):
        client_vm.send_key(key)
        utils_spice.wait_timeout(0.3)
    utils_spice.wait_timeout(30)
def analyze_results(file_path, test_type):
    """
    Compare the keycodes caught on the guest with the expected keycodes.

    :param file_path: path to the file with the caught keycodes
    :param test_type: type of the test (selects the expected sequence)
    :return: the first expected keycode that was not matched (as a string),
             or None when every expected keycode was caught in order
    :raise ValueError: when test_type does not name a known test
    """
    # Expected keycode sequences from the guest machine, keyed by test type.
    expected_keycodes = {
        "type_and_func_keys": [
            '65307', '49', '50', '51', '52', '53', '54', '55',
            '56', '57', '48', '45', '61', '65288', '65289',
            '113', '119', '101', '114', '116', '121', '117',
            '105', '111', '112', '91', '93', '65293', '97',
            '115', '100', '102', '103', '104', '106', '107',
            '108', '59', '39', '96', '92', '122', '120', '99',
            '118', '98', '110', '109', '44', '46', '47', '32',
            '65470', '65471', '65472', '65473', '65474',
            '65475', '65476', '65477', '65478', '65479'],
        "leds_and_esc_keys": [
            '97', '65509', '65', '65509', '65407', '65457',
            '65407', '65436', '97', '65505', '65', '65506',
            '65', '65507', '97', '65507', '99', '65507', '118',
            '65513', '120', '65379', '65535', '65360', '65367',
            '65365', '65366', '65362', '65364', '65363',
            '65361'],
        "nonus_layout": [
            '253', '225', '237', '233', '65027', '35', '65027',
            '38', '65027', '64', '223', '252', '65027', '64',
            '65027', '181'],
        "leds_migration": [
            '97', '65457', '65509', '65407', '65', '65436',
            '65', '65436', '65509', '65407'],
    }
    try:
        correct_keycodes = expected_keycodes[test_type]
    except KeyError:
        # Previously an unknown test type crashed with UnboundLocalError;
        # fail loudly with a meaningful message instead.
        raise ValueError("Unknown test type: %s" % test_type)
    # Read caught keycodes on guest machine; 'with' guarantees the file is
    # closed even when reading fails.
    with open(file_path, "r") as fileobj:
        test_keycodes = fileobj.read().split()
    logging.info("Caught keycodes:%s", test_keycodes)
    # Compare caught keycodes with expected keycodes.  A short result file
    # (fewer caught than expected) counts as a mismatch on the first missing
    # keycode instead of raising IndexError.
    for index, expected in enumerate(correct_keycodes):
        if index >= len(test_keycodes) or test_keycodes[index] != expected:
            return expected
    return None
def run(test, params, env):
    """
    Test keyboard inputs through SPICE.

    Depends on the rv_connect test having established the remote-viewer
    connection beforehand.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    :raise error.TestWarn: when gnome-session is not running on the guest.
    :raise error.TestFail: on an unknown test type or mismatched keycodes.
    """
    guest_vm = env.get_vm(params["guest_vm"])
    guest_vm.verify_alive()
    client_vm = env.get_vm(params["client_vm"])
    client_vm.verify_alive()
    guest_session = guest_vm.wait_for_login(
        timeout=int(params.get("login_timeout", 360)))
    # A root session is needed later to install PyGTK on the guest.
    guest_root_session = guest_vm.wait_for_login(
        timeout=int(params.get("login_timeout", 360)),
        username="root", password="123456")
    # Verify that gnome is now running on the guest
    try:
        guest_session.cmd("ps aux | grep -v grep | grep gnome-session")
    except aexpect.ShellCmdError:
        raise error.TestWarn("gnome-session was probably not correctly started")
    guest_session.cmd("export DISPLAY=:0.0")
    install_pygtk(guest_root_session, params)
    deploy_test_form(test, guest_vm, params)
    # Get test type and perform proper test
    test_type = params.get("config_test")
    test_mapping = {'type_and_func_keys': test_type_and_func_keys,
                    'leds_and_esc_keys': test_leds_and_esc_keys,
                    'nonus_layout': test_nonus_layout,
                    'leds_migration': test_leds_migration}
    test_parameters = {
        'type_and_func_keys': (client_vm, guest_session, params),
        'leds_and_esc_keys': (client_vm, guest_session, params),
        'nonus_layout': (client_vm, guest_session, params),
        'leds_migration': (client_vm, guest_vm, guest_session, params)}
    try:
        func = test_mapping[test_type]
        args = test_parameters[test_type]
    except KeyError:
        # Only an unknown test type is a failure here; the previous bare
        # 'except:' silently masked every other error as "Unknown type".
        raise error.TestFail("Unknown type of test: %s" % test_type)
    func(*args)
    # Get file with caught keycodes from guest
    result_path = get_test_results(guest_vm)
    # Analyze results and raise fail exp. If sent scancodes
    # do not match with expected keycodes
    result = analyze_results(result_path, test_type)
    if result is not None:
        raise error.TestFail("Testing of sending keys failed:"
                             " Expected keycode = %s" % result)
    guest_session.close()
| gpl-2.0 |
shiftcontrol/UnityOpenCV | opencv/samples/python/laplace.py | 6 | 1374 | #!/usr/bin/python
import urllib2
import cv
import sys
if __name__ == "__main__":
    # Working images; allocated lazily on the first captured frame, once
    # the frame dimensions are known.
    laplace = None
    colorlaplace = None
    planes = [ None, None, None ]
    capture = None
    # Capture source: default camera, a camera index, or a video file path.
    if len(sys.argv) == 1:
        capture = cv.CreateCameraCapture(0)
    elif len(sys.argv) == 2 and sys.argv[1].isdigit():
        capture = cv.CreateCameraCapture(int(sys.argv[1]))
    elif len(sys.argv) == 2:
        capture = cv.CreateFileCapture(sys.argv[1])
    if not capture:
        print "Could not initialize capturing..."
        sys.exit(-1)
    cv.NamedWindow("Laplacian", 1)
    # Process frames until any key is pressed in the window.
    while True:
        frame = cv.QueryFrame(capture)
        if frame:
            if not laplace:
                # First frame: one 8-bit image per colour plane, a 16-bit
                # signed buffer for the Laplacian, and the colour output.
                planes = [cv.CreateImage((frame.width, frame.height), 8, 1) for i in range(3)]
                laplace = cv.CreateImage((frame.width, frame.height), cv.IPL_DEPTH_16S, 1)
                colorlaplace = cv.CreateImage((frame.width, frame.height), 8, 3)
            # Split into colour planes, run the Laplacian on each and
            # convert back to absolute 8-bit values in place.
            cv.Split(frame, planes[0], planes[1], planes[2], None)
            for plane in planes:
                cv.Laplace(plane, laplace, 3)
                cv.ConvertScaleAbs(laplace, plane, 1, 0)
            # Recombine the filtered planes into the colour result.
            cv.Merge(planes[0], planes[1], planes[2], None, colorlaplace)
            cv.ShowImage("Laplacian", colorlaplace)
        if cv.WaitKey(10) != -1:
            break
    cv.DestroyWindow("Laplacian")
| gpl-3.0 |
o5k/openerp-oemedical-v0.1 | openerp/addons/stock_location/procurement_pull.py | 53 | 6951 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp import netsvc
from openerp.tools.translate import _
class procurement_order(osv.osv):
    """Extend procurement orders to honour product "pull flow" rules.

    When the procurement's location matches one of the product's
    ``flow_pull_ids`` rules, the procurement method (buy / produce / move)
    is taken from that rule instead of the standard behaviour.
    """
    _inherit = 'procurement.order'

    def check_buy(self, cr, uid, ids, context=None):
        """Return True when the matching pull rule says to buy.

        Falls back to the standard check when no rule matches.
        """
        for procurement in self.browse(cr, uid, ids, context=context):
            for line in procurement.product_id.flow_pull_ids:
                if line.location_id==procurement.location_id:
                    return line.type_proc=='buy'
        # NOTE(review): context is not forwarded to super() here — confirm
        # this is intentional.
        return super(procurement_order, self).check_buy(cr, uid, ids)

    def check_produce(self, cr, uid, ids, context=None):
        """Return True when the matching pull rule says to produce."""
        for procurement in self.browse(cr, uid, ids, context=context):
            for line in procurement.product_id.flow_pull_ids:
                if line.location_id==procurement.location_id:
                    return line.type_proc=='produce'
        return super(procurement_order, self).check_produce(cr, uid, ids)

    def check_move(self, cr, uid, ids, context=None):
        """Return truthy when the matching rule is a move with a source.

        Note the return value is the source location browse record (truthy)
        rather than a strict boolean, as required by the workflow condition.
        """
        for procurement in self.browse(cr, uid, ids, context=context):
            for line in procurement.product_id.flow_pull_ids:
                if line.location_id==procurement.location_id:
                    return (line.type_proc=='move') and (line.location_src_id)
        return False

    def action_move_create(self, cr, uid, ids, context=None):
        """Create the picking/move/procurement chain for a pulled flow.

        For each procurement, finds the matching pull rule, creates an
        internal picking and stock move from the rule's source location,
        creates a chained procurement on that source location, and fires
        the workflow signals to confirm them.
        """
        proc_obj = self.pool.get('procurement.order')
        move_obj = self.pool.get('stock.move')
        picking_obj=self.pool.get('stock.picking')
        wf_service = netsvc.LocalService("workflow")
        for proc in proc_obj.browse(cr, uid, ids, context=context):
            line = None
            # Pick the rule matching the procurement's location; the loop
            # leaves 'line' bound to the match (or the last rule).
            for line in proc.product_id.flow_pull_ids:
                if line.location_id == proc.location_id:
                    break
            assert line, 'Line cannot be False if we are on this state of the workflow'
            # New origin: original document reference + rule name.
            origin = (proc.origin or proc.name or '').split(':')[0] +':'+line.name
            picking_id = picking_obj.create(cr, uid, {
                'origin': origin,
                'company_id': line.company_id and line.company_id.id or False,
                'type': line.picking_type,
                'stock_journal_id': line.journal_id and line.journal_id.id or False,
                'move_type': 'one',
                'partner_id': line.partner_address_id.id,
                'note': _('Picking for pulled procurement coming from original location %s, pull rule %s, via original Procurement %s (#%d)') % (proc.location_id.name, line.name, proc.name, proc.id),
                'invoice_state': line.invoice_state,
            })
            move_id = move_obj.create(cr, uid, {
                'name': line.name,
                'picking_id': picking_id,
                'company_id': line.company_id and line.company_id.id or False,
                'product_id': proc.product_id.id,
                'date': proc.date_planned,
                'product_qty': proc.product_qty,
                'product_uom': proc.product_uom.id,
                'product_uos_qty': (proc.product_uos and proc.product_uos_qty)\
                        or proc.product_qty,
                'product_uos': (proc.product_uos and proc.product_uos.id)\
                        or proc.product_uom.id,
                'partner_id': line.partner_address_id.id,
                'location_id': line.location_src_id.id,
                'location_dest_id': line.location_id.id,
                'move_dest_id': proc.move_id and proc.move_id.id or False, # to verif, about history ?
                'tracking_id': False,
                'cancel_cascade': line.cancel_cascade,
                'state': 'confirmed',
                'note': _('Move for pulled procurement coming from original location %s, pull rule %s, via original Procurement %s (#%d)') % (proc.location_id.name, line.name, proc.name, proc.id),
            })
            # NOTE(review): ('confirmed') is a plain string, not a tuple, so
            # 'in' performs a substring test here; it matches only the exact
            # state 'confirmed' in practice, but confirm this was intended.
            if proc.move_id and proc.move_id.state in ('confirmed'):
                move_obj.write(cr,uid, [proc.move_id.id], {
                    'state':'waiting'
                }, context=context)
            # Chained procurement on the rule's source location, linked to
            # the move created above.
            proc_id = proc_obj.create(cr, uid, {
                'name': line.name,
                'origin': origin,
                'note': _('Pulled procurement coming from original location %s, pull rule %s, via original Procurement %s (#%d)') % (proc.location_id.name, line.name, proc.name, proc.id),
                'company_id': line.company_id and line.company_id.id or False,
                'date_planned': proc.date_planned,
                'product_id': proc.product_id.id,
                'product_qty': proc.product_qty,
                'product_uom': proc.product_uom.id,
                'product_uos_qty': (proc.product_uos and proc.product_uos_qty)\
                        or proc.product_qty,
                'product_uos': (proc.product_uos and proc.product_uos.id)\
                        or proc.product_uom.id,
                'location_id': line.location_src_id.id,
                'procure_method': line.procure_method,
                'move_id': move_id,
            })
            wf_service = netsvc.LocalService("workflow")
            wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr)
            wf_service.trg_validate(uid, 'procurement.order', proc_id, 'button_confirm', cr)
            if proc.move_id:
                move_obj.write(cr, uid, [proc.move_id.id],
                    {'location_id':proc.location_id.id})
            msg = _('Pulled from another location.')
            self.write(cr, uid, [proc.id], {'state':'running', 'message': msg})
            self.message_post(cr, uid, [proc.id], body=msg, context=context)
            # trigger direct processing (the new procurement shares the same planned date as the original one, which is already being processed)
            wf_service.trg_validate(uid, 'procurement.order', proc_id, 'button_check', cr)
        return False

procurement_order()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ahb0327/intellij-community | python/helpers/python-skeletons/multiprocessing/managers.py | 68 | 1616 | """Skeleton for 'multiprocessing.managers' stdlib module."""
import threading
import queue
import multiprocessing
import multiprocessing.managers
class BaseManager(object):
    """Skeleton stub of ``multiprocessing.managers.BaseManager``.

    Mirrors the public API of the real class for static analysis and IDE
    completion; the method bodies are intentionally empty.
    """

    def __init__(self, address=None, authkey=None):
        # Only 'address' is modelled here; the real class also keeps authkey.
        self.address = address

    def start(self, initializer=None, initargs=None):
        """Start a subprocess running the manager server."""
        pass

    def get_server(self):
        """Return a server object for the actual manager server."""
        pass

    def connect(self):
        """Connect this local manager object to a remote manager process."""
        pass

    def shutdown(self):
        """Stop the process used by the manager."""
        pass

    @classmethod
    def register(cls, typeid, callable=None, proxytype=None, exposed=None,
                 method_to_typeid=None, create_method=None):
        """Register a typeid (type or callable) with the manager class."""
        pass

    def __enter__(self):
        # NOTE(review): the real BaseManager.__enter__ returns self; this
        # stub returns None — confirm that is acceptable for the skeleton.
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Shut the manager down when leaving the 'with' block."""
        pass
class SyncManager(multiprocessing.managers.BaseManager):
    """Skeleton stub of ``multiprocessing.managers.SyncManager``.

    Each factory method documents its return type by returning the
    corresponding local (threading/queue) object; none of these stubs
    actually create shared proxies.
    """

    def Barrier(self, parties, action=None, timeout=None):
        return threading.Barrier(parties, action, timeout)

    def BoundedSemaphore(self, value=None):
        return threading.BoundedSemaphore(value)

    def Condition(self, lock=None):
        return threading.Condition(lock)

    def Event(self):
        return threading.Event()

    def Lock(self):
        return threading.Lock()

    def Namespace(self):
        """Return a shared Namespace object (unmodelled in this stub)."""
        pass

    def Queue(self, maxsize=None):
        # The stub ignores maxsize; it only conveys the return type.
        return queue.Queue()

    def RLock(self):
        return threading.RLock()

    def Semaphore(self, value=None):
        return threading.Semaphore(value)

    def Array(self, typecode, sequence):
        """Return a shared array proxy (unmodelled in this stub)."""
        pass

    def Value(self, typecode, value):
        """Return a shared value proxy (unmodelled in this stub)."""
        pass

    def dict(self, mapping_or_sequence):
        """Return a shared dict proxy (unmodelled in this stub)."""
        pass

    def list(self, sequence):
        """Return a shared list proxy (unmodelled in this stub)."""
        pass
| apache-2.0 |
ClearCorp/odoo-clearcorp | TODO-7.0/purchase_order_archive/__init__.py | 4 | 1060 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_order_archive
| agpl-3.0 |
Juniper/neutron | neutron/openstack/common/context.py | 38 | 2735 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple class that stores security context information in the web request.
Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""
import itertools
from neutron.openstack.common import uuidutils
def generate_request_id():
    """Build a unique request identifier of the form ``req-<uuid>``."""
    return 'req-' + uuidutils.generate_uuid()
class RequestContext(object):
    """Represents useful information about a request context.

    Holds the security context under which the user accesses the system,
    together with additional request information.
    """

    def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
                 read_only=False, show_deleted=False, request_id=None):
        self.auth_token = auth_token
        self.user = user
        self.tenant = tenant
        self.is_admin = is_admin
        self.read_only = read_only
        self.show_deleted = show_deleted
        # Assign a fresh id only when the caller did not supply one.
        self.request_id = request_id if request_id else generate_request_id()

    def to_dict(self):
        """Serialize the context into a plain dictionary."""
        return dict(user=self.user,
                    tenant=self.tenant,
                    is_admin=self.is_admin,
                    read_only=self.read_only,
                    show_deleted=self.show_deleted,
                    auth_token=self.auth_token,
                    request_id=self.request_id)
def get_admin_context(show_deleted="no"):
    """Create a request context with administrator privileges."""
    return RequestContext(None,
                          tenant=None,
                          is_admin=True,
                          show_deleted=show_deleted)
def get_context_from_function_and_args(function, args, kwargs):
    """Find an argument of type RequestContext and return it.

    Handy in decorators that know little about the function they wrap.
    Returns None when no such argument is present.
    """
    candidates = itertools.chain(kwargs.values(), args)
    return next((a for a in candidates if isinstance(a, RequestContext)), None)
| apache-2.0 |
akretion/project-service | project_gtd/project_gtd.py | 21 | 6301 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
class project_gtd_context(osv.Model):
    """GTD context: the place/situation in which a task has to be treated."""
    _name = "project.gtd.context"
    _description = "Context"
    _columns = {
        'name': fields.char(
            'Context', size=64, required=True, translate=True),
        'sequence': fields.integer(
            'Sequence',
            help=("Gives the sequence order when displaying "
                  "a list of contexts.")),
    }
    _defaults = {
        'sequence': 1
    }
    # Ordered by user-defined sequence first, then alphabetically.
    _order = "sequence, name"
class project_gtd_timebox(osv.Model):
    """GTD timebox: the time-lapse during which a task has to be treated."""
    _name = "project.gtd.timebox"
    _order = "sequence"
    _columns = {
        'name': fields.char(
            'Timebox', size=64, required=True, select=1, translate=1),
        'sequence': fields.integer(
            'Sequence',
            help="Gives the sequence order when displaying "
                 "a list of timebox."),
    }
class project_task(osv.Model):
    """Add GTD timebox/context handling to project tasks."""
    _inherit = "project.task"

    _columns = {
        'timebox_id': fields.many2one(
            'project.gtd.timebox',
            "Timebox",
            help="Time-laps during which task has to be treated"),
        'context_id': fields.many2one(
            'project.gtd.context',
            "Context",
            help="The context place where user has to treat task"),
    }

    def _get_context(self, cr, uid, context=None):
        """Default context: the first GTD context found (by _order)."""
        ids = self.pool.get('project.gtd.context').search(
            cr, uid, [], context=context)
        return ids and ids[0] or False

    def _read_group_timebox_ids(
            self, cr, uid, ids, domain,
            read_group_order=None, access_rights_uid=None, context=None):
        """Used to display all timeboxes on the view."""
        timebox_obj = self.pool.get('project.gtd.timebox')
        order = timebox_obj._order
        access_rights_uid = access_rights_uid or uid
        timebox_ids = timebox_obj._search(
            cr, uid, [],
            order=order, access_rights_uid=access_rights_uid, context=context)
        result = timebox_obj.name_get(
            cr, access_rights_uid, timebox_ids, context=context)
        # Restore order of the search
        result.sort(
            lambda x, y: cmp(timebox_ids.index(x[0]), timebox_ids.index(y[0])))
        # No group is folded by default.
        fold = dict.fromkeys(timebox_ids, False)
        return result, fold

    _defaults = {
        'context_id': _get_context
    }
    _group_by_full = {
        'timebox_id': _read_group_timebox_ids,
    }

    def copy_data(self, cr, uid, id, default=None, context=None):
        """Duplicated tasks start without a timebox or context."""
        if context is None:
            context = {}
        if not default:
            default = {}
        default['timebox_id'] = False
        default['context_id'] = False
        return super(project_task, self).copy_data(
            cr, uid, id, default, context)

    def next_timebox(self, cr, uid, ids, *args):
        """Move each task to the next timebox in sequence order."""
        timebox_obj = self.pool.get('project.gtd.timebox')
        timebox_ids = timebox_obj.search(cr, uid, [])
        if not timebox_ids:
            return True
        for task in self.browse(cr, uid, ids):
            timebox = task.timebox_id
            if not timebox:
                self.write(cr, uid, task.id, {'timebox_id': timebox_ids[0]})
            elif timebox_ids.index(timebox) != len(timebox_ids)-1:
                # NOTE(review): timebox_ids holds integer ids while 'timebox'
                # is a browse record — confirm index() resolves as intended.
                index = timebox_ids.index(timebox)
                self.write(
                    cr, uid, task.id, {'timebox_id': timebox_ids[index+1]})
        return True

    def prev_timebox(self, cr, uid, ids, *args):
        """Move each task to the previous timebox (or clear the first one)."""
        timebox_obj = self.pool.get('project.gtd.timebox')
        timebox_ids = timebox_obj.search(cr, uid, [])
        for task in self.browse(cr, uid, ids):
            timebox = task.timebox_id
            if timebox:
                if timebox_ids.index(timebox):
                    index = timebox_ids.index(timebox)
                    self.write(
                        cr, uid, task.id,
                        {'timebox_id': timebox_ids[index - 1]})
                else:
                    # Already in the first timebox: clear it.
                    self.write(cr, uid, task.id, {'timebox_id': False})
        return True

    def fields_view_get(self, cr, uid, view_id=None, view_type='form',
                        context=None, toolbar=False, submenu=False):
        """Inject one search filter per timebox into GTD search views.

        Replaces the '<separator name="gtdsep"/>' placeholder in the view
        architecture with a filter for every existing timebox.
        """
        if not context:
            context = {}
        res = super(project_task, self).fields_view_get(
            cr, uid, view_id, view_type, context,
            toolbar=toolbar, submenu=submenu)
        search_extended = False
        timebox_obj = self.pool.get('project.gtd.timebox')
        if (res['type'] == 'search') and context.get('gtd', False):
            timeboxes = timebox_obj.browse(
                cr, uid, timebox_obj.search(cr, uid, []), context=context)
            search_extended = ''
            for timebox in timeboxes:
                filter_ = u"""
                    <filter domain="[('timebox_id', '=', {timebox_id})]"
                            string="{string}"/>\n
                    """.format(timebox_id=timebox.id, string=timebox.name)
                search_extended += filter_
            search_extended += '<separator orientation="vertical"/>'
            res['arch'] = tools.ustr(res['arch']).replace(
                '<separator name="gtdsep"/>', search_extended)
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hufsm/tu_gen2_libsigrokdecode | decoders/swd/__init__.py | 2 | 1212 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Angus Gratton <gus@projectgus.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
'''
This PD decodes the ARM SWD (version 1) protocol, as described in the
"ARM Debug Interface v5.2" Architecture Specification.
Details:
http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ihi0031c/index.html
(Registration required)
Not supported:
* Turnaround periods other than the default 1, as set in DLCR.TURNROUND
(should be trivial to add)
* SWD protocol version 2 (multi-drop support, etc.)
'''
from .pd import Decoder
| gpl-3.0 |
motion2015/edx-platform | common/djangoapps/config_models/tests.py | 75 | 10300 | # -*- coding: utf-8 -*-
"""
Tests of ConfigurationModel
"""
import ddt
from django.contrib.auth.models import User
from django.db import models
from django.test import TestCase
from freezegun import freeze_time
from mock import patch
from config_models.models import ConfigurationModel
class ExampleConfig(ConfigurationModel):
    """
    Test model for testing ``ConfigurationModels``.
    """
    # Seconds a current() lookup may be served from the cache.
    cache_timeout = 300

    string_field = models.TextField()
    int_field = models.IntegerField(default=10)
@patch('config_models.models.cache')
class ConfigurationModelTests(TestCase):
    """
    Tests of ConfigurationModel.

    The class-level @patch injects a mock cache into every test method as
    the trailing positional argument.
    """
    def setUp(self):
        super(ConfigurationModelTests, self).setUp()
        self.user = User()
        self.user.save()

    def test_cache_deleted_on_save(self, mock_cache):
        """Saving a config entry must invalidate the cached current value."""
        ExampleConfig(changed_by=self.user).save()
        mock_cache.delete.assert_called_with(ExampleConfig.cache_key_name())

    def test_cache_key_name(self, _mock_cache):
        self.assertEquals(ExampleConfig.cache_key_name(), 'configuration/ExampleConfig/current')

    def test_no_config_empty_cache(self, mock_cache):
        """With no rows and a cold cache, current() returns field defaults."""
        mock_cache.get.return_value = None
        current = ExampleConfig.current()
        self.assertEquals(current.int_field, 10)
        self.assertEquals(current.string_field, '')
        mock_cache.set.assert_called_with(ExampleConfig.cache_key_name(), current, 300)

    def test_no_config_full_cache(self, mock_cache):
        """A warm cache short-circuits the database entirely."""
        current = ExampleConfig.current()
        self.assertEquals(current, mock_cache.get.return_value)

    def test_config_ordering(self, mock_cache):
        """current() returns the most recently changed row."""
        mock_cache.get.return_value = None
        # 'first' is saved in the frozen past; 'second' gets a later
        # change_date and therefore wins.
        with freeze_time('2012-01-01'):
            first = ExampleConfig(changed_by=self.user)
            first.string_field = 'first'
            first.save()
        second = ExampleConfig(changed_by=self.user)
        second.string_field = 'second'
        second.save()
        self.assertEquals(ExampleConfig.current().string_field, 'second')

    def test_cache_set(self, mock_cache):
        """current() stores its result in the cache with the model timeout."""
        mock_cache.get.return_value = None
        first = ExampleConfig(changed_by=self.user)
        first.string_field = 'first'
        first.save()
        ExampleConfig.current()
        mock_cache.set.assert_called_with(ExampleConfig.cache_key_name(), first, 300)

    def test_active_annotation(self, mock_cache):
        """with_active_flag() marks only the newest row as active."""
        mock_cache.get.return_value = None
        with freeze_time('2012-01-01'):
            ExampleConfig.objects.create(string_field='first')
        ExampleConfig.objects.create(string_field='second')
        rows = ExampleConfig.objects.with_active_flag().order_by('-change_date')
        self.assertEqual(len(rows), 2)
        self.assertEqual(rows[0].string_field, 'second')
        self.assertEqual(rows[0].is_active, True)
        self.assertEqual(rows[1].string_field, 'first')
        self.assertEqual(rows[1].is_active, False)
class ExampleKeyedConfig(ConfigurationModel):
    """
    Test model for testing ``ConfigurationModels`` with keyed configuration.

    Does not inherit from ExampleConfig due to how Django handles model inheritance.
    """
    # Seconds a current() lookup may be served from the cache.
    cache_timeout = 300

    # Each (left, right) pair identifies an independent configuration row.
    KEY_FIELDS = ('left', 'right')

    left = models.CharField(max_length=30)
    right = models.CharField(max_length=30)

    string_field = models.TextField()
    int_field = models.IntegerField(default=10)
@ddt.ddt
class KeyedConfigurationModelTests(TestCase):
    """
    Tests for ``ConfigurationModels`` with keyed configuration.

    The class-level @patch injects a mock cache into every test method as
    the trailing positional argument.
    """
    def setUp(self):
        super(KeyedConfigurationModelTests, self).setUp()
        self.user = User()
        self.user.save()

    @ddt.data(('a', 'b'), ('c', 'd'))
    @ddt.unpack
    def test_cache_key_name(self, left, right, _mock_cache):
        self.assertEquals(
            ExampleKeyedConfig.cache_key_name(left, right),
            'configuration/ExampleKeyedConfig/current/{},{}'.format(left, right)
        )

    @ddt.data(
        ((), 'left,right'),
        (('left', 'right'), 'left,right'),
        (('left', ), 'left')
    )
    @ddt.unpack
    def test_key_values_cache_key_name(self, args, expected_key, _mock_cache):
        # No args means "all key fields".
        self.assertEquals(
            ExampleKeyedConfig.key_values_cache_key_name(*args),
            'configuration/ExampleKeyedConfig/key_values/{}'.format(expected_key))

    @ddt.data(('a', 'b'), ('c', 'd'))
    @ddt.unpack
    def test_no_config_empty_cache(self, left, right, mock_cache):
        """With no rows and a cold cache, current() returns field defaults."""
        mock_cache.get.return_value = None
        current = ExampleKeyedConfig.current(left, right)
        self.assertEquals(current.int_field, 10)
        self.assertEquals(current.string_field, '')
        mock_cache.set.assert_called_with(ExampleKeyedConfig.cache_key_name(left, right), current, 300)

    @ddt.data(('a', 'b'), ('c', 'd'))
    @ddt.unpack
    def test_no_config_full_cache(self, left, right, mock_cache):
        current = ExampleKeyedConfig.current(left, right)
        self.assertEquals(current, mock_cache.get.return_value)

    def test_config_ordering(self, mock_cache):
        """current() picks the newest row independently per key pair."""
        mock_cache.get.return_value = None
        # The 'first_*' rows are saved in the frozen past; the 'second_*'
        # rows get later change_dates and win per key.
        with freeze_time('2012-01-01'):
            ExampleKeyedConfig(
                changed_by=self.user,
                left='left_a',
                right='right_a',
                string_field='first_a',
            ).save()
            ExampleKeyedConfig(
                changed_by=self.user,
                left='left_b',
                right='right_b',
                string_field='first_b',
            ).save()
        ExampleKeyedConfig(
            changed_by=self.user,
            left='left_a',
            right='right_a',
            string_field='second_a',
        ).save()
        ExampleKeyedConfig(
            changed_by=self.user,
            left='left_b',
            right='right_b',
            string_field='second_b',
        ).save()
        self.assertEquals(ExampleKeyedConfig.current('left_a', 'right_a').string_field, 'second_a')
        self.assertEquals(ExampleKeyedConfig.current('left_b', 'right_b').string_field, 'second_b')

    def test_cache_set(self, mock_cache):
        """current() stores its result under the keyed cache name."""
        mock_cache.get.return_value = None
        first = ExampleKeyedConfig(
            changed_by=self.user,
            left='left',
            right='right',
            string_field='first',
        )
        first.save()
        ExampleKeyedConfig.current('left', 'right')
        mock_cache.set.assert_called_with(ExampleKeyedConfig.cache_key_name('left', 'right'), first, 300)

    def test_key_values(self, mock_cache):
        """key_values() returns the distinct key tuples across all rows."""
        mock_cache.get.return_value = None
        with freeze_time('2012-01-01'):
            ExampleKeyedConfig(left='left_a', right='right_a', changed_by=self.user).save()
            ExampleKeyedConfig(left='left_b', right='right_b', changed_by=self.user).save()
        ExampleKeyedConfig(left='left_a', right='right_a', changed_by=self.user).save()
        ExampleKeyedConfig(left='left_b', right='right_b', changed_by=self.user).save()
        unique_key_pairs = ExampleKeyedConfig.key_values()
        self.assertEquals(len(unique_key_pairs), 2)
        self.assertEquals(set(unique_key_pairs), set([('left_a', 'right_a'), ('left_b', 'right_b')]))
        unique_left_keys = ExampleKeyedConfig.key_values('left', flat=True)
        self.assertEquals(len(unique_left_keys), 2)
        self.assertEquals(set(unique_left_keys), set(['left_a', 'left_b']))

    def test_key_string_values(self, mock_cache):
        """ Ensure str() vs unicode() doesn't cause duplicate cache entries """
        ExampleKeyedConfig(left='left', right=u'〉☃', enabled=True, int_field=10, changed_by=self.user).save()
        mock_cache.get.return_value = None

        entry = ExampleKeyedConfig.current('left', u'〉☃')
        key = mock_cache.get.call_args[0][0]
        self.assertEqual(entry.int_field, 10)
        mock_cache.get.assert_called_with(key)
        self.assertEqual(mock_cache.set.call_args[0][0], key)

        # The unicode variant of the same key must hit the same cache entry.
        mock_cache.get.reset_mock()
        entry = ExampleKeyedConfig.current(u'left', u'〉☃')
        self.assertEqual(entry.int_field, 10)
        mock_cache.get.assert_called_with(key)

    def test_current_set(self, mock_cache):
        """current_set() returns one (newest) row per key pair."""
        mock_cache.get.return_value = None
        with freeze_time('2012-01-01'):
            ExampleKeyedConfig(left='left_a', right='right_a', int_field=0, changed_by=self.user).save()
            ExampleKeyedConfig(left='left_b', right='right_b', int_field=0, changed_by=self.user).save()
        ExampleKeyedConfig(left='left_a', right='right_a', int_field=1, changed_by=self.user).save()
        ExampleKeyedConfig(left='left_b', right='right_b', int_field=2, changed_by=self.user).save()
        queryset = ExampleKeyedConfig.objects.current_set()
        self.assertEqual(len(queryset.all()), 2)
        self.assertEqual(
            set(queryset.order_by('int_field').values_list('int_field', flat=True)),
            set([1, 2])
        )

    def test_active_annotation(self, mock_cache):
        """with_active_flag() marks the newest row per key pair as active."""
        mock_cache.get.return_value = None
        with freeze_time('2012-01-01'):
            ExampleKeyedConfig.objects.create(left='left_a', right='right_a', string_field='first')
            ExampleKeyedConfig.objects.create(left='left_b', right='right_b', string_field='first')
        ExampleKeyedConfig.objects.create(left='left_a', right='right_a', string_field='second')
        rows = ExampleKeyedConfig.objects.with_active_flag()
        self.assertEqual(len(rows), 3)
        for row in rows:
            if row.left == 'left_a':
                self.assertEqual(row.is_active, row.string_field == 'second')
            else:
                self.assertEqual(row.left, 'left_b')
                self.assertEqual(row.string_field, 'first')
                self.assertEqual(row.is_active, True)

    def test_key_values_cache(self, mock_cache):
        """key_values() caches its result and serves from a warm cache."""
        mock_cache.get.return_value = None
        self.assertEquals(ExampleKeyedConfig.key_values(), [])
        mock_cache.set.assert_called_with(ExampleKeyedConfig.key_values_cache_key_name(), [], 300)

        fake_result = [('a', 'b'), ('c', 'd')]
        mock_cache.get.return_value = fake_result
        self.assertEquals(ExampleKeyedConfig.key_values(), fake_result)
| agpl-3.0 |
geky/pyOCD | pyOCD/target/target_lpc4330.py | 1 | 2872 | """
mbed CMSIS-DAP debugger
Copyright (c) 2006-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cortex_m import CortexM
class LPC4330(CortexM):
    """pyOCD target definition for the NXP LPC4330 (Cortex-M4, SPIFI flash)."""

    memoryMapXML = """<?xml version="1.0"?>
<!DOCTYPE memory-map PUBLIC "+//IDN gnu.org//DTD GDB Memory Map V1.0//EN" "http://sourceware.org/gdb/gdb-memory-map.dtd">
<memory-map>
    <memory type="flash" start="0x14000000" length="0x4000000"> <property name="blocksize">0x400</property></memory>
    <memory type="ram" start="0x10000000" length="0x20000"> </memory>
    <memory type="ram" start="0x10080000" length="0x12000"> </memory>
    <memory type="ram" start="0x20000000" length="0x8000"> </memory>
    <memory type="ram" start="0x20008000" length="0x8000"> </memory>
</memory-map>
"""

    def __init__(self, transport):
        super(LPC4330, self).__init__(transport)
        # Guards against recursive resets while flash.init() runs below.
        self.ignoreReset = False

    def setFlash(self, flash):
        # Flash object used by resetStopOnReset to remount SPIFI flash.
        self.flash = flash

    def reset(self, software_reset = False):
        # Always use software reset for LPC4330 since the hardware version
        # will reset the DAP.
        CortexM.reset(self, True)

    def resetStopOnReset(self, software_reset = False):
        if self.ignoreReset:
            return

        # Set core up to run some code in RAM that is guaranteed to be valid
        # since FLASH could be corrupted and that is what user is trying to fix.
        self.writeMemory(0x10000000, 0x10087ff0)    # Initial SP
        self.writeMemory(0x10000004, 0x1000000d)    # Reset Handler
        self.writeMemory(0x10000008, 0x1000000d)    # Hard Fault Handler
        self.writeMemory(0x1000000c, 0xe7fee7fe)    # Infinite loop
        self.writeMemory(0x40043100, 0x10000000)    # Shadow 0x0 to RAM

        # Always use software reset for LPC4330 since the hardware version
        # will reset the DAP.
        CortexM.resetStopOnReset(self, True)

        # Map shadow memory to SPIFI FLASH
        self.writeMemory(0x40043100, 0x80000000)

        # The LPC4330 flash init routine can be used to remount FLASH.
        # ignoreReset prevents the reset performed inside flash.init()
        # from re-entering this routine.
        self.ignoreReset = True
        self.flash.init()
        self.ignoreReset = False

        # Set SP and PC based on interrupt vector in SPIFI_FLASH
        sp = self.readMemory(0x14000000)
        pc = self.readMemory(0x14000004)
        self.writeCoreRegisterRaw('sp', sp)
        self.writeCoreRegisterRaw('pc', pc)
| apache-2.0 |
matthiasdiener/spack | var/spack/repos/builtin/packages/emacs/package.py | 2 | 3112 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import sys
class Emacs(AutotoolsPackage):
    """The Emacs programmable text editor."""

    homepage = "https://www.gnu.org/software/emacs"
    url = "http://ftp.gnu.org/gnu/emacs/emacs-24.5.tar.gz"

    version('26.1', '544d2ab5eb142e9ca69adb023d17bf4b')
    version('25.3', '74ddd373dc52ac05ca7a8c63b1ddbf58')
    version('25.2', '0a36d1cdbba6024d4dbbac027f87995f')
    version('25.1', '95c12e6a9afdf0dcbdd7d2efa26ca42c')
    version('24.5', 'd74b597503a68105e61b5b9f6d065b44')

    variant('X', default=False, description="Enable an X toolkit")
    variant(
        'toolkit',
        default='gtk',
        values=('gtk', 'athena'),
        description="Select an X toolkit (gtk, athena)"
    )
    variant('tls', default=False, description="Build Emacs with gnutls")

    depends_on('pkgconfig', type='build')
    depends_on('ncurses')
    depends_on('pcre')
    depends_on('zlib')
    depends_on('libtiff', when='+X')
    depends_on('libpng', when='+X')
    depends_on('libxpm', when='+X')
    depends_on('giflib', when='+X')
    depends_on('libx11', when='+X')
    depends_on('libxaw', when='+X toolkit=athena')
    depends_on('gtkplus', when='+X toolkit=gtk')
    depends_on('gnutls', when='+tls')

    def configure_args(self):
        """Translate the selected variants into ./configure flags."""
        spec = self.spec
        toolkit = spec.variants['toolkit'].value

        # X support: either enable X with the chosen toolkit, or disable it.
        if '+X' in spec:
            flags = ['--with-x', '--with-x-toolkit={0}'.format(toolkit)]
        else:
            flags = ['--without-x']

        # On OS X/macOS, do not build "nextstep/Emacs.app", because
        # doing so throws an error at build-time
        if sys.platform == 'darwin':
            flags.append('--without-ns')

        flags.append('--with-gnutls' if '+tls' in spec
                     else '--without-gnutls')
        return flags
| lgpl-2.1 |
ltilve/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/third_party/boto/boto/kinesis/layer1.py | 102 | 40879 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import base64
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.kinesis import exceptions
from boto.compat import json
from boto.compat import six
class KinesisConnection(AWSQueryConnection):
    """
    Amazon Kinesis Service API Reference
    Amazon Kinesis is a managed service that scales elastically for
    real time processing of streaming big data.
    """
    # Service metadata consumed by boto's request-building and signing layer.
    APIVersion = "2013-12-02"
    DefaultRegionName = "us-east-1"
    DefaultRegionEndpoint = "kinesis.us-east-1.amazonaws.com"
    ServiceName = "Kinesis"
    TargetPrefix = "Kinesis_20131202"
    ResponseError = JSONResponseError

    # Maps service-reported fault names to the boto exception classes
    # raised back to the caller.
    _faults = {
        "ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
        "LimitExceededException": exceptions.LimitExceededException,
        "ExpiredIteratorException": exceptions.ExpiredIteratorException,
        "ResourceInUseException": exceptions.ResourceInUseException,
        "ResourceNotFoundException": exceptions.ResourceNotFoundException,
        "InvalidArgumentException": exceptions.InvalidArgumentException,
        "SubscriptionRequiredException": exceptions.SubscriptionRequiredException
    }
def __init__(self, **kwargs):
    """Create a Kinesis connection.

    Accepts the usual AWSQueryConnection keyword arguments; if no
    ``region`` is supplied, defaults to us-east-1, and the region's
    endpoint is used as the host unless one was given explicitly.
    """
    region = kwargs.pop('region', None) or RegionInfo(
        self, self.DefaultRegionName, self.DefaultRegionEndpoint)
    kwargs.setdefault('host', region.endpoint)
    super(KinesisConnection, self).__init__(**kwargs)
    self.region = region
def _required_auth_capability(self):
    """Kinesis requests are signed with AWS Signature Version 4."""
    return ['hmac-v4']
def add_tags_to_stream(self, stream_name, tags):
    """Add or update tags on a stream (up to 10 per stream).

    Existing tags with matching keys are overwritten.

    :type stream_name: string
    :param stream_name: The name of the stream.
    :type tags: map
    :param tags: Key/value pairs to set as tags.
    """
    body = json.dumps({'StreamName': stream_name, 'Tags': tags})
    return self.make_request(action='AddTagsToStream', body=body)
def create_stream(self, stream_name, shard_count):
    """Create an Amazon Kinesis stream.

    This is asynchronous: the service returns immediately with the
    stream in the ``CREATING`` state and moves it to ``ACTIVE`` when
    creation completes; poll with DescribeStream. Raises
    ``LimitExceededException`` if more than five streams are CREATING
    at once or the shard count exceeds the account limit (default 10
    shards per stream). Limit: 5 transactions per second per account.

    :type stream_name: string
    :param stream_name: A name to identify the stream, scoped to the
        AWS account and region.
    :type shard_count: integer
    :param shard_count: Number of shards for the stream; more shards
        give greater provisioned throughput.
    """
    body = json.dumps({
        'StreamName': stream_name,
        'ShardCount': shard_count,
    })
    return self.make_request(action='CreateStream', body=body)
def delete_stream(self, stream_name):
    """Delete a stream and all of its shards and data.

    Only an ``ACTIVE`` stream can be deleted; the stream enters the
    ``DELETING`` state until removal completes, during which reads and
    writes may still be accepted. Operating on a deleted stream raises
    ``ResourceNotFoundException``. Limit: 5 transactions per second
    per account.

    :type stream_name: string
    :param stream_name: The name of the stream to delete.
    """
    body = json.dumps({'StreamName': stream_name})
    return self.make_request(action='DeleteStream', body=body)
def describe_stream(self, stream_name, limit=None,
                    exclusive_start_shard_id=None):
    """Describe a stream: status, ARN, and an array of shard objects.

    Paginated: when ``HasMoreShards`` is true in the response, pass the
    last returned shard ID as ``exclusive_start_shard_id`` in a
    follow-up call. Limit: 10 transactions per second per account.

    :type stream_name: string
    :param stream_name: The name of the stream to describe.
    :type limit: integer
    :param limit: The maximum number of shards to return.
    :type exclusive_start_shard_id: string
    :param exclusive_start_shard_id: The shard ID to start with.
    """
    params = {'StreamName': stream_name}
    # Only include optional fields the caller actually supplied.
    optional = (('Limit', limit),
                ('ExclusiveStartShardId', exclusive_start_shard_id))
    params.update((key, val) for key, val in optional if val is not None)
    return self.make_request(action='DescribeStream',
                             body=json.dumps(params))
def get_records(self, shard_iterator, limit=None, b64_decode=True):
    """Get data records from a shard.

    Reads sequentially from the position given by ``shard_iterator``;
    returns an empty record list when that portion of the shard holds
    no data. Use the ``NextShardIterator`` from the response for
    subsequent calls; it is ``null`` once the shard is closed. Calls
    that exceed the shard's provisioned throughput raise
    ``ProvisionedThroughputExceededException``.

    :type shard_iterator: string
    :param shard_iterator: Position in the shard to start reading from.
    :type limit: integer
    :param limit: Maximum number of records to return, up to 10,000;
        larger values raise ``InvalidArgumentException``.
    :type b64_decode: boolean
    :param b64_decode: Decode the Base64-encoded ``Data`` field of
        records.
    """
    params = {'ShardIterator': shard_iterator}
    if limit is not None:
        params['Limit'] = limit
    response = self.make_request(action='GetRecords',
                                 body=json.dumps(params))
    if b64_decode:
        # The service returns Data Base64-encoded; decode each record
        # back to a unicode string in place.
        for record in response.get('Records', []):
            raw = base64.b64decode(record['Data'].encode('utf-8'))
            record['Data'] = raw.decode('utf-8')
    return response
def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type,
                       starting_sequence_number=None):
    """Get a shard iterator (expires five minutes after issue).

    The iterator type controls where reading starts:

    + AT_SEQUENCE_NUMBER - exactly at the given sequence number.
    + AFTER_SEQUENCE_NUMBER - right after the given sequence number.
    + TRIM_HORIZON - at the oldest untrimmed record in the shard.
    + LATEST - just after the most recent record in the shard.

    Calling too often raises ``ProvisionedThroughputExceededException``.
    Limit: 5 transactions per second per account per open shard.

    :type stream_name: string
    :param stream_name: The name of the stream.
    :type shard_id: string
    :param shard_id: The shard ID of the shard to get the iterator for.
    :type shard_iterator_type: string
    :param shard_iterator_type: One of the types listed above.
    :type starting_sequence_number: string
    :param starting_sequence_number: Sequence number to start reading
        from (for the *_SEQUENCE_NUMBER iterator types).
    """
    params = dict(StreamName=stream_name,
                  ShardId=shard_id,
                  ShardIteratorType=shard_iterator_type)
    if starting_sequence_number is not None:
        params['StartingSequenceNumber'] = starting_sequence_number
    return self.make_request(action='GetShardIterator',
                             body=json.dumps(params))
def list_streams(self, limit=None, exclusive_start_stream_name=None):
    """List the account's streams.

    Paginated: when ``HasMoreStreams`` is true, pass the last returned
    stream name as ``exclusive_start_stream_name`` in a follow-up call.
    The service default page size is currently 10. Limit: 5
    transactions per second per account.

    :type limit: integer
    :param limit: The maximum number of streams to list.
    :type exclusive_start_stream_name: string
    :param exclusive_start_stream_name: The name of the stream to start
        the list with.
    """
    # Build the request from only the parameters that were supplied.
    supplied = {'Limit': limit,
                'ExclusiveStartStreamName': exclusive_start_stream_name}
    params = {k: v for k, v in supplied.items() if v is not None}
    return self.make_request(action='ListStreams',
                             body=json.dumps(params))
def list_tags_for_stream(self, stream_name, exclusive_start_tag_key=None,
                         limit=None):
    """List the tags for a stream.

    Paginated: when ``HasMoreTags`` is true, pass the last returned key
    as ``exclusive_start_tag_key`` in a follow-up call.

    :type stream_name: string
    :param stream_name: The name of the stream.
    :type exclusive_start_tag_key: string
    :param exclusive_start_tag_key: Return only tags that occur after
        this key.
    :type limit: integer
    :param limit: The number of tags to return.
    """
    params = {'StreamName': stream_name}
    if exclusive_start_tag_key is not None:
        params['ExclusiveStartTagKey'] = exclusive_start_tag_key
    if limit is not None:
        params['Limit'] = limit
    body = json.dumps(params)
    return self.make_request(action='ListTagsForStream', body=body)
def merge_shards(self, stream_name, shard_to_merge,
                 adjacent_shard_to_merge):
    """Merge two adjacent shards into one to reduce stream capacity.

    Shards are adjacent when their hash key ranges form a contiguous
    set with no gaps. Only an ``ACTIVE`` stream can be merged; the
    operation is asynchronous, moving the stream to ``UPDATING`` and
    back to ``ACTIVE`` when done. ``ResourceInUseException`` is raised
    for streams in CREATING/UPDATING/DELETING state and
    ``ResourceNotFoundException`` if the stream does not exist.
    Limit: 5 transactions per second per account.

    :type stream_name: string
    :param stream_name: The name of the stream for the merge.
    :type shard_to_merge: string
    :param shard_to_merge: Shard ID to combine with the adjacent shard.
    :type adjacent_shard_to_merge: string
    :param adjacent_shard_to_merge: Shard ID of the adjacent shard.
    """
    body = json.dumps({
        'StreamName': stream_name,
        'ShardToMerge': shard_to_merge,
        'AdjacentShardToMerge': adjacent_shard_to_merge,
    })
    return self.make_request(action='MergeShards', body=body)
def put_record(self, stream_name, data, partition_key,
               explicit_hash_key=None,
               sequence_number_for_ordering=None,
               exclusive_minimum_sequence_number=None,
               b64_encode=True):
    """Put a single data record into a stream.

    The partition key is mapped through an MD5 hash to choose the
    destination shard unless ``explicit_hash_key`` overrides it.
    Returns the shard ID and the sequence number assigned to the
    record. Raises ``ProvisionedThroughputExceededException`` when the
    shard lacks capacity.

    NOTE(review): ``exclusive_minimum_sequence_number`` is accepted but
    never sent to the service; it is kept only for signature
    compatibility.

    :type stream_name: string
    :param stream_name: The stream to put the record into.
    :type data: blob
    :param data: The data blob; Base64-encoded before sending unless
        ``b64_encode`` is False. Maximum payload (after decoding) is
        50 KB.
    :type partition_key: string
    :param partition_key: Determines the destination shard; Unicode
        string of at most 256 bytes.
    :type explicit_hash_key: string
    :param explicit_hash_key: Hash value that overrides the partition
        key hash for shard selection.
    :type sequence_number_for_ordering: string
    :param sequence_number_for_ordering: Guarantees strictly increasing
        sequence numbers for puts from the same client to the same
        partition key; set to the sequence number of the previous put.
    :type b64_encode: boolean
    :param b64_encode: Whether to Base64 encode ``data``; set False if
        it is already encoded, to prevent double encoding.
    """
    params = {
        'StreamName': stream_name,
        'Data': data,
        'PartitionKey': partition_key,
    }
    if explicit_hash_key is not None:
        params['ExplicitHashKey'] = explicit_hash_key
    if sequence_number_for_ordering is not None:
        params['SequenceNumberForOrdering'] = sequence_number_for_ordering
    if b64_encode:
        # Normalize to bytes before encoding, then send as text.
        payload = params['Data']
        if not isinstance(payload, six.binary_type):
            payload = payload.encode('utf-8')
        params['Data'] = base64.b64encode(payload).decode('utf-8')
    return self.make_request(action='PutRecord',
                             body=json.dumps(params))
def put_records(self, records, stream_name, b64_encode=True):
"""
Puts (writes) multiple data records from a producer into an
Amazon Kinesis stream in a single call (also referred to as a
`PutRecords` request). Use this operation to send data from a
data producer into the Amazon Kinesis stream for real-time
ingestion and processing. Each shard can support up to 1000
records written per second, up to a maximum total of 1 MB data
written per second.
You must specify the name of the stream that captures, stores,
and transports the data; and an array of request `Records`,
with each record in the array requiring a partition key and
data blob.
The data blob can be any type of data; for example, a segment
from a log file, geographic/location data, website clickstream
data, and so on.
The partition key is used by Amazon Kinesis as input to a hash
function that maps the partition key and associated data to a
specific shard. An MD5 hash function is used to map partition
keys to 128-bit integer values and to map associated data
records to shards. As a result of this hashing mechanism, all
data records with the same partition key map to the same shard
within the stream. For more information, see `Partition Key`_
in the Amazon Kinesis Developer Guide .
Each record in the `Records` array may include an optional
parameter, `ExplicitHashKey`, which overrides the partition
key to shard mapping. This parameter allows a data producer to
determine explicitly the shard where the record is stored. For
more information, see `Adding Multiple Records with
PutRecords`_ in the Amazon Kinesis Developer Guide .
The `PutRecords` response includes an array of response
`Records`. Each record in the response array directly
correlates with a record in the request array using natural
ordering, from the top to the bottom of the request and
response. The response `Records` array always includes the
same number of records as the request array.
The response `Records` array includes both successfully and
unsuccessfully processed records. Amazon Kinesis attempts to
process all records in each `PutRecords` request. A single
record failure does not stop the processing of subsequent
records.
A successfully-processed record includes `ShardId` and
`SequenceNumber` values. The `ShardId` parameter identifies
the shard in the stream where the record is stored. The
`SequenceNumber` parameter is an identifier assigned to the
put record, unique to all records in the stream.
An unsuccessfully-processed record includes `ErrorCode` and
`ErrorMessage` values. `ErrorCode` reflects the type of error
and can be one of the following values:
`ProvisionedThroughputExceededException` or `InternalFailure`.
`ErrorMessage` provides more detailed information about the
`ProvisionedThroughputExceededException` exception including
the account ID, stream name, and shard ID of the record that
was throttled.
Data records are accessible for only 24 hours from the time
that they are added to an Amazon Kinesis stream.
:type records: list
:param records: The records associated with the request.
:type stream_name: string
:param stream_name: The stream name associated with the request.
:type b64_encode: boolean
:param b64_encode: Whether to Base64 encode `data`. Can be set to
``False`` if `data` is already encoded to prevent double encoding.
"""
params = {'Records': records, 'StreamName': stream_name, }
if b64_encode:
for i in range(len(params['Records'])):
data = params['Records'][i]['Data']
if not isinstance(data, six.binary_type):
data = data.encode('utf-8')
params['Records'][i]['Data'] = base64.b64encode(
data).decode('utf-8')
return self.make_request(action='PutRecords',
body=json.dumps(params))
def remove_tags_from_stream(self, stream_name, tag_keys):
"""
Deletes tags from the specified Amazon Kinesis stream.
If you specify a tag that does not exist, it is ignored.
:type stream_name: string
:param stream_name: The name of the stream.
:type tag_keys: list
:param tag_keys: A list of tag keys. Each corresponding tag is removed
from the stream.
"""
params = {'StreamName': stream_name, 'TagKeys': tag_keys, }
return self.make_request(action='RemoveTagsFromStream',
body=json.dumps(params))
def split_shard(self, stream_name, shard_to_split, new_starting_hash_key):
"""
Splits a shard into two new shards in the stream, to increase
the stream's capacity to ingest and transport data.
`SplitShard` is called when there is a need to increase the
overall capacity of stream because of an expected increase in
the volume of data records being ingested.
You can also use `SplitShard` when a shard appears to be
approaching its maximum utilization, for example, when the set
of producers sending data into the specific shard are suddenly
sending more than previously anticipated. You can also call
`SplitShard` to increase stream capacity, so that more Amazon
Kinesis applications can simultaneously read data from the
stream for real-time processing.
You must specify the shard to be split and the new hash key,
which is the position in the shard where the shard gets split
in two. In many cases, the new hash key might simply be the
average of the beginning and ending hash key, but it can be
any hash key value in the range being mapped into the shard.
For more information about splitting shards, see `Split a
Shard`_ in the Amazon Kinesis Developer Guide .
You can use DescribeStream to determine the shard ID and hash
key values for the `ShardToSplit` and `NewStartingHashKey`
parameters that are specified in the `SplitShard` request.
`SplitShard` is an asynchronous operation. Upon receiving a
`SplitShard` request, Amazon Kinesis immediately returns a
response and sets the stream status to `UPDATING`. After the
operation is completed, Amazon Kinesis sets the stream status
to `ACTIVE`. Read and write operations continue to work while
the stream is in the `UPDATING` state.
You can use `DescribeStream` to check the status of the
stream, which is returned in `StreamStatus`. If the stream is
in the `ACTIVE` state, you can call `SplitShard`. If a stream
is in `CREATING` or `UPDATING` or `DELETING` states,
`DescribeStream` returns a `ResourceInUseException`.
If the specified stream does not exist, `DescribeStream`
returns a `ResourceNotFoundException`. If you try to create
more shards than are authorized for your account, you receive
a `LimitExceededException`.
The default limit for an AWS account is 10 shards per stream.
If you need to create a stream with more than 10 shards,
`contact AWS Support`_ to increase the limit on your account.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, MergeShards or SplitShard, you
receive a `LimitExceededException`.
`SplitShard` has limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream for the shard split.
:type shard_to_split: string
:param shard_to_split: The shard ID of the shard to split.
:type new_starting_hash_key: string
:param new_starting_hash_key: A hash key value for the starting hash
key of one of the child shards created by the split. The hash key
range for a given shard constitutes a set of ordered contiguous
positive integers. The value for `NewStartingHashKey` must be in
the range of hash keys being mapped into the shard. The
`NewStartingHashKey` hash key value and all higher hash key values
in hash key range are distributed to one of the child shards. All
the lower hash key values in the range are distributed to the other
child shard.
"""
params = {
'StreamName': stream_name,
'ShardToSplit': shard_to_split,
'NewStartingHashKey': new_starting_hash_key,
}
return self.make_request(action='SplitShard',
body=json.dumps(params))
    def make_request(self, action, body):
        """Send a JSON (x-amz-json-1.1) request for `action` and decode the
        response.

        :param action: service operation name, combined with ``TargetPrefix``
            into the ``X-Amz-Target`` header.
        :param body: JSON-serialized request parameters.
        :returns: the decoded JSON response dict, or ``None`` when the
            service replies 200 with an empty body.
        :raises: the fault class mapped from the response's ``__type`` field
            (falling back to ``self.ResponseError``) on any non-200 status.
        """
        headers = {
            'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
            'Host': self.region.endpoint,
            'Content-Type': 'application/x-amz-json-1.1',
            'Content-Length': str(len(body)),
        }
        http_request = self.build_base_http_request(
            method='POST', path='/', auth_path='/', params={},
            headers=headers, data=body)
        # _mexe handles signing and retries (up to 10 attempts here).
        response = self._mexe(http_request, sender=None,
                              override_num_retries=10)
        response_body = response.read().decode('utf-8')
        boto.log.debug(response.getheaders())
        boto.log.debug(response_body)
        if response.status == 200:
            if response_body:
                return json.loads(response_body)
            # NOTE: a 200 with an empty body falls through, returning None.
        else:
            # Error responses carry a JSON document whose '__type' names the
            # service fault; map it to a specific exception class if known.
            json_body = json.loads(response_body)
            fault_name = json_body.get('__type', None)
            exception_class = self._faults.get(fault_name, self.ResponseError)
            raise exception_class(response.status, response.reason,
                                  body=json_body)
| bsd-3-clause |
eul721/The-Perfect-Pokemon-Team-Balancer | libs/env/Lib/site-packages/sqlalchemy/testing/requirements.py | 75 | 17798 | # testing/requirements.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Global database feature support policy.
Provides decorators to mark tests requiring specific feature support from the
target database.
External dialect test suites should subclass SuiteRequirements
to provide specific inclusions/exclusions.
"""
from . import exclusions
class Requirements(object):
    """Base marker class for requirement collections; concrete feature
    properties are defined by subclasses such as SuiteRequirements."""
    pass
class SuiteRequirements(Requirements):
    """Requirement rules for the base test suite.

    Each property returns an :mod:`.exclusions` rule object that the test
    framework uses to include or skip tests against the target backend.
    Dialect-specific suites override individual properties to declare
    what the backend supports.
    """

    @property
    def create_table(self):
        """target platform can emit basic CreateTable DDL."""
        return exclusions.open()

    @property
    def drop_table(self):
        """target platform can emit basic DropTable DDL."""
        return exclusions.open()

    @property
    def foreign_keys(self):
        """Target database must support foreign keys."""
        return exclusions.open()

    @property
    def on_update_cascade(self):
        # FIX: docstring previously opened with a stray fourth quote.
        """target database must support ON UPDATE..CASCADE behavior in
        foreign keys."""
        return exclusions.open()

    @property
    def non_updating_cascade(self):
        """target database must *not* support ON UPDATE..CASCADE behavior in
        foreign keys."""
        return exclusions.closed()

    @property
    def deferrable_fks(self):
        """target database must support deferrable foreign keys."""
        return exclusions.closed()

    @property
    def on_update_or_deferrable_fks(self):
        # TODO: exclusions should be composable,
        # somehow only_if([x, y]) isn't working here, negation/conjunctions
        # getting confused.
        return exclusions.only_if(
            lambda: self.on_update_cascade.enabled or self.deferrable_fks.enabled
        )

    @property
    def self_referential_foreign_keys(self):
        """Target database must support self-referential foreign keys."""
        return exclusions.open()

    @property
    def foreign_key_ddl(self):
        """Target database must support the DDL phrases for FOREIGN KEY."""
        return exclusions.open()

    @property
    def named_constraints(self):
        """target database must support names for constraints."""
        return exclusions.open()

    @property
    def subqueries(self):
        """Target database must support subqueries."""
        return exclusions.open()

    @property
    def offset(self):
        """target database can render OFFSET, or an equivalent, in a SELECT."""
        return exclusions.open()

    @property
    def boolean_col_expressions(self):
        """Target database must support boolean expressions as columns"""
        return exclusions.closed()

    @property
    def nullsordering(self):
        """Target backends that support nulls ordering."""
        return exclusions.closed()

    @property
    def standalone_binds(self):
        """target database/driver supports bound parameters as column expressions
        without being in the context of a typed column.

        """
        return exclusions.closed()

    @property
    def intersect(self):
        """Target database must support INTERSECT or equivalent."""
        return exclusions.closed()

    @property
    def except_(self):
        """Target database must support EXCEPT or equivalent (i.e. MINUS)."""
        return exclusions.closed()

    @property
    def window_functions(self):
        """Target database must support window functions."""
        return exclusions.closed()

    @property
    def autoincrement_insert(self):
        """target platform generates new surrogate integer primary key values
        when insert() is executed, excluding the pk column."""
        return exclusions.open()

    @property
    def fetch_rows_post_commit(self):
        """target platform will allow cursor.fetchone() to proceed after a
        COMMIT.

        Typically this refers to an INSERT statement with RETURNING which
        is invoked within "autocommit".   If the row can be returned
        after the autocommit, then this rule can be open.

        """
        return exclusions.open()

    @property
    def empty_inserts(self):
        """target platform supports INSERT with no values, i.e.
        INSERT DEFAULT VALUES or equivalent."""
        return exclusions.only_if(
            lambda config: config.db.dialect.supports_empty_insert or \
                config.db.dialect.supports_default_values,
            "empty inserts not supported"
        )

    @property
    def insert_from_select(self):
        """target platform supports INSERT from a SELECT."""
        return exclusions.open()

    @property
    def returning(self):
        """target platform supports RETURNING."""
        return exclusions.only_if(
            lambda config: config.db.dialect.implicit_returning,
            "'returning' not supported by database"
        )

    @property
    def duplicate_names_in_cursor_description(self):
        """target platform supports a SELECT statement that has
        the same name repeated more than once in the columns list."""
        return exclusions.open()

    @property
    def denormalized_names(self):
        """Target database must have 'denormalized', i.e.
        UPPERCASE as case insensitive names."""
        return exclusions.skip_if(
            lambda config: not config.db.dialect.requires_name_normalize,
            "Backend does not require denormalized names."
        )

    @property
    def multivalues_inserts(self):
        """target database must support multiple VALUES clauses in an
        INSERT statement."""
        return exclusions.skip_if(
            lambda config: not config.db.dialect.supports_multivalues_insert,
            "Backend does not support multirow inserts."
        )

    @property
    def implements_get_lastrowid(self):
        # FIX: docstring previously opened with a stray fourth quote.
        """target dialect implements the ExecutionContext.get_lastrowid()
        method without reliance on RETURNING.

        """
        return exclusions.open()

    @property
    def emulated_lastrowid(self):
        # FIX: docstring previously opened with a stray fourth quote.
        """target dialect retrieves cursor.lastrowid, or fetches
        from a database-side function after an insert() construct executes,
        within the get_lastrowid() method.

        Only dialects that "pre-execute", or need RETURNING to get last
        inserted id, would return closed/fail/skip for this.

        """
        return exclusions.closed()

    @property
    def dbapi_lastrowid(self):
        # FIX: docstring previously opened with a stray fourth quote.
        """target platform includes a 'lastrowid' accessor on the DBAPI
        cursor object.

        """
        return exclusions.closed()

    @property
    def views(self):
        """Target database must support VIEWs."""
        return exclusions.closed()

    @property
    def schemas(self):
        """Target database must support external schemas, and have one
        named 'test_schema'."""
        return exclusions.closed()

    @property
    def sequences(self):
        """Target database must support SEQUENCEs."""
        return exclusions.only_if([
            lambda config: config.db.dialect.supports_sequences
        ], "no sequence support")

    @property
    def sequences_optional(self):
        """Target database supports sequences, but also optionally
        as a means of generating new PK values."""
        return exclusions.only_if([
            lambda config: config.db.dialect.supports_sequences and \
                config.db.dialect.sequences_optional
        ], "no sequence support, or sequences not optional")

    @property
    def reflects_pk_names(self):
        """target dialect exposes the names of primary key constraints
        via reflection."""
        return exclusions.closed()

    @property
    def table_reflection(self):
        """target database supports basic table reflection."""
        return exclusions.open()

    @property
    def view_column_reflection(self):
        """target database must support retrieval of the columns in a view,
        similarly to how a table is inspected.

        This does not include the full CREATE VIEW definition.

        """
        return self.views

    @property
    def view_reflection(self):
        """target database must support inspection of the full CREATE VIEW definition.
        """
        return self.views

    @property
    def schema_reflection(self):
        """target database supports reflection across external schemas
        (see :attr:`.schemas`)."""
        return self.schemas

    @property
    def primary_key_constraint_reflection(self):
        """target dialect supports reflection of primary key constraints."""
        return exclusions.open()

    @property
    def foreign_key_constraint_reflection(self):
        """target dialect supports reflection of foreign key constraints."""
        return exclusions.open()

    @property
    def index_reflection(self):
        """target dialect supports reflection of indexes."""
        return exclusions.open()

    @property
    def unique_constraint_reflection(self):
        """target dialect supports reflection of unique constraints"""
        return exclusions.open()

    @property
    def unbounded_varchar(self):
        """Target database must support VARCHAR with no length"""
        return exclusions.open()

    @property
    def unicode_data(self):
        """Target database/dialect must support Python unicode objects with
        non-ASCII characters represented, delivered as bound parameters
        as well as in result rows.

        """
        return exclusions.open()

    @property
    def unicode_ddl(self):
        """Target driver must support some degree of non-ascii symbol names."""
        return exclusions.closed()

    @property
    def datetime_literals(self):
        """target dialect supports rendering of a date, time, or datetime as a
        literal string, e.g. via the TypeEngine.literal_processor() method.

        """
        return exclusions.closed()

    @property
    def datetime(self):
        """target dialect supports representation of Python
        datetime.datetime() objects."""
        return exclusions.open()

    @property
    def datetime_microseconds(self):
        """target dialect supports representation of Python
        datetime.datetime() with microsecond objects."""
        return exclusions.open()

    @property
    def datetime_historic(self):
        """target dialect supports representation of Python
        datetime.datetime() objects with historic (pre 1970) values."""
        return exclusions.closed()

    @property
    def date(self):
        """target dialect supports representation of Python
        datetime.date() objects."""
        return exclusions.open()

    @property
    def date_coerces_from_datetime(self):
        """target dialect accepts a datetime object as the target
        of a date column."""
        return exclusions.open()

    @property
    def date_historic(self):
        """target dialect supports representation of Python
        datetime.datetime() objects with historic (pre 1970) values."""
        return exclusions.closed()

    @property
    def time(self):
        """target dialect supports representation of Python
        datetime.time() objects."""
        return exclusions.open()

    @property
    def time_microseconds(self):
        """target dialect supports representation of Python
        datetime.time() with microsecond objects."""
        return exclusions.open()

    @property
    def binary_comparisons(self):
        """target database/driver can allow BLOB/BINARY fields to be compared
        against a bound parameter value.
        """
        return exclusions.open()

    @property
    def binary_literals(self):
        """target backend supports simple binary literals, e.g. an
        expression like::

            SELECT CAST('foo' AS BINARY)

        Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
        e.g. it could be ``BLOB`` or similar.

        Basically fails on Oracle.

        """
        return exclusions.open()

    @property
    def precision_numerics_general(self):
        """target backend has general support for moderately high-precision
        numerics."""
        return exclusions.open()

    @property
    def precision_numerics_enotation_small(self):
        """target backend supports Decimal() objects using E notation
        to represent very small values."""
        return exclusions.closed()

    @property
    def precision_numerics_enotation_large(self):
        """target backend supports Decimal() objects using E notation
        to represent very large values."""
        return exclusions.closed()

    @property
    def precision_numerics_many_significant_digits(self):
        """target backend supports values with many digits on both sides,
        such as 319438950232418390.273596, 87673.594069654243

        """
        return exclusions.closed()

    @property
    def precision_numerics_retains_significant_digits(self):
        """A precision numeric type will return empty significant digits,
        i.e. a value such as 10.000 will come back in Decimal form with
        the .000 maintained."""
        return exclusions.closed()

    @property
    def precision_generic_float_type(self):
        """target backend will return native floating point numbers with at
        least seven decimal places when using the generic Float type.

        """
        return exclusions.open()

    @property
    def floats_to_four_decimals(self):
        """target backend can return a floating-point number with four
        significant digits (such as 15.7563) accurately
        (i.e. without FP inaccuracies, such as 15.75629997253418).

        """
        return exclusions.open()

    @property
    def fetch_null_from_numeric(self):
        """target backend doesn't crash when you try to select a NUMERIC
        value that has a value of NULL.

        Added to support Pyodbc bug #351.
        """
        return exclusions.open()

    @property
    def text_type(self):
        # FIX: docstring previously contained stray embedded quote chars.
        """Target database must support an unbounded Text() type
        such as TEXT or CLOB."""
        return exclusions.open()

    @property
    def empty_strings_varchar(self):
        """target database can persist/return an empty string with a
        varchar.

        """
        return exclusions.open()

    @property
    def empty_strings_text(self):
        """target database can persist/return an empty string with an
        unbounded text."""
        return exclusions.open()

    @property
    def selectone(self):
        """target driver must support the literal statement 'select 1'"""
        return exclusions.open()

    @property
    def savepoints(self):
        """Target database must support savepoints."""
        return exclusions.closed()

    @property
    def two_phase_transactions(self):
        """Target database must support two-phase transactions."""
        return exclusions.closed()

    @property
    def update_from(self):
        """Target must support UPDATE..FROM syntax"""
        return exclusions.closed()

    @property
    def update_where_target_in_subquery(self):
        """Target must support UPDATE where the same table is present in a
        subquery in the WHERE clause.

        This is an ANSI-standard syntax that apparently MySQL can't handle,
        such as:

        UPDATE documents SET flag=1 WHERE documents.title IN
            (SELECT max(documents.title) AS title
            FROM documents GROUP BY documents.user_id
            )
        """
        return exclusions.open()

    @property
    def mod_operator_as_percent_sign(self):
        """target database must use a plain percent '%' as the 'modulus'
        operator."""
        return exclusions.closed()

    @property
    def percent_schema_names(self):
        """target backend supports weird identifiers with percent signs
        in them, e.g. 'some % column'.

        this is a very weird use case but often has problems because of
        DBAPIs that use python formatting.  It's not a critical use
        case either.

        """
        return exclusions.closed()

    @property
    def order_by_label_with_expression(self):
        """target backend supports ORDER BY a column label within an
        expression.

        Basically this::

            select data as foo from test order by foo || 'bar'

        Lots of databases including Postgresql don't support this,
        so this is off by default.

        """
        return exclusions.closed()

    @property
    def unicode_connections(self):
        """Target driver must support non-ASCII characters being passed at all."""
        return exclusions.open()

    @property
    def skip_mysql_on_windows(self):
        """Catchall for a large variety of MySQL on Windows failures"""
        return exclusions.open()

    @property
    def ad_hoc_engines(self):
        """Test environment must allow ad-hoc engine/connection creation.

        DBs that scale poorly for many connections, even when closed, i.e.
        Oracle, may use the "--low-connections" option which flags this requirement
        as not present.

        """
        return exclusions.skip_if(lambda config: config.options.low_connections)

    def _has_mysql_on_windows(self, config):
        return False

    def _has_mysql_fully_case_sensitive(self, config):
        return False

    @property
    def sqlite(self):
        return exclusions.skip_if(lambda: not self._has_sqlite())

    @property
    def cextensions(self):
        return exclusions.skip_if(
            lambda: not self._has_cextensions(), "C extensions not installed"
        )

    def _has_sqlite(self):
        from sqlalchemy import create_engine
        try:
            create_engine('sqlite://')
            return True
        except ImportError:
            return False

    def _has_cextensions(self):
        try:
            from sqlalchemy import cresultproxy, cprocessors
            return True
        except ImportError:
            return False
| gpl-2.0 |
sgerhart/ansible | lib/ansible/plugins/action/script.py | 13 | 7132 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import shlex
from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail, AnsibleActionSkip
from ansible.executor.powershell import module_manifest as ps_manifest
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """Action plugin backing the ``script`` module: transfers a local
    script to the remote node and executes it there."""

    TRANSFERS_FILES = True

    # On Windows platform, absolute paths begin with a (back)slash
    # after chopping off a potential drive letter.
    windows_absolute_path_detection = re.compile(r'^(?:[a-zA-Z]\:)?(\\|\/)')

    def run(self, tmp=None, task_vars=None):
        ''' handler for file transfer operations '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        try:
            creates = self._task.args.get('creates')
            if creates:
                # do not run the command if the line contains creates=filename
                # and the filename already exists. This allows idempotence
                # of command executions.
                if self._remote_file_exists(creates):
                    raise AnsibleActionSkip("%s exists, matching creates option" % creates)

            removes = self._task.args.get('removes')
            if removes:
                # do not run the command if the line contains removes=filename
                # and the filename does not exist. This allows idempotence
                # of command executions.
                if not self._remote_file_exists(removes):
                    raise AnsibleActionSkip("%s does not exist, matching removes option" % removes)

            # The chdir must be absolute, because a relative path would rely on
            # remote node behaviour & user config.
            chdir = self._task.args.get('chdir')
            if chdir:
                # Powershell is the only Windows-path aware shell
                # BUG FIX: compiled regex objects expose ``match``, not
                # ``matches``; the previous call raised AttributeError
                # whenever chdir was used against a PowerShell target.
                if self._connection._shell.SHELL_FAMILY == 'powershell' and \
                        not self.windows_absolute_path_detection.match(chdir):
                    raise AnsibleActionFail('chdir %s must be an absolute path for a Windows remote node' % chdir)
                # Every other shell is unix-path-aware.
                if self._connection._shell.SHELL_FAMILY != 'powershell' and not chdir.startswith('/'):
                    raise AnsibleActionFail('chdir %s must be an absolute path for a Unix-aware remote node' % chdir)

            # Split out the script as the first item in raw_params using
            # shlex.split() in order to support paths and files with spaces in the name.
            # Any arguments passed to the script will be added back later.
            raw_params = to_native(self._task.args.get('_raw_params', ''), errors='surrogate_or_strict')
            parts = [to_text(s, errors='surrogate_or_strict') for s in shlex.split(raw_params.strip())]
            source = parts[0]

            # Support executable paths and files with spaces in the name.
            executable = to_native(self._task.args.get('executable', ''), errors='surrogate_or_strict')

            try:
                source = self._loader.get_real_file(self._find_needle('files', source), decrypt=self._task.args.get('decrypt', True))
            except AnsibleError as e:
                raise AnsibleActionFail(to_native(e))

            # now we execute script, always assume changed.
            result['changed'] = True

            if not self._play_context.check_mode:
                # transfer the file to a remote tmp location
                tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir,
                                                            os.path.basename(source))

                # Convert raw_params to text for the purpose of replacing the script since
                # parts and tmp_src are both unicode strings and raw_params will be different
                # depending on Python version.
                #
                # Once everything is encoded consistently, replace the script path on the remote
                # system with the remainder of the raw_params. This preserves quoting in parameters
                # that would have been removed by shlex.split().
                target_command = to_text(raw_params).strip().replace(parts[0], tmp_src)

                self._transfer_file(source, tmp_src)

                # set file permissions, more permissive when the copy is done as a different user
                self._fixup_perms2((self._connection._shell.tmpdir, tmp_src), execute=True)

                # add preparation steps to one ssh roundtrip executing the script
                env_dict = dict()
                env_string = self._compute_environment_string(env_dict)

                if executable:
                    script_cmd = ' '.join([env_string, executable, target_command])
                else:
                    script_cmd = ' '.join([env_string, target_command])

            if self._play_context.check_mode:
                # nothing was transferred; report "changed" without executing
                raise _AnsibleActionDone()

            script_cmd = self._connection._shell.wrap_for_exec(script_cmd)

            exec_data = None
            # PowerShell runs the script in a special wrapper to enable things
            # like become and environment args
            if self._connection._shell.SHELL_FAMILY == "powershell":
                # FIXME: use a more public method to get the exec payload
                pc = self._play_context
                exec_data = ps_manifest._create_powershell_wrapper(
                    to_bytes(script_cmd), {}, env_dict, self._task.async_val,
                    pc.become, pc.become_method, pc.become_user,
                    pc.become_pass, pc.become_flags, substyle="script"
                )
                script_cmd = "-"

            result.update(self._low_level_execute_command(cmd=script_cmd, in_data=exec_data, sudoable=True, chdir=chdir))

            if 'rc' in result and result['rc'] != 0:
                raise AnsibleActionFail('non-zero return code')

        except AnsibleAction as e:
            result.update(e.result)
        finally:
            # always clean up the remote tmp directory, even on failure
            self._remove_tmp_path(self._connection._shell.tmpdir)

        return result
| mit |
Maccimo/intellij-community | python/helpers/pydev/_pydev_bundle/pydev_console_utils.py | 2 | 16369 | import os
import signal
import sys
import traceback
from _pydev_bundle import _pydev_imports_tipper
from _pydev_bundle.pydev_code_executor import BaseCodeExecutor
from _pydev_bundle.pydev_console_types import CodeFragment
from _pydev_bundle.pydev_imports import Exec
from _pydev_bundle.pydev_stdin import StdIn, DebugConsoleStdIn
from _pydev_imps._pydev_saved_modules import thread
from _pydevd_bundle import pydevd_thrift
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle.pydevd_comm import InternalDataViewerAction
from _pydevd_bundle.pydevd_constants import IS_JYTHON, dict_iter_items
from _pydevd_bundle.pydevd_tables import exec_table_command
from pydev_console.pydev_protocol import CompletionOption, CompletionOptionType, \
PythonUnhandledException, PythonTableException
try:
import cStringIO as StringIO # may not always be available @UnusedImport
except:
try:
import StringIO # @Reimport
except:
import io as StringIO
# translation to Thrift `CompletionOptionType` enumeration
# Maps the completion-type codes produced by `_pydev_imports_tipper`
# to the Thrift-level `CompletionOptionType` values sent to the IDE.
COMPLETION_OPTION_TYPES = {
    _pydev_imports_tipper.TYPE_IMPORT: CompletionOptionType.IMPORT,
    _pydev_imports_tipper.TYPE_CLASS: CompletionOptionType.CLASS,
    _pydev_imports_tipper.TYPE_FUNCTION: CompletionOptionType.FUNCTION,
    _pydev_imports_tipper.TYPE_ATTR: CompletionOptionType.ATTR,
    _pydev_imports_tipper.TYPE_BUILTIN: CompletionOptionType.BUILTIN,
    _pydev_imports_tipper.TYPE_PARAM: CompletionOptionType.PARAM,
    _pydev_imports_tipper.TYPE_IPYTHON: CompletionOptionType.IPYTHON,
    _pydev_imports_tipper.TYPE_IPYTHON_MAGIC: CompletionOptionType.IPYTHON_MAGIC,
}
def _to_completion_option(word):
    """Convert a ``(name, documentation, args, type)`` completion tuple
    into a Thrift ``CompletionOption``."""
    name, documentation, args, ret_type = word
    option_type = COMPLETION_OPTION_TYPES[ret_type]
    return CompletionOption(name, documentation, args.split(), option_type)
# =======================================================================================================================
# Null
# =======================================================================================================================
class Null:
    """
    No-op object that silently absorbs any interaction.

    Gotten from: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205

    Attribute access, calls and subscripting all return the ``Null``
    instance itself (or do nothing), so it can stand in for an absent
    collaborator without raising.
    """

    def __init__(self, *args, **kwargs):
        # Accept and ignore any constructor arguments.
        return None

    def __call__(self, *args, **kwargs):
        return self

    def __getattr__(self, mname):
        return self

    def __setattr__(self, name, value):
        return self

    def __delattr__(self, name):
        return self

    def __repr__(self):
        return "<Null>"

    def __str__(self):
        return "Null"

    def __len__(self):
        # Also makes the object falsy on Python 3 (no __bool__ defined).
        return 0

    def __getitem__(self, key):
        # BUG FIX: the original signature lacked the subscript parameter,
        # so ``null[x]`` raised TypeError; it now absorbs the access like
        # __setitem__ does.
        return self

    def __setitem__(self, *args, **kwargs):
        pass

    def write(self, *args, **kwargs):
        # File-like no-op sink.
        pass

    def __nonzero__(self):
        # Python 2 truthiness hook; Python 3 falls back to __len__.
        return 0
# =======================================================================================================================
# BaseInterpreterInterface
# =======================================================================================================================
class BaseInterpreterInterface(BaseCodeExecutor):
    def __init__(self, mainThread, connect_status_queue=None, rpc_client=None):
        # mainThread: the interpreter's main thread object; stored as-is for
        #     use by subclasses/infrastructure.
        # connect_status_queue: optional queue, presumably used to report the
        #     console connection status back to the starter -- TODO confirm.
        # rpc_client: optional RPC client used to communicate with the IDE.
        super(BaseInterpreterInterface, self).__init__()
        self.mainThread = mainThread
        # the greeting banner is prepended to the first code executed
        self.banner_shown = False
        self.connect_status_queue = connect_status_queue
        self.rpc_client = rpc_client
def build_banner(self):
return 'print({0})\n'.format(repr(self.get_greeting_msg()))
def create_std_in(self, debugger=None, original_std_in=None):
if debugger is None:
return StdIn(self, self.rpc_client, original_stdin=original_std_in)
else:
return DebugConsoleStdIn(dbg=debugger, original_stdin=original_std_in)
    def do_exec_code(self, code, is_single_line):
        """Feed `code` to the executor, queueing it once a complete
        statement has been accumulated.

        Returns True when more input is needed to complete the statement,
        False when the code was queued (or when an error occurred).
        """
        try:
            code_fragment = CodeFragment(code, is_single_line)
            # need_more presumably accumulates the fragment into self.buffer
            # (defined in BaseCodeExecutor) -- TODO confirm.
            more = self.need_more(code_fragment)
            if not more:
                # statement complete: hand the accumulated buffer to the
                # exec thread and reset it for the next statement
                code_fragment = self.buffer
                self.buffer = None
                self.exec_queue.put(code_fragment)
            return more
        except:
            traceback.print_exc()
            return False
def execLine(self, line):
try:
if not self.banner_shown:
line = self.build_banner() + line
self.banner_shown = True
return self.do_exec_code(line, True)
except:
traceback.print_exc()
raise PythonUnhandledException(traceback.format_exc())
def execMultipleLines(self, lines):
try:
if not self.banner_shown:
lines = self.build_banner() + lines
self.banner_shown = True
if IS_JYTHON:
for line in lines.split('\n'):
self.do_exec_code(line, True)
else:
return self.do_exec_code(lines, False)
except:
traceback.print_exc()
raise PythonUnhandledException(traceback.format_exc())
def close(self):
sys.exit(0)
def get_server(self):
if getattr(self, 'rpc_client', None) is not None:
return self.rpc_client
else:
return None
server = property(get_server)
def ShowConsole(self):
server = self.get_server()
if server is not None:
server.showConsole()
def finish_exec(self, more):
self.interruptable = False
server = self.get_server()
if server is not None:
return server.notifyFinished(more)
else:
return True
def getFrame(self):
try:
hidden_ns = self.get_ipython_hidden_vars_dict()
return pydevd_thrift.frame_vars_to_struct(self.get_namespace(), hidden_ns)
except:
traceback.print_exc()
raise PythonUnhandledException(traceback.format_exc())
def getVariable(self, attributes):
try:
debug_values = []
val_dict = pydevd_vars.resolve_compound_var_object_fields(self.get_namespace(), attributes)
if val_dict is None:
val_dict = {}
keys = val_dict.keys()
for k in keys:
val = val_dict[k]
evaluate_full_value = pydevd_thrift.should_evaluate_full_value(val)
debug_values.append(pydevd_thrift.var_to_struct(val, k, evaluate_full_value=evaluate_full_value))
return debug_values
except:
traceback.print_exc()
raise PythonUnhandledException(traceback.format_exc())
def getArray(self, attr, roffset, coffset, rows, cols, format):
try:
name = attr.split("\t")[-1]
array = pydevd_vars.eval_in_context(name, self.get_namespace(), self.get_namespace())
return pydevd_thrift.table_like_struct_to_thrift_struct(array, name, roffset, coffset, rows, cols, format)
except:
traceback.print_exc()
raise PythonUnhandledException(traceback.format_exc())
def execDataViewerAction(self, varName, action, args):
try:
tmp_var = pydevd_vars.eval_in_context(varName, self.get_namespace(), self.get_namespace())
return InternalDataViewerAction.act(tmp_var, action, args.split("\t"))
except Exception as e:
raise PythonUnhandledException(type(e).__name__ + "\n" + traceback.format_exc())
def evaluate(self, expression, do_trunc):
# returns `DebugValue` of evaluated expression
try:
result = pydevd_vars.eval_in_context(expression, self.get_namespace(), self.get_namespace())
return [pydevd_thrift.var_to_struct(result, expression, do_trim=do_trunc)]
except:
traceback.print_exc()
raise PythonUnhandledException(traceback.format_exc())
def do_get_completions(self, text, act_tok):
"""Retrieves completion options.
Returns the array with completion options tuples.
:param text: the full text of the expression to complete
:param act_tok: resolved part of the expression
:return: the array of tuples `(name, documentation, args, ret_type)`
:Example:
Let us execute ``import time`` line in the Python console. Then try
to complete ``time.sle`` expression. At this point the method would
receive ``time.sle`` as ``text`` parameter and ``time.`` as
``act_tok`` parameter. The result would contain the array with the
following tuple among others: ``[..., ('sleep',
'sleep(seconds)\\n\\nDelay execution ...', '(seconds)', '2'),
...]``.
"""
try:
from _pydev_bundle._pydev_completer import Completer
completer = Completer(self.get_namespace(), None)
return completer.complete(act_tok)
except:
traceback.print_exc()
return []
def getCompletions(self, text, act_tok):
try:
words = self.do_get_completions(text, act_tok)
return [_to_completion_option(word) for word in words]
except:
raise PythonUnhandledException(traceback.format_exc())
def loadFullValue(self, seq, scope_attrs):
"""
Evaluate full value for async Console variables in a separate thread and send results to IDE side
:param seq: id of command
:param scope_attrs: a sequence of variables with their attributes separated by NEXT_VALUE_SEPARATOR
(i.e.: obj\tattr1\tattr2NEXT_VALUE_SEPARATORobj2\attr1\tattr2)
:return:
"""
try:
frame_variables = self.get_namespace()
var_objects = []
# vars = scope_attrs.split(NEXT_VALUE_SEPARATOR)
vars = scope_attrs
for var_attrs in vars:
if '\t' in var_attrs:
name, attrs = var_attrs.split('\t', 1)
else:
name = var_attrs
attrs = None
if name in frame_variables.keys():
var_object = pydevd_vars.resolve_var_object(frame_variables[name], attrs)
var_objects.append((var_object, name))
else:
var_object = pydevd_vars.eval_in_context(name, frame_variables, frame_variables)
var_objects.append((var_object, name))
from _pydev_bundle.pydev_console_commands import ThriftGetValueAsyncThreadConsole
t = ThriftGetValueAsyncThreadConsole(self.get_server(), seq, var_objects)
t.start()
except:
traceback.print_exc()
raise PythonUnhandledException(traceback.format_exc())
def changeVariable(self, attr, value):
try:
def do_change_variable():
Exec('%s=%s' % (attr, value), self.get_namespace(), self.get_namespace())
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_change_variable)
except:
traceback.print_exc()
raise PythonUnhandledException(traceback.format_exc())
def _findFrame(self, thread_id, frame_id):
'''
Used to show console with variables connection.
Always return a frame where the locals map to our internal namespace.
'''
VIRTUAL_FRAME_ID = "1" # matches PyStackFrameConsole.java
VIRTUAL_CONSOLE_ID = "console_main" # matches PyThreadConsole.java
if thread_id == VIRTUAL_CONSOLE_ID and frame_id == VIRTUAL_FRAME_ID:
f = FakeFrame()
f.f_globals = {} # As globals=locals here, let's simply let it empty (and save a bit of network traffic).
f.f_locals = self.get_namespace()
return f
else:
return self.orig_find_frame(thread_id, frame_id)
def connectToDebugger(self, debuggerPort, debugger_host=None, debugger_options=None, extra_envs=None):
'''
Used to show console with variables connection.
Mainly, monkey-patches things in the debugger structure so that the debugger protocol works.
'''
try:
if debugger_options is None:
debugger_options = {}
for (env_name, value) in dict_iter_items(extra_envs):
existing_value = os.environ.get(env_name, None)
if existing_value:
os.environ[env_name] = "%s%c%s" % (existing_value, os.path.pathsep, value)
else:
os.environ[env_name] = value
if env_name == "PYTHONPATH":
sys.path.append(value)
def do_connect_to_debugger():
try:
# Try to import the packages needed to attach the debugger
import pydevd
from _pydev_imps._pydev_saved_modules import threading
except:
# This happens on Jython embedded in host eclipse
traceback.print_exc()
sys.stderr.write('pydevd is not available, cannot connect\n', )
from _pydevd_bundle.pydevd_constants import set_thread_id
from _pydev_bundle import pydev_localhost
set_thread_id(threading.currentThread(), "console_main")
self.orig_find_frame = pydevd_vars.find_frame
pydevd_vars.find_frame = self._findFrame
self.debugger = pydevd.PyDB()
try:
pydevd.apply_debugger_options(debugger_options)
if debugger_host is None or pydev_localhost.is_localhost(debugger_host):
host = pydev_localhost.get_localhost()
else:
host = debugger_host
self.debugger.connect(host, debuggerPort)
self.debugger.prepare_to_run()
self.debugger.disable_tracing()
except:
traceback.print_exc()
sys.stderr.write('Failed to connect to target debugger.\n')
# Register to process commands when idle
self.debugrunning = False
try:
import pydevconsole
pydevconsole.set_debug_hook(self.debugger.process_internal_commands)
except:
traceback.print_exc()
sys.stderr.write(
'Version of Python does not support debuggable Interactive Console.\n')
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_connect_to_debugger)
return ('connect complete',)
except:
traceback.print_exc()
raise PythonUnhandledException(traceback.format_exc())
def execTableCommand(self, command, command_type):
try:
success, res = exec_table_command(command, command_type,
self.get_namespace(),
self.get_namespace())
if success:
return res
except:
traceback.print_exc()
raise PythonUnhandledException(traceback.format_exc())
if not success:
raise PythonTableException(str(res))
def handshake(self):
if self.connect_status_queue is not None:
self.connect_status_queue.put(True)
return "PyCharm"
def get_connect_status_queue(self):
return self.connect_status_queue
def hello(self, input_str):
# Don't care what the input string is
return ("Hello eclipse",)
# =======================================================================================================================
# FakeFrame
# =======================================================================================================================
class FakeFrame:
    '''
    Used to show console with variables connection.
    A class to be used as a mock of a frame.

    Instances get ``f_globals``/``f_locals`` assigned externally (see
    BaseInterpreterInterface._findFrame), so debugger code that expects
    a real frame object can read the console namespace from them.
    '''
| apache-2.0 |
haifengkao/LeakCanaryiOS | .ycm_extra_conf.py | 2 | 6487 | import os
import ycm_core
# Fallback clang flags handed to YouCompleteMe when no compilation database
# is configured.  Paths are hard-coded for an Xcode / iOS 7 simulator
# toolchain.  '-ISUB' is a project-specific prefix meaning "include this
# directory AND all of its subdirectories"; it is expanded by
# IncludeFlagsOfSubdirectory() below.
flags = [
'-D__IPHONE_OS_VERSION_MIN_REQUIRED=70000',
'-resource-dir',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../lib/clang/7.0.0',
'-mios-simulator-version-min=7.0',
'-arch i386',
'-fblocks',
'-fobjc-runtime=ios-7.0.0',
'-fencode-extended-block-signature',
'-fobjc-arc',
'-fobjc-exceptions',
'-fexceptions',
'-x',
'objective-c',
'-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/Library/Frameworks',
'-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/System/Library/Frameworks',
'-I/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/System/Library/Frameworks/Foundation.framework/Headers',
'-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
'-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../lib/clang/7.0.0/include',
'-I/Library/Developer/CommandLineTools/usr/include',
'-ISUB./Example/Pods/Headers/Public',
#custom definition, include subfolders
'-ISUB./Pods/Headers/Public',
'-I./Pod/Classes',
'-isysroot',
'/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk',
'-MMD',
]
# Source extensions probed when looking up flags for a header file
# (see GetCompilationInfoForFile below).
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
#   set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None
# NOTE: the original file re-assigned SOURCE_EXTENSIONS here with an
# identical list; the redundant duplicate definition has been removed.
def DirectoryOfThisScript():
  """Return the absolute path of the directory containing this file."""
  script_path = os.path.abspath( __file__ )
  return os.path.dirname( script_path )
def Subdirectories(directory):
  """Return every directory nested (at any depth) under *directory*."""
  return [
      os.path.join(parent, child)
      for parent, child_dirs, _files in os.walk(directory)
      for child in child_dirs
  ]
def IncludeFlagsOfSubdirectory( flags, working_directory ):
  """Expand the custom '-ISUB' flags into plain '-I' flags.

  '-ISUBpath' (glued) and '-ISUB path' (separate argument) both become a
  '-I' entry for the directory itself plus one '-I' entry per
  subdirectory found under ``working_directory/path``.  All other flags
  pass through unchanged.
  """
  if not working_directory:
    return list( flags )
  new_flags = []
  # Set when the previous flag was a bare '-ISUB', meaning the *current*
  # flag is the path argument that needs subdirectory expansion.
  make_next_include_subdir = False
  path_flags = [ '-ISUB']
  for flag in flags:
    # include the directory of flag as well
    new_flag = [flag.replace('-ISUB', '-I')]
    if make_next_include_subdir:
      make_next_include_subdir = False
      for subdir in Subdirectories(os.path.join(working_directory, flag)):
        new_flag.append('-I')
        new_flag.append(subdir)
    for path_flag in path_flags:
      if flag == path_flag:
        # Bare '-ISUB': the path comes as the next separate argument.
        make_next_include_subdir = True
        break
      if flag.startswith( path_flag ):
        # Glued form '-ISUBpath': append one '-I<subdir>' per subdirectory.
        path = flag[ len( path_flag ): ]
        for subdir in Subdirectories(os.path.join(working_directory, path)):
          new_flag.append('-I' + subdir)
        break
    new_flags =new_flags + new_flag
  return new_flags
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Rewrite relative include/path flags so they are absolute.

  Behaves like the stock YCM helper, but first expands this project's
  custom '-ISUB' entries via IncludeFlagsOfSubdirectory.
  """
  if not working_directory:
    return list( flags )
  #add include subfolders as well
  expanded_flags = IncludeFlagsOfSubdirectory( flags, working_directory )
  path_prefixes = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  absolute_flags = []
  next_is_path = False
  for flag in expanded_flags:
    rewritten = flag
    if next_is_path:
      # Previous flag was a bare path prefix; this flag is the path itself.
      next_is_path = False
      if not flag.startswith( '/' ):
        rewritten = os.path.join( working_directory, flag )
    for prefix in path_prefixes:
      if flag == prefix:
        # Path comes as the *next* separate argument.
        next_is_path = True
        break
      if flag.startswith( prefix ):
        # Glued form, e.g. '-Ifoo/bar': make the embedded path absolute.
        relative_part = flag[ len( prefix ): ]
        rewritten = prefix + os.path.join( working_directory, relative_part )
        break
    if rewritten:
      absolute_flags.append( rewritten )
  return absolute_flags
def IsHeaderFile( filename ):
  """Return True when *filename* carries a C/C++ header extension."""
  _, extension = os.path.splitext( filename )
  return extension in ( '.h', '.hxx', '.hpp', '.hh' )
def GetCompilationInfoForFile( filename ):
  """Look up compile flags for *filename* in the compilation database.

  The compile_commands.json generated by CMake has no entries for header
  files, so for a header we probe sibling source files (same basename,
  each extension in SOURCE_EXTENSIONS) and borrow the first match's
  flags.  Returns None when nothing usable is found.
  """
  if not IsHeaderFile( filename ):
    return database.GetCompilationInfoForFile( filename )
  basename = os.path.splitext( filename )[ 0 ]
  for extension in SOURCE_EXTENSIONS:
    candidate = basename + extension
    if not os.path.exists( candidate ):
      continue
    compilation_info = database.GetCompilationInfoForFile( candidate )
    if compilation_info.compiler_flags_:
      return compilation_info
  return None
def FlagsForFile( filename, **kwargs ):
  """YCM entry point: return the final flag dictionary for *filename*."""
  if database:
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object.
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None
    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )
  else:
    # No database configured: fall back to the hard-coded 'flags' list,
    # resolved relative to this script's directory.
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, DirectoryOfThisScript() )
  return {
    'flags': final_flags,
    'do_cache': True
  }
# if __name__ == '__main__':
# # res = subdirectory( DirectoryOfThisScript())
# flags = [
# '-D__IPHONE_OS_VERSION_MIN_REQUIRED=70000',
# '-x',
# 'objective-c',
# '-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/Library/Frameworks',
# '-ISUB./Pods/Headers/Public',
# '-MMD',
# ]
# print IncludeFlagsOfSubdirectory( flags, DirectoryOfThisScript() )
| mit |
Ernesto99/odoo | addons/account_payment/account_payment.py | 212 | 19161 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
from openerp.osv import fields, osv
_logger = logging.getLogger(__name__)
class payment_mode(osv.osv):
    """A way of paying suppliers: a bank account tied to a bank/cash journal."""
    _name= 'payment.mode'
    _description= 'Payment Mode'
    _columns = {
        'name': fields.char('Name', required=True, help='Mode of Payment'),
        'bank_id': fields.many2one('res.partner.bank', "Bank account",
            required=True,help='Bank Account for the Payment Mode'),
        'journal': fields.many2one('account.journal', 'Journal', required=True,
            domain=[('type', 'in', ('bank','cash'))], help='Bank or Cash Journal for the Payment Mode'),
        'company_id': fields.many2one('res.company', 'Company',required=True),
        'partner_id':fields.related('company_id','partner_id',type='many2one',relation='res.partner',string='Partner',store=True,),
    }
    _defaults = {
        # Default to the current user's company.
        'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id
    }
    def suitable_bank_types(self, cr, uid, payment_code=None, context=None):
        """Return the codes of the bank type that are suitable
        for the given payment type code.

        NOTE(review): despite the parameter name, *payment_code* is matched
        against ``payment_mode.id`` in the SQL below (i.e. it is a payment
        mode id, not a type code).
        """
        if not payment_code:
            return []
        cr.execute(""" SELECT pb.state
            FROM res_partner_bank pb
            JOIN payment_mode pm ON (pm.bank_id = pb.id)
            WHERE pm.id = %s """, [payment_code])
        return [x[0] for x in cr.fetchall()]
    def onchange_company_id (self, cr, uid, ids, company_id=False, context=None):
        """Onchange: mirror the selected company's partner into partner_id."""
        result = {}
        if company_id:
            partner_id = self.pool.get('res.company').browse(cr, uid, company_id, context=context).partner_id.id
            result['partner_id'] = partner_id
        return {'value': result}
class payment_order(osv.osv):
    """A batch of payment lines executed together through one payment mode.

    Workflow states: draft -> open (confirmed) -> done, or cancel.
    """
    _name = 'payment.order'
    _description = 'Payment Order'
    _rec_name = 'reference'
    _order = 'id desc'
    #dead code
    def get_wizard(self, type):
        _logger.warning("No wizard found for the payment type '%s'.", type)
        return None
    def _total(self, cursor, user, ids, name, args, context=None):
        """Function field: sum of the amounts of the order's payment lines."""
        if not ids:
            return {}
        res = {}
        for order in self.browse(cursor, user, ids, context=context):
            if order.line_ids:
                res[order.id] = reduce(lambda x, y: x + y.amount, order.line_ids, 0.0)
            else:
                res[order.id] = 0.0
        return res
    _columns = {
        'date_scheduled': fields.date('Scheduled Date', states={'done':[('readonly', True)]}, help='Select a date if you have chosen Preferred Date to be fixed.'),
        'reference': fields.char('Reference', required=1, states={'done': [('readonly', True)]}, copy=False),
        'mode': fields.many2one('payment.mode', 'Payment Mode', select=True, required=1, states={'done': [('readonly', True)]}, help='Select the Payment Mode to be applied.'),
        'state': fields.selection([
            ('draft', 'Draft'),
            ('cancel', 'Cancelled'),
            ('open', 'Confirmed'),
            ('done', 'Done')], 'Status', select=True, copy=False,
            help='When an order is placed the status is \'Draft\'.\n Once the bank is confirmed the status is set to \'Confirmed\'.\n Then the order is paid the status is \'Done\'.'),
        'line_ids': fields.one2many('payment.line', 'order_id', 'Payment lines', states={'done': [('readonly', True)]}),
        'total': fields.function(_total, string="Total", type='float'),
        'user_id': fields.many2one('res.users', 'Responsible', required=True, states={'done': [('readonly', True)]}),
        'date_prefered': fields.selection([
            ('now', 'Directly'),
            ('due', 'Due date'),
            ('fixed', 'Fixed date')
            ], "Preferred Date", change_default=True, required=True, states={'done': [('readonly', True)]}, help="Choose an option for the Payment Order:'Fixed' stands for a date specified by you.'Directly' stands for the direct execution.'Due date' stands for the scheduled date of execution."),
        'date_created': fields.date('Creation Date', readonly=True),
        'date_done': fields.date('Execution Date', readonly=True),
        'company_id': fields.related('mode', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
    }
    _defaults = {
        'user_id': lambda self,cr,uid,context: uid,
        'state': 'draft',
        'date_prefered': 'due',
        'date_created': lambda *a: time.strftime('%Y-%m-%d'),
        'reference': lambda self,cr,uid,context: self.pool.get('ir.sequence').get(cr, uid, 'payment.order'),
    }
    def set_to_draft(self, cr, uid, ids, *args):
        """Reset the orders to draft and restart their workflow."""
        self.write(cr, uid, ids, {'state': 'draft'})
        self.create_workflow(cr, uid, ids)
        return True
    def action_open(self, cr, uid, ids, *args):
        """Workflow action: assign a sequence reference where missing."""
        ir_seq_obj = self.pool.get('ir.sequence')
        for order in self.read(cr, uid, ids, ['reference']):
            if not order['reference']:
                reference = ir_seq_obj.get(cr, uid, 'payment.order')
                self.write(cr, uid, order['id'], {'reference':reference})
        return True
    def set_done(self, cr, uid, ids, *args):
        """Stamp the execution date and push the workflow to 'done'."""
        self.write(cr, uid, ids, {'date_done': time.strftime('%Y-%m-%d')})
        self.signal_workflow(cr, uid, ids, 'done')
        return True
    def write(self, cr, uid, ids, vals, context=None):
        """Override: keep each payment line's date in sync with the order's
        preferred-date policy ('fixed' -> scheduled date, 'due' -> the move
        line's maturity date, 'now' -> no date, i.e. immediate)."""
        if context is None:
            context = {}
        payment_line_obj = self.pool.get('payment.line')
        payment_line_ids = []
        if (vals.get('date_prefered', False) == 'fixed' and not vals.get('date_scheduled', False)) or vals.get('date_scheduled', False):
            for order in self.browse(cr, uid, ids, context=context):
                for line in order.line_ids:
                    payment_line_ids.append(line.id)
            payment_line_obj.write(cr, uid, payment_line_ids, {'date': vals.get('date_scheduled', False)}, context=context)
        elif vals.get('date_prefered', False) == 'due':
            vals.update({'date_scheduled': False})
            for order in self.browse(cr, uid, ids, context=context):
                for line in order.line_ids:
                    payment_line_obj.write(cr, uid, [line.id], {'date': line.ml_maturity_date}, context=context)
        elif vals.get('date_prefered', False) == 'now':
            vals.update({'date_scheduled': False})
            for order in self.browse(cr, uid, ids, context=context):
                for line in order.line_ids:
                    payment_line_ids.append(line.id)
            payment_line_obj.write(cr, uid, payment_line_ids, {'date': False}, context=context)
        return super(payment_order, self).write(cr, uid, ids, vals, context=context)
class payment_line(osv.osv):
    """One payment inside a payment order, usually backed by a payable
    account move line, expressed in the partner's currency."""
    _name = 'payment.line'
    _description = 'Payment Line'
    def translate(self, orig):
        # Map payment-line field names to their account.move.line equivalents.
        return {
            "due_date": "date_maturity",
            "reference": "ref"}.get(orig, orig)
    def _info_owner(self, cr, uid, ids, name=None, args=None, context=None):
        """Function field: formatted address of the paying company's bank owner."""
        result = {}
        for line in self.browse(cr, uid, ids, context=context):
            owner = line.order_id.mode.bank_id.partner_id
            result[line.id] = self._get_info_partner(cr, uid, owner, context=context)
        return result
    def _get_info_partner(self,cr, uid, partner_record, context=None):
        """Format a partner record as a multi-line postal address string."""
        if not partner_record:
            return False
        st = partner_record.street or ''
        st1 = partner_record.street2 or ''
        zip = partner_record.zip or ''
        city = partner_record.city or ''
        zip_city = zip + ' ' + city
        cntry = partner_record.country_id and partner_record.country_id.name or ''
        return partner_record.name + "\n" + st + " " + st1 + "\n" + zip_city + "\n" +cntry
    def _info_partner(self, cr, uid, ids, name=None, args=None, context=None):
        """Function field: formatted address of the line's partner."""
        result = {}
        for line in self.browse(cr, uid, ids, context=context):
            result[line.id] = False
            if not line.partner_id:
                break
            result[line.id] = self._get_info_partner(cr, uid, line.partner_id, context=context)
        return result
    #dead code
    def select_by_name(self, cr, uid, ids, name, args, context=None):
        if not ids: return {}
        partner_obj = self.pool.get('res.partner')
        cr.execute("""SELECT pl.id, ml.%s
            FROM account_move_line ml
                INNER JOIN payment_line pl
                ON (ml.id = pl.move_line_id)
            WHERE pl.id IN %%s"""% self.translate(name),
                   (tuple(ids),))
        res = dict(cr.fetchall())
        if name == 'partner_id':
            partner_name = {}
            for p_id, p_name in partner_obj.name_get(cr, uid,
                filter(lambda x:x and x != 0,res.values()), context=context):
                partner_name[p_id] = p_name
            for id in ids:
                if id in res and partner_name:
                    res[id] = (res[id],partner_name[res[id]])
                else:
                    res[id] = (False,False)
        else:
            for id in ids:
                res.setdefault(id, (False, ""))
        return res
    def _amount(self, cursor, user, ids, name, args, context=None):
        """Function field: line amount converted from the partner currency to
        the company currency, at the order's execution date (or today)."""
        if not ids:
            return {}
        currency_obj = self.pool.get('res.currency')
        if context is None:
            context = {}
        res = {}
        for line in self.browse(cursor, user, ids, context=context):
            ctx = context.copy()
            ctx['date'] = line.order_id.date_done or time.strftime('%Y-%m-%d')
            res[line.id] = currency_obj.compute(cursor, user, line.currency.id,
                    line.company_currency.id,
                    line.amount_currency, context=ctx)
        return res
    def _get_currency(self, cr, uid, context=None):
        """Default: the user's company currency, or the base-rate currency."""
        user_obj = self.pool.get('res.users')
        currency_obj = self.pool.get('res.currency')
        user = user_obj.browse(cr, uid, uid, context=context)
        if user.company_id:
            return user.company_id.currency_id.id
        else:
            return currency_obj.search(cr, uid, [('rate', '=', 1.0)])[0]
    def _get_date(self, cr, uid, context=None):
        """Default payment date, derived from the parent order's policy."""
        if context is None:
            context = {}
        payment_order_obj = self.pool.get('payment.order')
        date = False
        if context.get('order_id') and context['order_id']:
            order = payment_order_obj.browse(cr, uid, context['order_id'], context=context)
            if order.date_prefered == 'fixed':
                date = order.date_scheduled
            else:
                date = time.strftime('%Y-%m-%d')
        return date
    def _get_ml_inv_ref(self, cr, uid, ids, *a):
        """Function field: invoice of the backing move line, if any.
        (Note: the loop variable 'id' is a browse record, shadowing the builtin.)"""
        res = {}
        for id in self.browse(cr, uid, ids):
            res[id.id] = False
            if id.move_line_id:
                if id.move_line_id.invoice:
                    res[id.id] = id.move_line_id.invoice.id
        return res
    def _get_ml_maturity_date(self, cr, uid, ids, *a):
        """Function field: maturity date of the backing move line."""
        res = {}
        for id in self.browse(cr, uid, ids):
            if id.move_line_id:
                res[id.id] = id.move_line_id.date_maturity
            else:
                res[id.id] = False
        return res
    def _get_ml_created_date(self, cr, uid, ids, *a):
        """Function field: creation date of the backing move line."""
        res = {}
        for id in self.browse(cr, uid, ids):
            if id.move_line_id:
                res[id.id] = id.move_line_id.date_created
            else:
                res[id.id] = False
        return res
    _columns = {
        'name': fields.char('Your Reference', required=True),
        'communication': fields.char('Communication', required=True, help="Used as the message between ordering customer and current company. Depicts 'What do you want to say to the recipient about this order ?'"),
        'communication2': fields.char('Communication 2', help='The successor message of Communication.'),
        'move_line_id': fields.many2one('account.move.line', 'Entry line', domain=[('reconcile_id', '=', False), ('account_id.type', '=', 'payable')], help='This Entry Line will be referred for the information of the ordering customer.'),
        'amount_currency': fields.float('Amount in Partner Currency', digits=(16, 2),
            required=True, help='Payment amount in the partner currency'),
        'currency': fields.many2one('res.currency','Partner Currency', required=True),
        'company_currency': fields.many2one('res.currency', 'Company Currency', readonly=True),
        'bank_id': fields.many2one('res.partner.bank', 'Destination Bank Account'),
        'order_id': fields.many2one('payment.order', 'Order', required=True,
            ondelete='cascade', select=True),
        'partner_id': fields.many2one('res.partner', string="Partner", required=True, help='The Ordering Customer'),
        'amount': fields.function(_amount, string='Amount in Company Currency',
            type='float',
            help='Payment amount in the company currency'),
        'ml_date_created': fields.function(_get_ml_created_date, string="Effective Date",
            type='date', help="Invoice Effective Date"),
        'ml_maturity_date': fields.function(_get_ml_maturity_date, type='date', string='Due Date'),
        'ml_inv_ref': fields.function(_get_ml_inv_ref, type='many2one', relation='account.invoice', string='Invoice Ref.'),
        'info_owner': fields.function(_info_owner, string="Owner Account", type="text", help='Address of the Main Partner'),
        'info_partner': fields.function(_info_partner, string="Destination Account", type="text", help='Address of the Ordering Customer.'),
        'date': fields.date('Payment Date', help="If no payment date is specified, the bank will treat this payment line directly"),
        'create_date': fields.datetime('Created', readonly=True),
        'state': fields.selection([('normal','Free'), ('structured','Structured')], 'Communication Type', required=True),
        'bank_statement_line_id': fields.many2one('account.bank.statement.line', 'Bank statement line'),
        'company_id': fields.related('order_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
    }
    _defaults = {
        'name': lambda obj, cursor, user, context: obj.pool.get('ir.sequence'
            ).get(cursor, user, 'payment.line'),
        'state': 'normal',
        'currency': _get_currency,
        'company_currency': _get_currency,
        'date': _get_date,
    }
    _sql_constraints = [
        ('name_uniq', 'UNIQUE(name)', 'The payment line name must be unique!'),
    ]
    def onchange_move_line(self, cr, uid, ids, move_line_id, payment_type, date_prefered, date_scheduled, currency=False, company_currency=False, context=None):
        """Onchange: pre-fill amount, partner, currency, communication and
        date from the selected account move line."""
        data = {}
        move_line_obj = self.pool.get('account.move.line')
        data['amount_currency'] = data['communication'] = data['partner_id'] = data['bank_id'] = data['amount'] = False
        if move_line_id:
            line = move_line_obj.browse(cr, uid, move_line_id, context=context)
            data['amount_currency'] = line.amount_residual_currency
            res = self.onchange_amount(cr, uid, ids, data['amount_currency'], currency,
                                       company_currency, context)
            if res:
                data['amount'] = res['value']['amount']
            data['partner_id'] = line.partner_id.id
            temp = line.currency_id and line.currency_id.id or False
            if not temp:
                # Move line has no currency: fall back to the invoice's one.
                if line.invoice:
                    data['currency'] = line.invoice.currency_id.id
            else:
                data['currency'] = temp
            # calling onchange of partner and updating data dictionary
            temp_dict = self.onchange_partner(cr, uid, ids, line.partner_id.id, payment_type)
            data.update(temp_dict['value'])
            data['communication'] = line.ref
        if date_prefered == 'now':
            #no payment date => immediate payment
            data['date'] = False
        elif date_prefered == 'due':
            data['date'] = line.date_maturity
        elif date_prefered == 'fixed':
            data['date'] = date_scheduled
        return {'value': data}
    def onchange_amount(self, cr, uid, ids, amount, currency, cmpny_currency, context=None):
        """Onchange: convert *amount* from partner currency to company currency."""
        if (not amount) or (not cmpny_currency):
            return {'value': {'amount': False}}
        res = {}
        currency_obj = self.pool.get('res.currency')
        company_amount = currency_obj.compute(cr, uid, currency, cmpny_currency, amount)
        res['amount'] = company_amount
        return {'value': res}
    def onchange_partner(self, cr, uid, ids, partner_id, payment_type, context=None):
        """Onchange: fill the partner address text and pick the first bank
        account whose type suits the payment mode."""
        data = {}
        partner_obj = self.pool.get('res.partner')
        payment_mode_obj = self.pool.get('payment.mode')
        data['info_partner'] = data['bank_id'] = False
        if partner_id:
            part_obj = partner_obj.browse(cr, uid, partner_id, context=context)
            partner = part_obj.name or ''
            data['info_partner'] = self._get_info_partner(cr, uid, part_obj, context=context)
            if part_obj.bank_ids and payment_type:
                bank_type = payment_mode_obj.suitable_bank_types(cr, uid, payment_type, context=context)
                for bank in part_obj.bank_ids:
                    if bank.state in bank_type:
                        data['bank_id'] = bank.id
                        break
        return {'value': data}
    def fields_get(self, cr, uid, fields=None, context=None, write_access=True, attributes=None):
        """Override: make 'communication2' read-only while the communication
        type is 'structured', editable while 'normal'."""
        res = super(payment_line, self).fields_get(cr, uid, fields, context, write_access, attributes)
        if 'communication2' in res:
            res['communication2'].setdefault('states', {})
            res['communication2']['states']['structured'] = [('readonly', True)]
            res['communication2']['states']['normal'] = [('readonly', False)]
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
vinhlh/bite-project | deps/mrtaskman/server/mapreduce/lib/simplejson/scanner.py | 77 | 2263 | #!/usr/bin/env python
"""JSON token scanner
"""
import re
try:
from mapreduce.lib.simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']
# Matches a JSON number: integer part, optional fraction, optional exponent.
# The three-group layout (integer, frac, exp) is relied on by _scan_once.
NUMBER_RE = re.compile(
    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
    (re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
    """Build the pure-Python ``scan_once`` callable for *context*.

    Used when the C speedups extension is unavailable; must mirror the
    behaviour of ``_speedups.make_scanner``.  All decoder hooks are bound
    to locals up front so the inner closure avoids attribute lookups.
    """
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook
    def _scan_once(string, idx):
        # Scan one JSON value in *string* starting at *idx*; return
        # ``(value, end_index)``.  Raises StopIteration past end of input
        # or on an unrecognised token.
        try:
            nextchar = string[idx]
        except IndexError:
            raise StopIteration
        if nextchar == '"':
            return parse_string(string, idx + 1, encoding, strict)
        elif nextchar == '{':
            return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
        elif nextchar == '[':
            return parse_array((string, idx + 1), _scan_once)
        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        elif nextchar == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5
        m = match_number(string, idx)
        if m is not None:
            integer, frac, exp = m.groups()
            if frac or exp:
                # Any fractional/exponent part makes it a float.
                res = parse_float(integer + (frac or '') + (exp or ''))
            else:
                res = parse_int(integer)
            return res, m.end()
        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        else:
            raise StopIteration
    return _scan_once
make_scanner = c_make_scanner or py_make_scanner
| apache-2.0 |
IntelLabs/numba | numba/tests/test_struct_ref.py | 5 | 11855 | """
Test mutable struct, aka, structref
"""
import warnings
import numpy as np
from numba import typed, njit, errors
from numba.core import types
from numba.experimental import structref
from numba.extending import overload_method, overload_attribute
from numba.tests.support import (
MemoryLeakMixin, TestCase, temp_directory, override_config,
)
@structref.register
class MySimplerStructType(types.StructRef):
    """
    Test associated with this type represent the lowest level uses of structref.
    """
    pass


# A concrete instantiation of the struct type used by the low-level tests.
my_struct_ty = MySimplerStructType(
    fields=[("values", types.intp[:]), ("counter", types.intp)]
)

# Box instances with the generic proxy; no custom Python wrapper is defined.
structref.define_boxing(MySimplerStructType, structref.StructRefProxy)
class MyStruct(structref.StructRefProxy):
    """Python-side proxy used to construct and access MyStructType values."""

    def __new__(cls, values, counter):
        # Define this method to customize the constructor.
        # The default takes `*args`. Customizing allow the use of keyword-arg.
        # The impl of the method calls `StructRefProxy.__new__`
        return structref.StructRefProxy.__new__(cls, values, counter)

    # The below defines wrappers for attributes and methods manually,
    # each delegating to a jitted accessor defined at module level.
    @property
    def values(self):
        return get_values(self)

    @values.setter
    def values(self, val):
        return set_values(self, val)

    @property
    def counter(self):
        return get_counter(self)

    def testme(self, arg):
        # Mirrors the jitted overload registered via @overload_method below.
        return self.values * arg + self.counter

    @property
    def prop(self):
        # Mirrors the jitted overload registered via @overload_attribute below.
        return self.values, self.counter
@structref.register
class MyStructType(types.StructRef):
    """Test associated with this type represent the higher-level uses of
    structef.
    """
    pass


# Call to define_proxy is needed to register the use of `MyStruct` as a
# PyObject proxy for creating a Numba-allocated structref.
# The `MyStruct` class can then be used in both jit-code and interpreted-code.
structref.define_proxy(
    MyStruct,
    MyStructType,
    ['values', 'counter'],
)
@njit
def my_struct(values, counter):
    # Low-level construction: allocate the struct, then initialize fields.
    st = structref.new(my_struct_ty)
    my_struct_init(st, values, counter)
    return st


@njit
def my_struct_init(self, values, counter):
    # Field initializer shared by my_struct().
    self.values = values
    self.counter = counter


@njit
def ctor_by_intrinsic(vs, ctr):
    # Build via the intrinsic constructor and mutate fields in jit code.
    st = my_struct(vs, counter=ctr)
    st.values += st.values
    st.counter *= ctr
    return st


@njit
def ctor_by_class(vs, ctr):
    # Build via the registered proxy class constructor.
    return MyStruct(values=vs, counter=ctr)


# Jitted field accessors backing the MyStruct proxy properties.
@njit
def get_values(st):
    return st.values


@njit
def set_values(st, val):
    st.values = val


@njit
def get_counter(st):
    return st.counter


@njit
def compute_fields(st):
    return st.values + st.counter
class TestStructRefBasic(MemoryLeakMixin, TestCase):
    """Tests for the basic structref APIs: type creation, registration,
    intrinsic construction, proxy construction, and typed.Dict storage."""

    def test_structref_type(self):
        # Field declarations are exposed through .field_dict.
        sr = types.StructRef([('a', types.int64)])
        self.assertEqual(sr.field_dict['a'], types.int64)
        sr = types.StructRef([('a', types.int64), ('b', types.float64)])
        self.assertEqual(sr.field_dict['a'], types.int64)
        self.assertEqual(sr.field_dict['b'], types.float64)
        # bad case
        with self.assertRaisesRegex(ValueError,
                                    "expecting a str for field name"):
            types.StructRef([(1, types.int64)])
        with self.assertRaisesRegex(ValueError,
                                    "expecting a Numba Type for field type"):
            types.StructRef([('a', 123)])

    def test_invalid_uses(self):
        # The base StructRef class itself may not be registered or boxed.
        with self.assertRaisesRegex(ValueError, "cannot register"):
            structref.register(types.StructRef)
        with self.assertRaisesRegex(ValueError, "cannot register"):
            structref.define_boxing(types.StructRef, MyStruct)

    def test_MySimplerStructType(self):
        vs = np.arange(10, dtype=np.intp)
        ctr = 13
        first_expected = vs + vs
        first_got = ctor_by_intrinsic(vs, ctr)
        # the returned instance is a structref.StructRefProxy
        # but not a MyStruct
        self.assertNotIsInstance(first_got, MyStruct)
        self.assertPreciseEqual(first_expected, get_values(first_got))
        second_expected = first_expected + (ctr * ctr)
        second_got = compute_fields(first_got)
        self.assertPreciseEqual(second_expected, second_got)

    def test_MySimplerStructType_wrapper_has_no_attrs(self):
        # The generic proxy exposes no Python-level attribute wrappers.
        vs = np.arange(10, dtype=np.intp)
        ctr = 13
        wrapper = ctor_by_intrinsic(vs, ctr)
        self.assertIsInstance(wrapper, structref.StructRefProxy)
        with self.assertRaisesRegex(AttributeError, 'values'):
            wrapper.values
        with self.assertRaisesRegex(AttributeError, 'counter'):
            wrapper.counter

    def test_MyStructType(self):
        vs = np.arange(10, dtype=np.float64)
        ctr = 11
        first_expected_arr = vs.copy()
        first_got = ctor_by_class(vs, ctr)
        self.assertIsInstance(first_got, MyStruct)
        self.assertPreciseEqual(first_expected_arr, first_got.values)
        second_expected = first_expected_arr + ctr
        second_got = compute_fields(first_got)
        self.assertPreciseEqual(second_expected, second_got)
        self.assertEqual(first_got.counter, ctr)

    def test_MyStructType_mixed_types(self):
        # structref constructor is generic
        @njit
        def mixed_type(x, y, m, n):
            return MyStruct(x, y), MyStruct(m, n)

        a, b = mixed_type(1, 2.3, 3.4j, (4,))
        self.assertEqual(a.values, 1)
        self.assertEqual(a.counter, 2.3)
        self.assertEqual(b.values, 3.4j)
        self.assertEqual(b.counter, (4,))

    def test_MyStructType_in_dict(self):
        # structref instances can be stored in a typed.Dict and mutated
        # in place through the dict reference.
        td = typed.Dict()
        td['a'] = MyStruct(1, 2.3)
        self.assertEqual(td['a'].values, 1)
        self.assertEqual(td['a'].counter, 2.3)
        # overwrite
        td['a'] = MyStruct(2, 3.3)
        self.assertEqual(td['a'].values, 2)
        self.assertEqual(td['a'].counter, 3.3)
        # mutate
        td['a'].values += 10
        self.assertEqual(td['a'].values, 12)  # changed
        self.assertEqual(td['a'].counter, 3.3)  # unchanged
        # insert
        td['b'] = MyStruct(4, 5.6)

    def test_MyStructType_in_dict_mixed_type_error(self):
        self.disable_leak_check()
        td = typed.Dict()
        td['a'] = MyStruct(1, 2.3)
        self.assertEqual(td['a'].values, 1)
        self.assertEqual(td['a'].counter, 2.3)
        # ERROR: store different types
        with self.assertRaisesRegex(errors.TypingError,
                                    r"Cannot cast numba.MyStructType"):
            # because first field is not a float;
            # the second field is now an integer.
            td['b'] = MyStruct(2.3, 1)
@overload_method(MyStructType, "testme")
def _ol_mystructtype_testme(self, arg):
    """Jit-side implementation of ``MyStruct.testme``."""
    def impl(self, arg):
        return self.values * arg + self.counter
    return impl


@overload_attribute(MyStructType, "prop")
def _ol_mystructtype_prop(self):
    """Jit-side implementation of the ``MyStruct.prop`` attribute."""
    def get(self):
        return self.values, self.counter
    return get
class TestStructRefExtending(MemoryLeakMixin, TestCase):
    """Tests extending MyStructType with overload_method/overload_attribute."""

    def test_overload_method(self):
        @njit
        def check(x):
            vs = np.arange(10, dtype=np.float64)
            ctr = 11
            obj = MyStruct(vs, ctr)
            return obj.testme(x)

        x = 3
        got = check(x)
        # The pure-Python proxy implements the same method, so the
        # interpreted run provides the expected value.
        expect = check.py_func(x)
        self.assertPreciseEqual(got, expect)

    def test_overload_attribute(self):
        @njit
        def check():
            vs = np.arange(10, dtype=np.float64)
            ctr = 11
            obj = MyStruct(vs, ctr)
            return obj.prop

        got = check()
        expect = check.py_func()
        self.assertPreciseEqual(got, expect)
# Module-level helpers (required for cache=True compilation in the
# caching tests below).
def caching_test_make(x, y):
    struct = MyStruct(values=x, counter=y)
    return struct


def caching_test_use(struct, z):
    # Exercises a cached overloaded-method call on the struct.
    return struct.testme(z)
class TestStructRefCaching(MemoryLeakMixin, TestCase):
    """Verify structref code participates in the on-disk dispatcher cache."""

    def setUp(self):
        # Use a fresh cache directory so hit/miss counts are deterministic.
        self._cache_dir = temp_directory(TestStructRefCaching.__name__)
        self._cache_override = override_config('CACHE_DIR', self._cache_dir)
        self._cache_override.__enter__()
        warnings.simplefilter("error")
        warnings.filterwarnings(action="ignore", module="typeguard")

    def tearDown(self):
        self._cache_override.__exit__(None, None, None)
        warnings.resetwarnings()

    def test_structref_caching(self):
        def assert_cached(stats):
            self.assertEqual(len(stats.cache_hits), 1)
            self.assertEqual(len(stats.cache_misses), 0)

        def assert_not_cached(stats):
            self.assertEqual(len(stats.cache_hits), 0)
            self.assertEqual(len(stats.cache_misses), 1)

        def check(cached):
            # Fresh dispatchers on each call; the second invocation of
            # check() should load from the cache the first populated.
            check_make = njit(cache=True)(caching_test_make)
            check_use = njit(cache=True)(caching_test_use)
            vs = np.random.random(3)
            ctr = 17
            factor = 3
            st = check_make(vs, ctr)
            got = check_use(st, factor)
            expect = vs * factor + ctr
            self.assertPreciseEqual(got, expect)
            if cached:
                assert_cached(check_make.stats)
                assert_cached(check_use.stats)
            else:
                assert_not_cached(check_make.stats)
                assert_not_cached(check_use.stats)

        check(cached=False)
        check(cached=True)
@structref.register
class PolygonStructType(types.StructRef):
    """StructRef type whose 'parent' field refers back to the type itself."""

    def preprocess_fields(self, fields):
        # temp name to allow Optional instantiation
        self.name = f"numba.PolygonStructType#{id(self)}"
        # Ignore the caller-supplied fields and force the recursive layout.
        fields = tuple([
            ('value', types.Optional(types.int64)),
            ('parent', types.Optional(self)),
        ])
        return fields


# The field types here are placeholders; preprocess_fields() replaces them.
polygon_struct_type = PolygonStructType(fields=(
    ('value', types.Any),
    ('parent', types.Any)
))
class PolygonStruct(structref.StructRefProxy):
    """Python-side proxy for PolygonStructType instances."""

    def __new__(cls, value, parent):
        return structref.StructRefProxy.__new__(cls, value, parent)

    @property
    def value(self):
        return PolygonStruct_get_value(self)

    @property
    def parent(self):
        return PolygonStruct_get_parent(self)


# Jitted accessors backing the proxy properties above.
@njit
def PolygonStruct_get_value(self):
    return self.value


@njit
def PolygonStruct_get_parent(self):
    return self.parent


# Register PolygonStruct as the PyObject proxy for PolygonStructType.
structref.define_proxy(
    PolygonStruct,
    PolygonStructType,
    ["value", "parent"]
)
@overload_method(PolygonStructType, "flip")
def _ol_polygon_struct_flip(self):
    """Jit method negating ``value`` in place (no-op when value is None)."""
    def impl(self):
        if self.value is not None:
            self.value = -self.value
    return impl


@overload_attribute(PolygonStructType, "prop")
def _ol_polygon_struct_prop(self):
    """Jit attribute returning the ``(value, parent)`` pair."""
    def get(self):
        return self.value, self.parent
    return get
class TestStructRefForwardTyping(MemoryLeakMixin, TestCase):
    """Tests for self-referential (forward-typed) structrefs."""

    def test_same_type_assignment(self):
        @njit
        def check(x):
            poly = PolygonStruct(None, None)
            p_poly = PolygonStruct(None, None)
            poly.value = x
            poly.parent = p_poly
            p_poly.value = x
            return poly.parent.value

        x = 11
        got = check(x)
        expect = x
        self.assertPreciseEqual(got, expect)

    def test_overload_method(self):
        @njit
        def check(x):
            poly = PolygonStruct(None, None)
            p_poly = PolygonStruct(None, None)
            poly.value = x
            poly.parent = p_poly
            p_poly.value = x
            poly.flip()
            # flip() through the recursive 'parent' field.
            poly.parent.flip()
            return poly.parent.value

        x = 3
        got = check(x)
        expect = -x
        self.assertPreciseEqual(got, expect)

    def test_overload_attribute(self):
        @njit
        def check():
            obj = PolygonStruct(5, None)
            return obj.prop[0]

        got = check()
        expect = 5
        self.assertPreciseEqual(got, expect)
| bsd-2-clause |
polonat/django-payments | payments/forms.py | 5 | 2499 | from __future__ import unicode_literals
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
from django import forms
from django.utils.translation import ugettext_lazy as _
from .fields import (CreditCardNumberField, CreditCardExpiryField,
CreditCardVerificationField, CreditCardNameField)
class PaymentForm(forms.Form):
    '''
    Payment form, suitable for Django templates.

    When displaying the form remember to use *action* and *method*.
    '''
    #: Form action URL for template use
    action = ''
    #: Form method for template use, either "get" or "post"
    method = 'post'

    def __init__(self, data=None, action=None, method='post', provider=None,
                 payment=None, hidden_inputs=True, autosubmit=False):
        if hidden_inputs and data is not None:
            # Gateway-redirect mode: render every provided value as a
            # hidden input instead of binding/validating the data.
            super(PaymentForm, self).__init__(auto_id=False)
            for key, val in data.items():
                widget = forms.widgets.HiddenInput()
                self.fields[key] = forms.CharField(initial=val, widget=widget)
        else:
            super(PaymentForm, self).__init__(data=data)
        self.action = action
        self.autosubmit = autosubmit
        self.method = method
        self.provider = provider
        self.payment = payment
class CreditCardPaymentForm(PaymentForm):
    """Payment form collecting card number, expiry date and CVV."""

    number = CreditCardNumberField(label=_('Card Number'), max_length=32,
                                   required=True)
    expiration = CreditCardExpiryField()
    cvv2 = CreditCardVerificationField(
        label=_('CVV2 Security Number'), required=False, help_text=_(
            'Last three digits located on the back of your card.'
            ' For American Express the four digits found on the front side.'))

    def __init__(self, *args, **kwargs):
        # Card forms always render real (visible) inputs.
        super(CreditCardPaymentForm, self).__init__(
            hidden_inputs=False, *args, **kwargs)
        if hasattr(self, 'VALID_TYPES'):
            # Subclasses may restrict the accepted card brands.
            self.fields['number'].valid_types = self.VALID_TYPES
class CreditCardPaymentFormWithName(CreditCardPaymentForm):
    """Credit-card form that additionally asks for the cardholder name."""

    name = CreditCardNameField(label=_('Name on Credit Card'), max_length=128)

    def __init__(self, *args, **kwargs):
        super(CreditCardPaymentFormWithName, self).__init__(*args, **kwargs)
        # Move the 'name' field to the front of the form field order.
        name_field = self.fields.pop('name')
        fields = OrderedDict({'name': name_field})
        fields.update(self.fields)
        self.fields = fields
| bsd-3-clause |
anandology/tornado | tornado/escape.py | 120 | 14441 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Escaping/unescaping methods for HTML, JSON, URLs, and others.
Also includes a few other miscellaneous string manipulation functions that
have crept in over time.
"""
from __future__ import absolute_import, division, print_function, with_statement
import re
import sys
from tornado.util import unicode_type, basestring_type, u
try:
from urllib.parse import parse_qs as _parse_qs # py3
except ImportError:
from urlparse import parse_qs as _parse_qs # Python 2.6+
try:
import htmlentitydefs # py2
except ImportError:
import html.entities as htmlentitydefs # py3
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse # py2
import json
try:
unichr
except NameError:
unichr = chr
_XHTML_ESCAPE_RE = re.compile('[&<>"\']')
_XHTML_ESCAPE_DICT = {'&': '&', '<': '<', '>': '>', '"': '"',
'\'': '''}
def xhtml_escape(value):
"""Escapes a string so it is valid within HTML or XML.
Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``.
When used in attribute values the escaped strings must be enclosed
in quotes.
.. versionchanged:: 3.2
Added the single quote to the list of escaped characters.
"""
return _XHTML_ESCAPE_RE.sub(lambda match: _XHTML_ESCAPE_DICT[match.group(0)],
to_basestring(value))
def xhtml_unescape(value):
    """Un-escapes an XML-escaped string."""
    # Handles named (&name;) and numeric (&#39; / &#x27;) entities via
    # the _convert_entity callback.
    return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))
# The fact that json_encode wraps json.dumps is an implementation detail.
# Please see https://github.com/tornadoweb/tornado/pull/706
# before sending a pull request that adds **kwargs to this function.
def json_encode(value):
    """JSON-encodes the given Python object."""
    # JSON permits (but does not require) escaping forward slashes.  Doing
    # it for "</" means the output can be embedded inside an HTML <script>
    # tag without a literal "</script>" terminating the script early.
    # (Python's json module does not do this escaping itself.)
    encoded = json.dumps(value)
    return encoded.replace("</", "<\\/")
def json_decode(value):
    """Returns Python objects for the given JSON string."""
    # Accepts either byte or unicode input.
    return json.loads(to_basestring(value))
def squeeze(value):
    """Replace all sequences of whitespace chars with a single space."""
    # The character class covers every ASCII control char plus space.
    collapsed = re.sub(r"[\x00-\x20]+", " ", value)
    return collapsed.strip()
def url_escape(value, plus=True):
    """Returns a URL-encoded version of the given value.

    If ``plus`` is true (the default), spaces will be represented
    as "+" instead of "%20".  This is appropriate for query strings
    but not for the path component of a URL.  Note that this default
    is the reverse of Python's urllib module.

    .. versionadded:: 3.1
       The ``plus`` argument
    """
    # Encode to utf8 first so both byte and unicode input are accepted.
    quote = urllib_parse.quote_plus if plus else urllib_parse.quote
    return quote(utf8(value))
# python 3 changed things around enough that we need two separate
# implementations of url_unescape.  We also need our own implementation
# of parse_qs since python 3's version insists on decoding everything.
if sys.version_info[0] < 3:
    def url_unescape(value, encoding='utf-8', plus=True):
        """Decodes the given value from a URL.

        The argument may be either a byte or unicode string.

        If encoding is None, the result will be a byte string.  Otherwise,
        the result is a unicode string in the specified encoding.

        If ``plus`` is true (the default), plus signs will be interpreted
        as spaces (literal plus signs must be represented as "%2B").  This
        is appropriate for query strings and form-encoded values but not
        for the path component of a URL.  Note that this default is the
        reverse of Python's urllib module.

        .. versionadded:: 3.1
           The ``plus`` argument
        """
        unquote = (urllib_parse.unquote_plus if plus else urllib_parse.unquote)
        if encoding is None:
            return unquote(utf8(value))
        else:
            return unicode_type(unquote(utf8(value)), encoding)

    # Python 2's parse_qs already returns byte strings.
    parse_qs_bytes = _parse_qs
else:
    def url_unescape(value, encoding='utf-8', plus=True):
        """Decodes the given value from a URL.

        The argument may be either a byte or unicode string.

        If encoding is None, the result will be a byte string.  Otherwise,
        the result is a unicode string in the specified encoding.

        If ``plus`` is true (the default), plus signs will be interpreted
        as spaces (literal plus signs must be represented as "%2B").  This
        is appropriate for query strings and form-encoded values but not
        for the path component of a URL.  Note that this default is the
        reverse of Python's urllib module.

        .. versionadded:: 3.1
           The ``plus`` argument
        """
        if encoding is None:
            if plus:
                # unquote_to_bytes doesn't have a _plus variant
                value = to_basestring(value).replace('+', ' ')
            return urllib_parse.unquote_to_bytes(value)
        else:
            unquote = (urllib_parse.unquote_plus if plus
                       else urllib_parse.unquote)
            return unquote(to_basestring(value), encoding=encoding)

    def parse_qs_bytes(qs, keep_blank_values=False, strict_parsing=False):
        """Parses a query string like urlparse.parse_qs, but returns the
        values as byte strings.

        Keys still become type str (interpreted as latin1 in python3!)
        because it's too painful to keep them as byte strings in
        python3 and in practice they're nearly always ascii anyway.
        """
        # This is gross, but python3 doesn't give us another way.
        # Latin1 is the universal donor of character encodings.
        result = _parse_qs(qs, keep_blank_values, strict_parsing,
                           encoding='latin1', errors='strict')
        encoded = {}
        for k, v in result.items():
            encoded[k] = [i.encode('latin1') for i in v]
        return encoded
# Types passed through utf8() unchanged.
_UTF8_TYPES = (bytes, type(None))


def utf8(value):
    """Converts a string argument to a byte string.

    If the argument is already a byte string or None, it is returned unchanged.
    Otherwise it must be a unicode string and is encoded as utf8.
    """
    if isinstance(value, _UTF8_TYPES):
        return value
    if not isinstance(value, unicode_type):
        raise TypeError(
            "Expected bytes, unicode, or None; got %r" % type(value)
        )
    return value.encode("utf-8")
# Types passed through to_unicode() unchanged.
_TO_UNICODE_TYPES = (unicode_type, type(None))


def to_unicode(value):
    """Converts a string argument to a unicode string.

    If the argument is already a unicode string or None, it is returned
    unchanged.  Otherwise it must be a byte string and is decoded as utf8.
    """
    if isinstance(value, _TO_UNICODE_TYPES):
        return value
    if not isinstance(value, bytes):
        raise TypeError(
            "Expected bytes, unicode, or None; got %r" % type(value)
        )
    return value.decode("utf-8")

# to_unicode was previously named _unicode not because it was private,
# but to avoid conflicts with the built-in unicode() function/type
_unicode = to_unicode

# When dealing with the standard library across python 2 and 3 it is
# sometimes useful to have a direct conversion to the native string type
if str is unicode_type:
    native_str = to_unicode
else:
    native_str = utf8
# Types passed through to_basestring() unchanged.
_BASESTRING_TYPES = (basestring_type, type(None))


def to_basestring(value):
    """Converts a string argument to a subclass of basestring.

    In python2, byte and unicode strings are mostly interchangeable,
    so functions that deal with a user-supplied argument in combination
    with ascii string constants can use either and should return the type
    the user supplied.  In python3, the two types are not interchangeable,
    so this method is needed to convert byte strings to unicode.
    """
    if isinstance(value, _BASESTRING_TYPES):
        return value
    if not isinstance(value, bytes):
        raise TypeError(
            "Expected bytes, unicode, or None; got %r" % type(value)
        )
    return value.decode("utf-8")
def recursive_unicode(obj):
    """Walks a simple data structure, converting byte strings to unicode.

    Supports lists, tuples, and dictionaries.
    """
    if isinstance(obj, dict):
        return dict((recursive_unicode(k), recursive_unicode(v)) for (k, v) in obj.items())
    elif isinstance(obj, list):
        return list(recursive_unicode(i) for i in obj)
    elif isinstance(obj, tuple):
        return tuple(recursive_unicode(i) for i in obj)
    elif isinstance(obj, bytes):
        return to_unicode(obj)
    else:
        # Anything else (str, int, None, ...) is returned unchanged.
        return obj
# I originally used the regex from
# http://daringfireball.net/2010/07/improved_regex_for_matching_urls
# but it gets all exponential on certain patterns (such as too many trailing
# dots), causing the regex matcher to never return.
# This regex should avoid those problems.
# Use to_unicode instead of tornado.util.u - we don't want backslashes getting
# processed as escapes.
# NOTE: this pattern runs against *escaped* text (see linkify), so it must
# treat "&amp;" and "&quot;" as single units; the entity-decoded variant
# ("|&|\"") no longer excluded escaped entities correctly.
_URL_RE = re.compile(to_unicode(r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&amp;|&quot;)*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&amp;|&quot;)*\)))+)"""))
def linkify(text, shorten=False, extra_params="",
            require_protocol=False, permitted_protocols=["http", "https"]):
    """Converts plain text into HTML with links.

    For example: ``linkify("Hello http://tornadoweb.org!")`` would return
    ``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``

    Parameters:

    * ``shorten``: Long urls will be shortened for display.

    * ``extra_params``: Extra text to include in the link tag, or a callable
      taking the link as an argument and returning the extra text
      e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
      or::

          def extra_params_cb(url):
              if url.startswith("http://example.com"):
                  return 'class="internal"'
              else:
                  return 'class="external" rel="nofollow"'
          linkify(text, extra_params=extra_params_cb)

    * ``require_protocol``: Only linkify urls which include a protocol. If
      this is False, urls such as www.facebook.com will also be linkified.

    * ``permitted_protocols``: List (or set) of protocols which should be
      linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
      "mailto"])``. It is very unsafe to include protocols such as
      ``javascript``.
    """
    if extra_params and not callable(extra_params):
        extra_params = " " + extra_params.strip()

    def make_link(m):
        # re.sub callback: group(1) is the full url, group(2) the protocol.
        url = m.group(1)
        proto = m.group(2)
        if require_protocol and not proto:
            return url  # not protocol, no linkify

        if proto and proto not in permitted_protocols:
            return url  # bad protocol, no linkify

        href = m.group(1)
        if not proto:
            href = "http://" + href   # no proto specified, use http

        if callable(extra_params):
            params = " " + extra_params(href).strip()
        else:
            params = extra_params

        # clip long urls. max_len is just an approximation
        max_len = 30
        if shorten and len(url) > max_len:
            before_clip = url
            if proto:
                proto_len = len(proto) + 1 + len(m.group(3) or "")  # +1 for :
            else:
                proto_len = 0

            parts = url[proto_len:].split("/")
            if len(parts) > 1:
                # Grab the whole host part plus the first bit of the path
                # The path is usually not that interesting once shortened
                # (no more slug, etc), so it really just provides a little
                # extra indication of shortening.
                url = url[:proto_len] + parts[0] + "/" + \
                    parts[1][:8].split('?')[0].split('.')[0]

            if len(url) > max_len * 1.5:  # still too long
                url = url[:max_len]

            if url != before_clip:
                amp = url.rfind('&')
                # avoid splitting html char entities
                if amp > max_len - 5:
                    url = url[:amp]
                url += "..."

                if len(url) >= len(before_clip):
                    url = before_clip
                else:
                    # full url is visible on mouse-over (for those who don't
                    # have a status bar, such as Safari by default)
                    params += ' title="%s"' % href

        return u('<a href="%s"%s>%s</a>') % (href, params, url)

    # First HTML-escape so that our strings are all safe.
    # The regex is modified to avoid character entities other than &amp;
    # so that we won't pick up &quot;, etc.
    text = _unicode(xhtml_escape(text))
    return _URL_RE.sub(make_link, text)
def _convert_entity(m):
    """re.sub callback for xhtml_unescape: decode one entity match."""
    if m.group(1) == "#":
        try:
            if m.group(2)[:1].lower() == 'x':
                # Hexadecimal character reference, e.g. &#x27;
                return unichr(int(m.group(2)[1:], 16))
            else:
                return unichr(int(m.group(2)))
        except ValueError:
            # Malformed numeric reference: leave it untouched.
            return "&#%s;" % m.group(2)
    try:
        return _HTML_UNICODE_MAP[m.group(2)]
    except KeyError:
        # Unknown named entity: leave it untouched.
        return "&%s;" % m.group(2)
def _build_unicode_map():
    """Build the named-entity -> unicode character lookup table."""
    unicode_map = {}
    for name, value in htmlentitydefs.name2codepoint.items():
        unicode_map[name] = unichr(value)
    return unicode_map

_HTML_UNICODE_MAP = _build_unicode_map()
| apache-2.0 |
alexpotter1/Neutron_msm8974_hammerhead | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that autovivifies nested keys (Perl-style)."""
    return defaultdict(autodict)

# event_name -> field_name -> {'delim': str, 'values': {value: name}}
flag_fields = autodict()
# event_name -> field_name -> {'values': {value: name}}
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    # Register the delimiter used when joining multiple flag names.
    flag_fields[event_name][field_name]['delim'] = delim

def define_flag_value(event_name, field_name, value, field_str):
    # Register the symbolic name for one flag bit value.
    flag_fields[event_name][field_name]['values'][value] = field_str

def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass

def define_symbolic_value(event_name, field_name, value, field_str):
    # Register the symbolic name for one exact field value.
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Render a flag-typed field as its delimiter-joined symbolic names.

    A zero value maps to the name registered for 0 (if any); bits with
    no registered name are silently dropped.
    """
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        # sorted() instead of list.sort(): dict.keys() returns a view
        # (with no .sort method) on Python 3, and sorted() also works
        # on the list returned under Python 2.
        keys = sorted(flag_fields[event_name][field_name]['values'].keys())
        for idx in keys:
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Render a symbolic-typed field as its registered name, or ''."""
    string = ""
    if symbolic_fields[event_name][field_name]:
        # sorted() instead of list.sort(): dict.keys() returns a view
        # (with no .sort method) on Python 3, and sorted() also works
        # on the list returned under Python 2.
        keys = sorted(symbolic_fields[event_name][field_name]['values'].keys())
        for idx in keys:
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
    return string
# Bit values of the common trace flags field and their symbolic names.
trace_flags = {
    0x00: "NONE",
    0x01: "IRQS_OFF",
    0x02: "IRQS_NOSUPPORT",
    0x04: "NEED_RESCHED",
    0x08: "HARDIRQ",
    0x10: "SOFTIRQ",
}

def trace_flag_str(value):
    """Return a ' | '-joined list of the flag names set in value.

    A value of zero renders as "NONE"; unrecognized bits are dropped.
    """
    names = []
    remaining = value
    for bit in trace_flags.keys():
        if not remaining and not bit:
            names.append("NONE")
            break
        if bit and (remaining & bit) == bit:
            names.append(trace_flags[bit])
            remaining &= ~bit
    return " | ".join(names)
def taskState(state):
    """Map a numeric scheduler task state to its one-letter code."""
    codes = {
        0: "R",       # running
        1: "S",       # interruptible sleep
        2: "D",       # uninterruptible sleep
        64: "DEAD",
    }
    return codes.get(state, "Unknown")
class EventHeaders:
    """Common header fields shared by every perf trace event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Return the full event timestamp in nanoseconds."""
        return self.nsecs + self.secs * (10 ** 9)

    def ts_format(self):
        """Return the timestamp formatted as 'seconds.microseconds'."""
        usecs = int(self.nsecs / 1000)
        return "%d.%d" % (self.secs, usecs)
| gpl-2.0 |
gautam1858/tensorflow | tensorflow/python/framework/subscribe.py | 22 | 12982 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Subscribe function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import re
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
def _recursive_apply(tensors, apply_fn):
  """Helper method to recursively apply a function to structure of tensors.

  The structure of the tensors should take the form similar to fetches in
  `tf.Session` and includes single `Tensor`, `list`, nested `list`, `tuple`,
  `namedtuple`, or `dict`.

  Args:
    tensors: Single `Tensor`, `list`, nested `list, `tuple`,
      `namedtuple`, or `dict`.
    apply_fn: Function to apply to each `Tensor` and should return a `Tensor`.
  Returns:
    Returns the modified tensors with the same structure.
  Raises:
    `TypeError` if undefined type in the tensors structure.
  """
  tensors_type = type(tensors)
  if tensors_type is ops.Tensor:
    # Exact type check (not isinstance): only plain Tensors take this path.
    return apply_fn(tensors)
  elif isinstance(tensors, variables.Variable):
    # Variables are subscribed through their read value.
    return apply_fn(tensors.value())
  elif isinstance(tensors, (list, tuple)):
    tensors = [_recursive_apply(t, apply_fn) for t in tensors]
    if tensors_type is list:
      return list(tensors)
    elif tensors_type is tuple:
      return tuple(tensors)
    # Any other tuple subclass is assumed to accept its fields positionally.
    return tensors_type(*tensors)  # collections.namedtuple
  elif tensors_type is dict:
    return dict([(k, _recursive_apply(v, apply_fn))
                 for k, v in tensors.items()])
  else:
    raise TypeError('_recursive_apply argument %r has invalid type %r' %
                    (tensors, tensors_type))
class _ControlOutputCache(object):
"""Helper class to manage calculating and caching control_outputs in graph."""
def __init__(self):
self.cache = {}
def calc_control_outputs(self, graph):
"""Returns the map of control_outputs for a given graph.
Args:
graph: The graph to parse.
Returns:
A map of the control outputs.
"""
control_outputs = {}
for op in graph.get_operations():
for control_input in op.control_inputs:
if control_input not in control_outputs:
control_outputs[control_input] = set()
control_outputs[control_input].add(op)
return control_outputs
def get_control_outputs(self, op):
"""Return the control outputs for a given op.
Args:
op: The op to fetch control outputs for.
Returns:
Iterable of control output ops.
"""
if op.graph not in self.cache:
control_outputs = self.calc_control_outputs(op.graph)
self.cache[op.graph] = control_outputs
else:
control_outputs = self.cache[op.graph]
return control_outputs.get(op, [])
def _subscribe_new(tensor, side_effects, control_cache):
  """Helper method that subscribes a single tensor to a list of side_effects.

  Args:
    tensor: `tf.Tensor`
    side_effects: List of side_effect functions see subscribe for details.
    control_cache: `_ControlOutputCache` helper to get control_outputs faster.

  Returns:
    The modified replacement to the passed in tensor which triggers the side
    effects.
  """
  # Record every (consumer op, input index) pair that currently reads `tensor`
  # so those inputs can be rewired to the new identity tensor below.  The
  # consumer list is copied because the graph is mutated while iterating.
  update_input = []
  for consumer_op in list(tensor.consumers()):  # explicit copy
    update_input.append((consumer_op, list(consumer_op.inputs).index(tensor)))

  # Ops that carry a control dependency on the producer of `tensor` must also
  # be re-pointed at the new identity op.
  update_control_input = control_cache.get_control_outputs(tensor.op)

  # Trailing slash on name scope to replace the scope.
  name_scope = tensor.op.name + '/subscription/'
  with ops.name_scope(name_scope):
    outs = []
    for s in side_effects:
      outs += s(tensor)

    # The identity is only produced after all side-effect ops have run; this
    # control dependency is what makes fetching it trigger the side effects.
    with ops.control_dependencies(outs):
      out = array_ops.identity(tensor)

  # Rewire the data inputs of all original consumers to the identity output.
  for consumer_op, index in update_input:
    consumer_op._update_input(index, out)  # pylint: disable=protected-access

  for consumer_op in update_control_input:
    # If an op has more than one output and two or more of its output tensors
    # are subscribed at the same time, we remove the control dependency from
    # the original op only once and we add the dependencies to all the
    # new identities.
    new_control_inputs = consumer_op.control_inputs
    if tensor.op in new_control_inputs:
      new_control_inputs.remove(tensor.op)
    new_control_inputs.append(out.op)
    # pylint: disable=protected-access
    consumer_op._remove_all_control_inputs()
    consumer_op._add_control_inputs(new_control_inputs)
    # pylint: enable=protected-access
  return out
def _subscribe_extend(tensor, side_effects):
  """Helper method to extend the list of side_effects for a subscribed tensor.

  Args:
    tensor: A `tf.Tensor` as returned by subscribe().
    side_effects: List of side_effect functions, see subscribe for details.

  Returns:
    The given subscribed tensor (for API consistency).
  """
  # A subscribed tensor is an Identity op with exactly one input: the tensor
  # that was originally subscribed.
  assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(
      tensor.op.name)
  source_tensor = tensor.op.inputs[0]

  # Build the side effect graphs and add their outputs to the list of control
  # dependencies for the subscribed tensor.
  outs = []
  name_scope = source_tensor.op.name + '/subscription/'
  with ops.name_scope(name_scope):
    for s in side_effects:
      outs += s(source_tensor)

  # Side effects may return tensors or ops; normalize to ops before attaching
  # them as control inputs of the existing identity op.
  out_ops = [out.op if isinstance(out, ops.Tensor) else out for out in outs]
  tensor.op._add_control_inputs(out_ops)  # pylint: disable=protected-access
  return tensor
def _is_subscribed_identity(tensor):
"""Checks if the given tensor is an identity op returned by `subscribe()`.
Args:
tensor: A `tf.Tensor` to check.
Returns:
True if the given tensor matches the criteria for subscription identies:
its op type is `Identity`, its name matches the name of its input and
conforms to the convention for subscribed nodes.
False otherwise.
"""
# Subscribed tensor are assumed to be identity ops.
if tensor.op.type != 'Identity':
return False
# Check that the tensor name matches the convention in place for identity ops
# created by subscribe().
match = re.match(
r'(?P<prefix_name>^.*?)/subscription/Identity[^/]+', tensor.name)
if match is None or len(match.groups()) != 1:
return False
prefix_name = match.group('prefix_name')
# Get a reference to the source tensor and check that it has a matching name.
assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(
tensor.op.name)
source_tensor = tensor.op.inputs[0]
if prefix_name != source_tensor.op.name:
return False
return True
def _subscribe(tensor, side_effects, control_cache):
  """Helper method that subscribes a single tensor to a list of side_effects.

  This method will check if the given tensor has already been subscribed or if
  it's a tensor returned by a previous call to `subscribe()` and, if so, will
  reuse the existing identity op, appending the given side effects to the list
  of existing ones.

  Args:
    tensor: The `tf.Tensor` to be subscribed.
    side_effects: List of side_effect functions, see subscribe for details.
    control_cache: `_ControlOutputCache` helper to get control_outputs faster.

  Returns:
    The modified replacement to the passed in tensor which triggers the side
    effects or the given tensor, if it was already been subscribed.
  """
  # Check if the given tensor has a numpy compatible type (see dtypes.py).
  # If not, we cannot subscribe it, so we just return the original tensor.
  if not tensor.dtype.is_numpy_compatible:
    logging.debug(('Tensor {} has an un-supported {} type and cannot be '
                   'subscribed.').format(tensor.name, tensor.dtype))
    return tensor

  # The tensor is itself the result of a previous subscribe() call: reuse it.
  if _is_subscribed_identity(tensor):
    return _subscribe_extend(tensor, side_effects)

  # Check if the given tensor has already been subscribed by inspecting its
  # outputs.
  name_scope = tensor.op.name + '/subscription/Identity'
  consumers = tensor.consumers()
  matching_ops = [op for op in consumers if op.name.startswith(name_scope)]
  assert len(matching_ops) <= 1, ('Op {} must only have one subscription '
                                  'op connected to it').format(tensor.op.name)
  if len(matching_ops) == 1:
    candidate_tensor = matching_ops[0].outputs[0]
    # The name prefix alone is not proof; verify the full convention.
    if _is_subscribed_identity(candidate_tensor):
      return _subscribe_extend(candidate_tensor, side_effects)

  # First-time subscription: build a new identity with the side effects
  # attached as control dependencies.
  return _subscribe_new(tensor, side_effects, control_cache)
@contextlib.contextmanager
def _preserve_control_flow_context(tensor):
"""Preserve the control flow context for the given tensor.
Sets the graph context to the tensor's context so that side effect ops are
added under the same context.
This is needed when subscribing to tensors defined within a conditional
block or a while loop. In these cases we need that the side-effect ops
are created within the same control flow context as that of the tensor
they are attached to.
Args:
tensor: tensor whose context should be preserved.
Yields:
None
"""
# pylint: disable=protected-access
context = tensor.op._get_control_flow_context()
# pylint: enable=protected-access
if context:
context.Enter()
try:
yield
finally:
if context:
context.Exit()
def _scoped_subscribe(tensor, side_effects, control_cache):
  """Helper method that subscribes a single tensor to a list of side_effects.

  This is a thin wrapper around `_subscribe` and ensures that the side effect
  ops are added within the same device and control flow context of the
  subscribed tensor.

  Args:
    tensor: The `tf.Tensor` to be subscribed.
    side_effects: List of side_effect functions, see subscribe for details.
    control_cache: `_ControlOutputCache` helper to get control_outputs faster.

  Returns:
    The modified replacement to the passed in tensor which triggers the side
    effects or the given tensor, if it was already been subscribed.
  """
  # Pin the new ops to the tensor's device and enter its control flow context
  # so they remain valid inside conditionals / while loops.
  with ops.device(tensor.device):
    with _preserve_control_flow_context(tensor):
      return _subscribe(tensor, side_effects, control_cache)
def subscribe(tensors, side_effects):
  """Subscribe to a tensor.

  Attaches side-effect graphs to the given tensors. The set of tensors
  follows the `session.run` fetch format and supports a single `Tensor`,
  `list`, nested `list`, `tuple`, `namedtuple`, or `dict`. The same structure
  is returned, but with every tensor replaced by an identity clone whose
  fetch triggers the side effects.

  Each side effect is a constructor function that takes the target tensor,
  builds a side-effect graph (named under a 'subscription' scope appended to
  the tensor's name scope), and returns the list of ops that must run before
  the tensor is produced. Those ops become control dependencies of the
  returned identity tensor. Use the returned tensors in place of the
  originals for further construction or running. If a tensor has already
  been subscribed, or is itself the result of a previous `subscribe()` call,
  the existing identity op is reused and the new side effect graphs are
  appended to the existing ones.

  Args:
    tensors: `Tensor` or set of tensors to subscribe to. Set of tensors format
      follows from `Session.run` and supports single `Tensor`, `list`, nested
      `list`, `tuple`, `namedtuple`, or `dict`.
    side_effects: Function(s) that takes a `Tensor`, construct a subgraph, and
      return a nonempty list of control dependencies. This can be a single
      function or list of functions.

  Returns:
    Subscribed tensors, which are identity copies of the passed in tensors
    in the same passed in structure, but the graph has been modified
    such that these are downstream of the control dependencies for
    the side effect graphs. Use these functionally equivalent tensors
    instead of the passed in tensors for further construction or running.
  """
  # A single callable is normalized to a one-element list.
  if not hasattr(side_effects, '__iter__'):
    side_effects = [side_effects]

  control_outputs = _ControlOutputCache()

  def _subscribe_tensor(t):
    return _scoped_subscribe(t, side_effects, control_outputs)

  return _recursive_apply(tensors, _subscribe_tensor)
| apache-2.0 |
JeongJunSik/TizenRT | external/iotivity/iotivity_1.2-rel/extlibs/gtest/gtest-1.7.0/test/gtest_env_var_test.py | 2408 | 3487 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
  """Asserts that 'expected' equals 'actual', printing both values on failure.

  Args:
    expected: The expected value.
    actual: The value actually observed.

  Raises:
    AssertionError: If the two values are not equal.
  """
  if expected != actual:
    # Parenthesized print works under both Python 2 (prints the single
    # parenthesized expression) and Python 3 (print function), unlike the
    # original Python-2-only print statements.
    print('Expected: %s' % (expected,))
    print(' Actual: %s' % (actual,))
    raise AssertionError
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # Removing a variable that is not set is a no-op.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs gtest_env_var_test_ and returns its output."""
  # With flag=None the binary runs with no extra arguments.
  args = [COMMAND] if flag is None else [COMMAND, flag]
  return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""
  env_var = 'GTEST_%s' % flag.upper()
  # With the env var set the flag must report the test value; once the env
  # var is unset the flag must fall back to its built-in default.
  for env_value, expected in ((test_val, test_val), (None, default_val)):
    SetEnvVar(env_var, env_value)
    AssertEq(expected, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  """End-to-end check that GTEST_* environment variables drive flag values."""

  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    # Each call is (flag name, value set via GTEST_<FLAG>, built-in default).
    TestFlag('break_on_failure', '1', '0')
    TestFlag('color', 'yes', 'auto')
    TestFlag('filter', 'FooTest.Bar', '*')
    TestFlag('output', 'xml:tmp/foo.xml', '')
    TestFlag('print_time', '0', '1')
    TestFlag('repeat', '999', '1')
    TestFlag('throw_on_failure', '1', '0')
    TestFlag('death_test_style', 'threadsafe', 'fast')
    TestFlag('catch_exceptions', '0', '1')

    if IS_LINUX:
      # These flags are only meaningful on Linux builds.
      TestFlag('death_test_use_fork', '1', '0')
      TestFlag('stack_trace_depth', '0', '100')
# Running this file directly delegates to gtest's shared test main.
if __name__ == '__main__':
  gtest_test_utils.Main()
| apache-2.0 |
coldnew/tf700-kernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Top-level window that renders scheduler traces as scrollable rows of
    rectangles, one row per task, with zoom and keyboard navigation."""

    # Vertical offset, in pixels, of the first rectangle row.
    Y_OFFSET = 100
    # Height of each rectangle row and the vertical spacing between rows.
    RECT_HEIGHT = 100
    RECT_SPACE = 50
    # Height of the colored strip drawn on top of a rectangle to mark events.
    EVENT_MARKING_WIDTH = 5

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        """Build the GUI; the virtual canvas is sized from the trace interval."""
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        # Scroll units are scroll_scale pixels wide/high.
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        # Bind the same handlers on the container so events are caught no
        # matter which of the two windows has focus / receives them.
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        # Summary text widget; created lazily by update_summary().
        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        """Convert a duration in microseconds to pixels at the current zoom."""
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        """Convert a pixel distance back to microseconds at the current zoom."""
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        """Return the scroll origin in pixels as an (x, y) tuple."""
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        """Return the trace offset (us) currently at the left edge of the view."""
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw one rectangle in row `nr` spanning [start, end] timestamps.

        When `top_color` is not None, a thin strip of that color is first
        drawn along the top edge to mark an event."""
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            # Shrink the remaining rectangle so it sits below the event strip.
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        """Ask the tracer to redraw every rectangle inside [start, end] (view-relative us)."""
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        """Paint handler: redraw the rectangles visible in the scrolled view."""
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        """Map a y pixel to its rectangle row index, or -1 when y falls
        outside every row (e.g. in the spacing between rows)."""
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        """Replace the summary text displayed below the drawing area."""
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        """Click handler: forward the clicked (row, timestamp) to the tracer."""
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        """Recompute the virtual canvas width from the trace span and zoom."""
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        """Re-apply the scrollbars after a zoom change, keeping timestamp `x`
        anchored at the left edge of the view."""
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        # Doubling the zoom doubles pixels-per-microsecond.
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        """Keyboard handler: '+'/'-' zoom, arrow keys scroll one unit."""
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
rezasafi/spark | python/pyspark/taskcontext.py | 4 | 7271 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import write_int, UTF8Deserializer
class TaskContext(object):

    """
    Contextual information about a task which can be read or mutated during
    execution. To access the TaskContext for a running task, use:
    :meth:`TaskContext.get`.
    """

    # Singleton instance shared by every construction on a worker.
    _taskContext = None

    # Per-task attributes; populated by the worker before user code runs.
    _attemptNumber = None
    _partitionId = None
    _stageId = None
    _taskAttemptId = None
    _localProperties = None
    _resources = None

    def __new__(cls):
        """Even if users construct TaskContext instead of using get, give them the singleton."""
        taskContext = cls._taskContext
        if taskContext is not None:
            return taskContext
        cls._taskContext = taskContext = object.__new__(cls)
        return taskContext

    @classmethod
    def _getOrCreate(cls):
        """Internal function to get or create global TaskContext."""
        if cls._taskContext is None:
            cls._taskContext = TaskContext()
        return cls._taskContext

    @classmethod
    def get(cls):
        """
        Return the currently active TaskContext. This can be called inside of
        user functions to access contextual information about running tasks.

        .. note:: Must be called on the worker, not the driver. Returns None if not initialized.
        """
        return cls._taskContext

    def stageId(self):
        """The ID of the stage that this task belong to."""
        return self._stageId

    def partitionId(self):
        """
        The ID of the RDD partition that is computed by this task.
        """
        return self._partitionId

    def attemptNumber(self):
        """
        How many times this task has been attempted. The first task attempt will be assigned
        attemptNumber = 0, and subsequent attempts will have increasing attempt numbers.
        """
        # Fix: the docstring previously opened with four quote characters
        # (""""), which made the rendered help text start with a stray '"'.
        return self._attemptNumber

    def taskAttemptId(self):
        """
        An ID that is unique to this task attempt (within the same SparkContext, no two task
        attempts will share the same attempt ID). This is roughly equivalent to Hadoop's
        TaskAttemptID.
        """
        return self._taskAttemptId

    def getLocalProperty(self, key):
        """
        Get a local property set upstream in the driver, or None if it is missing.
        """
        return self._localProperties.get(key, None)

    def resources(self):
        """
        Resources allocated to the task. The key is the resource name and the value is information
        about the resource.
        """
        return self._resources
# Function code of the barrier() RPC understood by the JVM-side server.
BARRIER_FUNCTION = 1


def _load_from_socket(port, auth_secret):
    """
    Load data from a given socket, this is a blocking method thus only return when the socket
    connection has been closed.
    """
    (sockfile, sock) = local_connect_and_auth(port, auth_secret)
    # The barrier() call may block forever, so no timeout
    sock.settimeout(None)
    # Make a barrier() function call.
    write_int(BARRIER_FUNCTION, sockfile)
    sockfile.flush()
    # Collect result.
    res = UTF8Deserializer().loads(sockfile)
    # Release resources.
    sockfile.close()
    sock.close()
    return res
class BarrierTaskContext(TaskContext):

    """
    .. note:: Experimental

    A :class:`TaskContext` with extra contextual info and tooling for tasks in a barrier stage.
    Use :func:`BarrierTaskContext.get` to obtain the barrier context for a running barrier task.

    .. versionadded:: 2.4.0
    """

    # Connection details of the local barrier coordinator; populated by
    # _initialize() before any barrier() / getTaskInfos() call.
    _port = None
    _secret = None

    @classmethod
    def _getOrCreate(cls):
        """
        Internal function to get or create global BarrierTaskContext. We need to make sure
        BarrierTaskContext is returned from here because it is needed in python worker reuse
        scenario, see SPARK-25921 for more details.
        """
        # Replace a plain TaskContext singleton (left over from a reused
        # worker) with a BarrierTaskContext instance.
        if not isinstance(cls._taskContext, BarrierTaskContext):
            cls._taskContext = object.__new__(cls)
        return cls._taskContext

    @classmethod
    def get(cls):
        """
        .. note:: Experimental

        Return the currently active :class:`BarrierTaskContext`.
        This can be called inside of user functions to access contextual information about
        running tasks.

        .. note:: Must be called on the worker, not the driver. Returns None if not initialized.
        """
        return cls._taskContext

    @classmethod
    def _initialize(cls, port, secret):
        """
        Initialize BarrierTaskContext, other methods within BarrierTaskContext can only be called
        after BarrierTaskContext is initialized.
        """
        cls._port = port
        cls._secret = secret

    def barrier(self):
        """
        .. note:: Experimental

        Sets a global barrier and waits until all tasks in this stage hit this barrier.
        Similar to `MPI_Barrier` function in MPI, this function blocks until all tasks
        in the same stage have reached this routine.

        .. warning:: In a barrier stage, each task much have the same number of `barrier()`
            calls, in all possible code branches.
            Otherwise, you may get the job hanging or a SparkException after timeout.

        .. versionadded:: 2.4.0
        """
        if self._port is None or self._secret is None:
            raise Exception("Not supported to call barrier() before initialize " +
                            "BarrierTaskContext.")
        else:
            # Blocks until the coordinator releases all tasks in the stage.
            _load_from_socket(self._port, self._secret)

    def getTaskInfos(self):
        """
        .. note:: Experimental

        Returns :class:`BarrierTaskInfo` for all tasks in this barrier stage,
        ordered by partition ID.

        .. versionadded:: 2.4.0
        """
        if self._port is None or self._secret is None:
            raise Exception("Not supported to call getTaskInfos() before initialize " +
                            "BarrierTaskContext.")
        else:
            # Executor addresses are passed down by the scheduler as a
            # comma-separated local property.
            addresses = self._localProperties.get("addresses", "")
            return [BarrierTaskInfo(h.strip()) for h in addresses.split(",")]
class BarrierTaskInfo(object):
    """
    .. note:: Experimental

    Carries all task infos of a barrier task.

    :var address: The IPv4 address (host:port) of the executor that the barrier task is running on

    .. versionadded:: 2.4.0
    """

    def __init__(self, address):
        self.address = address

    def __repr__(self):
        # Make lists returned by getTaskInfos() readable when printed/logged.
        return 'BarrierTaskInfo(address=%r)' % (self.address,)
| apache-2.0 |
jaywink/shoop | shoop_tests/front/test_customer_information.py | 6 | 1891 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
import pytest
from shoop.testing.factories import get_default_shop
from shoop.testing.soup_utils import extract_form_fields
from shoop_tests.utils import SmartClient
@pytest.mark.django_db
def test_new_user_information_edit():
    """End-to-end test: a fresh user's customer-edit form is pre-filled from
    the account, and POSTing updated data persists the new e-mail address."""
    client = SmartClient()
    get_default_shop()

    # create new user
    user_password = "niilo"
    user = get_user_model().objects.create_user(
        username="Niilo_Nyyppa",
        email="niilo@example.shoop.io",
        password=user_password,
        first_name="Niilo",
        last_name="Nyyppä",
    )

    client.login(username=user.username, password=user_password)

    # make sure all information matches in form
    customer_edit_url = reverse("shoop:customer_edit")
    soup = client.soup(customer_edit_url)
    assert soup.find(attrs={"name": "contact-email"})["value"] == user.email
    assert soup.find(attrs={"name": "contact-name"})["value"] == user.get_full_name()

    # Test POSTing
    form = extract_form_fields(soup)
    new_email = "nyyppa@example.shoop.io"
    form["contact-email"] = new_email
    form["contact-country"] = "FI"
    # Billing and shipping addresses need minimal valid data for the form to
    # validate.
    for prefix in ("billing", "shipping"):
        form["%s-city" % prefix] = "test-city"
        form["%s-email" % prefix] = new_email
        form["%s-street" % prefix] = "test-street"
        form["%s-country" % prefix] = "FI"

    response, soup = client.response_and_soup(customer_edit_url, form, "post")
    # A successful save redirects (302) and the e-mail change is persisted.
    assert response.status_code == 302
    assert get_user_model().objects.get(pk=user.pk).email == new_email
| agpl-3.0 |
mbrubeck/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/_pytest/warnings.py | 16 | 3164 | from __future__ import absolute_import, division, print_function
import warnings
from contextlib import contextmanager
import pytest
from _pytest import compat
def _setoption(wmod, arg):
"""
Copy of the warning._setoption function but does not escape arguments.
"""
parts = arg.split(':')
if len(parts) > 5:
raise wmod._OptionError("too many fields (max 5): %r" % (arg,))
while len(parts) < 5:
parts.append('')
action, message, category, module, lineno = [s.strip()
for s in parts]
action = wmod._getaction(action)
category = wmod._getcategory(category)
if lineno:
try:
lineno = int(lineno)
if lineno < 0:
raise ValueError
except (ValueError, OverflowError):
raise wmod._OptionError("invalid lineno %r" % (lineno,))
else:
lineno = 0
wmod.filterwarnings(action, message, category, module, lineno)
def pytest_addoption(parser):
    """Register the -W/--pythonwarnings CLI option and the 'filterwarnings'
    ini option that this plugin consumes in catch_warnings_for_item()."""
    group = parser.getgroup("pytest-warnings")
    group.addoption(
        '-W', '--pythonwarnings', action='append',
        help="set which warnings to report, see -W option of python itself.")
    parser.addini("filterwarnings", type="linelist",
                  help="Each line specifies a pattern for "
                  "warnings.filterwarnings. "
                  "Processed after -W and --pythonwarnings.")
@contextmanager
def catch_warnings_for_item(item):
    """
    catches the warnings generated during setup/call/teardown execution
    of the given item and after it is done posts them as warnings to this
    item.
    """
    args = item.config.getoption('pythonwarnings') or []
    inifilters = item.config.getini("filterwarnings")
    with warnings.catch_warnings(record=True) as log:
        # Filters are applied in precedence order: -W command line options,
        # then ini-file 'filterwarnings', then @pytest.mark.filterwarnings.
        for arg in args:
            warnings._setoption(arg)

        for arg in inifilters:
            # Our copy of _setoption is used here so arguments are not escaped.
            _setoption(warnings, arg)

        mark = item.get_marker('filterwarnings')
        if mark:
            for arg in mark.args:
                warnings._setoption(arg)

        yield

    # Re-report every warning recorded during the item's execution.
    for warning in log:
        warn_msg = warning.message
        unicode_warning = False

        # On Python 2, warning arguments may contain unicode that cannot be
        # formatted as ascii; escape them and remember to notify about it.
        if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args):
            new_args = []
            for m in warn_msg.args:
                new_args.append(compat.ascii_escaped(m) if isinstance(m, compat.UNICODE_TYPES) else m)
            unicode_warning = list(warn_msg.args) != new_args
            warn_msg.args = new_args

        msg = warnings.formatwarning(
            warn_msg, warning.category,
            warning.filename, warning.lineno, warning.line)
        item.warn("unused", msg)

        if unicode_warning:
            warnings.warn(
                "Warning is using unicode non convertible to ascii, "
                "converting to a safe representation:\n %s" % msg,
                UnicodeWarning)
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_protocol(item):
    """Wrap the whole runtest protocol of ``item`` so that warnings raised
    during setup/call/teardown are captured and re-reported on the item."""
    with catch_warnings_for_item(item):
        yield
| mpl-2.0 |
devendermishrajio/nova_test_latest | nova/tests/unit/objects/test_migration.py | 12 | 8947 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import timeutils
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import migration
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_objects
NOW = timeutils.utcnow().replace(microsecond=0)
def fake_db_migration(**updates):
    """Return a fake migration DB row; keyword arguments override defaults."""
    row = {
        'created_at': NOW,
        'updated_at': None,
        'deleted_at': None,
        'deleted': False,
        'id': 123,
        'source_compute': 'compute-source',
        'dest_compute': 'compute-dest',
        'source_node': 'node-source',
        'dest_node': 'node-dest',
        'dest_host': 'host-dest',
        'old_instance_type_id': 42,
        'new_instance_type_id': 84,
        'instance_uuid': 'fake-uuid',
        'status': 'migrating',
        'migration_type': 'resize',
        'hidden': False,
    }
    row.update(updates)
    return row
class _TestMigrationObject(object):
    def test_get_by_id(self):
        """Migration.get_by_id wraps db.migration_get and hydrates the object."""
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        self.mox.StubOutWithMock(db, 'migration_get')
        db.migration_get(ctxt, fake_migration['id']).AndReturn(fake_migration)
        self.mox.ReplayAll()
        mig = migration.Migration.get_by_id(ctxt, fake_migration['id'])
        self.compare_obj(mig, fake_migration)
    def test_get_by_instance_and_status(self):
        """get_by_instance_and_status delegates to the matching DB API call."""
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        self.mox.StubOutWithMock(db, 'migration_get_by_instance_and_status')
        # NOTE(review): the DB API expects an instance uuid here but the test
        # passes the migration id; stub and call agree so the test still
        # passes — confirm intent before changing.
        db.migration_get_by_instance_and_status(ctxt,
                                                fake_migration['id'],
                                                'migrating'
                                                ).AndReturn(fake_migration)
        self.mox.ReplayAll()
        mig = migration.Migration.get_by_instance_and_status(
            ctxt, fake_migration['id'], 'migrating')
        self.compare_obj(mig, fake_migration)
    def test_create(self):
        """create() persists the set fields and refreshes from the DB row."""
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        self.mox.StubOutWithMock(db, 'migration_create')
        # Only the explicitly set field should be sent to the DB layer.
        db.migration_create(ctxt, {'source_compute': 'foo'}).AndReturn(
            fake_migration)
        self.mox.ReplayAll()
        mig = migration.Migration(context=ctxt)
        mig.source_compute = 'foo'
        mig.create()
        self.assertEqual(fake_migration['dest_compute'], mig.dest_compute)
    def test_recreate_fails(self):
        """Calling create() twice on the same object raises ObjectActionError."""
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        self.mox.StubOutWithMock(db, 'migration_create')
        db.migration_create(ctxt, {'source_compute': 'foo'}).AndReturn(
            fake_migration)
        self.mox.ReplayAll()
        mig = migration.Migration(context=ctxt)
        mig.source_compute = 'foo'
        mig.create()
        self.assertRaises(exception.ObjectActionError, mig.create)
    def test_save(self):
        """save() sends only changed fields to db.migration_update."""
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        self.mox.StubOutWithMock(db, 'migration_update')
        db.migration_update(ctxt, 123, {'source_compute': 'foo'}
                            ).AndReturn(fake_migration)
        self.mox.ReplayAll()
        mig = migration.Migration(context=ctxt)
        mig.id = 123
        mig.source_compute = 'foo'
        mig.save()
        # The object is refreshed from the row returned by the DB layer.
        self.assertEqual(fake_migration['dest_compute'], mig.dest_compute)
    def test_instance(self):
        """Migration.instance lazily loads the instance via its uuid."""
        ctxt = context.get_admin_context()
        fake_migration = fake_db_migration()
        fake_inst = fake_instance.fake_db_instance()
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(ctxt, fake_migration['instance_uuid'],
                                columns_to_join=['info_cache',
                                                 'security_groups'],
                                use_slave=False
                                ).AndReturn(fake_inst)
        mig = migration.Migration._from_db_object(ctxt,
                                                  migration.Migration(),
                                                  fake_migration)
        mig._context = ctxt
        self.mox.ReplayAll()
        # Accessing .instance triggers the stubbed DB lookup.
        self.assertEqual(mig.instance.host, fake_inst['host'])
def test_get_unconfirmed_by_dest_compute(self):
ctxt = context.get_admin_context()
fake_migration = fake_db_migration()
db_migrations = [fake_migration, dict(fake_migration, id=456)]
self.mox.StubOutWithMock(
db, 'migration_get_unconfirmed_by_dest_compute')
db.migration_get_unconfirmed_by_dest_compute(
ctxt, 'window', 'foo',
use_slave=False).AndReturn(db_migrations)
self.mox.ReplayAll()
migrations = (
migration.MigrationList.get_unconfirmed_by_dest_compute(
ctxt, 'window', 'foo', use_slave=False))
self.assertEqual(2, len(migrations))
for index, db_migration in enumerate(db_migrations):
self.compare_obj(migrations[index], db_migration)
def test_get_in_progress_by_host_and_node(self):
ctxt = context.get_admin_context()
fake_migration = fake_db_migration()
db_migrations = [fake_migration, dict(fake_migration, id=456)]
self.mox.StubOutWithMock(
db, 'migration_get_in_progress_by_host_and_node')
db.migration_get_in_progress_by_host_and_node(
ctxt, 'host', 'node').AndReturn(db_migrations)
self.mox.ReplayAll()
migrations = (
migration.MigrationList.get_in_progress_by_host_and_node(
ctxt, 'host', 'node'))
self.assertEqual(2, len(migrations))
for index, db_migration in enumerate(db_migrations):
self.compare_obj(migrations[index], db_migration)
def test_get_by_filters(self):
ctxt = context.get_admin_context()
fake_migration = fake_db_migration()
db_migrations = [fake_migration, dict(fake_migration, id=456)]
self.mox.StubOutWithMock(
db, 'migration_get_all_by_filters')
filters = {'foo': 'bar'}
db.migration_get_all_by_filters(ctxt, filters).AndReturn(db_migrations)
self.mox.ReplayAll()
migrations = migration.MigrationList.get_by_filters(ctxt, filters)
self.assertEqual(2, len(migrations))
for index, db_migration in enumerate(db_migrations):
self.compare_obj(migrations[index], db_migration)
def test_migrate_old_resize_record(self):
db_migration = dict(fake_db_migration(), migration_type=None)
with mock.patch('nova.db.migration_get') as fake_get:
fake_get.return_value = db_migration
mig = objects.Migration.get_by_id(context.get_admin_context(), 1)
self.assertTrue(mig.obj_attr_is_set('migration_type'))
self.assertEqual('resize', mig.migration_type)
def test_migrate_old_migration_record(self):
db_migration = dict(
fake_db_migration(), migration_type=None,
old_instance_type_id=1, new_instance_type_id=1)
with mock.patch('nova.db.migration_get') as fake_get:
fake_get.return_value = db_migration
mig = objects.Migration.get_by_id(context.get_admin_context(), 1)
self.assertTrue(mig.obj_attr_is_set('migration_type'))
self.assertEqual('migration', mig.migration_type)
def test_migrate_unset_type_resize(self):
mig = objects.Migration(old_instance_type_id=1,
new_instance_type_id=2)
self.assertEqual('resize', mig.migration_type)
self.assertTrue(mig.obj_attr_is_set('migration_type'))
def test_migrate_unset_type_migration(self):
mig = objects.Migration(old_instance_type_id=1,
new_instance_type_id=1)
self.assertEqual('migration', mig.migration_type)
self.assertTrue(mig.obj_attr_is_set('migration_type'))
class TestMigrationObject(test_objects._LocalTest,
                          _TestMigrationObject):
    """Run the shared _TestMigrationObject cases against the local backend."""
    pass
class TestRemoteMigrationObject(test_objects._RemoteTest,
                                _TestMigrationObject):
    """Run the shared _TestMigrationObject cases through the remote path."""
    pass
| apache-2.0 |
danieljaouen/ansible | lib/ansible/modules/cloud/openstack/os_quota.py | 23 | 15753 | #!/usr/bin/python
# Copyright (c) 2016 Pason System Corporation
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_quota
short_description: Manage OpenStack Quotas
extends_documentation_fragment: openstack
version_added: "2.3"
author: "Michael Gale (gale.michael@gmail.com)"
description:
- Manage OpenStack Quotas. Quotas can be created,
updated or deleted using this module. A quota will be updated
      if it matches an existing project and is present.
options:
name:
description:
- Name of the OpenStack Project to manage.
required: true
state:
description:
- A value of present sets the quota and a value of absent resets the quota to system defaults.
default: present
backup_gigabytes:
description: Maximum size of backups in GB's.
backups:
description: Maximum number of backups allowed.
cores:
description: Maximum number of CPU's per project.
fixed_ips:
description: Number of fixed IP's to allow.
floating_ips:
description: Number of floating IP's to allow in Compute.
aliases: ['compute_floating_ips']
floatingip:
description: Number of floating IP's to allow in Network.
aliases: ['network_floating_ips']
gigabytes:
description: Maximum volume storage allowed for project.
gigabytes_lvm:
description: Maximum size in GB's of individual lvm volumes.
injected_file_size:
description: Maximum file size in bytes.
injected_files:
description: Number of injected files to allow.
injected_path_size:
description: Maximum path size.
instances:
description: Maximum number of instances allowed.
key_pairs:
description: Number of key pairs to allow.
loadbalancer:
description: Number of load balancers to allow.
version_added: "2.4"
network:
description: Number of networks to allow.
per_volume_gigabytes:
description: Maximum size in GB's of individual volumes.
pool:
description: Number of load balancer pools to allow.
version_added: "2.4"
port:
description: Number of Network ports to allow, this needs to be greater than the instances limit.
properties:
description: Number of properties to allow.
ram:
description: Maximum amount of ram in MB to allow.
rbac_policy:
description: Number of policies to allow.
router:
description: Number of routers to allow.
security_group_rule:
description: Number of rules per security group to allow.
security_group:
description: Number of security groups to allow.
server_group_members:
description: Number of server group members to allow.
server_groups:
description: Number of server groups to allow.
snapshots:
description: Number of snapshots to allow.
snapshots_lvm:
description: Number of LVM snapshots to allow.
subnet:
description: Number of subnets to allow.
subnetpool:
description: Number of subnet pools to allow.
volumes:
description: Number of volumes to allow.
volumes_lvm:
description: Number of LVM volumes to allow.
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements:
- "python >= 2.7"
- "openstacksdk >= 0.13.0"
'''
EXAMPLES = '''
# List a Project Quota
- os_quota:
cloud: mycloud
name: demoproject
# Set a Project back to the defaults
- os_quota:
cloud: mycloud
name: demoproject
state: absent
# Update a Project Quota for cores
- os_quota:
cloud: mycloud
name: demoproject
cores: 100
# Update a Project Quota
- os_quota:
name: demoproject
cores: 1000
volumes: 20
volumes_type:
- volume_lvm: 10
# Complete example based on list of projects
- name: Update quotas
os_quota:
name: "{{ item.name }}"
backup_gigabytes: "{{ item.backup_gigabytes }}"
backups: "{{ item.backups }}"
cores: "{{ item.cores }}"
fixed_ips: "{{ item.fixed_ips }}"
floating_ips: "{{ item.floating_ips }}"
floatingip: "{{ item.floatingip }}"
gigabytes: "{{ item.gigabytes }}"
injected_file_size: "{{ item.injected_file_size }}"
injected_files: "{{ item.injected_files }}"
injected_path_size: "{{ item.injected_path_size }}"
instances: "{{ item.instances }}"
key_pairs: "{{ item.key_pairs }}"
loadbalancer: "{{ item.loadbalancer }}"
per_volume_gigabytes: "{{ item.per_volume_gigabytes }}"
pool: "{{ item.pool }}"
port: "{{ item.port }}"
properties: "{{ item.properties }}"
ram: "{{ item.ram }}"
security_group_rule: "{{ item.security_group_rule }}"
security_group: "{{ item.security_group }}"
server_group_members: "{{ item.server_group_members }}"
server_groups: "{{ item.server_groups }}"
snapshots: "{{ item.snapshots }}"
volumes: "{{ item.volumes }}"
volumes_types:
volumes_lvm: "{{ item.volumes_lvm }}"
snapshots_types:
snapshots_lvm: "{{ item.snapshots_lvm }}"
gigabytes_types:
gigabytes_lvm: "{{ item.gigabytes_lvm }}"
with_items:
- "{{ projects }}"
when: item.state == "present"
'''
# Documentation of the module's return payload (Ansible RETURN block).
RETURN = '''
openstack_quotas:
    description: Dictionary describing the project quota.
    returned: Regardless of whether changes were made or not
    type: complex
    contains:
        openstack_quotas: {
            compute: {
                cores: 150,
                fixed_ips: -1,
                floating_ips: 10,
                injected_file_content_bytes: 10240,
                injected_file_path_bytes: 255,
                injected_files: 5,
                instances: 100,
                key_pairs: 100,
                metadata_items: 128,
                ram: 153600,
                security_group_rules: 20,
                security_groups: 10,
                server_group_members: 10,
                server_groups: 10
            },
            network: {
                floatingip: 50,
                loadbalancer: 10,
                network: 10,
                pool: 10,
                port: 160,
                rbac_policy: 10,
                router: 10,
                security_group: 10,
                security_group_rule: 100,
                subnet: 10,
                subnetpool: -1
            },
            volume: {
                backup_gigabytes: 1000,
                backups: 10,
                gigabytes: 1000,
                gigabytes_lvm: -1,
                per_volume_gigabytes: -1,
                snapshots: 10,
                snapshots_lvm: -1,
                volumes: 10,
                volumes_lvm: -1
            }
        }
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _get_volume_quotas(cloud, project):
    """Return the block-storage (volume) quotas for *project* from the cloud."""
    return cloud.get_volume_quotas(project)
def _get_network_quotas(cloud, project):
    """Return the network quotas for *project* from the cloud."""
    return cloud.get_network_quotas(project)
def _get_compute_quotas(cloud, project):
    """Return the compute quotas for *project* from the cloud."""
    return cloud.get_compute_quotas(project)
def _get_quotas(sdk, module, cloud, project):
    """Collect the volume, network and compute quotas for *project*.

    Volume and network endpoints may be absent from the cloud; in that
    case a warning is emitted and the corresponding section is omitted.
    Compute quotas are always queried. Every section is scrubbed of SDK
    bookkeeping attributes before being returned.
    """
    quotas = {}
    optional_sections = (
        ('volume', _get_volume_quotas,
         "No public endpoint for volumev2 service was found. Ignoring volume quotas."),
        ('network', _get_network_quotas,
         "No public endpoint for network service was found. Ignoring network quotas."),
    )
    for section, getter, warning in optional_sections:
        try:
            quotas[section] = getter(cloud, project)
        except sdk.exceptions.OpenStackCloudURINotFound:
            module.warn(warning)
    quotas['compute'] = _get_compute_quotas(cloud, project)
    return {section: _scrub_results(values)
            for section, values in quotas.items()}
def _scrub_results(quota):
filter_attr = [
'HUMAN_ID',
'NAME_ATTR',
'human_id',
'request_ids',
'x_openstack_request_ids',
]
for attr in filter_attr:
if attr in quota:
del quota[attr]
return quota
def _system_state_change_details(module, project_quota_output):
quota_change_request = {}
changes_required = False
for quota_type in project_quota_output.keys():
for quota_option in project_quota_output[quota_type].keys():
if quota_option in module.params and module.params[quota_option] is not None:
if project_quota_output[quota_type][quota_option] != module.params[quota_option]:
changes_required = True
if quota_type not in quota_change_request:
quota_change_request[quota_type] = {}
quota_change_request[quota_type][quota_option] = module.params[quota_option]
return (changes_required, quota_change_request)
def _system_state_change(module, project_quota_output):
    """Determine if changes are required to the current project quota.

    This is done by comparing the current project_quota_output against
    the desired quota settings set on the module params.

    Returns True when at least one quota option differs, else False.
    """
    # The details helper already computes the boolean; the change request
    # is not needed here, so return the flag directly instead of the
    # original redundant ``if changes_required: return True`` branch.
    changes_required, _ = _system_state_change_details(
        module,
        project_quota_output
    )
    return changes_required
def main():
    """Module entry point: set, update or reset OpenStack project quotas.

    With ``state=present`` only the options that differ from the current
    quota values are pushed to the relevant service (compute, network,
    volume); with ``state=absent`` every quota type is deleted so the
    project falls back to the system defaults. Exits via
    module.exit_json/fail_json in all paths.
    """
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        state=dict(default='present', choices=['absent', 'present']),
        backup_gigabytes=dict(required=False, type='int', default=None),
        backups=dict(required=False, type='int', default=None),
        cores=dict(required=False, type='int', default=None),
        fixed_ips=dict(required=False, type='int', default=None),
        floating_ips=dict(required=False, type='int', default=None, aliases=['compute_floating_ips']),
        floatingip=dict(required=False, type='int', default=None, aliases=['network_floating_ips']),
        gigabytes=dict(required=False, type='int', default=None),
        gigabytes_types=dict(required=False, type='dict', default={}),
        injected_file_size=dict(required=False, type='int', default=None),
        injected_files=dict(required=False, type='int', default=None),
        injected_path_size=dict(required=False, type='int', default=None),
        instances=dict(required=False, type='int', default=None),
        key_pairs=dict(required=False, type='int', default=None),
        loadbalancer=dict(required=False, type='int', default=None),
        network=dict(required=False, type='int', default=None),
        per_volume_gigabytes=dict(required=False, type='int', default=None),
        pool=dict(required=False, type='int', default=None),
        port=dict(required=False, type='int', default=None),
        project=dict(required=False, type='int', default=None),
        properties=dict(required=False, type='int', default=None),
        ram=dict(required=False, type='int', default=None),
        rbac_policy=dict(required=False, type='int', default=None),
        router=dict(required=False, type='int', default=None),
        security_group_rule=dict(required=False, type='int', default=None),
        security_group=dict(required=False, type='int', default=None),
        server_group_members=dict(required=False, type='int', default=None),
        server_groups=dict(required=False, type='int', default=None),
        snapshots=dict(required=False, type='int', default=None),
        snapshots_types=dict(required=False, type='dict', default={}),
        subnet=dict(required=False, type='int', default=None),
        subnetpool=dict(required=False, type='int', default=None),
        volumes=dict(required=False, type='int', default=None),
        volumes_types=dict(required=False, type='dict', default={})
    )
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True
                           )
    sdk, cloud = openstack_cloud_from_module(module)
    try:
        cloud_params = dict(module.params)
        # In order to handle the different volume types we update module params after.
        # Each *_types dict is flattened so its per-type keys (e.g.
        # volumes_lvm) become regular params that the diff logic can see.
        dynamic_types = [
            'gigabytes_types',
            'snapshots_types',
            'volumes_types',
        ]
        for dynamic_type in dynamic_types:
            for k, v in module.params[dynamic_type].items():
                module.params[k] = int(v)
        # Get current quota values
        project_quota_output = _get_quotas(
            sdk, module, cloud, cloud_params['name'])
        changes_required = False
        if module.params['state'] == "absent":
            # If a quota state is set to absent we should assume there will be changes.
            # The default quota values are not accessible so we can not determine if
            # no changes will occur or not.
            if module.check_mode:
                module.exit_json(changed=True)
            # Calling delete_network_quotas when a quota has not been set results
            # in an error, according to the sdk docs it should return the
            # current quota.
            # The following error string is returned:
            # network client call failed: Quota for tenant 69dd91d217e949f1a0b35a4b901741dc could not be found.
            neutron_msg1 = "network client call failed: Quota for tenant"
            neutron_msg2 = "could not be found"
            for quota_type in project_quota_output.keys():
                quota_call = getattr(cloud, 'delete_%s_quotas' % (quota_type))
                try:
                    quota_call(cloud_params['name'])
                except sdk.exceptions.OpenStackCloudException as e:
                    error_msg = str(e)
                    # Tolerate the known "quota not found" neutron error;
                    # anything else is fatal.
                    if error_msg.find(neutron_msg1) > -1 and error_msg.find(neutron_msg2) > -1:
                        pass
                    else:
                        module.fail_json(msg=str(e), extra_data=e.extra_data)
            # Re-read quotas so the module reports the post-reset defaults.
            project_quota_output = _get_quotas(
                sdk, module, cloud, cloud_params['name'])
            changes_required = True
        elif module.params['state'] == "present":
            if module.check_mode:
                module.exit_json(changed=_system_state_change(module, project_quota_output))
            changes_required, quota_change_request = _system_state_change_details(
                module,
                project_quota_output
            )
            if changes_required:
                for quota_type in quota_change_request.keys():
                    quota_call = getattr(cloud, 'set_%s_quotas' % (quota_type))
                    quota_call(cloud_params['name'], **quota_change_request[quota_type])
                # Get quota state post changes for validation
                project_quota_update = _get_quotas(
                    sdk, module, cloud, cloud_params['name'])
                if project_quota_output == project_quota_update:
                    module.fail_json(msg='Could not apply quota update')
                project_quota_output = project_quota_update
        module.exit_json(changed=changes_required,
                         openstack_quotas=project_quota_output
                         )
    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e), extra_data=e.extra_data)
# Standard Ansible module entry point when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.