repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
2ndy/RaspIM | usr/lib/python2.7/xml/dom/domreg.py | 238 | 3478 | """Registration facilities for DOM. This module should not be used
directly. Instead, the functions getDOMImplementation and
registerDOMImplementation should be imported from xml.dom."""
from xml.dom.minicompat import * # isinstance, StringTypes
# This is a list of well-known implementations. Well-known names
# should be published by posting to xml-sig@python.org, and are
# subsequently recorded in this file.
# Maps each well-known implementation name to the dotted path of the module
# providing it; that module is expected to expose a getDOMImplementation()
# factory function.
well_known_implementations = {
    'minidom': 'xml.dom.minidom',
    '4DOM': 'xml.dom.DOMImplementation',
}

# DOM implementations not officially registered should register
# themselves with their name via registerDOMImplementation() below.
registered = {}
def registerDOMImplementation(name, factory):
    """Record `factory` under `name` in the module-level registry.

    `factory` must be a zero-argument callable returning an object that
    implements the DOMImplementation interface; it may hand back a shared
    instance every time or build a fresh, customized one per call.
    """
    registered[name] = factory
def _good_enough(dom, features):
"_good_enough(dom, features) -> Return 1 if the dom offers the features"
for f,v in features:
if not dom.hasFeature(f,v):
return 0
return 1
def getDOMImplementation(name=None, features=()):
    """getDOMImplementation(name = None, features = ()) -> DOM implementation.

    Return a suitable DOM implementation. The name is either
    well-known, the module name of a DOM implementation, or None. If
    it is not None, imports the corresponding module and returns
    DOMImplementation object if the import succeeds.

    If name is not given, consider the available implementations to
    find one with the required feature set. If no implementation can
    be found, raise an ImportError. The features list must be a sequence
    of (feature, version) pairs which are passed to hasFeature.
    """
    import os
    creator = None
    mod = well_known_implementations.get(name)
    if mod:
        # Well-known name: import the named module and delegate to its
        # getDOMImplementation() factory.
        mod = __import__(mod, {}, {}, ['getDOMImplementation'])
        return mod.getDOMImplementation()
    elif name:
        # Not well-known, so it must have been registered explicitly.
        return registered[name]()
    elif "PYTHON_DOM" in os.environ:
        # Fall back to the implementation named by the environment.
        return getDOMImplementation(name=os.environ["PYTHON_DOM"])

    # User did not specify a name, try implementations in arbitrary
    # order, returning the one that has the required features
    if isinstance(features, StringTypes):
        features = _parse_feature_string(features)
    for creator in registered.values():
        dom = creator()
        if _good_enough(dom, features):
            return dom

    for creator in well_known_implementations.keys():
        try:
            dom = getDOMImplementation(name=creator)
        except Exception:  # typically ImportError, or AttributeError
            # NOTE: was ``except StandardError`` (Python 2-only name);
            # Exception covers the same failure modes on both versions.
            continue
        if _good_enough(dom, features):
            return dom

    # Python 3-compatible raise syntax (also valid on Python 2); the
    # original used the Python 2-only ``raise E, msg`` form.
    raise ImportError("no suitable DOM implementation found")
def _parse_feature_string(s):
features = []
parts = s.split()
i = 0
length = len(parts)
while i < length:
feature = parts[i]
if feature[0] in "0123456789":
raise ValueError, "bad feature name: %r" % (feature,)
i = i + 1
version = None
if i < length:
v = parts[i]
if v[0] in "0123456789":
i = i + 1
version = v
features.append((feature, version))
return tuple(features)
| gpl-2.0 |
lordsutch/MUTCDSpeedSigns | signmatch.py | 1 | 4877 | #!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This example program shows how you can use dlib to make an object
# detector for things like faces, pedestrians, and any other semi-rigid
# object. In particular, we go though the steps to train the kind of sliding
# window object detector first published by Dalal and Triggs in 2005 in the
# paper Histograms of Oriented Gradients for Human Detection.
#
# COMPILING THE DLIB PYTHON INTERFACE
# Dlib comes with a compiled python interface for python 2.7 on MS Windows. If
# you are using another python version or operating system then you need to
# compile the dlib python interface before you can use this file. To do this,
# run compile_dlib_python_module.bat. This should work on any operating
# system so long as you have CMake and boost-python installed.
# On Ubuntu, this can be done easily by running the command:
# sudo apt-get install libboost-python-dev cmake
#
# Also note that this example requires scikit-image which can be installed
# via the command:
# pip install -U scikit-image
# Or downloaded from http://scikit-image.org/download.html.
from __future__ import print_function
from multiprocessing import Pool
from functools import partial
import multiprocessing
import os
import sys
import glob
import warnings
import dlib
import skimage
import argparse
import itertools
import traceback
import uuid
# Now let's use the detector as you would in a normal application. First we
# will load it from disk.
# Path to the trained HOG detector model (produced by dlib's
# train_object_detector tooling).
TRAINING = 'speedlimits.svm'
detector = dlib.simple_object_detector(TRAINING)

# from skimage.transform import rescale, pyramid_expand
from skimage import io
from skimage.color import rgb2gray
from skimage.io import imread
from skimage import img_as_ubyte
def process_file(f, verbose=False, link=None):
    """Run the speed-sign detector over the image file `f`.

    Returns the dlib detections found in the image, or an empty list if the
    file does not exist or contains no signs.  If `link` names a directory,
    files containing detections are hard-linked (with a symlink fallback)
    into that directory, with a random prefix added on name clashes.
    """
    if not os.path.exists(f):
        # Skip if not readable
        return []

    # Cheat a bit to improve upsampling speed here... grayscale is faster
    img = imread(f)
    img = rgb2gray(img)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        img = img_as_ubyte(img)

    dets = detector(img, 1)  # Upsampling improves detection IME
    if dets:  # We found a sign (or more!)
        if verbose:
            print('Found', len(dets), 'sign(s) in', f,
                  [str(x) for x in dets], file=sys.stderr)
        if link:
            # os.path.isdir() implies os.path.exists(), so a single
            # existence check suffices before creating the directory.
            if not os.path.exists(link):
                os.makedirs(link, 0o700)
            fnamebase = os.path.basename(f)
            linkname = os.path.join(link, fnamebase)
            # Add a random prefix to avoid clashes, if needed
            while os.path.exists(linkname):
                linkname = os.path.join(link, str(uuid.uuid4()) + '-' + fnamebase)
            try:
                os.link(f, linkname)
            except OSError:
                # Hard link failed (e.g. cross-device); fall back to symlink.
                try:
                    os.symlink(f, linkname)
                except OSError:
                    print('Unable to link', f, 'to', link, file=sys.stderr)
                    # BUG FIX: was misspelled ``trackback.print_exc()``,
                    # which raised NameError instead of printing the trace.
                    traceback.print_exc()
        return dets

    if verbose:
        print('No signs in', f, file=sys.stderr)
    return []
if __name__ == '__main__':
    # Use half the CPUs by default.  BUG FIX: ``cpu_count()/2`` yields a
    # float under Python 3 true division and Pool() requires an int; floor
    # divide and never go below one worker.
    NCPUS = max(1, multiprocessing.cpu_count() // 2)

    parser = argparse.ArgumentParser(description='detect images matching pattern')
    parser.add_argument('-s', '--show-filter', dest='showfilter',
                        default=False, action='store_true',
                        help='show the filter that will be applied')
    parser.add_argument('-v', '--verbose', dest='verbose', default=False,
                        action='store_true', help='include extra output to stderr')
    parser.add_argument('-l', '--link', dest='link', default=None,
                        action='store', type=str, metavar='DIR',
                        help='link found files to DIR')
    parser.add_argument('-j', '--parallel', dest='ncpus', default=NCPUS,
                        action='store', type=int, metavar='JOBS',
                        help='how many parallel tasks to run')
    parser.add_argument('files', metavar='FILE', type=str, nargs="*",
                        help='files to scan')
    args = parser.parse_args()

    if args.showfilter:
        # We can look at the HOG filter we learned.
        win_det = dlib.image_window()
        win_det.set_image(detector)
        dlib.hit_enter_to_continue()
        sys.exit(0)

    # Expand any shell-style glob patterns among the file arguments.
    filenames = []
    for bit in args.files:
        filenames.extend(glob.glob(bit))

    partial_process = partial(process_file, verbose=args.verbose, link=args.link)
    p = Pool(args.ncpus)  # Number of parallel processes to run
    status = p.map(partial_process, filenames)
    # Print only filenames whose detection list was non-empty.
    print("\n".join(itertools.compress(filenames, status)))
| mit |
tinkercnc/linuxcnc-mirror-old | tests/io-startup/test-ui.py | 6 | 1343 | #!/usr/bin/env python
import linuxcnc_util
import hal
import time
import sys
import os
# this is how long we wait for linuxcnc to do our bidding
timeout = 5.0

# unbuffer stdout
# NOTE(review): buffer size 0 on a text-mode fdopen is Python 2-only; the
# whole script uses Python 2 syntax, so this is consistent — confirm if
# ever ported to Python 3.
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
def wait_for_pin_value(pin_name, value, timeout=5.0):
    """Poll HAL pin `pin_name` on the global component `h` until it equals
    `value`, checking every 100 ms.

    Exits the whole process with status 1 if the pin does not reach the
    target value within `timeout` seconds.
    """
    start_time = time.time()
    while (time.time() - start_time) < timeout:
        time.sleep(0.1)
        if h[pin_name] == value:
            # Parenthesized print of a single pre-formatted string behaves
            # identically under Python 2 and Python 3.
            print("pin '%s' reached target value '%s' after %f seconds" % (pin_name, value, time.time() - start_time))
            return
    print("Error: pin '%s' didn't reach value '%s' within timeout of %f seconds (it's %s instead)" % (pin_name, value, timeout, h[pin_name]))
    sys.exit(1)
# Read the tool number this test expects to see loaded at startup.
f = open('expected-startup-tool-number', 'r')
contents = f.read()
f.close()
expected_startup_tool_number = int(contents)
print "expecting tool number %d" % expected_startup_tool_number

#
# set up pins
# shell out to halcmd to make nets to halui and motion
#

h = hal.component("test-ui")
h.newpin("tool-number", hal.HAL_S32, hal.HAL_IN)
h.ready()                # mark the component as 'ready'

os.system("halcmd source ../../postgui.hal")

#
# connect to LinuxCNC
#

l = linuxcnc_util.LinuxCNC()

# First wait for the HAL pin to report the expected tool, then confirm
# LinuxCNC itself agrees on the tool in the spindle.
wait_for_pin_value('tool-number', expected_startup_tool_number)
l.wait_for_tool_in_spindle(expected_startup_tool_number)

sys.exit(0)
| lgpl-2.1 |
alanljj/oca_hr | hr_holidays_extension/__openerp__.py | 18 | 2358 | # -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{
'name': 'HR Holidays Extension',
'version': '1.0',
'category': 'Generic Modules/Human Resources',
'description': """
Extended Capabilities for HR Holidays (Leaves)
==============================================
* When calculating the number of leave days take into account the
employee's schedule and public holidays
* The 'Need Action' mechanism assumes the HR Manager approves leave
requests
* Rename 'Leave Requests' menu item to 'My Leaves'
(which is closer to its intent)
* Add a new menu item: All Leave Requests
* New way of entering leaves based on the number of days requested, rather
than by specifying a start and end date. You tell it how many days to
grant and it calculates the start and end dates based on the employee's
schedule.
* Allow a manager to approve the leave requests of subordinates (manager
must be immediate superior of employee or manager of employee's
department and have leave approval rights)
""",
'author': "Michael Telahun Makonnen <mmakonnen@gmail.com>,Odoo Community Association (OCA)",
'website': 'http://miketelahun.wordpress.com',
'license': 'AGPL-3',
'depends': [
'hr_holidays',
'hr_public_holidays',
'hr_schedule',
],
'data': [
'security/user_groups.xml',
'security/ir.model.access.csv',
'security/ir_rule.xml',
'hr_holidays_workflow.xml',
'hr_holidays_view.xml',
],
'test': [
],
'installable': False,
}
| agpl-3.0 |
xlqian/navitia | source/jormungandr/jormungandr/parking_space_availability/car/car_park_provider_manager.py | 3 | 2267 | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from jormungandr.parking_space_availability.abstract_provider_manager import AbstractProviderManager
POI_TYPE_ID = 'poi_type:amenity:parking'
class CarParkingProviderManager(AbstractProviderManager):
    """Provider manager that enriches parking POIs with provider data.

    One provider object is instantiated per entry of the configuration
    list; POIs whose type is ``poi_type:amenity:parking`` get a
    ``car_park`` key filled in from the matching provider.
    """

    def __init__(self, car_park_providers_configurations):
        super(CarParkingProviderManager, self).__init__()
        self.car_park_providers = [
            self._init_class(conf['class'], conf.get('args', {}))
            for conf in car_park_providers_configurations
        ]

    def _handle_poi(self, item):
        # Only parking POIs are eligible for enrichment.
        if 'poi_type' not in item or item['poi_type']['id'] != POI_TYPE_ID:
            return None
        provider = self._find_provider(item)
        if not provider:
            return None
        item['car_park'] = provider.get_informations(item)
        return provider

    def _get_providers(self):
        return self.car_park_providers
| agpl-3.0 |
MaPePeR/numpy | numpy/distutils/unixccompiler.py | 155 | 4656 | """
unixccompiler - can handle very long argument lists for ar.
"""
from __future__ import division, absolute_import, print_function
import os
from distutils.errors import DistutilsExecError, CompileError
from distutils.unixccompiler import *
from numpy.distutils.ccompiler import replace_method
from numpy.distutils.compat import get_exception
if sys.version_info[0] < 3:
from . import log
else:
from numpy.distutils import log
# Note that UnixCCompiler._compile appeared in Python 2.3
def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
    """Compile a single source files with a Unix-style compiler."""
    # HP ad-hoc fix, see ticket 1383
    # NOTE: ccomp aliases self.compiler_so, so the remove() calls below
    # mutate the compiler's flag list in place; the reassignment afterwards
    # is effectively a no-op but kept for clarity.
    ccomp = self.compiler_so
    if ccomp[0] == 'aCC':
        # remove flags that will trigger ANSI-C mode for aCC
        if '-Ae' in ccomp:
            ccomp.remove('-Ae')
        if '-Aa' in ccomp:
            ccomp.remove('-Aa')
        # add flags for (almost) sane C++ handling
        ccomp += ['-AA']
        self.compiler_so = ccomp
    # ensure OPT environment variable is read
    if 'OPT' in os.environ:
        from distutils.sysconfig import get_config_vars
        # Normalize whitespace in both the requested ($OPT) and configured
        # OPT strings, then substitute the configured flags with the
        # user-requested ones in the compile and link command lines.
        opt = " ".join(os.environ['OPT'].split())
        gcv_opt = " ".join(get_config_vars('OPT')[0].split())
        ccomp_s = " ".join(self.compiler_so)
        if opt not in ccomp_s:
            ccomp_s = ccomp_s.replace(gcv_opt, opt)
            self.compiler_so = ccomp_s.split()
        llink_s = " ".join(self.linker_so)
        if opt not in llink_s:
            self.linker_so = llink_s.split() + opt.split()

    display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src)
    try:
        self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
                   extra_postargs, display = display)
    except DistutilsExecError:
        msg = str(get_exception())
        raise CompileError(msg)

# Patch the method onto distutils' UnixCCompiler at import time.
replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile)
def UnixCCompiler_create_static_lib(self, objects, output_libname,
                                    output_dir=None, debug=0, target_lang=None):
    """
    Build a static library in a separate sub-process.

    Parameters
    ----------
    objects : list or tuple of str
        List of paths to object files used to build the static library.
    output_libname : str
        The library name as an absolute or relative (if `output_dir` is used)
        path.
    output_dir : str, optional
        The path to the output directory. Default is None, in which case
        the ``output_dir`` attribute of the UnixCCompiler instance.
    debug : bool, optional
        This parameter is not used.
    target_lang : str, optional
        This parameter is not used.

    Returns
    -------
    None

    """
    objects, output_dir = self._fix_object_args(objects, output_dir)

    output_filename = \
        self.library_filename(output_libname, output_dir=output_dir)

    if self._need_link(objects, output_filename):
        try:
            # previous .a may be screwed up; best to remove it first
            # and recreate.
            # Also, ar on OS X doesn't handle updating universal archives
            os.unlink(output_filename)
        except (IOError, OSError):
            pass
        self.mkpath(os.path.dirname(output_filename))
        # Feed the archiver at most 50 objects per invocation to avoid
        # hitting command-line length limits on very long object lists.
        tmp_objects = objects + self.objects
        while tmp_objects:
            objects = tmp_objects[:50]
            tmp_objects = tmp_objects[50:]
            display = '%s: adding %d object files to %s' % (
                os.path.basename(self.archiver[0]),
                len(objects), output_filename)
            self.spawn(self.archiver + [output_filename] + objects,
                       display = display)

        # Not many Unices required ranlib anymore -- SunOS 4.x is, I
        # think the only major Unix that does.  Maybe we need some
        # platform intelligence here to skip ranlib if it's not
        # needed -- or maybe Python's configure script took care of
        # it for us, hence the check for leading colon.
        if self.ranlib:
            display = '%s:@ %s' % (os.path.basename(self.ranlib[0]),
                                   output_filename)
            try:
                self.spawn(self.ranlib + [output_filename],
                           display = display)
            except DistutilsExecError:
                msg = str(get_exception())
                raise LibError(msg)
    else:
        log.debug("skipping %s (up-to-date)", output_filename)
    return

# Patch the method onto distutils' UnixCCompiler at import time.
replace_method(UnixCCompiler, 'create_static_lib',
               UnixCCompiler_create_static_lib)
| bsd-3-clause |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/scipy/stats/morestats.py | 4 | 94486 | # Author: Travis Oliphant, 2002
#
# Further updates and enhancements by many SciPy developers.
#
from __future__ import division, print_function, absolute_import
import math
import warnings
from collections import namedtuple
import numpy as np
from numpy import (isscalar, r_, log, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d,
sqrt, ceil, floor, array, poly1d, compress,
pi, exp, ravel, angle, count_nonzero)
from numpy.testing.decorators import setastest
from scipy._lib.six import string_types
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon', 'median_test',
'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
]
# Result containers returned by `bayes_mvs`: each pairs a point estimate
# (``statistic``) with its ``(lower, upper)`` confidence interval (``minmax``).
Mean = namedtuple('Mean', ('statistic', 'minmax'))
Variance = namedtuple('Variance', ('statistic', 'minmax'))
Std_dev = namedtuple('Std_dev', ('statistic', 'minmax'))
def bayes_mvs(data, alpha=0.90):
    r"""
    Bayesian confidence intervals for the mean, var, and std.

    Parameters
    ----------
    data : array_like
        Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
        Requires 2 or more data points.
    alpha : float, optional
        Probability that the returned confidence interval contains
        the true parameter.

    Returns
    -------
    mean_cntr, var_cntr, std_cntr : tuple
        The three results are for the mean, variance and standard deviation,
        respectively.  Each result is a tuple of the form::

            (center, (lower, upper))

        with `center` the mean of the conditional pdf of the value given the
        data, and `(lower, upper)` a confidence interval, centered on the
        median, containing the estimate to a probability ``alpha``.

    See Also
    --------
    mvsdist

    Notes
    -----
    Each tuple of mean, variance, and standard deviation estimates represent
    the (center, (lower, upper)) with center the mean of the conditional pdf
    of the value given the data and (lower, upper) is a confidence interval
    centered on the median, containing the estimate to a probability
    ``alpha``.

    Converts data to 1-D and assumes all data has the same mean and variance.
    Uses Jeffrey's prior for variance and std.

    Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))``

    References
    ----------
    T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
    standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
    2006.

    Examples
    --------
    First a basic example to demonstrate the outputs:

    >>> from scipy import stats
    >>> data = [6, 9, 12, 7, 8, 8, 13]
    >>> mean, var, std = stats.bayes_mvs(data)
    >>> mean
    Mean(statistic=9.0, minmax=(7.1036502226125329, 10.896349777387467))
    >>> var
    Variance(statistic=10.0, minmax=(3.1767242068607087, 24.459103821334018))
    >>> std
    Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.9456146050146295))

    Now we generate some normally distributed random data, and get estimates of
    mean and standard deviation with 95% confidence intervals for those
    estimates:

    >>> n_samples = 100000
    >>> data = stats.norm.rvs(size=n_samples)
    >>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95)

    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.hist(data, bins=100, normed=True, label='Histogram of data')
    >>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean')
    >>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r',
    ...            alpha=0.2, label=r'Estimated mean (95% limits)')
    >>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale')
    >>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2,
    ...            label=r'Estimated scale (95% limits)')

    >>> ax.legend(fontsize=10)
    >>> ax.set_xlim([-4, 4])
    >>> ax.set_ylim([0, 0.5])
    >>> plt.show()

    """
    # Validate `alpha` before touching the data, so a bad argument fails
    # fast instead of after the (potentially expensive) mvsdist() call.
    if alpha >= 1 or alpha <= 0:
        raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
                         % alpha)

    m, v, s = mvsdist(data)

    m_res = Mean(m.mean(), m.interval(alpha))
    v_res = Variance(v.mean(), v.interval(alpha))
    s_res = Std_dev(s.mean(), s.interval(alpha))

    return m_res, v_res, s_res
def mvsdist(data):
    """
    'Frozen' distributions for mean, variance, and standard deviation of data.

    Parameters
    ----------
    data : array_like
        Input array.  Converted to 1-D using ravel.
        Requires 2 or more data-points.

    Returns
    -------
    mdist : "frozen" distribution object
        Distribution object representing the mean of the data
    vdist : "frozen" distribution object
        Distribution object representing the variance of the data
    sdist : "frozen" distribution object
        Distribution object representing the standard deviation of the data

    See Also
    --------
    bayes_mvs

    Notes
    -----
    ``bayes_mvs(data)`` is equivalent to
    ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.

    For samples larger than 1000 points, cheap normal approximations are
    used; smaller samples get Student's t (mean), generalized gamma
    (standard deviation) and inverted gamma (variance) distributions.

    References
    ----------
    T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
    standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
    2006.
    """
    sample = ravel(data)
    count = len(sample)
    if count < 2:
        raise ValueError("Need at least 2 data-points.")

    xbar = sample.mean()
    C = sample.var()

    if count > 1000:
        # Gaussian approximations are accurate (and cheap) for large samples.
        mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / count))
        sdist = distributions.norm(loc=math.sqrt(C),
                                   scale=math.sqrt(C / (2. * count)))
        vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / count) * C)
        return mdist, vdist, sdist

    dof = count - 1
    fac = count * C / 2.
    val = dof / 2.
    mdist = distributions.t(dof, loc=xbar, scale=math.sqrt(C / dof))
    sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac))
    vdist = distributions.invgamma(val, scale=fac)
    return mdist, vdist, sdist
def kstat(data, n=2):
    r"""
    Return the nth k-statistic (1<=n<=4 so far).

    The nth k-statistic k_n is the unique symmetric unbiased estimator of the
    nth cumulant kappa_n.

    Parameters
    ----------
    data : array_like
        Input array.  Note that n-D input gets flattened.
    n : int, {1, 2, 3, 4}, optional
        Default is equal to 2.

    Returns
    -------
    kstat : float
        The nth k-statistic.

    See Also
    --------
    kstatvar : Returns an unbiased estimator of the variance of the k-statistic.
    moment : Returns the n-th central moment about the mean for a sample.

    References
    ----------
    http://mathworld.wolfram.com/k-Statistic.html

    http://mathworld.wolfram.com/Cumulant.html
    """
    if n > 4 or n < 1:
        raise ValueError("k-statistics only supported for 1<=n<=4")
    order = int(n)

    psums = np.zeros(order + 1, np.float64)
    flat = ravel(data)
    size = flat.size

    # raise ValueError on empty input
    if size == 0:
        raise ValueError("Data input must not be empty")
    # on nan input, return nan without warning
    if np.isnan(np.sum(flat)):
        return np.nan

    # psums[k] holds the k-th power sum of the (flattened) data.
    for k in range(1, order + 1):
        psums[k] = np.sum(flat ** k, axis=0)

    if order == 1:
        return psums[1] * 1.0 / size
    if order == 2:
        return (size * psums[2] - psums[1] ** 2.0) / (size * (size - 1.0))
    if order == 3:
        return (2 * psums[1] ** 3 - 3 * size * psums[1] * psums[2] +
                size * size * psums[3]) / (size * (size - 1.0) * (size - 2.0))
    # order == 4 is the only remaining possibility after the range check.
    return ((-6 * psums[1] ** 4 + 12 * size * psums[1] ** 2 * psums[2] -
             3 * size * (size - 1.0) * psums[2] ** 2 -
             4 * size * (size + 1) * psums[1] * psums[3] +
             size * size * (size + 1) * psums[4]) /
            (size * (size - 1.0) * (size - 2.0) * (size - 3.0)))
def kstatvar(data, n=2):
    r"""
    Returns an unbiased estimator of the variance of the k-statistic.

    See `kstat` for more details of the k-statistic.

    Parameters
    ----------
    data : array_like
        Input array.  Note that n-D input gets flattened.
    n : int, {1, 2}, optional
        Default is equal to 2.

    Returns
    -------
    kstatvar : float
        The nth k-statistic variance.

    See Also
    --------
    kstat : Returns the n-th k-statistic.
    moment : Returns the n-th central moment about the mean for a sample.
    """
    flattened = ravel(data)
    size = len(flattened)
    if n == 1:
        # var(k1) is estimated by k2 / n.
        return kstat(data, n=2) * 1.0 / size
    if n == 2:
        # var(k2) = (2*n*k2**2 + (n-1)*k4) / (n*(n+1))
        k2 = kstat(data, n=2)
        k4 = kstat(data, n=4)
        return (2 * size * k2 ** 2 + (size - 1) * k4) / (size * (size + 1))
    raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(n):
"""
Approximations of uniform order statistic medians.
Parameters
----------
n : int
Sample size.
Returns
-------
v : 1d float array
Approximations of the order statistic medians.
References
----------
.. [1] James J. Filliben, "The Probability Plot Correlation Coefficient
Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
Order statistics of the uniform distribution on the unit interval
are marginally distributed according to beta distributions.
The expectations of these order statistic are evenly spaced across
the interval, but the distributions are skewed in a way that
pushes the medians slightly towards the endpoints of the unit interval:
>>> n = 4
>>> k = np.arange(1, n+1)
>>> from scipy.stats import beta
>>> a = k
>>> b = n-k+1
>>> beta.mean(a, b)
array([ 0.2, 0.4, 0.6, 0.8])
>>> beta.median(a, b)
array([ 0.15910358, 0.38572757, 0.61427243, 0.84089642])
The Filliben approximation uses the exact medians of the smallest
and greatest order statistics, and the remaining medians are approximated
by points spread evenly across a sub-interval of the unit interval:
>>> from scipy.morestats import _calc_uniform_order_statistic_medians
>>> _calc_uniform_order_statistic_medians(n)
array([ 0.15910358, 0.38545246, 0.61454754, 0.84089642])
This plot shows the skewed distributions of the order statistics
of a sample of size four from a uniform distribution on the unit interval:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0.0, 1.0, num=50, endpoint=True)
>>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)]
>>> plt.figure()
>>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3])
"""
v = np.zeros(n, dtype=np.float64)
v[-1] = 0.5**(1.0 / n)
v[0] = 1 - v[-1]
i = np.arange(2, n)
v[1:-1] = (i - 0.3175) / (n + 0.365)
return v
def _parse_dist_kw(dist, enforce_subclass=True):
    """Parse `dist` keyword.

    Parameters
    ----------
    dist : str or stats.distributions instance.
        Several functions take `dist` as a keyword, hence this utility
        function.
    enforce_subclass : bool, optional
        If True (default), `dist` needs to be a
        `_distn_infrastructure.rv_generic` instance.
        It can sometimes be useful to set this keyword to False, if a function
        wants to accept objects that just look somewhat like such an instance
        (for example, they have a ``ppf`` method).
    """
    if isinstance(dist, rv_generic):
        return dist
    if isinstance(dist, string_types):
        # Look the name up on the scipy.stats.distributions namespace.
        try:
            return getattr(distributions, dist)
        except AttributeError:
            raise ValueError("%s is not a valid distribution name" % dist)
    if enforce_subclass:
        msg = ("`dist` should be a stats.distributions instance or a string "
               "with the name of such a distribution.")
        raise ValueError(msg)
    # Caller opted out of type enforcement: hand the object back untouched.
    return dist
def _add_axis_labels_title(plot, xlabel, ylabel, title):
"""Helper function to add axes labels and a title to stats plots"""
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title(title)
plot.set_xlabel(xlabel)
plot.set_ylabel(ylabel)
else:
# matplotlib.pyplot module
plot.title(title)
plot.xlabel(xlabel)
plot.ylabel(ylabel)
except:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
def probplot(x, sparams=(), dist='norm', fit=True, plot=None):
    """
    Calculate quantiles for a probability plot, and optionally show the plot.

    Generates a probability plot of sample data against the quantiles of a
    specified theoretical distribution (the normal distribution by default).
    `probplot` optionally calculates a best-fit line for the data and plots the
    results using Matplotlib or a given plot function.

    Parameters
    ----------
    x : array_like
        Sample/response data from which `probplot` creates the plot.
    sparams : tuple, optional
        Distribution-specific shape parameters (shape parameters plus location
        and scale).
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name. The default is 'norm' for a
        normal probability plot. Objects that look enough like a
        stats.distributions instance (i.e. they have a ``ppf`` method) are also
        accepted.
    fit : bool, optional
        Fit a least-squares regression (best-fit) line to the sample data if
        True (default).
    plot : object, optional
        If given, plots the quantiles and least squares fit.
        `plot` is an object that has to have methods "plot" and "text".
        The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
        or a custom object with the same methods.
        Default is None, which means that no plot is created.

    Returns
    -------
    (osm, osr) : tuple of ndarrays
        Tuple of theoretical quantiles (osm, or order statistic medians) and
        ordered responses (osr). `osr` is simply sorted input `x`.
        For details on how `osm` is calculated see the Notes section.
    (slope, intercept, r) : tuple of floats, optional
        Tuple containing the result of the least-squares fit, if that is
        performed by `probplot`. `r` is the square root of the coefficient of
        determination. If ``fit=False`` and ``plot=None``, this tuple is not
        returned.

    Notes
    -----
    Even if `plot` is given, the figure is not shown or saved by `probplot`;
    ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
    calling `probplot`.

    `probplot` generates a probability plot, which should not be confused with
    a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
    type, see ``statsmodels.api.ProbPlot``.

    The formula used for the theoretical quantiles (horizontal axis of the
    probability plot) is Filliben's estimate::

        quantiles = dist.ppf(val), for

                0.5**(1/n),                  for i = n
          val = (i - 0.3175) / (n + 0.365),  for i = 2, ..., n-1
                1 - 0.5**(1/n),              for i = 1

    where ``i`` indicates the i-th ordered value and ``n`` is the total number
    of values.

    Examples
    --------
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt
    >>> nsample = 100
    >>> np.random.seed(7654321)

    A t distribution with small degrees of freedom:

    >>> ax1 = plt.subplot(221)
    >>> x = stats.t.rvs(3, size=nsample)
    >>> res = stats.probplot(x, plot=plt)

    A t distribution with larger degrees of freedom:

    >>> ax2 = plt.subplot(222)
    >>> x = stats.t.rvs(25, size=nsample)
    >>> res = stats.probplot(x, plot=plt)

    A mixture of two normal distributions with broadcasting:

    >>> ax3 = plt.subplot(223)
    >>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
    ...                    size=(nsample//2,2)).ravel()
    >>> res = stats.probplot(x, plot=plt)

    A standard normal distribution:

    >>> ax4 = plt.subplot(224)
    >>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
    >>> res = stats.probplot(x, plot=plt)

    Produce a new figure with a loggamma distribution, using the ``dist`` and
    ``sparams`` keywords:

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> x = stats.loggamma.rvs(c=2.5, size=500)
    >>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
    >>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")

    Show the results with Matplotlib:

    >>> plt.show()
    """
    x = np.asarray(x)
    # A fit is needed either when explicitly requested or when plotting
    # (the plot draws the best-fit line).
    _perform_fit = fit or (plot is not None)
    if x.size == 0:
        # Empty input: empty arrays back, plus a NaN fit if one was implied.
        if _perform_fit:
            return (x, x), (np.nan, np.nan, 0.0)
        else:
            return x, x
    # Filliben's estimate of the uniform order statistic medians.
    osm_uniform = _calc_uniform_order_statistic_medians(len(x))
    # enforce_subclass=False: anything with a ``ppf`` method is acceptable.
    dist = _parse_dist_kw(dist, enforce_subclass=False)
    # Normalize `sparams` to a tuple so it can be star-unpacked into ppf.
    if sparams is None:
        sparams = ()
    if isscalar(sparams):
        sparams = (sparams,)
    if not isinstance(sparams, tuple):
        sparams = tuple(sparams)
    # Theoretical quantiles: uniform medians mapped through dist's ppf.
    osm = dist.ppf(osm_uniform, *sparams)
    osr = sort(x)
    if _perform_fit:
        # perform a linear least squares fit.
        slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
    if plot is not None:
        plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
        _add_axis_labels_title(plot, xlabel='Quantiles',
                               ylabel='Ordered Values',
                               title='Probability Plot')
        # Add R^2 value to the plot as text
        xmin = amin(osm)
        xmax = amax(osm)
        ymin = amin(x)
        ymax = amax(x)
        posx = xmin + 0.70 * (xmax - xmin)
        posy = ymin + 0.01 * (ymax - ymin)
        plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
    if fit:
        return (osm, osr), (slope, intercept, r)
    else:
        return osm, osr
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
    """
    Calculate the shape parameter that maximizes the PPCC.

    The probability plot correlation coefficient (PPCC) plot can be used to
    determine the optimal shape parameter for a one-parameter family of
    distributions.  `ppcc_max` returns the shape parameter that would
    maximize the probability plot correlation coefficient for the given
    data to a one-parameter family of distributions.

    Parameters
    ----------
    x : array_like
        Input array.
    brack : tuple, optional
        Triple (a, b, c) where (a < b < c).  If the bracket consists of two
        numbers (a, c), they are assumed to be a starting interval for a
        downhill bracket search (see `scipy.optimize.brent`).
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name.  Objects that look
        enough like a stats.distributions instance (i.e. they have a ``ppf``
        method) are also accepted.  The default is ``'tukeylambda'``.

    Returns
    -------
    shape_value : float
        The shape parameter at which the probability plot correlation
        coefficient reaches its max value.

    See also
    --------
    ppcc_plot, probplot, boxcox

    Notes
    -----
    The `brack` keyword serves as a starting point, which is useful in
    corner cases.  One can use a plot to obtain a rough visual estimate of
    the location of the maximum and start the search near it.

    References
    ----------
    .. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test
           for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
    .. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm
    """
    dist = _parse_dist_kw(dist)
    osm_uniform = _calc_uniform_order_statistic_medians(len(x))
    osr = sort(x)

    def _neg_ppcc(shape, medians, ordered, ppf):
        # Probability-plot x-axis values for this candidate shape; return
        # ``1 - r`` so that minimizing it maximizes the correlation.
        theoretical = ppf(medians, shape)
        corr, _ = stats.pearsonr(theoretical, ordered)
        return 1 - corr

    return optimize.brent(_neg_ppcc, brack=brack,
                          args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
    """
    Calculate and optionally plot probability plot correlation coefficient.

    The probability plot correlation coefficient (PPCC) plot can be used to
    determine the optimal shape parameter for a one-parameter family of
    distributions.  It cannot be used for distributions without shape
    parameters (like the normal distribution) or with multiple shape
    parameters.

    By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used.  A
    Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed
    distributions via an approximately normal one, and is therefore
    particularly useful in practice.

    Parameters
    ----------
    x : array_like
        Input array.
    a, b : scalar
        Lower and upper bounds of the shape parameter to use.
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name.  Objects that look
        enough like a stats.distributions instance (i.e. they have a ``ppf``
        method) are also accepted.  The default is ``'tukeylambda'``.
    plot : object, optional
        If given, plots PPCC against the shape parameter.  `plot` is an
        object that has to have methods "plot" and "text".  The
        `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
        or a custom object with the same methods.  Default is None, which
        means that no plot is created.
    N : int, optional
        Number of points on the horizontal axis (equally distributed from
        `a` to `b`).

    Returns
    -------
    svals : ndarray
        The shape values for which `ppcc` was calculated.
    ppcc : ndarray
        The calculated probability plot correlation coefficient values.

    See also
    --------
    ppcc_max, probplot, boxcox_normplot, tukeylambda

    References
    ----------
    J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
    Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
    """
    if b <= a:
        raise ValueError("`b` has to be larger than `a`.")

    # For each shape value on the grid, the PPCC is the ``r`` from the
    # least-squares fit done by `probplot`.
    shape_values = np.linspace(a, b, num=N)
    ppcc = np.array([probplot(x, sval, dist=dist, fit=True)[1][-1]
                     for sval in shape_values])

    if plot is not None:
        plot.plot(shape_values, ppcc, 'x')
        _add_axis_labels_title(plot, xlabel='Shape Values',
                               ylabel='Prob Plot Corr. Coef.',
                               title='(%s) PPCC Plot' % dist)

    return shape_values, ppcc
def boxcox_llf(lmb, data):
    r"""The boxcox log-likelihood function.

    Parameters
    ----------
    lmb : scalar
        Parameter for Box-Cox transformation.  See `boxcox` for details.
    data : array_like
        Data to calculate Box-Cox log-likelihood for.  If `data` is
        multi-dimensional, the log-likelihood is calculated along the first
        axis.

    Returns
    -------
    llf : float or ndarray
        Box-Cox log-likelihood of `data` given `lmb`.  A float for 1-D
        `data`, an array otherwise.

    See Also
    --------
    boxcox, probplot, boxcox_normplot, boxcox_normmax

    Notes
    -----
    The Box-Cox log-likelihood function is defined here as

    .. math::

        llf = (\lambda - 1) \sum_i(\log(x_i)) -
              N/2 \log(\sum_i (y_i - \bar{y})^2 / N),

    where ``y`` is the Box-Cox transformed input data ``x``.
    """
    arr = np.asarray(data)
    n_obs = arr.shape[0]
    # No observations: the log-likelihood is undefined.
    if n_obs == 0:
        return np.nan

    transformed = boxcox(arr, lmb)
    centered = transformed - np.mean(transformed, axis=0)
    # Log of the (biased) variance of the transformed data.
    variance_term = np.log(np.sum(centered**2. / n_obs, axis=0))
    return (lmb - 1) * np.sum(np.log(arr), axis=0) - n_obs / 2.0 * variance_term
def _boxcox_conf_interval(x, lmax, alpha):
    """Compute the ``100*(1-alpha)%`` confidence interval for Box-Cox lmbda.

    The interval contains every lmbda for which
    ``boxcox_llf(lmbda, x) >= boxcox_llf(lmax, x) - 0.5 * chi2.ppf(1-alpha, 1)``.

    Parameters
    ----------
    x : array_like
        Positive input data.
    lmax : float
        The lmbda that maximizes `boxcox_llf` for `x`.
    alpha : float
        Significance level; must be between 0.0 and 1.0.

    Returns
    -------
    (lmminus, lmplus) : tuple of float
        Lower and upper confidence limits for lmbda.

    Raises
    ------
    RuntimeError
        If no bracketing endpoint is found within 500 steps on either side.
    """
    # Need to find the lambda for which
    #   f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
    fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
    target = boxcox_llf(lmax, x) - fac

    def rootfunc(lmbda, data, target):
        return boxcox_llf(lmbda, data) - target

    def _find_limit(step):
        # Walk outward from lmax (starting 0.5 away, in increments of
        # |step| = 0.1) until the log-likelihood drops below `target`,
        # which brackets the root for brentq.  `step` > 0 searches the
        # upper limit, `step` < 0 the lower one.
        newlm = lmax + 5 * step
        N = 0
        while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
            newlm += step
            N += 1
        if N == 500:
            raise RuntimeError("Could not find endpoint.")
        lo, hi = (lmax, newlm) if step > 0 else (newlm, lmax)
        return optimize.brentq(rootfunc, lo, hi, args=(x, target))

    lmplus = _find_limit(0.1)
    lmminus = _find_limit(-0.1)
    return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
    r"""
    Return a positive dataset transformed by a Box-Cox power transformation.

    Parameters
    ----------
    x : ndarray
        Input array.  Should be 1-dimensional.
    lmbda : {None, scalar}, optional
        If `lmbda` is not None, do the transformation for that value.
        If `lmbda` is None, find the lambda that maximizes the
        log-likelihood function and return it as the second output argument.
    alpha : {None, float}, optional
        If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence
        interval for `lmbda` as the third output argument.
        Must be between 0.0 and 1.0.

    Returns
    -------
    boxcox : ndarray
        Box-Cox power transformed array.
    maxlog : float, optional
        If the `lmbda` parameter is None, the second returned argument is
        the lambda that maximizes the log-likelihood function.
    (min_ci, max_ci) : tuple of float, optional
        If `lmbda` parameter is None and ``alpha`` is not None, this
        returned tuple of floats represents the minimum and maximum
        confidence limits given ``alpha``.

    See Also
    --------
    probplot, boxcox_normplot, boxcox_normmax, boxcox_llf

    Notes
    -----
    The Box-Cox transform is given by::

        y = (x**lmbda - 1) / lmbda,  for lmbda > 0
            log(x),                  for lmbda = 0

    `boxcox` requires the input data to be positive.  Sometimes a Box-Cox
    transformation provides a shift parameter to achieve this; `boxcox` does
    not.  Such a shift parameter is equivalent to adding a positive constant
    to `x` before calling `boxcox`.

    The confidence limits returned when ``alpha`` is provided give the
    interval where:

    .. math::

        llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),

    with ``llf`` the log-likelihood function and :math:`\chi^2` the
    chi-squared function.

    References
    ----------
    G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
    Royal Statistical Society B, 26, 211-252 (1964).
    """
    x = np.asarray(x)
    if x.size == 0:
        return x
    if any(x <= 0):
        raise ValueError("Data must be positive.")

    if lmbda is not None:
        # Fixed lmbda: a single transformation, no extra return values.
        return special.boxcox(x, lmbda)

    # lmbda not given: use the maximum-likelihood estimate, then transform.
    lmax = boxcox_normmax(x, method='mle')
    y = boxcox(x, lmax)

    if alpha is None:
        return y, lmax
    # Additionally report the (1-alpha) confidence interval around lmax.
    return y, lmax, _boxcox_conf_interval(x, lmax, alpha)
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
    """Compute optimal Box-Cox transform parameter for input data.

    Parameters
    ----------
    x : array_like
        Input array.
    brack : 2-tuple, optional
        The starting interval for a downhill bracket search with
        `optimize.brent`.  Note that this is in most cases not critical; the
        final result is allowed to be outside this bracket.
    method : str, optional
        The method to determine the optimal transform parameter (`boxcox`
        ``lmbda`` parameter).  Options are:

        'pearsonr' (default)
            Maximizes the Pearson correlation coefficient between
            ``y = boxcox(x)`` and the expected values for ``y`` if `x` would
            be normally-distributed.
        'mle'
            Minimizes the log-likelihood `boxcox_llf`.  This is the method
            used in `boxcox`.
        'all'
            Use all optimization methods available, and return all results.
            Useful to compare different methods.

    Returns
    -------
    maxlog : float or ndarray
        The optimal transform parameter found.  An array instead of a scalar
        for ``method='all'``.

    See Also
    --------
    boxcox, boxcox_llf, boxcox_normplot
    """
    def _pearsonr(data, bracket):
        medians = _calc_uniform_order_statistic_medians(len(data))
        expected = distributions.norm.ppf(medians)

        def _neg_corr(lmbda, xvals, samps):
            # Probability-plot x-axis values vs. the sorted transformed
            # sample; return ``1 - r`` so that brent's minimization
            # maximizes the correlation.
            yvals = np.sort(boxcox(samps, lmbda))
            r, _ = stats.pearsonr(xvals, yvals)
            return 1 - r

        return optimize.brent(_neg_corr, brack=bracket, args=(expected, data))

    def _mle(data, bracket):
        def _neg_llf(lmb, samps):
            # Negate so that minimizing maximizes the log-likelihood.
            return -boxcox_llf(lmb, samps)

        return optimize.brent(_neg_llf, brack=bracket, args=(data,))

    def _all(data, bracket):
        # Run every individual method and stack the results.
        return np.array([_pearsonr(data, bracket), _mle(data, bracket)],
                        dtype=float)

    methods = {'pearsonr': _pearsonr,
               'mle': _mle,
               'all': _all}
    if method not in methods:
        raise ValueError("Method %s not recognized." % method)

    return methods[method](x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
    """Compute parameters for a Box-Cox normality plot, optionally show it.

    A Box-Cox normality plot shows graphically what the best transformation
    parameter is to use in `boxcox` to obtain a distribution that is close
    to normal.

    Parameters
    ----------
    x : array_like
        Input array.
    la, lb : scalar
        The lower and upper bounds for the ``lmbda`` values to pass to
        `boxcox` for Box-Cox transformations.  These are also the limits of
        the horizontal axis of the plot if that is generated.
    plot : object, optional
        If given, plots the quantiles and least squares fit.
        `plot` is an object that has to have methods "plot" and "text".
        The `matplotlib.pyplot` module or a Matplotlib Axes object can be
        used, or a custom object with the same methods.
        Default is None, which means that no plot is created.
    N : int, optional
        Number of points on the horizontal axis (equally distributed from
        `la` to `lb`).

    Returns
    -------
    lmbdas : ndarray
        The ``lmbda`` values for which a Box-Cox transform was done.
    ppcc : ndarray
        Probability Plot Correlelation Coefficient, as obtained from
        `probplot` when fitting the Box-Cox transformed input `x` against a
        normal distribution.

    See Also
    --------
    probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max

    Notes
    -----
    Even if `plot` is given, the figure is not shown or saved by
    `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
    should be used after calling `probplot`.
    """
    x = np.asarray(x)
    if x.size == 0:
        return x

    if lb <= la:
        raise ValueError("`lb` has to be larger than `la`.")

    lmbdas = np.linspace(la, lb, num=N)
    ppcc = lmbdas * 0.0
    for i, val in enumerate(lmbdas):
        # Determine for each lmbda the correlation coefficient of transformed x
        z = boxcox(x, lmbda=val)
        _, r2 = probplot(z, dist='norm', fit=True)
        ppcc[i] = r2[-1]

    if plot is not None:
        plot.plot(lmbdas, ppcc, 'x')
        # Use a raw string for the xlabel: '\l' is an invalid escape
        # sequence that triggers a DeprecationWarning (SyntaxWarning on
        # newer Pythons); the rendered label text is unchanged.
        _add_axis_labels_title(plot, xlabel=r'$\lambda$',
                               ylabel='Prob Plot Corr. Coef.',
                               title='Box-Cox Normality Plot')

    return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
    """
    Perform the Shapiro-Wilk test for normality.

    The Shapiro-Wilk test tests the null hypothesis that the
    data was drawn from a normal distribution.

    Parameters
    ----------
    x : array_like
        Array of sample data.
    a : array_like, optional
        Array of internal parameters used in the calculation. If these
        are not given, they will be computed internally. If x has length
        n, then a must have length n/2.
    reta : bool, optional
        Whether or not to return the internally computed a values. The
        default is False.

    Returns
    -------
    W : float
        The test statistic.
    p-value : float
        The p-value for the hypothesis test.
    a : array_like, optional
        If `reta` is True, then these are the internally computed "a"
        values that may be passed into this function on future calls.

    See Also
    --------
    anderson : The Anderson-Darling test for normality
    kstest : The Kolmogorov-Smirnov test for goodness of fit.

    Notes
    -----
    The algorithm used is described in [4]_ but censoring parameters as
    described are not implemented. For N > 5000 the W test statistic is accurate
    but the p-value may not be.
    The chance of rejecting the null hypothesis when it is true is close to 5%
    regardless of sample size.

    References
    ----------
    .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
    .. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for
           normality (complete samples), Biometrika, Vol. 52, pp. 591-611.
    .. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk,
           Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal of
           Statistical Modeling and Analytics, Vol. 2, pp. 21-33.
    .. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678)
    >>> x = stats.norm.rvs(loc=5, scale=3, size=100)
    >>> stats.shapiro(x)
    (0.9772805571556091, 0.08144091814756393)
    """
    # `a` and `reta` are deprecated; warn on any use of either.
    if a is not None or reta:
        warnings.warn("input parameters 'a' and 'reta' are scheduled to be "
                      "removed in version 0.18.0", FutureWarning)
    x = np.ravel(x)
    N = len(x)
    if N < 3:
        raise ValueError("Data must be at least length 3.")
    if a is None:
        # Let the compiled routine compute the `a` weights itself
        # (init=0 signals uninitialized weights).
        a = zeros(N, 'f')
        init = 0
    else:
        if len(a) != N // 2:
            raise ValueError("len(a) must equal len(x)/2")
        init = 1
    y = sort(x)
    # statlib.swilk implements ALGORITHM AS R94; `ifault` is its status flag.
    a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
    # NOTE(review): ifault values 0 and 2 are treated as acceptable here and
    # anything else as a zero-range-input condition -- confirm against the
    # AS R94 fault-code table.
    if ifault not in [0, 2]:
        warnings.warn("Input data for shapiro has range zero. The results "
                      "may not be accurate.")
    if N > 5000:
        warnings.warn("p-value may not be accurate for N > 5000.")
    if reta:
        return w, pw, a
    else:
        return w, pw
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of the American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
# Result type of `anderson`: the A^2 statistic, the critical values for the
# chosen distribution, and the matching significance levels (in percent).
AndersonResult = namedtuple('AndersonResult', ('statistic',
                                               'critical_values',
                                               'significance_level'))
def anderson(x, dist='norm'):
    """
    Anderson-Darling test for data coming from a particular distribution

    The Anderson-Darling test is a modification of the Kolmogorov-
    Smirnov test `kstest` for the null hypothesis that a sample is
    drawn from a population that follows a particular distribution.
    For the Anderson-Darling test, the critical values depend on
    which distribution is being tested against.  This function works
    for normal, exponential, logistic, or Gumbel (Extreme Value
    Type I) distributions.

    Parameters
    ----------
    x : array_like
        array of sample data
    dist : {'norm','expon','logistic','gumbel','extreme1'}, optional
        the type of distribution to test against.  The default is 'norm'
        and 'extreme1' is a synonym for 'gumbel'

    Returns
    -------
    statistic : float
        The Anderson-Darling test statistic
    critical_values : list
        The critical values for this distribution
    significance_level : list
        The significance levels for the corresponding critical values
        in percents.  The function returns critical values for a
        differing set of significance levels depending on the
        distribution that is being tested against.

    Notes
    -----
    Critical values provided are for the following significance levels:

    normal/exponenential
        15%, 10%, 5%, 2.5%, 1%
    logistic
        25%, 10%, 5%, 2.5%, 1%, 0.5%
    Gumbel
        25%, 10%, 5%, 2.5%, 1%

    If A2 is larger than these critical values then for the corresponding
    significance level, the null hypothesis that the data come from the
    chosen distribution can be rejected.

    References
    ----------
    .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
    .. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
           Some Comparisons, Journal of the American Statistical Association,
           Vol. 69, pp. 730-737.
    .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
           Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
           pp. 357-369.
    .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
           Distribution, Biometrika, Vol. 64, pp. 583-588.
    .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
           to Tests for Exponentiality , Technical Report No. 262,
           Department of Statistics, Stanford University, Stanford, CA.
    .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
           Based on the Empirical Distribution Function, Biometrika, Vol. 66,
           pp. 591-595.
    """
    if dist not in ['norm', 'expon', 'gumbel', 'extreme1', 'logistic']:
        raise ValueError("Invalid distribution; dist must be 'norm', "
                         "'expon', 'gumbel', 'extreme1' or 'logistic'.")
    y = sort(x)
    xbar = np.mean(x, axis=0)
    N = len(y)
    if dist == 'norm':
        # Standardize with sample mean and (ddof=1) std, then map each
        # sorted value through the normal CDF.
        s = np.std(x, ddof=1, axis=0)
        w = (y - xbar) / s
        z = distributions.norm.cdf(w)
        sig = array([15, 10, 5, 2.5, 1])
        # Finite-sample correction of the tabulated critical values.
        critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
    elif dist == 'expon':
        w = y / xbar
        z = distributions.expon.cdf(w)
        sig = array([15, 10, 5, 2.5, 1])
        critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
    elif dist == 'logistic':
        # Location/scale estimated by numerically solving the two
        # logistic maximum-likelihood equations.
        def rootfunc(ab, xj, N):
            a, b = ab
            tmp = (xj - a) / b
            tmp2 = exp(tmp)
            val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N,
                   np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
            return array(val)
        sol0 = array([xbar, np.std(x, ddof=1, axis=0)])
        sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)
        w = (y - sol[0]) / sol[1]
        z = distributions.logistic.cdf(w)
        sig = array([25, 10, 5, 2.5, 1, 0.5])
        critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
    else:  # (dist == 'gumbel') or (dist == 'extreme1'):
        xbar, s = distributions.gumbel_l.fit(x)
        w = (y - xbar) / s
        z = distributions.gumbel_l.cdf(w)
        sig = array([25, 10, 5, 2.5, 1])
        critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
    # A^2 statistic: combines log CDF values from both tails of the sorted
    # sample; requires all z strictly inside (0, 1) for finite logs.
    i = arange(1, N + 1)
    A2 = -N - np.sum((2*i - 1.0) / N * (log(z) + log(1 - z[::-1])), axis=0)
    return AndersonResult(A2, critical, sig)
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
# Result type of `anderson_ksamp`: the normalized k-sample Anderson-Darling
# statistic, its critical values, and an approximate significance level.
Anderson_ksampResult = namedtuple('Anderson_ksampResult',
                                  ('statistic', 'critical_values',
                                   'significance_level'))
def anderson_ksamp(samples, midrank=True):
    """The Anderson-Darling test for k-samples.

    The k-sample Anderson-Darling test is a modification of the
    one-sample Anderson-Darling test. It tests the null hypothesis
    that k-samples are drawn from the same population without having
    to specify the distribution function of that population. The
    critical values depend on the number of samples.

    Parameters
    ----------
    samples : sequence of 1-D array_like
        Array of sample data in arrays.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. Default
        (True) is the midrank test applicable to continuous and
        discrete populations. If False, the right side empirical
        distribution is used.

    Returns
    -------
    statistic : float
        Normalized k-sample Anderson-Darling test statistic.
    critical_values : array
        The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
    significance_level : float
        An approximate significance level at which the null hypothesis for the
        provided samples can be rejected.

    Raises
    ------
    ValueError
        If less than 2 samples are provided, a sample is empty, or no
        distinct observations are in the samples.

    See Also
    --------
    ks_2samp : 2 sample Kolmogorov-Smirnov test
    anderson : 1 sample Anderson-Darling test

    Notes
    -----
    [1]_ defines three versions of the k-sample Anderson-Darling test:
    one for continuous distributions and two for discrete distributions,
    in which ties between samples may occur. The default of this routine
    is to compute the version based on the midrank empirical distribution
    function. This test is applicable to continuous and discrete data.
    If midrank is set to False, the right side empirical distribution is
    used for a test for discrete data. According to [1]_, the two discrete
    test statistics differ only slightly if a few collisions due to
    round-off errors occur in the test not adjusted for ties between
    samples.

    .. versionadded:: 0.14.0

    References
    ----------
    .. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
           Anderson-Darling Tests, Journal of the American Statistical
           Association, Vol. 82, pp. 918-924.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(314159)

    The null hypothesis that the two random samples come from the same
    distribution can be rejected at the 5% level because the returned
    test value is greater than the critical value for 5% (1.961) but
    not at the 2.5% level. The interpolation gives an approximate
    significance level of 3.1%:

    >>> stats.anderson_ksamp([np.random.normal(size=50),
    ... np.random.normal(loc=0.5, size=30)])
    (2.4615796189876105,
      array([ 0.325,  1.226,  1.961,  2.718,  3.752]),
      0.03134990135800783)

    The null hypothesis cannot be rejected for three samples from an
    identical distribution. The approximate p-value (87%) has to be
    computed by extrapolation and may not be very accurate:

    >>> stats.anderson_ksamp([np.random.normal(size=50),
    ... np.random.normal(size=30), np.random.normal(size=20)])
    (-0.73091722665244196,
      array([ 0.44925884,  1.3052767 ,  1.9434184 ,  2.57696569,  3.41634856]),
      0.8789283903979661)
    """
    k = len(samples)
    if (k < 2):
        raise ValueError("anderson_ksamp needs at least two samples")
    samples = list(map(np.asarray, samples))
    Z = np.sort(np.hstack(samples))
    N = Z.size
    Zstar = np.unique(Z)
    if Zstar.size < 2:
        raise ValueError("anderson_ksamp needs more than one distinct "
                         "observation")
    n = np.array([sample.size for sample in samples])
    if any(n == 0):
        raise ValueError("anderson_ksamp encountered sample without "
                         "observations")
    # Un-normalized statistic: eq. 7 (midrank / tie-adjusted) or eq. 6
    # (right-continuous ECDF) of Scholz & Stephens 1987.
    if midrank:
        A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
    else:
        A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
    # H, h, g are the auxiliary sums of Scholz & Stephens used to build the
    # null variance sigmasq of A2kN (their eq. 4).
    H = (1. / n).sum()
    hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()
    h = hs_cs[-1] + 1
    g = (hs_cs / arange(2, N)).sum()
    a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
    b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
    c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
    d = (2*h + 6)*k**2 - 4*h*k
    sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
    m = k - 1
    # Standardize: under the null hypothesis the statistic has mean k - 1.
    A2 = (A2kN - m) / math.sqrt(sigmasq)
    # The b_i values are the interpolation coefficients from Table 2
    # of Scholz and Stephens 1987
    b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
    b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
    b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
    critical = b0 + b1 / math.sqrt(m) + b2 / m
    # Fit a quadratic through (critical value, log significance) pairs and
    # evaluate it at A2 to interpolate an approximate p-value.
    pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
    if A2 < critical.min() or A2 > critical.max():
        warnings.warn("approximate p-value will be computed by extrapolation")
    p = math.exp(np.polyval(pf, A2))
    return Anderson_ksampResult(A2, critical, p)
AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))
def ansari(x, y):
    """
    Perform the Ansari-Bradley test for equal scale parameters

    The Ansari-Bradley test is a non-parametric test for the equality
    of the scale parameter of the distributions from which two
    samples were drawn.

    Parameters
    ----------
    x, y : array_like
        arrays of sample data

    Returns
    -------
    statistic : float
        The Ansari-Bradley test statistic
    pvalue : float
        The p-value of the hypothesis test

    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    mood : A non-parametric test for the equality of two scale parameters

    Notes
    -----
    The p-value given is exact when the sample sizes are both less than
    55 and there are no ties, otherwise a normal approximation for the
    p-value is used.

    References
    ----------
    .. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical
           methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.
    """
    x, y = asarray(x), asarray(y)
    n = len(x)
    m = len(y)
    if m < 1:
        raise ValueError("Not enough other observations.")
    if n < 1:
        raise ValueError("Not enough test observations.")
    N = m + n
    xy = r_[x, y]  # combine
    rank = stats.rankdata(xy)
    # Ansari-Bradley scores: each observation is scored by its rank counted
    # from the nearer end of the pooled sample.
    symrank = amin(array((rank, N - rank + 1)), 0)
    # The test statistic is the sum of the scores of the first sample.
    AB = np.sum(symrank[:n], axis=0)
    uxy = unique(xy)
    repeats = (len(uxy) != len(xy))
    exact = ((m < 55) and (n < 55) and not repeats)
    if repeats and (m < 55 or n < 55):
        warnings.warn("Ties preclude use of exact statistic.")
    if exact:
        # statlib.gscale returns the exact null distribution of AB: `a1`
        # holds frequencies for AB = astart, astart + 1, ... (Fortran AS 93).
        astart, a1, ifault = statlib.gscale(n, m)
        ind = AB - astart
        total = np.sum(a1, axis=0)
        if ind < len(a1)/2.0:
            # AB lies in the lower half: double the left-tail probability.
            cind = int(ceil(ind))
            if ind == cind:
                pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total
            else:
                pval = 2.0 * np.sum(a1[:cind], axis=0) / total
        else:
            # AB lies in the upper half: double the right-tail probability.
            find = int(floor(ind))
            if ind == floor(ind):
                pval = 2.0 * np.sum(a1[find:], axis=0) / total
            else:
                pval = 2.0 * np.sum(a1[find+1:], axis=0) / total
        return AnsariResult(AB, min(1.0, pval))
    # otherwise compute normal approximation
    # Null mean and variance of AB differ for odd and even pooled size N.
    if N % 2:  # N odd
        mnAB = n * (N+1.0)**2 / 4.0 / N
        varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
    else:
        mnAB = n * (N+2.0) / 4.0
        varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
    if repeats:   # adjust variance estimates
        # compute np.sum(tj * rj**2,axis=0)
        fac = np.sum(symrank**2, axis=0)
        if N % 2:  # N odd
            varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
        else:  # N even
            varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))
    z = (AB - mnAB) / sqrt(varAB)
    # Two-sided p-value from the standard normal tail.
    pval = distributions.norm.sf(abs(z)) * 2.0
    return AnsariResult(AB, pval)
BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))
def bartlett(*args):
    """
    Perform Bartlett's test for equal variances

    Bartlett's test tests the null hypothesis that all input samples
    are from populations with equal variances. For samples
    from significantly non-normal populations, Levene's test
    `levene` is more robust.

    Parameters
    ----------
    sample1, sample2,... : array_like
        arrays of sample data. May be different lengths.

    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value of the test.

    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    levene : A robust parametric test for equality of k variances

    Notes
    -----
    Conover et al. (1981) examine many of the existing parametric and
    nonparametric tests by extensive simulations and they conclude that the
    tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
    superior in terms of robustness of departures from normality and power [3]_.

    References
    ----------
    .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
    .. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
              Methods, Eighth Edition, Iowa State University Press.
    .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
           Hypothesis Testing based on Quadratic Inference Function. Technical
           Report #99-03, Center for Likelihood Studies, Pennsylvania State
           University.
    .. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical
           Tests. Proceedings of the Royal Society of London. Series A,
           Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.
    """
    # Degenerate case: any empty sample yields an undefined statistic.
    if any(np.asanyarray(sample).size == 0 for sample in args):
        return BartlettResult(np.nan, np.nan)

    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")

    Ni = np.array([len(sample) for sample in args], dtype=float)
    ssq = np.array([np.var(sample, ddof=1) for sample in args], dtype=float)
    Ntot = Ni.sum()
    # Pooled (bias-corrected) variance across the k groups.
    spsq = ((Ni - 1) * ssq).sum() / (Ntot - k)

    numer = (Ntot - k) * log(spsq) - ((Ni - 1.0) * log(ssq)).sum()
    # Bartlett's correction factor for the chi-squared approximation.
    correction = ((1.0 / (Ni - 1.0)).sum() - 1.0 / (Ntot - k)) / (3.0 * (k - 1))
    T = numer / (1.0 + correction)
    pval = distributions.chi2.sf(T, k - 1)  # survival function = 1 - cdf

    return BartlettResult(T, pval)
LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))
def levene(*args, **kwds):
    """
    Perform Levene test for equal variances.

    The Levene test tests the null hypothesis that all input samples
    are from populations with equal variances. Levene's test is an
    alternative to Bartlett's test `bartlett` in the case where
    there are significant deviations from normality.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample data, possibly with different lengths
    center : {'mean', 'median', 'trimmed'}, optional
        Which function of the data to use in the test. The default
        is 'median'.
    proportiontocut : float, optional
        When `center` is 'trimmed', this gives the proportion of data points
        to cut from each end. (See `scipy.stats.trim_mean`.)
        Default is 0.05.

    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value for the test.

    Notes
    -----
    Three variations of Levene's test are possible. The possibilities
    and their recommended usages are:

    * 'median' : Recommended for skewed (non-normal) distributions.
    * 'mean' : Recommended for symmetric, moderate-tailed distributions.
    * 'trimmed' : Recommended for heavy-tailed distributions.

    References
    ----------
    .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
    .. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
           Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
           Stanford University Press, pp. 278-292.
    .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
           Statistical Association, 69, 364-367
    """
    # Handle keyword arguments (the *args signature precludes normal
    # keyword parameters, so validate by hand).
    center = 'median'
    proportiontocut = 0.05
    for kw, value in kwds.items():
        if kw not in ['center', 'proportiontocut']:
            raise TypeError("levene() got an unexpected keyword "
                            "argument '%s'" % kw)
        if kw == 'center':
            center = value
        else:
            proportiontocut = value

    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")
    if center not in ['mean', 'median', 'trimmed']:
        # BUG FIX: the two message fragments previously concatenated
        # without a space ("'median'or 'trimmed'").
        raise ValueError("Keyword argument <center> must be 'mean', 'median'"
                         " or 'trimmed'.")

    if center == 'median':
        func = lambda x: np.median(x, axis=0)
    elif center == 'mean':
        func = lambda x: np.mean(x, axis=0)
    else:  # center == 'trimmed'
        args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
                     for arg in args)
        func = lambda x: np.mean(x, axis=0)

    # Group sizes and group centers (after trimming, when requested).
    Ni = np.array([len(arg) for arg in args], dtype=float)
    Yci = np.array([func(arg) for arg in args], dtype=float)
    Ntot = Ni.sum()

    # Absolute deviations of each observation from its group center.
    Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]
    # Per-group mean deviation and grand (size-weighted) mean deviation.
    Zbari = np.array([np.mean(z, axis=0) for z in Zij])
    Zbar = np.sum(Zbari * Ni) / Ntot

    numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0)
    # Within-group variation of the deviations.
    dvar = sum(np.sum((Zij[i] - Zbari[i])**2, axis=0) for i in range(k))
    denom = (k - 1.0) * dvar

    W = numer / denom
    pval = distributions.f.sf(W, k - 1, Ntot - k)  # 1 - cdf
    return LeveneResult(W, pval)
@setastest(False)
def binom_test(x, n=None, p=0.5, alternative='two-sided'):
    """
    Perform a test that the probability of success is p.

    This is an exact test of the null hypothesis that the probability
    of success in a Bernoulli experiment is `p`.

    Parameters
    ----------
    x : integer or array_like
        the number of successes, or if x has length 2, it is the
        number of successes and the number of failures.
    n : integer
        the number of trials. This is ignored if x gives both the
        number of successes and failures
    p : float, optional
        The hypothesized probability of success. 0 <= p <= 1. The
        default value is p = 0.5
    alternative : {'two-sided', 'greater', 'less'}, optional
        Indicates the alternative hypothesis. The default is 'two-sided'.

    Returns
    -------
    p-value : float
        The p-value of the hypothesis test

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Binomial_test
    """
    x = atleast_1d(x).astype(np.integer)
    if len(x) == 2:
        # x contains (successes, failures); derive the trial count.
        n = x[1] + x[0]
        x = x[0]
    elif len(x) == 1:
        x = x[0]
        if n is None or n < x:
            raise ValueError("n must be >= x")
        n = np.int_(n)
    else:
        raise ValueError("Incorrect length for x.")

    if (p > 1.0) or (p < 0.0):
        raise ValueError("p must be in range [0,1]")

    if alternative not in ('two-sided', 'less', 'greater'):
        raise ValueError("alternative not recognized\n"
                         "should be 'two-sided', 'less' or 'greater'")

    if alternative == 'less':
        pval = distributions.binom.cdf(x, n, p)
        return pval

    if alternative == 'greater':
        pval = distributions.binom.sf(x-1, n, p)
        return pval

    # if alternative was neither 'less' nor 'greater', then it's 'two-sided'
    d = distributions.binom.pmf(x, n, p)
    # Relative tolerance so outcomes with (numerically) equal probability
    # to the observed one are included in the opposite tail.
    rerr = 1 + 1e-7
    if x == p * n:
        # special case as shortcut, would also be handled by `else` below
        pval = 1.
    elif x < p * n:
        # `y` counts upper-tail outcomes at least as extreme as x, i.e.
        # whose pmf does not exceed pmf(x) (within rounding slack).
        i = np.arange(np.ceil(p * n), n+1)
        y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
        pval = (distributions.binom.cdf(x, n, p) +
                distributions.binom.sf(n - y, n, p))
    else:
        # Mirror image: count lower-tail outcomes at least as extreme.
        i = np.arange(np.floor(p*n) + 1)
        y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
        pval = (distributions.binom.cdf(y-1, n, p) +
                distributions.binom.sf(x-1, n, p))

    return min(1.0, pval)
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = []
for k in range(len(g) - 1):
output.append(func(x[g[k]:g[k+1]]))
return asarray(output)
FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue'))
def fligner(*args, **kwds):
    """
    Perform Fligner-Killeen test for equality of variance.

    Fligner's test tests the null hypothesis that all input samples
    are from populations with equal variances. Fligner-Killeen's test is
    distribution free when populations are identical [2]_.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        Arrays of sample data. Need not be the same length.
    center : {'mean', 'median', 'trimmed'}, optional
        Keyword argument controlling which function of the data is used in
        computing the test statistic. The default is 'median'.
    proportiontocut : float, optional
        When `center` is 'trimmed', this gives the proportion of data points
        to cut from each end. (See `scipy.stats.trim_mean`.)
        Default is 0.05.

    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value for the hypothesis test.

    See Also
    --------
    bartlett : A parametric test for equality of k variances in normal samples
    levene : A robust parametric test for equality of k variances

    Notes
    -----
    As with Levene's test there are three variants of Fligner's test that
    differ by the measure of central tendency used in the test. See `levene`
    for more information.

    Conover et al. (1981) examine many of the existing parametric and
    nonparametric tests by extensive simulations and they conclude that the
    tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
    superior in terms of robustness of departures from normality and power [3]_.

    References
    ----------
    .. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps
    .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
           tests for scale. 'Journal of the American Statistical Association.'
           71(353), 210-213.
    .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
           Hypothesis Testing based on Quadratic Inference Function. Technical
           Report #99-03, Center for Likelihood Studies, Pennsylvania State
           University.
    .. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A
           comparative study of tests for homogeneity of variances, with
           applications to the outer continental shelf biding data.
           Technometrics, 23(4), 351-361.
    """
    # Handle empty input: the statistic is undefined for empty samples.
    for a in args:
        if np.asanyarray(a).size == 0:
            return FlignerResult(np.nan, np.nan)

    # Handle keyword arguments (the *args signature precludes normal
    # keyword parameters, so validate by hand).
    center = 'median'
    proportiontocut = 0.05
    for kw, value in kwds.items():
        if kw not in ['center', 'proportiontocut']:
            raise TypeError("fligner() got an unexpected keyword "
                            "argument '%s'" % kw)
        if kw == 'center':
            center = value
        else:
            proportiontocut = value

    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")

    if center not in ['mean', 'median', 'trimmed']:
        # BUG FIX: the two message fragments previously concatenated
        # without a space ("'median'or 'trimmed'").
        raise ValueError("Keyword argument <center> must be 'mean', 'median'"
                         " or 'trimmed'.")

    if center == 'median':
        func = lambda x: np.median(x, axis=0)
    elif center == 'mean':
        func = lambda x: np.mean(x, axis=0)
    else:  # center == 'trimmed'
        args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
        func = lambda x: np.mean(x, axis=0)

    Ni = asarray([len(args[j]) for j in range(k)])
    Yci = asarray([func(args[j]) for j in range(k)])
    Ntot = np.sum(Ni, axis=0)
    # Absolute deviations of each observation from its group center.
    Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]
    allZij = []
    g = [0]
    for i in range(k):
        allZij.extend(list(Zij[i]))
        g.append(len(allZij))

    # Rank the pooled deviations and map the ranks through the normal
    # quantile function (the Fligner-Killeen scores).
    ranks = stats.rankdata(allZij)
    a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)

    # Per-group mean scores versus the overall mean score.
    Aibar = _apply_func(a, g, np.sum) / Ni
    anbar = np.mean(a, axis=0)
    varsq = np.var(a, axis=0, ddof=1)
    Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
    pval = distributions.chi2.sf(Xsq, k - 1)  # 1 - cdf
    return FlignerResult(Xsq, pval)
def mood(x, y, axis=0):
    """
    Perform Mood's test for equal scale parameters.

    Mood's two-sample test for scale parameters is a non-parametric
    test for the null hypothesis that two samples are drawn from the
    same distribution with the same scale parameter.

    Parameters
    ----------
    x, y : array_like
        Arrays of sample data.
    axis : int, optional
        The axis along which the samples are tested. `x` and `y` can be of
        different length along `axis`.
        If `axis` is None, `x` and `y` are flattened and the test is done on
        all values in the flattened arrays.

    Returns
    -------
    z : scalar or ndarray
        The z-score for the hypothesis test. For 1-D inputs a scalar is
        returned.
    p-value : scalar ndarray
        The p-value for the hypothesis test.

    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    ansari : A non-parametric test for the equality of 2 variances
    bartlett : A parametric test for equality of k variances in normal samples
    levene : A parametric test for equality of k variances

    Notes
    -----
    The data are assumed to be drawn from probability distributions ``f(x)``
    and ``f(x/s) / s`` respectively, for some probability density function f.
    The null hypothesis is that ``s == 1``.

    For multi-dimensional arrays, if the inputs are of shapes
    ``(n0, n1, n2, n3)``  and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
    resulting z and p values will have shape ``(n0, n2, n3)``.  Note that
    ``n1`` and ``m1`` don't have to be equal, but the other dimensions do.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> x2 = np.random.randn(2, 45, 6, 7)
    >>> x1 = np.random.randn(2, 30, 6, 7)
    >>> z, p = stats.mood(x1, x2, axis=1)
    >>> p.shape
    (2, 6, 7)

    Find the number of points where the difference in scale is not significant:

    >>> (p > 0.1).sum()
    74

    Perform the test with different scales:

    >>> x1 = np.random.randn(2, 30)
    >>> x2 = np.random.randn(2, 35) * 10.0
    >>> stats.mood(x1, x2, axis=1)
    (array([-5.7178125 , -5.25342163]), array([  1.07904114e-08,   1.49299218e-07]))
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    if axis is None:
        x = x.flatten()
        y = y.flatten()
        axis = 0
    # Determine shape of the result arrays: all dimensions except `axis`
    # must agree between x and y.
    res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
    if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
                                ax != axis])):
        raise ValueError("Dimensions of x and y on all axes except `axis` "
                         "should match")
    n = x.shape[axis]
    m = y.shape[axis]
    N = m + n
    if N < 3:
        raise ValueError("Not enough observations.")
    xy = np.concatenate((x, y), axis=axis)
    # Move the test axis to the front and collapse the rest to one column
    # axis, so rankdata can be applied column by column.
    if axis != 0:
        xy = np.rollaxis(xy, axis)
    xy = xy.reshape(xy.shape[0], -1)
    # Generalized to the n-dimensional case by adding the axis argument, and
    # using for loops, since rankdata is not vectorized.  For improving
    # performance consider vectorizing rankdata function.
    all_ranks = np.zeros_like(xy)
    for j in range(xy.shape[1]):
        all_ranks[:, j] = stats.rankdata(xy[:, j])
    # Mood's statistic: squared deviations of the x-sample ranks from the
    # mean rank of the pooled sample.
    Ri = all_ranks[:n]
    M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0)
    # Approx stat. — null mean and variance of M for the normal approximation.
    mnM = n * (N * N - 1.0) / 12
    varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
    z = (M - mnM) / sqrt(varM)
    # sf for right tail, cdf for left tail.  Factor 2 for two-sidedness
    z_pos = z > 0
    pval = np.zeros_like(z)
    pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
    pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])
    if res_shape == ():
        # Return scalars, not 0-D arrays
        z = z[0]
        pval = pval[0]
    else:
        z.shape = res_shape
        pval.shape = res_shape
    return z, pval
WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue'))
def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
    """
    Calculate the Wilcoxon signed-rank test.

    The Wilcoxon signed-rank test tests the null hypothesis that two
    related paired samples come from the same distribution. In particular,
    it tests whether the distribution of the differences x - y is symmetric
    about zero. It is a non-parametric version of the paired T-test.

    Parameters
    ----------
    x : array_like
        The first set of measurements.
    y : array_like, optional
        The second set of measurements. If `y` is not given, then the `x`
        array is considered to be the differences between the two sets of
        measurements.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        "pratt":
            Pratt treatment: includes zero-differences in the ranking process
            (more conservative)
        "wilcox":
            Wilcox treatment: discards all zero-differences
        "zsplit":
            Zero rank split: just like Pratt, but spliting the zero rank
            between positive and negative ones
    correction : bool, optional
        If True, apply continuity correction by adjusting the Wilcoxon rank
        statistic by 0.5 towards the mean value when computing the
        z-statistic. Default is False.

    Returns
    -------
    statistic : float
        The sum of the ranks of the differences above or below zero, whichever
        is smaller.
    pvalue : float
        The two-sided p-value for the test.

    Notes
    -----
    Because the normal approximation is used for the calculations, the
    samples used should be large. A typical rule is to require that
    n > 20.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
    """
    if zero_method not in ["wilcox", "pratt", "zsplit"]:
        raise ValueError("Zero method should be either 'wilcox' "
                         "or 'pratt' or 'zsplit'")

    # Work on the paired differences; `x` alone is already the differences.
    if y is None:
        d = x
    else:
        x, y = map(asarray, (x, y))
        if len(x) != len(y):
            raise ValueError('Unequal N in wilcoxon. Aborting.')
        d = x - y

    if zero_method == "wilcox":
        # Wilcox treatment: drop the zero differences before ranking.
        d = compress(np.not_equal(d, 0), d, axis=-1)

    n_obs = len(d)
    if n_obs < 10:
        warnings.warn("Warning: sample size too small for normal approximation.")

    ranks = stats.rankdata(abs(d))
    pos_sum = np.sum((d > 0) * ranks, axis=0)
    neg_sum = np.sum((d < 0) * ranks, axis=0)

    if zero_method == "zsplit":
        # Split the rank mass of the zero differences evenly over both sides.
        zero_sum = np.sum((d == 0) * ranks, axis=0)
        pos_sum += zero_sum / 2.
        neg_sum += zero_sum / 2.

    # Test statistic: the smaller of the two signed-rank sums.
    T = min(pos_sum, neg_sum)
    mn = n_obs * (n_obs + 1.) * 0.25
    se = n_obs * (n_obs + 1.) * (2. * n_obs + 1.)

    if zero_method == "pratt":
        # Zeros took part in the ranking but are excluded from the
        # tie-correction below.
        ranks = ranks[d != 0]

    replist, repnum = find_repeats(ranks)
    if repnum.size != 0:
        # Correction for repeated elements.
        se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()

    se = sqrt(se / 24)
    # Optional continuity correction: shift T half a unit toward the mean.
    shift = 0.5 * int(bool(correction)) * np.sign(T - mn)
    z = (T - mn - shift) / se
    prob = 2. * distributions.norm.sf(abs(z))
    return WilcoxonResult(T, prob)
@setastest(False)
def median_test(*args, **kwds):
    """
    Mood's median test.

    Test that two or more samples come from populations with the same median.

    Let ``n = len(args)`` be the number of samples. The "grand median" of
    all the data is computed, and a contingency table is formed by
    classifying the values in each sample as being above or below the grand
    median. The contingency table, along with `correction` and `lambda_`,
    are passed to `scipy.stats.chi2_contingency` to compute the test
    statistic and p-value.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The set of samples. There must be at least two samples.
        Each sample must be a one-dimensional sequence containing at least
        one value. The samples are not required to have the same length.
    ties : str, optional
        Determines how values equal to the grand median are classified in
        the contingency table. The string must be one of::

            "below":
                Values equal to the grand median are counted as "below".
            "above":
                Values equal to the grand median are counted as "above".
            "ignore":
                Values equal to the grand median are not counted.

        The default is "below".
    correction : bool, optional
        If True, *and* there are just two samples, apply Yates' correction
        for continuity when computing the test statistic associated with
        the contingency table. Default is True.
    lambda_ : float or str, optional.
        By default, the statistic computed in this test is Pearson's
        chi-squared statistic. `lambda_` allows a statistic from the
        Cressie-Read power divergence family to be used instead. See
        `power_divergence` for details.
        Default is 1 (Pearson's chi-squared statistic).

    Returns
    -------
    stat : float
        The test statistic. The statistic that is returned is determined by
        `lambda_`. The default is Pearson's chi-squared statistic.
    p : float
        The p-value of the test.
    m : float
        The grand median.
    table : ndarray
        The contingency table. The shape of the table is (2, n), where
        n is the number of samples. The first row holds the counts of the
        values above the grand median, and the second row holds the counts
        of the values below the grand median. The table allows further
        analysis with, for example, `scipy.stats.chi2_contingency`, or with
        `scipy.stats.fisher_exact` if there are two samples, without having
        to recompute the table.

    See Also
    --------
    kruskal : Compute the Kruskal-Wallis H-test for independent samples.
    mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.

    Notes
    -----
    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
        (1950), pp. 394-399.
    .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
        See Sections 8.12 and 10.15.

    Examples
    --------
    >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
    >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
    >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
    >>> from scipy.stats import median_test
    >>> stat, p, med, tbl = median_test(g1, g2, g3)
    >>> med
    34.0
    >>> tbl
    array([[ 5, 10,  7],
           [11,  5, 10]])
    >>> p
    0.12609082774093244
    """
    # Parse keyword-only options by hand (the *args signature precludes
    # normal keyword parameters).
    ties = kwds.pop('ties', 'below')
    correction = kwds.pop('correction', True)
    lambda_ = kwds.pop('lambda_', None)
    if len(kwds) > 0:
        # BUG FIX: dict views are not indexable on Python 3, so
        # `kwds.keys()[0]` raised the wrong TypeError here.
        bad_kwd = next(iter(kwds))
        raise TypeError("median_test() got an unexpected keyword "
                        "argument %r" % bad_kwd)

    if len(args) < 2:
        raise ValueError('median_test requires two or more samples.')

    ties_options = ['below', 'above', 'ignore']
    if ties not in ties_options:
        raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
                         "of: %s" % (ties, str(ties_options)[1:-1]))

    data = [np.asarray(arg) for arg in args]

    # Validate the sizes and shapes of the arguments.
    for k, d in enumerate(data):
        if d.size == 0:
            raise ValueError("Sample %d is empty. All samples must "
                             "contain at least one value." % (k + 1))
        if d.ndim != 1:
            raise ValueError("Sample %d has %d dimensions. All "
                             "samples must be one-dimensional sequences." %
                             (k + 1, d.ndim))

    grand_median = np.median(np.concatenate(data))

    # Create the contingency table: row 0 counts values above the grand
    # median, row 1 counts values below; ties go where `ties` says.
    table = np.zeros((2, len(data)), dtype=np.int64)
    for k, sample in enumerate(data):
        nabove = count_nonzero(sample > grand_median)
        nbelow = count_nonzero(sample < grand_median)
        nequal = sample.size - (nabove + nbelow)
        table[0, k] += nabove
        table[1, k] += nbelow
        if ties == "below":
            table[1, k] += nequal
        elif ties == "above":
            table[0, k] += nequal

    # Check that no row or column of the table is all zero.
    # Such a table can not be given to chi2_contingency, because it would have
    # a zero in the table of expected frequencies.
    rowsums = table.sum(axis=1)
    if rowsums[0] == 0:
        raise ValueError("All values are below the grand median (%r)." %
                         grand_median)
    if rowsums[1] == 0:
        raise ValueError("All values are above the grand median (%r)." %
                         grand_median)
    if ties == "ignore":
        # We already checked that each sample has at least one value, but it
        # is possible that all those values equal the grand median.  If `ties`
        # is "ignore", that would result in a column of zeros in `table`.  We
        # check for that case here.
        zero_cols = np.where((table == 0).all(axis=0))[0]
        if len(zero_cols) > 0:
            msg = ("All values in sample %d are equal to the grand "
                   "median (%r), so they are ignored, resulting in an "
                   "empty sample." % (zero_cols[0] + 1, grand_median))
            raise ValueError(msg)

    stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
                                              correction=correction)
    return stat, p, grand_median, table
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None] * N
plist[0] = poly1d(1)
for n in range(1, N):
plist[n] = plist[n-1].deriv() - poly1d([1, 0]) * plist[n-1]
return plist
# Note: when removing pdf_fromgamma, also remove the _hermnorm support function
@np.deprecate(message="scipy.stats.pdf_fromgamma is deprecated in scipy 0.16.0 "
              "in favour of statsmodels.distributions.ExpandedNormal.")
def pdf_fromgamma(g1, g2, g3=0.0, g4=None):
    """Reconstruct an approximate pdf from moment-derived coefficients.

    Builds a polynomial expansion around a normal density from the
    coefficients `g1` ... `g4` (with `g4` defaulting to ``3 * g2**2``)
    using the `_hermnorm` basis, and returns the resulting density as a
    callable of one argument.

    NOTE(review): the deprecation message points to
    statsmodels.distributions.ExpandedNormal, so this appears to be a
    Gram-Charlier/Edgeworth-type expansion; the coefficient table below is
    taken on trust from the original implementation.
    """
    if g4 is None:
        g4 = 3 * g2**2
    sigsq = 1.0 / g2
    sig = sqrt(sigsq)
    mu = g1 * sig**3.0
    # Basis polynomials, rescaled by powers of sigma so they act on the
    # unstandardized coordinate.
    p12 = _hermnorm(13)
    for k in range(13):
        p12[k] /= sig**k
    # Add all of the terms to polynomial
    totp = (p12[0] - g1/6.0*p12[3] +
            g2/24.0*p12[4] + g1**2/72.0 * p12[6] -
            g3/120.0*p12[5] - g1*g2/144.0*p12[7] - g1**3.0/1296.0*p12[9] +
            g4/720*p12[6] + (g2**2/1152.0 + g1*g3/720)*p12[8] +
            g1**2 * g2/1728.0*p12[10] + g1**4.0 / 31104.0*p12[12])
    # Final normalization
    totp = totp / sqrt(2*pi) / sig
    def thefunc(x):
        # Evaluate the expansion at the standardized coordinate.
        xn = (x - mu) / sig
        return totp(xn) * exp(-xn**2 / 2.)
    return thefunc
def _circfuncs_common(samples, high, low):
    """Coerce ``samples`` to an ndarray and map it to angles in radians.

    The interval [low, high] is rescaled onto a full circle of 2*pi
    radians.  Returns the pair ``(array, angles)``; an empty input yields
    ``(nan, nan)`` instead.
    """
    arr = np.asarray(samples)
    if arr.size == 0:
        return np.nan, np.nan
    ang = (arr - low) * 2 * pi / (high - low)
    return arr, ang
def circmean(samples, high=2*pi, low=0, axis=None):
    """
    Compute the circular mean for samples in a range.

    Parameters
    ----------
    samples : array_like
        Input array.
    high : float or int, optional
        High boundary for the circular mean range.  Default is ``2*pi``.
    low : float or int, optional
        Low boundary for the circular mean range.  Default is 0.
    axis : int, optional
        Axis along which the means are computed.  By default the
        flattened array is used.

    Returns
    -------
    circmean : float
        Circular mean.
    """
    samples, ang = _circfuncs_common(samples, high, low)
    # Direction of the mean resultant vector of the unit phasors.
    res = angle(np.mean(exp(1j * ang), axis=axis))
    # angle() returns values in (-pi, pi]; fold negatives into [0, 2*pi).
    negative = res < 0
    if negative.ndim > 0:
        res[negative] += 2 * pi
    elif negative:
        res += 2 * pi
    # Map the angle back onto the caller's [low, high) interval.
    return res * (high - low) / 2.0 / pi + low
def circvar(samples, high=2*pi, low=0, axis=None):
    """
    Compute the circular variance for samples assumed to be in a range.

    Parameters
    ----------
    samples : array_like
        Input array.
    low : float or int, optional
        Low boundary for the circular variance range.  Default is 0.
    high : float or int, optional
        High boundary for the circular variance range.  Default is ``2*pi``.
    axis : int, optional
        Axis along which the variances are computed.  By default the
        flattened array is used.

    Returns
    -------
    circvar : float
        Circular variance.

    Notes
    -----
    The definition used here approaches the ordinary ('linear') variance
    in the limit of small angular spread.
    """
    samples, ang = _circfuncs_common(samples, high, low)
    # R is the length of the mean resultant vector; R == 1 means no spread.
    R = abs(np.mean(exp(1j * ang), axis=axis))
    return ((high - low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
    """
    Compute the circular standard deviation for samples assumed to lie in
    the range [low, high].

    Parameters
    ----------
    samples : array_like
        Input array.
    low : float or int, optional
        Low boundary for the circular standard deviation range.
        Default is 0.
    high : float or int, optional
        High boundary for the circular standard deviation range.
        Default is ``2*pi``.
    axis : int, optional
        Axis along which the standard deviations are computed.  By
        default the flattened array is used.

    Returns
    -------
    circstd : float
        Circular standard deviation.

    Notes
    -----
    The definition used here approaches the ordinary ('linear') standard
    deviation in the limit of small angular spread.
    """
    samples, ang = _circfuncs_common(samples, high, low)
    # R is the length of the mean resultant vector; R == 1 means no spread.
    R = abs(np.mean(exp(1j * ang), axis=axis))
    return ((high - low)/2.0/pi) * sqrt(-2*log(R))
# Tests to include (from R) -- some of these already in stats.
########
# X Ansari-Bradley
# X Bartlett (and Levene)
# X Binomial
# Y Pearson's Chi-squared (stats.chisquare)
# Y Association Between Paired samples (stats.pearsonr, stats.spearmanr)
# stats.kendalltau) -- these need work though
# Fisher's exact test
# X Fligner-Killeen Test
# Y Friedman Rank Sum (stats.friedmanchisquare?)
# Y Kruskal-Wallis
# Y Kolmogorov-Smirnov
# Cochran-Mantel-Haenszel Chi-Squared for Count
# McNemar's Chi-squared for Count
# X Mood Two-Sample
# X Test For Equal Means in One-Way Layout (see stats.ttest also)
# Pairwise Comparisons of proportions
# Pairwise t tests
# Tabulate p values for pairwise comparisons
# Pairwise Wilcoxon rank sum tests
# Power calculations two sample test of prop.
# Power calculations for one and two sample t tests
# Equal or Given Proportions
# Trend in Proportions
# Quade Test
# Y Student's T Test
# Y F Test to compare two variances
# XY Wilcoxon Rank Sum and Signed Rank Tests
| mit |
MobinRanjbar/hue | desktop/core/ext-py/Django-1.6.10/tests/view_tests/views.py | 49 | 10403 | from __future__ import absolute_import
import sys
from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.core.urlresolvers import get_resolver
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, render
from django.template import Context, RequestContext, TemplateDoesNotExist
from django.views.debug import technical_500_response, SafeExceptionReporterFilter
from django.views.decorators.debug import (sensitive_post_parameters,
sensitive_variables)
from django.utils.log import getLogger
from . import BrokenException, except_args
def index_page(request):
    """Dummy index page"""
    return HttpResponse('<html><body>Dummy page</body></html>')
def raises(request):
    """Return a technical 500 response built inside an except block."""
    # Make sure that a callable that raises an exception in the stack frame's
    # local vars won't hijack the technical 500 response. See:
    # http://code.djangoproject.com/ticket/15025
    # (The inner name deliberately shadows the `callable` builtin; it is
    # part of what the debug view must cope with.)
    def callable():
        raise Exception
    try:
        raise Exception
    except Exception:
        return technical_500_response(request, *sys.exc_info())
def raises400(request):
    """Raise SuspiciousOperation, which Django maps to a 400 response."""
    raise SuspiciousOperation
def raises403(request):
    """Raise PermissionDenied, which Django maps to a 403 response."""
    raise PermissionDenied
def raises404(request):
    """Resolve an unmatchable URL ('') so the resolver raises a 404-type
    error (per this view's name)."""
    resolver = get_resolver(None)
    resolver.resolve('')
def redirect(request):
    """
    Forces an HTTP redirect.
    """
    return HttpResponseRedirect("target/")
def view_exception(request, n):
    """Raise the pre-canned BrokenException number ``n``."""
    raise BrokenException(except_args[int(n)])
def template_exception(request, n):
    """Render the debug template with pre-canned exception argument ``n``."""
    return render_to_response('debug/template_exception.html',
                              {'arg': except_args[int(n)]})
def jsi18n(request):
    """Render the JavaScript i18n test template."""
    return render_to_response('jsi18n.html')
# Some views to exercise the shortcuts
def render_to_response_view(request):
    """Exercise render_to_response() with a plain context dict."""
    return render_to_response('debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    })
def render_to_response_view_with_request_context(request):
    """Exercise render_to_response() with an explicit RequestContext."""
    return render_to_response('debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, context_instance=RequestContext(request))
def render_to_response_view_with_content_type(request):
    """Exercise render_to_response() with a custom content type."""
    return render_to_response('debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, content_type='application/x-rendertest')
def render_view(request):
    """Exercise the render() shortcut with a plain context dict."""
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    })
def render_view_with_base_context(request):
    """Exercise render() with a plain (non-request) Context instance."""
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, context_instance=Context())
def render_view_with_content_type(request):
    """Exercise render() with a custom content type."""
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, content_type='application/x-rendertest')
def render_view_with_status(request):
    """Exercise render() with a non-default HTTP status code (403)."""
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, status=403)
def render_view_with_current_app(request):
    """Exercise render() with a current_app override."""
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, current_app="foobar_app")
def render_view_with_current_app_conflict(request):
    # This should fail because we're passing both a current_app and a
    # context_instance:
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, current_app="foobar_app", context_instance=RequestContext(request))
def raises_template_does_not_exist(request, path='i_dont_exist.html'):
    """Trigger TemplateDoesNotExist and return the technical 500 page."""
    # We need to inspect the HTML generated by the fancy 500 debug view but
    # the test client ignores it, so we send it explicitly.
    try:
        return render_to_response(path)
    except TemplateDoesNotExist:
        return technical_500_response(request, *sys.exc_info())
def render_no_template(request):
    # If we do not specify a template, we need to make sure the debug
    # view doesn't blow up.
    return render(request, [], {})
def send_log(request, exc_info):
    """Log an 'Internal Server Error' through the 'django.request' logger.

    The default logging config filters admin error emails out when
    DEBUG=True; to exercise the email path under DEBUG anyway, the
    AdminEmailHandler's filters are emptied for the duration of the call
    and restored afterwards.
    """
    logger = getLogger('django.request')
    # Locate the admin email handler among the logger's handlers; matching
    # by class name avoids importing the handler class here.
    email_handlers = [
        h for h in logger.handlers
        if h.__class__.__name__ == "AdminEmailHandler"
    ]
    admin_email_handler = email_handlers[0]
    # Temporarily drop its filters (e.g. the DEBUG=False-only filter).
    saved_filters = admin_email_handler.filters
    admin_email_handler.filters = []
    admin_email_handler.include_html = True
    logger.error(
        'Internal Server Error: %s', request.path,
        exc_info=exc_info,
        extra={'status_code': 500, 'request': request},
    )
    admin_email_handler.filters = saved_filters
def non_sensitive_view(request):
    """Undecorated view: nothing in the error report should be masked."""
    # Do not just use plain strings for the variables' values in the code
    # so that the tests don't return false positives when the function's source
    # is displayed in the exception report.
    cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
    sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
    try:
        raise Exception
    except Exception:
        exc_info = sys.exc_info()
        send_log(request, exc_info)
        return technical_500_response(request, *exc_info)
@sensitive_variables('sauce')
@sensitive_post_parameters('bacon-key', 'sausage-key')
def sensitive_view(request):
    """View whose 'sauce' local and two POST keys are marked sensitive."""
    # Do not just use plain strings for the variables' values in the code
    # so that the tests don't return false positives when the function's source
    # is displayed in the exception report.
    cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
    sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
    try:
        raise Exception
    except Exception:
        exc_info = sys.exc_info()
        send_log(request, exc_info)
        return technical_500_response(request, *exc_info)
@sensitive_variables()
@sensitive_post_parameters()
def paranoid_view(request):
    """Decorators with no arguments: every local variable and every POST
    parameter is treated as sensitive."""
    # Do not just use plain strings for the variables' values in the code
    # so that the tests don't return false positives when the function's source
    # is displayed in the exception report.
    cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
    sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
    try:
        raise Exception
    except Exception:
        exc_info = sys.exc_info()
        send_log(request, exc_info)
        return technical_500_response(request, *exc_info)
def sensitive_args_function_caller(request):
    """Call a @sensitive_variables-decorated function with a positional
    argument and report the resulting exception."""
    try:
        sensitive_args_function(''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']))
    except Exception:
        exc_info = sys.exc_info()
        send_log(request, exc_info)
        return technical_500_response(request, *exc_info)
@sensitive_variables('sauce')
def sensitive_args_function(sauce):
    """Always raises; 'sauce' arrives as a positional argument."""
    # Do not just use plain strings for the variables' values in the code
    # so that the tests don't return false positives when the function's source
    # is displayed in the exception report.
    cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
    raise Exception
def sensitive_kwargs_function_caller(request):
    """Like sensitive_args_function_caller, but the target's sensitive
    parameter is a keyword parameter (passed positionally here)."""
    try:
        sensitive_kwargs_function(''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']))
    except Exception:
        exc_info = sys.exc_info()
        send_log(request, exc_info)
        return technical_500_response(request, *exc_info)
@sensitive_variables('sauce')
def sensitive_kwargs_function(sauce=None):
    """Always raises; 'sauce' is a keyword parameter with a default."""
    # Do not just use plain strings for the variables' values in the code
    # so that the tests don't return false positives when the function's source
    # is displayed in the exception report.
    cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
    raise Exception
class UnsafeExceptionReporterFilter(SafeExceptionReporterFilter):
    """
    Ignores all the filtering done by its parent class.
    """
    def get_post_parameters(self, request):
        # Return POST data unmasked, bypassing the parent's scrubbing.
        return request.POST
    def get_traceback_frame_variables(self, request, tb_frame):
        # Expose every frame-local variable unmasked.
        return tb_frame.f_locals.items()
@sensitive_variables()
@sensitive_post_parameters()
def custom_exception_reporter_filter_view(request):
    """View that installs an unsafe per-request reporter filter, which
    should override the sensitive-data decorators above."""
    # Do not just use plain strings for the variables' values in the code
    # so that the tests don't return false positives when the function's source
    # is displayed in the exception report.
    cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
    sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
    request.exception_reporter_filter = UnsafeExceptionReporterFilter()
    try:
        raise Exception
    except Exception:
        exc_info = sys.exc_info()
        send_log(request, exc_info)
        return technical_500_response(request, *exc_info)
class Klass(object):
    # Fixture proving @sensitive_variables also works on methods.
    @sensitive_variables('sauce')
    def method(self, request):
        # Do not just use plain strings for the variables' values in the code
        # so that the tests don't return false positives when the function's
        # source is displayed in the exception report.
        cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
        sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
        try:
            raise Exception
        except Exception:
            exc_info = sys.exc_info()
            send_log(request, exc_info)
            return technical_500_response(request, *exc_info)
def sensitive_method_view(request):
    """Dispatch to the sensitive-variables-decorated method above."""
    return Klass().method(request)
@sensitive_variables('sauce')
@sensitive_post_parameters('bacon-key', 'sausage-key')
def multivalue_dict_key_error(request):
    """Trigger a MultiValueDictKeyError (missing POST key) and report it."""
    cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
    sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
    try:
        request.POST['bar']
    except Exception:
        exc_info = sys.exc_info()
        send_log(request, exc_info)
        return technical_500_response(request, *exc_info)
| apache-2.0 |
emc-tridax/appengine-python-flask-master | lib/mysql/connector/charsets.py | 16 | 12326 | # -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This file was auto-generated.
_GENERATED_ON = '2014-05-23'
_MYSQL_VERSION = (5, 7, 4)
"""This module contains the MySQL Server Character Sets"""
MYSQL_CHARACTER_SETS = [
# (character set name, collation, default)
None,
("big5", "big5_chinese_ci", True), # 1
("latin2", "latin2_czech_cs", False), # 2
("dec8", "dec8_swedish_ci", True), # 3
("cp850", "cp850_general_ci", True), # 4
("latin1", "latin1_german1_ci", False), # 5
("hp8", "hp8_english_ci", True), # 6
("koi8r", "koi8r_general_ci", True), # 7
("latin1", "latin1_swedish_ci", True), # 8
("latin2", "latin2_general_ci", True), # 9
("swe7", "swe7_swedish_ci", True), # 10
("ascii", "ascii_general_ci", True), # 11
("ujis", "ujis_japanese_ci", True), # 12
("sjis", "sjis_japanese_ci", True), # 13
("cp1251", "cp1251_bulgarian_ci", False), # 14
("latin1", "latin1_danish_ci", False), # 15
("hebrew", "hebrew_general_ci", True), # 16
None,
("tis620", "tis620_thai_ci", True), # 18
("euckr", "euckr_korean_ci", True), # 19
("latin7", "latin7_estonian_cs", False), # 20
("latin2", "latin2_hungarian_ci", False), # 21
("koi8u", "koi8u_general_ci", True), # 22
("cp1251", "cp1251_ukrainian_ci", False), # 23
("gb2312", "gb2312_chinese_ci", True), # 24
("greek", "greek_general_ci", True), # 25
("cp1250", "cp1250_general_ci", True), # 26
("latin2", "latin2_croatian_ci", False), # 27
("gbk", "gbk_chinese_ci", True), # 28
("cp1257", "cp1257_lithuanian_ci", False), # 29
("latin5", "latin5_turkish_ci", True), # 30
("latin1", "latin1_german2_ci", False), # 31
("armscii8", "armscii8_general_ci", True), # 32
("utf8", "utf8_general_ci", True), # 33
("cp1250", "cp1250_czech_cs", False), # 34
("ucs2", "ucs2_general_ci", True), # 35
("cp866", "cp866_general_ci", True), # 36
("keybcs2", "keybcs2_general_ci", True), # 37
("macce", "macce_general_ci", True), # 38
("macroman", "macroman_general_ci", True), # 39
("cp852", "cp852_general_ci", True), # 40
("latin7", "latin7_general_ci", True), # 41
("latin7", "latin7_general_cs", False), # 42
("macce", "macce_bin", False), # 43
("cp1250", "cp1250_croatian_ci", False), # 44
("utf8mb4", "utf8mb4_general_ci", True), # 45
("utf8mb4", "utf8mb4_bin", False), # 46
("latin1", "latin1_bin", False), # 47
("latin1", "latin1_general_ci", False), # 48
("latin1", "latin1_general_cs", False), # 49
("cp1251", "cp1251_bin", False), # 50
("cp1251", "cp1251_general_ci", True), # 51
("cp1251", "cp1251_general_cs", False), # 52
("macroman", "macroman_bin", False), # 53
("utf16", "utf16_general_ci", True), # 54
("utf16", "utf16_bin", False), # 55
("utf16le", "utf16le_general_ci", True), # 56
("cp1256", "cp1256_general_ci", True), # 57
("cp1257", "cp1257_bin", False), # 58
("cp1257", "cp1257_general_ci", True), # 59
("utf32", "utf32_general_ci", True), # 60
("utf32", "utf32_bin", False), # 61
("utf16le", "utf16le_bin", False), # 62
("binary", "binary", True), # 63
("armscii8", "armscii8_bin", False), # 64
("ascii", "ascii_bin", False), # 65
("cp1250", "cp1250_bin", False), # 66
("cp1256", "cp1256_bin", False), # 67
("cp866", "cp866_bin", False), # 68
("dec8", "dec8_bin", False), # 69
("greek", "greek_bin", False), # 70
("hebrew", "hebrew_bin", False), # 71
("hp8", "hp8_bin", False), # 72
("keybcs2", "keybcs2_bin", False), # 73
("koi8r", "koi8r_bin", False), # 74
("koi8u", "koi8u_bin", False), # 75
None,
("latin2", "latin2_bin", False), # 77
("latin5", "latin5_bin", False), # 78
("latin7", "latin7_bin", False), # 79
("cp850", "cp850_bin", False), # 80
("cp852", "cp852_bin", False), # 81
("swe7", "swe7_bin", False), # 82
("utf8", "utf8_bin", False), # 83
("big5", "big5_bin", False), # 84
("euckr", "euckr_bin", False), # 85
("gb2312", "gb2312_bin", False), # 86
("gbk", "gbk_bin", False), # 87
("sjis", "sjis_bin", False), # 88
("tis620", "tis620_bin", False), # 89
("ucs2", "ucs2_bin", False), # 90
("ujis", "ujis_bin", False), # 91
("geostd8", "geostd8_general_ci", True), # 92
("geostd8", "geostd8_bin", False), # 93
("latin1", "latin1_spanish_ci", False), # 94
("cp932", "cp932_japanese_ci", True), # 95
("cp932", "cp932_bin", False), # 96
("eucjpms", "eucjpms_japanese_ci", True), # 97
("eucjpms", "eucjpms_bin", False), # 98
("cp1250", "cp1250_polish_ci", False), # 99
None,
("utf16", "utf16_unicode_ci", False), # 101
("utf16", "utf16_icelandic_ci", False), # 102
("utf16", "utf16_latvian_ci", False), # 103
("utf16", "utf16_romanian_ci", False), # 104
("utf16", "utf16_slovenian_ci", False), # 105
("utf16", "utf16_polish_ci", False), # 106
("utf16", "utf16_estonian_ci", False), # 107
("utf16", "utf16_spanish_ci", False), # 108
("utf16", "utf16_swedish_ci", False), # 109
("utf16", "utf16_turkish_ci", False), # 110
("utf16", "utf16_czech_ci", False), # 111
("utf16", "utf16_danish_ci", False), # 112
("utf16", "utf16_lithuanian_ci", False), # 113
("utf16", "utf16_slovak_ci", False), # 114
("utf16", "utf16_spanish2_ci", False), # 115
("utf16", "utf16_roman_ci", False), # 116
("utf16", "utf16_persian_ci", False), # 117
("utf16", "utf16_esperanto_ci", False), # 118
("utf16", "utf16_hungarian_ci", False), # 119
("utf16", "utf16_sinhala_ci", False), # 120
("utf16", "utf16_german2_ci", False), # 121
("utf16", "utf16_croatian_ci", False), # 122
("utf16", "utf16_unicode_520_ci", False), # 123
("utf16", "utf16_vietnamese_ci", False), # 124
None,
None,
None,
("ucs2", "ucs2_unicode_ci", False), # 128
("ucs2", "ucs2_icelandic_ci", False), # 129
("ucs2", "ucs2_latvian_ci", False), # 130
("ucs2", "ucs2_romanian_ci", False), # 131
("ucs2", "ucs2_slovenian_ci", False), # 132
("ucs2", "ucs2_polish_ci", False), # 133
("ucs2", "ucs2_estonian_ci", False), # 134
("ucs2", "ucs2_spanish_ci", False), # 135
("ucs2", "ucs2_swedish_ci", False), # 136
("ucs2", "ucs2_turkish_ci", False), # 137
("ucs2", "ucs2_czech_ci", False), # 138
("ucs2", "ucs2_danish_ci", False), # 139
("ucs2", "ucs2_lithuanian_ci", False), # 140
("ucs2", "ucs2_slovak_ci", False), # 141
("ucs2", "ucs2_spanish2_ci", False), # 142
("ucs2", "ucs2_roman_ci", False), # 143
("ucs2", "ucs2_persian_ci", False), # 144
("ucs2", "ucs2_esperanto_ci", False), # 145
("ucs2", "ucs2_hungarian_ci", False), # 146
("ucs2", "ucs2_sinhala_ci", False), # 147
("ucs2", "ucs2_german2_ci", False), # 148
("ucs2", "ucs2_croatian_ci", False), # 149
("ucs2", "ucs2_unicode_520_ci", False), # 150
("ucs2", "ucs2_vietnamese_ci", False), # 151
None,
None,
None,
None,
None,
None,
None,
("ucs2", "ucs2_general_mysql500_ci", False), # 159
("utf32", "utf32_unicode_ci", False), # 160
("utf32", "utf32_icelandic_ci", False), # 161
("utf32", "utf32_latvian_ci", False), # 162
("utf32", "utf32_romanian_ci", False), # 163
("utf32", "utf32_slovenian_ci", False), # 164
("utf32", "utf32_polish_ci", False), # 165
("utf32", "utf32_estonian_ci", False), # 166
("utf32", "utf32_spanish_ci", False), # 167
("utf32", "utf32_swedish_ci", False), # 168
("utf32", "utf32_turkish_ci", False), # 169
("utf32", "utf32_czech_ci", False), # 170
("utf32", "utf32_danish_ci", False), # 171
("utf32", "utf32_lithuanian_ci", False), # 172
("utf32", "utf32_slovak_ci", False), # 173
("utf32", "utf32_spanish2_ci", False), # 174
("utf32", "utf32_roman_ci", False), # 175
("utf32", "utf32_persian_ci", False), # 176
("utf32", "utf32_esperanto_ci", False), # 177
("utf32", "utf32_hungarian_ci", False), # 178
("utf32", "utf32_sinhala_ci", False), # 179
("utf32", "utf32_german2_ci", False), # 180
("utf32", "utf32_croatian_ci", False), # 181
("utf32", "utf32_unicode_520_ci", False), # 182
("utf32", "utf32_vietnamese_ci", False), # 183
None,
None,
None,
None,
None,
None,
None,
None,
("utf8", "utf8_unicode_ci", False), # 192
("utf8", "utf8_icelandic_ci", False), # 193
("utf8", "utf8_latvian_ci", False), # 194
("utf8", "utf8_romanian_ci", False), # 195
("utf8", "utf8_slovenian_ci", False), # 196
("utf8", "utf8_polish_ci", False), # 197
("utf8", "utf8_estonian_ci", False), # 198
("utf8", "utf8_spanish_ci", False), # 199
("utf8", "utf8_swedish_ci", False), # 200
("utf8", "utf8_turkish_ci", False), # 201
("utf8", "utf8_czech_ci", False), # 202
("utf8", "utf8_danish_ci", False), # 203
("utf8", "utf8_lithuanian_ci", False), # 204
("utf8", "utf8_slovak_ci", False), # 205
("utf8", "utf8_spanish2_ci", False), # 206
("utf8", "utf8_roman_ci", False), # 207
("utf8", "utf8_persian_ci", False), # 208
("utf8", "utf8_esperanto_ci", False), # 209
("utf8", "utf8_hungarian_ci", False), # 210
("utf8", "utf8_sinhala_ci", False), # 211
("utf8", "utf8_german2_ci", False), # 212
("utf8", "utf8_croatian_ci", False), # 213
("utf8", "utf8_unicode_520_ci", False), # 214
("utf8", "utf8_vietnamese_ci", False), # 215
None,
None,
None,
None,
None,
None,
None,
("utf8", "utf8_general_mysql500_ci", False), # 223
("utf8mb4", "utf8mb4_unicode_ci", False), # 224
("utf8mb4", "utf8mb4_icelandic_ci", False), # 225
("utf8mb4", "utf8mb4_latvian_ci", False), # 226
("utf8mb4", "utf8mb4_romanian_ci", False), # 227
("utf8mb4", "utf8mb4_slovenian_ci", False), # 228
("utf8mb4", "utf8mb4_polish_ci", False), # 229
("utf8mb4", "utf8mb4_estonian_ci", False), # 230
("utf8mb4", "utf8mb4_spanish_ci", False), # 231
("utf8mb4", "utf8mb4_swedish_ci", False), # 232
("utf8mb4", "utf8mb4_turkish_ci", False), # 233
("utf8mb4", "utf8mb4_czech_ci", False), # 234
("utf8mb4", "utf8mb4_danish_ci", False), # 235
("utf8mb4", "utf8mb4_lithuanian_ci", False), # 236
("utf8mb4", "utf8mb4_slovak_ci", False), # 237
("utf8mb4", "utf8mb4_spanish2_ci", False), # 238
("utf8mb4", "utf8mb4_roman_ci", False), # 239
("utf8mb4", "utf8mb4_persian_ci", False), # 240
("utf8mb4", "utf8mb4_esperanto_ci", False), # 241
("utf8mb4", "utf8mb4_hungarian_ci", False), # 242
("utf8mb4", "utf8mb4_sinhala_ci", False), # 243
("utf8mb4", "utf8mb4_german2_ci", False), # 244
("utf8mb4", "utf8mb4_croatian_ci", False), # 245
("utf8mb4", "utf8mb4_unicode_520_ci", False), # 246
("utf8mb4", "utf8mb4_vietnamese_ci", False), # 247
("gb18030", "gb18030_chinese_ci", True), # 248
("gb18030", "gb18030_bin", False), # 249
("gb18030", "gb18030_unicode_520_ci", False), # 250
]
| apache-2.0 |
kupferlauncher/kupfer | kupfer/plugin/virtualbox/__init__.py | 2 | 5061 | # -*- coding: UTF-8 -*-
__kupfer_name__ = _("VirtualBox")
__kupfer_sources__ = ("VBoxMachinesSource", )
__description__ = _("Control VirtualBox Virtual Machines. "
"Supports both Sun VirtualBox and Open Source Edition.")
__version__ = "0.4"
__author__ = "Karol Będkowski <karol.bedkowski@gmail.com>"
from kupfer.objects import Leaf, Action, Source
from kupfer import pretty
from kupfer import plugin_support
from kupfer.obj.apps import ApplicationSource
from kupfer.plugin.virtualbox import ose_support
from kupfer.plugin.virtualbox import constants as vbox_const
__kupfer_settings__ = plugin_support.PluginSettings(
{
"key": "force_cli",
"label": _("Force use CLI interface"),
"type": bool,
"value": False,
},
)
def _get_vbox():
    """Select the most capable available VirtualBox backend module.

    Preference order: the vboxapi4 bindings, then the older vboxapi
    bindings, then the CLI-based fallback.  When the "force_cli" plugin
    setting is enabled, the CLI backend is returned unconditionally.
    """
    if __kupfer_settings__['force_cli']:
        pretty.print_info(__name__, 'Using cli...')
        return ose_support
    try:
        from kupfer.plugin.virtualbox import vboxapi4_support as backend
    except ImportError:
        pass
    else:
        pretty.print_info(__name__, 'Using vboxapi4...')
        return backend
    try:
        from kupfer.plugin.virtualbox import vboxapi_support as backend
    except ImportError:
        pass
    else:
        pretty.print_info(__name__, 'Using vboxapi...')
        return backend
    pretty.print_info(__name__, 'Using cli...')
    return ose_support
class _VBoxSupportProxy:
    """Lazy proxy that forwards attribute access to the active backend.

    The backend module is chosen on first attribute access (via
    reload_settings) and can be swapped at runtime when plugin settings
    change.
    """
    # Class-level default; reload_settings() shadows it with an
    # instance attribute holding the chosen backend module.
    VBOX = None
    def __getattr__(self, attr):
        # Only called for attributes not found normally, so this lazily
        # initializes the backend and then delegates to it.
        if not self.VBOX:
            self.reload_settings()
        return getattr(self.VBOX, attr)
    def reload_settings(self):
        # Drop the current backend (if any) and re-run backend selection.
        pretty.print_debug(__name__, '_VBoxSupportProxy.reloading...')
        self.unload_module()
        self.VBOX = _get_vbox()
    def unload_module(self):
        # Give the backend a chance to clean up, then forget it.
        if not self.VBOX:
            return
        self.VBOX.unload()
        self.VBOX = None
vbox_support = _VBoxSupportProxy()
class VirtualMachine(Leaf):
    """Kupfer leaf representing one VirtualBox virtual machine."""
    def __init__(self, obj, name, description):
        # obj is the backend's machine identifier; name is shown in kupfer.
        Leaf.__init__(self, obj, name)
        self.description = description
    def get_description(self):
        return self.description
    def get_icon_name(self):
        return vbox_support.ICON
    def get_actions(self):
        # Yield only the actions valid for the machine's current state,
        # as reported by the active backend.
        state = vbox_support.get_machine_state(self.object)
        if state == vbox_const.VM_STATE_POWEROFF:
            yield VMAction(_('Power On'), 'system-run',
                    vbox_const.VM_START_NORMAL)
            yield VMAction(_('Power On Headless'), 'system-run',
                    vbox_const.VM_START_HEADLESS, -5)
        elif state == vbox_const.VM_STATE_POWERON:
            yield VMAction(_('Send Power Off Signal'), 'system-shutdown',
                    vbox_const.VM_ACPI_POWEROFF, -5)
            yield VMAction(_('Pause'), 'pause', vbox_const.VM_PAUSE)
            yield VMAction(_('Reboot'), 'system-reboot',
                    vbox_const.VM_REBOOT, -10)
        elif state == vbox_const.VM_STATE_SAVED:
            yield VMAction(_('Power On'), 'system-run',
                    vbox_const.VM_START_NORMAL)
            yield VMAction(_('Power On Headless'), 'system-run',
                    vbox_const.VM_START_HEADLESS, -5)
        else:  # VM_STATE_PAUSED
            yield VMAction(_('Resume'), 'resume', vbox_const.VM_RESUME)
        if state in (vbox_const.VM_STATE_POWERON, vbox_const.VM_STATE_PAUSED):
            # NOTE(review): 'system-supsend' looks like a typo for
            # 'system-suspend' -- verify against the icon theme in use.
            yield VMAction(_('Save State'), 'system-supsend',
                    vbox_const.VM_SAVE)
            yield VMAction(_('Power Off'), 'system-shutdown',
                    vbox_const.VM_POWEROFF, -10)
class VMAction(Action):
    """Action that sends a single control command to a VirtualMachine.

    rank_adjust biases kupfer's action ranking (negative = listed lower).
    """
    def __init__(self, name, icon, command, rank_adjust=0):
        Action.__init__(self, name)
        self._icon = icon
        self.rank_adjust = rank_adjust
        # One of the vbox_const.VM_* command constants.
        self.command = command
    def get_icon_name(self):
        return self._icon
    def item_types(self):
        # This action applies only to VirtualMachine leaves.
        yield VirtualMachine
    def activate(self, leaf):
        # Delegate execution of the command to the active backend.
        vbox_support.vm_action(self.command, leaf.object)
class VBoxMachinesSource(ApplicationSource):
    """Source listing the virtual machines known to VirtualBox."""
    # Application ids this source is attached to.
    appleaf_content_id = ("virtualbox-ose", "virtualbox")
    def __init__(self, name=_("VirtualBox Machines")):
        # Note: deliberately calls Source.__init__, not
        # ApplicationSource.__init__ (pre-existing pattern in this plugin).
        Source.__init__(self, name)
    def initialize(self):
        # Refresh the machine list when the backend's config dirs change,
        # and re-select the backend when plugin settings change.
        if vbox_support.MONITORED_DIRS:
            self.monitor_token = self.monitor_directories(
                    *vbox_support.MONITORED_DIRS)
        __kupfer_settings__.connect("plugin-setting-changed", self._setting_changed)
    def finalize(self):
        if vbox_support:
            vbox_support.unload_module()
    def is_dynamic(self):
        # Whether results may change between invocations is backend-specific.
        return vbox_support.IS_DYNAMIC
    def get_items(self):
        for machine_id, machine_name, machine_desc in vbox_support.get_machines():
            yield VirtualMachine(machine_id, machine_name, machine_desc)
    def get_description(self):
        return None
    def get_icon_name(self):
        return vbox_support.ICON
    def provides(self):
        yield VirtualMachine
    def _setting_changed(self, _setting, _key, _value):
        # Any settings change may switch the preferred backend.
        if vbox_support:
            vbox_support.reload_settings()
| gpl-3.0 |
visualputty/Landing-Lights | django/template/debug.py | 232 | 3797 | from django.conf import settings
from django.template.base import Lexer, Parser, tag_re, NodeList, VariableNode, TemplateSyntaxError
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.utils.safestring import SafeData, EscapeData
from django.utils.formats import localize
class DebugLexer(Lexer):
    """Template lexer that records each token's source position so the
    debug error page can highlight the offending template region.
    (Python 2 era Django code -- keep py2-compatible.)
    """
    def __init__(self, template_string, origin):
        # Redundant override kept for parity with the base class signature.
        super(DebugLexer, self).__init__(template_string, origin)
    def tokenize(self):
        "Return a list of tokens from a given template_string"
        result, upto = [], 0
        for match in tag_re.finditer(self.template_string):
            start, end = match.span()
            if start > upto:
                # Literal text between the previous tag and this one.
                result.append(self.create_token(self.template_string[upto:start], (upto, start), False))
                upto = start
            # The tag/variable/comment token itself.
            result.append(self.create_token(self.template_string[start:end], (start, end), True))
            upto = end
        # Trailing literal text after the last tag, if any.
        last_bit = self.template_string[upto:]
        if last_bit:
            result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), False))
        return result
    def create_token(self, token_string, source, in_tag):
        # Attach (origin, (start, end)) so errors can point at the template.
        token = super(DebugLexer, self).create_token(token_string, in_tag)
        token.source = self.origin, source
        return token
class DebugParser(Parser):
    """Template parser that tracks source positions of block tags so that
    syntax errors can be reported against the original template text.
    """
    def __init__(self, lexer):
        super(DebugParser, self).__init__(lexer)
        # Stack of (command, source) for currently open block tags.
        self.command_stack = []
    def enter_command(self, command, token):
        self.command_stack.append( (command, token.source) )
    def exit_command(self):
        self.command_stack.pop()
    def error(self, token, msg):
        # Build (not raise) an exception annotated with the token's source.
        return self.source_error(token.source, msg)
    def source_error(self, source,msg):
        e = TemplateSyntaxError(msg)
        e.source = source
        return e
    def create_nodelist(self):
        return DebugNodeList()
    def create_variable_node(self, contents):
        return DebugVariableNode(contents)
    def extend_nodelist(self, nodelist, node, token):
        # Tag every node with its source span before delegating.
        node.source = token.source
        super(DebugParser, self).extend_nodelist(nodelist, node, token)
    def unclosed_block_tag(self, parse_until):
        # Report the innermost still-open tag as the unclosed one.
        command, source = self.command_stack.pop()
        msg = "Unclosed tag '%s'. Looking for one of: %s " % (command, ', '.join(parse_until))
        raise self.source_error(source, msg)
    def compile_function_error(self, token, e):
        # Annotate compile-time errors with a source span if they lack one.
        if not hasattr(e, 'source'):
            e.source = token.source
class DebugNodeList(NodeList):
    """NodeList that wraps rendering errors with template source info.
    (Python 2 syntax: comma-style except and three-argument raise.)
    """
    def render_node(self, node, context):
        try:
            result = node.render(context)
        except TemplateSyntaxError, e:
            # Already a template error: just make sure it carries a source.
            if not hasattr(e, 'source'):
                e.source = node.source
            raise
        except Exception, e:
            from sys import exc_info
            # Wrap any other exception in a TemplateSyntaxError annotated
            # with the node's source and the original exc_info.
            wrapped = TemplateSyntaxError(u'Caught %s while rendering: %s' %
                (e.__class__.__name__, force_unicode(e, errors='replace')))
            wrapped.source = node.source
            wrapped.exc_info = exc_info()
            # Py2 three-argument raise: re-raise with the original traceback.
            raise wrapped, None, wrapped.exc_info[2]
        return result
class DebugVariableNode(VariableNode):
    """VariableNode that annotates template errors with source info and
    mirrors the non-debug rendering path (localize + autoescape)."""
    def render(self, context):
        try:
            output = self.filter_expression.resolve(context)
            output = localize(output, use_l10n=context.use_l10n)
            output = force_unicode(output)
        except TemplateSyntaxError, e:
            if not hasattr(e, 'source'):
                e.source = self.source
            raise
        except UnicodeDecodeError:
            # Undecodable bytes render as an empty string rather than a 500.
            return ''
        if (context.autoescape and not isinstance(output, SafeData)) or isinstance(output, EscapeData):
            return escape(output)
        else:
            return output
| bsd-3-clause |
jhoos/django | django/contrib/gis/gdal/field.py | 122 | 6400 | from ctypes import byref, c_int
from datetime import date, datetime, time
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils.encoding import force_text
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Fld_* routines are relevant here.
class Field(GDALBase):
    """
    This class wraps an OGR Field, and needs to be instantiated
    from a Feature object.
    """
    def __init__(self, feat, index):
        """
        Initializes on the feature object and the integer index of
        the field within the feature.
        """
        # Setting the feature pointer and index.
        self._feat = feat
        self._index = index
        # Getting the pointer for this field.
        fld_ptr = capi.get_feat_field_defn(feat.ptr, index)
        if not fld_ptr:
            raise GDALException('Cannot create OGR Field, invalid pointer given.')
        self.ptr = fld_ptr
        # Setting the class depending upon the OGR Field Type (OFT).
        # NOTE: reassigning self.__class__ specializes this instance in
        # place to the matching Field subclass from OGRFieldTypes.
        self.__class__ = OGRFieldTypes[self.type]
        # OFTReal with no precision should be an OFTInteger.
        if isinstance(self, OFTReal) and self.precision == 0:
            self.__class__ = OFTInteger
            # Flag so OFTInteger.value reads the underlying double.
            self._double = True
    def __str__(self):
        "Returns the string representation of the Field."
        return str(self.value).strip()
    # #### Field Methods ####
    def as_double(self):
        "Retrieves the Field's value as a double (float)."
        return capi.get_field_as_double(self._feat.ptr, self._index)
    def as_int(self):
        "Retrieves the Field's value as an integer."
        return capi.get_field_as_integer(self._feat.ptr, self._index)
    def as_string(self):
        "Retrieves the Field's value as a string."
        string = capi.get_field_as_string(self._feat.ptr, self._index)
        return force_text(string, encoding=self._feat.encoding, strings_only=True)
    def as_datetime(self):
        "Retrieves the Field's value as a tuple of date & time components."
        # Each component is a ctypes c_int; callers read .value on each.
        yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
        status = capi.get_field_as_datetime(
            self._feat.ptr, self._index, byref(yy), byref(mm), byref(dd),
            byref(hh), byref(mn), byref(ss), byref(tz))
        if status:
            return (yy, mm, dd, hh, mn, ss, tz)
        else:
            raise GDALException('Unable to retrieve date & time information from the field.')
    # #### Field Properties ####
    @property
    def name(self):
        "Returns the name of this Field."
        name = capi.get_field_name(self.ptr)
        return force_text(name, encoding=self._feat.encoding, strings_only=True)
    @property
    def precision(self):
        "Returns the precision of this Field."
        return capi.get_field_precision(self.ptr)
    @property
    def type(self):
        "Returns the OGR type of this Field."
        return capi.get_field_type(self.ptr)
    @property
    def type_name(self):
        "Return the OGR field type name for this Field."
        return capi.get_field_type_name(self.type)
    @property
    def value(self):
        "Returns the value of this Field."
        # Default is to get the field as a string; subclasses override.
        return self.as_string()
    @property
    def width(self):
        "Returns the width of this Field."
        return capi.get_field_width(self.ptr)
# ### The Field sub-classes for each OGR Field type. ###
class OFTInteger(Field):
    "OGR integer field, including OFTReal fields that have no precision."
    _double = False
    @property
    def value(self):
        "Returns an integer contained in this field."
        if not self._double:
            return self.as_int()
        # Really an OFTReal with no precision: read as a double and cast
        # with Python's int (arbitrary precision) to prevent overflow.
        return int(self.as_double())
    @property
    def type(self):
        """
        GDAL uses OFTReals to represent OFTIntegers in created
        shapefiles -- forcing the type here since the underlying field
        type may actually be OFTReal.
        """
        return 0
class OFTReal(Field):
    "OGR floating-point field."
    @property
    def value(self):
        "Returns a float contained in this field."
        result = self.as_double()
        return result
# String & Binary fields, just subclasses
class OFTString(Field):
    # Plain string field; value comes from Field.as_string().
    pass
class OFTWideString(Field):
    # Wide (Unicode) string field; same behavior as OFTString here.
    pass
class OFTBinary(Field):
    # Raw binary field; no specialized accessors are provided.
    pass
# OFTDate, OFTTime, OFTDateTime fields.
class OFTDate(Field):
    "OGR date field, exposed as a Python `date`."
    @property
    def value(self):
        "Returns a Python `date` object for the OFTDate field."
        try:
            # Only the first three (year, month, day) components are used.
            yy, mm, dd = self.as_datetime()[:3]
            return date(yy.value, mm.value, dd.value)
        except (ValueError, GDALException):
            # Missing or invalid date information.
            return None
class OFTDateTime(Field):
    "OGR date/time field, exposed as a Python `datetime`."
    @property
    def value(self):
        "Returns a Python `datetime` object for this OFTDateTime field."
        # TODO: Adapt timezone information.
        #  See http://lists.osgeo.org/pipermail/gdal-dev/2006-February/007990.html
        #  The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
        #  100=GMT, 104=GMT+1, 80=GMT-5, etc.
        try:
            parts = self.as_datetime()
            yy, mm, dd, hh, mn, ss = [c.value for c in parts[:6]]
            return datetime(yy, mm, dd, hh, mn, ss)
        except (ValueError, GDALException):
            return None
class OFTTime(Field):
    "OGR time field, exposed as a Python `time`."
    @property
    def value(self):
        "Returns a Python `time` object for this OFTTime field."
        try:
            # Components 3-5 are hour, minute, second.
            hh, mn, ss = self.as_datetime()[3:6]
            return time(hh.value, mn.value, ss.value)
        except (ValueError, GDALException):
            return None
# List fields are also just subclasses
class OFTIntegerList(Field):
    # List-of-integers field; no specialized accessors are provided.
    pass
class OFTRealList(Field):
    # List-of-floats field.
    pass
class OFTStringList(Field):
    # List-of-strings field.
    pass
class OFTWideStringList(Field):
    # List-of-wide-strings field.
    pass
# Class mapping dictionary for OFT Types and reverse mapping.
# Keys are the integer OGR field-type codes (as returned by
# capi.get_field_type); Field.__init__ uses this table to specialize each
# instance's class.
OGRFieldTypes = {
    0: OFTInteger,
    1: OFTIntegerList,
    2: OFTReal,
    3: OFTRealList,
    4: OFTString,
    5: OFTStringList,
    6: OFTWideString,
    7: OFTWideStringList,
    8: OFTBinary,
    9: OFTDate,
    10: OFTTime,
    11: OFTDateTime,
}
# Reverse lookup: Field subclass -> OGR field-type code.
ROGRFieldTypes = {cls: num for num, cls in OGRFieldTypes.items()}
| bsd-3-clause |
nirgal/ngw | core/management/commands/recover_contacts.py | 1 | 1347 | import logging
from django.core.management.base import BaseCommand
from ngw.core import perms
from ngw.core.models import GROUP_EVERYBODY, Contact
class Command(BaseCommand):
    help = 'Recover lost contacts'

    def handle(self, *args, **options):
        "Report every contact that is not a member of the EVERYBODY group."
        logger = logging.getLogger('contactrecover')
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            '{asctime} {name} {levelname!s:8} {message}', style='{'))
        logger.addHandler(handler)
        logger.propagate = False

        # Map the --verbosity option (0-3) onto logging levels.
        levels = {
            0: logging.ERROR,
            1: logging.WARNING,
            2: logging.INFO,
            3: logging.DEBUG,
        }
        verbosity = int(options['verbosity'])
        if verbosity in levels:
            logger.setLevel(levels[verbosity])

        # Raw SQL filter: contacts with no MEMBER flag in EVERYBODY or any
        # of its subgroups.
        orphan_filter = (
            'NOT EXISTS (SELECT * FROM contact_in_group'
            ' WHERE contact_id=contact.id'
            ' AND group_id IN (SELECT self_and_subgroups({}))'
            ' AND flags & {} <> 0)'
        ).format(GROUP_EVERYBODY, perms.MEMBER)
        for contact in Contact.objects.extra(where=[orphan_filter]):
            logger.error('%s (#%s) is not member of group EVERYBODY',
                         contact.name,
                         contact.id)
| bsd-2-clause |
microelly2/cadquery-freecad-module | CadQuery/Libs/docutils/languages/he.py | 148 | 2683 | # Author: Meir Kriheli
# Id: $Id: he.py 4837 2006-12-26 09:59:41Z sfcben $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Hebrew-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
# The \uXXXX escape sequences below are the Hebrew translations of the
# English keys/values.
labels = {
    # fixed: language-dependent
    'author': u'\u05de\u05d7\u05d1\u05e8',
    'authors': u'\u05de\u05d7\u05d1\u05e8\u05d9',
    'organization': u'\u05d0\u05e8\u05d2\u05d5\u05df',
    'address': u'\u05db\u05ea\u05d5\u05d1\u05ea',
    'contact': u'\u05d0\u05d9\u05e9 \u05e7\u05e9\u05e8',
    'version': u'\u05d2\u05e8\u05e1\u05d4',
    'revision': u'\u05de\u05d4\u05d3\u05d5\u05e8\u05d4',
    'status': u'\u05e1\u05d8\u05d8\u05d5\u05e1',
    'date': u'\u05ea\u05d0\u05e8\u05d9\u05da',
    'copyright': u'\u05d6\u05db\u05d5\u05d9\u05d5\u05ea \u05e9\u05de\u05d5\u05e8\u05d5\u05ea',
    'dedication': u'\u05d4\u05e7\u05d3\u05e9\u05d4',
    'abstract': u'\u05ea\u05e7\u05e6\u05d9\u05e8',
    'attention': u'\u05ea\u05e9\u05d5\u05de\u05ea \u05dc\u05d1',
    'caution': u'\u05d6\u05d4\u05d9\u05e8\u05d5\u05ea',
    'danger': u'\u05e1\u05db\u05e0\u05d4',
    'error': u'\u05e9\u05d2\u05d9\u05d0\u05d4' ,
    'hint': u'\u05e8\u05de\u05d6',
    'important': u'\u05d7\u05e9\u05d5\u05d1',
    'note': u'\u05d4\u05e2\u05e8\u05d4',
    'tip': u'\u05d8\u05d9\u05e4',
    'warning': u'\u05d0\u05d6\u05d4\u05e8\u05d4',
    'contents': u'\u05ea\u05d5\u05db\u05df'}
"""Mapping of node class name to label text."""
# Inverse direction of `labels`: Hebrew field names back to docutils'
# canonical (English) names.
bibliographic_fields = {
    # language-dependent: fixed
    u'\u05de\u05d7\u05d1\u05e8': 'author',
    u'\u05de\u05d7\u05d1\u05e8\u05d9': 'authors',
    u'\u05d0\u05e8\u05d2\u05d5\u05df': 'organization',
    u'\u05db\u05ea\u05d5\u05d1\u05ea': 'address',
    u'\u05d0\u05d9\u05e9 \u05e7\u05e9\u05e8': 'contact',
    u'\u05d2\u05e8\u05e1\u05d4': 'version',
    u'\u05de\u05d4\u05d3\u05d5\u05e8\u05d4': 'revision',
    u'\u05e1\u05d8\u05d8\u05d5\u05e1': 'status',
    u'\u05ea\u05d0\u05e8\u05d9\u05da': 'date',
    u'\u05d6\u05db\u05d5\u05d9\u05d5\u05ea \u05e9\u05de\u05d5\u05e8\u05d5\u05ea': 'copyright',
    u'\u05d4\u05e7\u05d3\u05e9\u05d4': 'dedication',
    u'\u05ea\u05e7\u05e6\u05d9\u05e8': 'abstract'}
"""Hebrew to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| lgpl-3.0 |
mzl9039/spark | examples/src/main/python/ml/estimator_transformer_param_example.py | 123 | 3952 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Estimator Transformer Param Example.
"""
from __future__ import print_function
# $example on$
from pyspark.ml.linalg import Vectors
from pyspark.ml.classification import LogisticRegression
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    spark = SparkSession\
        .builder\
        .appName("EstimatorTransformerParamExample")\
        .getOrCreate()
    # $example on$
    # Prepare training data from a list of (label, features) tuples.
    training = spark.createDataFrame([
        (1.0, Vectors.dense([0.0, 1.1, 0.1])),
        (0.0, Vectors.dense([2.0, 1.0, -1.0])),
        (0.0, Vectors.dense([2.0, 1.3, 1.0])),
        (1.0, Vectors.dense([0.0, 1.2, -0.5]))], ["label", "features"])
    # Create a LogisticRegression instance. This instance is an Estimator.
    lr = LogisticRegression(maxIter=10, regParam=0.01)
    # Print out the parameters, documentation, and any default values.
    print("LogisticRegression parameters:\n" + lr.explainParams() + "\n")
    # Learn a LogisticRegression model. This uses the parameters stored in lr.
    model1 = lr.fit(training)
    # Since model1 is a Model (i.e., a transformer produced by an Estimator),
    # we can view the parameters it used during fit().
    # This prints the parameter (name: value) pairs, where names are unique IDs for this
    # LogisticRegression instance.
    print("Model 1 was fit using parameters: ")
    print(model1.extractParamMap())
    # We may alternatively specify parameters using a Python dictionary as a paramMap
    paramMap = {lr.maxIter: 20}
    paramMap[lr.maxIter] = 30  # Specify 1 Param, overwriting the original maxIter.
    paramMap.update({lr.regParam: 0.1, lr.threshold: 0.55})  # Specify multiple Params.
    # You can combine paramMaps, which are python dictionaries.
    paramMap2 = {lr.probabilityCol: "myProbability"}  # Change output column name
    paramMapCombined = paramMap.copy()
    paramMapCombined.update(paramMap2)
    # Now learn a new model using the paramMapCombined parameters.
    # paramMapCombined overrides all parameters set earlier via lr.set* methods.
    model2 = lr.fit(training, paramMapCombined)
    print("Model 2 was fit using parameters: ")
    print(model2.extractParamMap())
    # Prepare test data
    test = spark.createDataFrame([
        (1.0, Vectors.dense([-1.0, 1.5, 1.3])),
        (0.0, Vectors.dense([3.0, 2.0, -0.1])),
        (1.0, Vectors.dense([0.0, 2.2, -1.5]))], ["label", "features"])
    # Make predictions on test data using the Transformer.transform() method.
    # LogisticRegression.transform will only use the 'features' column.
    # Note that model2.transform() outputs a "myProbability" column instead of the usual
    # 'probability' column since we renamed the lr.probabilityCol parameter previously.
    prediction = model2.transform(test)
    result = prediction.select("features", "label", "myProbability", "prediction") \
        .collect()
    for row in result:
        print("features=%s, label=%s -> prob=%s, prediction=%s"
              % (row.features, row.label, row.myProbability, row.prediction))
    # $example off$
    # Always stop the session so the example releases Spark resources.
    spark.stop()
| apache-2.0 |
kupferlauncher/kupfer | oldplugins/epiphany.py | 2 | 1247 | __kupfer_name__ = _("Epiphany Bookmarks")
__kupfer_sources__ = ("EpiphanySource", )
__description__ = _("Index of Epiphany bookmarks")
__version__ = ""
__author__ = "Ulrik Sverdrup <ulrik.sverdrup@gmail.com>"
import os
from kupfer.objects import Source
from kupfer.objects import UrlLeaf
from kupfer.obj.apps import AppLeafContentMixin
from kupfer.plugin import epiphany_support
class EpiphanySource (AppLeafContentMixin, Source):
    """Kupfer source exposing Epiphany browser bookmarks as UrlLeaf items."""
    appleaf_content_id = "epiphany"

    def __init__(self):
        # Zero-argument super() -- the explicit Python 2 form
        # super(EpiphanySource, self) was a leftover; this file is Python 3
        # (it already uses `except ... as exc`).
        super().__init__(_("Epiphany Bookmarks"))

    def get_items(self):
        """Yield UrlLeaf objects parsed from the Epiphany bookmarks file.

        Best-effort: returns an empty tuple when the bookmarks file is
        missing or unreadable (the error is logged, not raised).
        """
        fpath = os.path.expanduser(epiphany_support.EPHY_BOOKMARKS_FILE)
        if not os.path.exists(fpath):
            self.output_debug("Epiphany bookmarks file not found:", fpath)
            return ()
        try:
            bookmarks = list(epiphany_support.parse_epiphany_bookmarks(fpath))
        except EnvironmentError as exc:
            self.output_error(exc)
            return ()
        return (UrlLeaf(href, title) for title, href in bookmarks)

    def get_description(self):
        return _("Index of Epiphany bookmarks")

    def get_icon_name(self):
        return "web-browser"

    def provides(self):
        yield UrlLeaf
drnextgis/QGIS | python/ext-libs/pygments/lexers/_lassobuiltins.py | 77 | 137633 | # -*- coding: utf-8 -*-
"""
pygments.lexers._lassobuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Built-in Lasso types, traits, and methods.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
BUILTINS = {
'Types': [
'null',
'void',
'tag',
'trait',
'integer',
'decimal',
'boolean',
'capture',
'string',
'bytes',
'keyword',
'custom',
'staticarray',
'signature',
'memberstream',
'dsinfo',
'sourcefile',
'array',
'pair',
'opaque',
'filedesc',
'dirdesc',
'locale',
'ucal',
'xml_domimplementation',
'xml_node',
'xml_characterdata',
'xml_document',
'xml_element',
'xml_attr',
'xml_text',
'xml_cdatasection',
'xml_entityreference',
'xml_entity',
'xml_processinginstruction',
'xml_comment',
'xml_documenttype',
'xml_documentfragment',
'xml_notation',
'xml_nodelist',
'xml_namednodemap',
'xml_namednodemap_ht',
'xml_namednodemap_attr',
'xmlstream',
'sqlite3',
'sqlite3_stmt',
'mime_reader',
'curltoken',
'regexp',
'zip_impl',
'zip_file_impl',
'library_thread_loader_thread$',
'library_thread_loader',
'generateforeachunkeyed',
'generateforeachkeyed',
'eacher',
'queriable_where',
'queriable_select',
'queriable_selectmany',
'queriable_groupby',
'queriable_join',
'queriable_groupjoin',
'queriable_orderby',
'queriable_orderbydescending',
'queriable_thenby',
'queriable_thenbydescending',
'queriable_skip',
'queriable_take',
'queriable_grouping',
'generateseries',
'tie',
'pairup',
'delve',
'repeat',
'pair_compare',
'serialization_object_identity_compare',
'serialization_element',
'serialization_writer_standin',
'serialization_writer_ref',
'serialization_writer',
'serialization_reader',
'tree_nullnode',
'tree_node',
'tree_base',
'map_node',
'map',
'file',
'dir',
'magick_image',
'ldap',
'os_process',
'java_jnienv',
'jobject',
'jmethodid',
'jfieldid',
'database_registry',
'sqlite_db',
'sqlite_results',
'sqlite_currentrow',
'sqlite_table',
'sqlite_column',
'curl',
'date',
'debugging_stack',
'dbgp_server',
'dbgp_packet',
'duration',
'inline_type',
'json_literal',
'json_object',
'list_node',
'list',
'jchar',
'jchararray',
'jbyte',
'jbytearray',
'jfloat',
'jint',
'jshort',
'currency',
'scientific',
'percent',
'dateandtime',
'timeonly',
'net_tcp',
'net_tcpssl',
'net_named_pipe',
'net_udppacket',
'net_udp',
'pdf_typebase',
'pdf_doc',
'pdf_color',
'pdf_barcode',
'pdf_font',
'pdf_image',
'pdf_list',
'pdf_read',
'pdf_table',
'pdf_text',
'pdf_hyphenator',
'pdf_chunk',
'pdf_phrase',
'pdf_paragraph',
'queue',
'set',
'sys_process',
'worker_pool',
'zip_file',
'zip',
'cache_server_element',
'cache_server',
'dns_response',
'component_render_state',
'component',
'component_container',
'document_base',
'document_body',
'document_header',
'text_document',
'data_document',
'email_compose',
'email_pop',
'email_parse',
'email_queue_impl_base',
'email_stage_impl_base',
'fcgi_record',
'web_request_impl',
'fcgi_request',
'include_cache_thread$',
'include_cache',
'atbegin_thread$',
'atbegin',
'fastcgi_each_fcgi_param',
'fastcgi_server',
'filemaker_datasource',
'http_document',
'http_document_header',
'http_header_field',
'html_document_head',
'html_document_body',
'raw_document_body',
'bytes_document_body',
'html_attr',
'html_atomic_element',
'html_container_element',
'http_error',
'html_script',
'html_text',
'html_raw',
'html_binary',
'html_json',
'html_cdata',
'html_eol',
'html_div',
'html_span',
'html_br',
'html_hr',
'html_h1',
'html_h2',
'html_h3',
'html_h4',
'html_h5',
'html_h6',
'html_meta',
'html_link',
'html_object',
'html_style',
'html_base',
'html_table',
'html_tr',
'html_td',
'html_th',
'html_img',
'html_form',
'html_fieldset',
'html_legend',
'html_input',
'html_label',
'html_option',
'html_select',
'http_server_web_connection',
'http_server',
'http_server_connection_handler',
'image',
'lassoapp_installer',
'lassoapp_content_rep_halt',
'lassoapp_dirsrc_fileresource',
'lassoapp_dirsrc_appsource',
'lassoapp_livesrc_fileresource',
'lassoapp_livesrc_appsource',
'lassoapp_long_expiring_bytes',
'lassoapp_zip_file_server_thread$',
'lassoapp_zip_file_server',
'lassoapp_zipsrc_fileresource',
'lassoapp_zipsrc_appsource',
'lassoapp_compiledsrc_fileresource',
'lassoapp_compiledsrc_appsource',
'lassoapp_manualsrc_appsource',
'log_impl_base',
'portal_impl',
'security_registry',
'memory_session_driver_impl_entry',
'memory_session_driver_impl',
'sqlite_session_driver_impl_entry',
'sqlite_session_driver_impl',
'mysql_session_driver_impl',
'odbc_session_driver_impl',
'session_delete_expired_thread_thread$',
'session_delete_expired_thread',
'email_smtp',
'client_address',
'client_ip',
'web_node_base',
'web_node_root',
'web_node_content_representation_xhr_container',
'web_node_content_representation_html_specialized',
'web_node_content_representation_css_specialized',
'web_node_content_representation_js_specialized',
'web_node_echo',
'web_error_atend',
'web_response_impl',
'web_router'
],
'Traits': [
'trait_asstring',
'any',
'trait_generator',
'trait_decompose_assignment',
'trait_foreach',
'trait_generatorcentric',
'trait_foreachtextelement',
'trait_finite',
'trait_finiteforeach',
'trait_keyed',
'trait_keyedfinite',
'trait_keyedforeach',
'trait_frontended',
'trait_backended',
'trait_doubleended',
'trait_positionallykeyed',
'trait_expandable',
'trait_frontexpandable',
'trait_backexpandable',
'trait_contractible',
'trait_frontcontractible',
'trait_backcontractible',
'trait_fullymutable',
'trait_keyedmutable',
'trait_endedfullymutable',
'trait_setoperations',
'trait_searchable',
'trait_positionallysearchable',
'trait_pathcomponents',
'trait_readbytes',
'trait_writebytes',
'trait_setencoding',
'trait_readstring',
'trait_writestring',
'trait_hashable',
'trait_each_sub',
'trait_stack',
'trait_list',
'trait_array',
'trait_map',
'trait_close',
'trait_file',
'trait_scalar',
'trait_queriablelambda',
'trait_queriable',
'queriable_asstring',
'trait_serializable',
'trait_treenode',
'trait_json_serialize',
'formattingbase',
'trait_net',
'trait_xml_elementcompat',
'trait_xml_nodecompat',
'web_connection',
'html_element_coreattrs',
'html_element_i18nattrs',
'html_element_eventsattrs',
'html_attributed',
'lassoapp_resource',
'lassoapp_source',
'lassoapp_capabilities',
'session_driver',
'web_node_content_json_specialized',
'web_node',
'web_node_container',
'web_node_content_representation',
'web_node_content',
'web_node_content_document',
'web_node_postable',
'web_node_content_html_specialized',
'web_node_content_css_specialized',
'web_node_content_js_specialized'
],
'Methods': [
'fail_now',
'staticarray',
'integer',
'decimal',
'string',
'bytes',
'keyword',
'signature',
'register',
'register_thread',
'escape_tag',
'handle',
'handle_failure',
'protect_now',
'threadvar_get',
'threadvar_set',
'threadvar_set_asrt',
'threadvar_find',
'abort_now',
'abort_clear',
'failure_clear',
'var_keys',
'var_values',
'null',
'trait',
'staticarray_join',
'suspend',
'main_thread_only',
'split_thread',
'capture_nearestloopcount',
'capture_nearestloopcontinue',
'capture_nearestloopabort',
'pair',
'io_file_o_rdonly',
'io_file_o_wronly',
'io_file_o_rdwr',
'io_file_o_nonblock',
'io_file_o_sync',
'io_file_o_shlock',
'io_file_o_exlock',
'io_file_o_async',
'io_file_o_fsync',
'io_file_o_nofollow',
'io_file_s_irwxu',
'io_file_s_irusr',
'io_file_s_iwusr',
'io_file_s_ixusr',
'io_file_s_irwxg',
'io_file_s_irgrp',
'io_file_s_iwgrp',
'io_file_s_ixgrp',
'io_file_s_irwxo',
'io_file_s_iroth',
'io_file_s_iwoth',
'io_file_s_ixoth',
'io_file_s_isuid',
'io_file_s_isgid',
'io_file_s_isvtx',
'io_file_s_ifmt',
'io_file_s_ifchr',
'io_file_s_ifdir',
'io_file_s_ifreg',
'io_file_o_append',
'io_file_o_creat',
'io_file_o_trunc',
'io_file_o_excl',
'io_file_seek_set',
'io_file_seek_cur',
'io_file_seek_end',
'io_file_s_ififo',
'io_file_s_ifblk',
'io_file_s_iflnk',
'io_file_s_ifsock',
'io_net_shut_rd',
'io_net_shut_wr',
'io_net_shut_rdwr',
'io_net_sock_stream',
'io_net_sock_dgram',
'io_net_sock_raw',
'io_net_sock_rdm',
'io_net_sock_seqpacket',
'io_net_so_debug',
'io_net_so_acceptconn',
'io_net_so_reuseaddr',
'io_net_so_keepalive',
'io_net_so_dontroute',
'io_net_so_broadcast',
'io_net_so_useloopback',
'io_net_so_linger',
'io_net_so_oobinline',
'io_net_so_timestamp',
'io_net_so_sndbuf',
'io_net_so_rcvbuf',
'io_net_so_sndlowat',
'io_net_so_rcvlowat',
'io_net_so_sndtimeo',
'io_net_so_rcvtimeo',
'io_net_so_error',
'io_net_so_type',
'io_net_sol_socket',
'io_net_af_unix',
'io_net_af_inet',
'io_net_af_inet6',
'io_net_ipproto_ip',
'io_net_ipproto_udp',
'io_net_msg_peek',
'io_net_msg_oob',
'io_net_msg_waitall',
'io_file_fioclex',
'io_file_fionclex',
'io_file_fionread',
'io_file_fionbio',
'io_file_fioasync',
'io_file_fiosetown',
'io_file_fiogetown',
'io_file_fiodtype',
'io_file_f_dupfd',
'io_file_f_getfd',
'io_file_f_setfd',
'io_file_f_getfl',
'io_file_f_setfl',
'io_file_f_getlk',
'io_file_f_setlk',
'io_file_f_setlkw',
'io_file_fd_cloexec',
'io_file_f_rdlck',
'io_file_f_unlck',
'io_file_f_wrlck',
'io_dir_dt_unknown',
'io_dir_dt_fifo',
'io_dir_dt_chr',
'io_dir_dt_blk',
'io_dir_dt_reg',
'io_dir_dt_sock',
'io_dir_dt_wht',
'io_dir_dt_lnk',
'io_dir_dt_dir',
'io_file_access',
'io_file_chdir',
'io_file_getcwd',
'io_file_chown',
'io_file_lchown',
'io_file_truncate',
'io_file_link',
'io_file_pipe',
'io_file_rmdir',
'io_file_symlink',
'io_file_unlink',
'io_file_remove',
'io_file_rename',
'io_file_tempnam',
'io_file_mkstemp',
'io_file_dirname',
'io_file_realpath',
'io_file_chmod',
'io_file_mkdir',
'io_file_mkfifo',
'io_file_umask',
'io_net_socket',
'io_net_bind',
'io_net_connect',
'io_net_listen',
'io_net_recv',
'io_net_recvfrom',
'io_net_accept',
'io_net_send',
'io_net_sendto',
'io_net_shutdown',
'io_net_getpeername',
'io_net_getsockname',
'io_net_ssl_begin',
'io_net_ssl_end',
'io_net_ssl_shutdown',
'io_net_ssl_setverifylocations',
'io_net_ssl_usecertificatechainfile',
'io_net_ssl_useprivatekeyfile',
'io_net_ssl_connect',
'io_net_ssl_accept',
'io_net_ssl_error',
'io_net_ssl_errorstring',
'io_net_ssl_liberrorstring',
'io_net_ssl_funcerrorstring',
'io_net_ssl_reasonerrorstring',
'io_net_ssl_setconnectstate',
'io_net_ssl_setacceptstate',
'io_net_ssl_read',
'io_net_ssl_write',
'io_file_stat_size',
'io_file_stat_mode',
'io_file_stat_mtime',
'io_file_stat_atime',
'io_file_lstat_size',
'io_file_lstat_mode',
'io_file_lstat_mtime',
'io_file_lstat_atime',
'io_file_readlink',
'io_file_lockf',
'io_file_f_ulock',
'io_file_f_tlock',
'io_file_f_test',
'dirdesc',
'io_file_stdin',
'io_file_stdout',
'io_file_stderr',
'filedesc',
'uchar_alphabetic',
'uchar_ascii_hex_digit',
'uchar_bidi_control',
'uchar_bidi_mirrored',
'uchar_dash',
'uchar_default_ignorable_code_point',
'uchar_deprecated',
'uchar_diacritic',
'uchar_extender',
'uchar_full_composition_exclusion',
'uchar_grapheme_base',
'uchar_grapheme_extend',
'uchar_grapheme_link',
'uchar_hex_digit',
'uchar_hyphen',
'uchar_id_continue',
'uchar_ideographic',
'uchar_ids_binary_operator',
'uchar_ids_trinary_operator',
'uchar_join_control',
'uchar_logical_order_exception',
'uchar_lowercase',
'uchar_math',
'uchar_noncharacter_code_point',
'uchar_quotation_mark',
'uchar_radical',
'uchar_soft_dotted',
'uchar_terminal_punctuation',
'uchar_unified_ideograph',
'uchar_uppercase',
'uchar_white_space',
'uchar_xid_continue',
'uchar_case_sensitive',
'uchar_s_term',
'uchar_variation_selector',
'uchar_nfd_inert',
'uchar_nfkd_inert',
'uchar_nfc_inert',
'uchar_nfkc_inert',
'uchar_segment_starter',
'uchar_pattern_syntax',
'uchar_pattern_white_space',
'uchar_posix_alnum',
'uchar_posix_blank',
'uchar_posix_graph',
'uchar_posix_print',
'uchar_posix_xdigit',
'uchar_bidi_class',
'uchar_block',
'uchar_canonical_combining_class',
'uchar_decomposition_type',
'uchar_east_asian_width',
'uchar_general_category',
'uchar_joining_group',
'uchar_joining_type',
'uchar_line_break',
'uchar_numeric_type',
'uchar_script',
'uchar_hangul_syllable_type',
'uchar_nfd_quick_check',
'uchar_nfkd_quick_check',
'uchar_nfc_quick_check',
'uchar_nfkc_quick_check',
'uchar_lead_canonical_combining_class',
'uchar_trail_canonical_combining_class',
'uchar_grapheme_cluster_break',
'uchar_sentence_break',
'uchar_word_break',
'uchar_general_category_mask',
'uchar_numeric_value',
'uchar_age',
'uchar_bidi_mirroring_glyph',
'uchar_case_folding',
'uchar_iso_comment',
'uchar_lowercase_mapping',
'uchar_name',
'uchar_simple_case_folding',
'uchar_simple_lowercase_mapping',
'uchar_simple_titlecase_mapping',
'uchar_simple_uppercase_mapping',
'uchar_titlecase_mapping',
'uchar_unicode_1_name',
'uchar_uppercase_mapping',
'u_wb_other',
'u_wb_aletter',
'u_wb_format',
'u_wb_katakana',
'u_wb_midletter',
'u_wb_midnum',
'u_wb_numeric',
'u_wb_extendnumlet',
'u_sb_other',
'u_sb_aterm',
'u_sb_close',
'u_sb_format',
'u_sb_lower',
'u_sb_numeric',
'u_sb_oletter',
'u_sb_sep',
'u_sb_sp',
'u_sb_sterm',
'u_sb_upper',
'u_lb_unknown',
'u_lb_ambiguous',
'u_lb_alphabetic',
'u_lb_break_both',
'u_lb_break_after',
'u_lb_break_before',
'u_lb_mandatory_break',
'u_lb_contingent_break',
'u_lb_close_punctuation',
'u_lb_combining_mark',
'u_lb_carriage_return',
'u_lb_exclamation',
'u_lb_glue',
'u_lb_hyphen',
'u_lb_ideographic',
'u_lb_inseparable',
'u_lb_infix_numeric',
'u_lb_line_feed',
'u_lb_nonstarter',
'u_lb_numeric',
'u_lb_open_punctuation',
'u_lb_postfix_numeric',
'u_lb_prefix_numeric',
'u_lb_quotation',
'u_lb_complex_context',
'u_lb_surrogate',
'u_lb_space',
'u_lb_break_symbols',
'u_lb_zwspace',
'u_lb_next_line',
'u_lb_word_joiner',
'u_lb_h2',
'u_lb_h3',
'u_lb_jl',
'u_lb_jt',
'u_lb_jv',
'u_nt_none',
'u_nt_decimal',
'u_nt_digit',
'u_nt_numeric',
'locale',
'locale_english',
'locale_french',
'locale_german',
'locale_italian',
'locale_japanese',
'locale_korean',
'locale_chinese',
'locale_simplifiedchinese',
'locale_traditionalchinese',
'locale_france',
'locale_germany',
'locale_italy',
'locale_japan',
'locale_korea',
'locale_china',
'locale_prc',
'locale_taiwan',
'locale_uk',
'locale_us',
'locale_canada',
'locale_canadafrench',
'locale_default',
'locale_setdefault',
'locale_isocountries',
'locale_isolanguages',
'locale_availablelocales',
'ucal_listtimezones',
'ucal',
'ucal_era',
'ucal_year',
'ucal_month',
'ucal_weekofyear',
'ucal_weekofmonth',
'ucal_dayofmonth',
'ucal_dayofyear',
'ucal_dayofweek',
'ucal_dayofweekinmonth',
'ucal_ampm',
'ucal_hour',
'ucal_hourofday',
'ucal_minute',
'ucal_second',
'ucal_millisecond',
'ucal_zoneoffset',
'ucal_dstoffset',
'ucal_yearwoy',
'ucal_dowlocal',
'ucal_extendedyear',
'ucal_julianday',
'ucal_millisecondsinday',
'ucal_lenient',
'ucal_firstdayofweek',
'ucal_daysinfirstweek',
'xml_domimplementation',
'sys_sigalrm',
'sys_sighup',
'sys_sigkill',
'sys_sigpipe',
'sys_sigquit',
'sys_sigusr1',
'sys_sigusr2',
'sys_sigchld',
'sys_sigcont',
'sys_sigstop',
'sys_sigtstp',
'sys_sigttin',
'sys_sigttou',
'sys_sigbus',
'sys_sigprof',
'sys_sigsys',
'sys_sigtrap',
'sys_sigurg',
'sys_sigvtalrm',
'sys_sigxcpu',
'sys_sigxfsz',
'sys_wcontinued',
'sys_wnohang',
'sys_wuntraced',
'sys_sigabrt',
'sys_sigfpe',
'sys_sigill',
'sys_sigint',
'sys_sigsegv',
'sys_sigterm',
'sys_exit',
'sys_fork',
'sys_kill',
'sys_waitpid',
'sys_getegid',
'sys_geteuid',
'sys_getgid',
'sys_getlogin',
'sys_getpid',
'sys_getppid',
'sys_getuid',
'sys_setuid',
'sys_setgid',
'sys_setsid',
'sys_errno',
'sys_strerror',
'sys_time',
'sys_difftime',
'sys_getpwuid',
'sys_getpwnam',
'sys_getgrnam',
'sys_drand48',
'sys_erand48',
'sys_jrand48',
'sys_lcong48',
'sys_lrand48',
'sys_mrand48',
'sys_nrand48',
'sys_srand48',
'sys_random',
'sys_srandom',
'sys_seed48',
'sys_rand',
'sys_srand',
'sys_environ',
'sys_getenv',
'sys_setenv',
'sys_unsetenv',
'sys_uname',
'uuid_compare',
'uuid_copy',
'uuid_generate',
'uuid_generate_random',
'uuid_generate_time',
'uuid_is_null',
'uuid_parse',
'uuid_unparse',
'uuid_unparse_lower',
'uuid_unparse_upper',
'sys_credits',
'sleep',
'sys_dll_ext',
'sys_listtypes',
'sys_listtraits',
'sys_listunboundmethods',
'sys_getthreadcount',
'sys_growheapby',
'sys_getheapsize',
'sys_getheapfreebytes',
'sys_getbytessincegc',
'sys_garbagecollect',
'sys_clock',
'sys_getstartclock',
'sys_clockspersec',
'sys_pointersize',
'sys_loadlibrary',
'sys_getchar',
'sys_chroot',
'sys_exec',
'sys_kill_exec',
'sys_wait_exec',
'sys_test_exec',
'sys_detach_exec',
'sys_pid_exec',
'wifexited',
'wexitstatus',
'wifsignaled',
'wtermsig',
'wifstopped',
'wstopsig',
'wifcontinued',
'sys_eol',
'sys_iswindows',
'sys_is_windows',
'sys_isfullpath',
'sys_is_full_path',
'lcapi_loadmodule',
'lcapi_listdatasources',
'dsinfo',
'encrypt_blowfish',
'decrypt_blowfish',
'cipher_digest',
'cipher_encrypt',
'cipher_decrypt',
'cipher_list',
'cipher_keylength',
'cipher_hmac',
'cipher_seal',
'cipher_open',
'cipher_sign',
'cipher_verify',
'cipher_decrypt_private',
'cipher_decrypt_public',
'cipher_encrypt_private',
'cipher_encrypt_public',
'cipher_generate_key',
'xmlstream',
'sourcefile',
'tag',
'tag_exists',
'mime_reader',
'curl_easy_init',
'curl_easy_duphandle',
'curl_easy_cleanup',
'curl_easy_getinfo',
'curl_multi_perform',
'curl_multi_result',
'curl_easy_reset',
'curl_easy_setopt',
'curl_easy_strerror',
'curl_getdate',
'curl_version',
'curl_version_info',
'curlinfo_effective_url',
'curlinfo_content_type',
'curlinfo_response_code',
'curlinfo_header_size',
'curlinfo_request_size',
'curlinfo_ssl_verifyresult',
'curlinfo_filetime',
'curlinfo_redirect_count',
'curlinfo_http_connectcode',
'curlinfo_httpauth_avail',
'curlinfo_proxyauth_avail',
'curlinfo_os_errno',
'curlinfo_num_connects',
'curlinfo_total_time',
'curlinfo_namelookup_time',
'curlinfo_connect_time',
'curlinfo_pretransfer_time',
'curlinfo_size_upload',
'curlinfo_size_download',
'curlinfo_speed_download',
'curlinfo_speed_upload',
'curlinfo_content_length_download',
'curlinfo_content_length_upload',
'curlinfo_starttransfer_time',
'curlinfo_redirect_time',
'curlinfo_ssl_engines',
'curlopt_url',
'curlopt_postfields',
'curlopt_cainfo',
'curlopt_capath',
'curlopt_cookie',
'curlopt_cookiefile',
'curlopt_cookiejar',
'curlopt_customrequest',
'curlopt_egdsocket',
'curlopt_encoding',
'curlopt_ftp_account',
'curlopt_ftpport',
'curlopt_interface',
'curlopt_krb4level',
'curlopt_netrc_file',
'curlopt_proxy',
'curlopt_proxyuserpwd',
'curlopt_random_file',
'curlopt_range',
'curlopt_readdata',
'curlopt_referer',
'curlopt_ssl_cipher_list',
'curlopt_sslcert',
'curlopt_sslcerttype',
'curlopt_sslengine',
'curlopt_sslkey',
'curlopt_sslkeypasswd',
'curlopt_sslkeytype',
'curlopt_useragent',
'curlopt_userpwd',
'curlopt_postfieldsize',
'curlopt_autoreferer',
'curlopt_buffersize',
'curlopt_connecttimeout',
'curlopt_cookiesession',
'curlopt_crlf',
'curlopt_dns_use_global_cache',
'curlopt_failonerror',
'curlopt_filetime',
'curlopt_followlocation',
'curlopt_forbid_reuse',
'curlopt_fresh_connect',
'curlopt_ftp_create_missing_dirs',
'curlopt_ftp_response_timeout',
'curlopt_ftp_ssl',
'curlopt_use_ssl',
'curlopt_ftp_use_eprt',
'curlopt_ftp_use_epsv',
'curlopt_ftpappend',
'curlopt_ftplistonly',
'curlopt_ftpsslauth',
'curlopt_header',
'curlopt_http_version',
'curlopt_httpauth',
'curlopt_httpget',
'curlopt_httpproxytunnel',
'curlopt_infilesize',
'curlopt_ipresolve',
'curlopt_low_speed_limit',
'curlopt_low_speed_time',
'curlopt_maxconnects',
'curlopt_maxfilesize',
'curlopt_maxredirs',
'curlopt_netrc',
'curlopt_nobody',
'curlopt_noprogress',
'curlopt_port',
'curlopt_post',
'curlopt_proxyauth',
'curlopt_proxyport',
'curlopt_proxytype',
'curlopt_put',
'curlopt_resume_from',
'curlopt_ssl_verifyhost',
'curlopt_ssl_verifypeer',
'curlopt_sslengine_default',
'curlopt_sslversion',
'curlopt_tcp_nodelay',
'curlopt_timecondition',
'curlopt_timeout',
'curlopt_timevalue',
'curlopt_transfertext',
'curlopt_unrestricted_auth',
'curlopt_upload',
'curlopt_verbose',
'curlopt_infilesize_large',
'curlopt_maxfilesize_large',
'curlopt_postfieldsize_large',
'curlopt_resume_from_large',
'curlopt_http200aliases',
'curlopt_httpheader',
'curlopt_postquote',
'curlopt_prequote',
'curlopt_quote',
'curlopt_httppost',
'curlopt_writedata',
'curl_version_ipv6',
'curl_version_kerberos4',
'curl_version_ssl',
'curl_version_libz',
'curl_version_ntlm',
'curl_version_gssnegotiate',
'curl_version_debug',
'curl_version_asynchdns',
'curl_version_spnego',
'curl_version_largefile',
'curl_version_idn',
'curl_netrc_ignored',
'curl_netrc_optional',
'curl_netrc_required',
'curl_http_version_none',
'curl_http_version_1_0',
'curl_http_version_1_1',
'curl_ipresolve_whatever',
'curl_ipresolve_v4',
'curl_ipresolve_v6',
'curlftpssl_none',
'curlftpssl_try',
'curlftpssl_control',
'curlftpssl_all',
'curlftpssl_last',
'curlftpauth_default',
'curlftpauth_ssl',
'curlftpauth_tls',
'curlauth_none',
'curlauth_basic',
'curlauth_digest',
'curlauth_gssnegotiate',
'curlauth_ntlm',
'curlauth_any',
'curlauth_anysafe',
'curlproxy_http',
'curlproxy_socks4',
'curlproxy_socks5',
'curle_ok',
'curle_unsupported_protocol',
'curle_failed_init',
'curle_url_malformat',
'curle_url_malformat_user',
'curle_couldnt_resolve_proxy',
'curle_couldnt_resolve_host',
'curle_couldnt_connect',
'curle_ftp_weird_server_reply',
'curle_ftp_access_denied',
'curle_ftp_user_password_incorrect',
'curle_ftp_weird_pass_reply',
'curle_ftp_weird_user_reply',
'curle_ftp_weird_pasv_reply',
'curle_ftp_weird_227_format',
'curle_ftp_cant_get_host',
'curle_ftp_cant_reconnect',
'curle_ftp_couldnt_set_binary',
'curle_partial_file',
'curle_ftp_couldnt_retr_file',
'curle_ftp_write_error',
'curle_ftp_quote_error',
'curle_http_returned_error',
'curle_write_error',
'curle_malformat_user',
'curle_read_error',
'curle_out_of_memory',
'curle_operation_timeouted',
'curle_ftp_couldnt_set_ascii',
'curle_ftp_port_failed',
'curle_ftp_couldnt_use_rest',
'curle_ftp_couldnt_get_size',
'curle_http_range_error',
'curle_http_post_error',
'curle_ssl_connect_error',
'curle_bad_download_resume',
'curle_file_couldnt_read_file',
'curle_ldap_cannot_bind',
'curle_ldap_search_failed',
'curle_library_not_found',
'curle_function_not_found',
'curle_aborted_by_callback',
'curle_bad_function_argument',
'curle_bad_calling_order',
'curle_interface_failed',
'curle_bad_password_entered',
'curle_too_many_redirects',
'curle_unknown_telnet_option',
'curle_telnet_option_syntax',
'curle_obsolete',
'curle_ssl_peer_certificate',
'curle_got_nothing',
'curle_ssl_engine_notfound',
'curle_ssl_engine_setfailed',
'curle_send_error',
'curle_recv_error',
'curle_share_in_use',
'curle_ssl_certproblem',
'curle_ssl_cipher',
'curle_ssl_cacert',
'curle_bad_content_encoding',
'curle_ldap_invalid_url',
'curle_filesize_exceeded',
'curle_ftp_ssl_failed',
'curle_send_fail_rewind',
'curle_ssl_engine_initfailed',
'curle_login_denied',
'curlmsg_done',
'regexp',
'array',
'boolean',
'zip_open',
'zip_name_locate',
'zip_fopen',
'zip_fopen_index',
'zip_fread',
'zip_fclose',
'zip_close',
'zip_stat',
'zip_stat_index',
'zip_get_archive_comment',
'zip_get_file_comment',
'zip_get_name',
'zip_get_num_files',
'zip_add',
'zip_replace',
'zip_add_dir',
'zip_set_file_comment',
'zip_rename',
'zip_delete',
'zip_unchange',
'zip_unchange_all',
'zip_unchange_archive',
'zip_set_archive_comment',
'zip_error_to_str',
'zip_file_strerror',
'zip_strerror',
'zip_error_get',
'zip_file_error_get',
'zip_error_get_sys_type',
'zlib_version',
'fastcgi_initiate_request',
'debugging_enabled',
'debugging_stop',
'evdns_resolve_ipv4',
'evdns_resolve_ipv6',
'evdns_resolve_reverse',
'evdns_resolve_reverse_ipv6',
'library_thread_loader',
'stdout',
'stdoutnl',
'fail',
'fail_if',
'fail_ifnot',
'error_code',
'error_msg',
'error_obj',
'error_stack',
'error_push',
'error_pop',
'error_reset',
'error_msg_invalidparameter',
'error_code_invalidparameter',
'error_msg_networkerror',
'error_code_networkerror',
'error_msg_runtimeassertion',
'error_code_runtimeassertion',
'error_msg_methodnotfound',
'error_code_methodnotfound',
'error_msg_resnotfound',
'error_code_resnotfound',
'error_msg_filenotfound',
'error_code_filenotfound',
'error_msg_aborted',
'error_code_aborted',
'error_msg_dividebyzero',
'error_code_dividebyzero',
'error_msg_noerror',
'error_code_noerror',
'abort',
'protect',
'trait_asstring',
'any',
'trait_generator',
'trait_decompose_assignment',
'trait_foreach',
'trait_generatorcentric',
'generateforeach',
'generateforeachunkeyed',
'generateforeachkeyed',
'trait_foreachtextelement',
'trait_finite',
'trait_finiteforeach',
'trait_keyed',
'trait_keyedfinite',
'trait_keyedforeach',
'trait_frontended',
'trait_backended',
'trait_doubleended',
'trait_positionallykeyed',
'trait_expandable',
'trait_frontexpandable',
'trait_backexpandable',
'trait_contractible',
'trait_frontcontractible',
'trait_backcontractible',
'trait_fullymutable',
'trait_keyedmutable',
'trait_endedfullymutable',
'trait_setoperations',
'trait_searchable',
'trait_positionallysearchable',
'trait_pathcomponents',
'trait_readbytes',
'trait_writebytes',
'trait_setencoding',
'trait_readstring',
'trait_writestring',
'trait_hashable',
'eacher',
'trait_each_sub',
'trait_stack',
'trait_list',
'trait_array',
'trait_map',
'trait_close',
'trait_file',
'trait_scalar',
'method_name',
'trait_queriablelambda',
'trait_queriable',
'queriable_asstring',
'queriable_where',
'queriable_do',
'queriable_sum',
'queriable_average',
'queriable_min',
'queriable_max',
'queriable_select',
'queriable_selectmany',
'queriable_groupby',
'queriable_join',
'queriable_groupjoin',
'queriable_orderby',
'queriable_orderbydescending',
'queriable_thenby',
'queriable_thenbydescending',
'queriable_skip',
'queriable_take',
'queriable_grouping',
'queriable_internal_combinebindings',
'queriable_defaultcompare',
'queriable_reversecompare',
'queriable_qsort',
'generateseries',
'timer',
'tie',
'pairup',
'delve',
'repeat',
'thread_var_push',
'thread_var_pop',
'thread_var_get',
'loop_value',
'loop_value_push',
'loop_value_pop',
'loop_key',
'loop_key_push',
'loop_key_pop',
'loop_push',
'loop_pop',
'loop_count',
'loop_continue',
'loop_abort',
'loop',
'sys_while',
'sys_iterate',
'pair_compare',
'serialization_object_identity_compare',
'serialization_element',
'trait_serializable',
'serialization_writer_standin',
'serialization_writer_ref',
'serialization_writer',
'serialization_reader',
'string_validcharset',
'eol',
'encoding_utf8',
'encoding_iso88591',
'trait_treenode',
'tree_nullnode',
'tree_node',
'tree_base',
'map_node',
'map',
'integer_random',
'integer_bitor',
'millis',
'micros',
'max',
'min',
'range',
'median',
'decimal_random',
'pi',
'lcapi_datasourceinit',
'lcapi_datasourceterm',
'lcapi_datasourcenames',
'lcapi_datasourcetablenames',
'lcapi_datasourcesearch',
'lcapi_datasourceadd',
'lcapi_datasourceupdate',
'lcapi_datasourcedelete',
'lcapi_datasourceinfo',
'lcapi_datasourceexecsql',
'lcapi_datasourcerandom',
'lcapi_datasourceschemanames',
'lcapi_datasourcecloseconnection',
'lcapi_datasourcetickle',
'lcapi_datasourceduplicate',
'lcapi_datasourcescripts',
'lcapi_datasourceimage',
'lcapi_datasourcefindall',
'lcapi_datasourcematchesname',
'lcapi_datasourcepreparesql',
'lcapi_datasourceunpreparesql',
'lcapi_datasourcenothing',
'lcapi_fourchartointeger',
'lcapi_datasourcetypestring',
'lcapi_datasourcetypeinteger',
'lcapi_datasourcetypeboolean',
'lcapi_datasourcetypeblob',
'lcapi_datasourcetypedecimal',
'lcapi_datasourcetypedate',
'lcapi_datasourceprotectionnone',
'lcapi_datasourceprotectionreadonly',
'lcapi_datasourceopgt',
'lcapi_datasourceopgteq',
'lcapi_datasourceopeq',
'lcapi_datasourceopneq',
'lcapi_datasourceoplt',
'lcapi_datasourceoplteq',
'lcapi_datasourceopbw',
'lcapi_datasourceopew',
'lcapi_datasourceopct',
'lcapi_datasourceopnct',
'lcapi_datasourceopnbw',
'lcapi_datasourceopnew',
'lcapi_datasourceopand',
'lcapi_datasourceopor',
'lcapi_datasourceopnot',
'lcapi_datasourceopno',
'lcapi_datasourceopany',
'lcapi_datasourceopin',
'lcapi_datasourceopnin',
'lcapi_datasourceopft',
'lcapi_datasourceoprx',
'lcapi_datasourceopnrx',
'lcapi_datasourcesortascending',
'lcapi_datasourcesortdescending',
'lcapi_datasourcesortcustom',
'lcapi_loadmodules',
'lasso_version',
'lasso_uniqueid',
'usage',
'file_defaultencoding',
'file_copybuffersize',
'file_modeline',
'file_modechar',
'file_forceroot',
'file_tempfile',
'file',
'file_stdin',
'file_stdout',
'file_stderr',
'lasso_tagexists',
'lasso_methodexists',
'output',
'if_empty',
'if_null',
'if_true',
'if_false',
'process',
'treemap',
'locale_format',
'compress',
'uncompress',
'decompress',
'tag_name',
'series',
'nslookup',
'all',
'bw',
'cn',
'eq',
'ew',
'ft',
'gt',
'gte',
'lt',
'lte',
'neq',
'nrx',
'rx',
'none',
'minimal',
'full',
'output_none',
'lasso_executiontimelimit',
'namespace_global',
'namespace_using',
'namespace_import',
'site_id',
'site_name',
'sys_homepath',
'sys_masterhomepath',
'sys_supportpath',
'sys_librariespath',
'sys_databasespath',
'sys_usercapimodulepath',
'sys_appspath',
'sys_userstartuppath',
'dir',
'magick_image',
'ldap',
'ldap_scope_base',
'ldap_scope_onelevel',
'ldap_scope_subtree',
'mysqlds',
'os_process',
'odbc',
'sqliteconnector',
'sqlite_createdb',
'sqlite_setsleepmillis',
'sqlite_setsleeptries',
'java_jvm_getenv',
'java_jvm_create',
'java_jdbc_load',
'database_database',
'database_table_datasources',
'database_table_datasource_hosts',
'database_table_datasource_databases',
'database_table_database_tables',
'database_table_table_fields',
'database_qs',
'database_initialize',
'database_util_cleanpath',
'database_adddefaultsqlitehost',
'database_registry',
'sqlite_ok',
'sqlite_error',
'sqlite_internal',
'sqlite_perm',
'sqlite_abort',
'sqlite_busy',
'sqlite_locked',
'sqlite_nomem',
'sqlite_readonly',
'sqlite_interrupt',
'sqlite_ioerr',
'sqlite_corrupt',
'sqlite_notfound',
'sqlite_full',
'sqlite_cantopen',
'sqlite_protocol',
'sqlite_empty',
'sqlite_schema',
'sqlite_toobig',
'sqlite_constraint',
'sqlite_mismatch',
'sqlite_misuse',
'sqlite_nolfs',
'sqlite_auth',
'sqlite_format',
'sqlite_range',
'sqlite_notadb',
'sqlite_row',
'sqlite_done',
'sqlite_integer',
'sqlite_float',
'sqlite_blob',
'sqlite_null',
'sqlite_text',
'sqlite3',
'sqlite_db',
'sqlite_results',
'sqlite_currentrow',
'sqlite_table',
'sqlite_column',
'bom_utf16be',
'bom_utf16le',
'bom_utf32be',
'bom_utf32le',
'bom_utf8',
'curl',
'include_url',
'ftp_getdata',
'ftp_getfile',
'ftp_getlisting',
'ftp_putdata',
'ftp_putfile',
'ftp_deletefile',
'date',
'debugging_step_in',
'debugging_get_stack',
'debugging_get_context',
'debugging_detach',
'debugging_step_over',
'debugging_step_out',
'debugging_run',
'debugging_break',
'debugging_breakpoint_set',
'debugging_breakpoint_get',
'debugging_breakpoint_remove',
'debugging_breakpoint_list',
'debugging_breakpoint_update',
'debugging_terminate',
'debugging_context_locals',
'debugging_context_vars',
'debugging_context_self',
'debugging_stack',
'dbgp_stop_stack_name',
'dbgp_server',
'dbgp_packet',
'duration',
'encrypt_md5',
'inline_columninfo_pos',
'inline_resultrows_pos',
'inline_foundcount_pos',
'inline_colinfo_name_pos',
'inline_colinfo_valuelist_pos',
'inline_scopeget',
'inline_scopepush',
'inline_scopepop',
'inline_namedget',
'inline_namedput',
'inline',
'inline_type',
'resultset_count',
'resultset',
'resultsets',
'rows',
'rows_impl',
'records',
'column',
'field',
'column_names',
'field_names',
'column_name',
'field_name',
'found_count',
'shown_count',
'shown_first',
'shown_last',
'action_statement',
'lasso_currentaction',
'maxrecords_value',
'skiprecords_value',
'action_param',
'action_params',
'admin_authorization',
'admin_currentgroups',
'admin_currentuserid',
'admin_currentusername',
'database_name',
'table_name',
'layout_name',
'schema_name',
'keycolumn_name',
'keyfield_name',
'keycolumn_value',
'keyfield_value',
'inline_colinfo_type_pos',
'column_type',
'rows_array',
'records_array',
'records_map',
'trait_json_serialize',
'json_serialize',
'json_consume_string',
'json_consume_token',
'json_consume_array',
'json_consume_object',
'json_deserialize',
'json_literal',
'json_object',
'json_rpccall',
'list_node',
'list',
'jchar',
'jchararray',
'jbyte',
'jbytearray',
'jfloat',
'jint',
'jshort',
'ljapi_initialize',
'formattingbase',
'currency',
'scientific',
'percent',
'dateandtime',
'timeonly',
'locale_format_style_full',
'locale_format_style_long',
'locale_format_style_medium',
'locale_format_style_short',
'locale_format_style_default',
'locale_format_style_none',
'locale_format_style_date_time',
'net_connectinprogress',
'net_connectok',
'net_typessl',
'net_typessltcp',
'net_typessludp',
'net_typetcp',
'net_typeudp',
'net_waitread',
'net_waittimeout',
'net_waitwrite',
'trait_net',
'net_tcp',
'net_tcpssl',
'net_named_pipe',
'net_udppacket',
'net_udp',
'admin_initialize',
'admin_getpref',
'admin_setpref',
'admin_removepref',
'admin_userexists',
'admin_lassoservicepath',
'pdf_package',
'pdf_rectangle',
'pdf_typebase',
'pdf_doc',
'pdf_color',
'pdf_barcode',
'pdf_font',
'pdf_image',
'pdf_list',
'pdf_read',
'pdf_table',
'pdf_text',
'pdf_hyphenator',
'pdf_chunk',
'pdf_phrase',
'pdf_paragraph',
'pdf_serve',
'queue',
'random_seed',
'set',
'sys_process',
'worker_pool',
'xml',
'trait_xml_elementcompat',
'trait_xml_nodecompat',
'xml_transform',
'zip_create',
'zip_excl',
'zip_checkcons',
'zip_fl_nocase',
'zip_fl_nodir',
'zip_fl_compressed',
'zip_fl_unchanged',
'zip_er_ok',
'zip_er_multidisk',
'zip_er_rename',
'zip_er_close',
'zip_er_seek',
'zip_er_read',
'zip_er_write',
'zip_er_crc',
'zip_er_zipclosed',
'zip_er_noent',
'zip_er_exists',
'zip_er_open',
'zip_er_tmpopen',
'zip_er_zlib',
'zip_er_memory',
'zip_er_changed',
'zip_er_compnotsupp',
'zip_er_eof',
'zip_er_inval',
'zip_er_nozip',
'zip_er_internal',
'zip_er_incons',
'zip_er_remove',
'zip_er_deleted',
'zip_et_none',
'zip_et_sys',
'zip_et_zlib',
'zip_cm_default',
'zip_cm_store',
'zip_cm_shrink',
'zip_cm_reduce_1',
'zip_cm_reduce_2',
'zip_cm_reduce_3',
'zip_cm_reduce_4',
'zip_cm_implode',
'zip_cm_deflate',
'zip_cm_deflate64',
'zip_cm_pkware_implode',
'zip_cm_bzip2',
'zip_em_none',
'zip_em_trad_pkware',
'zip_em_des',
'zip_em_rc2_old',
'zip_em_3des_168',
'zip_em_3des_112',
'zip_em_aes_128',
'zip_em_aes_192',
'zip_em_aes_256',
'zip_em_rc2',
'zip_em_rc4',
'zip_em_unknown',
'zip_file',
'zip',
'cache_server_element',
'cache_server',
'dns_response',
'dns_lookup',
'dns_default',
'string_charfromname',
'string_concatenate',
'string_endswith',
'string_extract',
'string_findposition',
'string_findregexp',
'string_getunicodeversion',
'string_insert',
'string_isalpha',
'string_isalphanumeric',
'string_isdigit',
'string_ishexdigit',
'string_islower',
'string_isnumeric',
'string_ispunctuation',
'string_isspace',
'string_isupper',
'string_length',
'string_remove',
'string_removeleading',
'string_removetrailing',
'string_replace',
'string_replaceregexp',
'string_todecimal',
'string_tointeger',
'string_uppercase',
'string_lowercase',
'document',
'component_render_state',
'component',
'component_container',
'document_base',
'document_body',
'document_header',
'text_document',
'data_document',
'email_attachment_mime_type',
'email_translatebreakstocrlf',
'email_findemails',
'email_fix_address',
'email_fix_address_list',
'email_compose',
'email_send',
'email_queue',
'email_immediate',
'email_result',
'email_status',
'email_token',
'email_merge',
'email_batch',
'encode_qheader',
'email_pop',
'email_parse',
'email_safeemail',
'email_extract',
'email_pop_priv_substring',
'email_pop_priv_extract',
'email_digestchallenge',
'email_pop_priv_quote',
'email_digestresponse',
'encrypt_hmac',
'encrypt_crammd5',
'email_queue_impl_base',
'email_fs_error_clean',
'email_stage_impl_base',
'email_initialize',
'email_mxlookup',
'lasso_errorreporting',
'fcgi_version_1',
'fcgi_null_request_id',
'fcgi_begin_request',
'fcgi_abort_request',
'fcgi_end_request',
'fcgi_params',
'fcgi_stdin',
'fcgi_stdout',
'fcgi_stderr',
'fcgi_data',
'fcgi_get_values',
'fcgi_get_values_result',
'fcgi_unknown_type',
'fcgi_keep_conn',
'fcgi_responder',
'fcgi_authorize',
'fcgi_filter',
'fcgi_request_complete',
'fcgi_cant_mpx_conn',
'fcgi_overloaded',
'fcgi_unknown_role',
'fcgi_max_conns',
'fcgi_max_reqs',
'fcgi_mpxs_conns',
'fcgi_read_timeout_seconds',
'fcgi_record',
'fcgi_makeendrequestbody',
'fcgi_bodychunksize',
'fcgi_makestdoutbody',
'fcgi_readparam',
'web_connection',
'web_request_impl',
'web_request',
'fcgi_request',
'include_cache_compare',
'include_cache',
'atbegin',
'fastcgi_initialize',
'fastcgi_handlecon',
'fastcgi_handlereq',
'fastcgi_each_fcgi_param',
'fastcgi_createfcgirequest',
'fastcgi_server',
'web_handlefcgirequest',
'filemaker_datasource',
'filemakerds_initialize',
'filemakerds',
'value_listitem',
'valuelistitem',
'selected',
'checked',
'value_list',
'http_document',
'http_document_header',
'http_header_field',
'html_document_head',
'html_document_body',
'raw_document_body',
'bytes_document_body',
'html_element_coreattrs',
'html_element_i18nattrs',
'html_element_eventsattrs',
'html_attributed',
'html_attr',
'html_atomic_element',
'html_container_element',
'http_error',
'html_script',
'html_text',
'html_raw',
'html_binary',
'html_json',
'html_cdata',
'html_eol',
'html_div',
'html_span',
'html_br',
'html_hr',
'html_h1',
'html_h2',
'html_h3',
'html_h4',
'html_h5',
'html_h6',
'html_meta',
'html_link',
'html_object',
'html_style',
'html_base',
'html_table',
'html_tr',
'html_td',
'html_th',
'html_img',
'html_form',
'html_fieldset',
'html_legend',
'html_input',
'html_label',
'html_option',
'html_select',
'http_char_space',
'http_char_htab',
'http_char_cr',
'http_char_lf',
'http_char_question',
'http_char_colon',
'http_read_timeout_secs',
'http_server_web_connection',
'http_server',
'http_server_connection_handler',
'image',
'jdbc_initialize',
'lassoapp_settingsdb',
'lassoapp_resource',
'lassoapp_format_mod_date',
'lassoapp_include_current',
'lassoapp_include',
'lassoapp_find_missing_file',
'lassoapp_source',
'lassoapp_capabilities',
'lassoapp_get_capabilities_name',
'lassoapp_exists',
'lassoapp_path_to_method_name',
'lassoapp_invoke_resource',
'lassoapp_installer',
'lassoapp_initialize_db',
'lassoapp_initialize',
'lassoapp_content_rep_halt',
'lassoapp_issourcefileextension',
'lassoapp_dirsrc_fileresource',
'lassoapp_dirsrc_appsource',
'lassoapp_livesrc_fileresource',
'lassoapp_livesrc_appsource',
'lassoapp_long_expiring_bytes',
'lassoapp_zip_file_server',
'lassoapp_zipsrc_fileresource',
'lassoapp_zipsrc_appsource',
'lassoapp_compiledsrc_fileresource',
'lassoapp_compiledsrc_appsource',
'lassoapp_manualsrc_appsource',
'lassoapp_current_include',
'lassoapp_current_app',
'lassoapp_do_with_include',
'lassoapp_link',
'lassoapp_load_module',
'lassoapp_mime_type_html',
'lassoapp_mime_type_lasso',
'lassoapp_mime_type_xml',
'lassoapp_mime_type_ppt',
'lassoapp_mime_type_js',
'lassoapp_mime_type_txt',
'lassoapp_mime_type_jpg',
'lassoapp_mime_type_png',
'lassoapp_mime_type_gif',
'lassoapp_mime_type_css',
'lassoapp_mime_type_csv',
'lassoapp_mime_type_tif',
'lassoapp_mime_type_ico',
'lassoapp_mime_type_rss',
'lassoapp_mime_type_xhr',
'lassoapp_mime_type_pdf',
'lassoapp_mime_type_docx',
'lassoapp_mime_type_doc',
'lassoapp_mime_type_zip',
'lassoapp_mime_type_svg',
'lassoapp_mime_type_ttf',
'lassoapp_mime_type_woff',
'lassoapp_mime_get',
'log_level_critical',
'log_level_warning',
'log_level_detail',
'log_level_sql',
'log_level_deprecated',
'log_destination_console',
'log_destination_file',
'log_destination_database',
'log',
'log_setdestination',
'log_always',
'log_critical',
'log_warning',
'log_detail',
'log_sql',
'log_deprecated',
'log_max_file_size',
'log_trim_file_size',
'log_impl_base',
'log_initialize',
'portal_impl',
'portal',
'security_database',
'security_table_groups',
'security_table_users',
'security_table_ug_map',
'security_default_realm',
'security_initialize',
'security_registry',
'session_driver',
'session_initialize',
'session_getdefaultdriver',
'session_setdefaultdriver',
'session_start',
'session_addvar',
'session_removevar',
'session_end',
'session_id',
'session_abort',
'session_result',
'session_deleteexpired',
'memory_session_driver_impl_entry',
'memory_session_driver_impl',
'sqlite_session_driver_impl_entry',
'sqlite_session_driver_impl',
'mysql_session_driver_impl',
'odbc_session_driver_mssql',
'odbc_session_driver_impl',
'session_decorate',
'session_delete_expired_thread',
'email_smtp',
'auth_admin',
'auth_check',
'auth_custom',
'auth_group',
'auth_prompt',
'auth_user',
'client_address',
'client_addr',
'client_authorization',
'client_browser',
'client_contentlength',
'client_contenttype',
'client_cookielist',
'client_cookies',
'client_encoding',
'client_formmethod',
'client_getargs',
'client_getparams',
'client_getparam',
'client_headers',
'client_integertoip',
'client_ip',
'client_iptointeger',
'client_password',
'client_postargs',
'client_postparams',
'client_postparam',
'client_type',
'client_username',
'client_url',
'referer_url',
'referrer_url',
'content_type',
'content_encoding',
'cookie',
'cookie_set',
'include',
'include_currentpath',
'include_filepath',
'include_localpath',
'include_once',
'include_path',
'include_raw',
'includes',
'library',
'library_once',
'response_filepath',
'response_localpath',
'response_path',
'response_realm',
'response_root',
'redirect_url',
'server_admin',
'server_name',
'server_ip',
'server_port',
'server_protocol',
'server_signature',
'server_software',
'server_push',
'token_value',
'wap_isenabled',
'wap_maxbuttons',
'wap_maxhorzpixels',
'wap_maxvertpixels',
'wap_maxcolumns',
'wap_maxrows',
'define_atbegin',
'define_atend',
'content_header',
'content_addheader',
'content_replaceheader',
'content_body',
'html_comment',
'web_node_content_json_specialized',
'web_node',
'web_node_container',
'web_node_content_representation',
'web_node_content',
'web_node_content_document',
'web_node_postable',
'web_node_base',
'web_node_forpath',
'web_nodes_requesthandler',
'web_nodes_normalizeextension',
'web_nodes_processcontentnode',
'web_node_root',
'web_nodes_initialize',
'web_node_content_representation_xhr_container',
'web_node_content_representation_xhr',
'web_node_content_html_specialized',
'web_node_content_representation_html_specialized',
'web_node_content_representation_html',
'web_node_content_css_specialized',
'web_node_content_representation_css_specialized',
'web_node_content_representation_css',
'web_node_content_js_specialized',
'web_node_content_representation_js_specialized',
'web_node_content_representation_js',
'web_node_echo',
'web_response_nodesentry',
'web_error_atend',
'web_response_impl',
'web_response',
'web_router_database',
'web_router_initialize',
'web_router',
'asstring',
'isnota',
'isallof',
'isanyof',
'oncompare',
'isa',
'ascopy',
'ascopydeep',
'type',
'invoke',
'atend',
'decomposeassignment',
'asgenerator',
'foreach',
'eachword',
'eachline',
'eachcharacter',
'foreachwordbreak',
'foreachlinebreak',
'foreachcharacter',
'isempty',
'isnotempty',
'ifempty',
'ifnotempty',
'size',
'values',
'asarray',
'aslist',
'asstaticarray',
'join',
'get',
'keys',
'askeyedgenerator',
'eachpair',
'eachkey',
'foreachpair',
'foreachkey',
'front',
'first',
'back',
'last',
'second',
'insert',
'insertfront',
'insertfirst',
'insertback',
'insertfrom',
'insertlast',
'remove',
'removeall',
'removefront',
'removefirst',
'removeback',
'removelast',
'difference',
'intersection',
'union',
'contains',
'find',
'findposition',
'componentdelimiter',
'extensiondelimiter',
'lastcomponent',
'foreachpathcomponent',
'eachcomponent',
'striplastcomponent',
'firstcomponent',
'stripfirstcomponent',
'splitextension',
'hastrailingcomponent',
'isfullpath',
'findlast',
'sub',
'readsomebytes',
'readbytesfully',
'readbytes',
'writebytes',
'encoding',
'readstring',
'writestring',
'hash',
'foreachsub',
'eachsub',
'push',
'pop',
'top',
'dowithclose',
'close',
'fd',
'do',
'sum',
'average',
'where',
'select',
'selectmany',
'groupby',
'groupjoin',
'orderby',
'orderbydescending',
'thenby',
'thenbydescending',
'skip',
'take',
'serialize',
'serializationelements',
'acceptdeserializedelement',
'left',
'right',
'up',
'value',
'bind',
'listen',
'localaddress',
'remoteaddress',
'shutdownrd',
'shutdownwr',
'shutdownrdwr',
'setname',
'contents',
'tagname',
'foreachchild',
'eachchild',
'foreachmatch',
'eachmatch',
'haschildnodes',
'childnodes',
'extract',
'connection',
'requestparams',
'stdin',
'mimes',
'setstatus',
'getstatus',
'writeheaderline',
'writeheaderbytes',
'writebodybytes',
'id',
'class',
'style',
'title',
'gethtmlattr',
'lang',
'onclick',
'ondblclick',
'onmousedown',
'onmouseup',
'onmouseover',
'onmousemove',
'onmouseout',
'onkeypress',
'onkeydown',
'onkeyup',
'sethtmlattr',
'gethtmlattrstring',
'hashtmlattr',
'addcomponent',
'attributes',
'issourcefile',
'resourceinvokable',
'resourcename',
'fullpath',
'appname',
'srcpath',
'resources',
'foo',
'startup',
'validatesessionstable',
'createtable',
'fetchdata',
'savedata',
'init',
'kill',
'expire',
'jsonlabel',
'jsonhtml',
'jsonisleaf',
'delim',
'name',
'path',
'nodelist',
'subnode',
'subnodes',
'representnoderesult',
'mime',
'extensions',
'representnode',
'defaultcontentrepresentation',
'supportscontentrepresentation',
'acceptpost',
'htmlcontent',
'csscontent',
'jscontent',
'escape_member',
'sameas',
'parent',
'settrait',
'oncreate',
'listmethods',
'hasmethod',
'addtrait',
'gettype',
'istype',
'doccomment',
'requires',
'provides',
'subtraits',
'description',
'hosttonet16',
'hosttonet32',
'nettohost16',
'nettohost32',
'nettohost64',
'hosttonet64',
'bitset',
'bittest',
'bitflip',
'bitclear',
'bitor',
'bitand',
'bitxor',
'bitnot',
'bitshiftleft',
'bitshiftright',
'abs',
'div',
'dereferencepointer',
'asdecimal',
'deg2rad',
'asstringhex',
'asstringoct',
'acos',
'asin',
'atan',
'atan2',
'ceil',
'cos',
'cosh',
'exp',
'fabs',
'floor',
'frexp',
'ldexp',
'log10',
'modf',
'pow',
'sin',
'sinh',
'sqrt',
'tan',
'tanh',
'erf',
'erfc',
'gamma',
'hypot',
'j0',
'j1',
'jn',
'lgamma',
'y0',
'y1',
'yn',
'isnan',
'acosh',
'asinh',
'atanh',
'cbrt',
'expm1',
'nextafter',
'scalb',
'ilogb',
'log1p',
'logb',
'remainder',
'rint',
'asinteger',
'self',
'detach',
'restart',
'resume',
'continuation',
'home',
'callsite_file',
'callsite_line',
'callsite_col',
'callstack',
'splitthread',
'threadreaddesc',
'givenblock',
'autocollectbuffer',
'calledname',
'methodname',
'invokeuntil',
'invokewhile',
'invokeautocollect',
'asasync',
'append',
'appendchar',
'private_find',
'private_findlast',
'length',
'chardigitvalue',
'private_compare',
'charname',
'chartype',
'decompose',
'normalize',
'digit',
'foldcase',
'private_merge',
'unescape',
'trim',
'titlecase',
'reverse',
'getisocomment',
'getnumericvalue',
'totitle',
'toupper',
'tolower',
'lowercase',
'uppercase',
'isalnum',
'isalpha',
'isbase',
'iscntrl',
'isdigit',
'isxdigit',
'islower',
'isprint',
'isspace',
'istitle',
'ispunct',
'isgraph',
'isblank',
'isualphabetic',
'isulowercase',
'isupper',
'isuuppercase',
'isuwhitespace',
'iswhitespace',
'encodehtml',
'decodehtml',
'encodexml',
'decodexml',
'encodehtmltoxml',
'getpropertyvalue',
'hasbinaryproperty',
'asbytes',
'equals',
'compare',
'comparecodepointorder',
'padleading',
'padtrailing',
'merge',
'split',
'removeleading',
'removetrailing',
'beginswith',
'endswith',
'replace',
'eachwordbreak',
'encodesql92',
'encodesql',
'substring',
'setsize',
'reserve',
'getrange',
'private_setrange',
'importas',
'import8bits',
'import32bits',
'import64bits',
'import16bits',
'importbytes',
'importpointer',
'export8bits',
'export16bits',
'export32bits',
'export64bits',
'exportbytes',
'exportsigned8bits',
'exportsigned16bits',
'exportsigned32bits',
'exportsigned64bits',
'marker',
'swapbytes',
'encodeurl',
'decodeurl',
'encodebase64',
'decodebase64',
'encodeqp',
'decodeqp',
'encodemd5',
'encodehex',
'decodehex',
'detectcharset',
'bestcharset',
'crc',
'importstring',
'setrange',
'exportas',
'exportstring',
'exportpointerbits',
'foreachbyte',
'eachbyte',
'typename',
'returntype',
'restname',
'paramdescs',
'action',
'statement',
'inputcolumns',
'keycolumns',
'returncolumns',
'sortcolumns',
'skiprows',
'maxrows',
'rowsfound',
'statementonly',
'lop',
'databasename',
'tablename',
'schemaname',
'hostid',
'hostdatasource',
'hostname',
'hostport',
'hostusername',
'hostpassword',
'hostschema',
'hosttableencoding',
'hostextra',
'hostisdynamic',
'refobj',
'prepared',
'getset',
'addset',
'numsets',
'addrow',
'addcolumninfo',
'forcedrowid',
'makeinheritedcopy',
'filename',
'expose',
'recover',
'count',
'exchange',
'findindex',
'sort',
'family',
'isvalid',
'isssl',
'open',
'read',
'write',
'ioctl',
'seek',
'mode',
'mtime',
'atime',
'dup',
'dup2',
'fchdir',
'fchown',
'fsync',
'ftruncate',
'fchmod',
'sendfd',
'receivefd',
'readobject',
'tryreadobject',
'writeobject',
'leaveopen',
'rewind',
'tell',
'language',
'script',
'country',
'variant',
'displaylanguage',
'displayscript',
'displaycountry',
'displayvariant',
'displayname',
'basename',
'keywords',
'iso3language',
'iso3country',
'formatas',
'formatnumber',
'parsenumber',
'parseas',
'format',
'parse',
'add',
'roll',
'getattr',
'setattr',
'clear',
'isset',
'settimezone',
'timezone',
'time',
'indaylighttime',
'createdocument',
'parsedocument',
'hasfeature',
'createdocumenttype',
'nodename',
'nodevalue',
'nodetype',
'parentnode',
'firstchild',
'lastchild',
'previoussibling',
'nextsibling',
'ownerdocument',
'namespaceuri',
'prefix',
'localname',
'insertbefore',
'replacechild',
'removechild',
'appendchild',
'clonenode',
'issupported',
'hasattributes',
'extractone',
'transform',
'data',
'substringdata',
'appenddata',
'insertdata',
'deletedata',
'replacedata',
'doctype',
'implementation',
'documentelement',
'createelement',
'createdocumentfragment',
'createtextnode',
'createcomment',
'createcdatasection',
'createprocessinginstruction',
'createattribute',
'createentityreference',
'getelementsbytagname',
'importnode',
'createelementns',
'createattributens',
'getelementsbytagnamens',
'getelementbyid',
'getattribute',
'setattribute',
'removeattribute',
'getattributenode',
'setattributenode',
'removeattributenode',
'getattributens',
'setattributens',
'removeattributens',
'getattributenodens',
'setattributenodens',
'hasattribute',
'hasattributens',
'specified',
'ownerelement',
'splittext',
'notationname',
'publicid',
'systemid',
'target',
'entities',
'notations',
'internalsubset',
'item',
'getnameditem',
'getnameditemns',
'setnameditem',
'setnameditemns',
'removenameditem',
'removenameditemns',
'next',
'readattributevalue',
'attributecount',
'baseuri',
'depth',
'hasvalue',
'isemptyelement',
'xmllang',
'getattributenamespace',
'lookupnamespace',
'movetoattribute',
'movetoattributenamespace',
'movetofirstattribute',
'movetonextattribute',
'movetoelement',
'prepare',
'last_insert_rowid',
'total_changes',
'interrupt',
'errcode',
'errmsg',
'addmathfunctions',
'finalize',
'step',
'bind_blob',
'bind_double',
'bind_int',
'bind_null',
'bind_text',
'bind_parameter_index',
'reset',
'column_count',
'column_decltype',
'column_blob',
'column_double',
'column_int64',
'column_text',
'ismultipart',
'gotfileupload',
'setmaxfilesize',
'getparts',
'trackingid',
'currentfile',
'addtobuffer',
'input',
'replacepattern',
'findpattern',
'ignorecase',
'setinput',
'setreplacepattern',
'setfindpattern',
'setignorecase',
'appendreplacement',
'matches',
'private_replaceall',
'appendtail',
'groupcount',
'matchposition',
'matchesstart',
'private_replacefirst',
'private_split',
'matchstring',
'replaceall',
'replacefirst',
'findall',
'findcount',
'findfirst',
'findsymbols',
'loadlibrary',
'getlibrary',
'f',
'r',
'form',
'gen',
'callfirst',
'key',
'by',
'from',
'to',
'd',
't',
'object',
'inneroncompare',
'members',
'writeid',
'addmember',
'refid',
'index',
'objects',
'tabs',
'trunk',
'trace',
'asxml',
'tabstr',
'toxmlstring',
'idmap',
'readidobjects',
'red',
'root',
'getnode',
'firstnode',
'lastnode',
'nextnode',
'private_rebalanceforremove',
'private_rotateleft',
'private_rotateright',
'private_rebalanceforinsert',
'eachnode',
'foreachnode',
'resolvelinks',
'parentdir',
'aslazystring',
'openread',
'openwrite',
'openwriteonly',
'openappend',
'opentruncate',
'exists',
'modificationtime',
'lastaccesstime',
'modificationdate',
'lastaccessdate',
'delete',
'moveto',
'copyto',
'linkto',
'flush',
'chmod',
'chown',
'isopen',
'position',
'setmarker',
'setposition',
'setmode',
'foreachline',
'lock',
'unlock',
'trylock',
'testlock',
'perms',
'islink',
'isdir',
'realpath',
'openwith',
'create',
'setcwd',
'foreachentry',
'eachpath',
'eachfilepath',
'eachdirpath',
'each',
'eachfile',
'eachdir',
'eachpathrecursive',
'eachfilepathrecursive',
'eachdirpathrecursive',
'eachentry',
'makefullpath',
'annotate',
'blur',
'command',
'composite',
'contrast',
'convert',
'crop',
'execute',
'enhance',
'flipv',
'fliph',
'modulate',
'rotate',
'save',
'scale',
'sharpen',
'addcomment',
'comments',
'describe',
'height',
'pixel',
'resolutionv',
'resolutionh',
'width',
'setcolorspace',
'colorspace',
'debug',
'histogram',
'imgptr',
'appendimagetolist',
'fx',
'applyheatcolors',
'authenticate',
'search',
'searchurl',
'readerror',
'readline',
'setencoding',
'closewrite',
'exitcode',
'getversion',
'findclass',
'throw',
'thrownew',
'exceptionoccurred',
'exceptiondescribe',
'exceptionclear',
'fatalerror',
'newglobalref',
'deleteglobalref',
'deletelocalref',
'issameobject',
'allocobject',
'newobject',
'getobjectclass',
'isinstanceof',
'getmethodid',
'callobjectmethod',
'callbooleanmethod',
'callbytemethod',
'callcharmethod',
'callshortmethod',
'callintmethod',
'calllongmethod',
'callfloatmethod',
'calldoublemethod',
'callvoidmethod',
'callnonvirtualobjectmethod',
'callnonvirtualbooleanmethod',
'callnonvirtualbytemethod',
'callnonvirtualcharmethod',
'callnonvirtualshortmethod',
'callnonvirtualintmethod',
'callnonvirtuallongmethod',
'callnonvirtualfloatmethod',
'callnonvirtualdoublemethod',
'callnonvirtualvoidmethod',
'getfieldid',
'getobjectfield',
'getbooleanfield',
'getbytefield',
'getcharfield',
'getshortfield',
'getintfield',
'getlongfield',
'getfloatfield',
'getdoublefield',
'setobjectfield',
'setbooleanfield',
'setbytefield',
'setcharfield',
'setshortfield',
'setintfield',
'setlongfield',
'setfloatfield',
'setdoublefield',
'getstaticmethodid',
'callstaticobjectmethod',
'callstaticbooleanmethod',
'callstaticbytemethod',
'callstaticcharmethod',
'callstaticshortmethod',
'callstaticintmethod',
'callstaticlongmethod',
'callstaticfloatmethod',
'callstaticdoublemethod',
'callstaticvoidmethod',
'getstaticfieldid',
'getstaticobjectfield',
'getstaticbooleanfield',
'getstaticbytefield',
'getstaticcharfield',
'getstaticshortfield',
'getstaticintfield',
'getstaticlongfield',
'getstaticfloatfield',
'getstaticdoublefield',
'setstaticobjectfield',
'setstaticbooleanfield',
'setstaticbytefield',
'setstaticcharfield',
'setstaticshortfield',
'setstaticintfield',
'setstaticlongfield',
'setstaticfloatfield',
'setstaticdoublefield',
'newstring',
'getstringlength',
'getstringchars',
'getarraylength',
'newobjectarray',
'getobjectarrayelement',
'setobjectarrayelement',
'newbooleanarray',
'newbytearray',
'newchararray',
'newshortarray',
'newintarray',
'newlongarray',
'newfloatarray',
'newdoublearray',
'getbooleanarrayelements',
'getbytearrayelements',
'getchararrayelements',
'getshortarrayelements',
'getintarrayelements',
'getlongarrayelements',
'getfloatarrayelements',
'getdoublearrayelements',
'getbooleanarrayregion',
'getbytearrayregion',
'getchararrayregion',
'getshortarrayregion',
'getintarrayregion',
'getlongarrayregion',
'getfloatarrayregion',
'getdoublearrayregion',
'setbooleanarrayregion',
'setbytearrayregion',
'setchararrayregion',
'setshortarrayregion',
'setintarrayregion',
'setlongarrayregion',
'setfloatarrayregion',
'setdoublearrayregion',
'monitorenter',
'monitorexit',
'fromreflectedmethod',
'fromreflectedfield',
'toreflectedmethod',
'toreflectedfield',
'exceptioncheck',
'dbtablestable',
'dstable',
'dsdbtable',
'dshoststable',
'fieldstable',
'sql',
'adddatasource',
'loaddatasourceinfo',
'loaddatasourcehostinfo',
'getdatasource',
'getdatasourceid',
'getdatasourcename',
'listdatasources',
'listactivedatasources',
'removedatasource',
'listdatasourcehosts',
'listhosts',
'adddatasourcehost',
'getdatasourcehost',
'removedatasourcehost',
'getdatabasehost',
'gethostdatabase',
'listalldatabases',
'listdatasourcedatabases',
'listhostdatabases',
'getdatasourcedatabase',
'getdatasourcedatabasebyid',
'getdatabasebyname',
'getdatabasebyid',
'getdatabasebyalias',
'adddatasourcedatabase',
'removedatasourcedatabase',
'listalltables',
'listdatabasetables',
'getdatabasetable',
'getdatabasetablebyalias',
'getdatabasetablebyid',
'gettablebyid',
'adddatabasetable',
'removedatabasetable',
'removefield',
'maybevalue',
'getuniquealiasname',
'makecolumnlist',
'makecolumnmap',
'datasourcecolumns',
'datasourcemap',
'hostcolumns',
'hostmap',
'hostcolumns2',
'hostmap2',
'databasecolumns',
'databasemap',
'tablecolumns',
'tablemap',
'databasecolumnnames',
'hostcolumnnames',
'hostcolumnnames2',
'datasourcecolumnnames',
'tablecolumnnames',
'bindcount',
'db',
'tables',
'hastable',
'tablehascolumn',
'eachrow',
'bindparam',
'foreachrow',
'executelazy',
'executenow',
'lastinsertid',
'table',
'bindone',
'src',
'stat',
'colmap',
'getcolumn',
'locals',
'getcolumns',
'bodybytes',
'headerbytes',
'ready',
'token',
'url',
'done',
'header',
'result',
'statuscode',
'raw',
'version',
'perform',
'performonce',
'asraw',
'rawdiff',
'getformat',
'setformat',
'subtract',
'gmt',
'dst',
'era',
'year',
'month',
'week',
'weekofyear',
'weekofmonth',
'day',
'dayofmonth',
'dayofyear',
'dayofweek',
'dayofweekinmonth',
'ampm',
'am',
'pm',
'hour',
'hourofday',
'hourofampm',
'minute',
'millisecond',
'zoneoffset',
'dstoffset',
'yearwoy',
'dowlocal',
'extendedyear',
'julianday',
'millisecondsinday',
'firstdayofweek',
'fixformat',
'minutesbetween',
'hoursbetween',
'secondsbetween',
'daysbetween',
'businessdaysbetween',
'pdifference',
'getfield',
's',
'linediffers',
'sourceline',
'sourcecolumn',
'continuationpacket',
'continuationpoint',
'continuationstack',
'features',
'lastpoint',
'net',
'running',
'source',
'run',
'pathtouri',
'sendpacket',
'readpacket',
'handlefeatureset',
'handlefeatureget',
'handlestdin',
'handlestdout',
'handlestderr',
'isfirststep',
'handlecontinuation',
'ensurestopped',
'handlestackget',
'handlecontextnames',
'formatcontextelements',
'formatcontextelement',
'bptypetostr',
'bptoxml',
'handlebreakpointlist',
'handlebreakpointget',
'handlebreakpointremove',
'condtoint',
'inttocond',
'handlebreakpointupdate',
'handlebreakpointset',
'handlecontextget',
'handlesource',
'error',
'stoprunning',
'pollide',
'polldbg',
'runonce',
'arguments',
'argumentvalue',
'end',
'start',
'days',
'foreachday',
'padzero',
'actionparams',
'capi',
'doclose',
'isnothing',
'named',
'workinginputcolumns',
'workingkeycolumns',
'workingreturncolumns',
'workingsortcolumns',
'workingkeyfield_name',
'scanfordatasource',
'configureds',
'configuredskeys',
'scrubkeywords',
'closeprepared',
'filterinputcolumn',
'prev',
'head',
'removenode',
'listnode',
'accept',
'connect',
'foreachaccept',
'writeobjecttcp',
'readobjecttcp',
'begintls',
'endtls',
'loadcerts',
'sslerrfail',
'fromname',
'fromport',
'env',
'getclass',
'jobjectisa',
'new',
'callvoid',
'callint',
'callfloat',
'callboolean',
'callobject',
'callstring',
'callstaticobject',
'callstaticstring',
'callstaticint',
'callstaticboolean',
'chk',
'makecolor',
'realdoc',
'addbarcode',
'addchapter',
'addcheckbox',
'addcombobox',
'addhiddenfield',
'addimage',
'addlist',
'addpage',
'addparagraph',
'addpasswordfield',
'addphrase',
'addradiobutton',
'addradiogroup',
'addresetbutton',
'addsection',
'addselectlist',
'addsubmitbutton',
'addtable',
'addtextarea',
'addtextfield',
'addtext',
'arc',
'circle',
'closepath',
'curveto',
'drawtext',
'getcolor',
'getheader',
'getheaders',
'getmargins',
'getpagenumber',
'getsize',
'insertpage',
'line',
'rect',
'setcolor',
'setfont',
'setlinewidth',
'setpagenumber',
'conventionaltop',
'lowagiefont',
'jcolor',
'jbarcode',
'generatechecksum',
'getbarheight',
'getbarmultiplier',
'getbarwidth',
'getbaseline',
'getcode',
'getfont',
'gettextalignment',
'gettextsize',
'setbarheight',
'setbarmultiplier',
'setbarwidth',
'setbaseline',
'setcode',
'setgeneratechecksum',
'setshowchecksum',
'settextalignment',
'settextsize',
'showchecksum',
'showcode39startstop',
'showeanguardbars',
'jfont',
'getencoding',
'getface',
'getfullfontname',
'getpsfontname',
'getsupportedencodings',
'istruetype',
'getstyle',
'getbold',
'getitalic',
'getunderline',
'setface',
'setunderline',
'setbold',
'setitalic',
'textwidth',
'jimage',
'ontop',
'jlist',
'jread',
'addjavascript',
'exportfdf',
'extractimage',
'fieldnames',
'fieldposition',
'fieldtype',
'fieldvalue',
'gettext',
'importfdf',
'javascript',
'pagecount',
'pagerotation',
'pagesize',
'setfieldvalue',
'setpagerange',
'jtable',
'getabswidth',
'getalignment',
'getbordercolor',
'getborderwidth',
'getcolumncount',
'getpadding',
'getrowcount',
'getspacing',
'setalignment',
'setbordercolor',
'setborderwidth',
'setpadding',
'setspacing',
'jtext',
'element',
'foreachspool',
'unspool',
'err',
'in',
'out',
'pid',
'wait',
'testexitcode',
'maxworkers',
'tasks',
'workers',
'startone',
'addtask',
'waitforcompletion',
'scanworkers',
'scantasks',
'z',
'addfile',
'adddir',
'adddirpath',
'foreachfile',
'foreachfilename',
'eachfilename',
'filenames',
'getfile',
'meta',
'criteria',
'valid',
'lazyvalue',
'qdcount',
'qdarray',
'answer',
'bitformat',
'consume_rdata',
'consume_string',
'consume_label',
'consume_domain',
'consume_message',
'errors',
'warnings',
'addwarning',
'adderror',
'renderbytes',
'renderstring',
'components',
'addcomponents',
'body',
'renderdocumentbytes',
'contenttype',
'mime_boundary',
'mime_contenttype',
'mime_hdrs',
'addtextpart',
'addhtmlpart',
'addattachment',
'addpart',
'recipients',
'pop_capa',
'pop_debug',
'pop_err',
'pop_get',
'pop_ids',
'pop_index',
'pop_log',
'pop_mode',
'pop_net',
'pop_res',
'pop_server',
'pop_timeout',
'pop_token',
'pop_cmd',
'user',
'pass',
'apop',
'auth',
'quit',
'rset',
'uidl',
'retr',
'dele',
'noop',
'capa',
'stls',
'authorize',
'retrieve',
'headers',
'uniqueid',
'capabilities',
'cancel',
'results',
'lasterror',
'parse_body',
'parse_boundary',
'parse_charset',
'parse_content_disposition',
'parse_content_transfer_encoding',
'parse_content_type',
'parse_hdrs',
'parse_mode',
'parse_msg',
'parse_parts',
'parse_rawhdrs',
'rawheaders',
'content_transfer_encoding',
'content_disposition',
'boundary',
'charset',
'cc',
'subject',
'bcc',
'pause',
'continue',
'touch',
'refresh',
'status',
'queue_status',
'active_tick',
'getprefs',
'initialize',
'queue_maintenance',
'queue_messages',
'content',
'rectype',
'requestid',
'cachedappprefix',
'cachedroot',
'cookiesary',
'fcgireq',
'fileuploadsary',
'headersmap',
'httpauthorization',
'postparamsary',
'queryparamsary',
'documentroot',
'appprefix',
'httpconnection',
'httpcookie',
'httphost',
'httpuseragent',
'httpcachecontrol',
'httpreferer',
'httpreferrer',
'contentlength',
'pathtranslated',
'remoteaddr',
'remoteport',
'requestmethod',
'requesturi',
'scriptfilename',
'scriptname',
'scripturi',
'scripturl',
'serveraddr',
'serveradmin',
'servername',
'serverport',
'serverprotocol',
'serversignature',
'serversoftware',
'pathinfo',
'gatewayinterface',
'httpaccept',
'httpacceptencoding',
'httpacceptlanguage',
'ishttps',
'cookies',
'rawheader',
'queryparam',
'postparam',
'param',
'queryparams',
'querystring',
'postparams',
'poststring',
'params',
'fileuploads',
'isxhr',
'reqid',
'statusmsg',
'cap',
'n',
'proxying',
'stop',
'printsimplemsg',
'handleevalexpired',
'handlenormalconnection',
'handledevconnection',
'splittoprivatedev',
'getmode',
'novaluelists',
'makeurl',
'choosecolumntype',
'getdatabasetablepart',
'getlcapitype',
'buildquery',
'getsortfieldspart',
'endjs',
'addjs',
'addjstext',
'addendjs',
'addendjstext',
'addcss',
'addfavicon',
'attrs',
'dtdid',
'xhtml',
'code',
'msg',
'scripttype',
'defer',
'httpequiv',
'scheme',
'href',
'hreflang',
'linktype',
'rel',
'rev',
'media',
'declare',
'classid',
'codebase',
'objecttype',
'codetype',
'archive',
'standby',
'usemap',
'tabindex',
'styletype',
'method',
'enctype',
'accept_charset',
'onsubmit',
'onreset',
'accesskey',
'inputtype',
'maxlength',
'for',
'label',
'multiple',
'buff',
'wroteheaders',
'pullrequest',
'pullrawpost',
'shouldclose',
'pullurlpost',
'pullmimepost',
'pullhttpheader',
'pulloneheaderline',
'parseoneheaderline',
'addoneheaderline',
'safeexport8bits',
'writeheader',
'connhandler',
'port',
'connectionhandler',
'acceptconnections',
'gotconnection',
'failnoconnectionhandler',
'splitconnection',
'scriptextensions',
'sendfile',
'probemimetype',
'inits',
'installs',
'rootmap',
'install',
'getappsource',
'preflight',
'splituppath',
'handleresource',
'handledefinitionhead',
'handledefinitionbody',
'handledefinitionresource',
'execinstalls',
'execinits',
'payload',
'eligiblepath',
'eligiblepaths',
'expiresminutes',
'moddatestr',
'zips',
'addzip',
'getzipfilebytes',
'resourcedata',
'zipfile',
'zipname',
'zipfilename',
'rawinvokable',
'route',
'setdestination',
'encodepassword',
'checkuser',
'needinitialization',
'adduser',
'getuserid',
'getuser',
'getuserbykey',
'removeuser',
'listusers',
'listusersbygroup',
'countusersbygroup',
'addgroup',
'updategroup',
'getgroupid',
'getgroup',
'removegroup',
'listgroups',
'listgroupsbyuser',
'addusertogroup',
'removeuserfromgroup',
'removeuserfromallgroups',
'md5hex',
'usercolumns',
'groupcolumns',
'expireminutes',
'lasttouched',
'hasexpired',
'idealinmemory',
'maxinmemory',
'nextprune',
'nextprunedelta',
'sessionsdump',
'prune',
'entry',
'host',
'tb',
'setdefaultstorage',
'getdefaultstorage',
'onconvert',
'send',
'addsubnode',
'removesubnode',
'nodeforpath',
'jsonfornode',
'appmessage',
'appstatus',
'atends',
'chunked',
'cookiesarray',
'didinclude',
'errstack',
'headersarray',
'includestack',
'outputencoding',
'sessionsmap',
'htmlizestacktrace',
'respond',
'sendresponse',
'sendchunk',
'makecookieyumyum',
'includeonce',
'includelibrary',
'includelibraryonce',
'includebytes',
'addatend',
'setcookie',
'addheader',
'replaceheader',
'setheaders',
'rawcontent',
'redirectto',
'htmlizestacktracelink',
'doatbegins',
'handlelassoappcontent',
'handlelassoappresponse',
'domainbody',
'establisherrorstate',
'tryfinderrorfile',
'doatends',
'dosessions',
'makenonrelative',
'pushinclude',
'popinclude',
'findinclude',
'checkdebugging',
'splitdebuggingthread',
'matchtriggers',
'rules',
'shouldabort',
'gettrigger',
'trigger',
'rule'
],
'Lasso 8 Tags': [
'__char',
'__sync_timestamp__',
'_admin_addgroup',
'_admin_adduser',
'_admin_defaultconnector',
'_admin_defaultconnectornames',
'_admin_defaultdatabase',
'_admin_defaultfield',
'_admin_defaultgroup',
'_admin_defaulthost',
'_admin_defaulttable',
'_admin_defaultuser',
'_admin_deleteconnector',
'_admin_deletedatabase',
'_admin_deletefield',
'_admin_deletegroup',
'_admin_deletehost',
'_admin_deletetable',
'_admin_deleteuser',
'_admin_duplicategroup',
'_admin_internaldatabase',
'_admin_listconnectors',
'_admin_listdatabases',
'_admin_listfields',
'_admin_listgroups',
'_admin_listhosts',
'_admin_listtables',
'_admin_listusers',
'_admin_refreshconnector',
'_admin_refreshsecurity',
'_admin_servicepath',
'_admin_updateconnector',
'_admin_updatedatabase',
'_admin_updatefield',
'_admin_updategroup',
'_admin_updatehost',
'_admin_updatetable',
'_admin_updateuser',
'_chartfx_activation_string',
'_chartfx_getchallengestring',
'_chop_args',
'_chop_mimes',
'_client_addr_old',
'_client_address_old',
'_client_ip_old',
'_database_names',
'_datasource_reload',
'_date_current',
'_date_format',
'_date_msec',
'_date_parse',
'_execution_timelimit',
'_file_chmod',
'_initialize',
'_jdbc_acceptsurl',
'_jdbc_debug',
'_jdbc_deletehost',
'_jdbc_driverclasses',
'_jdbc_driverinfo',
'_jdbc_metainfo',
'_jdbc_propertyinfo',
'_jdbc_setdriver',
'_lasso_param',
'_log_helper',
'_proc_noparam',
'_proc_withparam',
'_recursion_limit',
'_request_param',
'_security_binaryexpiration',
'_security_flushcaches',
'_security_isserialized',
'_security_serialexpiration',
'_srand',
'_strict_literals',
'_substring',
'_xmlrpc_exconverter',
'_xmlrpc_inconverter',
'_xmlrpc_xmlinconverter',
'abort',
'accept',
'action_addinfo',
'action_addrecord',
'action_param',
'action_params',
'action_setfoundcount',
'action_setrecordid',
'action_settotalcount',
'action_statement',
'add',
'addattachment',
'addattribute',
'addbarcode',
'addchapter',
'addcheckbox',
'addchild',
'addcombobox',
'addcomment',
'addcontent',
'addhiddenfield',
'addhtmlpart',
'addimage',
'addjavascript',
'addlist',
'addnamespace',
'addnextsibling',
'addpage',
'addparagraph',
'addparenttype',
'addpart',
'addpasswordfield',
'addphrase',
'addprevsibling',
'addradiobutton',
'addradiogroup',
'addresetbutton',
'addsection',
'addselectlist',
'addsibling',
'addsubmitbutton',
'addtable',
'addtext',
'addtextarea',
'addtextfield',
'addtextpart',
'admin_allowedfileroots',
'admin_changeuser',
'admin_createuser',
'admin_currentgroups',
'admin_currentuserid',
'admin_currentusername',
'admin_getpref',
'admin_groupassignuser',
'admin_grouplistusers',
'admin_groupremoveuser',
'admin_lassoservicepath',
'admin_listgroups',
'admin_refreshlicensing',
'admin_refreshsecurity',
'admin_reloaddatasource',
'admin_removepref',
'admin_setpref',
'admin_userexists',
'admin_userlistgroups',
'alarms',
'all',
'and',
'annotate',
'answer',
'append',
'appendreplacement',
'appendtail',
'arc',
'array',
'array_iterator',
'asasync',
'astype',
'atbegin',
'atbottom',
'atend',
'atfarleft',
'atfarright',
'attop',
'attributecount',
'attributes',
'auth',
'auth_admin',
'auth_auth',
'auth_custom',
'auth_group',
'auth_prompt',
'auth_user',
'authenticate',
'authorize',
'backward',
'base64',
'baseuri',
'bcc',
'bean',
'beanproperties',
'beginswith',
'bigint',
'bind',
'bitand',
'bitclear',
'bitflip',
'bitformat',
'bitnot',
'bitor',
'bitset',
'bitshiftleft',
'bitshiftright',
'bittest',
'bitxor',
'blur',
'body',
'bom_utf16be',
'bom_utf16le',
'bom_utf32be',
'bom_utf32le',
'bom_utf8',
'boolean',
'boundary',
'bw',
'bytes',
'cache',
'cache_delete',
'cache_empty',
'cache_exists',
'cache_fetch',
'cache_internal',
'cache_maintenance',
'cache_object',
'cache_preferences',
'cache_store',
'call',
'cancel',
'capabilities',
'case',
'cc',
'chardigitvalue',
'charname',
'charset',
'chartfx',
'chartfx_records',
'chartfx_serve',
'chartype',
'checked',
'children',
'choice_list',
'choice_listitem',
'choicelistitem',
'cipher_decrypt',
'cipher_digest',
'cipher_encrypt',
'cipher_hmac',
'cipher_keylength',
'cipher_list',
'circle',
'click_text',
'client_addr',
'client_address',
'client_authorization',
'client_browser',
'client_contentlength',
'client_contenttype',
'client_cookielist',
'client_cookies',
'client_encoding',
'client_formmethod',
'client_getargs',
'client_getparams',
'client_headers',
'client_ip',
'client_ipfrominteger',
'client_iptointeger',
'client_password',
'client_postargs',
'client_postparams',
'client_type',
'client_url',
'client_username',
'close',
'closepath',
'closewrite',
'cn',
'code',
'colorspace',
'column',
'column_name',
'column_names',
'command',
'comments',
'compare',
'compare_beginswith',
'compare_contains',
'compare_endswith',
'compare_equalto',
'compare_greaterthan',
'compare_greaterthanorequals',
'compare_greaterthanorequls',
'compare_lessthan',
'compare_lessthanorequals',
'compare_notbeginswith',
'compare_notcontains',
'compare_notendswith',
'compare_notequalto',
'compare_notregexp',
'compare_regexp',
'compare_strictequalto',
'compare_strictnotequalto',
'comparecodepointorder',
'compile',
'compiler_removecacheddoc',
'compiler_setdefaultparserflags',
'composite',
'compress',
'connect',
'contains',
'content_body',
'content_disposition',
'content_encoding',
'content_header',
'content_transfer_encoding',
'content_type',
'contents',
'contrast',
'convert',
'cookie',
'cookie_set',
'crop',
'curl_ftp_getfile',
'curl_ftp_getlisting',
'curl_ftp_putfile',
'curl_include_url',
'currency',
'curveto',
'data',
'database_changecolumn',
'database_changefield',
'database_createcolumn',
'database_createfield',
'database_createtable',
'database_fmcontainer',
'database_hostinfo',
'database_inline',
'database_name',
'database_nameitem',
'database_names',
'database_realname',
'database_removecolumn',
'database_removefield',
'database_removetable',
'database_repeating',
'database_repeating_valueitem',
'database_repeatingvalueitem',
'database_schemanameitem',
'database_schemanames',
'database_tablecolumn',
'database_tablenameitem',
'database_tablenames',
'datasource_name',
'datasource_register',
'date',
'date__date_current',
'date__date_format',
'date__date_msec',
'date__date_parse',
'date_add',
'date_date',
'date_difference',
'date_duration',
'date_format',
'date_getcurrentdate',
'date_getday',
'date_getdayofweek',
'date_gethour',
'date_getlocaltimezone',
'date_getminute',
'date_getmonth',
'date_getsecond',
'date_gettime',
'date_getyear',
'date_gmttolocal',
'date_localtogmt',
'date_maximum',
'date_minimum',
'date_msec',
'date_setformat',
'date_subtract',
'day',
'daylights',
'dayofweek',
'dayofyear',
'db_layoutnameitem',
'db_layoutnames',
'db_nameitem',
'db_names',
'db_tablenameitem',
'db_tablenames',
'dbi_column_names',
'dbi_field_names',
'decimal',
'decimal_setglobaldefaultprecision',
'decode_base64',
'decode_bheader',
'decode_hex',
'decode_html',
'decode_json',
'decode_qheader',
'decode_quotedprintable',
'decode_quotedprintablebytes',
'decode_url',
'decode_xml',
'decompress',
'decrement',
'decrypt_blowfish',
'decrypt_blowfish2',
'default',
'define_atbegin',
'define_atend',
'define_constant',
'define_prototype',
'define_tag',
'define_tagp',
'define_type',
'define_typep',
'delete',
'depth',
'describe',
'description',
'deserialize',
'detach',
'detachreference',
'difference',
'digit',
'directory_directorynameitem',
'directory_lister',
'directory_nameitem',
'directorynameitem',
'dns_default',
'dns_lookup',
'dns_response',
'document',
'down',
'drawtext',
'dst',
'dump',
'duration',
'else',
'email_batch',
'email_compose',
'email_digestchallenge',
'email_digestresponse',
'email_extract',
'email_findemails',
'email_immediate',
'email_merge',
'email_mxerror',
'email_mxlookup',
'email_parse',
'email_pop',
'email_queue',
'email_result',
'email_safeemail',
'email_send',
'email_smtp',
'email_status',
'email_token',
'email_translatebreakstocrlf',
'encode_base64',
'encode_bheader',
'encode_break',
'encode_breaks',
'encode_crc32',
'encode_hex',
'encode_html',
'encode_htmltoxml',
'encode_json',
'encode_qheader',
'encode_quotedprintable',
'encode_quotedprintablebytes',
'encode_set',
'encode_smart',
'encode_sql',
'encode_sql92',
'encode_stricturl',
'encode_url',
'encode_xml',
'encrypt_blowfish',
'encrypt_blowfish2',
'encrypt_crammd5',
'encrypt_hmac',
'encrypt_md5',
'endswith',
'enhance',
'eq',
'equals',
'error_adderror',
'error_code',
'error_code_aborted',
'error_code_assert',
'error_code_bof',
'error_code_connectioninvalid',
'error_code_couldnotclosefile',
'error_code_couldnotcreateoropenfile',
'error_code_couldnotdeletefile',
'error_code_couldnotdisposememory',
'error_code_couldnotlockmemory',
'error_code_couldnotreadfromfile',
'error_code_couldnotunlockmemory',
'error_code_couldnotwritetofile',
'error_code_criterianotmet',
'error_code_datasourceerror',
'error_code_directoryfull',
'error_code_diskfull',
'error_code_dividebyzero',
'error_code_eof',
'error_code_failure',
'error_code_fieldrestriction',
'error_code_file',
'error_code_filealreadyexists',
'error_code_filecorrupt',
'error_code_fileinvalid',
'error_code_fileinvalidaccessmode',
'error_code_fileisclosed',
'error_code_fileisopen',
'error_code_filelocked',
'error_code_filenotfound',
'error_code_fileunlocked',
'error_code_httpfilenotfound',
'error_code_illegalinstruction',
'error_code_illegaluseoffrozeninstance',
'error_code_invaliddatabase',
'error_code_invalidfilename',
'error_code_invalidmemoryobject',
'error_code_invalidparameter',
'error_code_invalidpassword',
'error_code_invalidpathname',
'error_code_invalidusername',
'error_code_ioerror',
'error_code_loopaborted',
'error_code_memory',
'error_code_network',
'error_code_nilpointer',
'error_code_noerr',
'error_code_nopermission',
'error_code_outofmemory',
'error_code_outofstackspace',
'error_code_overflow',
'error_code_postconditionfailed',
'error_code_preconditionfailed',
'error_code_resnotfound',
'error_code_resource',
'error_code_streamreaderror',
'error_code_streamwriteerror',
'error_code_syntaxerror',
'error_code_tagnotfound',
'error_code_unknownerror',
'error_code_varnotfound',
'error_code_volumedoesnotexist',
'error_code_webactionnotsupported',
'error_code_webadderror',
'error_code_webdeleteerror',
'error_code_webmodulenotfound',
'error_code_webnosuchobject',
'error_code_webrepeatingrelatedfield',
'error_code_webrequiredfieldmissing',
'error_code_webtimeout',
'error_code_webupdateerror',
'error_columnrestriction',
'error_currenterror',
'error_databaseconnectionunavailable',
'error_databasetimeout',
'error_deleteerror',
'error_fieldrestriction',
'error_filenotfound',
'error_invaliddatabase',
'error_invalidpassword',
'error_invalidusername',
'error_modulenotfound',
'error_msg',
'error_msg_aborted',
'error_msg_assert',
'error_msg_bof',
'error_msg_connectioninvalid',
'error_msg_couldnotclosefile',
'error_msg_couldnotcreateoropenfile',
'error_msg_couldnotdeletefile',
'error_msg_couldnotdisposememory',
'error_msg_couldnotlockmemory',
'error_msg_couldnotreadfromfile',
'error_msg_couldnotunlockmemory',
'error_msg_couldnotwritetofile',
'error_msg_criterianotmet',
'error_msg_datasourceerror',
'error_msg_directoryfull',
'error_msg_diskfull',
'error_msg_dividebyzero',
'error_msg_eof',
'error_msg_failure',
'error_msg_fieldrestriction',
'error_msg_file',
'error_msg_filealreadyexists',
'error_msg_filecorrupt',
'error_msg_fileinvalid',
'error_msg_fileinvalidaccessmode',
'error_msg_fileisclosed',
'error_msg_fileisopen',
'error_msg_filelocked',
'error_msg_filenotfound',
'error_msg_fileunlocked',
'error_msg_httpfilenotfound',
'error_msg_illegalinstruction',
'error_msg_illegaluseoffrozeninstance',
'error_msg_invaliddatabase',
'error_msg_invalidfilename',
'error_msg_invalidmemoryobject',
'error_msg_invalidparameter',
'error_msg_invalidpassword',
'error_msg_invalidpathname',
'error_msg_invalidusername',
'error_msg_ioerror',
'error_msg_loopaborted',
'error_msg_memory',
'error_msg_network',
'error_msg_nilpointer',
'error_msg_noerr',
'error_msg_nopermission',
'error_msg_outofmemory',
'error_msg_outofstackspace',
'error_msg_overflow',
'error_msg_postconditionfailed',
'error_msg_preconditionfailed',
'error_msg_resnotfound',
'error_msg_resource',
'error_msg_streamreaderror',
'error_msg_streamwriteerror',
'error_msg_syntaxerror',
'error_msg_tagnotfound',
'error_msg_unknownerror',
'error_msg_varnotfound',
'error_msg_volumedoesnotexist',
'error_msg_webactionnotsupported',
'error_msg_webadderror',
'error_msg_webdeleteerror',
'error_msg_webmodulenotfound',
'error_msg_webnosuchobject',
'error_msg_webrepeatingrelatedfield',
'error_msg_webrequiredfieldmissing',
'error_msg_webtimeout',
'error_msg_webupdateerror',
'error_noerror',
'error_nopermission',
'error_norecordsfound',
'error_outofmemory',
'error_pop',
'error_push',
'error_reqcolumnmissing',
'error_reqfieldmissing',
'error_requiredcolumnmissing',
'error_requiredfieldmissing',
'error_reset',
'error_seterrorcode',
'error_seterrormessage',
'error_updateerror',
'errors',
'euro',
'eval',
'event_schedule',
'events',
'ew',
'execute',
'export16bits',
'export32bits',
'export64bits',
'export8bits',
'exportfdf',
'exportstring',
'extract',
'extractone',
'fail',
'fail_if',
'false',
'field',
'field_name',
'field_names',
'fieldnames',
'fieldtype',
'fieldvalue',
'file',
'file_autoresolvefullpaths',
'file_chmod',
'file_control',
'file_copy',
'file_create',
'file_creationdate',
'file_currenterror',
'file_delete',
'file_exists',
'file_getlinecount',
'file_getsize',
'file_isdirectory',
'file_listdirectory',
'file_moddate',
'file_modechar',
'file_modeline',
'file_move',
'file_openread',
'file_openreadwrite',
'file_openwrite',
'file_openwriteappend',
'file_openwritetruncate',
'file_probeeol',
'file_processuploads',
'file_read',
'file_readline',
'file_rename',
'file_serve',
'file_setsize',
'file_stream',
'file_streamcopy',
'file_uploads',
'file_waitread',
'file_waittimeout',
'file_waitwrite',
'file_write',
'find',
'find_soap_ops',
'findindex',
'findnamespace',
'findnamespacebyhref',
'findpattern',
'findposition',
'first',
'firstchild',
'fliph',
'flipv',
'flush',
'foldcase',
'foreach',
'form_param',
'format',
'forward',
'found_count',
'freebusies',
'freezetype',
'freezevalue',
'from',
'ft',
'ftp_getfile',
'ftp_getlisting',
'ftp_putfile',
'full',
'fulltype',
'generatechecksum',
'get',
'getabswidth',
'getalignment',
'getattribute',
'getattributenamespace',
'getbarheight',
'getbarmultiplier',
'getbarwidth',
'getbaseline',
'getbordercolor',
'getborderwidth',
'getcode',
'getcolor',
'getcolumncount',
'getencoding',
'getface',
'getfont',
'getformat',
'getfullfontname',
'getheaders',
'getmargins',
'getmethod',
'getnumericvalue',
'getpadding',
'getpagenumber',
'getparams',
'getproperty',
'getpsfontname',
'getrange',
'getrowcount',
'getsize',
'getspacing',
'getsupportedencodings',
'gettextalignment',
'gettextsize',
'gettype',
'global',
'global_defined',
'global_remove',
'global_reset',
'globals',
'gmt',
'groupcount',
'gt',
'gte',
'handle',
'handle_error',
'hasattribute',
'haschildren',
'hasvalue',
'header',
'headers',
'height',
'histogram',
'hosttonet16',
'hosttonet32',
'hour',
'html_comment',
'http_getfile',
'ical_alarm',
'ical_attribute',
'ical_calendar',
'ical_daylight',
'ical_event',
'ical_freebusy',
'ical_item',
'ical_journal',
'ical_parse',
'ical_standard',
'ical_timezone',
'ical_todo',
'id',
'if',
'if_empty',
'if_false',
'if_null',
'if_true',
'ignorecase',
'image',
'image_url',
'img',
'import16bits',
'import32bits',
'import64bits',
'import8bits',
'importfdf',
'importstring',
'include',
'include_cgi',
'include_currentpath',
'include_once',
'include_raw',
'include_url',
'increment',
'inline',
'input',
'insert',
'insertatcurrent',
'insertfirst',
'insertfrom',
'insertlast',
'insertpage',
'integer',
'intersection',
'invoke',
'isa',
'isalnum',
'isalpha',
'isbase',
'iscntrl',
'isdigit',
'isemptyelement',
'islower',
'isopen',
'isprint',
'isspace',
'istitle',
'istruetype',
'isualphabetic',
'isulowercase',
'isupper',
'isuuppercase',
'isuwhitespace',
'iswhitespace',
'iterate',
'iterator',
'java',
'java_bean',
'javascript',
'join',
'journals',
'json_records',
'json_rpccall',
'key',
'keycolumn_name',
'keycolumn_value',
'keyfield_name',
'keyfield_value',
'keys',
'lasso_comment',
'lasso_currentaction',
'lasso_datasourceis',
'lasso_datasourceis4d',
'lasso_datasourceisfilemaker',
'lasso_datasourceisfilemaker7',
'lasso_datasourceisfilemaker9',
'lasso_datasourceisfilemakersa',
'lasso_datasourceisjdbc',
'lasso_datasourceislassomysql',
'lasso_datasourceismysql',
'lasso_datasourceisodbc',
'lasso_datasourceisopenbase',
'lasso_datasourceisoracle',
'lasso_datasourceispostgresql',
'lasso_datasourceisspotlight',
'lasso_datasourceissqlite',
'lasso_datasourceissqlserver',
'lasso_datasourcemodulename',
'lasso_datatype',
'lasso_disableondemand',
'lasso_errorreporting',
'lasso_executiontimelimit',
'lasso_parser',
'lasso_process',
'lasso_sessionid',
'lasso_siteid',
'lasso_siteisrunning',
'lasso_sitename',
'lasso_siterestart',
'lasso_sitestart',
'lasso_sitestop',
'lasso_tagexists',
'lasso_tagmodulename',
'lasso_uniqueid',
'lasso_updatecheck',
'lasso_uptime',
'lasso_version',
'lassoapp_create',
'lassoapp_dump',
'lassoapp_flattendir',
'lassoapp_getappdata',
'lassoapp_link',
'lassoapp_list',
'lassoapp_process',
'lassoapp_unitize',
'last',
'lastchild',
'lasterror',
'layout_name',
'ldap',
'ldap_scope_base',
'ldap_scope_onelevel',
'ldap_scope_subtree',
'ldml',
'ldml_ldml',
'left',
'length',
'library',
'library_once',
'line',
'link',
'link_currentaction',
'link_currentactionparams',
'link_currentactionurl',
'link_currentgroup',
'link_currentgroupparams',
'link_currentgroupurl',
'link_currentrecord',
'link_currentrecordparams',
'link_currentrecordurl',
'link_currentsearch',
'link_currentsearchparams',
'link_currentsearchurl',
'link_detail',
'link_detailparams',
'link_detailurl',
'link_firstgroup',
'link_firstgroupparams',
'link_firstgroupurl',
'link_firstrecord',
'link_firstrecordparams',
'link_firstrecordurl',
'link_lastgroup',
'link_lastgroupparams',
'link_lastgroupurl',
'link_lastrecord',
'link_lastrecordparams',
'link_lastrecordurl',
'link_nextgroup',
'link_nextgroupparams',
'link_nextgroupurl',
'link_nextrecord',
'link_nextrecordparams',
'link_nextrecordurl',
'link_params',
'link_prevgroup',
'link_prevgroupparams',
'link_prevgroupurl',
'link_prevrecord',
'link_prevrecordparams',
'link_prevrecordurl',
'link_setformat',
'link_url',
'list',
'list_additem',
'list_fromlist',
'list_fromstring',
'list_getitem',
'list_itemcount',
'list_iterator',
'list_removeitem',
'list_replaceitem',
'list_reverseiterator',
'list_tostring',
'listen',
'literal',
'ljax_end',
'ljax_hastarget',
'ljax_include',
'ljax_start',
'ljax_target',
'local',
'local_defined',
'local_remove',
'local_reset',
'localaddress',
'locale_format',
'localname',
'locals',
'lock',
'log',
'log_always',
'log_critical',
'log_deprecated',
'log_destination_console',
'log_destination_database',
'log_destination_file',
'log_detail',
'log_level_critical',
'log_level_deprecated',
'log_level_detail',
'log_level_sql',
'log_level_warning',
'log_setdestination',
'log_sql',
'log_warning',
'logicalop_value',
'logicaloperator_value',
'lookupnamespace',
'loop',
'loop_abort',
'loop_continue',
'loop_count',
'lowercase',
'lt',
'lte',
'magick_image',
'map',
'map_iterator',
'marker',
'match_comparator',
'match_notrange',
'match_notregexp',
'match_range',
'match_regexp',
'matches',
'matchesstart',
'matchposition',
'matchstring',
'math_abs',
'math_acos',
'math_add',
'math_asin',
'math_atan',
'math_atan2',
'math_ceil',
'math_converteuro',
'math_cos',
'math_div',
'math_exp',
'math_floor',
'math_internal_rand',
'math_internal_randmax',
'math_internal_srand',
'math_ln',
'math_log',
'math_log10',
'math_max',
'math_min',
'math_mod',
'math_mult',
'math_pow',
'math_random',
'math_range',
'math_rint',
'math_roman',
'math_round',
'math_sin',
'math_sqrt',
'math_sub',
'math_tan',
'maxrecords_value',
'memory_session_driver',
'merge',
'millisecond',
'mime_type',
'minimal',
'minute',
'misc__srand',
'misc_randomnumber',
'misc_roman',
'misc_valid_creditcard',
'mode',
'modulate',
'month',
'moveto',
'movetoattributenamespace',
'movetoelement',
'movetofirstattribute',
'movetonextattribute',
'mysql_session_driver',
'name',
'named_param',
'namespace_current',
'namespace_delimiter',
'namespace_exists',
'namespace_file_fullpathexists',
'namespace_global',
'namespace_import',
'namespace_load',
'namespace_page',
'namespace_unload',
'namespace_using',
'namespaces',
'namespaceuri',
'neq',
'net',
'net_connectinprogress',
'net_connectok',
'net_typessl',
'net_typessltcp',
'net_typessludp',
'net_typetcp',
'net_typeudp',
'net_waitread',
'net_waittimeout',
'net_waitwrite',
'nettohost16',
'nettohost32',
'newchild',
'next',
'nextsibling',
'no_default_output',
'nodetype',
'none',
'noprocess',
'not',
'nrx',
'nslookup',
'null',
'object',
'once',
'oneoff',
'op_logicalvalue',
'open',
'operator_logicalvalue',
'option',
'or',
'os_process',
'output',
'output_none',
'padleading',
'padtrailing',
'pagecount',
'pagesize',
'pair',
'paraminfo',
'params',
'params_up',
'parent',
'path',
'pdf_barcode',
'pdf_color',
'pdf_doc',
'pdf_font',
'pdf_image',
'pdf_list',
'pdf_read',
'pdf_serve',
'pdf_table',
'pdf_text',
'percent',
'pixel',
'portal',
'position',
'postcondition',
'precondition',
'prefix',
'prettyprintingnsmap',
'prettyprintingtypemap',
'previoussibling',
'priorityqueue',
'private',
'proc_convert',
'proc_convertbody',
'proc_convertone',
'proc_extract',
'proc_extractone',
'proc_find',
'proc_first',
'proc_foreach',
'proc_get',
'proc_join',
'proc_lasso',
'proc_last',
'proc_map_entry',
'proc_null',
'proc_regexp',
'proc_xml',
'proc_xslt',
'process',
'properties',
'protect',
'queue',
'rand',
'randomnumber',
'raw',
'rawheaders',
'read',
'readattributevalue',
'readerror',
'readfrom',
'readline',
'readlock',
'readstring',
'readunlock',
'recid_value',
'recipients',
'record_count',
'recordcount',
'recordid_value',
'records',
'records_array',
'records_map',
'rect',
'redirect_url',
'refcount',
'reference',
'referer',
'referer_url',
'referrals',
'referrer',
'referrer_url',
'regexp',
'remoteaddress',
'remove',
'removeall',
'removeattribute',
'removechild',
'removecurrent',
'removefirst',
'removelast',
'removeleading',
'removenamespace',
'removetrailing',
'render',
'repeating',
'repeating_valueitem',
'repeatingvalueitem',
'repetition',
'replace',
'replaceall',
'replacefirst',
'replacepattern',
'replacewith',
'req_column',
'req_field',
'required_column',
'required_field',
'reserve',
'reset',
'resolutionh',
'resolutionv',
'response',
'response_fileexists',
'response_filepath',
'response_localpath',
'response_path',
'response_realm',
'results',
'resultset',
'resultset_count',
'retrieve',
'return',
'return_value',
'returntype',
'reverse',
'reverseiterator',
'right',
'roman',
'rotate',
'row_count',
'rows',
'rows_array',
'run',
'run_children',
'rx',
'save',
'scale',
'schema_name',
'scientific',
'search',
'search_args',
'search_arguments',
'search_columnitem',
'search_fielditem',
'search_operatoritem',
'search_opitem',
'search_valueitem',
'searchfielditem',
'searchoperatoritem',
'searchopitem',
'searchvalueitem',
'second',
'select',
'selected',
'self',
'send',
'serialize',
'series',
'server_date',
'server_day',
'server_ip',
'server_name',
'server_port',
'server_push',
'server_siteisrunning',
'server_sitestart',
'server_sitestop',
'server_time',
'session_abort',
'session_addoutputfilter',
'session_addvar',
'session_addvariable',
'session_deleteexpired',
'session_driver',
'session_end',
'session_id',
'session_removevar',
'session_removevariable',
'session_result',
'session_setdriver',
'session_start',
'set',
'set_iterator',
'set_reverseiterator',
'setalignment',
'setbarheight',
'setbarmultiplier',
'setbarwidth',
'setbaseline',
'setblocking',
'setbordercolor',
'setborderwidth',
'setbytes',
'setcode',
'setcolor',
'setcolorspace',
'setdatatype',
'setencoding',
'setface',
'setfieldvalue',
'setfont',
'setformat',
'setgeneratechecksum',
'setheight',
'setlassodata',
'setlinewidth',
'setmarker',
'setmode',
'setname',
'setpadding',
'setpagenumber',
'setpagerange',
'setposition',
'setproperty',
'setrange',
'setshowchecksum',
'setsize',
'setspacing',
'settemplate',
'settemplatestr',
'settextalignment',
'settextdata',
'settextsize',
'settype',
'setunderline',
'setwidth',
'setxmldata',
'sharpen',
'showchecksum',
'showcode39startstop',
'showeanguardbars',
'shown_count',
'shown_first',
'shown_last',
'signal',
'signalall',
'site_atbegin',
'site_id',
'site_name',
'site_restart',
'size',
'skiprecords_value',
'sleep',
'smooth',
'soap_convertpartstopairs',
'soap_definetag',
'soap_info',
'soap_lastrequest',
'soap_lastresponse',
'soap_stub',
'sort',
'sort_args',
'sort_arguments',
'sort_columnitem',
'sort_fielditem',
'sort_orderitem',
'sortcolumnitem',
'sortfielditem',
'sortorderitem',
'sortwith',
'split',
'sqlite_createdb',
'sqlite_session_driver',
'sqlite_setsleepmillis',
'sqlite_setsleeptries',
'srand',
'stack',
'standards',
'steal',
'stock_quote',
'string',
'string_charfromname',
'string_concatenate',
'string_countfields',
'string_endswith',
'string_extract',
'string_findposition',
'string_findregexp',
'string_fordigit',
'string_getfield',
'string_getunicodeversion',
'string_insert',
'string_isalpha',
'string_isalphanumeric',
'string_isdigit',
'string_ishexdigit',
'string_islower',
'string_isnumeric',
'string_ispunctuation',
'string_isspace',
'string_isupper',
'string_length',
'string_lowercase',
'string_remove',
'string_removeleading',
'string_removetrailing',
'string_replace',
'string_replaceregexp',
'string_todecimal',
'string_tointeger',
'string_uppercase',
'string_validcharset',
'subject',
'substring',
'subtract',
'swapbytes',
'table_name',
'table_realname',
'tag',
'tag_name',
'tags',
'tags_find',
'tags_list',
'tcp_close',
'tcp_open',
'tcp_send',
'tcp_tcp_close',
'tcp_tcp_open',
'tcp_tcp_send',
'textwidth',
'thread_abort',
'thread_atomic',
'thread_event',
'thread_exists',
'thread_getcurrentid',
'thread_getpriority',
'thread_info',
'thread_list',
'thread_lock',
'thread_pipe',
'thread_priority_default',
'thread_priority_high',
'thread_priority_low',
'thread_rwlock',
'thread_semaphore',
'thread_setpriority',
'time',
'timezones',
'titlecase',
'to',
'todos',
'token_value',
'tolower',
'total_records',
'totitle',
'toupper',
'transform',
'treemap',
'treemap_iterator',
'trim',
'true',
'type',
'unescape',
'union',
'uniqueid',
'unlock',
'unserialize',
'up',
'uppercase',
'url_rewrite',
'valid_creditcard',
'valid_date',
'valid_email',
'valid_url',
'value',
'value_list',
'value_listitem',
'valuelistitem',
'values',
'valuetype',
'var',
'var_defined',
'var_remove',
'var_reset',
'var_set',
'variable',
'variable_defined',
'variable_set',
'variables',
'variant_count',
'vars',
'wait',
'wap_isenabled',
'wap_maxbuttons',
'wap_maxcolumns',
'wap_maxhorzpixels',
'wap_maxrows',
'wap_maxvertpixels',
'waskeyword',
'week',
'while',
'width',
'write',
'writelock',
'writeto',
'writeunlock',
'wsdl_extract',
'wsdl_getbinding',
'wsdl_getbindingforoperation',
'wsdl_getbindingoperations',
'wsdl_getmessagenamed',
'wsdl_getmessageparts',
'wsdl_getmessagetriofromporttype',
'wsdl_getopbodystyle',
'wsdl_getopbodyuse',
'wsdl_getoperation',
'wsdl_getoplocation',
'wsdl_getopmessagetypes',
'wsdl_getopsoapaction',
'wsdl_getportaddress',
'wsdl_getportsforservice',
'wsdl_getporttype',
'wsdl_getporttypeoperation',
'wsdl_getservicedocumentation',
'wsdl_getservices',
'wsdl_gettargetnamespace',
'wsdl_issoapoperation',
'wsdl_listoperations',
'wsdl_maketest',
'xml',
'xml_extract',
'xml_rpc',
'xml_rpccall',
'xml_rw',
'xml_serve',
'xml_transform',
'xml_xml',
'xml_xmlstream',
'xmllang',
'xmlschematype',
'xmlstream',
'xsd_attribute',
'xsd_blankarraybase',
'xsd_blankbase',
'xsd_buildtype',
'xsd_cache',
'xsd_checkcardinality',
'xsd_continueall',
'xsd_continueannotation',
'xsd_continueany',
'xsd_continueanyattribute',
'xsd_continueattribute',
'xsd_continueattributegroup',
'xsd_continuechoice',
'xsd_continuecomplexcontent',
'xsd_continuecomplextype',
'xsd_continuedocumentation',
'xsd_continueextension',
'xsd_continuegroup',
'xsd_continuekey',
'xsd_continuelist',
'xsd_continuerestriction',
'xsd_continuesequence',
'xsd_continuesimplecontent',
'xsd_continuesimpletype',
'xsd_continueunion',
'xsd_deserialize',
'xsd_fullyqualifyname',
'xsd_generate',
'xsd_generateblankfromtype',
'xsd_generateblanksimpletype',
'xsd_generatetype',
'xsd_getschematype',
'xsd_issimpletype',
'xsd_loadschema',
'xsd_lookupnamespaceuri',
'xsd_lookuptype',
'xsd_processany',
'xsd_processattribute',
'xsd_processattributegroup',
'xsd_processcomplextype',
'xsd_processelement',
'xsd_processgroup',
'xsd_processimport',
'xsd_processinclude',
'xsd_processschema',
'xsd_processsimpletype',
'xsd_ref',
'xsd_type',
'year'
]
}
| gpl-2.0 |
mewtaylor/django | django/utils/html_parser.py | 348 | 5155 | import re
import sys
from django.utils import six
from django.utils.six.moves import html_parser as _html_parser
current_version = sys.version_info
use_workaround = current_version < (2, 7, 3)
try:
HTMLParseError = _html_parser.HTMLParseError
except AttributeError:
# create a dummy class for Python 3.5+ where it's been removed
class HTMLParseError(Exception):
pass
if not use_workaround:
if six.PY3:
class HTMLParser(_html_parser.HTMLParser):
"""Explicitly set convert_charrefs to be False.
This silences a deprecation warning on Python 3.4, but we can't do
it at call time because Python 2.7 does not have the keyword
argument.
"""
def __init__(self, convert_charrefs=False, **kwargs):
_html_parser.HTMLParser.__init__(self, convert_charrefs=convert_charrefs, **kwargs)
else:
HTMLParser = _html_parser.HTMLParser
else:
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
class HTMLParser(_html_parser.HTMLParser):
"""
Patched version of stdlib's HTMLParser with patch from:
http://bugs.python.org/issue670664
"""
def __init__(self):
_html_parser.HTMLParser.__init__(self)
self.cdata_tag = None
def set_cdata_mode(self, tag):
try:
self.interesting = _html_parser.interesting_cdata
except AttributeError:
self.interesting = re.compile(r'</\s*%s\s*>' % tag.lower(), re.I)
self.cdata_tag = tag.lower()
def clear_cdata_mode(self):
self.interesting = _html_parser.interesting_normal
self.cdata_tag = None
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i + 1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
m = _html_parser.attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif (attrvalue[:1] == '\'' == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = (len(self.__starttag_text)
- self.__starttag_text.rfind("\n"))
else:
offset = offset + len(self.__starttag_text)
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag) # <--------------------------- Changed
return endpos
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i + 2] == "</", "unexpected call to parse_endtag"
match = _html_parser.endendtag.search(rawdata, i + 1) # >
if not match:
return -1
j = match.end()
match = _html_parser.endtagfind.match(rawdata, i) # </ + tag + >
if not match:
if self.cdata_tag is not None: # *** add ***
self.handle_data(rawdata[i:j]) # *** add ***
return j # *** add ***
self.error("bad end tag: %r" % (rawdata[i:j],))
# --- changed start ---------------------------------------------------
tag = match.group(1).strip()
if self.cdata_tag is not None:
if tag.lower() != self.cdata_tag:
self.handle_data(rawdata[i:j])
return j
# --- changed end -----------------------------------------------------
self.handle_endtag(tag.lower())
self.clear_cdata_mode()
return j
| bsd-3-clause |
andreparrish/python-for-android | python3-alpha/python3-src/Lib/tkinter/test/test_ttk/test_style.py | 54 | 2792 | import unittest
import tkinter
from tkinter import ttk
from test.support import requires, run_unittest
import tkinter.test.support as support
requires('gui')
class StyleTest(unittest.TestCase):
def setUp(self):
self.style = ttk.Style()
def test_configure(self):
style = self.style
style.configure('TButton', background='yellow')
self.assertEqual(style.configure('TButton', 'background'),
'yellow')
self.assertTrue(isinstance(style.configure('TButton'), dict))
def test_map(self):
style = self.style
style.map('TButton', background=[('active', 'background', 'blue')])
self.assertEqual(style.map('TButton', 'background'),
[('active', 'background', 'blue')])
self.assertTrue(isinstance(style.map('TButton'), dict))
def test_lookup(self):
style = self.style
style.configure('TButton', background='yellow')
style.map('TButton', background=[('active', 'background', 'blue')])
self.assertEqual(style.lookup('TButton', 'background'), 'yellow')
self.assertEqual(style.lookup('TButton', 'background',
['active', 'background']), 'blue')
self.assertEqual(style.lookup('TButton', 'optionnotdefined',
default='iknewit'), 'iknewit')
def test_layout(self):
style = self.style
self.assertRaises(tkinter.TclError, style.layout, 'NotALayout')
tv_style = style.layout('Treeview')
# "erase" Treeview layout
style.layout('Treeview', '')
self.assertEqual(style.layout('Treeview'),
[('null', {'sticky': 'nswe'})]
)
# restore layout
style.layout('Treeview', tv_style)
self.assertEqual(style.layout('Treeview'), tv_style)
# should return a list
self.assertTrue(isinstance(style.layout('TButton'), list))
# correct layout, but "option" doesn't exist as option
self.assertRaises(tkinter.TclError, style.layout, 'Treeview',
[('name', {'option': 'inexistent'})])
def test_theme_use(self):
self.assertRaises(tkinter.TclError, self.style.theme_use,
'nonexistingname')
curr_theme = self.style.theme_use()
new_theme = None
for theme in self.style.theme_names():
if theme != curr_theme:
new_theme = theme
self.style.theme_use(theme)
break
else:
# just one theme available, can't go on with tests
return
self.assertFalse(curr_theme == new_theme)
self.assertFalse(new_theme != self.style.theme_use())
self.style.theme_use(curr_theme)
tests_gui = (StyleTest, )
if __name__ == "__main__":
run_unittest(*tests_gui)
| apache-2.0 |
boghison/servo | tests/wpt/harness/wptrunner/wptmanifest/tests/test_static.py | 139 | 2863 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from cStringIO import StringIO
from ..backends import static
# There aren't many tests here because it turns out to be way more convenient to
# use test_serializer for the majority of cases
class TestStatic(unittest.TestCase):
def parse(self, input_str):
return self.parser.parse(StringIO(input_str))
def compile(self, input_text, input_data):
return static.compile(input_text, input_data)
def test_get_0(self):
data = """
key: value
[Heading 1]
other_key:
if a == 1: value_1
if a == 2: value_2
value_3
"""
manifest = self.compile(data, {"a": 2})
self.assertEquals(manifest.get("key"), "value")
children = list(item for item in manifest.iterchildren())
self.assertEquals(len(children), 1)
section = children[0]
self.assertEquals(section.name, "Heading 1")
self.assertEquals(section.get("other_key"), "value_2")
self.assertEquals(section.get("key"), "value")
def test_get_1(self):
data = """
key: value
[Heading 1]
other_key:
if a == 1: value_1
if a == 2: value_2
value_3
"""
manifest = self.compile(data, {"a": 3})
children = list(item for item in manifest.iterchildren())
section = children[0]
self.assertEquals(section.get("other_key"), "value_3")
def test_get_3(self):
data = """key:
if a == "1": value_1
if a[0] == "ab"[0]: value_2
"""
manifest = self.compile(data, {"a": "1"})
self.assertEquals(manifest.get("key"), "value_1")
manifest = self.compile(data, {"a": "ac"})
self.assertEquals(manifest.get("key"), "value_2")
def test_get_4(self):
data = """key:
if not a: value_1
value_2
"""
manifest = self.compile(data, {"a": True})
self.assertEquals(manifest.get("key"), "value_2")
manifest = self.compile(data, {"a": False})
self.assertEquals(manifest.get("key"), "value_1")
def test_api(self):
data = """key:
if a == 1.5: value_1
value_2
key_1: other_value
"""
manifest = self.compile(data, {"a": 1.5})
self.assertFalse(manifest.is_empty)
self.assertEquals(manifest.root, manifest)
self.assertTrue(manifest.has_key("key_1"))
self.assertFalse(manifest.has_key("key_2"))
self.assertEquals(set(manifest.iterkeys()), set(["key", "key_1"]))
self.assertEquals(set(manifest.itervalues()), set(["value_1", "other_value"]))
def test_is_empty_1(self):
data = """
[Section]
[Subsection]
"""
manifest = self.compile(data, {})
self.assertTrue(manifest.is_empty)
| mpl-2.0 |
akintoey/django | django/template/engine.py | 48 | 7528 | from django.core.exceptions import ImproperlyConfigured
from django.utils import lru_cache, six
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from .base import Context, Template
from .context import _builtin_context_processors
from .exceptions import TemplateDoesNotExist
from .library import import_library
class Engine(object):
default_builtins = [
'django.template.defaulttags',
'django.template.defaultfilters',
'django.template.loader_tags',
]
def __init__(self, dirs=None, app_dirs=False, context_processors=None,
debug=False, loaders=None, string_if_invalid='',
file_charset='utf-8', libraries=None, builtins=None):
if dirs is None:
dirs = []
if context_processors is None:
context_processors = []
if loaders is None:
loaders = ['django.template.loaders.filesystem.Loader']
if app_dirs:
loaders += ['django.template.loaders.app_directories.Loader']
else:
if app_dirs:
raise ImproperlyConfigured(
"app_dirs must not be set when loaders is defined.")
if libraries is None:
libraries = {}
if builtins is None:
builtins = []
self.dirs = dirs
self.app_dirs = app_dirs
self.context_processors = context_processors
self.debug = debug
self.loaders = loaders
self.string_if_invalid = string_if_invalid
self.file_charset = file_charset
self.libraries = libraries
self.template_libraries = self.get_template_libraries(libraries)
self.builtins = self.default_builtins + builtins
self.template_builtins = self.get_template_builtins(self.builtins)
@staticmethod
@lru_cache.lru_cache()
def get_default():
"""
When only one DjangoTemplates backend is configured, returns it.
Raises ImproperlyConfigured otherwise.
This is required for preserving historical APIs that rely on a
globally available, implicitly configured engine such as:
>>> from django.template import Context, Template
>>> template = Template("Hello {{ name }}!")
>>> context = Context({'name': "world"})
>>> template.render(context)
'Hello world!'
"""
# Since Engine is imported in django.template and since
# DjangoTemplates is a wrapper around this Engine class,
# local imports are required to avoid import loops.
from django.template import engines
from django.template.backends.django import DjangoTemplates
django_engines = [engine for engine in engines.all()
if isinstance(engine, DjangoTemplates)]
if len(django_engines) == 1:
# Unwrap the Engine instance inside DjangoTemplates
return django_engines[0].engine
elif len(django_engines) == 0:
raise ImproperlyConfigured(
"No DjangoTemplates backend is configured.")
else:
raise ImproperlyConfigured(
"Several DjangoTemplates backends are configured. "
"You must select one explicitly.")
@cached_property
def template_context_processors(self):
context_processors = _builtin_context_processors
context_processors += tuple(self.context_processors)
return tuple(import_string(path) for path in context_processors)
def get_template_builtins(self, builtins):
return [import_library(x) for x in builtins]
def get_template_libraries(self, libraries):
loaded = {}
for name, path in libraries.items():
loaded[name] = import_library(path)
return loaded
@cached_property
def template_loaders(self):
return self.get_template_loaders(self.loaders)
def get_template_loaders(self, template_loaders):
loaders = []
for template_loader in template_loaders:
loader = self.find_template_loader(template_loader)
if loader is not None:
loaders.append(loader)
return loaders
def find_template_loader(self, loader):
if isinstance(loader, (tuple, list)):
args = list(loader[1:])
loader = loader[0]
else:
args = []
if isinstance(loader, six.string_types):
loader_class = import_string(loader)
return loader_class(self, *args)
else:
raise ImproperlyConfigured(
"Invalid value in template loaders configuration: %r" % loader)
def find_template(self, name, dirs=None, skip=None):
tried = []
for loader in self.template_loaders:
if loader.supports_recursion:
try:
template = loader.get_template(
name, template_dirs=dirs, skip=skip,
)
return template, template.origin
except TemplateDoesNotExist as e:
tried.extend(e.tried)
else:
# RemovedInDjango20Warning: Use old api for non-recursive
# loaders.
try:
return loader(name, dirs)
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(name, tried=tried)
def from_string(self, template_code):
"""
Returns a compiled Template object for the given template code,
handling template inheritance recursively.
"""
return Template(template_code, engine=self)
def get_template(self, template_name):
"""
Returns a compiled Template object for the given template name,
handling template inheritance recursively.
"""
template, origin = self.find_template(template_name)
if not hasattr(template, 'render'):
# template needs to be compiled
template = Template(template, origin, template_name, engine=self)
return template
def render_to_string(self, template_name, context=None):
"""
Render the template specified by template_name with the given context.
For use in Django's test suite.
"""
if isinstance(template_name, (list, tuple)):
t = self.select_template(template_name)
else:
t = self.get_template(template_name)
# Django < 1.8 accepted a Context in `context` even though that's
# unintended. Preserve this ability but don't rewrap `context`.
if isinstance(context, Context):
return t.render(context)
else:
return t.render(Context(context))
def select_template(self, template_name_list):
"""
Given a list of template names, returns the first that can be loaded.
"""
if not template_name_list:
raise TemplateDoesNotExist("No template names provided")
not_found = []
for template_name in template_name_list:
try:
return self.get_template(template_name)
except TemplateDoesNotExist as exc:
if exc.args[0] not in not_found:
not_found.append(exc.args[0])
continue
# If we get here, none of the templates could be loaded
raise TemplateDoesNotExist(', '.join(not_found))
| bsd-3-clause |
tobias47n9e/social-core | social_core/backends/amazon.py | 4 | 1572 | """
Amazon OAuth2 backend, docs at:
https://python-social-auth.readthedocs.io/en/latest/backends/amazon.html
"""
import ssl
from .oauth import BaseOAuth2
class AmazonOAuth2(BaseOAuth2):
name = 'amazon'
ID_KEY = 'user_id'
AUTHORIZATION_URL = 'https://www.amazon.com/ap/oa'
ACCESS_TOKEN_URL = 'https://api.amazon.com/auth/o2/token'
DEFAULT_SCOPE = ['profile']
REDIRECT_STATE = False
ACCESS_TOKEN_METHOD = 'POST'
SSL_PROTOCOL = ssl.PROTOCOL_TLSv1
EXTRA_DATA = [
('refresh_token', 'refresh_token', True),
('user_id', 'user_id'),
('postal_code', 'postal_code')
]
def get_user_details(self, response):
"""Return user details from amazon account"""
name = response.get('name') or ''
fullname, first_name, last_name = self.get_user_names(name)
return {'username': name,
'email': response.get('email'),
'fullname': fullname,
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
"""Grab user profile information from amazon."""
response = self.get_json('https://www.amazon.com/ap/user/profile',
params={'access_token': access_token})
if 'Profile' in response:
response = {
'user_id': response['Profile']['CustomerId'],
'name': response['Profile']['Name'],
'email': response['Profile']['PrimaryEmail']
}
return response
| bsd-3-clause |
timothsp/where2ate | venv/lib/python3.3/site-packages/pip/_vendor/pkg_resources/__init__.py | 252 | 106466 | """
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
PY3 = sys.version_info > (3,)
PY2 = not PY3
if PY3:
from urllib.parse import urlparse, urlunparse
if PY2:
from urlparse import urlparse, urlunparse
if PY3:
string_types = str,
else:
string_types = str, eval('unicode')
iteritems = (lambda i: i.items()) if PY3 else lambda i: i.iteritems()
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib.machinery as importlib_machinery
else:
importlib_machinery = None
try:
import parser
except ImportError:
pass
import pip._vendor.packaging.version
import pip._vendor.packaging.specifiers
packaging = pip._vendor.packaging
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
class _SetuptoolsVersionMixin(object):
    """Backward-compatibility mixin for version objects.

    Before setuptools 8.0, ``parse_version()`` returned a tuple of padded
    strings.  This mixin lets the modern ``packaging``-based version classes
    still compare against such tuples and be iterated/indexed as if they were
    the legacy tuple form.
    """

    def __hash__(self):
        return super(_SetuptoolsVersionMixin, self).__hash__()

    def __lt__(self, other):
        if isinstance(other, tuple):
            # compare via the legacy tuple representation
            return tuple(self) < other
        else:
            return super(_SetuptoolsVersionMixin, self).__lt__(other)

    def __le__(self, other):
        if isinstance(other, tuple):
            return tuple(self) <= other
        else:
            return super(_SetuptoolsVersionMixin, self).__le__(other)

    def __eq__(self, other):
        if isinstance(other, tuple):
            return tuple(self) == other
        else:
            return super(_SetuptoolsVersionMixin, self).__eq__(other)

    def __ge__(self, other):
        if isinstance(other, tuple):
            return tuple(self) >= other
        else:
            return super(_SetuptoolsVersionMixin, self).__ge__(other)

    def __gt__(self, other):
        if isinstance(other, tuple):
            return tuple(self) > other
        else:
            return super(_SetuptoolsVersionMixin, self).__gt__(other)

    def __ne__(self, other):
        if isinstance(other, tuple):
            return tuple(self) != other
        else:
            return super(_SetuptoolsVersionMixin, self).__ne__(other)

    def __getitem__(self, key):
        # index/slice as if this were the legacy parsed tuple
        return tuple(self)[key]

    def __iter__(self):
        # Reproduce the pre-8.0 parse_version() tuple element by element so
        # that iterating (or tuple()-ing) a version object still works.
        component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
        replace = {
            'pre': 'c',
            'preview': 'c',
            '-': 'final-',
            'rc': 'c',
            'dev': '@',
        }.get

        def _parse_version_parts(s):
            # yield zero-padded numeric parts and '*'-prefixed alpha parts,
            # normalizing prerelease spellings via `replace`
            for part in component_re.split(s):
                part = replace(part, part)
                if not part or part == '.':
                    continue
                if part[:1] in '0123456789':
                    # pad for numeric comparison
                    yield part.zfill(8)
                else:
                    yield '*'+part
            # ensure that alpha/beta/candidate are before final
            yield '*final'

        def old_parse_version(s):
            parts = []
            for part in _parse_version_parts(s.lower()):
                if part.startswith('*'):
                    # remove '-' before a prerelease tag
                    if part < '*final':
                        while parts and parts[-1] == '*final-':
                            parts.pop()
                    # remove trailing zeros from each series of numeric parts
                    while parts and parts[-1] == '00000000':
                        parts.pop()
                parts.append(part)
            return tuple(parts)

        # Warn for use of this function
        warnings.warn(
            "You have iterated over the result of "
            "pkg_resources.parse_version. This is a legacy behavior which is "
            "inconsistent with the new version class introduced in setuptools "
            "8.0. In most cases, conversion to a tuple is unnecessary. For "
            "comparison of versions, sort the Version instances directly. If "
            "you have another use case requiring the tuple, please file a "
            "bug with the setuptools project describing that need.",
            RuntimeWarning,
            stacklevel=1,
        )

        for part in old_parse_version(str(self)):
            yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
    """PEP 440-compliant version with legacy tuple-comparison support."""
    pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
                              packaging.version.LegacyVersion):
    """Non-PEP 440 (legacy) version with legacy tuple-comparison support."""
    pass
def parse_version(v):
    """Parse a version string into a comparable version object.

    Returns a ``SetuptoolsVersion`` for PEP 440-compliant strings, falling
    back to ``SetuptoolsLegacyVersion`` for anything else.
    """
    try:
        return SetuptoolsVersion(v)
    except packaging.version.InvalidVersion:
        return SetuptoolsLegacyVersion(v)
# maps state-variable name -> its kind ('dict', 'object', ...), used by the
# module-level __getstate__/__setstate__ helpers
_state_vars = {}

def _declare_state(vartype, **kw):
    """Define module globals that participate in state save/restore.

    Each keyword becomes a module global, and its name is recorded in
    ``_state_vars`` under the given `vartype` so the matching ``_sget_*`` /
    ``_sset_*`` helper can be looked up later.
    """
    module_globals = globals()
    for name, value in kw.items():
        module_globals[name] = value
        _state_vars[name] = vartype
def __getstate__():
    """Snapshot every registered module-level state variable.

    For each entry in ``_state_vars``, the matching ``_sget_<kind>`` helper
    is applied to the current global value; the results are returned as a
    name -> snapshot mapping.
    """
    module_globals = globals()
    return {
        name: module_globals['_sget_' + kind](module_globals[name])
        for name, kind in _state_vars.items()
    }
def __setstate__(state):
    """Restore module-level state previously captured by ``__getstate__``.

    Each snapshot is handed to the matching ``_sset_<kind>`` helper along
    with the current global value, so mutable globals are updated in place.
    Returns `state` unchanged.
    """
    module_globals = globals()
    for name, snapshot in state.items():
        setter = module_globals['_sset_' + _state_vars[name]]
        setter(name, module_globals[name], snapshot)
    return state
def _sget_dict(val):
    # pickle helper: shallow-copy a dict-valued state variable
    return val.copy()

def _sset_dict(key, ob, state):
    # restore a dict-valued state variable in place
    ob.clear()
    ob.update(state)

def _sget_object(val):
    # pickle helper: snapshot an object that exposes __getstate__
    return val.__getstate__()

def _sset_object(key, ob, state):
    # restore an object's state via its __setstate__
    ob.__setstate__(state)

# no-op handlers for state variables that need no snapshot/restore
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
    """Return this platform's maximum compatible version.

    ``distutils.util.get_platform()`` normally reports the minimum version of
    Mac OS X required to *use* extensions produced by distutils, but for
    compatibility checks we need the version of Mac OS X actually *running*.
    When the build-platform string carries a Mac OS X version, splice in the
    live OS version (keeping the architecture suffix).

    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    """
    plat = get_build_platform()
    m = macosVersionString.match(plat)
    if m is None or sys.platform != "darwin":
        return plat
    try:
        # substitute the running OS version for the build-time one
        return 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
    except ValueError:
        # not actually Mac OS X; keep the build platform string
        return plat
# Public API exported by ``from pkg_resources import *``.
__all__ = [
    # Basic resource access and distribution/entry point discovery
    'require', 'run_script', 'get_provider', 'get_distribution',
    'load_entry_point', 'get_entry_map', 'get_entry_info',
    'iter_entry_points',
    'resource_string', 'resource_stream', 'resource_filename',
    'resource_listdir', 'resource_exists', 'resource_isdir',
    # Environmental control
    'declare_namespace', 'working_set', 'add_activation_listener',
    'find_distributions', 'set_extraction_path', 'cleanup_resources',
    'get_default_cache',
    # Primary implementation classes
    'Environment', 'WorkingSet', 'ResourceManager',
    'Distribution', 'Requirement', 'EntryPoint',
    # Exceptions
    'ResolutionError', 'VersionConflict', 'DistributionNotFound',
    'UnknownExtra', 'ExtractionError',
    # Warnings
    'PEP440Warning',
    # Parsing functions and string utilities
    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
    'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
    # filesystem utilities
    'ensure_directory', 'normalize_path',
    # Distribution "precedence" constants
    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
    # "Provider" interfaces, implementations, and registration/lookup APIs
    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
    'register_finder', 'register_namespace_handler', 'register_loader_type',
    'fixup_namespace_packages', 'get_importer',
    # Deprecated/backward compatibility only
    'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
    """Abstract base for dependency resolution errors"""

    def __repr__(self):
        # render as the concrete subclass name plus the args tuple,
        # e.g. VersionConflict('dist', 'req')
        return "{}{!r}".format(type(self).__name__, self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
    """
    A VersionConflict that accepts a third parameter, the set of the
    requirements that required the installed Distribution.
    """

    _template = VersionConflict._template + ' by {self.required_by}'

    @property
    def required_by(self):
        # the set of requirers (third constructor argument) that pulled in
        # the conflicting distribution
        return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""
    # Raised when a requirement references an extra (e.g. 'pkg[foo]') that
    # the distribution does not declare.
# registry mapping loader types to IResourceProvider factories; populated
# via register_loader_type()
_provider_factories = {}

# 'major.minor' of the running Python, e.g. '3.9'.
# BUG FIX: the original used ``sys.version[:3]``, which truncates to '3.1'
# on Python 3.10+; derive the value from sys.version_info instead.
PY_MAJOR = '{}.{}'.format(*sys.version_info[:2])

# Distribution "precedence" constants: higher values win when several
# distributions for the same project are available.
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`

    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.
    """
    # later registrations for the same loader type overwrite earlier ones
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq, Requirement):
        # a Requirement: resolve to the active (or freshly required) dist
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    # otherwise a module name: import it if it isn't loaded yet
    if moduleOrReq not in sys.modules:
        __import__(moduleOrReq)
    module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    # dispatch on the loader type to find a registered provider factory
    return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
    """Return the Mac OS X version as a list of string components.

    The result is memoized in the mutable default `_cache`, so the system
    is only queried once per process.
    """
    if _cache:
        return _cache[0]
    version = platform.mac_ver()[0]
    if not version:
        # fallback for MacPorts: read the version from the system plist
        plist = '/System/Library/CoreServices/SystemVersion.plist'
        if os.path.exists(plist) and hasattr(plistlib, 'readPlist'):
            plist_content = plistlib.readPlist(plist)
            if 'ProductVersion' in plist_content:
                version = plist_content['ProductVersion']
    _cache.append(version.split('.'))
    return _cache[0]
def _macosx_arch(machine):
    """Map uname machine names to Mac OS X architecture labels."""
    # both PowerPC spellings collapse to 'ppc'; anything else passes through
    if machine in ('PowerPC', 'Power_Macintosh'):
        return 'ppc'
    return machine
def get_build_platform():
    """Return this platform's string for platform-specific distributions

    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    try:
        # Python 2.7 or >=3.2
        from sysconfig import get_platform
    except ImportError:
        from distutils.util import get_platform

    plat = get_platform()
    if sys.platform != "darwin" or plat.startswith('macosx-'):
        return plat
    try:
        version = _macosx_vers()
        machine = os.uname()[4].replace(" ", "_")
        return "macosx-%d.%d-%s" % (
            int(version[0]), int(version[1]), _macosx_arch(machine))
    except ValueError:
        # a non-Mac darwin system: fall back to the sysconfig value
        return plat
# matches e.g. 'macosx-10.9-x86_64' -> groups (major, minor, machine)
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
# matches legacy pre-setuptools-0.6 strings like 'darwin-8.11.1-Power_Macintosh'
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
    """Can code for the `provided` platform run on the `required` platform?

    Returns true if either platform is ``None``, or the platforms are equal.

    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    if provided is None or required is None or provided == required:
        # easy case
        return True

    # Mac OS X special cases
    reqMac = macosVersionString.match(required)
    if not reqMac:
        # XXX Linux and other platforms' special cases should go here
        return False

    provMac = macosVersionString.match(provided)
    if not provMac:
        # backwards compatibility for packages built before setuptools 0.6,
        # which used the 'darwin' designation instead of 'macosx'
        provDarwin = darwinVersionString.match(provided)
        if provDarwin:
            dversion = int(provDarwin.group(1))
            macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
            if (dversion == 7 and macosversion >= "10.3") or \
               (dversion == 8 and macosversion >= "10.4"):
                return True
        # egg isn't macosx or legacy darwin
        return False

    # compatible when major version and machine type match and the required
    # OS minor version is at least the provided one
    return (provMac.group(1) == reqMac.group(1)
            and provMac.group(3) == reqMac.group(3)
            and int(provMac.group(2)) <= int(reqMac.group(2)))
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    # wipe the caller's namespace so the script executes in a clean module,
    # preserving only its __name__
    caller_globals = sys._getframe(1).f_globals
    caller_name = caller_globals['__name__']
    caller_globals.clear()
    caller_globals['__name__'] = caller_name
    require(dist_spec)[0].run_script(script_name, caller_globals)

# backward compatibility
run_main = run_script
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    if isinstance(dist, string_types):
        # parse a requirement string such as 'foo>=1.0'
        dist = Requirement.parse(dist)
    if isinstance(dist, Requirement):
        # resolve the requirement to an active distribution
        dist = get_provider(dist)
    if isinstance(dist, Distribution):
        return dist
    raise TypeError("Expected string, Requirement, or Distribution", dist)
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    distribution = get_distribution(dist)
    return distribution.load_entry_point(group, name)
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    distribution = get_distribution(dist)
    return distribution.get_entry_map(group)
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    distribution = get_distribution(dist)
    return distribution.get_entry_info(group, name)
class IMetadataProvider:
    """Interface specification for distribution-metadata access.

    Methods are declared without ``self`` purely as interface documentation;
    concrete providers implement them as regular methods.
    """

    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""

    def get_metadata(name):
        """The named metadata resource as a string"""

    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines

        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""

    def metadata_isdir(name):
        """Is the named metadata a directory? (like ``os.path.isdir()``)"""

    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""

    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources"""
    # Like IMetadataProvider, the method declarations below are interface
    # documentation only; implementations define them as regular methods.

    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`

        `manager` must be an ``IResourceManager``"""

    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`

        `manager` must be an ``IResourceManager``"""

    def has_resource(resource_name):
        """Does the package contain the named resource?"""

    def resource_isdir(resource_name):
        """Is the named resource a directory? (like ``os.path.isdir()``)"""

    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""

    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        # entries: path items in insertion order; entry_keys: entry -> list of
        # project keys found there; by_key: project key -> active
        # Distribution; callbacks: activation listeners (see subscribe()).
        self.entries = []
        self.entry_keys = {}
        self.by_key = {}
        self.callbacks = []
        if entries is None:
            entries = sys.path
        for entry in entries:
            self.add_entry(entry)

    @classmethod
    def _build_master(cls):
        """
        Prepare the master working set.
        """
        ws = cls()
        try:
            from __main__ import __requires__
        except ImportError:
            # The main program does not list any requirements
            return ws
        # ensure the requirements are met
        try:
            ws.require(__requires__)
        except VersionConflict:
            # retry with a working set built only from the requirements
            return cls._build_from_requirements(__requires__)
        return ws

    @classmethod
    def _build_from_requirements(cls, req_spec):
        """
        Build a working set from a requirement spec. Rewrites sys.path.
        """
        # try it without defaults already on sys.path
        # by starting with an empty path
        ws = cls([])
        reqs = parse_requirements(req_spec)
        dists = ws.resolve(reqs, Environment())
        for dist in dists:
            ws.add(dist)
        # add any missing entries from sys.path
        for entry in sys.path:
            if entry not in ws.entries:
                ws.add_entry(entry)
        # then copy back to sys.path
        sys.path[:] = ws.entries
        return ws

    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it

        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added. `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)

    def __contains__(self, dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist

    def find(self, req):
        """Find a distribution matching requirement `req`

        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`. But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            # XXX add more info
            raise VersionConflict(dist, req)
        return dist

    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`

        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]

    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # clear the caller's globals so the script runs in a clean namespace,
        # preserving only __name__
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)

    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set

        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue
            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key]=1
                    yield self.by_key[key]

    def add(self, dist, entry=None, insert=True, replace=False):
        """Add `dist` to working set, associated with `entry`

        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).

        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set, unless `replace=True`.
        If it's added, any callbacks registered with the ``subscribe()`` method
        will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry)
        if entry is None:
            entry = dist.location
        # index the distribution under both the given entry and its location
        keys = self.entry_keys.setdefault(entry,[])
        keys2 = self.entry_keys.setdefault(dist.location,[])
        if not replace and dist.key in self.by_key:
            # ignore hidden distros
            return
        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        # notify activation listeners
        self._added_new(dist)

    def resolve(self, requirements, env=None, installer=None,
                replace_conflicting=False):
        """List all distributions needed to (recursively) meet `requirements`

        `requirements` must be a sequence of ``Requirement`` objects. `env`,
        if supplied, should be an ``Environment`` instance. If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set. `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.

        Unless `replace_conflicting=True`, raises a VersionConflict exception if
        any requirements are found on the path that have the correct name but
        the wrong version. Otherwise, if an `installer` is supplied it will be
        invoked to obtain the correct version of the requirement and activate
        it.
        """
        # set up the stack
        requirements = list(requirements)[::-1]
        # set of processed requirements
        processed = {}
        # key -> dist
        best = {}
        to_activate = []
        # Mapping of requirement to set of distributions that required it;
        # useful for reporting info about conflicts.
        required_by = collections.defaultdict(set)
        while requirements:
            # process dependencies breadth-first
            req = requirements.pop(0)
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue
            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None or (dist not in req and replace_conflicting):
                    ws = self
                    if env is None:
                        if dist is None:
                            env = Environment(self.entries)
                        else:
                            # Use an empty environment and workingset to avoid
                            # any further conflicts with the conflicting
                            # distribution
                            env = Environment([])
                            ws = WorkingSet([])
                    dist = best[req.key] = env.best_match(req, ws, installer)
                    if dist is None:
                        requirers = required_by.get(req, None)
                        raise DistributionNotFound(req, requirers)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                dependent_req = required_by[req]
                raise VersionConflict(dist, req).with_context(dependent_req)
            # push the new requirements onto the stack
            new_requirements = dist.requires(req.extras)[::-1]
            requirements.extend(new_requirements)
            # Register the new requirements needed by req
            for new_requirement in new_requirements:
                required_by[new_requirement].add(req.project_name)
            processed[req] = True
        # return list of distros to activate
        return to_activate

    def find_plugins(self, plugin_env, full_env=None, installer=None,
                     fallback=True):
        """Find all activatable distributions in `plugin_env`

        Example usage::

            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            # add plugins+libs to sys.path
            map(working_set.add, distributions)
            # display errors
            print('Could not load', errors)

        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions. If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.

        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.

        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies. `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """
        plugin_projects = list(plugin_env)
        # scan project names in alphabetic order
        plugin_projects.sort()
        error_info = {}
        distributions = {}
        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env
        shadow_set = self.__class__([])
        # put all our entries in shadow_set
        list(map(shadow_set.add, self))
        for project_name in plugin_projects:
            for dist in plugin_env[project_name]:
                req = [dist.as_requirement()]
                try:
                    resolvees = shadow_set.resolve(req, env, installer)
                except ResolutionError as v:
                    # save error info
                    error_info[dist] = v
                    if fallback:
                        # try the next older version of project
                        continue
                    else:
                        # give up on this project, keep going
                        break
                else:
                    list(map(shadow_set.add, resolvees))
                    distributions.update(dict.fromkeys(resolvees))
                    # success, no need to try any more versions of this project
                    break
        distributions = list(distributions)
        distributions.sort()
        return distributions, error_info

    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated

        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required. The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))
        for dist in needed:
            self.add(dist)
        return needed

    def subscribe(self, callback):
        """Invoke `callback` for all distributions (including existing ones)"""
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        for dist in self:
            callback(dist)

    def _added_new(self, dist):
        # invoke every registered activation listener for `dist`
        for callback in self.callbacks:
            callback(dist)

    def __getstate__(self):
        # pickle copies of the mutable containers so later mutation of the
        # working set doesn't affect the snapshot
        return (
            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
            self.callbacks[:]
        )

    def __setstate__(self, e_k_b_c):
        entries, keys, by_key, callbacks = e_k_b_c
        self.entries = entries[:]
        self.entry_keys = keys.copy()
        self.by_key = by_key.copy()
        self.callbacks = callbacks[:]
class Environment(object):
    """Searchable snapshot of distributions on a search path"""

    # NOTE: the `platform` and `python` defaults below are evaluated once, at
    # class-definition (import) time.
    def __init__(self, search_path=None, platform=get_supported_platform(),
            python=PY_MAJOR):
        """Snapshot distributions available on a search path

        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used.

        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with. If
        unspecified, it defaults to the current platform. `python` is an
        optional string naming the desired version of Python (e.g. ``'3.3'``);
        it defaults to the current version.

        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        """
        # _distmap: project key -> list of Distributions, newest first
        self._distmap = {}
        self.platform = platform
        self.python = python
        self.scan(search_path)

    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?

        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        return (self.python is None or dist.py_version is None
            or dist.py_version==self.python) \
            and compatible_platforms(dist.platform, self.platform)

    def remove(self, dist):
        """Remove `dist` from the environment"""
        self._distmap[dist.key].remove(dist)

    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment

        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used. Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path
        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)

    def __getitem__(self, project_name):
        """Return a newest-to-oldest list of distributions for `project_name`

        Uses case-insensitive `project_name` comparison, assuming all the
        project's distributions use their project's name converted to all
        lowercase as their key.
        """
        distribution_key = project_name.lower()
        return self._distmap.get(distribution_key, [])

    def add(self, dist):
        """Add `dist` if we ``can_add()`` it and it has not already been added
        """
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key, [])
            if dist not in dists:
                dists.append(dist)
                # keep each project's list sorted newest-first
                dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)

    def best_match(self, req, working_set, installer=None):
        """Find distribution best matching `req` and usable on `working_set`

        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active. (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.) If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`. If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        dist = working_set.find(req)
        if dist is not None:
            return dist
        for dist in self[req.key]:
            # entries are newest-first, so the first match is the best one
            if dist in req:
                return dist
        # try to download/install
        return self.obtain(req, installer)

    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)

        Obtain a distro that matches requirement (e.g. via download). In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead. This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        if installer is not None:
            return installer(requirement)

    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            if self[key]:
                yield key

    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other, Distribution):
            self.add(other)
        elif isinstance(other, Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self

    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        # the new environment is unrestricted, so nothing is filtered out
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
# XXX backward compat
# `AvailableDistributions` was the pre-0.6 name for `Environment`.
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
    """An error occurred extracting a resource

    The following attributes are available from instances of this exception:

    manager
        The resource manager that raised this exception

    cache_path
        The base directory for resource extraction

    original_error
        The exception instance that caused extraction to fail
    """
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
    def __init__(self):
        # per-instance registry of extracted file paths, tracked so they can
        # be cleaned up later (see get_cache_path / cleanup_resources)
        self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""
        # the exception currently being handled, if any (this method is
        # expected to be called from inside an ``except`` block)
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()
        err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
  %s
The Python egg cache directory is currently set to:
  %s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
        )
        # attach context for callers that catch ExtractionError
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
    """Perform any platform-specific postprocessing of `tempname`.

    This is where Mac header rewrites should be done; other platforms
    don't have anything special they should do.

    Resource providers should call this method ONLY after successfully
    extracting a compressed resource.  They must NOT call it on resources
    that are already in the filesystem.

    `tempname` is the current (temporary) name of the file, and `filename`
    is the name it will be renamed to by the caller after this routine
    returns.
    """
    if os.name != 'posix':
        # Nothing to do on non-POSIX platforms.
        return
    # Make the resource readable/executable by everyone while keeping the
    # remaining permission bits intact.
    current_mode = os.stat(tempname).st_mode
    os.chmod(tempname, (current_mode | 0o555) & 0o7777)
def set_extraction_path(self, path):
    """Set the base path where resources will be extracted to, if needed.

    If you do not call this routine before any extractions take place, the
    path defaults to the return value of ``get_default_cache()``.  (Which
    is based on the ``PYTHON_EGG_CACHE`` environment variable, with
    various platform-specific fallbacks.  See that routine's documentation
    for more details.)

    Resources are extracted to subdirectories of this path based upon
    information given by the ``IResourceProvider``.  You may set this to a
    temporary directory, but then you must call ``cleanup_resources()`` to
    delete the extracted files when done.  There is no guarantee that
    ``cleanup_resources()`` will be able to remove all extracted files.

    (Note: you may not change the extraction path for a given resource
    manager once resources have been extracted, unless you first call
    ``cleanup_resources()``.)

    Raises ValueError if files were already extracted with the old path.
    """
    if not self.cached_files:
        self.extraction_path = path
        return
    raise ValueError(
        "Can't change extraction path, files already extracted"
    )
def cleanup_resources(self, force=False):
    """
    Delete all extracted resource files and directories, returning a list
    of the file and directory names that could not be successfully removed.
    This function does not have any concurrency protection, so it should
    generally only be called when the extraction path is a temporary
    directory exclusive to a single process. This method is not
    automatically called; you must call it explicitly or register it as an
    ``atexit`` function if you wish to ensure cleanup of a temporary
    directory used for extractions.
    """
    # XXX
    # NOTE(review): unimplemented stub -- despite the docstring, nothing
    # is deleted and the implicit return value is None, not a list.
def get_default_cache():
    """Determine the default cache location.

    Returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
    Otherwise, on Windows, returns a "Python-Eggs" subdirectory of the
    "Application Data" directory; on all other systems, "~/.python-eggs".

    Raises RuntimeError on Windows when none of the candidate environment
    variables are set.
    """
    try:
        return os.environ['PYTHON_EGG_CACHE']
    except KeyError:
        pass

    if os.name != 'nt':
        return os.path.expanduser('~/.python-eggs')

    # XXX this may be locale-specific!
    app_data = 'Application Data'
    # Candidate (env-var combination, optional subdir) pairs, best first.
    app_homes = [
        # best option, should be locale-safe
        (('APPDATA',), None),
        (('USERPROFILE',), app_data),
        (('HOMEDRIVE', 'HOMEPATH'), app_data),
        (('HOMEPATH',), app_data),
        (('HOME',), None),
        # 95/98/ME
        (('WINDIR',), app_data),
    ]

    for keys, subdir in app_homes:
        dirname = ''
        for key in keys:
            if key in os.environ:
                dirname = os.path.join(dirname, os.environ[key])
            else:
                # One of the required variables is missing: try next combo.
                break
        else:
            if subdir:
                dirname = os.path.join(dirname, subdir)
            return os.path.join(dirname, 'Python-Eggs')
    else:
        # BUG FIX: corrected misspelled "enviroment" in the error message.
        raise RuntimeError(
            "Please set the PYTHON_EGG_CACHE environment variable"
        )
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name.

    Each run of characters outside ``[A-Za-z0-9.]`` collapses to a single
    ``'-'``.
    """
    illegal_runs = re.compile('[^A-Za-z0-9.]+')
    return illegal_runs.sub('-', name)
def safe_version(version):
    """
    Convert an arbitrary string to a standard version string.

    Prefers the normalized form from the vendored ``packaging`` library;
    non-PEP-440 inputs fall back to a dash-collapsed escaped form.
    """
    try:
        # normalize the version
        return str(packaging.version.Version(version))
    except packaging.version.InvalidVersion:
        # Legacy fallback: spaces become dots, other illegal runs '-'.
        dotted = version.replace(' ', '.')
        return re.sub('[^A-Za-z0-9.]+', '-', dotted)
def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name.

    Runs of characters outside ``[A-Za-z0-9.]`` collapse to a single
    ``'_'`` (note: dots are preserved), and the result is lowercased.
    """
    collapsed = re.sub('[^A-Za-z0-9.]+', '_', extra)
    return collapsed.lower()
def to_filename(name):
    """Convert a project or version name to its filename-escaped form.

    Every ``'-'`` character becomes ``'_'``.
    """
    return '_'.join(name.split('-'))
class MarkerEvaluation(object):
    """Evaluate PEP 426 environment markers by walking 'parser' parse trees.

    The classmethods double as handlers for grammar nodes (``test``,
    ``and_test``, ``atom``, ``comparison``); ``get_op`` dispatches on either
    a grammar symbol id or a comparison-operator string.  When the CPython
    ``parser`` module is unavailable, evaluation falls back to the vendored
    ``_markerlib`` (see the class-body conditional near the end).
    """

    # Marker variable names mapped to zero-argument callables that produce
    # the current interpreter's value for that variable.
    values = {
        'os_name': lambda: os.name,
        'sys_platform': lambda: sys.platform,
        'python_full_version': platform.python_version,
        # NOTE(review): a 3-character slice yields "3.1" for "3.10.x";
        # presumably acceptable for the interpreters this vendored copy
        # targets -- confirm before reusing elsewhere.
        'python_version': lambda: platform.python_version()[:3],
        'platform_version': platform.version,
        'platform_machine': platform.machine,
        'python_implementation': platform.python_implementation,
    }

    @classmethod
    def is_invalid_marker(cls, text):
        """
        Validate text as a PEP 426 environment marker; return an exception
        if invalid or False otherwise.
        """
        try:
            cls.evaluate_marker(text)
        except SyntaxError as e:
            return cls.normalize_exception(e)
        return False

    @staticmethod
    def normalize_exception(exc):
        """
        Given a SyntaxError from a marker evaluation, normalize the error
        message:
        - Remove indications of filename and line number.
        - Replace platform-specific error messages with standard error
          messages.
        """
        subs = {
            'unexpected EOF while parsing': 'invalid syntax',
            'parenthesis is never closed': 'invalid syntax',
        }
        exc.filename = None
        exc.lineno = None
        exc.msg = subs.get(exc.msg, exc.msg)
        return exc

    @classmethod
    def and_test(cls, nodelist):
        # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
        # Evaluate every operand eagerly, then fold with bitwise-and (the
        # operands are booleans, so & acts as logical 'and').
        items = [
            cls.interpret(nodelist[i])
            for i in range(1, len(nodelist), 2)
        ]
        return functools.reduce(operator.and_, items)

    @classmethod
    def test(cls, nodelist):
        # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
        items = [
            cls.interpret(nodelist[i])
            for i in range(1, len(nodelist), 2)
        ]
        return functools.reduce(operator.or_, items)

    @classmethod
    def atom(cls, nodelist):
        # Only parenthesized sub-expressions are permitted as atoms.
        t = nodelist[1][0]
        if t == token.LPAR:
            if nodelist[2][0] == token.RPAR:
                raise SyntaxError("Empty parentheses")
            return cls.interpret(nodelist[2])
        msg = "Language feature not supported in environment markers"
        raise SyntaxError(msg)

    @classmethod
    def comparison(cls, nodelist):
        if len(nodelist) > 4:
            msg = "Chained comparison not allowed in environment markers"
            raise SyntaxError(msg)
        comp = nodelist[2][1]
        cop = comp[1]
        if comp[0] == token.NAME:
            # Two-token operators arrive as a 3-element node:
            # 'not in' / 'is not'.
            if len(nodelist[2]) == 3:
                if cop == 'not':
                    cop = 'not in'
                else:
                    cop = 'is not'
        try:
            cop = cls.get_op(cop)
        except KeyError:
            msg = repr(cop) + " operator not allowed in environment markers"
            raise SyntaxError(msg)
        return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))

    @classmethod
    def get_op(cls, op):
        # Dispatch table keyed by grammar symbol id (sub-expressions) or by
        # the comparison operator's source text.
        ops = {
            symbol.test: cls.test,
            symbol.and_test: cls.and_test,
            symbol.atom: cls.atom,
            symbol.comparison: cls.comparison,
            'not in': lambda x, y: x not in y,
            'in': lambda x, y: x in y,
            '==': operator.eq,
            '!=': operator.ne,
            '<': operator.lt,
            '>': operator.gt,
            '<=': operator.le,
            '>=': operator.ge,
        }
        if hasattr(symbol, 'or_test'):
            # Some grammar versions name the top-level node 'or_test'.
            ops[symbol.or_test] = cls.test
        return ops[op]

    @classmethod
    def evaluate_marker(cls, text, extra=None):
        """
        Evaluate a PEP 426 environment marker on CPython 2.4+.
        Return a boolean indicating the marker result in this environment.
        Raise SyntaxError if marker is invalid.

        This implementation uses the 'parser' module, which is not
        implemented on Jython and has been superseded by the 'ast' module
        in Python 2.6 and later.
        """
        return cls.interpret(parser.expr(text).totuple(1)[1])

    @classmethod
    def _markerlib_evaluate(cls, text):
        """
        Evaluate a PEP 426 environment marker using markerlib.
        Return a boolean indicating the marker result in this environment.
        Raise SyntaxError if marker is invalid.
        """
        from pip._vendor import _markerlib
        # markerlib implements Metadata 1.2 (PEP 345) environment markers.
        # Translate the variables to Metadata 2.0 (PEP 426).
        env = _markerlib.default_environment()
        for key in env.keys():
            new_key = key.replace('.', '_')
            env[new_key] = env.pop(key)
        try:
            result = _markerlib.interpret(text, env)
        except NameError as e:
            # markerlib reports unknown variables via NameError; surface it
            # as SyntaxError to match the parser-based implementation.
            raise SyntaxError(e.args[0])
        return result

    if 'parser' not in globals():
        # Fall back to less-complete _markerlib implementation if 'parser'
        # module is not available (evaluated once at class-creation time).
        evaluate_marker = _markerlib_evaluate

    @classmethod
    def interpret(cls, nodelist):
        # Collapse single-child wrapper nodes down to the meaningful node.
        while len(nodelist) == 2:
            nodelist = nodelist[1]
        try:
            op = cls.get_op(nodelist[0])
        except KeyError:
            raise SyntaxError("Comparison or logical expression expected")
        return op(nodelist)

    @classmethod
    def evaluate(cls, nodelist):
        # Evaluate a terminal: either a marker variable name or a string
        # literal token.
        while len(nodelist) == 2:
            nodelist = nodelist[1]
        kind = nodelist[0]
        name = nodelist[1]
        if kind == token.NAME:
            try:
                op = cls.values[name]
            except KeyError:
                raise SyntaxError("Unknown name %r" % name)
            return op()
        if kind == token.STRING:
            s = nodelist[1]
            if not cls._safe_string(s):
                raise SyntaxError(
                    "Only plain strings allowed in environment markers")
            # Strip the surrounding quote characters from the source token.
            return s[1:-1]
        msg = "Language feature not supported in environment markers"
        raise SyntaxError(msg)

    @staticmethod
    def _safe_string(cand):
        # Accept only simple single- or double-quoted literals: no triple
        # quotes and no backslash escapes.
        return (
            cand[:1] in "'\"" and
            not cand.startswith('"""') and
            not cand.startswith("'''") and
            '\\' not in cand
        )
# Module-level conveniences re-exporting the MarkerEvaluation entry points.
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""

    # Filled in by subclasses that locate an enclosing egg (see EggProvider);
    # a falsy egg_info makes all metadata queries return empty results.
    egg_name = None
    egg_info = None
    loader = None

    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))

    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)

    def get_resource_stream(self, manager, resource_name):
        # Wrap the raw resource bytes in a file-like object.
        return io.BytesIO(self.get_resource_string(manager, resource_name))

    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))

    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))

    def has_metadata(self, name):
        return self.egg_info and self._has(self._fn(self.egg_info, name))

    if sys.version_info <= (3,):
        # Python 2: metadata is returned as raw (byte) strings.
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info, name))
    else:
        # Python 3: decode the raw bytes as UTF-8 text.
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info, name)).decode("utf-8")

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))

    def resource_isdir(self, resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))

    def metadata_isdir(self, name):
        return self.egg_info and self._isdir(self._fn(self.egg_info, name))

    def resource_listdir(self, resource_name):
        return self._listdir(self._fn(self.module_path, resource_name))

    def metadata_listdir(self, name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info, name))
        return []

    def run_script(self, script_name, namespace):
        # Scripts live under the egg metadata's scripts/ subdirectory.
        script = 'scripts/' + script_name
        if not self.has_metadata(script):
            raise ResolutionError("No script named %r" % script_name)
        # Normalize all line endings to '\n' before compiling.
        script_text = self.get_metadata(script).replace('\r\n', '\n')
        script_text = script_text.replace('\r', '\n')
        script_filename = self._fn(self.egg_info, script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            # Real file on disk: compile from the file so tracebacks point
            # at the actual source.
            source = open(script_filename).read()
            code = compile(source, script_filename, 'exec')
            exec(code, namespace, namespace)
        else:
            # Zipped script: seed linecache so tracebacks can show source.
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text, script_filename, 'exec')
            exec(script_code, namespace, namespace)

    # The _has/_isdir/_listdir/_get hooks below are overridden by concrete
    # providers; the base class cannot know how to access loader storage.
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _fn(self, base, resource_name):
        # Join a '/'-separated resource name onto base with OS separators.
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base

    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )

# Default provider for any loader type without a more specific registration.
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""

    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()

    def _setup_prefix(self):
        """Locate the enclosing ``.egg`` directory, if any.

        We assume our metadata may be nested inside a "basket" of multiple
        eggs, so we walk upward from module_path (rather than using
        ``.archive``) until a path component ending in '.egg' is found.
        """
        path = self.module_path
        previous = None
        while path != previous:
            if path.lower().endswith('.egg'):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                return
            # Step one directory up; the loop stops at the filesystem root,
            # where os.path.split no longer changes the path.
            previous, path = path, os.path.split(path)[0]
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""

    def _has(self, path):
        # A resource exists iff the corresponding file or directory does.
        return os.path.exists(path)

    def _isdir(self, path):
        return os.path.isdir(path)

    def _listdir(self, path):
        return os.listdir(path)

    def get_resource_stream(self, manager, resource_name):
        # Serve straight from disk as a binary file object.
        return open(self._fn(self.module_path, resource_name), 'rb')

    def _get(self, path):
        with open(path, 'rb') as handle:
            return handle.read()

# Plain filesystem modules (loader is None) use the default provider.
register_loader_type(type(None), DefaultProvider)

if importlib_machinery is not None:
    register_loader_type(importlib_machinery.SourceFileLoader,
                         DefaultProvider)
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""

    module_path = None

    def __init__(self):
        # Nothing to set up: there is no backing module or loader.
        pass

    def _has(self, path):
        return False

    # Directory checks answer the same as existence checks: never.
    _isdir = _has

    def _get(self, path):
        return ''

    def _listdir(self, path):
        return []

# Shared singleton used as the default metadata provider.
empty_provider = EmptyProvider()
class ZipManifests(dict):
    """
    zip manifest builder
    """

    @classmethod
    def build(cls, path):
        """
        Build a dictionary similar to the zipimport directory
        caches, except instead of tuples, store ZipInfo objects.

        Use a platform-specific path separator (os.sep) for the path keys
        for compatibility with pypy on Windows.
        """
        with ContextualZipFile(path) as zfile:
            return {
                name.replace('/', os.sep): zfile.getinfo(name)
                for name in zfile.namelist()
            }

    # By default every load builds a fresh manifest; subclasses may memoize.
    load = build
class MemoizedZipManifests(ZipManifests):
    """
    Memoized zipfile manifests.
    """

    # Cache entry pairing a manifest dict with the archive mtime it reflects.
    manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')

    def load(self, path):
        """
        Load a manifest at path or return a suitable manifest already loaded.
        """
        key = os.path.normpath(path)
        current_mtime = os.stat(key).st_mtime

        cached = self.get(key)
        if cached is None or cached.mtime != current_mtime:
            # Missing or stale: rebuild and remember alongside the mtime.
            cached = self.manifest_mod(self.build(key), current_mtime)
            self[key] = cached
        return cached.manifest
class ContextualZipFile(zipfile.ZipFile):
    """
    Supplement ZipFile class to support context manager for Python 2.6
    """

    def __new__(cls, *args, **kwargs):
        """
        Construct a ZipFile or ContextualZipFile as appropriate
        """
        if hasattr(zipfile.ZipFile, '__exit__'):
            # The stdlib ZipFile already implements the context protocol;
            # hand back a plain instance instead of this shim.
            return zipfile.ZipFile(*args, **kwargs)
        return super(ContextualZipFile, cls).__new__(cls)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""

    # Cached list of eagerly-extracted resource names; populated lazily by
    # _get_eager_resources().
    eagers = None
    # Manifest cache shared by all instances, keyed by archive path.
    _zip_manifests = MemoizedZipManifests()

    def __init__(self, module):
        EggProvider.__init__(self, module)
        # Archive path plus separator; prefix of every virtual file path.
        self.zip_pre = self.loader.archive + os.sep

    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.zip_pre)
        )

    def _parts(self, zip_path):
        # Convert a zipfile subpath into an egg-relative path part list.
        # pseudo-fs path
        fspath = self.zip_pre + zip_path
        if fspath.startswith(self.egg_root + os.sep):
            return fspath[len(self.egg_root) + 1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.egg_root)
        )

    @property
    def zipinfo(self):
        # ZipInfo manifest for this archive (memoized per archive mtime).
        return self._zip_manifests.load(self.loader.archive)

    def get_resource_filename(self, manager, resource_name):
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            # Eager resources (e.g. native libs) must all be extracted
            # together before any one of them is usable.
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)

    @staticmethod
    def _get_date_and_size(zip_stat):
        size = zip_stat.file_size
        # ymdhms+wday, yday, dst
        date_time = zip_stat.date_time + (0, 0, -1)
        # 1980 offset already done
        timestamp = time.mktime(date_time)
        return timestamp, size

    def _extract_resource(self, manager, zip_path):
        if zip_path in self._index():
            # Directory entry: extract every child, then report the dir.
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            # return the extracted directory name
            return os.path.dirname(last)

        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])

        if not WRITE_SUPPORT:
            raise IOError('"os.rename" and "os.unlink" are not supported '
                          'on this platform')
        try:
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )

            if self._is_current(real_path, zip_path):
                return real_path

            # Write to a temp name first so concurrent extractors never see
            # a half-written file, then rename into place.
            outf, tmpnam = _mkstemp(".$extract",
                                    dir=os.path.dirname(real_path))
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            # Stamp the file with the archive's timestamp so _is_current
            # can compare mtimes later.
            utime(tmpnam, (timestamp, timestamp))
            manager.postprocess(tmpnam, real_path)

            try:
                rename(tmpnam, real_path)
            except os.error:
                if os.path.isfile(real_path):
                    if self._is_current(real_path, zip_path):
                        # the file became current since it was checked above,
                        # so proceed.
                        return real_path
                    # Windows, del old file and retry
                    elif os.name == 'nt':
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise
        except os.error:
            # report a user-friendly error
            manager.extraction_error()

        return real_path

    def _is_current(self, file_path, zip_path):
        """
        Return True if the file_path is current for this zip_path
        """
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not os.path.isfile(file_path):
            return False
        stat = os.stat(file_path)
        if stat.st_size != size or stat.st_mtime != timestamp:
            return False
        # check that the contents match
        zip_contents = self.loader.get_data(zip_path)
        with open(file_path, 'rb') as f:
            file_contents = f.read()
        return zip_contents == file_contents

    def _get_eager_resources(self):
        # Lazily read the native_libs.txt / eager_resources.txt metadata
        # lists; cached on first use.
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers

    def _index(self):
        # Directory index mapping each directory subpath to its child names;
        # built once from the zip manifest and cached on the instance.
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        # All ancestors already indexed; just add this child.
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind

    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        return zip_path in self.zipinfo or zip_path in self._index()

    def _isdir(self, fspath):
        return self._zipinfo_name(fspath) in self._index()

    def _listdir(self, fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))

    def _eager_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.egg_root, resource_name))

    def _resource_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.module_path, resource_name))

register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and will be the contents of the file at
    the provided location.
    """

    def __init__(self, path):
        # Path to the standalone PKG-INFO file.
        self.path = path

    def has_metadata(self, name):
        # Only PKG-INFO is ever reported as present.
        return name == 'PKG-INFO'

    def get_metadata(self, name):
        """Return the PKG-INFO contents; raise KeyError for anything else."""
        if name != 'PKG-INFO':
            raise KeyError("No metadata except PKG-INFO is available")
        # BUG FIX: the 'U' (universal newlines) open-mode flag is deprecated
        # since Python 3.4 and removed in 3.11; plain text mode already
        # performs universal-newline translation on Python 3.
        with open(self.path, 'r') as f:
            metadata = f.read()
        return metadata

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories

    Usage::

        # Development eggs:

        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir, project_name=dist_name, metadata=metadata)

        # Unpacked egg directories:

        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """

    def __init__(self, path, egg_info):
        # Base directory served for resource requests.
        self.module_path = path
        # Directory holding the metadata files themselves.
        self.egg_info = egg_info
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""

    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        self.zip_pre = importer.archive + os.sep
        self.loader = importer
        prefix = importer.prefix
        # A non-empty prefix means the egg is nested inside a "basket" zip;
        # the module path then points inside the archive.
        self.module_path = (
            os.path.join(importer.archive, prefix)
            if prefix
            else importer.archive
        )
        self._setup_prefix()
# Registry of distribution finders keyed by importer type; preserved across
# pkg_resources reloads via _declare_state.
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path
    item handler), and `distribution_finder` is a callable that, passed a
    path item and the importer instance, yields ``Distribution`` instances
    found on that path item. See ``pkg_resources.find_on_path`` for an
    example."""
    # Later registrations for the same importer type overwrite earlier ones.
    _distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    # Look up the finder registered for this path item's importer type and
    # delegate to it.
    importer = get_importer(path_item)
    finder = _find_adapter(_distribution_finders, importer)
    return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
    """
    Find eggs in zip files; possibly multiple nested eggs.
    """
    if importer.archive.endswith('.whl'):
        # wheels are not supported with this finder
        # they don't have PKG-INFO metadata, and won't ever contain eggs
        return
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        # The archive itself is an egg.
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        # don't yield nested distros
        return
    for subitem in metadata.resource_listdir('/'):
        if not subitem.endswith('.egg'):
            continue
        # Recurse into each egg nested inside this "basket" archive.
        subpath = os.path.join(path_item, subitem)
        nested_importer = zipimport.zipimporter(subpath)
        for dist in find_eggs_in_zip(nested_importer, subpath):
            yield dist

register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
    # Fallback finder for importer types with no distribution support.
    return ()
# Importer types without a specific finder yield no distributions.
register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)

    if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
        if path_item.lower().endswith('.egg'):
            # unpacked egg
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item, 'EGG-INFO')
                )
            )
        else:
            # scan for .egg and .egg-info in directory
            for entry in os.listdir(path_item):
                lower = entry.lower()
                if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
                    fullpath = os.path.join(path_item, entry)
                    if os.path.isdir(fullpath):
                        # egg-info directory, allow getting metadata
                        metadata = PathMetadata(path_item, fullpath)
                    else:
                        # single metadata file (PKG-INFO style)
                        metadata = FileMetadata(fullpath)
                    yield Distribution.from_location(
                        path_item, entry, metadata, precedence=DEVELOP_DIST
                    )
                elif not only and lower.endswith('.egg'):
                    # nested egg (file or directory): delegate to the
                    # registered finders for that path.
                    dists = find_distributions(os.path.join(path_item, entry))
                    for dist in dists:
                        yield dist
                elif not only and lower.endswith('.egg-link'):
                    # .egg-link file: its first non-blank line names the
                    # real location of a development egg.
                    with open(os.path.join(path_item, entry)) as entry_file:
                        entry_lines = entry_file.readlines()
                    for line in entry_lines:
                        if not line.strip():
                            continue
                        path = os.path.join(path_item, line.rstrip())
                        dists = find_distributions(path)
                        for item in dists:
                            yield item
                        # only the first non-blank line is consulted
                        break

register_finder(pkgutil.ImpImporter, find_on_path)

if importlib_machinery is not None:
    register_finder(importlib_machinery.FileFinder, find_on_path)
# Namespace-package machinery state, preserved across pkg_resources reloads.
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path
    item handler), and `namespace_handler` is a callable like this::

        def namespace_handler(importer, path_entry, moduleName, module):
            # return a path_entry to use for child packages

    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath. For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    # Later registrations for the same importer type overwrite earlier ones.
    _namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""
    importer = get_importer(path_item)
    if importer is None:
        # No importer handles this path item; nothing to do.
        return None
    loader = importer.find_module(packageName)
    if loader is None:
        # This path item does not contain the package.
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # Create a fresh namespace module with an empty __path__ and bind
        # it onto its parent package.
        module = sys.modules[packageName] = types.ModuleType(packageName)
        module.__path__ = []
        _set_parent_ns(packageName)
    elif not hasattr(module, '__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer, path_item, packageName, module)
    if subpath is not None:
        # Loading may replace __path__; afterwards re-merge any entries we
        # collected so none are lost.
        path = module.__path__
        path.append(subpath)
        loader.load_module(packageName)
        for path_item in path:
            if path_item not in module.__path__:
                module.__path__.append(path_item)
    return subpath
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""
    # Serialize against concurrent imports while mutating global state.
    _imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            # Already declared; nothing to do.
            return

        path, parent = sys.path, None
        if '.' in packageName:
            # Recursively declare (and if necessary import) the parent
            # package first, then search the parent's __path__ instead of
            # sys.path.
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)

        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent, []).append(packageName)
        _namespace_packages.setdefault(packageName, [])

        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)
    finally:
        _imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    _imp.acquire_lock()
    try:
        for pkg_name in _namespace_packages.get(parent, ()):
            subpath = _handle_ns(pkg_name, path_item)
            if subpath:
                # Recurse so nested namespace packages pick up the new
                # path entry as well.
                fixup_namespace_packages(subpath, pkg_name)
    finally:
        # Always release the import lock, even if a handler raised.
        _imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    # Only contribute the path if an equivalent entry isn't already present.
    already_present = any(
        _normalize_cached(existing) == normalized
        for existing in module.__path__
    )
    if not already_present:
        return subpath
# Filesystem and zipfile importers share the same namespace-subpath logic.
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)

if importlib_machinery is not None:
    register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
    # Default handler: this importer contributes no namespace subpath.
    return None
# Fallback: unknown importer types contribute no namespace subpaths.
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
    """Normalize a file/dir name for comparison purposes"""
    # Resolve symlinks first, then apply OS-specific case normalization.
    resolved = os.path.realpath(filename)
    return os.path.normcase(resolved)
def _normalize_cached(filename, _cache={}):
    # Memoized normalize_path; the shared mutable default dict is the
    # intentional cross-call cache.
    result = _cache.get(filename)
    if result is None:
        result = _cache[filename] = normalize_path(filename)
    return result
def _set_parent_ns(packageName):
    # Bind the child module as an attribute on its parent package so that
    # ``parent.child`` resolves after a namespace module is created.
    parent, _, child = packageName.rpartition('.')
    if parent:
        setattr(sys.modules[parent], child, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a string or sequence"""
    if isinstance(strs, string_types):
        for raw_line in strs.splitlines():
            stripped = raw_line.strip()
            # skip blank lines and comment lines
            if stripped and not stripped.startswith('#'):
                yield stripped
    else:
        # A sequence: flatten by recursing into each element.
        for element in strs:
            for line in yield_lines(element):
                yield line
# whitespace and comment
LINE_END = re.compile(r"\s*(#.*)?$").match
# line continuation
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match
# Distribution or extra
DISTRO = re.compile(r"\s*((\w|[-.])+)").match
# ver. info
VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match
# comma between items
COMMA = re.compile(r"\s*,").match
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
class EntryPoint(object):
    """Object representing an advertised importable object"""

    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        # Validate/normalize the extras by round-tripping them through a
        # dummy requirement string.
        self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
        self.dist = dist

    def __str__(self):
        # Render back to the "name = module:attrs [extras]" source form.
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s

    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)

    def load(self, require=True, *args, **kwargs):
        """
        Require packages for this EntryPoint, then resolve it.
        """
        if not require or args or kwargs:
            # Any legacy calling convention triggers a deprecation warning.
            warnings.warn(
                "Parameters to load are deprecated. Call .resolve and "
                ".require separately.",
                DeprecationWarning,
                stacklevel=2,
            )
        if require:
            self.require(*args, **kwargs)
        return self.resolve()

    def resolve(self):
        """
        Resolve the entry point from its module and attrs.
        """
        module = __import__(self.module_name, fromlist=['__name__'], level=0)
        try:
            # Walk the dotted attribute chain starting from the module.
            return functools.reduce(getattr, self.attrs, module)
        except AttributeError as exc:
            raise ImportError(str(exc))

    def require(self, env=None, installer=None):
        # Activate the distributions needed by this entry point's extras.
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        reqs = self.dist.requires(self.extras)
        items = working_set.resolve(reqs, env, installer)
        list(map(working_set.add, items))

    # Regex for "name = module:attrs [extras]" entry-point specifications.
    pattern = re.compile(
        r'\s*'
        r'(?P<name>.+?)\s*'
        r'=\s*'
        r'(?P<module>[\w.]+)\s*'
        r'(:\s*(?P<attr>[\w.]+))?\s*'
        r'(?P<extras>\[.*\])?\s*$'
    )

    @classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`

        Entry point syntax follows the form::

            name = some.module:some.attr [extra1, extra2]

        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        m = cls.pattern.match(src)
        if not m:
            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
            raise ValueError(msg, src)
        res = m.groupdict()
        extras = cls._parse_extras(res['extras'])
        attrs = res['attr'].split('.') if res['attr'] else ()
        return cls(res['name'], res['module'], attrs, extras, dist)

    @classmethod
    def _parse_extras(cls, extras_spec):
        # Parse "[a,b]" via a dummy requirement; version specifiers are
        # not allowed inside the extras brackets.
        if not extras_spec:
            return ()
        req = Requirement.parse('x' + extras_spec)
        if req.specs:
            raise ValueError()
        return req.extras

    @classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name] = ep
        return this

    @classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        if isinstance(data, dict):
            data = data.items()
        else:
            # Plain text: split into (section, lines) pairs first.
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
class Distribution(object):
    """Wrap an actual or potential sys.path entry w/metadata"""
    # Metadata file that carries the "Version:" header for this flavor.
    PKG_INFO = 'PKG-INFO'
    def __init__(self, location=None, metadata=None, project_name=None,
            version=None, py_version=PY_MAJOR, platform=None,
            precedence=EGG_DIST):
        self.project_name = safe_name(project_name or 'Unknown')
        if version is not None:
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        # Metadata provider; empty_provider answers for metadata-less dists.
        self._provider = metadata or empty_provider
    @classmethod
    def from_location(cls, location, basename, metadata=None,**kw):
        """Create a Distribution (of the right subclass, per extension) for a
        path entry, deriving name/version/pyver/platform from `basename`."""
        project_name, version, py_version, platform = [None]*4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in _distributionImpl:
            # .dist-info gets much metadata differently
            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group(
                    'name','ver','pyver','plat'
                )
            cls = _distributionImpl[ext.lower()]
        return cls(
            location, metadata, project_name=project_name, version=version,
            py_version=py_version, platform=platform, **kw
        )
    @property
    def hashcmp(self):
        # Key tuple used by __hash__ and all the rich comparisons below.
        return (
            self.parsed_version,
            self.precedence,
            self.key,
            _remove_md5_fragment(self.location),
            self.py_version or '',
            self.platform or '',
        )
    def __hash__(self):
        return hash(self.hashcmp)
    def __lt__(self, other):
        return self.hashcmp < other.hashcmp
    def __le__(self, other):
        return self.hashcmp <= other.hashcmp
    def __gt__(self, other):
        return self.hashcmp > other.hashcmp
    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # It's not a Distribution, so they are not equal
            return False
        return self.hashcmp == other.hashcmp
    def __ne__(self, other):
        return not self == other
    # These properties have to be lazy so that we don't have to load any
    # metadata until/unless it's actually needed. (i.e., some distributions
    # may not know their name or version without loading PKG-INFO)
    @property
    def key(self):
        try:
            return self._key
        except AttributeError:
            # Cache the lowercased project name on first access.
            self._key = key = self.project_name.lower()
            return key
    @property
    def parsed_version(self):
        if not hasattr(self, "_parsed_version"):
            self._parsed_version = parse_version(self.version)
        return self._parsed_version
    def _warn_legacy_version(self):
        # Emit PEP440Warning when the version is a non-PEP 440 legacy version.
        LV = packaging.version.LegacyVersion
        is_legacy = isinstance(self._parsed_version, LV)
        if not is_legacy:
            return
        # While an empty version is technically a legacy version and
        # is not a valid PEP 440 version, it's also unlikely to
        # actually come from someone and instead it is more likely that
        # it comes from setuptools attempting to parse a filename and
        # including it in the list. So for that we'll gate this warning
        # on if the version is anything at all or not.
        if not self.version:
            return
        tmpl = textwrap.dedent("""
            '{project_name} ({version})' is being parsed as a legacy,
            non PEP 440,
            version. You may find odd behavior and sort order.
            In particular it will be sorted as less than 0.0. It
            is recommended to migrate to PEP 440 compatible
            versions.
            """).strip().replace('\n', ' ')
        warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
    @property
    def version(self):
        try:
            return self._version
        except AttributeError:
            # No explicit version; fall back to the metadata file's header.
            for line in self._get_metadata(self.PKG_INFO):
                if line.lower().startswith('version:'):
                    self._version = safe_version(line.split(':',1)[1].strip())
                    return self._version
            else:
                tmpl = "Missing 'Version:' header and/or %s file"
                raise ValueError(tmpl % self.PKG_INFO, self)
    @property
    def _dep_map(self):
        # Lazily built {extra_name_or_None: [Requirement, ...]} from the
        # requires.txt / depends.txt metadata sections.
        try:
            return self.__dep_map
        except AttributeError:
            dm = self.__dep_map = {None: []}
            for name in 'requires.txt', 'depends.txt':
                for extra, reqs in split_sections(self._get_metadata(name)):
                    if extra:
                        if ':' in extra:
                            # "extra:marker" section: drop requirements whose
                            # environment marker is invalid or evaluates false.
                            extra, marker = extra.split(':', 1)
                            if invalid_marker(marker):
                                # XXX warn
                                reqs=[]
                            elif not evaluate_marker(marker):
                                reqs=[]
                        extra = safe_extra(extra) or None
                    dm.setdefault(extra,[]).extend(parse_requirements(reqs))
            return dm
    def requires(self, extras=()):
        """List of Requirements needed for this distro if `extras` are used"""
        dm = self._dep_map
        deps = []
        deps.extend(dm.get(None, ()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError:
                raise UnknownExtra(
                    "%s has no such extra feature %r" % (self, ext)
                )
        return deps
    def _get_metadata(self, name):
        # Yield the lines of metadata file `name`; yields nothing if absent.
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line
    def activate(self, path=None):
        """Ensure distribution is importable on `path` (default=sys.path)"""
        if path is None:
            path = sys.path
        self.insert_on(path)
        if path is sys.path:
            fixup_namespace_packages(self.location)
            for pkg in self._get_metadata('namespace_packages.txt'):
                if pkg in sys.modules:
                    declare_namespace(pkg)
    def egg_name(self):
        """Return what this distribution's standard .egg filename should be"""
        filename = "%s-%s-py%s" % (
            to_filename(self.project_name), to_filename(self.version),
            self.py_version or PY_MAJOR
        )
        if self.platform:
            filename += '-' + self.platform
        return filename
    def __repr__(self):
        if self.location:
            return "%s (%s)" % (self, self.location)
        else:
            return str(self)
    def __str__(self):
        try:
            # getattr's default only covers AttributeError; the version
            # property's ValueError is caught here instead.
            version = getattr(self, 'version', None)
        except ValueError:
            version = None
        version = version or "[unknown version]"
        return "%s %s" % (self.project_name, version)
    def __getattr__(self, attr):
        """Delegate all unrecognized public attributes to .metadata provider"""
        if attr.startswith('_'):
            raise AttributeError(attr)
        return getattr(self._provider, attr)
    @classmethod
    def from_filename(cls, filename, metadata=None, **kw):
        """Shortcut for from_location() with a normalized full filename."""
        return cls.from_location(
            _normalize_cached(filename), os.path.basename(filename), metadata,
            **kw
        )
    def as_requirement(self):
        """Return a ``Requirement`` that matches this distribution exactly"""
        if isinstance(self.parsed_version, packaging.version.Version):
            spec = "%s==%s" % (self.project_name, self.parsed_version)
        else:
            # Legacy versions need the arbitrary-equality "===" operator.
            spec = "%s===%s" % (self.project_name, self.parsed_version)
        return Requirement.parse(spec)
    def load_entry_point(self, group, name):
        """Return the `name` entry point of `group` or raise ImportError"""
        ep = self.get_entry_info(group, name)
        if ep is None:
            raise ImportError("Entry point %r not found" % ((group, name),))
        return ep.load()
    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        try:
            ep_map = self._ep_map
        except AttributeError:
            # Parse entry_points.txt once and cache the resulting map.
            ep_map = self._ep_map = EntryPoint.parse_map(
                self._get_metadata('entry_points.txt'), self
            )
        if group is not None:
            return ep_map.get(group,{})
        return ep_map
    def get_entry_info(self, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``"""
        return self.get_entry_map(group).get(name)
    def insert_on(self, path, loc = None):
        """Insert self.location in path before its nearest parent directory"""
        loc = loc or self.location
        if not loc:
            return
        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        # Normalized copy of `path` for comparisons; falsy entries pass through.
        npath= [(p and _normalize_cached(p) or p) for p in path]
        for p, item in enumerate(npath):
            if item == nloc:
                break
            elif item == bdir and self.precedence == EGG_DIST:
                # if it's an .egg, give it precedence over its directory
                if path is sys.path:
                    self.check_version_conflict()
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            # Neither loc nor its parent dir found: append at the end.
            if path is sys.path:
                self.check_version_conflict()
            path.append(loc)
            return
        # p is the spot where we found or inserted loc; now remove duplicates
        while True:
            try:
                np = npath.index(nloc, p+1)
            except ValueError:
                break
            else:
                del npath[np], path[np]
                # ha!
                p = np
        return
    def check_version_conflict(self):
        # Warn when a module provided by this dist was already imported from
        # a different location before this dist's location was added.
        if self.key == 'setuptools':
            # ignore the inevitable setuptools self-conflicts :(
            return
        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            if (modname not in sys.modules or modname in nsp
                    or modname in _namespace_packages):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            if fn and (normalize_path(fn).startswith(loc) or
                    fn.startswith(self.location)):
                continue
            issue_warning(
                "Module %s was already imported from %s, but %s is being added"
                " to sys.path" % (modname, fn, self.location),
            )
    def has_version(self):
        # True when a version is determinable; warns and returns False if not.
        try:
            self.version
        except ValueError:
            issue_warning("Unbuilt egg for " + repr(self))
            return False
        return True
    def clone(self,**kw):
        """Copy this distribution, substituting in any changed keyword args"""
        names = 'project_name version py_version platform location precedence'
        for attr in names.split():
            kw.setdefault(attr, getattr(self, attr, None))
        kw.setdefault('metadata', self._provider)
        return self.__class__(**kw)
    @property
    def extras(self):
        # All named extras this distribution declares dependencies for.
        return [dep for dep in self._dep_map if dep]
class DistInfoDistribution(Distribution):
    """Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
    PKG_INFO = 'METADATA'
    # Matches a bare version inside parentheses/commas, e.g. "(1.0," -> add "==".
    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
    @property
    def _parsed_pkg_info(self):
        """Parse and cache metadata"""
        try:
            return self._pkg_info
        except AttributeError:
            metadata = self.get_metadata(self.PKG_INFO)
            self._pkg_info = email.parser.Parser().parsestr(metadata)
            return self._pkg_info
    @property
    def _dep_map(self):
        # Name mangling makes this _DistInfoDistribution__dep_map, distinct
        # from the base class's _Distribution__dep_map cache.
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dependencies()
            return self.__dep_map
    def _preparse_requirement(self, requires_dist):
        """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
        Split environment marker, add == prefix to version specifiers as
        necessary, and remove parenthesis.
        """
        parts = requires_dist.split(';', 1) + ['']
        distvers = parts[0].strip()
        mark = parts[1].strip()
        distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
        distvers = distvers.replace('(', '').replace(')', '')
        return (distvers, mark)
    def _compute_dependencies(self):
        """Recompute this distribution's dependencies."""
        from pip._vendor._markerlib import compile as compile_marker
        dm = self.__dep_map = {None: []}
        reqs = []
        # Including any condition expressions
        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
            distvers, mark = self._preparse_requirement(req)
            parsed = next(parse_requirements(distvers))
            # Attach the compiled environment-marker predicate to the req.
            parsed.marker_fn = compile_marker(mark)
            reqs.append(parsed)
        def reqs_for_extra(extra):
            # Requirements whose marker holds when "extra" is the given name.
            for req in reqs:
                if req.marker_fn(override={'extra':extra}):
                    yield req
        common = frozenset(reqs_for_extra(None))
        dm[None].extend(common)
        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
            extra = safe_extra(extra.strip())
            # Per-extra deps exclude those already unconditional.
            dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
        return dm
# Maps a metadata file/directory extension to the Distribution subclass
# that knows how to read that metadata format.
_distributionImpl = {
    '.egg': Distribution,
    '.egg-info': Distribution,
    '.dist-info': DistInfoDistribution,
    }
def issue_warning(*args, **kw):
    """Emit a warning attributed to the first caller outside this module.

    Walks up the call stack until a frame whose globals differ from this
    module's is found, then forwards everything to ``warnings.warn`` with a
    matching ``stacklevel``.
    """
    module_globals = globals()
    depth = 1
    try:
        while sys._getframe(depth).f_globals is module_globals:
            depth += 1
    except ValueError:
        # Ran off the top of the stack; warn from wherever we stopped.
        pass
    warnings.warn(stacklevel=depth + 1, *args, **kw)
class RequirementParseError(ValueError):
    """Raised for malformed requirement strings in parse_requirements()."""
    def __str__(self):
        # NOTE(review): assumes all args are strings (true for every raise
        # site in this module); join would fail on non-str args.
        return ' '.join(self.args)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`
    `strs` must be a string, or a (possibly-nested) iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))
    def scan_list(ITEM, TERMINATOR, line, p, groups, item_name):
        # Scan a comma-separated list of ITEM tokens starting at position p,
        # ending at TERMINATOR.  Returns the (possibly advanced) line, the
        # new position, and the collected match groups.
        items = []
        while not TERMINATOR(line, p):
            if CONTINUE(line, p):
                # Backslash continuation: pull the next logical line.
                try:
                    line = next(lines)
                    p = 0
                except StopIteration:
                    msg = "\\ must not appear on the last nonblank line"
                    raise RequirementParseError(msg)
            match = ITEM(line, p)
            if not match:
                msg = "Expected " + item_name + " in"
                raise RequirementParseError(msg, line, "at", line[p:])
            items.append(match.group(*groups))
            p = match.end()
            match = COMMA(line, p)
            if match:
                # skip the comma
                p = match.end()
            elif not TERMINATOR(line, p):
                msg = "Expected ',' or end-of-list in"
                raise RequirementParseError(msg, line, "at", line[p:])
        match = TERMINATOR(line, p)
        # skip the terminator, if any
        if match:
            p = match.end()
        return line, p, items
    for line in lines:
        match = DISTRO(line)
        if not match:
            raise RequirementParseError("Missing distribution spec", line)
        project_name = match.group(1)
        p = match.end()
        extras = []
        match = OBRACKET(line, p)
        if match:
            # "[extra1, extra2]" list of extras follows the project name.
            p = match.end()
            line, p, extras = scan_list(
                DISTRO, CBRACKET, line, p, (1,), "'extra' name"
            )
        line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2),
            "version spec")
        specs = [(op, val) for op, val in specs]
        yield Requirement(project_name, specs, extras)
class Requirement:
    """A parsed requirement: project name, version specifiers and extras."""
    def __init__(self, project_name, specs, extras):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        self.unsafe_name, project_name = project_name, safe_name(project_name)
        self.project_name, self.key = project_name, project_name.lower()
        # Build a PEP 440 SpecifierSet from the (op, version) pairs.
        self.specifier = packaging.specifiers.SpecifierSet(
            ",".join(["".join([x, y]) for x, y in specs])
        )
        self.specs = specs
        self.extras = tuple(map(safe_extra, extras))
        # Equality/hash key: case-insensitive name, specifiers, extras.
        self.hashCmp = (
            self.key,
            self.specifier,
            frozenset(self.extras),
        )
        self.__hash = hash(self.hashCmp)
    def __str__(self):
        extras = ','.join(self.extras)
        if extras:
            extras = '[%s]' % extras
        return '%s%s%s' % (self.project_name, extras, self.specifier)
    def __eq__(self, other):
        return (
            isinstance(other, Requirement) and
            self.hashCmp == other.hashCmp
        )
    def __ne__(self, other):
        return not self == other
    def __contains__(self, item):
        # Accepts either a Distribution or a version string.
        if isinstance(item, Distribution):
            if item.key != self.key:
                return False
            item = item.version
        # Allow prereleases always in order to match the previous behavior of
        # this method. In the future this should be smarter and follow PEP 440
        # more accurately.
        return self.specifier.contains(item, prereleases=True)
    def __hash__(self):
        return self.__hash
    def __repr__(self): return "Requirement.parse(%r)" % str(self)
    @staticmethod
    def parse(s):
        """Parse exactly one requirement from `s`; raise on zero or many."""
        reqs = list(parse_requirements(s))
        if reqs:
            if len(reqs) == 1:
                return reqs[0]
            raise ValueError("Expected only one requirement", s)
        raise ValueError("No requirements found", s)
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
    """Return the adapter factory registered for `ob`'s most-derived type.

    Walks the MRO of ``ob``'s class from most- to least-derived and returns
    the first matching registry entry; returns ``None`` if nothing matches.
    """
    klass = getattr(ob, '__class__', type(ob))
    for candidate in _get_mro(klass):
        if candidate in registry:
            return registry[candidate]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _bypass_ensure_directory(path):
    """Sandbox-bypassing version of ensure_directory()"""
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    # `split`, `isdir`, `mkdir` are module-level aliases bound before the
    # sandbox can patch the os module.
    dirname, filename = split(path)
    if dirname and filename and not isdir(dirname):
        # Create ancestors first, then this directory.
        _bypass_ensure_directory(dirname)
        mkdir(dirname, 0o755)
def split_sections(s):
    """Yield (section, content) pairs from ini-style text.

    ``section`` is the stripped text inside each ``[...]`` header and
    ``content`` is the list of stripped, non-blank, non-comment lines below
    it (as produced by ``yield_lines``).  Lines appearing before any header
    are yielded under a section of ``None``.
    """
    current_name = None
    current_lines = []
    for line in yield_lines(s):
        if not line.startswith("["):
            current_lines.append(line)
            continue
        if not line.endswith("]"):
            raise ValueError("Invalid section heading", line)
        # A new header closes the previous segment (unless it was empty
        # leading content).
        if current_name or current_lines:
            yield current_name, current_lines
        current_name = line[1:-1].strip()
        current_lines = []
    # Flush the final segment; always yielded, even when empty.
    yield current_name, current_lines
def _mkstemp(*args,**kw):
    """tempfile.mkstemp() variant that works inside the install sandbox.

    The sandbox patches ``os.open``; restore the saved original
    (``os_open``) for the duration of the call.
    """
    old_open = os.open
    try:
        # temporarily bypass sandboxing
        os.open = os_open
        return tempfile.mkstemp(*args,**kw)
    finally:
        # and then put it back
        os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
    """Call ``f(*args, **kwargs)`` immediately and return ``f``.

    Used as a decorator to run a function once at module import time while
    keeping it referenceable by name.
    """
    f(*args, **kwargs)
    return f
@_call_aside
def _initialize(g=globals()):
    "Set up global resource manager (deliberately not state-saved)"
    manager = ResourceManager()
    g['_manager'] = manager
    # Re-export the manager's public methods (resource_string, etc.) as
    # module-level functions.
    for name in dir(manager):
        if not name.startswith('_'):
            g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
    """
    Prepare the master working set and make the ``require()``
    API available.
    This function has explicit effects on the global state
    of pkg_resources. It is intended to be invoked once at
    the initialization of this module.
    Invocation by other packages is unsupported and done
    at their own risk.
    """
    working_set = WorkingSet._build_master()
    _declare_state('object', working_set=working_set)
    # Bound methods exported below as module-level API functions.
    require = working_set.require
    iter_entry_points = working_set.iter_entry_points
    add_activation_listener = working_set.subscribe
    run_script = working_set.run_script
    # backward compatibility
    run_main = run_script
    # Activate all distributions already on sys.path, and ensure that
    # all distributions added to the working set in the future (e.g. by
    # calling ``require()``) will get activated as well.
    add_activation_listener(lambda dist: dist.activate())
    working_set.entries=[]
    # match order
    list(map(working_set.add_entry, sys.path))
    # Publish every local defined above as a module-level global.
    globals().update(locals())
| cc0-1.0 |
Endika/odoo | addons/account/wizard/account_report_common_journal.py | 385 | 2942 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_common_journal_report(osv.osv_memory):
    """Transient wizard model for the common journal report options."""
    _name = 'account.common.journal.report'
    _description = 'Account Common Journal Report'
    _inherit = "account.common.report"
    _columns = {
        'amount_currency': fields.boolean("With Currency", help="Print Report with the currency column if the currency differs from the company currency."),
    }
    def _build_contexts(self, cr, uid, ids, data, context=None):
        """Extend the base context with the period ids matching the filter."""
        if context is None:
            context = {}
        result = super(account_common_journal_report, self)._build_contexts(cr, uid, ids, data, context=context)
        if data['form']['filter'] == 'filter_date':
            # Parameterized query: collect periods overlapping the date range.
            cr.execute('SELECT period_id FROM account_move_line WHERE date >= %s AND date <= %s', (data['form']['date_from'], data['form']['date_to']))
            result['periods'] = map(lambda x: x[0], cr.fetchall())
        elif data['form']['filter'] == 'filter_period':
            result['periods'] = self.pool.get('account.period').build_ctx_periods(cr, uid, data['form']['period_from'], data['form']['period_to'])
        return result
    def pre_print_report(self, cr, uid, ids, data, context=None):
        """Populate data['form'] with currency flag and journal-period ids."""
        if context is None:
            context = {}
        data['form'].update(self.read(cr, uid, ids, ['amount_currency'], context=context)[0])
        # Fall back to all draft fiscal years when none was selected.
        fy_ids = data['form']['fiscalyear_id'] and [data['form']['fiscalyear_id']] or self.pool.get('account.fiscalyear').search(cr, uid, [('state', '=', 'draft')], context=context)
        period_list = data['form']['periods'] or self.pool.get('account.period').search(cr, uid, [('fiscalyear_id', 'in', fy_ids)], context=context)
        data['form']['active_ids'] = self.pool.get('account.journal.period').search(cr, uid, [('journal_id', 'in', data['form']['journal_ids']), ('period_id', 'in', period_list)], context=context)
        return data
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
buguelos/odoo | addons/point_of_sale/__openerp__.py | 261 | 3612 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Point of Sale',
'version': '1.0.1',
'category': 'Point Of Sale',
'sequence': 6,
'summary': 'Touchscreen Interface for Shops',
'description': """
Quick and Easy sale process
===========================
This module allows you to manage your shop sales very easily with a fully web based touchscreen interface.
It is compatible with all PC tablets and the iPad, offering multiple payment methods.
Product selection can be done in several ways:
* Using a barcode reader
* Browsing through categories of products or via a text search.
Main Features
-------------
* Fast encoding of the sale
* Choose one payment method (the quick way) or split the payment between several payment methods
* Computation of the amount of money to return
* Create and confirm the picking list automatically
* Allows the user to create an invoice automatically
* Refund previous sales
""",
'author': 'OpenERP SA',
'depends': ['sale_stock'],
'data': [
'data/report_paperformat.xml',
'security/point_of_sale_security.xml',
'security/ir.model.access.csv',
'wizard/pos_box.xml',
'wizard/pos_confirm.xml',
'wizard/pos_details.xml',
'wizard/pos_discount.xml',
'wizard/pos_open_statement.xml',
'wizard/pos_payment.xml',
'wizard/pos_session_opening.xml',
'views/templates.xml',
'point_of_sale_report.xml',
'point_of_sale_view.xml',
'point_of_sale_sequence.xml',
'point_of_sale_data.xml',
'report/pos_order_report_view.xml',
'point_of_sale_workflow.xml',
'account_statement_view.xml',
'account_statement_report.xml',
'res_users_view.xml',
'res_partner_view.xml',
'views/report_statement.xml',
'views/report_usersproduct.xml',
'views/report_receipt.xml',
'views/report_saleslines.xml',
'views/report_detailsofsales.xml',
'views/report_payment.xml',
'views/report_sessionsummary.xml',
'views/point_of_sale.xml',
],
'demo': [
'point_of_sale_demo.xml',
'account_statement_demo.xml',
],
'test': [
'test/00_register_open.yml',
'test/01_order_to_payment.yml',
'test/02_order_to_invoice.yml',
'test/point_of_sale_report.yml',
'test/account_statement_reports.yml',
],
'installable': True,
'application': True,
'qweb': ['static/src/xml/pos.xml'],
'website': 'https://www.odoo.com/page/point-of-sale',
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
makc/three.js.fork | utils/exporters/max/annotate/annotate.py | 160 | 3036 | #!/usr/bin/env python
__author__ = 'Andrew Dunai <andrew@dun.ai>'
import sys
import json
import argparse
import re
from collections import namedtuple
try:
from PyQt4 import QtGui
import argparseui
except ImportError:
CAN_GUI = False
else:
CAN_GUI = True
# Matches "name=start..end", e.g. "walk=11..20".
range_regexp = re.compile(r'^([\w\d]+)\=([\d]+)\.\.([\d]+)$')
Range = namedtuple('Range', ('name', 'start', 'end'))
def parse_range(value):
    """argparse type: parse "name=first..last" into a zero-based Range."""
    m = range_regexp.match(value)
    if m is None:
        raise argparse.ArgumentTypeError(
            'Ranges should be in form "name=frame..frame"')
    name, first, last = m.groups()
    # Convert 1-based inclusive frame numbers to 0-based indices.
    return Range(name, int(first) - 1, int(last) - 1)
# argparse epilog: usage example, plus an install hint when the optional
# GUI dependencies (PyQt4 + argparseui) failed to import.
epilog = 'example:\n %(prog)s -i model.js -o model.new.js idle=1..10 walk=11..20'
if not CAN_GUI:
    epilog += '\npro tip:\n Install PyQt4 and argparseui packages to use GUI ("-u" option).'
epilog += '\nCreated by {}'.format(__author__)
parser = argparse.ArgumentParser(
    description='Split THREE.js model animation into seperate parts.',
    epilog=epilog,
    formatter_class=argparse.RawDescriptionHelpFormatter
)
# The "-u" flag is only registered when the GUI dependencies imported OK.
CAN_GUI and parser.add_argument('-u', '--gui', help='run in GUI', dest='gui', action='store_true')
parser.add_argument('-i', metavar='FILE', help='input file name', required=True, dest='source', type=argparse.FileType('r'))
parser.add_argument('-o', metavar='FILE', help='output file name', required=True, dest='destination', type=argparse.FileType('w'))
parser.add_argument('range', nargs='+', help='range in format "name=frame..frame"', type=parse_range)
def process(parser):
    """Split a THREE.js model's single "animation" into named clips.

    Reads the JSON model from the ``-i`` file, slices the animation into
    one clip per requested Range, re-bases each clip's key times to start
    at 0, and writes the result (an "animations" mapping replacing
    "animation") to the ``-o`` file.
    """
    args = parser.parse_args()
    data = json.loads(args.source.read())
    animation = data.get('animation')
    fps = float(animation.get('fps'))
    length = float(animation.get('length'))
    # NOTE(review): frame_count is computed but never used.
    frame_count = int(length * fps)
    frame_duration = 1.0 / fps
    all_hierarchy = animation.get('hierarchy')
    animations = {}
    for r in args.range:
        # Create animation & hierarchy
        hierarchy = []
        animation = {
            'name': r.name,
            'fps': fps,
            'length': (r.end - r.start) * frame_duration,
            'hierarchy': hierarchy
        }
        # Go through each bone animation
        for bone in all_hierarchy:
            # Keep only keys whose time falls inside this range's window.
            keys = [key for key in bone['keys'] if (key['time'] >= r.start * frame_duration) and (key['time'] <= r.end * frame_duration)]
            # Patch time
            time = 0.0
            for key in keys:
                key['time'] = round(time, 3)
                time += frame_duration
            new_bone = {
                'parent': bone['parent'],
                'keys': keys
            }
            hierarchy.append(new_bone)
        animations[r.name] = animation
    del data['animation']
    data['animations'] = animations
    args.destination.write(json.dumps(data))
if '-u' in sys.argv and CAN_GUI:
    # GUI mode: wrap the CLI parser in a PyQt4 form; run only on "OK" (1).
    app = QtGui.QApplication(sys.argv)
    a = argparseui.ArgparseUi(parser)
    a.show()
    app.exec_()
    if a.result() == 1:
        process(a)
else:
    process(parser)
| mit |
pombredanne/unuk | examples/newrpc.py | 1 | 1067 | '''Experimenting with the new JSON-RPC Server
'''
import sys
import datetime
import logging
from environment import settings, local_dir
from siro.core.api import siro_api
from djpcms import http
from django.core.handlers import wsgi
from gevent.wsgi import WSGIServer
class WSGIHandler(wsgi.WSGIHandler):
    """Django WSGI handler that routes every request to the siro JSON-RPC API."""
    def __init__(self):
        super(WSGIHandler,self).__init__()
        self.http = http.get_http('django')
        self.request_class = self.http.Request
        self._handler = siro_api(http = self.http)
    def get_response(self, request):
        """Run request middleware, then dispatch to the JSON-RPC handler.

        Any unhandled error becomes a bare 500 response.
        """
        try:
            # Apply request middleware
            for middleware_method in self._request_middleware:
                response = middleware_method(request)
                if response:
                    return response
            return self._handler.serve(request)
        except Exception:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; only trap genuine errors here.
            return self.http.HttpResponse(status = 500)
if __name__ == '__main__':
    # Serve the JSON-RPC API on all interfaces, port 8020, via gevent.
    WSGIServer(('', 8020), WSGIHandler()).serve_forever()
| bsd-3-clause |
bmanojlovic/ansible | lib/ansible/plugins/terminal/vyos.py | 10 | 1815 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
    """Terminal plugin describing VyOS prompt/error patterns for Ansible."""
    # Prompts that mark the end of device output (operational and
    # configuration mode variants).
    terminal_prompts_re = [
        re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
        re.compile(r"\@[\w\-\.]+:\S+?[>#\$] ?$")
    ]
    # Output patterns that indicate a failed command.
    terminal_errors_re = [
        re.compile(r"\n\s*Invalid command:"),
        re.compile(r"\nCommit failed"),
        re.compile(r"\n\s+Set failed"),
    ]
    # NOTE(review): os.getenv returns a str when the variable is set but the
    # default is an int 10000; both work with the %s formatting below.
    terminal_length = os.getenv('ANSIBLE_VYOS_TERMINAL_LENGTH', 10000)
    def on_open_shell(self):
        """Configure terminal paging right after the shell opens."""
        try:
            self._exec_cli_command('set terminal length 0')
            self._exec_cli_command('set terminal length %s' % self.terminal_length)
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to set terminal parameters')
    @staticmethod
    def guess_network_os(conn):
        """Identify VyOS by inspecting /etc/issue over the SSH connection."""
        stdin, stdout, stderr = conn.exec_command('cat /etc/issue')
        if 'VyOS' in stdout.read():
            return 'vyos'
| gpl-3.0 |
mattnenterprise/servo | tests/wpt/web-platform-tests/resources/test/conftest.py | 4 | 4612 | import io
import json
import os
import html5lib
import pytest
from selenium import webdriver
from wptserver import WPTServer
# Encoding used to read the test HTML files.
ENC = 'utf8'
HERE = os.path.dirname(os.path.abspath(__file__))
# Repository root: two levels above resources/test/.
WPT_ROOT = os.path.normpath(os.path.join(HERE, '..', '..'))
HARNESS = os.path.join(HERE, 'harness.html')
def pytest_addoption(parser):
    """Register the --binary CLI option (path to the browser under test)."""
    parser.addoption(
        "--binary",
        action="store",
        default=None,
        help="path to browser binary",
    )
def pytest_collect_file(path, parent):
    """Collect ``.html`` files as HTMLItem tests; ignore everything else."""
    if path.ext.lower() != '.html':
        return None
    return HTMLItem(str(path), parent)
def pytest_configure(config):
    """Launch Firefox and the local WPT server for the whole test session.

    Cleanup callbacks are registered so both are torn down when the session
    ends.
    """
    config.driver = webdriver.Firefox(firefox_binary=config.getoption("--binary"))
    config.server = WPTServer(WPT_ROOT)
    config.server.start()
    config.add_cleanup(config.server.stop)
    config.add_cleanup(config.driver.quit)
class HTMLItem(pytest.Item, pytest.Collector):
def __init__(self, filename, parent):
self.filename = filename
with io.open(filename, encoding=ENC) as f:
markup = f.read()
parsed = html5lib.parse(markup, namespaceHTMLElements=False)
name = None
self.expected = None
for element in parsed.getiterator():
if not name and element.tag == 'title':
name = element.text
continue
if element.attrib.get('id') == 'expected':
self.expected = json.loads(unicode(element.text))
continue
if not name:
raise ValueError('No name found in file: %s' % filename)
super(HTMLItem, self).__init__(name, parent)
def reportinfo(self):
return self.fspath, None, self.filename
def repr_failure(self, excinfo):
return pytest.Collector.repr_failure(self, excinfo)
def runtest(self):
driver = self.session.config.driver
server = self.session.config.server
driver.get(server.url(HARNESS))
actual = driver.execute_async_script('runTest("%s", "foo", arguments[0])' % server.url(str(self.filename)))
# Test object ordering is not guaranteed. This weak assertion verifies
# that the indices are unique and sequential
indices = [test_obj.get('index') for test_obj in actual['tests']]
self._assert_sequence(indices)
summarized = {}
summarized[u'summarized_status'] = self._summarize_status(actual['status'])
summarized[u'summarized_tests'] = [
self._summarize_test(test) for test in actual['tests']]
summarized[u'summarized_tests'].sort(key=lambda test_obj: test_obj.get('name'))
summarized[u'type'] = actual['type']
if not self.expected:
assert summarized[u'summarized_status'][u'status_string'] == u'OK', summarized[u'summarized_status'][u'message']
for test in summarized[u'summarized_tests']:
msg = "%s\n%s:\n%s" % (test[u'name'], test[u'message'], test[u'stack'])
assert test[u'status_string'] == u'PASS', msg
else:
assert summarized == self.expected
@staticmethod
def _assert_sequence(nums):
if nums and len(nums) > 0:
assert nums == range(1, nums[-1] + 1)
@staticmethod
def _scrub_stack(test_obj):
copy = dict(test_obj)
assert 'stack' in copy
if copy['stack'] is not None:
copy['stack'] = u'(implementation-defined)'
return copy
@staticmethod
def _expand_status(status_obj):
    """Replace the numeric "status" field with its symbolic name.

    Scans the object for enum definitions (UPPERCASE keys holding ints),
    removes them, and stores the name matching the current "status"
    value under "status_string". Mutates and returns *status_obj*.
    """
    # Materialize items() first so keys can be deleted while scanning.
    for key, value in [item for item in status_obj.items()]:
        # In "status" and "test" objects, the "status" value enum
        # definitions are interspersed with properties for unrelated
        # metadata. The following condition is a best-effort attempt to
        # ignore non-enum properties.
        if key != key.upper() or not isinstance(value, int):
            continue

        del status_obj[key]

        if status_obj['status'] == value:
            status_obj[u'status_string'] = key

    del status_obj['status']

    return status_obj
@staticmethod
def _summarize_test(test_obj):
    """Reduce a raw test-result object to a stable, comparable form.

    Drops the run-dependent "index", asserts the test reached its
    COMPLETE phase, then normalizes the stack and status fields.
    Mutates *test_obj*.
    """
    del test_obj['index']

    # Every test must have finished: "phase" mirrors "phases['COMPLETE']".
    assert 'phase' in test_obj
    assert 'phases' in test_obj
    assert 'COMPLETE' in test_obj['phases']
    assert test_obj['phase'] == test_obj['phases']['COMPLETE']
    del test_obj['phases']
    del test_obj['phase']

    return HTMLItem._expand_status(HTMLItem._scrub_stack(test_obj))
@staticmethod
def _summarize_status(status_obj):
    """Normalize a raw harness status object (scrub stack, expand enum)."""
    return HTMLItem._expand_status(HTMLItem._scrub_stack(status_obj))
| mpl-2.0 |
tomhughes/python-mapnik | test/python_tests/python_plugin_test.py | 6 | 6233 | # #!/usr/bin/env python
# # -*- coding: utf-8 -*-
# import os
# import math
# import mapnik
# import sys
# from utilities import execution_path, run_all
# from nose.tools import *
# def setup():
# # All of the paths used are relative, if we run the tests
# # from another directory we need to chdir()
# os.chdir(execution_path('.'))
# class PointDatasource(mapnik.PythonDatasource):
# def __init__(self):
# super(PointDatasource, self).__init__(
# geometry_type = mapnik.DataGeometryType.Point,
# envelope = mapnik.Box2d(0,-10,100,110),
# data_type = mapnik.DataType.Vector
# )
# def features(self, query):
# return mapnik.PythonDatasource.wkt_features(
# keys = ('label',),
# features = (
# ( 'POINT (5 6)', { 'label': 'foo-bar'} ),
# ( 'POINT (60 50)', { 'label': 'buzz-quux'} ),
# )
# )
# class ConcentricCircles(object):
# def __init__(self, centre, bounds, step=1):
# self.centre = centre
# self.bounds = bounds
# self.step = step
# class Iterator(object):
# def __init__(self, container):
# self.container = container
# centre = self.container.centre
# bounds = self.container.bounds
# step = self.container.step
# self.radius = step
# def next(self):
# points = []
# for alpha in xrange(0, 361, 5):
# x = math.sin(math.radians(alpha)) * self.radius + self.container.centre[0]
# y = math.cos(math.radians(alpha)) * self.radius + self.container.centre[1]
# points.append('%s %s' % (x,y))
# circle = 'POLYGON ((' + ','.join(points) + '))'
# # has the circle grown so large that the boundary is entirely within it?
# tl = (self.container.bounds.maxx, self.container.bounds.maxy)
# tr = (self.container.bounds.maxx, self.container.bounds.maxy)
# bl = (self.container.bounds.minx, self.container.bounds.miny)
# br = (self.container.bounds.minx, self.container.bounds.miny)
# def within_circle(p):
# delta_x = p[0] - self.container.centre[0]
# delta_y = p[0] - self.container.centre[0]
# return delta_x*delta_x + delta_y*delta_y < self.radius*self.radius
# if all(within_circle(p) for p in (tl,tr,bl,br)):
# raise StopIteration()
# self.radius += self.container.step
# return ( circle, { } )
# def __iter__(self):
# return ConcentricCircles.Iterator(self)
# class CirclesDatasource(mapnik.PythonDatasource):
# def __init__(self, centre_x=-20, centre_y=0, step=10):
# super(CirclesDatasource, self).__init__(
# geometry_type = mapnik.DataGeometryType.Polygon,
# envelope = mapnik.Box2d(-180, -90, 180, 90),
# data_type = mapnik.DataType.Vector
# )
# # note that the plugin loader will set all arguments to strings and will not try to parse them
# centre_x = int(centre_x)
# centre_y = int(centre_y)
# step = int(step)
# self.centre_x = centre_x
# self.centre_y = centre_y
# self.step = step
# def features(self, query):
# centre = (self.centre_x, self.centre_y)
# return mapnik.PythonDatasource.wkt_features(
# keys = (),
# features = ConcentricCircles(centre, query.bbox, self.step)
# )
# if 'python' in mapnik.DatasourceCache.plugin_names():
# # make sure we can load from ourself as a module
# sys.path.append(execution_path('.'))
# def test_python_point_init():
# ds = mapnik.Python(factory='python_plugin_test:PointDatasource')
# e = ds.envelope()
# assert_almost_equal(e.minx, 0, places=7)
# assert_almost_equal(e.miny, -10, places=7)
# assert_almost_equal(e.maxx, 100, places=7)
# assert_almost_equal(e.maxy, 110, places=7)
# def test_python_circle_init():
# ds = mapnik.Python(factory='python_plugin_test:CirclesDatasource')
# e = ds.envelope()
# assert_almost_equal(e.minx, -180, places=7)
# assert_almost_equal(e.miny, -90, places=7)
# assert_almost_equal(e.maxx, 180, places=7)
# assert_almost_equal(e.maxy, 90, places=7)
# def test_python_circle_init_with_args():
# ds = mapnik.Python(factory='python_plugin_test:CirclesDatasource', centre_x=40, centre_y=7)
# e = ds.envelope()
# assert_almost_equal(e.minx, -180, places=7)
# assert_almost_equal(e.miny, -90, places=7)
# assert_almost_equal(e.maxx, 180, places=7)
# assert_almost_equal(e.maxy, 90, places=7)
# def test_python_point_rendering():
# m = mapnik.Map(512,512)
# mapnik.load_map(m,'../data/python_plugin/python_point_datasource.xml')
# m.zoom_all()
# im = mapnik.Image(512,512)
# mapnik.render(m,im)
# actual = '/tmp/mapnik-python-point-render1.png'
# expected = 'images/support/mapnik-python-point-render1.png'
# im.save(actual)
# expected_im = mapnik.Image.open(expected)
# eq_(im.tostring('png32'),expected_im.tostring('png32'),
# 'failed comparing actual (%s) and expected (%s)' % (actual,'tests/python_tests/'+ expected))
# def test_python_circle_rendering():
# m = mapnik.Map(512,512)
# mapnik.load_map(m,'../data/python_plugin/python_circle_datasource.xml')
# m.zoom_all()
# im = mapnik.Image(512,512)
# mapnik.render(m,im)
# actual = '/tmp/mapnik-python-circle-render1.png'
# expected = 'images/support/mapnik-python-circle-render1.png'
# im.save(actual)
# expected_im = mapnik.Image.open(expected)
# eq_(im.tostring('png32'),expected_im.tostring('png32'),
# 'failed comparing actual (%s) and expected (%s)' % (actual,'tests/python_tests/'+ expected))
# if __name__ == "__main__":
# setup()
# run_all(eval(x) for x in dir() if x.startswith("test_"))
| lgpl-2.1 |
sfepy/sfepy | sfepy/homogenization/coefs_elastic.py | 4 | 3052 | from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import output, assert_, get_default, Struct
from sfepy.homogenization.coefs_base import CorrSolution, \
TCorrectorsViaPressureEVP, CorrMiniApp
from sfepy.solvers.ts import TimeStepper
from six.moves import range
class PressureRHSVector(CorrMiniApp):
    """Assemble the pressure right-hand side vector of the problem."""

    def __call__(self, problem=None, data=None):
        """Evaluate the weak-form residual and return it as a vector."""
        problem = get_default(problem, self.problem)
        problem.select_variables(self.variables)
        problem.set_equations(self.equations)
        problem.select_bcs(ebc_names=self.ebcs, epbc_names=self.epbcs,
                           lcbc_names=self.get('lcbcs', []))

        # Build a state vector with the essential boundary conditions applied.
        state = problem.create_state()
        state.apply_ebc()

        equations = problem.equations
        equations.set_variables_from_state(state.vec)

        rhs = equations.create_stripped_state_vector()
        equations.time_update_materials(problem.get_timestepper())
        equations.evaluate(mode='weak', dw_mode='vector', asm_obj=rhs)

        return rhs
class TCorrectorsRSViaPressureEVP(TCorrectorsViaPressureEVP):
    """Time correctors of the (r, s) strain components, computed via the
    pressure eigenvalue problem."""

    def __call__(self, problem=None, data=None):
        """data: corrs_rs, evp"""
        problem = get_default(problem, self.problem)
        self.init_solvers(problem)
        ts = problem.get_timestepper()

        corrs, evp = [data[ii] for ii in self.requires]
        assert_(evp.ebcs == self.ebcs)
        assert_(evp.epbcs == self.epbcs)

        dim = problem.get_dim()

        self.setup_equations(self.equations)

        solve = self.compute_correctors
        # Use the builtin `object` dtype: the `nm.object` alias was
        # deprecated in NumPy 1.20 and removed in NumPy 1.24.
        states = nm.zeros((dim, dim), dtype=object)
        clist = []
        for ir in range(dim):
            for ic in range(dim):
                states[ir, ic] = solve(evp, -1.0, corrs.states[ir, ic], ts)
                clist.append((ir, ic))

        corr_sol = CorrSolution(name=self.name,
                                states=states,
                                n_step=ts.n_step,
                                components=clist)

        self.save(corr_sol, problem, ts)

        return corr_sol
class TCorrectorsPressureViaPressureEVP(TCorrectorsViaPressureEVP):
    """Time correctors of the pressure, computed via the pressure
    eigenvalue problem."""

    def __call__(self, problem=None, data=None, save_hook=None):
        """data: corrs_pressure, evp, optionally vec_g"""
        problem = get_default(problem, self.problem)
        self.init_solvers(problem)
        ts = problem.get_timestepper()

        corrs, evp = [data[ii] for ii in self.requires[:2]]
        # The third requirement (vec_g) is optional.
        vec_g = data[self.requires[2]] if len(self.requires) == 3 else None

        assert_(evp.ebcs == self.ebcs)
        assert_(evp.epbcs == self.epbcs)

        self.setup_equations(self.equations)

        state = self.compute_correctors(evp, 1.0, corrs.state, ts,
                                        vec_g=vec_g)

        corr_sol = CorrSolution(name=self.name,
                                state=state,
                                n_step=ts.n_step)

        self.save(corr_sol, problem, ts)

        return corr_sol
| bsd-3-clause |
baylee/django | tests/template_tests/syntax_tests/test_firstof.py | 177 | 3215 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class FirstOfTagTests(SimpleTestCase):
    """Tests for the ``{% firstof %}`` template tag."""

    @setup({'firstof01': '{% firstof a b c %}'})
    def test_firstof01(self):
        # All candidates falsy -> empty output.
        self.assertEqual(
            self.engine.render_to_string('firstof01', {'a': 0, 'b': 0, 'c': 0}),
            '')

    @setup({'firstof02': '{% firstof a b c %}'})
    def test_firstof02(self):
        self.assertEqual(
            self.engine.render_to_string('firstof02', {'a': 1, 'b': 0, 'c': 0}),
            '1')

    @setup({'firstof03': '{% firstof a b c %}'})
    def test_firstof03(self):
        self.assertEqual(
            self.engine.render_to_string('firstof03', {'a': 0, 'b': 2, 'c': 0}),
            '2')

    @setup({'firstof04': '{% firstof a b c %}'})
    def test_firstof04(self):
        self.assertEqual(
            self.engine.render_to_string('firstof04', {'a': 0, 'b': 0, 'c': 3}),
            '3')

    @setup({'firstof05': '{% firstof a b c %}'})
    def test_firstof05(self):
        # First truthy value wins even when several are truthy.
        self.assertEqual(
            self.engine.render_to_string('firstof05', {'a': 1, 'b': 2, 'c': 3}),
            '1')

    @setup({'firstof06': '{% firstof a b c %}'})
    def test_firstof06(self):
        # A missing variable counts as falsy.
        self.assertEqual(
            self.engine.render_to_string('firstof06', {'b': 0, 'c': 3}),
            '3')

    @setup({'firstof07': '{% firstof a b "c" %}'})
    def test_firstof07(self):
        # String literals are valid fallbacks.
        self.assertEqual(
            self.engine.render_to_string('firstof07', {'a': 0}),
            'c')

    @setup({'firstof08': '{% firstof a b "c and d" %}'})
    def test_firstof08(self):
        self.assertEqual(
            self.engine.render_to_string('firstof08', {'a': 0, 'b': 0}),
            'c and d')

    @setup({'firstof09': '{% firstof %}'})
    def test_firstof09(self):
        # No arguments is a syntax error.
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('firstof09')

    @setup({'firstof10': '{% firstof a %}'})
    def test_firstof10(self):
        # Output is autoescaped by default.
        self.assertEqual(
            self.engine.render_to_string('firstof10', {'a': '<'}),
            '&lt;')

    @setup({'firstof11': '{% firstof a b %}'})
    def test_firstof11(self):
        self.assertEqual(
            self.engine.render_to_string('firstof11', {'a': '<', 'b': '>'}),
            '&lt;')

    @setup({'firstof12': '{% firstof a b %}'})
    def test_firstof12(self):
        self.assertEqual(
            self.engine.render_to_string('firstof12', {'a': '', 'b': '>'}),
            '&gt;')

    @setup({'firstof13': '{% autoescape off %}{% firstof a %}{% endautoescape %}'})
    def test_firstof13(self):
        # Inside {% autoescape off %} the raw value is emitted.
        self.assertEqual(
            self.engine.render_to_string('firstof13', {'a': '<'}),
            '<')

    @setup({'firstof14': '{% firstof a|safe b %}'})
    def test_firstof14(self):
        # |safe suppresses escaping for that argument.
        self.assertEqual(
            self.engine.render_to_string('firstof14', {'a': '<'}),
            '<')

    @setup({'firstof15': '{% firstof a b c as myvar %}'})
    def test_firstof15(self):
        # The "as var" form stores the result instead of rendering it.
        context = {'a': 0, 'b': 2, 'c': 3}
        rendered = self.engine.render_to_string('firstof15', context)
        self.assertEqual(context['myvar'], '2')
        self.assertEqual(rendered, '')
| bsd-3-clause |
likaiwalkman/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/port/server_process_unittest.py | 121 | 5514 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import time
import unittest2 as unittest
from webkitpy.port.factory import PortFactory
from webkitpy.port import server_process
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.common.system.outputcapture import OutputCapture
class TrivialMockPort(object):
    """Minimal stand-in for a Port object, sufficient for ServerProcess
    tests."""

    def __init__(self):
        self.host = MockSystemHost()
        # Stub out process killing so stopping a server never touches real
        # PIDs. (The original code assigned this lambda twice; the duplicate
        # line has been removed.)
        self.host.executive.kill_process = lambda x: None

    def results_directory(self):
        return "/mock-results"

    def check_for_leaks(self, process_name, process_pid):
        pass

    def process_kill_time(self):
        # Seconds ServerProcess waits before forcibly killing the child.
        return 1
class MockFile(object):
    """Fake pipe file object whose writes always fail.

    Each failed write records the file in the owning server process's
    ``broken_pipes`` list before raising IOError.
    """

    def __init__(self, server_process):
        self.closed = False
        self._server_process = server_process

    def fileno(self):
        return 1

    def close(self):
        self.closed = True

    def write(self, line):
        # Simulate a broken pipe: remember which file failed, then raise.
        self._server_process.broken_pipes.append(self)
        raise IOError
class MockProc(object):
    """Fake subprocess.Popen result wired with MockFile pipes.

    poll() reports the process as exited (1) and wait() returns success.
    """

    def __init__(self, server_process):
        self.pid = 1
        self.stdin = MockFile(server_process)
        self.stdout = MockFile(server_process)
        self.stderr = MockFile(server_process)

    def poll(self):
        return 1

    def wait(self):
        return 0
class FakeServerProcess(server_process.ServerProcess):
    """ServerProcess whose child is a MockProc instead of a real process."""

    def _start(self):
        proc = MockProc(self)
        self._proc = proc
        # Mirror the mock pipes and pid onto the public attributes.
        self.stdin = proc.stdin
        self.stdout = proc.stdout
        self.stderr = proc.stderr
        self._pid = proc.pid
        self.broken_pipes = []
class TestServerProcess(unittest.TestCase):
    """Tests for webkitpy's ServerProcess wrapper."""

    def test_basic(self):
        # Launch a real child interpreter that emits one line on stdout and
        # one on stderr after a short sleep. (The command string is Python 2
        # syntax, matching the interpreter this suite targets.)
        cmd = [sys.executable, '-c', 'import sys; import time; time.sleep(0.02); print "stdout"; sys.stdout.flush(); print >>sys.stderr, "stderr"']
        host = SystemHost()
        factory = PortFactory(host)
        port = factory.get()
        now = time.time()
        proc = server_process.ServerProcess(port, 'python', cmd)
        proc.write('')

        # The child is still running and has not crashed.
        self.assertEqual(proc.poll(), None)
        self.assertFalse(proc.has_crashed())

        # check that doing a read after an expired deadline returns
        # nothing immediately.
        line = proc.read_stdout_line(now - 1)
        self.assertEqual(line, None)

        # FIXME: This part appears to be flaky. line should always be non-None.
        # FIXME: https://bugs.webkit.org/show_bug.cgi?id=88280
        line = proc.read_stdout_line(now + 1.0)
        if line:
            self.assertEqual(line.strip(), "stdout")

        line = proc.read_stderr_line(now + 1.0)
        if line:
            self.assertEqual(line.strip(), "stderr")

        proc.stop(0)

    def test_cleanup(self):
        # Stopping the process must close every pipe it opened.
        port_obj = TrivialMockPort()
        server_process = FakeServerProcess(port_obj=port_obj, name="test", cmd=["test"])
        server_process._start()
        server_process.stop()
        self.assertTrue(server_process.stdin.closed)
        self.assertTrue(server_process.stdout.closed)
        self.assertTrue(server_process.stderr.closed)

    def test_broken_pipe(self):
        # On Windows a broken pipe marks the process as crashed but the pid
        # remains available for later inspection.
        port_obj = TrivialMockPort()
        port_obj.host.platform.os_name = 'win'
        server_process = FakeServerProcess(port_obj=port_obj, name="test", cmd=["test"])
        server_process.write("should break")
        self.assertTrue(server_process.has_crashed())
        self.assertIsNotNone(server_process.pid())
        self.assertIsNone(server_process._proc)
        self.assertEqual(server_process.broken_pipes, [server_process.stdin])

        # Same scenario on mac: crashed as well, process object dropped.
        port_obj.host.platform.os_name = 'mac'
        server_process = FakeServerProcess(port_obj=port_obj, name="test", cmd=["test"])
        server_process.write("should break")
        self.assertTrue(server_process.has_crashed())
        self.assertIsNone(server_process._proc)
        self.assertEqual(server_process.broken_pipes, [server_process.stdin])
| bsd-3-clause |
dhp-denero/LibrERP | account_vat_period_end_statement/report/vat_period_end_statement.py | 2 | 4638 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-2012 Domsense s.r.l. (<http://www.domsense.com>).
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2013 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
from openerp.tools.translate import _
from openerp.osv import orm
class print_vat_period_end_statement(report_sxw.rml_parse):
    """Parser for the VAT period end statement report."""
    _name = 'parser.vat.period.end.statement'

    def _build_codes_dict(self, tax_code, res=None, context=None):
        """Recursively collect, for each tax code that has a period sum,
        the VAT amount and the related base amount.

        ``res`` is created lazily: the original mutable default argument
        ({}) would be shared between calls, leaking amounts from one
        report run into the next.
        """
        if res is None:
            res = {}
        if context is None:
            context = {}
        tax_pool = self.pool.get('account.tax')
        if tax_code.sum_period:
            if res.get(tax_code.name, False):
                raise orm.except_orm(_('Error'), _('Too many occurences of tax code %s') % tax_code.name)
            # search for taxes linked to that code
            tax_ids = tax_pool.search(self.cr, self.uid, [('tax_code_id', '=', tax_code.id)], context=context)
            if tax_ids:
                tax = tax_pool.browse(self.cr, self.uid, tax_ids[0], context=context)
                # search for the related base code
                base_code = tax.base_code_id or tax.parent_id and tax.parent_id.base_code_id or False
                if not base_code:
                    raise orm.except_orm(_('Error'), _('No base code found for tax code %s') % tax_code.name)
                # check if every tax is linked to the same tax code and base code
                for tax in tax_pool.browse(self.cr, self.uid, tax_ids, context=context):
                    test_base_code = tax.base_code_id or tax.parent_id and tax.parent_id.base_code_id or False
                    if test_base_code.id != base_code.id:
                        raise orm.except_orm(_('Error'), _('Not every tax linked to tax code %s is linked the same base code') % tax_code.name)
                res[tax_code.name] = {
                    'vat': tax_code.sum_period,
                    'base': base_code.sum_period,
                }
        # Recurse into the code hierarchy.
        for child_code in tax_code.child_ids:
            res = self._build_codes_dict(child_code, res=res, context=context)
        return res

    def _get_tax_codes_amounts(self, period_id, tax_code_ids=None, context=None):
        """Return {code_name: {'vat': ..., 'base': ...}} for the period.

        ``tax_code_ids`` defaults to None instead of a shared mutable [].
        """
        if context is None:
            context = {}
        res = {}
        code_pool = self.pool.get('account.tax.code')
        # Browsing with period_id in the context makes sum_period return
        # this period's total.
        context['period_id'] = period_id
        for tax_code in code_pool.browse(self.cr, self.uid, tax_code_ids or [],
                                         context=context):
            res = self._build_codes_dict(tax_code, res=res, context=context)
        return res

    def find_period(self, date, context=None):
        """Return the single account period containing *date*."""
        if context is None:
            context = {}
        period_pool = self.pool.get('account.period')
        period_ids = period_pool.find(self.cr, self.uid, dt=date, context=context)
        if len(period_ids) > 1:
            raise orm.except_orm(_('Error'), _('Too many periods for date %s') % str(date))
        return period_ids[0]

    def __init__(self, cr, uid, name, context=None):
        """Expose the helpers used by the mako report template."""
        if context is None:
            context = {}
        super(print_vat_period_end_statement, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
            'tax_codes_amounts': self._get_tax_codes_amounts,
            'find_period': self.find_period,
        })
        self.context = context
# Register the parser with the report service so the statement can be
# rendered through the mako template below.
report_sxw.report_sxw('report.account.print.vat.period.end.statement',
                      'account.vat.period.end.statement',
                      'addons/account_vat_period_end_statement/report/vat_period_end_statement.mako',
                      parser=print_vat_period_end_statement)
| agpl-3.0 |
usakhelo/FreeCAD | src/Mod/Ship/shipHydrostatics/Tools.py | 3 | 18781 | #***************************************************************************
#* *
#* Copyright (c) 2011, 2016 *
#* Jose Luis Cercos Pita <jlcercos@gmail.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import math
import random
from FreeCAD import Vector, Rotation, Matrix, Placement
import Part
import Units
import FreeCAD as App
import FreeCADGui as Gui
from PySide import QtGui, QtCore
import Instance
from shipUtils import Math
import shipUtils.Units as USys
DENS = Units.parseQuantity("1025 kg/m^3")  # Salt water
# Max retries when the boolean "common" operation yields no solids with the
# free surface exactly at z=0 (see getUnderwaterSide).
COMMON_BOOLEAN_ITERATIONS = 10
def placeShipShape(shape, draft, roll, trim):
    """Move the ship shape such that the free surface matches with the plane
    z=0. The transformation will be applied on the input shape, so copy it
    before calling this method if it should be preserved.

    Position arguments:
    shape -- Ship shape
    draft -- Ship draft
    roll -- Roll angle
    trim -- Trim angle

    Returned values:
    shape -- The same transformed input shape. Just for debugging purposes, you
    can discard it.
    base_z -- The new base z coordinate (after applying the roll angle). Useful
    if you want to revert back the transformation
    """
    # Roll the ship. In order to can deal with large roll angles, we are
    # proceeding as follows:
    # 1.- Applying the roll with respect the base line
    # 2.- Recentering the ship in the y direction
    # 3.- Readjusting the base line
    shape.rotate(Vector(0.0, 0.0, 0.0), Vector(1.0, 0.0, 0.0), roll)
    base_z = shape.BoundBox.ZMin
    shape.translate(Vector(0.0, draft * math.sin(math.radians(roll)), -base_z))

    # Trim the ship. In this case we only need to correct the x direction
    shape.rotate(Vector(0.0, 0.0, 0.0), Vector(0.0, -1.0, 0.0), trim)
    shape.translate(Vector(draft * math.sin(math.radians(trim)), 0.0, 0.0))

    # Finally sink the ship to the target draft, leaving the free surface
    # at z=0.
    shape.translate(Vector(0.0, 0.0, -draft))

    return shape, base_z
def getUnderwaterSide(shape, force=True):
    """Get the underwater shape, simply cropping the provided shape by the z=0
    free surface plane.

    Position arguments:
    shape -- Solid shape to be cropped

    Keyword arguments:
    force -- True if in case the common boolean operation fails, i.e. returns
    no solids, the tool should retry it slightly moving the free surface. False
    otherwise. (True by default)

    Returned value:
    Cropped shape. It is not modifying the input shape
    """
    # Convert the shape into an active object
    Part.show(shape)
    orig = App.ActiveDocument.Objects[-1]
    bbox = shape.BoundBox
    xmin = bbox.XMin
    xmax = bbox.XMax
    ymin = bbox.YMin
    ymax = bbox.YMax
    zmin = bbox.ZMin
    zmax = bbox.ZMax
    # Create the "sea" box to intersect the ship
    L = xmax - xmin
    B = ymax - ymin
    H = zmax - zmin

    box = App.ActiveDocument.addObject("Part::Box","Box")
    length_format = USys.getLengthFormat()
    # The box extends well beyond the hull in x and y, so only its top face
    # (at z=0) actually cuts the shape.
    box.Placement = Placement(Vector(xmin - L, ymin - B, zmin - H),
                              Rotation(App.Vector(0,0,1),0))
    box.Length = length_format.format(3.0 * L)
    box.Width = length_format.format(3.0 * B)
    box.Height = length_format.format(- zmin + H)

    App.ActiveDocument.recompute()
    common = App.activeDocument().addObject("Part::MultiCommon",
                                            "UnderwaterSideHelper")
    common.Shapes = [orig, box]
    App.ActiveDocument.recompute()
    if force and len(common.Shape.Solids) == 0:
        # The common operation is failing, let's try moving a bit the free
        # surface
        msg = QtGui.QApplication.translate(
            "ship_console",
            "Boolean operation failed when trying to get the underwater side."
            " The tool is retrying such operation slightly moving the free"
            " surface position",
            None)
        App.Console.PrintWarning(msg + '\n')
        random_bounds = 0.01 * H
        i = 0
        while len(common.Shape.Solids) == 0 and i < COMMON_BOOLEAN_ITERATIONS:
            i += 1
            # Randomly perturb the top of the "sea" box by up to +/-1% of
            # the hull height and retry the boolean operation.
            box.Height = length_format.format(
                - zmin + H + random.uniform(-random_bounds, random_bounds))
            App.ActiveDocument.recompute()

    out = common.Shape
    # Remove the helper objects created in the active document.
    App.ActiveDocument.removeObject(common.Name)
    App.ActiveDocument.removeObject(orig.Name)
    App.ActiveDocument.removeObject(box.Name)
    App.ActiveDocument.recompute()
    return out
def areas(ship, n, draft=None,
          roll=Units.parseQuantity("0 deg"),
          trim=Units.parseQuantity("0 deg")):
    """Compute the ship transversal areas

    Position arguments:
    ship -- Ship object (see createShip)
    n -- Number of points to compute

    Keyword arguments:
    draft -- Ship draft (Design ship draft by default)
    roll -- Roll angle (0 degrees by default)
    trim -- Trim angle (0 degrees by default)

    Returned value:
    List of sections, each section contains 2 values, the x longitudinal
    coordinate, and the transversal area. If n < 2, an empty list will be
    returned.
    """
    if n < 2:
        return []
    if draft is None:
        draft = ship.Draft

    shape, _ = placeShipShape(ship.Shape.copy(), draft, roll, trim)
    shape = getUnderwaterSide(shape)

    # Sections distance computation
    bbox = shape.BoundBox
    xmin = bbox.XMin
    xmax = bbox.XMax
    dx = (xmax - xmin) / (n - 1.0)

    # Since we are computing the sections in the total length (not in the
    # length between perpendiculars), we can grant that the starting and
    # ending sections have null area
    areas = [(Units.Quantity(xmin, Units.Length),
              Units.Quantity(0.0, Units.Area))]
    # And since we just need to compute areas we will create boxes with its
    # front face at the desired transversal area position, computing the
    # common solid part, dividing it by faces, and getting only the desired
    # ones.
    App.Console.PrintMessage("Computing transversal areas...\n")
    App.Console.PrintMessage("Some Inventor representation errors can be"
                             " shown, please ignore them.\n")
    for i in range(1, n - 1):
        App.Console.PrintMessage("{0} / {1}\n".format(i, n - 2))
        x = xmin + i * dx
        try:
            # Slice the hull by the plane x = const and measure the section.
            f = Part.Face(shape.slice(Vector(1,0,0), x))
        except Part.OCCError:
            # Failed sections are recorded with a null area so the list
            # still has one entry per station.
            msg = QtGui.QApplication.translate(
                "ship_console",
                "Part.OCCError: Transversal area computation failed",
                None)
            App.Console.PrintError(msg + '\n')
            areas.append((Units.Quantity(x, Units.Length),
                          Units.Quantity(0.0, Units.Area)))
            continue
        # It is a valid face, so we can add this area
        areas.append((Units.Quantity(x, Units.Length),
                      Units.Quantity(f.Area, Units.Area)))
    # Last area is equal to zero (due to the total length usage)
    areas.append((Units.Quantity(xmax, Units.Length),
                  Units.Quantity(0.0, Units.Area)))
    App.Console.PrintMessage("Done!\n")
    return areas
def displacement(ship, draft=None,
                 roll=Units.parseQuantity("0 deg"),
                 trim=Units.parseQuantity("0 deg")):
    """Compute the ship displacement

    Position arguments:
    ship -- Ship object (see createShip)

    Keyword arguments:
    draft -- Ship draft (Design ship draft by default)
    roll -- Roll angle (0 degrees by default)
    trim -- Trim angle (0 degrees by default)

    Returned values:
    disp -- The ship displacement (a density of the water of 1025 kg/m^3 is
    assumed)
    B -- Bouyance application point, i.e. Center of mass of the underwater side
    Cb -- Block coefficient

    The Bouyance center is referred to the original ship position.
    """
    if draft is None:
        draft = ship.Draft

    shape, base_z = placeShipShape(ship.Shape.copy(), draft, roll, trim)
    shape = getUnderwaterSide(shape)

    vol = 0.0
    cog = Vector()
    if len(shape.Solids) > 0:
        # Volume-weighted average of the solids' centers of mass.
        for solid in shape.Solids:
            vol += solid.Volume
            sCoG = solid.CenterOfMass
            cog.x = cog.x + sCoG.x * solid.Volume
            cog.y = cog.y + sCoG.y * solid.Volume
            cog.z = cog.z + sCoG.z * solid.Volume
        cog.x = cog.x / vol
        cog.y = cog.y / vol
        cog.z = cog.z / vol
    bbox = shape.BoundBox
    # Bounding prism volume used for the block coefficient.
    Vol = (bbox.XMax - bbox.XMin) * (bbox.YMax - bbox.YMin) * abs(bbox.ZMin)

    # Undo the transformations on the bouyance point
    B = Part.Point(Vector(cog.x, cog.y, cog.z))
    m = Matrix()
    m.move(Vector(0.0, 0.0, draft))
    m.move(Vector(-draft * math.sin(trim.getValueAs("rad")), 0.0, 0.0))
    m.rotateY(trim.getValueAs("rad"))
    m.move(Vector(0.0,
                  -draft * math.sin(roll.getValueAs("rad")),
                  base_z))
    m.rotateX(-roll.getValueAs("rad"))
    B.transform(m)

    try:
        cb = vol / Vol
    except ZeroDivisionError:
        msg = QtGui.QApplication.translate(
            "ship_console",
            "ZeroDivisionError: Null volume found during the displacement"
            " computation!",
            None)
        App.Console.PrintError(msg + '\n')
        cb = 0.0

    # Return the computed data
    return (DENS * Units.Quantity(vol, Units.Volume),
            Vector(B.X, B.Y, B.Z),
            cb)
def wettedArea(shape, draft, roll=Units.parseQuantity("0 deg"),
               trim=Units.parseQuantity("0 deg")):
    """Compute the ship wetted area, i.e. the area of the underwater side.

    Position arguments:
    shape -- External faces of the ship hull
    draft -- Ship draft

    Keyword arguments:
    roll -- Roll angle (0 degrees by default)
    trim -- Trim angle (0 degrees by default)

    Returned value:
    The wetted area
    """
    hull, _ = placeShipShape(shape.copy(), draft, roll, trim)
    hull = getUnderwaterSide(hull, force=False)

    # Total area of every face below the free surface.
    return Units.Quantity(sum(f.Area for f in hull.Faces), Units.Area)
def moment(ship, draft=None,
           roll=Units.parseQuantity("0 deg"),
           trim=Units.parseQuantity("0 deg")):
    """Compute the moment required to trim the ship 1cm

    Position arguments:
    ship -- Ship object (see createShip)

    Keyword arguments:
    draft -- Ship draft (Design ship draft by default)
    roll -- Roll angle (0 degrees by default)
    trim -- Trim angle (0 degrees by default)

    Returned value:
    Moment required to trim the ship 1cm. Such moment is positive if it cause a
    positive trim angle. The moment is expressed as a mass by a distance, not as
    a force by a distance
    """
    # Longitudinal bouyance center position at the reference trim angle.
    disp0, bouyance0, _ = displacement(ship, draft, roll, trim)
    lcb0 = Units.Quantity(bouyance0.x, Units.Length)

    # Angle that trims the ship 1 cm, amplified to obtain a measurable
    # change in the bouyance center position.
    amplification = 10.0
    half_length_cm = 0.5 * ship.Length.getValueAs('cm').Value
    one_cm_angle = math.atan2(1.0, half_length_cm) * Units.Radian
    disp1, bouyance1, _ = displacement(
        ship, draft, roll, trim + amplification * one_cm_angle)
    lcb1 = Units.Quantity(bouyance1.x, Units.Length)

    # Moment change, scaled back by the amplification factor.
    moment0 = -disp0 * lcb0
    moment1 = -disp1 * lcb1
    return (moment1 - moment0) / amplification
def floatingArea(ship, draft=None,
                 roll=Units.parseQuantity("0 deg"),
                 trim=Units.parseQuantity("0 deg")):
    """Compute the ship floating area

    Position arguments:
    ship -- Ship object (see createShip)

    Keyword arguments:
    draft -- Ship draft (Design ship draft by default)
    roll -- Roll angle (0 degrees by default)
    trim -- Trim angle (0 degrees by default)

    Returned values:
    area -- Ship floating area
    cf -- Floating area coefficient
    """
    if draft is None:
        draft = ship.Draft

    # We want to intersect the whole ship with the free surface, so in this case
    # we must not use the underwater side (or the tool will fail)
    shape, _ = placeShipShape(ship.Shape.copy(), draft, roll, trim)

    try:
        # Slice the hull by the free surface plane z=0.
        f = Part.Face(shape.slice(Vector(0,0,1), 0.0))
        area = Units.Quantity(f.Area, Units.Area)
    except Part.OCCError:
        msg = QtGui.QApplication.translate(
            "ship_console",
            "Part.OCCError: Floating area cannot be computed",
            None)
        App.Console.PrintError(msg + '\n')
        area = Units.Quantity(0.0, Units.Area)

    bbox = shape.BoundBox
    # Bounding rectangle of the waterplane, used for the coefficient.
    Area = (bbox.XMax - bbox.XMin) * (bbox.YMax - bbox.YMin)
    try:
        cf = area.Value / Area
    except ZeroDivisionError:
        msg = QtGui.QApplication.translate(
            "ship_console",
            "ZeroDivisionError: Null area found during the floating area"
            " computation!",
            None)
        App.Console.PrintError(msg + '\n')
        cf = 0.0

    return area, cf
def BMT(ship, draft=None, trim=Units.parseQuantity("0 deg")):
    """Calculate "ship Bouyance center" - "transversal metacenter" radius

    The radius is estimated numerically: the buoyancy center is recomputed
    at a couple of small heel angles and BM is recovered from the chord
    between the upright and heeled positions.

    Position arguments:
    ship -- Ship object (see createShip)
    Keyword arguments:
    draft -- Ship draft (Design ship draft by default)
    trim -- Trim angle (0 degrees by default)
    Returned value:
    BMT radius (a Units.Quantity with Length units)
    """
    if draft is None:
        draft = ship.Draft
    # Upright buoyancy center as reference
    roll = Units.parseQuantity("0 deg")
    _, B0, _ = displacement(ship, draft, roll, trim)
    # Evaluate at nRoll heel angles, evenly spaced up to maxRoll
    nRoll = 2
    maxRoll = Units.parseQuantity("7 deg")
    BM = 0.0
    for i in range(nRoll):
        roll = (maxRoll / nRoll) * (i + 1)
        _, B1, _ = displacement(ship, draft, roll, trim)
        #     * M
        #    / \
        #   /   \  BM     ==|>    BM = (BB/2) / sin(alpha/2)
        #  /     \
        # *-------*
        #     BB
        # Chord between the two buoyancy centers; the x component is
        # discarded since transversal metacenter only concerns the YZ plane.
        BB = B1 - B0
        BB.x = 0.0
        # nRoll is actually representing the weight function
        # NOTE(review): roll is a Units.Quantity here; math.radians()
        # presumably coerces it through its numeric (degree) value -- confirm.
        BM += 0.5 * BB.Length / math.sin(math.radians(0.5 * roll)) / nRoll
    return Units.Quantity(BM, Units.Length)
def mainFrameCoeff(ship, draft=None):
    """Compute the main frame coefficient
    Position arguments:
    ship -- Ship object (see createShip)
    Keyword arguments:
    draft -- Ship draft (Design ship draft by default)
    Returned value:
    Ship main frame area coefficient
    """
    if draft is None:
        draft = ship.Draft
    zero_angle = Units.parseQuantity("0 deg")
    shape, _ = placeShipShape(ship.Shape.copy(), draft, zero_angle, zero_angle)
    shape = getUnderwaterSide(shape)
    # Slice the underwater geometry with the midship (YZ) plane
    try:
        frame = Part.Face(shape.slice(Vector(1, 0, 0), 0.0))
    except Part.OCCError:
        msg = QtGui.QApplication.translate(
            "ship_console",
            "Part.OCCError: Main frame area cannot be computed",
            None)
        App.Console.PrintError(msg + '\n')
        area = 0.0
    else:
        area = frame.Area
    # Reference area: bounding-box footprint in the YZ plane
    box = shape.BoundBox
    box_area = (box.YMax - box.YMin) * (box.ZMax - box.ZMin)
    try:
        cm = area / box_area
    except ZeroDivisionError:
        msg = QtGui.QApplication.translate(
            "ship_console",
            "ZeroDivisionError: Null area found during the main frame area"
            " coefficient computation!",
            None)
        App.Console.PrintError(msg + '\n')
        cm = 0.0
    return cm
class Point:
    """Hydrostatics point, that contains the following members:

    draft -- Ship draft
    trim -- Ship trim
    disp -- Ship displacement
    xcb -- Buoyancy center X coordinate
    wet -- Wetted ship area
    mom -- Moment to trim the ship 1 cm
    farea -- Floating area
    KBt -- Transversal KB height
    BMt -- Transversal BM height
    Cb -- Block coefficient.
    Cf -- Floating coefficient.
    Cm -- Main frame coefficient.

    The moment to trim the ship 1 cm is positive when is resulting in a positive
    trim angle.
    """
    def __init__(self, ship, faces, draft, trim):
        """Compute all the hydrostatics.

        Position argument:
        ship -- Ship instance
        faces -- Ship external faces (may be empty/None, in which case the
                 wetted area is reported as 0.0)
        draft -- Ship draft
        trim -- Trim angle
        """
        # Displacement also yields the buoyancy center and block coefficient
        disp, B, cb = displacement(ship, draft=draft, trim=trim)
        if not faces:
            wet = 0.0
        else:
            wet = wettedArea(faces, draft=draft, trim=trim)
        mom = moment(ship, draft=draft, trim=trim)
        farea, cf = floatingArea(ship, draft=draft, trim=trim)
        bm = BMT(ship, draft=draft, trim=trim)
        cm = mainFrameCoeff(ship, draft=draft)
        # Store final data
        self.draft = draft
        self.trim = trim
        self.disp = disp
        self.xcb = Units.Quantity(B.x, Units.Length)
        self.wet = wet
        self.farea = farea
        self.mom = mom
        self.KBt = Units.Quantity(B.z, Units.Length)
        self.BMt = bm
        self.Cb = cb
        self.Cf = cf
        self.Cm = cm
| lgpl-2.1 |
frederick623/HTI | omm/ced_part.py | 1 | 5118 | import os
import sys
import sqlite3
import decimal
import math
import datetime
import ast
import csv
def adapt_decimal(d):
	# sqlite3 adapter: store decimal.Decimal values as their string form.
	return str(d)
def convert_decimal(s):
	# sqlite3 converter: rebuild a decimal.Decimal from its stored text form.
	return decimal.Decimal(s)
def question_marks(st):
	"""Return a comma-separated SQLite placeholder string ("?,?,...")
	with one "?" per comma-separated field in *st*.

	st -- a comma-separated header/column string (e.g. "a,b,c" -> "?,?,?")
	"""
	# One placeholder per field.  str.split(',') yields [''] for an empty
	# string, so at least one "?" is always produced, matching the old
	# loop (which also shadowed the function name with a local variable
	# and built the string with quadratic concatenation).
	return ",".join("?" for _ in st.split(","))
def arr_to_csv(file_name, header, data_arr):
	# Write data_arr to file_name as a fully-quoted CSV, with `header`
	# (a comma-separated string) as the first row.  Every cell is
	# stringified before writing.
	# NOTE(review): mode 'wb' for csv.writer is the Python 2 convention;
	# on Python 3 this would need mode 'w' with newline=''.
	csv_file = open(file_name, 'wb')
	wr = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
	wr.writerow(header.split(','))
	for data_row in data_arr:
		line = []
		for ele in data_row:
			line.append(str(ele))
		wr.writerow(line)
	csv_file.close()
	return
def dec(s):
	# Best-effort conversion of s to decimal.Decimal; returns 0 for
	# empty/NaN/unconvertible values instead of raising.
	# NOTE(review): `basestring` makes this Python-2 only.
	if isinstance(s, basestring):
		# Strip '#' markers before parsing
		s = str(s).replace("#","")
		# NOTE(review): float(s) here is outside the try below, so a
		# non-numeric string (e.g. "abc") raises ValueError -- confirm
		# callers only pass numeric-ish strings.
		if s == "" or str(float(s)) == "nan":
			return 0
	try:
		return decimal.Decimal(str(s))
	except:
		# Deliberate best-effort: any conversion failure collapses to 0.
		return 0
	# NOTE(review): unreachable -- both paths above return.
	return s
def db_cur():
	"""Create an in-memory sqlite3 database and return (connection, cursor).

	Decimal values round-trip through DECTEXT columns via the
	adapter/converter registered below; rows come back as sqlite3.Row.
	"""
	# Store decimals as text on the way in...
	sqlite3.register_adapter(decimal.Decimal, adapt_decimal)
	# ...and rebuild them from DECTEXT columns on the way out.
	sqlite3.register_converter("DECTEXT", convert_decimal)
	connection = sqlite3.connect(":memory:",
	                             detect_types=sqlite3.PARSE_DECLTYPES)
	connection.row_factory = sqlite3.Row
	return connection, connection.cursor()
def csv_to_arr(csv_file, start=1, has_header=True):
	# Read csv_file and return its rows from index `start` on; when
	# has_header is True, also return the first row re-joined as a
	# comma-separated header string: (header, rows).
	# NOTE(review): mode 'rU' was removed in Python 3.11 -- fine for this
	# Python 2 module, but would need 'r' on modern Python.
	arr = []
	with open(csv_file, 'rU') as f:
		reader = csv.reader(f)
		arr = list(reader)
	header = ""
	if has_header:
		header = ','.join(arr[0])
		arr = arr[start:]
		return header, arr
	else:
		return arr[start:]
	# NOTE(review): unreachable -- both branches above return.
	return
def ced_to_db(cur, ced_file):
	# Parse the fixed-width CED trade report `ced_file` into the in-memory
	# `ced` table, join it against the `tir` tier table (keeping only rows
	# with a non-empty `omm` mapping), and load the reshaped trades into
	# the pre-created `sd4` table.
	# Column layouts are positional (character offsets) -- do not reflow.
	ced_header = "id,instrumentCode,InstrumentName,MarketCode,ISINCode,tradeSide,accountId,ourRefNumber,isShortSell,unitPrice,tradeQuantity,tradeNum,executionDateTime,brokerID,CounterParty,marketmaker,investorId,investorOrderNum,OrderID,etfQuantity,glTradeType,manualTradeKey,houseAccountId,traderId,ourOrderID,clClientID,isOrderFullyFilled,Channel,SEHKTradeRef"
	sd4_header = "hor_trade_id,trading_date,trade_time,broker_firm_id,broker_number,broker_firm_name,stock_code,buy_sell,corr_broker_number,trade_type,settlement_type,price,quantity,transaction_value,stamp_duty_adj_amount,amendment_applied,trade_reference_group,reserved,source,priority"
	ced_arr = []
	with open(ced_file, 'rU') as f:
		rows = f.readlines()
		if len(rows) < 1:
			return
		# Data rows are identified by a numeric id in the first 6 columns
		for row in rows:
			if row[0:6].strip().isdigit():
				ced_arr.append([row[0:6].strip(), row[7:22].strip(), row[23:43].strip(), row[44:54].strip(), row[55:75].strip(), row[76:91].strip(), row[92:117].strip(),
					row[118:143].strip(), row[144:172].strip(), float(row[173:198].strip()), int(row[199:214].strip()), row[215:240].strip(), str(row[241:266].strip()),
					row[268:282].strip(), row[284:298].strip(), row[300:309].strip(), row[311:328].strip(), row[330:361].strip(), row[363:412].strip(),
					(row[413:423].strip()), row[424:429].strip(), row[430:455].strip(), row[456:472].strip(), row[473:483].strip(), row[484:534].strip(),
					row[535:545].strip(), row[546:566].strip(), row[567:574].strip(), row[575:590].strip()])
	cur.execute("CREATE TABLE ced (" + ced_header + ");")
	cur.executemany("INSERT INTO ced VALUES ("+question_marks(ced_header)+")", ced_arr)
	cur.execute("select * from ced join tir on ced.instrumentCode = tir.sehk_code where omm <> '' ")
	rows = cur.fetchall()
	ced_arr = []
	# Reshape each joined CED row into the SD4 record layout
	for idx,row in enumerate(rows):
		ced_trade_id = row["tradeNum"]
		trading_date = row["executionDateTime"][0:8]
		trade_time = row["executionDateTime"][8:16]
		broker_firm_id = " 1143"
		broker_number = row["brokerID"]
		broker_firm_name = "Haitong Intl Sec Co Ltd".upper()
		stock_code = row["instrumentCode"]
		buy_sell = row["tradeSide"][0]
		corr_broker_number = row["CounterParty"]
		trade_type = 'A'
		settlement_type = ' '
		price = float(row["unitPrice"])
		quantity = int(row["tradeQuantity"])
		transaction_value = float(row["unitPrice"])*float(row["tradeQuantity"])
		# Stamp duty: rounded up per 1000 of transaction value
		stamp_duty_adj_amount = math.ceil(float(row["unitPrice"])*float(row["tradeQuantity"])/1000)
		amendment_applied = "N"
		trade_reference_group = row["SEHKTradeRef"]
		reserved = " "
		source = "Ullink"
		priority = 2
		ced_arr.append([ced_trade_id,trading_date,trade_time,broker_firm_id,broker_number,broker_firm_name,stock_code,buy_sell,corr_broker_number,trade_type,settlement_type
			,price,quantity,transaction_value,stamp_duty_adj_amount,amendment_applied,trade_reference_group,reserved,source,priority])
	cur.executemany("INSERT INTO sd4 VALUES ("+question_marks(sd4_header)+")", ced_arr)
	return
# --- Module-level script: build the in-memory DB, load the tier table and
# the CED trade file, then dump the resulting SD4 rows to ced.csv.
# NOTE(review): this runs at import time (no __main__ guard) and expects
# "tier.csv" and "ced.txt" in the working directory.
sd4_header = "hor_trade_id,trading_date,trade_time,broker_firm_id,broker_number,broker_firm_name,stock_code,buy_sell,corr_broker_number,trade_type,settlement_type,price,quantity,transaction_value,stamp_duty_adj_amount,amendment_applied,trade_reference_group,reserved,source,priority"
conn, cur = db_cur()
tir_header, tir_arr = csv_to_arr("tier.csv")
cur.execute("CREATE TABLE tir (" + tir_header + ");")
cur.executemany("INSERT INTO tir VALUES ("+question_marks(tir_header)+")", tir_arr)
cur.execute("CREATE TABLE sd4 (" + sd4_header + ");")
ced_to_db(cur, "ced.txt")
cur.execute("select * from sd4")
sd4_rows = cur.fetchall()
# Debug echo of the loaded rows (Python 2 print statement)
for row in sd4_rows:
	print row
arr_to_csv("ced.csv", sd4_header, sd4_rows)
gdi2290/rethinkdb | test/rql_test/connections/http_support/flask/module.py | 850 | 1363 | # -*- coding: utf-8 -*-
"""
flask.module
~~~~~~~~~~~~
Implements a class that represents module blueprints.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
from .blueprints import Blueprint
def blueprint_is_module(bp):
    """Used to figure out if something is actually a module"""
    # Modules are a deprecated Blueprint subclass (see Module below),
    # so an isinstance check suffices.
    return isinstance(bp, Module)
class Module(Blueprint):
    """Deprecated module support.  Until Flask 0.6 modules were a different
    name of the concept now available as blueprints in Flask.  They are
    essentially doing the same but have some bad semantics for templates and
    static files that were fixed with blueprints.

    .. versionchanged:: 0.7
       Modules were deprecated in favor for blueprints.
    """

    def __init__(self, import_name, name=None, url_prefix=None,
                 static_path=None, subdomain=None):
        # Derive the module name from the dotted import path when it is
        # not given explicitly.
        if name is None:
            assert '.' in import_name, 'name required if package name ' \
                                       'does not point to a submodule'
            name = import_name.rsplit('.', 1)[1]
        Blueprint.__init__(self, name, import_name, url_prefix=url_prefix,
                           subdomain=subdomain, template_folder='templates')
        # Mimic old module behaviour: serve a ./static directory
        # automatically whenever one exists next to the module.
        static_dir = os.path.join(self.root_path, 'static')
        if os.path.isdir(static_dir):
            self._static_folder = 'static'
cheddartv/stockstream.live | webapp/robinhood/robinhood_api.py | 1 | 1709 | import requests
import httputil
import traceback
from datetime import datetime, timedelta
def get_symbol_to_quotes(symbols):
    """Fetch Robinhood quotes for the given symbols.

    Returns a dict mapping symbol -> quote dict.  Symbols the API knows
    nothing about are omitted; any fetch/parse failure is logged and
    yields a partial (possibly empty) result instead of raising.
    """
    symbol_to_quote = {}
    if not symbols:
        return symbol_to_quote
    url = "https://api.robinhood.com/quotes/?symbols={}".format(",".join(symbols))
    try:
        for quote in httputil.get_json_object_from_url(url)['results']:
            # The API returns None entries for unknown symbols; skip them.
            if quote:
                symbol_to_quote[quote['symbol']] = quote
    except Exception as e:
        traceback.print_exc()
        print("ERROR: " + str(e))
    return symbol_to_quote
def get_market_hours(date):
    # Fetch the XNAS (NASDAQ) market-hours record for `date`
    # (a 'YYYY-MM-DD' string) from the Robinhood API.
    market_date = httputil.get_json_object_from_url("https://api.robinhood.com/markets/XNAS/hours/{}/".format(date))
    return market_date
def find_last_market_open_date(max_lookback_days=366):
    """Return the most recent date ('YYYY-MM-DD') on which the market was
    open and whose (extended-hours) open time has already passed, per UTC now.

    Keyword arguments:
    max_lookback_days -- safety bound on how far back to search before
                         raising RuntimeError (366 by default); the old
                         version looped forever if the API never reported
                         an open day.

    NOTE(review): the open-time check relies on 'extended_opens_at' being
    an ISO-8601 string lexicographically comparable with
    datetime.isoformat() -- confirm against the Robinhood API.
    """
    now = datetime.utcnow()
    # Truncate to midnight directly instead of the old strftime/strptime
    # round trip.
    day = datetime(now.year, now.month, now.day)
    now_str = now.isoformat()  # loop-invariant; hoisted out of the loop
    for _ in range(max_lookback_days):
        date_str = day.strftime('%Y-%m-%d')
        market_hours = get_market_hours(date_str)
        if market_hours['is_open'] and now_str > market_hours['extended_opens_at']:
            return date_str
        day -= timedelta(days=1)
    raise RuntimeError("no open market day found in the last %d days"
                       % max_lookback_days)
def get_fundamentals(symbol):
    # Fetch the fundamentals record (market cap, PE, etc.) for `symbol`.
    fundamentals = httputil.get_json_object_from_url("https://api.robinhood.com/fundamentals/{}/".format(symbol))
    return fundamentals
def get_quote(symbol):
    # Fetch the single-symbol quote record for `symbol`.
    return httputil.get_json_object_from_url("https://api.robinhood.com/quotes/{}/".format(symbol))
def get_instrument_for_symbol(symbol):
    # Look up the quote for `symbol`, then fetch the instrument resource it
    # links to.  NOTE(review): raises KeyError when the API returned no
    # quote for `symbol` -- confirm callers only pass known symbols.
    symbol_to_quote = get_symbol_to_quotes([symbol])
    quote = symbol_to_quote[symbol]
    return requests.get(quote["instrument"]).json()
| mit |
mayavanand/RMMAFinalProject | azimuth/features/featurization.py | 1 | 26462 | import pandas
import time
import sklearn
import numpy as np
import Bio.SeqUtils as SeqUtil
import Bio.Seq as Seq
import util
import sys
import Bio.SeqUtils.MeltingTemp as Tm
import pickle
import itertools
def featurize_data(data, learn_options, Y, gene_position, pam_audit=True, length_audit=True):
    '''
    assumes that data contains the 30mer
    returns set of features from which one can make a kernel for each one

    Each entry of the returned dict maps a feature-set name to a pandas
    DataFrame with one row per guide; which sets are built is driven by
    the boolean switches in learn_options.
    '''
    # All guide sequences must have the same length for positional features
    all_lens = data['30mer'].apply(len).values
    unique_lengths = np.unique(all_lens)
    num_lengths = len(unique_lengths)
    assert num_lengths == 1, "should only have sequences of a single length, but found %s: %s" % (num_lengths, str(unique_lengths))
    print "Constructing features..."
    t0 = time.time()
    feature_sets = {}
    if learn_options["nuc_features"]:
        # spectrum kernels (position-independent) and weighted degree kernels (position-dependent)
        get_all_order_nuc_features(data['30mer'], feature_sets, learn_options, learn_options["order"], max_index_to_use=30)
    check_feature_set(feature_sets)
    if learn_options["gc_features"]:
        gc_above_10, gc_below_10, gc_count = gc_features(data, length_audit)
        feature_sets['gc_above_10'] = pandas.DataFrame(gc_above_10)
        feature_sets['gc_below_10'] = pandas.DataFrame(gc_below_10)
        feature_sets['gc_count'] = pandas.DataFrame(gc_count)
    if learn_options["pam_features"]:
        pam_above_1, pam_equals_1, pam_count = pam_features(data, length_audit)
        feature_sets['pam_above_1'] = pandas.DataFrame(pam_above_1)
        feature_sets['pam_equals_1'] = pandas.DataFrame(pam_equals_1)
        feature_sets['pam_count'] = pandas.DataFrame(pam_count)
    # NOTE(review): the block below is a string literal used to disable the
    # repeat features; it is a runtime no-op kept for reference.
    '''
    if learn_options["repeat_features"]:
        repeat_above_0, repeat_equals_0, repeat_count = repeat_features(data, length_audit)
        feature_sets['repeat_above_0'] = pandas.DataFrame(repeat_above_0)
        feature_sets['repeat_equals_1'] = pandas.DataFrame(repeat_equals_0)
        feature_sets['repeat_count'] = pandas.DataFrame(repeat_count)
    '''
    if learn_options["include_gene_position"]:
        # gene_position_columns = ["Amino Acid Cut position", "Percent Peptide", "Nucleotide cut position"]
        # gene_position_columns = ["Percent Peptide", "Nucleotide cut position"]
        for set in gene_position.columns:
            set_name = set
            feature_sets[set_name] = pandas.DataFrame(gene_position[set])
        feature_sets["Percent Peptide <50%"] = feature_sets["Percent Peptide"] < 50
        feature_sets["Percent Peptide <50%"]['Percent Peptide <50%'] = feature_sets["Percent Peptide <50%"].pop("Percent Peptide")
    if learn_options["include_gene_effect"]:
        print "including gene effect"
        # One-hot encode the target gene identity
        gene_names = Y['Target gene']
        enc = sklearn.preprocessing.OneHotEncoder()
        label_encoder = sklearn.preprocessing.LabelEncoder()
        label_encoder.fit(gene_names)
        one_hot_genes = np.array(enc.fit_transform(label_encoder.transform(gene_names)[:, None]).todense())
        feature_sets["gene effect"] = pandas.DataFrame(one_hot_genes,
                                                       columns=["gene_%d" % i for i in range(one_hot_genes.shape[1])], index=gene_names.index)
    if learn_options['include_known_pairs']:
        feature_sets['known pairs'] = pandas.DataFrame(Y['test'])
    if learn_options["include_NGGX_interaction"]:
        feature_sets["NGGX"] = NGGX_interaction_feature(data, pam_audit)
    #if learn_options["include_NGGXX_interaction"]:
    #    feature_sets["NGGXX"] = NGGXX_interaction_feature(data, pam_audit)
    if learn_options["include_Tm"]:
        feature_sets["Tm"] = Tm_feature(data, pam_audit)
    if learn_options["include_sgRNAscore"]:
        feature_sets["sgRNA Score"] = pandas.DataFrame(data["sgRNA Score"])
    if learn_options["include_drug"]:
        # feature_sets["drug"] = pandas.DataFrame(data["drug"])
        # One-hot encode the drug identity from the Y index
        drug_names = Y.index.get_level_values('drug').tolist()
        enc = sklearn.preprocessing.OneHotEncoder()
        label_encoder = sklearn.preprocessing.LabelEncoder()
        label_encoder.fit(drug_names)
        one_hot_drugs = np.array(enc.fit_transform(label_encoder.transform(drug_names)[:, None]).todense())
        feature_sets["drug"] = pandas.DataFrame(one_hot_drugs, columns=["drug_%d" % i for i in range(one_hot_drugs.shape[1])], index=drug_names)
    if learn_options['include_strand']:
        feature_sets['Strand effect'] = (pandas.DataFrame(data['Strand']) == 'sense')*1
    if learn_options["include_gene_feature"]:
        feature_sets["gene features"] = gene_feature(Y, data, learn_options)
    if learn_options["include_gene_guide_feature"] > 0:
        tmp_feature_sets = gene_guide_feature(Y, data, learn_options)
        for key in tmp_feature_sets:
            feature_sets[key] = tmp_feature_sets[key]
    if learn_options["include_microhomology"]:
        feature_sets["microhomology"] = get_micro_homology_features(Y['Target gene'], learn_options, data)
    t1 = time.time()
    print "\t\tElapsed time for constructing features is %.2f seconds" % (t1-t0)
    check_feature_set(feature_sets)
    if learn_options['normalize_features']:
        # NOTE(review): assert on a non-empty string is always true -- this
        # was presumably meant to raise/abort; it never fires.
        assert("should not be here as doesn't make sense when we make one-off predictions, but could make sense for internal model comparisons when using regularized models")
        feature_sets = normalize_feature_sets(feature_sets)
        check_feature_set(feature_sets)
    return feature_sets
def check_feature_set(feature_sets):
    '''
    Ensure every feature set has the same number of rows (one per guide)
    and that no feature set contains NaNs.
    '''
    assert feature_sets != {}, "no feature sets present"
    N = None
    for ft in feature_sets.keys():
        N2 = feature_sets[ft].shape[0]
        if N is None:
            N = N2
        else:
            assert N >= 1, "should be at least one individual"
            assert N == N2, "# of individuals do not match up across feature sets"
    # NOTE(review): `set` shadows the builtin here; kept for byte-identity.
    for set in feature_sets.keys():
        if np.any(np.isnan(feature_sets[set])):
            raise Exception("found Nan in set %s" % set)
def NGGX_interaction_feature(data, pam_audit=True):
    '''
    assuming 30-mer, grab the NGGX _ _ positions, and make a one-hot
    encoding of the NX nucleotides yielding 4x4=16 features

    pam_audit -- when True, verify positions 25:27 really are "GG".
    Returns a DataFrame with one row per guide (built column-wise then
    transposed).
    '''
    sequence = data['30mer'].values
    feat_NX = pandas.DataFrame()
    # check that GG is where we think
    for seq in sequence:
        if pam_audit and seq[25:27] != "GG":
            raise Exception("expected GG but found %s" % seq[25:27])
        # N is position 24 (just before the GG), X is position 27 (just after)
        NX = seq[24]+seq[27]
        NX_onehot = nucleotide_features(NX,order=2, feature_type='pos_dependent', max_index_to_use=2, prefix="NGGX")
        # NX_onehot[:] = np.random.rand(NX_onehot.shape[0]) ##TESTING RANDOM FEATURE
        # NOTE(review): concat inside the loop is quadratic in guide count.
        feat_NX = pandas.concat([feat_NX, NX_onehot], axis=1)
    return feat_NX.T
def NGGXX_interaction_feature(data, pam_audit=True):
    #added by Maya and Rachel
    #assuming 30-mer, grab the NGGXX _ _ positions, and make a one-hot
    #encoding of the NXX nucleotides yielding 4x4x4=64 features
    # pam_audit -- when True, verify positions 25:27 really are "GG".
    # Returns a DataFrame with one row per guide.
    sequence = data['30mer'].values
    feat_NXX = pandas.DataFrame()
    # check that GG is where we think
    for seq in sequence:
        if pam_audit and seq[25:27] != "GG":
            raise Exception("expected GG but found %s" % seq[25:27])
        # N is position 24; the two X positions are 27 and 28
        NXX = seq[24]+seq[27]+seq[28]
        NXX_onehot = nucleotide_features(NXX, order=3, feature_type='pos_dependent', max_index_to_use=3, prefix="NGGXX")
        # NXX_onehot[:] = np.random.rand(NXX_onehot.shape[0]) ##TESTING RANDOM FEATURE
        feat_NXX = pandas.concat([feat_NXX, NXX_onehot], axis=1)
    return feat_NXX.T
def countPAM(s, length_audit=True):
    """Count 'GG' PAM cores in the 30mer, checking only offsets 1, 4, 7, ...
    (a stride-3 frame scan, as in the original implementation -- GG pairs at
    other offsets are deliberately not counted).

    length_audit -- when True, assert the sequence is exactly 30 long.
    """
    if length_audit:
        assert len(s) == 30
    total = 0
    for i in range(1, 30, 3):
        if s[i] == 'G' and s[i + 1] == 'G':
            total += 1
    return total
def countRepeats(s, length_audit=True):
    """Count repeated codons (non-overlapping 3-mers at offsets 0,3,...,27)
    in the 30mer: a codon seen k times contributes k - 1 repeats.

    length_audit is accepted for signature parity with the sibling counters
    but (as in the original) is not used.
    """
    counts = {}
    for i in range(0, 30, 3):
        codon = s[i] + s[i + 1] + s[i + 2]
        counts[codon] = counts.get(codon, 0) + 1
    return sum(k - 1 for k in counts.values() if k > 1)
def get_all_order_nuc_features(data, feature_sets, learn_options, maxorder, max_index_to_use, prefix=""):
    # Add position-dependent (and, when configured, position-independent)
    # nucleotide n-gram features for every order 1..maxorder.
    # Mutates `feature_sets` in place; returns nothing.
    for order in range(1, maxorder+1):
        print "\t\tconstructing order %s features" % order
        nuc_features_pd, nuc_features_pi = apply_nucleotide_features(data, order, learn_options["num_proc"],
                                                                     include_pos_independent=True, max_index_to_use=max_index_to_use, prefix=prefix)
        feature_sets['%s_nuc_pd_Order%i' % (prefix, order)] = nuc_features_pd
        if learn_options['include_pi_nuc_feat']:
            feature_sets['%s_nuc_pi_Order%i' % (prefix, order)] = nuc_features_pi
        check_feature_set(feature_sets)
        print "\t\t\t\t\t\t\tdone"
def countGC(s, length_audit=True):
    '''
    GC content for only the 20mer, as per the Doench paper/code

    Counts every character in positions 4-23 that is neither 'A' nor 'T'
    (so any ambiguous base such as 'N' is counted as GC, matching the
    original replace-based implementation).
    '''
    if length_audit:
        assert len(s) == 30, "seems to assume 30mer"
    return sum(1 for base in s[4:24] if base not in ('A', 'T'))
def SeqUtilFeatures(data):
    '''
    assuming '30-mer' is a key

    Single-column feature: the molecular weight of each 30mer as computed
    by Bio.SeqUtils.molecular_weight.  (The original docstring described
    the Tm features by mistake; see Tm_feature for those.)
    '''
    sequence = data['30mer'].values
    num_features = 1
    featarray = np.ones((sequence.shape[0], num_features))
    for i, seq in enumerate(sequence):
        assert len(seq) == 30, "seems to assume 30mer"
        featarray[i, 0] = SeqUtil.molecular_weight(str(seq))
    # NOTE(review): the double DataFrame wrap is redundant but harmless.
    feat = pandas.DataFrame(pandas.DataFrame(featarray))
    return feat
def organism_feature(data):
    '''
    One-hot organism indicator (e.g. human vs. mouse), one column per
    distinct value of data['Organism'].

    NOTE(review): the original body referenced an undefined `featarray`
    and then dropped into ipdb, so it crashed unconditionally; this
    implementation one-hot encodes the 'Organism' column instead --
    confirm the intended encoding against callers.
    '''
    organism = np.array(data['Organism'].values)
    # Sort for a deterministic column order
    columns = sorted(set(organism))
    featarray = np.array([[1.0 if o == c else 0.0 for c in columns]
                          for o in organism])
    feat = pandas.DataFrame(featarray, index=data.index, columns=columns)
    return feat
def get_micro_homology_features(gene_names, learn_options, X):
    # originally was flipping the guide itself as necessary, but now flipping the gene instead
    # For each guide, locate it inside its (reverse-complemented) gene and
    # score the 60mer formed by 9 nt of left context + the 30mer guide +
    # 21 nt of right context.  Guides that cannot be located score 0.
    # NOTE(review): `microhomology` is not among this module's imports --
    # confirm it is injected/imported elsewhere before enabling this path.
    print "building microhomology features"
    feat = pandas.DataFrame(index=X.index)
    feat["mh_score"] = ""
    feat["oof_score"] = ""
    #with open(r"tmp\V%s_gene_mismatches.csv" % learn_options["V"],'wb') as f:
    if True:
        # number of nulceotides to take to the left and right of the guide
        k_mer_length_left = 9
        k_mer_length_right = 21
        for gene in gene_names.unique():
            gene_seq = Seq.Seq(util.get_gene_sequence(gene)).reverse_complement()
            guide_inds = np.where(gene_names.values == gene)[0]
            print "getting microhomology for all %d guides in gene %s" % (len(guide_inds), gene)
            for j, ps in enumerate(guide_inds):
                guide_seq = Seq.Seq(X['30mer'][ps])
                strand = X['Strand'][ps]
                if strand=='sense':
                    gene_seq = gene_seq.reverse_complement()
                # figure out the sequence to the left and right of this guide, in the gene
                ind = gene_seq.find(guide_seq)
                if ind==-1:
                    # Retry on the opposite strand before giving up
                    gene_seq = gene_seq.reverse_complement()
                    ind = gene_seq.find(guide_seq)
                    #assert ind != -1, "still didn't work"
                    #print "shouldn't get here"
                else:
                    #print "all good"
                    pass
                #assert ind != -1, "could not find guide in gene"
                if ind==-1:
                    #print "***could not find guide %s for gene %s" % (str(guide_seq), str(gene))
                    #if.write(str(gene) + "," + str(guide_seq))
                    mh_score = 0
                    oof_score = 0
                else:
                    #print "worked"
                    assert gene_seq[ind:(ind+len(guide_seq))]==guide_seq, "match not right"
                    left_win = gene_seq[(ind - k_mer_length_left):ind]
                    right_win = gene_seq[(ind + len(guide_seq)):(ind + len(guide_seq) + k_mer_length_right)]
                    #if strand=='antisense':
                    #    # it's arbitrary which of sense and anti-sense we flip, we just want
                    #    # to keep them in the same relative alphabet/direction
                    #    left_win = left_win.reverse_complement()
                    #    right_win = right_win.reverse_complement()
                    assert len(left_win.tostring())==k_mer_length_left
                    assert len(right_win.tostring())==k_mer_length_right
                    sixtymer = str(left_win) + str(guide_seq) + str(right_win)
                    assert len(sixtymer)==60, "should be of length 60"
                    mh_score, oof_score = microhomology.compute_score(sixtymer)
                feat.ix[ps,"mh_score"] = mh_score
                feat.ix[ps,"oof_score"] = oof_score
            print "computed microhomology of %s" % (str(gene))
    return pandas.DataFrame(feat, dtype='float')
def local_gene_seq_features(gene_names, learn_options, X):
    # For each guide, extract k nt of gene context on each side of the guide
    # (k = learn_options['include_gene_guide_feature']) and featurize those
    # windows with the usual n-gram features.  Returns a dict of feature
    # DataFrames keyed by window name and order.
    print "building local gene sequence features"
    feat = pandas.DataFrame(index=X.index)
    feat["gene_left_win"] = ""
    feat["gene_right_win"] = ""
    # number of nulceotides to take to the left and right of the guide
    k_mer_length = learn_options['include_gene_guide_feature']
    for gene in gene_names.unique():
        gene_seq = Seq.Seq(util.get_gene_sequence(gene)).reverse_complement()
        for ps in np.where(gene_names.values==gene)[0]:
            guide_seq = Seq.Seq(X['30mer'][ps])
            strand = X['Strand'][ps]
            if strand=='sense':
                guide_seq = guide_seq.reverse_complement()
                #gene_seq = gene_seq.reverse_complement()
            # figure out the sequence to the left and right of this guide, in the gene
            ind = gene_seq.find(guide_seq)
            if ind ==-1:
                #gene_seq = gene_seq.reverse_complement()
                #ind = gene_seq.find(guide_seq)
                assert ind != -1, "could not find guide in gene"
            assert gene_seq[ind:(ind+len(guide_seq))]==guide_seq, "match not right"
            left_win = gene_seq[(ind - k_mer_length):ind]
            right_win = gene_seq[(ind + len(guide_seq)):(ind + len(guide_seq) + k_mer_length)]
            if strand=='antisense':
                # it's arbitrary which of sense and anti-sense we flip, we just want
                # to keep them in the same relative alphabet/direction
                left_win = left_win.reverse_complement()
                right_win = right_win.reverse_complement()
            assert not left_win.tostring()=="", "k_mer_context, %s, is too large" % k_mer_length
            assert not left_win.tostring()=="", "k_mer_context, %s, is too large" % k_mer_length
            assert len(left_win)==len(right_win), "k_mer_context, %s, is too large" % k_mer_length
            feat.ix[ps,"gene_left_win"] = left_win.tostring()
            feat.ix[ps,"gene_right_win"] = right_win.tostring()
        print "featurizing local context of %s" % (gene)
    feature_sets = {}
    get_all_order_nuc_features(feat["gene_left_win"], feature_sets, learn_options, learn_options["order"], max_index_to_use=sys.maxint, prefix="gene_left_win")
    get_all_order_nuc_features(feat["gene_right_win"], feature_sets, learn_options, learn_options["order"], max_index_to_use=sys.maxint, prefix="gene_right_win")
    return feature_sets
def gene_feature(Y, X, learn_options):
    '''
    Things like the sequence of the gene, the DNA Tm of the gene, etc.

    Returns a DataFrame (one row per guide, indexed like Y['Target gene'])
    with gene length, GC content, melting temperature and molecular weight
    of the full gene sequence.
    '''
    gene_names = Y['Target gene']
    gene_length = np.zeros((gene_names.values.shape[0], 1))
    gc_content = np.zeros((gene_names.shape[0], 1))
    temperature = np.zeros((gene_names.shape[0], 1))
    molecular_weight = np.zeros((gene_names.shape[0], 1))
    # Compute once per distinct gene, broadcast to every guide of that gene
    for gene in gene_names.unique():
        seq = util.get_gene_sequence(gene)
        gene_length[gene_names.values==gene] = len(seq)
        gc_content[gene_names.values==gene] = SeqUtil.GC(seq)
        temperature[gene_names.values==gene] = Tm.Tm_staluc(seq, rna=False)
        molecular_weight[gene_names.values==gene] = SeqUtil.molecular_weight(seq, 'DNA')
    # NOTE(review): `all` shadows the builtin; kept for byte-identity.
    all = np.concatenate((gene_length, gc_content, temperature, molecular_weight), axis=1)
    df = pandas.DataFrame(data=all, index=gene_names.index, columns=['gene length',
                                                                     'gene GC content',
                                                                     'gene temperature',
                                                                     'gene molecular weight'])
    return df
def gene_guide_feature(Y, X, learn_options):
    #features, which are related to parts of the gene-local to the guide, and
    #possibly incorporating the guide or interactions with it
    #expensive, so pickle if necessary
    # NOTE(review): the cache-read branch is disabled (`if False`), so the
    # features are always recomputed and re-pickled; the pickle path is a
    # Windows-style relative path.
    gene_file = r"..\data\gene_seq_feat_V%s_km%s.ord%s.pickle" % (learn_options['V'], learn_options['include_gene_guide_feature'], learn_options['order'])
    if False: #os.path.isfile(gene_file): #while debugging, comment out
        print "loading local gene seq feats from file %s" % gene_file
        with open(gene_file, "rb") as f: feature_sets = pickle.load(f)
    else:
        feature_sets = local_gene_seq_features(Y['Target gene'], learn_options, X)
        print "writing local gene seq feats to file %s" % gene_file
        with open(gene_file, "wb") as f: pickle.dump(feature_sets, f)
    return feature_sets
def gc_cont(seq):
    """Fraction of *seq* that is 'G' or 'C' (0.0 .. 1.0)."""
    gc = sum(seq.count(base) for base in 'GC')
    return gc / float(len(seq))
def Tm_feature(data, pam_audit=True):
    '''
    assuming '30-mer'is a key
    get melting temperature features from:
        0-the 30-mer ("global Tm")
        1-the Tm (melting temperature) of the DNA:RNA hybrid from positions 16 - 20 of the sgRNA, i.e. the 5nts immediately proximal of the NGG PAM
        2-the Tm of the DNA:RNA hybrid from position 8 - 15 (i.e. 8 nt)
        3-the Tm of the DNA:RNA hybrid from position 3 - 7 (i.e. 5 nt)

    pam_audit -- when True, verify positions 25:27 really are "GG".
    Returns a 4-column DataFrame indexed like `data`.
    '''
    sequence = data['30mer'].values
    featarray = np.ones((sequence.shape[0],4))
    for i, seq in enumerate(sequence):
        if pam_audit and seq[25:27]!="GG":
            raise Exception("expected GG but found %s" % seq[25:27])
        # All Tm values are computed in DNA mode (rna=False)
        rna = False
        featarray[i,0] = Tm.Tm_staluc(seq, rna=rna)        #30mer Tm
        featarray[i,1] = Tm.Tm_staluc(seq[19:24], rna=rna) #5nts immediately proximal of the NGG PAM
        featarray[i,2] = Tm.Tm_staluc(seq[11:19], rna=rna)   #8-mer
        featarray[i,3] = Tm.Tm_staluc(seq[6:11], rna=rna)      #5-mer
    feat = pandas.DataFrame(featarray, index=data.index, columns=["Tm global_%s" % rna, "5mer_end_%s" %rna, "8mer_middle_%s" %rna, "5mer_start_%s" %rna])
    return feat
def gc_features(data, audit=True):
    # Return (gc_above_10, gc_below_10, gc_count) Series for each 30mer.
    # gc_count counts non-A/T characters in the 20mer guide (see countGC);
    # the two 0/1 indicators are both 0 when the count is exactly 10.
    gc_count = data['30mer'].apply(lambda seq: countGC(seq, audit))
    gc_count.name = 'GC count'
    gc_above_10 = (gc_count > 10)*1
    gc_above_10.name = 'GC > 10'
    gc_below_10 = (gc_count < 10)*1
    gc_below_10.name = 'GC < 10'
    return gc_above_10, gc_below_10, gc_count
def pam_features(data, audit=True):
    # Return (pam_above_1, pam_equals_1, pam_count) Series for each 30mer.
    # pam_count is the stride-3 GG scan from countPAM; note that
    # 'PAM = 1' is actually a "count < 2" indicator, so it is also 1
    # when no PAM is found.
    pam_count = data['30mer'].apply(lambda seq: countPAM(seq, audit))
    pam_count.name = 'PAM count'
    pam_above_1 = (pam_count > 1)*1
    pam_above_1.name = 'PAM > 1'
    pam_equals_1 = (pam_count < 2)*1
    pam_equals_1.name = 'PAM = 1'
    return pam_above_1, pam_equals_1, pam_count
def repeat_features(data, audit=True):
    # Return (repeat_above_0, repeat_equals_0, repeat_count) Series for each
    # 30mer, based on the non-overlapping-codon repeat count (countRepeats).
    # 'repeat < 1' is the complement indicator of 'repeat > 0'.
    repeat_count = data['30mer'].apply(lambda seq: countRepeats(seq, audit))
    repeat_count.name = 'repeat count'
    repeat_above_0 = (repeat_count > 0)*1
    repeat_above_0.name = 'repeat > 0'
    repeat_equals_0 = (repeat_count < 1)*1
    repeat_equals_0.name = 'repeat < 1'
    return repeat_above_0, repeat_equals_0, repeat_count
def normalize_features(data, axis):
    '''
    Mean-center and scale each feature to unit variance.

    input: pandas.DataFrame of dtype=np.float64
    axis: axis along which mean/std are computed (0 = per column)

    Mutates *data* in place (-=, /=) and returns it with any column that
    ends up containing NaNs (e.g. zero-variance features, whose std is 0)
    dropped.
    '''
    data -= data.mean(axis)
    data /= data.std(axis)
    # remove rows with NaNs
    # (positional `dropna(1)` was deprecated and removed in pandas 2.0;
    # the keyword form is equivalent and works on all versions)
    data = data.dropna(axis=1)
    if np.any(np.isnan(data.values)):
        raise Exception("found NaN in normalized features")
    return data
def apply_nucleotide_features(seq_data_frame, order, num_proc, include_pos_independent, max_index_to_use, prefix=""):
    """Featurize every sequence in *seq_data_frame* with nucleotide_features.

    seq_data_frame -- pandas Series/DataFrame of sequences
    order -- n-gram order
    num_proc -- accepted for interface compatibility; unused (the old
                `fast = True` local was dead code and has been removed)
    include_pos_independent -- when True, also return position-independent
                               features as the second element of a tuple

    Returns feat_pd or (feat_pd, feat_pi).
    """
    # Position-dependent features are needed either way; compute them once
    # (the original duplicated this call in both branches).
    feat_pd = seq_data_frame.apply(
        nucleotide_features,
        args=(order, max_index_to_use, prefix, 'pos_dependent'))
    assert not np.any(np.isnan(feat_pd)), "nans here can arise from sequences of different lengths"
    if not include_pos_independent:
        return feat_pd
    feat_pi = seq_data_frame.apply(
        nucleotide_features,
        args=(order, max_index_to_use, prefix, 'pos_independent'))
    assert not np.any(np.isnan(feat_pi)), "nans here can arise from sequences of different lengths"
    return feat_pd, feat_pi
def get_alphabet(order, raw_alphabet = ['A', 'T', 'C', 'G']):
    """Return (alphabet, raw_alphabet), where alphabet lists every
    order-length word over raw_alphabet, with the last position varying
    fastest (itertools.product order)."""
    words = ['']
    for _ in range(order):
        words = [w + letter for w in words for letter in raw_alphabet]
    return words, raw_alphabet
def nucleotide_features(s, order, max_index_to_use, prefix="", feature_type='all', raw_alphabet = ['A', 'T', 'C', 'G']):
    '''
    compute position-specific order-mer features for the 4-letter alphabet
    (e.g. for a sequence of length 30, there are 30*4 single nucleotide features
    and (30-1)*4^2=464 double nucleotide features

    s -- sequence string
    order -- n-gram order
    max_index_to_use -- prefix length of s to featurize
    prefix -- prepended to every feature index name
    feature_type -- 'all' returns a (pos_dependent, pos_independent) tuple
                    of Series; 'pos_dependent'/'pos_independent' return one

    Raises ValueError (via list.index) if s contains a character outside
    raw_alphabet.
    '''
    assert feature_type in ['all', 'pos_independent', 'pos_dependent']
    # NOTE(review): this comparison looks inverted w.r.t. the old warning
    # comment ("trimming max_index_to_use down") -- as written it disables
    # trimming -- but it is kept byte-identical so the feature layout of
    # existing trained models is preserved.
    if max_index_to_use <= len(s):
        max_index_to_use = len(s)
    if max_index_to_use is not None:
        s = s[:max_index_to_use]
    # All order-length words over the raw alphabet, in itertools.product
    # order (inlined from get_alphabet so this function is self-contained).
    alphabet = ["".join(i) for i in itertools.product(raw_alphabet, repeat=order)]
    features_pos_dependent = np.zeros(len(alphabet)*(len(s)-(order-1)))
    features_pos_independent = np.zeros(np.power(len(raw_alphabet), order))
    for position in range(0, len(s)-order+1, 1):
        nucl = s[position:position+order]
        features_pos_dependent[alphabet.index(nucl) + (position*len(alphabet))] = 1.0
        features_pos_independent[alphabet.index(nucl)] += 1.0
    index_dependent = ['%s_pd.Order%d_P%d' % (prefix, order, i) for i in range(len(features_pos_dependent))]
    if np.any(np.isnan(features_pos_dependent)):
        raise Exception("found nan features in features_pos_dependent")
    if np.any(np.isnan(features_pos_independent)):
        raise Exception("found nan features in features_pos_independent")
    if feature_type == 'all' or feature_type == 'pos_independent':
        index_independent = ['%s_pi.Order%d_P%d' % (prefix, order, i) for i in range(len(features_pos_independent))]
        if feature_type == 'all':
            res_dep = pandas.Series(features_pos_dependent, index=index_dependent)
            res_ind = pandas.Series(features_pos_independent, index=index_independent)
            # Bug fix: the old code built the (dep, ind) tuple first and then
            # called .values on the *tuple*, raising AttributeError whenever
            # feature_type == 'all'; check each Series separately instead.
            assert not np.any(np.isnan(res_dep.values))
            assert not np.any(np.isnan(res_ind.values))
            return res_dep, res_ind
        res = pandas.Series(features_pos_independent, index=index_independent)
        assert not np.any(np.isnan(res.values))
        return res
    res = pandas.Series(features_pos_dependent, index=index_dependent)
    assert not np.any(np.isnan(res.values))
    return res
def nucleotide_features_dictionary(prefix=''):
    """Map k-mer feature index names to human-readable labels.

    For orders 1-3 over the 30-nt guide context, builds the same
    '<prefix>_pd.Order<k>_P<i>' / '<prefix>_pi.Order<k>_P<i>' index names used
    by the feature computation and pairs each with a readable label such as
    'AA_-4' (position-dependent) or 'AA' (position-independent).

    Returns a dict {index_name: readable_label}.
    """
    # Readable labels for the 30 positions: 4 upstream bases, 20 spacer
    # positions, the NGG PAM, then 3 downstream bases.
    seqname = (['-4', '-3', '-2', '-1']
               + [str(i) for i in range(1, 21)]
               + ['N', 'G', 'G', '+1', '+2', '+3'])
    sequence = 30
    raw_alphabet = ['A', 'T', 'C', 'G']

    feature_names_dep = []
    feature_names_indep = []
    index_dependent = []
    index_independent = []
    for order in [1, 2, 3]:
        alphabet = ["".join(letters) for letters in itertools.product(raw_alphabet, repeat=order)]
        num_pd = len(alphabet) * (sequence - (order - 1))
        num_pi = len(raw_alphabet) ** order
        index_dependent += ['%s_pd.Order%d_P%d' % (prefix, order, i) for i in range(num_pd)]
        index_independent += ['%s_pi.Order%d_P%d' % (prefix, order, i) for i in range(num_pi)]
        # One dependent label per (position, k-mer); one independent per k-mer.
        for pos in range(sequence - (order - 1)):
            feature_names_dep += ['%s_%s' % (kmer, seqname[pos]) for kmer in alphabet]
        feature_names_indep += ['%s' % kmer for kmer in alphabet]
    assert len(feature_names_indep) == len(index_independent)
    assert len(feature_names_dep) == len(index_dependent)
    return dict(zip(index_dependent + index_independent,
                    feature_names_dep + feature_names_indep))
def normalize_feature_sets(feature_sets):
'''
zero-mean, unit-variance each feature within each set
'''
print "Normalizing features..."
t1 = time.time()
new_feature_sets = {}
for set in feature_sets:
new_feature_sets[set] = normalize_features(feature_sets[set],axis=0)
if np.any(np.isnan(new_feature_sets[set].values)):
raise Exception("found Nan feature values in set=%s" % set)
assert new_feature_sets[set].shape[1] > 0, "0 columns of features"
t2 = time.time()
print "\t\tElapsed time for normalizing features is %.2f seconds" % (t2-t1)
return new_feature_sets
| bsd-3-clause |
sheppard/django-rest-framework | rest_framework/utils/encoders.py | 88 | 2310 | """
Helper classes for parsers.
"""
from __future__ import unicode_literals
import datetime
import decimal
import json
import uuid
from django.db.models.query import QuerySet
from django.utils import six, timezone
from django.utils.encoding import force_text
from django.utils.functional import Promise
from rest_framework.compat import total_seconds
class JSONEncoder(json.JSONEncoder):
    """
    JSONEncoder subclass that knows how to encode date/time/timedelta,
    decimal types, generators and other basic python objects.
    """
    def default(self, obj):
        # For Date Time string spec, see ECMA 262
        # http://ecma-international.org/ecma-262/5.1/#sec-15.9.1.15
        if isinstance(obj, Promise):
            # Lazy translation proxies: force evaluation to text.
            return force_text(obj)
        elif isinstance(obj, datetime.datetime):
            representation = obj.isoformat()
            if obj.microsecond:
                # Truncate microseconds to milliseconds (ECMA 262 format).
                representation = representation[:23] + representation[26:]
            if representation.endswith('+00:00'):
                representation = representation[:-6] + 'Z'
            return representation
        elif isinstance(obj, datetime.date):
            return obj.isoformat()
        elif isinstance(obj, datetime.time):
            if timezone and timezone.is_aware(obj):
                raise ValueError("JSON can't represent timezone-aware times.")
            representation = obj.isoformat()
            if obj.microsecond:
                # Truncate microseconds to milliseconds.
                representation = representation[:12]
            return representation
        elif isinstance(obj, datetime.timedelta):
            return six.text_type(total_seconds(obj))
        elif isinstance(obj, decimal.Decimal):
            # Serializers will coerce decimals to strings by default.
            return float(obj)
        elif isinstance(obj, uuid.UUID):
            return six.text_type(obj)
        elif isinstance(obj, QuerySet):
            return tuple(obj)
        elif hasattr(obj, 'tolist'):
            # Numpy arrays and array scalars.
            return obj.tolist()
        elif hasattr(obj, '__getitem__'):
            try:
                return dict(obj)
            except Exception:
                # Not coercible to a dict; fall through to the base class,
                # which raises TypeError. (Was a bare `except:`, which also
                # swallowed SystemExit and KeyboardInterrupt.)
                pass
        elif hasattr(obj, '__iter__'):
            return tuple(item for item in obj)
        return super(JSONEncoder, self).default(obj)
| bsd-2-clause |
RockySteveJobs/python-for-android | python3-alpha/python3-src/Lib/test/test_unary.py | 179 | 1752 | """Test compiler changes for unary ops (+, -, ~) introduced in Python 2.2"""
import unittest
from test.support import run_unittest
class UnaryOpTestCase(unittest.TestCase):
    """Exercise the unary +, - and ~ operators."""

    def test_negative(self):
        self.assertTrue(-2 == 0 - 2)
        self.assertEqual(-0, 0)
        self.assertEqual(--2, 2)
        self.assertTrue(-2 == 0 - 2)
        self.assertTrue(-2.0 == 0 - 2.0)
        self.assertTrue(-2j == 0 - 2j)

    def test_positive(self):
        self.assertEqual(+2, 2)
        self.assertEqual(+0, 0)
        self.assertEqual(++2, 2)
        self.assertEqual(+2, 2)
        self.assertEqual(+2.0, 2.0)
        self.assertEqual(+2j, 2j)

    def test_invert(self):
        # This previously duplicated test_negative and never used `~`.
        # For integers, ~x is defined as -(x+1).
        self.assertEqual(~2, -3)
        self.assertEqual(~0, -1)
        self.assertEqual(~~2, 2)
        self.assertTrue(~-3 == 2)

    def test_no_overflow(self):
        nines = "9" * 32
        self.assertTrue(eval("+" + nines) == 10**32-1)
        self.assertTrue(eval("-" + nines) == -(10**32-1))
        self.assertTrue(eval("~" + nines) == ~(10**32-1))

    def test_negation_of_exponentiation(self):
        # Make sure '**' does the right thing; these form a
        # regression test for SourceForge bug #456756.
        self.assertEqual(-2 ** 3, -8)
        self.assertEqual((-2) ** 3, -8)
        self.assertEqual(-2 ** 4, -16)
        self.assertEqual((-2) ** 4, 16)

    def test_bad_types(self):
        # Unary ops reject str/bytes; ~ additionally rejects complex/float.
        for op in '+', '-', '~':
            self.assertRaises(TypeError, eval, op + "b'a'")
            self.assertRaises(TypeError, eval, op + "'a'")
        self.assertRaises(TypeError, eval, "~2j")
        self.assertRaises(TypeError, eval, "~2.0")
def test_main():
    # Entry point used by CPython's regrtest harness; runs all test methods
    # of UnaryOpTestCase.
    run_unittest(UnaryOpTestCase)

if __name__ == "__main__":
    # Allow running this test file directly from the command line.
    test_main()
| apache-2.0 |
KerstenDoering/PubMedPortable | full_text_index/RunXapian.py | 4 | 4217 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Copyright (c) 2014, Kersten Doering <kersten.doering@gmail.com>, Christian Senger <der.senger@googlemail.com>
"""
#Kersten HowTo:
#"python RunXapian.py -x" for indexing and searching
#"python RunXapian.py -f" for indexing
#"python RunXapian.py" for searching
#"python RunXapian.py -h" for help
import xappy
import os.path
import sys
import os
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from SynonymParser import SynonymParser
from PubMedXapian import PubMedXapian
from optparse import OptionParser
if __name__=="__main__":
    # Command-line driver: optionally build a Xapian full-text index from
    # PubMed articles stored in PostgreSQL, then optionally search it for the
    # synonyms listed in a text file (see the HowTo comments at the file top).
    parser = OptionParser()
    parser.add_option("-b", "--b_year", dest="b", help="year of the index to begin parsing (default: 1809)", default=1809)
    parser.add_option("-p", "--xapian_database_path", dest="xapian_database_path", help="Specify the path to the Xapian full text index.", default="xapian")
    parser.add_option("-e", "--e_year", dest="e", help="year of the index to end parsing (default: 2016)", default=2016)
    parser.add_option("-x", "--index", dest="x", action="store_true", default=False, help="Create Xapian index first (default: False)")
    parser.add_option("-s", "--synoynm_path", dest="s", help="relative path to synonym list (default: synonyms/pancreatic_cancer.txt)", default = "synonyms/pancreatic_cancer.txt")
    parser.add_option("-d", "--db_psql", dest="d", help="database in PostgreSQL to connect to (default: pancreatic_cancer_db)", default = "pancreatic_cancer_db")
    parser.add_option("-f", "--no_search", dest="f", action="store_false", help="find synonyms in Xapian database (default: True)", default=True)
    parser.add_option("-r", "--results_name", dest="r", help="name of the results file (default: results.csv)", default = "results")
    parser.add_option("-n", "--name_xapian_db", dest="n", help="name of the xapian database folder (default: xapian<e_year>)", default = "xapian")
    (options, args) = parser.parse_args()
    #in PubMed, first articles published are from 1809:
    #http://www.nlm.nih.gov/bsd/licensee/2015_stats/baseline_med_filecount.html
    #set range of years
    b_year = options.b
    e_year = options.e
    #set results filename
    filename = options.r
    #set Xapian database name (default: "xapian<e_year>")
    if options.n == "xapian":
        xapian_name = options.n + str(options.e)
    else:
        xapian_name = options.n
    #set PSQL database name
    database = options.d
    #set synonym path
    synonymPath = options.s
    #Synonym file to use
    if not (os.path.isfile(synonymPath)):
        sys.exit( "synonym file not existing - programme terminates" )
    if options.x:
        #import class Article from Article.py and connect to PostgreSQL database
        from Article import Article
        Article.getConnection(database)
        #select all articles in a range of years x >= b_year and x <= e_year
        articles = Article.getArticlesByYear(b_year,e_year)
        Article.closeConnection()
        print "\n-------------"
        print "processing files from year " + str(b_year) + " to " + str(e_year)
        print "-------------"
        print "got articles from PostgreSQL database"
        print "-------------"
    #take the last year to create directory
    indexer = PubMedXapian(xapian_name, xapianPath = options.xapian_database_path)
    #build full text index with Xapian for all articles selected before
    if options.x:
        print "now indexing articles in Xapian"
        indexer.buildIndexWithArticles(articles)
        print "\n-------------"
    # Whether freshly built or pre-existing, the index directory must exist
    # before searching.
    if not ( os.path.isdir( os.path.join(options.xapian_database_path, xapian_name) ) ):
        parser.print_help()
        exit("xapian files are not existing")
    if options.f:
        synonymParser = SynonymParser(synonymPath, indexer, filename)
        synonymParser.parseAndFind()
        if filename == "results":
            print "\nquery results written to %s.csv" % filename
        else:
            print "\nquery results written to %s" % filename
    else:
        print "no search of synonyms performed, use \"python RunXapian.py -h\" for parameter view"
| isc |
lseyesl/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/rebaseline.py | 119 | 23221 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import logging
import optparse
import sys
from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
from webkitpy.common.system.executive import ScriptError
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_expectations import TestExpectations, BASELINE_SUFFIX_LIST
from webkitpy.port import builders
from webkitpy.port import factory
from webkitpy.tool.multicommandtool import Command
_log = logging.getLogger(__name__)
# FIXME: Should TestResultWriter know how to compute this string?
def _baseline_name(fs, test_name, suffix):
    """Return the '-expected.<suffix>' baseline filename for test_name."""
    root = fs.splitext(test_name)[0]
    return "%s%s.%s" % (root, TestResultWriter.FILENAME_SUFFIX_EXPECTED, suffix)
class AbstractRebaseliningCommand(Command):
    """Base class holding the optparse options shared by all rebaselining
    webkit-patch commands."""
    # not overriding execute() - pylint: disable=W0223

    move_overwritten_baselines_option = optparse.make_option("--move-overwritten-baselines", action="store_true", default=False,
        help="Move overwritten baselines elsewhere in the baseline path. This is for bringing up new ports.")

    no_optimize_option = optparse.make_option('--no-optimize', dest='optimize', action='store_false', default=True,
        help=('Do not optimize/de-dup the expectations after rebaselining (default is to de-dup automatically). '
              'You can use "webkit-patch optimize-baselines" to optimize separately.'))

    platform_options = factory.platform_options(use_globs=True)

    results_directory_option = optparse.make_option("--results-directory", help="Local results directory to use")

    suffixes_option = optparse.make_option("--suffixes", default=','.join(BASELINE_SUFFIX_LIST), action="store",
        help="Comma-separated-list of file types to rebaseline")

    def __init__(self, options=None):
        super(AbstractRebaseliningCommand, self).__init__(options=options)
        # Suffixes to operate on; subclasses overwrite this from --suffixes.
        self._baseline_suffix_list = BASELINE_SUFFIX_LIST
class RebaselineTest(AbstractRebaseliningCommand):
    """Fetches new baselines for one test from one buildbot and updates
    TestExpectations; reports files needing `scm add` as JSON on stdout."""
    name = "rebaseline-test-internal"
    help_text = "Rebaseline a single test from a buildbot. Only intended for use by other webkit-patch commands."

    def __init__(self):
        super(RebaselineTest, self).__init__(options=[
            self.no_optimize_option,
            self.results_directory_option,
            self.suffixes_option,
            optparse.make_option("--builder", help="Builder to pull new baselines from"),
            optparse.make_option("--move-overwritten-baselines-to", action="append", default=[],
                help="Platform to move existing baselines to before rebaselining. This is for bringing up new ports."),
            optparse.make_option("--test", help="Test to rebaseline"),
        ])
        # Paths added to SCM; dumped as JSON for the parent command to parse.
        self._scm_changes = {'add': []}

    def _results_url(self, builder_name):
        # URL of the latest layout-test results for the named builder.
        return self._tool.buildbot.builder_with_name(builder_name).latest_layout_test_results_url()

    def _baseline_directory(self, builder_name):
        """Return the directory where this builder's new baselines belong;
        some builders override the default platform directory."""
        port = self._tool.port_factory.get_from_builder_name(builder_name)
        override_dir = builders.rebaseline_override_dir(builder_name)
        if override_dir:
            return self._tool.filesystem.join(port.layout_tests_dir(), 'platform', override_dir)
        return port.baseline_version_dir()

    def _copy_existing_baseline(self, move_overwritten_baselines_to, test_name, suffix):
        """Copy baselines about to be overwritten to the listed platforms."""
        old_baselines = []
        new_baselines = []

        # Need to gather all the baseline paths before modifying the filesystem since
        # the modifications can affect the results of port.expected_filename.
        for platform in move_overwritten_baselines_to:
            port = self._tool.port_factory.get(platform)
            old_baseline = port.expected_filename(test_name, "." + suffix)
            if not self._tool.filesystem.exists(old_baseline):
                _log.debug("No existing baseline for %s." % test_name)
                continue

            new_baseline = self._tool.filesystem.join(port.baseline_path(), self._file_name_for_expected_result(test_name, suffix))
            if self._tool.filesystem.exists(new_baseline):
                _log.debug("Existing baseline at %s, not copying over it." % new_baseline)
                continue

            old_baselines.append(old_baseline)
            new_baselines.append(new_baseline)

        for i in range(len(old_baselines)):
            old_baseline = old_baselines[i]
            new_baseline = new_baselines[i]

            _log.debug("Copying baseline from %s to %s." % (old_baseline, new_baseline))
            self._tool.filesystem.maybe_make_directory(self._tool.filesystem.dirname(new_baseline))
            self._tool.filesystem.copyfile(old_baseline, new_baseline)
            if not self._tool.scm().exists(new_baseline):
                self._add_to_scm(new_baseline)

    def _save_baseline(self, data, target_baseline):
        # data is None when the fetch 404'd; nothing to write in that case.
        if not data:
            return
        filesystem = self._tool.filesystem
        filesystem.maybe_make_directory(filesystem.dirname(target_baseline))
        filesystem.write_binary_file(target_baseline, data)
        if not self._tool.scm().exists(target_baseline):
            self._add_to_scm(target_baseline)

    def _add_to_scm(self, path):
        self._scm_changes['add'].append(path)

    def _update_expectations_file(self, builder_name, test_name):
        """Remove this test's expectation for the builder's configuration,
        guarded by a file lock against concurrent invocations."""
        port = self._tool.port_factory.get_from_builder_name(builder_name)

        # Since rebaseline-test-internal can be called multiple times in parallel,
        # we need to ensure that we're not trying to update the expectations file
        # concurrently as well.
        # FIXME: We should rework the code to not need this; maybe just download
        # the files in parallel and rebaseline local files serially?
        try:
            path = port.path_to_test_expectations_file()
            lock = self._tool.make_file_lock(path + '.lock')
            lock.acquire_lock()
            expectations = TestExpectations(port, include_generic=False, include_overrides=False)
            for test_configuration in port.all_test_configurations():
                if test_configuration.version == port.test_configuration().version:
                    expectationsString = expectations.remove_configuration_from_test(test_name, test_configuration)
            self._tool.filesystem.write_text_file(path, expectationsString)
        finally:
            lock.release_lock()

    def _test_root(self, test_name):
        # Test name without its extension.
        return self._tool.filesystem.splitext(test_name)[0]

    def _file_name_for_actual_result(self, test_name, suffix):
        return "%s-actual.%s" % (self._test_root(test_name), suffix)

    def _file_name_for_expected_result(self, test_name, suffix):
        return "%s-expected.%s" % (self._test_root(test_name), suffix)

    def _rebaseline_test(self, builder_name, test_name, move_overwritten_baselines_to, suffix, results_url):
        """Download one '-actual' result and install it as the new expected
        baseline, optionally preserving the baselines it overwrites."""
        baseline_directory = self._baseline_directory(builder_name)
        source_baseline = "%s/%s" % (results_url, self._file_name_for_actual_result(test_name, suffix))
        target_baseline = self._tool.filesystem.join(baseline_directory, self._file_name_for_expected_result(test_name, suffix))

        if move_overwritten_baselines_to:
            self._copy_existing_baseline(move_overwritten_baselines_to, test_name, suffix)

        _log.debug("Retrieving %s." % source_baseline)
        self._save_baseline(self._tool.web.get_binary(source_baseline, convert_404_to_None=True), target_baseline)

    def _rebaseline_test_and_update_expectations(self, options):
        # Prefer local results when --results-directory was given.
        if options.results_directory:
            results_url = 'file://' + options.results_directory
        else:
            results_url = self._results_url(options.builder)

        self._baseline_suffix_list = options.suffixes.split(',')
        for suffix in self._baseline_suffix_list:
            self._rebaseline_test(options.builder, options.test, options.move_overwritten_baselines_to, suffix, results_url)
        self._update_expectations_file(options.builder, options.test)

    def execute(self, options, args, tool):
        self._rebaseline_test_and_update_expectations(options)
        # Parent command parses this JSON to learn which files to `scm add`.
        print json.dumps(self._scm_changes)
class OptimizeBaselines(AbstractRebaseliningCommand):
name = "optimize-baselines"
help_text = "Reshuffles the baselines for the given tests to use as litte space on disk as possible."
argument_names = "TEST_NAMES"
def __init__(self):
super(OptimizeBaselines, self).__init__(options=[self.suffixes_option] + self.platform_options)
def _optimize_baseline(self, optimizer, test_name):
for suffix in self._baseline_suffix_list:
baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
if not optimizer.optimize(baseline_name):
print "Heuristics failed to optimize %s" % baseline_name
def execute(self, options, args, tool):
self._baseline_suffix_list = options.suffixes.split(',')
port_names = tool.port_factory.all_port_names(options.platform)
if not port_names:
print "No port names match '%s'" % options.platform
return
optimizer = BaselineOptimizer(tool, port_names)
port = tool.port_factory.get(port_names[0])
for test_name in port.tests(args):
_log.info("Optimizing %s" % test_name)
self._optimize_baseline(optimizer, test_name)
class AnalyzeBaselines(AbstractRebaseliningCommand):
    """Prints, per baseline file, which checked-in platform copies are
    identical (and optionally which are missing)."""
    name = "analyze-baselines"
    help_text = "Analyzes the baselines for the given tests and prints results that are identical."
    argument_names = "TEST_NAMES"

    def __init__(self):
        super(AnalyzeBaselines, self).__init__(options=[
            self.suffixes_option,
            optparse.make_option('--missing', action='store_true', default=False, help='show missing baselines as well'),
        ] + self.platform_options)
        self._optimizer_class = BaselineOptimizer  # overridable for testing
        self._baseline_optimizer = None
        self._port = None

    def _write(self, msg):
        # Indirection so tests can capture output by overriding this method.
        print msg

    def _analyze_baseline(self, options, test_name):
        for suffix in self._baseline_suffix_list:
            baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
            results_by_directory = self._baseline_optimizer.read_results_by_directory(baseline_name)
            if results_by_directory:
                self._write("%s:" % baseline_name)
                self._baseline_optimizer.write_by_directory(results_by_directory, self._write, "  ")
            elif options.missing:
                self._write("%s: (no baselines found)" % baseline_name)

    def execute(self, options, args, tool):
        self._baseline_suffix_list = options.suffixes.split(',')
        port_names = tool.port_factory.all_port_names(options.platform)
        if not port_names:
            print "No port names match '%s'" % options.platform
            return
        self._baseline_optimizer = self._optimizer_class(tool, port_names)
        self._port = tool.port_factory.get(port_names[0])
        for test_name in self._port.tests(args):
            self._analyze_baseline(options, test_name)
class AbstractParallelRebaselineCommand(AbstractRebaseliningCommand):
    """Shared machinery for commands that rebaseline many tests by spawning
    parallel 'rebaseline-test-internal' child processes."""
    # not overriding execute() - pylint: disable=W0223

    def _run_webkit_patch(self, args, verbose):
        # Run another webkit-patch command, forwarding its stderr to ours.
        try:
            verbose_args = ['--verbose'] if verbose else []
            stderr = self._tool.executive.run_command([self._tool.path()] + verbose_args + args, cwd=self._tool.scm().checkout_root, return_stderr=True)
            for line in stderr.splitlines():
                print >> sys.stderr, line
        except ScriptError, e:
            _log.error(e)

    def _builders_to_fetch_from(self, builders_to_check):
        # This routine returns the subset of builders that will cover all of the baseline search paths
        # used in the input list. In particular, if the input list contains both Release and Debug
        # versions of a configuration, we *only* return the Release version (since we don't save
        # debug versions of baselines).
        release_builders = set()
        debug_builders = set()
        builders_to_fallback_paths = {}
        for builder in builders_to_check:
            port = self._tool.port_factory.get_from_builder_name(builder)
            if port.test_configuration().build_type == 'Release':
                release_builders.add(builder)
            else:
                debug_builders.add(builder)
        for builder in list(release_builders) + list(debug_builders):
            port = self._tool.port_factory.get_from_builder_name(builder)
            fallback_path = port.baseline_search_path()
            if fallback_path not in builders_to_fallback_paths.values():
                builders_to_fallback_paths[builder] = fallback_path
        return builders_to_fallback_paths.keys()

    def _rebaseline_commands(self, test_list, options):
        """Build (cmd_line, cwd) tuples for rebaseline-test-internal children,
        one per (test, builder) pair."""
        path_to_webkit_patch = self._tool.path()
        cwd = self._tool.scm().checkout_root
        commands = []
        for test in test_list:
            for builder in self._builders_to_fetch_from(test_list[test]):
                suffixes = ','.join(test_list[test][builder])
                cmd_line = [path_to_webkit_patch, 'rebaseline-test-internal', '--suffixes', suffixes, '--builder', builder, '--test', test]
                if options.move_overwritten_baselines:
                    move_overwritten_baselines_to = builders.move_overwritten_baselines_to(builder)
                    for platform in move_overwritten_baselines_to:
                        cmd_line.extend(['--move-overwritten-baselines-to', platform])
                if options.results_directory:
                    cmd_line.extend(['--results-directory', options.results_directory])
                if options.verbose:
                    cmd_line.append('--verbose')
                commands.append(tuple([cmd_line, cwd]))
        return commands

    def _files_to_add(self, command_results):
        """Collect the 'add' file lists from each child's JSON stdout."""
        files_to_add = set()
        for output in [result[1].split('\n') for result in command_results]:
            file_added = False
            for line in output:
                try:
                    if line:
                        files_to_add.update(json.loads(line)['add'])
                        file_added = True
                except ValueError:
                    _log.debug('"%s" is not a JSON object, ignoring' % line)

            if not file_added:
                _log.debug('Could not add file based off output "%s"' % output)

        return list(files_to_add)

    def _optimize_baselines(self, test_list, verbose=False):
        # We don't run this in parallel because modifying the SCM in parallel is unreliable.
        for test in test_list:
            all_suffixes = set()
            for builder in self._builders_to_fetch_from(test_list[test]):
                all_suffixes.update(test_list[test][builder])
            # FIXME: We should propagate the platform options as well.
            self._run_webkit_patch(['optimize-baselines', '--suffixes', ','.join(all_suffixes), test], verbose)

    def _rebaseline(self, options, test_list):
        """Full rebaseline flow: log the plan, run children in parallel,
        `scm add` new files, then optionally optimize the baselines."""
        for test, builders_to_check in sorted(test_list.items()):
            _log.info("Rebaselining %s" % test)
            for builder, suffixes in sorted(builders_to_check.items()):
                _log.debug("  %s: %s" % (builder, ",".join(suffixes)))

        commands = self._rebaseline_commands(test_list, options)
        command_results = self._tool.executive.run_in_parallel(commands)

        log_output = '\n'.join(result[2] for result in command_results).replace('\n\n', '\n')
        for line in log_output.split('\n'):
            if line:
                print >> sys.stderr, line  # FIXME: Figure out how to log properly.

        files_to_add = self._files_to_add(command_results)
        if files_to_add:
            self._tool.scm().add_list(list(files_to_add))
        if options.optimize:
            self._optimize_baselines(test_list, options.verbose)
class RebaselineJson(AbstractParallelRebaselineCommand):
    """Rebaselines from a test->builder->suffixes JSON map read on stdin."""

    name = "rebaseline-json"
    help_text = "Rebaseline based off JSON passed to stdin. Intended to only be called from other scripts."

    def __init__(self):
        command_options = [
            self.move_overwritten_baselines_option,
            self.no_optimize_option,
            self.results_directory_option,
        ]
        super(RebaselineJson, self).__init__(options=command_options)

    def execute(self, options, args, tool):
        test_list = json.loads(sys.stdin.read())
        self._rebaseline(options, test_list)
class RebaselineExpectations(AbstractParallelRebaselineCommand):
    """Rebaselines every test currently marked Rebaseline in the
    TestExpectations files, then removes those markers."""
    name = "rebaseline-expectations"
    help_text = "Rebaselines the tests indicated in TestExpectations."

    def __init__(self):
        super(RebaselineExpectations, self).__init__(options=[
            self.move_overwritten_baselines_option,
            self.no_optimize_option,
        ] + self.platform_options)
        # test name -> {builder name -> [suffixes]}; filled per port.
        self._test_list = None

    def _update_expectations_files(self, port_name):
        # Drop the Rebaseline markers we just handled for this port.
        port = self._tool.port_factory.get(port_name)
        expectations = TestExpectations(port)
        for path in port.expectations_dict():
            if self._tool.filesystem.exists(path):
                self._tool.filesystem.write_text_file(path, expectations.remove_rebaselined_tests(expectations.get_rebaselining_failures(), path))

    def _tests_to_rebaseline(self, port):
        """Map each Rebaseline-marked test to the baseline suffixes it needs."""
        tests_to_rebaseline = {}
        expectations = TestExpectations(port, include_overrides=True)
        for test in expectations.get_rebaselining_failures():
            tests_to_rebaseline[test] = TestExpectations.suffixes_for_expectations(expectations.get_expectations(test))
        return tests_to_rebaseline

    def _add_tests_to_rebaseline_for_port(self, port_name):
        # Ports with no associated builder are skipped silently.
        builder_name = builders.builder_name_for_port_name(port_name)
        if not builder_name:
            return
        tests = self._tests_to_rebaseline(self._tool.port_factory.get(port_name)).items()

        if tests:
            _log.info("Retrieving results for %s from %s." % (port_name, builder_name))

        for test_name, suffixes in tests:
            _log.info("    %s (%s)" % (test_name, ','.join(suffixes)))
            if test_name not in self._test_list:
                self._test_list[test_name] = {}
            self._test_list[test_name][builder_name] = suffixes

    def execute(self, options, args, tool):
        options.results_directory = None
        self._test_list = {}
        port_names = tool.port_factory.all_port_names(options.platform)
        for port_name in port_names:
            self._add_tests_to_rebaseline_for_port(port_name)
        if not self._test_list:
            _log.warning("Did not find any tests marked Rebaseline.")
            return

        self._rebaseline(options, self._test_list)

        for port_name in port_names:
            self._update_expectations_files(port_name)
class Rebaseline(AbstractParallelRebaselineCommand):
    """Interactive rebaseline: pick builders (via --builders or a prompt) and
    tests (arguments or a prompt), then pull new baselines for them."""
    name = "rebaseline"
    help_text = "Rebaseline tests with results from the build bots. Shows the list of failing tests on the builders if no test names are provided."
    argument_names = "[TEST_NAMES]"

    def __init__(self):
        super(Rebaseline, self).__init__(options=[
            self.move_overwritten_baselines_option,
            self.no_optimize_option,
            # FIXME: should we support the platform options in addition to (or instead of) --builders?
            self.suffixes_option,
            optparse.make_option("--builders", default=None, action="append", help="Comma-separated-list of builders to pull new baselines from (can also be provided multiple times)"),
        ])

    def _builders_to_pull_from(self):
        # Prompt the user when no builders were given on the command line.
        webkit_buildbot_builder_names = []
        for name in builders.all_builder_names():
            webkit_buildbot_builder_names.append(name)
        titles = ["build.webkit.org bots"]
        lists = [webkit_buildbot_builder_names]
        chosen_names = self._tool.user.prompt_with_multiple_lists("Which builder to pull results from:", titles, lists, can_choose_multiple=True)
        return [self._builder_with_name(name) for name in chosen_names]

    def _builder_with_name(self, name):
        return self._tool.buildbot.builder_with_name(name)

    def _tests_to_update(self, builder):
        # Prompt for which of the builder's text-mismatch failures to rebaseline.
        failing_tests = builder.latest_layout_test_results().tests_matching_failure_types([test_failures.FailureTextMismatch])
        return self._tool.user.prompt_with_list("Which test(s) to rebaseline for %s:" % builder.name(), failing_tests, can_choose_multiple=True)

    def execute(self, options, args, tool):
        options.results_directory = None
        if options.builders:
            builders_to_check = []
            for builder_names in options.builders:
                builders_to_check += [self._builder_with_name(name) for name in builder_names.split(",")]
        else:
            builders_to_check = self._builders_to_pull_from()

        test_list = {}
        suffixes_to_update = options.suffixes.split(",")

        for builder in builders_to_check:
            tests = args or self._tests_to_update(builder)
            for test in tests:
                if test not in test_list:
                    test_list[test] = {}
                test_list[test][builder.name()] = suffixes_to_update

        if options.verbose:
            _log.debug("rebaseline-json: " + str(test_list))

        self._rebaseline(options, test_list)
| bsd-3-clause |
Scapogo/zipline | zipline/finance/blotter.py | 5 | 14953 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logbook import Logger
from collections import defaultdict
from copy import copy
from six import iteritems
from zipline.assets import Equity, Future, Asset
from zipline.finance.order import Order
from zipline.finance.slippage import (
DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT,
VolatilityVolumeShare,
VolumeShareSlippage,
)
from zipline.finance.commission import (
DEFAULT_PER_CONTRACT_COST,
FUTURE_EXCHANGE_FEES_BY_SYMBOL,
PerContract,
PerShare,
)
from zipline.finance.cancel_policy import NeverCancel
from zipline.utils.input_validation import expect_types
log = Logger('Blotter')
warning_logger = Logger('AlgoWarning')
class Blotter(object):
    def __init__(self, data_frequency, equity_slippage=None,
                 future_slippage=None, equity_commission=None,
                 future_commission=None, cancel_policy=None):
        """Initialize order bookkeeping and per-asset-class slippage and
        commission models (falling back to zipline defaults when not given)."""
        # these orders are aggregated by asset
        self.open_orders = defaultdict(list)
        # keep a dict of orders by their own id
        self.orders = {}
        # holding orders that have come in since the last event.
        self.new_orders = []
        # Simulation timestamp of the current bar; set via set_date().
        self.current_dt = None

        # Hard cap on a single order's size; order() raises OverflowError
        # beyond this.
        self.max_shares = int(1e+11)

        self.slippage_models = {
            Equity: equity_slippage or VolumeShareSlippage(),
            Future: future_slippage or VolatilityVolumeShare(
                volume_limit=DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT,
            ),
        }
        self.commission_models = {
            Equity: equity_commission or PerShare(),
            Future: future_commission or PerContract(
                cost=DEFAULT_PER_CONTRACT_COST,
                exchange_fee=FUTURE_EXCHANGE_FEES_BY_SYMBOL,
            ),
        }

        self.data_frequency = data_frequency

        # Orders are never auto-cancelled unless a policy is supplied.
        self.cancel_policy = cancel_policy if cancel_policy else NeverCancel()
    def __repr__(self):
        # Multi-line debug representation listing the blotter's models and
        # current order state.
        return """
{class_name}(
    slippage_models={slippage_models},
    commission_models={commission_models},
    open_orders={open_orders},
    orders={orders},
    new_orders={new_orders},
    current_dt={current_dt})
""".strip().format(class_name=self.__class__.__name__,
                   slippage_models=self.slippage_models,
                   commission_models=self.commission_models,
                   open_orders=self.open_orders,
                   orders=self.orders,
                   new_orders=self.new_orders,
                   current_dt=self.current_dt)
    def set_date(self, dt):
        """Record ``dt`` as the current simulation bar timestamp."""
        self.current_dt = dt
    @expect_types(asset=Asset)
    def order(self, asset, amount, style, order_id=None):
        """Place an order.

        Parameters
        ----------
        asset : zipline.assets.Asset
            The asset that this order is for.
        amount : int
            The amount of shares to order. If ``amount`` is positive, this is
            the number of shares to buy or cover. If ``amount`` is negative,
            this is the number of shares to sell or short.
        style : zipline.finance.execution.ExecutionStyle
            The execution style for the order.
        order_id : str, optional
            The unique identifier for this order.

        Returns
        -------
        order_id : str or None
            The unique identifier for this order, or None if no order was
            placed.

        Raises
        ------
        OverflowError
            If ``amount`` exceeds ``self.max_shares``.

        Notes
        -----
        amount > 0 :: Buy/Cover
        amount < 0 :: Sell/Short
        Market order: order(asset, amount)
        Limit order: order(asset, amount, style=LimitOrder(limit_price))
        Stop order: order(asset, amount, style=StopOrder(stop_price))
        StopLimit order: order(asset, amount, style=StopLimitOrder(limit_price,
                stop_price))
        """
        # something could be done with amount to further divide
        # between buy by share count OR buy shares up to a dollar amount
        # numeric == share count AND "$dollar.cents" == cost amount

        if amount == 0:
            # Don't bother placing orders for 0 shares.
            return None
        elif amount > self.max_shares:
            # Arbitrary limit of 100 billion (US) shares will never be
            # exceeded except by a buggy algorithm.
            raise OverflowError("Can't order more than %d shares" %
                                self.max_shares)

        is_buy = (amount > 0)
        # Stop/limit prices come from the execution style, which may round
        # them differently for buys vs. sells.
        order = Order(
            dt=self.current_dt,
            asset=asset,
            amount=amount,
            stop=style.get_stop_price(is_buy),
            limit=style.get_limit_price(is_buy),
            id=order_id
        )

        # Track the order three ways: the per-asset open-order queue, the
        # global id map, and the list of orders new since the last event.
        self.open_orders[order.asset].append(order)
        self.orders[order.id] = order
        self.new_orders.append(order)

        return order.id
def batch_order(self, order_arg_lists):
"""Place a batch of orders.
Parameters
----------
order_arg_lists : iterable[tuple]
Tuples of args that `order` expects.
Returns
-------
order_ids : list[str or None]
The unique identifier (or None) for each of the orders placed
(or not placed).
Notes
-----
This is required for `Blotter` subclasses to be able to place a batch
of orders, instead of being passed the order requests one at a time.
"""
return [self.order(*order_args) for order_args in order_arg_lists]
def cancel(self, order_id, relay_status=True):
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
if cur_order.open:
order_list = self.open_orders[cur_order.asset]
if cur_order in order_list:
order_list.remove(cur_order)
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.cancel()
cur_order.dt = self.current_dt
if relay_status:
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def cancel_all_orders_for_asset(self, asset, warn=False,
relay_status=True):
"""
Cancel all open orders for a given asset.
"""
# (sadly) open_orders is a defaultdict, so this will always succeed.
orders = self.open_orders[asset]
# We're making a copy here because `cancel` mutates the list of open
# orders in place. The right thing to do here would be to make
# self.open_orders no longer a defaultdict. If we do that, then we
# should just remove the orders once here and be done with the matter.
for order in orders[:]:
self.cancel(order.id, relay_status)
if warn:
# Message appropriately depending on whether there's
# been a partial fill or not.
if order.filled > 0:
warning_logger.warn(
'Your order for {order_amt} shares of '
'{order_sym} has been partially filled. '
'{order_filled} shares were successfully '
'purchased. {order_failed} shares were not '
'filled by the end of day and '
'were canceled.'.format(
order_amt=order.amount,
order_sym=order.asset.symbol,
order_filled=order.filled,
order_failed=order.amount - order.filled,
)
)
elif order.filled < 0:
warning_logger.warn(
'Your order for {order_amt} shares of '
'{order_sym} has been partially filled. '
'{order_filled} shares were successfully '
'sold. {order_failed} shares were not '
'filled by the end of day and '
'were canceled.'.format(
order_amt=order.amount,
order_sym=order.asset.symbol,
order_filled=-1 * order.filled,
order_failed=-1 * (order.amount - order.filled),
)
)
else:
warning_logger.warn(
'Your order for {order_amt} shares of '
'{order_sym} failed to fill by the end of day '
'and was canceled.'.format(
order_amt=order.amount,
order_sym=order.asset.symbol,
)
)
assert not orders
del self.open_orders[asset]
def execute_cancel_policy(self, event):
if self.cancel_policy.should_cancel(event):
warn = self.cancel_policy.warn_on_cancel
for asset in copy(self.open_orders):
self.cancel_all_orders_for_asset(asset, warn,
relay_status=False)
def reject(self, order_id, reason=''):
"""
Mark the given order as 'rejected', which is functionally similar to
cancelled. The distinction is that rejections are involuntary (and
usually include a message from a broker indicating why the order was
rejected) while cancels are typically user-driven.
"""
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
order_list = self.open_orders[cur_order.asset]
if cur_order in order_list:
order_list.remove(cur_order)
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.reject(reason=reason)
cur_order.dt = self.current_dt
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def hold(self, order_id, reason=''):
"""
Mark the order with order_id as 'held'. Held is functionally similar
to 'open'. When a fill (full or partial) arrives, the status
will automatically change back to open/filled as necessary.
"""
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
if cur_order.open:
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.hold(reason=reason)
cur_order.dt = self.current_dt
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def process_splits(self, splits):
"""
Processes a list of splits by modifying any open orders as needed.
Parameters
----------
splits: list
A list of splits. Each split is a tuple of (asset, ratio).
Returns
-------
None
"""
for asset, ratio in splits:
if asset not in self.open_orders:
continue
orders_to_modify = self.open_orders[asset]
for order in orders_to_modify:
order.handle_split(ratio)
def get_transactions(self, bar_data):
"""
Creates a list of transactions based on the current open orders,
slippage model, and commission model.
Parameters
----------
bar_data: zipline._protocol.BarData
Notes
-----
This method book-keeps the blotter's open_orders dictionary, so that
it is accurate by the time we're done processing open orders.
Returns
-------
transactions_list: List
transactions_list: list of transactions resulting from the current
open orders. If there were no open orders, an empty list is
returned.
commissions_list: List
commissions_list: list of commissions resulting from filling the
open orders. A commission is an object with "asset" and "cost"
parameters.
closed_orders: List
closed_orders: list of all the orders that have filled.
"""
closed_orders = []
transactions = []
commissions = []
if self.open_orders:
for asset, asset_orders in iteritems(self.open_orders):
slippage = self.slippage_models[type(asset)]
for order, txn in \
slippage.simulate(bar_data, asset, asset_orders):
commission = self.commission_models[type(asset)]
additional_commission = commission.calculate(order, txn)
if additional_commission > 0:
commissions.append({
"asset": order.asset,
"order": order,
"cost": additional_commission
})
order.filled += txn.amount
order.commission += additional_commission
order.dt = txn.dt
transactions.append(txn)
if not order.open:
closed_orders.append(order)
return transactions, commissions, closed_orders
def prune_orders(self, closed_orders):
"""
Removes all given orders from the blotter's open_orders list.
Parameters
----------
closed_orders: iterable of orders that are closed.
Returns
-------
None
"""
# remove all closed orders from our open_orders dict
for order in closed_orders:
asset = order.asset
asset_orders = self.open_orders[asset]
try:
asset_orders.remove(order)
except ValueError:
continue
# now clear out the assets from our open_orders dict that have
# zero open orders
for asset in list(self.open_orders.keys()):
if len(self.open_orders[asset]) == 0:
del self.open_orders[asset]
| apache-2.0 |
Sergiojimenez/criticas_del_doctor_Mabuse | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py | 886 | 131038 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import ntpath
import os
import posixpath
import re
import subprocess
import sys
import gyp.common
import gyp.easy_xml as easy_xml
import gyp.generator.ninja as ninja_generator
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSSettings as MSVSSettings
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSUtil as MSVSUtil
import gyp.MSVSVersion as MSVSVersion
from gyp.common import GypError
from gyp.common import OrderedSet
# TODO: Remove once bots are on 2.7, http://crbug.com/241769
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import gyp.ordered_dict
return gyp.ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
# Regular expression for validating Visual Studio GUIDs.  If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio.  This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
VALID_MSVS_GUID_CHARS = re.compile(r'^[A-F0-9\-]+$')
# Mapping from generic gyp variables to their MSVS build-macro equivalents.
generator_default_variables = {
    'EXECUTABLE_PREFIX': '',
    'EXECUTABLE_SUFFIX': '.exe',
    'STATIC_LIB_PREFIX': '',
    'SHARED_LIB_PREFIX': '',
    'STATIC_LIB_SUFFIX': '.lib',
    'SHARED_LIB_SUFFIX': '.dll',
    'INTERMEDIATE_DIR': '$(IntDir)',
    'SHARED_INTERMEDIATE_DIR': '$(OutDir)obj/global_intermediate',
    'OS': 'win',
    'PRODUCT_DIR': '$(OutDir)',
    'LIB_DIR': '$(OutDir)lib',
    'RULE_INPUT_ROOT': '$(InputName)',
    'RULE_INPUT_DIRNAME': '$(InputDir)',
    'RULE_INPUT_EXT': '$(InputExt)',
    'RULE_INPUT_NAME': '$(InputFileName)',
    'RULE_INPUT_PATH': '$(InputPath)',
    'CONFIGURATION_NAME': '$(ConfigurationName)',
}
# The msvs specific sections that hold paths
generator_additional_path_sections = [
    'msvs_cygwin_dirs',
    'msvs_props',
]
# Target keys that are msvs-specific and not per-configuration.
generator_additional_non_configuration_keys = [
    'msvs_cygwin_dirs',
    'msvs_cygwin_shell',
    'msvs_large_pdb',
    'msvs_shard',
    'msvs_external_builder',
    'msvs_external_builder_out_dir',
    'msvs_external_builder_build_cmd',
    'msvs_external_builder_clean_cmd',
    'msvs_external_builder_clcompile_cmd',
    'msvs_enable_winrt',
    'msvs_requires_importlibrary',
    'msvs_enable_winphone',
    'msvs_application_type_revision',
    'msvs_target_platform_version',
    'msvs_target_platform_minversion',
]
# List of precompiled header related keys.
precomp_keys = [
    'msvs_precompiled_header',
    'msvs_precompiled_source',
]
# Process-lifetime cache for _GetDomainAndUserName() below.
cached_username = None
cached_domain = None
# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
if sys.platform not in ('win32', 'cygwin'):
return ('DOMAIN', 'USERNAME')
global cached_username
global cached_domain
if not cached_domain or not cached_username:
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME')
if not domain or not username:
call = subprocess.Popen(['net', 'config', 'Workstation'],
stdout=subprocess.PIPE)
config = call.communicate()[0]
username_re = re.compile(r'^User name\s+(\S+)', re.MULTILINE)
username_match = username_re.search(config)
if username_match:
username = username_match.group(1)
domain_re = re.compile(r'^Logon domain\s+(\S+)', re.MULTILINE)
domain_match = domain_re.search(config)
if domain_match:
domain = domain_match.group(1)
cached_domain = domain
cached_username = username
return (cached_domain, cached_username)
# Prefix that _FixPath() prepends to relative paths; set while emitting a
# project so paths become relative to that project's directory.
fixpath_prefix = None
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
source: The path to be normalize.d
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
def _FixPath(path):
  """Convert paths to a form that will make sense in a vcproj file.

  Arguments:
    path: The path to convert, may contain / etc.
  Returns:
    The path with all slashes made into backslashes.
  """
  # Make project-relative, unless the path is absolute or starts with a
  # build macro like $(OutDir).
  if fixpath_prefix and path and not os.path.isabs(path) and path[0] != '$':
    path = os.path.join(fixpath_prefix, path)
  path = _NormalizedSource(path.replace('/', '\\'))
  # Strip any trailing backslash.
  if path.endswith('\\'):
    path = path[:-1]
  return path
def _FixPaths(paths):
  """Apply _FixPath to each path in the list."""
  return list(map(_FixPath, paths))
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
                                     list_excluded=True, msvs_version=None):
  """Converts a list split source file paths into a vcproj folder hierarchy.

  Arguments:
    sources: A list of source file paths split.
    prefix: A list of source file path layers meant to apply to each of sources.
    excluded: A set of excluded files.  May be None, meaning no exclusions.
    list_excluded: When true, excluded files are listed under an
        '_excluded_files' filter.
    msvs_version: A MSVSVersion object.
  Returns:
    A hierarchy of filenames and MSVSProject.Filter objects that matches the
    layout of the source tree.
    For example:
    _ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
                                     prefix=['joe'])
    -->
    [MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
     MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
  """
  if not prefix: prefix = []
  if excluded is None:
    # Guard against the default: the membership test below requires a
    # container, and `filename in None` raises TypeError.
    excluded = set()
  result = []
  excluded_result = []
  folders = OrderedDict()
  # Gather files into the final result, excluded, or folders.
  for s in sources:
    if len(s) == 1:
      filename = _NormalizedSource('\\'.join(prefix + s))
      if filename in excluded:
        excluded_result.append(filename)
      else:
        result.append(filename)
    elif msvs_version and not msvs_version.UsesVcxproj():
      # For MSVS 2008 and earlier, we need to process all files before walking
      # the sub folders.
      if not folders.get(s[0]):
        folders[s[0]] = []
      folders[s[0]].append(s[1:])
    else:
      contents = _ConvertSourcesToFilterHierarchy([s[1:]], prefix + [s[0]],
                                                  excluded=excluded,
                                                  list_excluded=list_excluded,
                                                  msvs_version=msvs_version)
      contents = MSVSProject.Filter(s[0], contents=contents)
      result.append(contents)
  # Add a folder for excluded files.
  if excluded_result and list_excluded:
    excluded_folder = MSVSProject.Filter('_excluded_files',
                                         contents=excluded_result)
    result.append(excluded_folder)
  if msvs_version and msvs_version.UsesVcxproj():
    return result
  # Populate all the folders.
  for f in folders:
    contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
                                                excluded=excluded,
                                                list_excluded=list_excluded,
                                                msvs_version=msvs_version)
    contents = MSVSProject.Filter(f, contents=contents)
    result.append(contents)
  return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
  """Like _ToolSetOrAppend, but silently ignores empty values."""
  if value:
    _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset)
def _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset=False):
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list and type(value) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name) - 1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
  """Return the canonical MSVS 'Configuration|Platform' name."""
  platform_name = _ConfigPlatform(config_data)
  base_name = _ConfigBaseName(config_name, platform_name)
  return '%s|%s' % (base_name, platform_name)
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
                                quote_cmd, do_setup_env):
  """Build a single command-line string for a custom build rule.

  Arguments:
    spec: the project dict (consulted for 'msvs_cygwin_dirs').
    cmd: the command as a list of arguments.
    cygwin_shell: truthy when the command must run under cygwin bash.
    has_input_path: whether $(InputPath) may appear in the command.
    quote_cmd: whether to quote each argument.
    do_setup_env: whether to call setup_env.bat first (cygwin mode only).
  Returns:
    A command-line string suitable for an MSVS custom build step.
  """
  # $(InputDir) ends in a backslash; the batch preamble strips it into
  # %INPUTDIR% so the value composes cleanly with cygpath below.
  if [x for x in cmd if '$(InputDir)' in x]:
    input_dir_preamble = (
      'set INPUTDIR=$(InputDir)\n'
      'if NOT DEFINED INPUTDIR set INPUTDIR=.\\\n'
      'set INPUTDIR=%INPUTDIR:~0,-1%\n'
      )
  else:
    input_dir_preamble = ''

  if cygwin_shell:
    # Find path to cygwin.
    cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
    # Prepare command.
    direct_cmd = cmd
    direct_cmd = [i.replace('$(IntDir)',
                            '`cygpath -m "${INTDIR}"`') for i in direct_cmd]
    direct_cmd = [i.replace('$(OutDir)',
                            '`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
    direct_cmd = [i.replace('$(InputDir)',
                            '`cygpath -m "${INPUTDIR}"`') for i in direct_cmd]
    if has_input_path:
      direct_cmd = [i.replace('$(InputPath)',
                              '`cygpath -m "${INPUTPATH}"`')
                    for i in direct_cmd]
    direct_cmd = ['\\"%s\\"' % i.replace('"', '\\\\\\"') for i in direct_cmd]
    # direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
    direct_cmd = ' '.join(direct_cmd)
    # TODO(quote):  regularize quoting path names throughout the module
    cmd = ''
    if do_setup_env:
      cmd += 'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
    cmd += 'set CYGWIN=nontsec&& '
    # Only export the environment variables the command actually references.
    if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
      cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
    if direct_cmd.find('INTDIR') >= 0:
      cmd += 'set INTDIR=$(IntDir)&& '
    if direct_cmd.find('OUTDIR') >= 0:
      cmd += 'set OUTDIR=$(OutDir)&& '
    if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
      cmd += 'set INPUTPATH=$(InputPath) && '
    cmd += 'bash -c "%(cmd)s"'
    cmd = cmd % {'cygwin_dir': cygwin_dir,
                 'cmd': direct_cmd}
    return input_dir_preamble + cmd
  else:
    # Convert cat --> type to mimic unix.
    if cmd[0] == 'cat':
      command = ['type']
    else:
      command = [cmd[0].replace('/', '\\')]
    # Add call before command to ensure that commands can be tied together one
    # after the other without aborting in Incredibuild, since IB makes a bat
    # file out of the raw command string, and some commands (like python) are
    # actually batch files themselves.
    command.insert(0, 'call')
    # Fix the paths
    # TODO(quote): This is a really ugly heuristic, and will miss path fixing
    #              for arguments like "--arg=path" or "/opt:path".
    # If the argument starts with a slash or dash, it's probably a command line
    # switch
    arguments = [i if (i[:1] in "/-") else _FixPath(i) for i in cmd[1:]]
    arguments = [i.replace('$(InputDir)', '%INPUTDIR%') for i in arguments]
    arguments = [MSVSSettings.FixVCMacroSlashes(i) for i in arguments]
    if quote_cmd:
      # Support a mode for using cmd directly.
      # Convert any paths to native form (first element is used directly).
      # TODO(quote):  regularize quoting path names throughout the module
      arguments = ['"%s"' % i for i in arguments]
    # Collapse into a single command.
    return input_dir_preamble + ' '.join(command + arguments)
def _BuildCommandLineForRule(spec, rule, has_input_path, do_setup_env):
  """Build the command line for a rule, resolving its cygwin/quoting flags.

  The rule's own 'msvs_cygwin_shell' setting wins; otherwise the project's
  setting (default 1) is used.
  """
  # Currently this weird argument munging is used to duplicate the way a
  # python script would need to be run as part of the chrome tree.
  # Eventually we should add some sort of rule_default option to set this
  # per project. For now the behavior chrome needs is the default.
  use_cygwin = rule.get('msvs_cygwin_shell')
  if use_cygwin is None:
    use_cygwin = int(spec.get('msvs_cygwin_shell', 1))
  elif isinstance(use_cygwin, str):
    use_cygwin = int(use_cygwin)
  quote_cmd = int(rule.get('msvs_quote_cmd', 1))
  return _BuildCommandLineForRuleRaw(spec, rule['action'], use_cygwin,
                                     has_input_path, quote_cmd,
                                     do_setup_env=do_setup_env)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
                               inputs, outputs, description, cmd):
  """Add a custom build tool to execute something.

  Arguments:
    p: the target project
    spec: the target project dict
    primary_input: input file to attach the build tool to
    inputs: list of inputs
    outputs: list of outputs
    description: description of the action
    cmd: command line to execute
  """
  inputs = _FixPaths(inputs)
  outputs = _FixPaths(outputs)
  tool = MSVSProject.Tool(
      'VCCustomBuildTool',
      {'Description': description,
       'AdditionalDependencies': ';'.join(inputs),
       'Outputs': ';'.join(outputs),
       'CommandLine': cmd,
      })
  # Add to the properties of primary input for each config.
  # NOTE: dict.iteritems() — this module targets Python 2.
  for config_name, c_data in spec['configurations'].iteritems():
    p.AddFileConfig(_FixPath(primary_input),
                    _ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
  """Add actions accumulated into an actions_dict, merging as needed.

  Arguments:
    p: the target project
    spec: the target project dict
    actions_dict: dictionary keyed on input name, which maps to a list of
        dicts describing the actions attached to that input file.
  """
  for primary_input, actions in actions_dict.items():
    # Merge all actions attached to this input into one custom build step.
    inputs = OrderedSet()
    outputs = OrderedSet()
    descriptions = []
    commands = []
    for action in actions:
      inputs.update(OrderedSet(action['inputs']))
      outputs.update(OrderedSet(action['outputs']))
      descriptions.append(action['description'])
      commands.append(action['command'])
    # Add the custom build step for one input file.
    _AddCustomBuildToolForMSVS(p, spec,
                               primary_input=primary_input,
                               inputs=inputs,
                               outputs=outputs,
                               description=', and also '.join(descriptions),
                               cmd='\r\n'.join(commands))
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputDir)', os.path.dirname(input_file))
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
return rule.get('rule_sources', [])
def _RuleInputsAndOutputs(rule, trigger_file):
  """Find the inputs and outputs generated by a rule.

  Arguments:
    rule: the rule in question.
    trigger_file: the main trigger for this rule.
  Returns:
    The pair of (inputs, outputs) involved in this rule.
  """
  inputs = OrderedSet()
  outputs = OrderedSet()
  # The trigger file itself is always an input.
  inputs.add(trigger_file)
  for raw in _FixPaths(rule.get('inputs', [])):
    inputs.add(_RuleExpandPath(raw, trigger_file))
  for raw in _FixPaths(rule.get('outputs', [])):
    outputs.add(_RuleExpandPath(raw, trigger_file))
  return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
  """Generate a native rules file.

  Arguments:
    p: the target project
    rules: the set of rules to include
    output_dir: the directory in which the project/gyp resides
    spec: the project dict
    options: global generator options
  """
  rules_filename = '%s%s.rules' % (spec['target_name'],
                                   options.suffix)
  rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
                                   spec['target_name'])
  # Add each rule.
  for r in rules:
    rule_name = r['rule_name']
    rule_ext = r['extension']
    inputs = _FixPaths(r.get('inputs', []))
    outputs = _FixPaths(r.get('outputs', []))
    # Skip a rule with no action and no inputs.
    if 'action' not in r and not r.get('rule_sources', []):
      continue
    cmd = _BuildCommandLineForRule(spec, r, has_input_path=True,
                                   do_setup_env=True)
    rules_file.AddCustomBuildRule(name=rule_name,
                                  description=r.get('message', rule_name),
                                  extensions=[rule_ext],
                                  additional_dependencies=inputs,
                                  outputs=outputs,
                                  cmd=cmd)
  # Write out rules file.
  rules_file.WriteIfChanged()
  # Add rules file to project.
  p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
                           sources, options, actions_to_add):
  """Generate an external makefile to do a set of rules.

  Arguments:
    rules: the list of rules to include
    output_dir: path containing project and gyp files
    spec: project specification data
    sources: set of sources known
    options: global generator options
    actions_to_add: The list of actions we will add to.
  """
  filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
  mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
  # Find cygwin style versions of some paths.
  mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
  mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
  # Gather stuff needed to emit all: target.
  all_inputs = OrderedSet()
  all_outputs = OrderedSet()
  all_output_dirs = OrderedSet()
  first_outputs = []
  for rule in rules:
    trigger_files = _FindRuleTriggerFiles(rule, sources)
    for tf in trigger_files:
      inputs, outputs = _RuleInputsAndOutputs(rule, tf)
      all_inputs.update(OrderedSet(inputs))
      all_outputs.update(OrderedSet(outputs))
      # Only use one target from each rule as the dependency for
      # 'all' so we don't try to build each rule multiple times.
      first_outputs.append(list(outputs)[0])
      # Get the unique output directories for this rule.
      output_dirs = [os.path.split(i)[0] for i in outputs]
      for od in output_dirs:
        all_output_dirs.add(od)
  first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
  # Write out all: target, including mkdir for each output directory.
  mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
  for od in all_output_dirs:
    if od:
      mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
  mk_file.write('\n')
  # Define how each output is generated.
  for rule in rules:
    trigger_files = _FindRuleTriggerFiles(rule, sources)
    for tf in trigger_files:
      # Get all the inputs and outputs for this rule for this trigger file.
      inputs, outputs = _RuleInputsAndOutputs(rule, tf)
      inputs = [_Cygwinify(i) for i in inputs]
      outputs = [_Cygwinify(i) for i in outputs]
      # Prepare the command line for this rule.
      cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
      cmd = ['"%s"' % i for i in cmd]
      cmd = ' '.join(cmd)
      # Add it to the makefile.
      mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
      mk_file.write('\t%s\n\n' % cmd)
  # Close up the file.
  mk_file.close()
  # Add makefile to list of sources.
  sources.add(filename)
  # Add a build action to call makefile.
  cmd = ['make',
         'OutDir=$(OutDir)',
         'IntDir=$(IntDir)',
         '-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
         '-f', filename]
  # Run the makefile through the cygwin shell (cygwin_shell=True below).
  cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True, True)
  # Insert makefile as 0'th input, so it gets the action attached there,
  # as this is easier to understand from in the IDE.
  all_inputs = list(all_inputs)
  all_inputs.insert(0, filename)
  _AddActionStep(actions_to_add,
                 inputs=_FixPaths(all_inputs),
                 outputs=_FixPaths(all_outputs),
                 description='Running external rules for %s' %
                 spec['target_name'],
                 command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument.
So that the Win32 CommandLineToArgv function will turn the escaped result back
into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(_Replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
# Matches list delimiters (commas/semicolons) and any backslashes that
# immediately precede them.
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
  """Escapes command line arguments for MSVS.

  The VCProj format stores string lists in a single string using commas and
  semi-colons as separators, which must be quoted if they are to be
  interpreted literally. However, command-line arguments may already have
  quotes, and the VCProj parser is ignorant of the backslash escaping
  convention used by CommandLineToArgv, so the command-line quotes and the
  VCProj quotes may not be the same quotes. So to store a general
  command-line argument in a VCProj list, we need to parse the existing
  quoting according to VCProj's convention and quote any delimiters that are
  not already quoted by that convention. The quotes that we add will also be
  seen by CommandLineToArgv, so if backslashes precede them then we also have
  to escape those backslashes according to the CommandLineToArgv
  convention.

  Args:
    s: the string to be escaped.
  Returns:
    the escaped string.
  """
  def _Replace(match):
    # For a non-literal quote, CommandLineToArgv requires an even number of
    # backslashes preceding it, and it produces half as many literal
    # backslashes. So we need to produce 2n backslashes.
    return 2 * match.group(1) + '"' + match.group(2) + '"'
  segments = s.split('"')
  # The unquoted segments are at the even-numbered indices.
  for i in range(0, len(segments), 2):
    segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
  # Concatenate back into a single string
  s = '"'.join(segments)
  if len(segments) % 2 == 0:
    # String ends while still quoted according to VCProj's convention. This
    # means the delimiter and the next list item that follow this one in the
    # .vcproj file will be misinterpreted as part of this item. There is nothing
    # we can do about this. Adding an extra quote would correct the problem in
    # the VCProj but cause the same problem on the final command-line. Moving
    # the item to the end of the list does works, but that's only possible if
    # there's only one such item. Let's just warn the user.
    # NOTE: Python 2 print-to-stderr syntax; this module targets Python 2.
    print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
                          'quotes in ' + s)
  return s
def _EscapeCppDefineForMSVS(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  # Apply each escaping layer in order: environment-variable expansion,
  # command-line quoting, then VCProj list-item quoting.
  for escape in (_EscapeEnvironmentVariableExpansion,
                 _EscapeCommandLineArgumentForMSVS,
                 _EscapeVCProjCommandLineArgListItem):
    s = escape(s)
  # cl.exe replaces literal # characters with = in preprocesor definitions for
  # some reason. Octal-encode to work around that.
  return s.replace('#', '\\%03o' % ord('#'))
quote_replacer_regex2 = re.compile(r'(\\+)"')
def _EscapeCommandLineArgumentForMSBuild(s):
"""Escapes a Windows command-line argument for use by MSBuild."""
def _Replace(match):
return (len(match.group(1)) / 2 * 4) * '\\' + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex2.sub(_Replace, s)
return s
def _EscapeMSBuildSpecialCharacters(s):
escape_dictionary = {
'%': '%25',
'$': '%24',
'@': '%40',
"'": '%27',
';': '%3B',
'?': '%3F',
'*': '%2A'
}
result = ''.join([escape_dictionary.get(c, c) for c in s])
return result
def _EscapeCppDefineForMSBuild(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  # Apply each escaping layer in order: environment-variable expansion,
  # MSBuild command-line quoting, then MSBuild special-character escaping.
  for escape in (_EscapeEnvironmentVariableExpansion,
                 _EscapeCommandLineArgumentForMSBuild,
                 _EscapeMSBuildSpecialCharacters):
    s = escape(s)
  # cl.exe replaces literal # characters with = in preprocesor definitions for
  # some reason. Octal-encode to work around that.
  return s.replace('#', '\\%03o' % ord('#'))
def _GenerateRulesForMSVS(p, output_dir, options, spec,
                          sources, excluded_sources,
                          actions_to_add):
  """Generate all the rules for a particular project.

  Arguments:
    p: the project
    output_dir: directory to emit rules to
    options: global options passed to the generator
    spec: the specification for this project
    sources: the set of all known source files in this project
    excluded_sources: the set of sources excluded from normal processing
    actions_to_add: deferred list of actions to add in
  """
  all_rules = spec.get('rules', [])
  native_rules = []
  external_rules = []
  # Split the rules by whether they use MSVS's native rules-file machinery.
  for rule in all_rules:
    if int(rule.get('msvs_external_rule', 0)):
      external_rules.append(rule)
    else:
      native_rules.append(rule)
  if native_rules:
    _GenerateNativeRulesForMSVS(p, native_rules, output_dir, spec, options)
  if external_rules:
    _GenerateExternalRules(external_rules, output_dir, spec,
                           sources, options, actions_to_add)
  _AdjustSourcesForRules(all_rules, sources, excluded_sources, False)
def _AdjustSourcesForRules(rules, sources, excluded_sources, is_msbuild):
  """Adds rule inputs/outputs to sources and updates the exclusion set."""
  for rule in rules:
    trigger_files = _FindRuleTriggerFiles(rule, sources)
    for trigger_file in trigger_files:
      # Un-exclude the trigger file so the rule can fire (e.g. a trigger like
      # ax_enums.idl may have been excluded because it is also an action's
      # input in the same project).
      excluded_sources.discard(_FixPath(trigger_file))
      # Nothing further to do unless this rule feeds its outputs back in as
      # sources.
      if not int(rule.get('process_outputs_as_sources', False)):
        continue
      inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
      inputs = OrderedSet(_FixPaths(inputs))
      outputs = OrderedSet(_FixPaths(outputs))
      inputs.remove(_FixPath(trigger_file))
      sources.update(inputs)
      if not is_msbuild:
        excluded_sources.update(inputs)
      sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
  """Take inputs with actions attached out of the list of exclusions.

  Arguments:
    excluded_sources: list of source files not to be built.
    actions_to_add: dict of actions keyed on source file they're attached to.
  Returns:
    excluded_sources with files that have actions attached removed.
  """
  # Files with attached actions must stay in the build so their actions run.
  with_actions = OrderedSet(_FixPaths(actions_to_add.keys()))
  return [source for source in excluded_sources if source not in with_actions]
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
  """Get the guid for the project.

  Arguments:
    proj_path: Path of the vcproj or vcxproj file to generate.
    spec: The target dictionary containing the properties of the target.
  Returns:
    the guid.
  Raises:
    ValueError: if the specified GUID is invalid.
  """
  # The guid may be given explicitly on the default configuration.
  default_config = _GetDefaultConfiguration(spec)
  guid = default_config.get('msvs_guid')
  if not guid:
    # No explicit guid; derive a deterministic one from the project path.
    return MSVSNew.MakeGuid(proj_path)
  if VALID_MSVS_GUID_CHARS.match(guid) is None:
    raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
                     (guid, VALID_MSVS_GUID_CHARS.pattern))
  return '{%s}' % guid
def _GetMsbuildToolsetOfProject(proj_path, spec, version):
  """Get the platform toolset for the project.

  Arguments:
    proj_path: Path of the vcproj or vcxproj file to generate.
    spec: The target dictionary containing the properties of the target.
    version: The MSVSVersion object.
  Returns:
    the platform toolset string or None.
  """
  # Pluck out the default configuration.
  default_config = _GetDefaultConfiguration(spec)
  toolset = default_config.get('msbuild_toolset')
  if not toolset:
    # Evaluate DefaultToolset() once (the original called it both in the
    # test and in the assignment).
    default_toolset = version.DefaultToolset()
    if default_toolset:
      toolset = default_toolset
  return toolset
def _GenerateProject(project, options, version, generator_flags):
  """Generates a vcproj file.

  Arguments:
    project: the MSVSProject object.
    options: global generator options.
    version: the MSVSVersion object.
    generator_flags: dict of generator-specific flags.
  Returns:
    A list of source files that cannot be found on disk.
  """
  default_config = _GetDefaultConfiguration(project.spec)
  # Skip emitting anything if told to with msvs_existing_vcproj option: the
  # user supplies the project file, so there are no missing sources to report.
  if default_config.get('msvs_existing_vcproj'):
    return []
  generate = (_GenerateMSBuildProject if version.UsesVcxproj()
              else _GenerateMSVSProject)
  return generate(project, options, version, generator_flags)
# TODO: Avoid code duplication with _ValidateSourcesForOSX in make.py.
def _ValidateSourcesForMSVSProject(spec, version):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
version: The VisualStudioVersion object.
"""
# This validation should not be applied to MSVC2010 and later.
assert not version.UsesVcxproj()
# TODO: Check if MSVC allows this for loadable_module targets.
if spec.get('type', None) not in ('static_library', 'shared_library'):
return
sources = spec.get('sources', [])
basenames = {}
for source in sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'MSVC08 cannot handle that.')
raise GypError('Duplicate basenames in sources section, see list above')
def _GenerateMSVSProject(project, options, version, generator_flags):
  """Generates a .vcproj file.  It may create .rules and .user files too.

  Arguments:
    project: The project object we will generate the file for.
    options: Global options passed to the generator.
    version: The VisualStudioVersion object.
    generator_flags: dict of generator-specific flags.
  Returns:
    A list of source files that could not be found on disk.
  """
  spec = project.spec
  gyp.common.EnsureDirExists(project.path)
  platforms = _GetUniquePlatforms(spec)
  p = MSVSProject.Writer(project.path, version, spec['target_name'],
                         project.guid, platforms)
  # Get directory project file is in.
  project_dir = os.path.split(project.path)[0]
  gyp_path = _NormalizedSource(project.build_file)
  relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
  config_type = _GetMSVSConfigurationType(spec, project.build_file)
  # Emit one configuration section per gyp configuration.
  for config_name, config in spec['configurations'].iteritems():
    _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
  # MSVC08 and prior version cannot handle duplicate basenames in the same
  # target.
  # TODO: Take excluded sources into consideration if possible.
  _ValidateSourcesForMSVSProject(spec, version)
  # Prepare list of sources and excluded sources.
  gyp_file = os.path.split(project.build_file)[1]
  sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
                                                    gyp_file)
  # Add rules.
  actions_to_add = {}
  _GenerateRulesForMSVS(p, project_dir, options, spec,
                        sources, excluded_sources,
                        actions_to_add)
  list_excluded = generator_flags.get('msvs_list_excluded_files', True)
  sources, excluded_sources, excluded_idl = (
      _AdjustSourcesAndConvertToFilterHierarchy(spec, options, project_dir,
                                                sources, excluded_sources,
                                                list_excluded, version))
  # Add in files.
  missing_sources = _VerifySourcesExist(sources, project_dir)
  p.AddFiles(sources)
  _AddToolFilesToMSVS(p, spec)
  _HandlePreCompiledHeaders(p, sources, spec)
  _AddActions(actions_to_add, spec, relative_path_of_gyp_file)
  _AddCopies(actions_to_add, spec)
  _WriteMSVSUserFile(project.path, version, spec)
  # NOTE: this stanza must appear after all actions have been decided.
  # Don't excluded sources with actions attached, or they won't run.
  excluded_sources = _FilterActionsFromExcluded(
      excluded_sources, actions_to_add)
  _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                              list_excluded)
  _AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
  # Write it out.
  p.WriteIfChanged()
  return missing_sources
def _GetUniquePlatforms(spec):
  """Returns the list of unique platforms for this spec, e.g ['win32', ...].

  Arguments:
    spec: The target dictionary containing the properties of the target.
  Returns:
    The list of unique platform names, first-seen order preserved.
  """
  # Gather list of unique platforms.
  platforms = OrderedSet()
  for configuration in spec['configurations']:
    platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
  platforms = list(platforms)
  return platforms
def _CreateMSVSUserFile(proj_path, version, spec):
  """Generates a .user file for the user running this Gyp program.

  Arguments:
    proj_path: The path of the project file being created.  The .user file
               shares the same path (with an appropriate suffix).
    version: The VisualStudioVersion object.
    spec: The target dictionary containing the properties of the target.
  Returns:
    The MSVSUserFile object created.
  """
  domain, username = _GetDomainAndUserName()
  # The .user file sits beside the project: <proj>.<domain>.<username>.user
  vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
  return MSVSUserFile.Writer(vcuser_filename, version, spec['target_name'])
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project.
It's a number defined by Microsoft. May raise an exception.
Args:
spec: The target dictionary containing the properties of the target.
build_file: The path of the gyp file.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
}[spec['type']]
except KeyError:
if spec.get('type'):
raise GypError('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise GypError('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
  """Adds a configuration to the MSVS project.

  Many settings in a vcproj file are specific to a configuration.  This
  function the main part of the vcproj file that's configuration specific.

  Arguments:
    p: The target project being generated.
    spec: The target dictionary containing the properties of the target.
    config_type: The configuration type, a number as defined by Microsoft.
    config_name: The name of the configuration.
    config: The dictionary that defines the special processing to be done
        for this configuration.
  """
  # Get the information for this configuration
  include_dirs, midl_include_dirs, resource_include_dirs = \
      _GetIncludeDirs(config)
  libraries = _GetLibraries(spec)
  library_dirs = _GetLibraryDirs(config)
  out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec, msbuild=False)
  defines = _GetDefines(config)
  defines = [_EscapeCppDefineForMSVS(d) for d in defines]
  disabled_warnings = _GetDisabledWarnings(config)
  prebuild = config.get('msvs_prebuild')
  postbuild = config.get('msvs_postbuild')
  def_file = _GetModuleDefinition(spec)
  precompiled_header = config.get('msvs_precompiled_header')
  # Prepare the list of tools as a dictionary.
  tools = dict()
  # Add in user specified msvs_settings.
  msvs_settings = config.get('msvs_settings', {})
  MSVSSettings.ValidateMSVSSettings(msvs_settings)
  # Prevent default library inheritance from the environment.
  _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', ['$(NOINHERIT)'])
  # User settings go in first so the generated settings below are appended
  # after them.
  for tool in msvs_settings:
    settings = config['msvs_settings'][tool]
    for setting in settings:
      _ToolAppend(tools, tool, setting, settings[setting])
  # Add the information to the appropriate tool
  _ToolAppend(tools, 'VCCLCompilerTool',
              'AdditionalIncludeDirectories', include_dirs)
  _ToolAppend(tools, 'VCMIDLTool',
              'AdditionalIncludeDirectories', midl_include_dirs)
  _ToolAppend(tools, 'VCResourceCompilerTool',
              'AdditionalIncludeDirectories', resource_include_dirs)
  # Add in libraries.
  _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
  _ToolAppend(tools, 'VCLinkerTool', 'AdditionalLibraryDirectories',
              library_dirs)
  if out_file:
    # Only set the output file if the user has not already specified one.
    _ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
  # Add defines.
  _ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
  _ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
              defines)
  # Change program database directory to prevent collisions.
  _ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
              '$(IntDir)$(ProjectName)\\vc80.pdb', only_if_unset=True)
  # Add disabled warnings.
  _ToolAppend(tools, 'VCCLCompilerTool',
              'DisableSpecificWarnings', disabled_warnings)
  # Add Pre-build.
  _ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
  # Add Post-build.
  _ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
  # Turn on precompiled headers if appropriate.
  if precompiled_header:
    precompiled_header = os.path.split(precompiled_header)[1]
    _ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
    _ToolAppend(tools, 'VCCLCompilerTool',
                'PrecompiledHeaderThrough', precompiled_header)
    _ToolAppend(tools, 'VCCLCompilerTool',
                'ForcedIncludeFiles', precompiled_header)
  # Loadable modules don't generate import libraries;
  # tell dependent projects to not expect one.
  if spec['type'] == 'loadable_module':
    _ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
  # Set the module definition file if any.
  if def_file:
    _ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
  _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
  """Returns the list of directories to be used for #include directives.

  Arguments:
    config: The dictionary that defines the special processing to be done
        for this configuration.
  Returns:
    The include, MIDL include and resource include directory path lists.
  """
  # TODO(bradnelson): include_dirs should really be flexible enough not to
  # require this sort of thing.
  system_dirs = config.get('msvs_system_include_dirs', [])
  include_dirs = config.get('include_dirs', []) + system_dirs
  midl_include_dirs = config.get('midl_include_dirs', []) + system_dirs
  # Resource includes default to the (pre-fixed) compiler include dirs.
  resource_include_dirs = config.get('resource_include_dirs', include_dirs)
  return (_FixPaths(include_dirs),
          _FixPaths(midl_include_dirs),
          _FixPaths(resource_include_dirs))
def _GetLibraryDirs(config):
  """Returns the list of directories to be used for library search paths.

  Arguments:
    config: The dictionary that defines the special processing to be done
        for this configuration.
  Returns:
    The list of directory paths.
  """
  return _FixPaths(config.get('library_dirs', []))
def _GetLibraries(spec):
  """Returns the list of libraries for this configuration.

  Arguments:
    spec: The target dictionary containing the properties of the target.
  Returns:
    The list of library names, each normalized to carry an extension, with
    duplicates removed (keeping only the last occurrence) while preserving
    order.
  """
  libraries = spec.get('libraries', [])
  # Walk backwards so the last occurrence of a duplicate wins.
  seen = set()
  deduped = []
  for entry in reversed(libraries):
    # Strip out -l, as it is not used on windows (but is needed so we can
    # pass in libraries that are assumed to be in the default library path).
    library = re.sub(r'^\-l', '', entry)
    # Default the extension to .lib when none was given.
    if not os.path.splitext(library)[1]:
      library += '.lib'
    if library in seen:
      continue
    seen.add(library)
    deduped.append(library)
  deduped.reverse()
  return deduped
def _GetOutputFilePathAndTool(spec, msbuild):
"""Returns the path and tool to use for this target.
Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A triple of (file path, name of the vc tool, name of the msbuild tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
msbuild_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'),
'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)lib\\', '.lib'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, msbuild_tool, out_dir, suffix = output_file_props
if spec.get('standalone_static_library', 0):
out_dir = '$(OutDir)'
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
elif msbuild:
suffix = '$(TargetExt)'
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool, msbuild_tool
def _GetOutputTargetExt(spec):
"""Returns the extension for this target, including the dot
If product_extension is specified, set target_extension to this to avoid
MSB8012, returns None otherwise. Ignores any target_extension settings in
the input files.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A string with the extension, or None
"""
target_extension = spec.get('product_extension')
if target_extension:
return '.' + target_extension
return None
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ''
if spec['type'] in ['shared_library', 'loadable_module', 'executable']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError(
'Multiple module definition files in one target, target %s lists '
'multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
  """Convert tools to a form expected by Visual Studio.

  Arguments:
    tools: A dictionary of settings; the tool name is the key.
  Returns:
    A list of Tool objects.
  """
  tool_list = []
  # iteritems() is Python 2 only; items() behaves identically here and also
  # works under Python 3.
  for tool, settings in tools.items():
    # Collapse settings with lists.
    settings_fixed = {}
    for setting, value in settings.items():
      if type(value) == list:
        if ((tool == 'VCLinkerTool' and
             setting == 'AdditionalDependencies') or
            setting == 'AdditionalOptions'):
          # These settings are space-delimited command-line fragments.
          settings_fixed[setting] = ' '.join(value)
        else:
          # Every other list-valued setting is semicolon-delimited.
          settings_fixed[setting] = ';'.join(value)
      else:
        settings_fixed[setting] = value
    # Add in this tool.
    tool_list.append(MSVSProject.Tool(tool, settings_fixed))
  return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
  """Add to the project file the configuration specified by config.

  Arguments:
    p: The target project being generated.
    spec: the target project dict.
    tools: A dictionary of settings; the tool name is the key.
    config: The dictionary that defines the special processing to be done
        for this configuration.
    config_type: The configuration type, a number as defined by Microsoft.
    config_name: The name of the configuration.
  """
  attributes = _GetMSVSAttributes(spec, config, config_type)
  tool_list = _ConvertToolsToExpectedForm(tools)
  # Register the configuration with its attributes and per-tool settings.
  full_name = _ConfigFullName(config_name, config)
  p.AddConfig(full_name, attrs=attributes, tools=tool_list)
def _GetMSVSAttributes(spec, config, config_type):
  """Builds the configuration-attribute dict for a vcproj configuration."""
  # Start from the user-specified attributes.
  prepared_attrs = dict(config.get('msvs_configuration_attributes', {}))
  # Add props files.
  vsprops_dirs = _FixPaths(config.get('msvs_props', []))
  if vsprops_dirs:
    prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
  # Set configuration type.
  prepared_attrs['ConfigurationType'] = config_type
  output_dir = prepared_attrs.get('OutputDirectory',
                                  '$(SolutionDir)$(ConfigurationName)')
  prepared_attrs['OutputDirectory'] = _FixPath(output_dir) + '\\'
  if 'IntermediateDirectory' not in prepared_attrs:
    intermediate = _FixPath('$(ConfigurationName)\\obj\\$(ProjectName)') + '\\'
  else:
    intermediate = _FixPath(prepared_attrs['IntermediateDirectory']) + '\\'
    intermediate = MSVSSettings.FixVCMacroSlashes(intermediate)
  prepared_attrs['IntermediateDirectory'] = intermediate
  return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
  """Normalizes each path in sources_array and adds it to sources_set."""
  for source in sources_array:
    sources_set.add(_NormalizedSource(source))
def _PrepareListOfSources(spec, generator_flags, gyp_file):
  """Prepare list of sources and excluded sources.

  Besides the sources specified directly in the spec, adds the gyp file so
  that a change to it will cause a re-compile. Also adds appropriate sources
  for actions and copies. Assumes later stage will un-exclude files which
  have custom build steps attached.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    generator_flags: dict of generator-specific flags.
    gyp_file: The name of the gyp file.
  Returns:
    A pair of (list of sources, list of excluded sources).
    The sources will be relative to the gyp file.
  """
  sources = OrderedSet()
  _AddNormalizedSources(sources, spec.get('sources', []))
  excluded_sources = OrderedSet()
  # Add in the gyp file.
  if not generator_flags.get('standalone'):
    sources.add(gyp_file)
  # Add in 'action' inputs and outputs.
  for a in spec.get('actions', []):
    inputs = a['inputs']
    inputs = [_NormalizedSource(i) for i in inputs]
    # Add all inputs to sources and excluded sources.
    inputs = OrderedSet(inputs)
    sources.update(inputs)
    if not spec.get('msvs_external_builder'):
      excluded_sources.update(inputs)
    if int(a.get('process_outputs_as_sources', False)):
      _AddNormalizedSources(sources, a.get('outputs', []))
  # Add in 'copies' inputs and outputs.
  for cpy in spec.get('copies', []):
    _AddNormalizedSources(sources, cpy.get('files', []))
  return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
    spec, options, gyp_dir, sources, excluded_sources, list_excluded, version):
  """Adjusts the list of sources and excluded sources.

  Also converts the sets to lists.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    options: Global generator options.
    gyp_dir: The path to the gyp file being processed.
    sources: A set of sources to be included for this project.
    excluded_sources: A set of sources to be excluded for this project.
    list_excluded: True if excluded files should still be listed in the
        project file.
    version: A MSVSVersion object.
  Returns:
    A trio of (list of sources, list of excluded sources,
    path of excluded IDL file)
  """
  # Exclude excluded sources coming into the generator.
  excluded_sources.update(OrderedSet(spec.get('sources_excluded', [])))
  # Add excluded sources into sources for good measure.
  sources.update(excluded_sources)
  # Convert to proper windows form.
  # NOTE: sources goes from being a set to a list here.
  # NOTE: excluded_sources goes from being a set to a list here.
  sources = _FixPaths(sources)
  # Convert to proper windows form.
  excluded_sources = _FixPaths(excluded_sources)
  excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
  precompiled_related = _GetPrecompileRelatedFiles(spec)
  # Find the excluded ones, minus the precompiled header related ones.
  fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
  # Convert to folders and the right slashes.
  sources = [i.split('\\') for i in sources]
  sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded,
                                             list_excluded=list_excluded,
                                             msvs_version=version)
  # Prune filters with a single child to flatten ugly directory structures
  # such as ../../src/modules/module1 etc.
  if version.UsesVcxproj():
    # MSBuild: collapse layers in which every top-level filter shares a name
    # and holds exactly one child.
    while all([isinstance(s, MSVSProject.Filter) for s in sources]) \
        and len(set([s.name for s in sources])) == 1:
      assert all([len(s.contents) == 1 for s in sources])
      sources = [s.contents[0] for s in sources]
  else:
    while len(sources) == 1 and isinstance(sources[0], MSVSProject.Filter):
      sources = sources[0].contents
  return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
  """Gathers precompiled-header related sources across all configurations."""
  precompiled_related = []
  # iteritems() (with the key discarded) is Python 2 only; values() behaves
  # identically here and also works under Python 3.
  for config in spec['configurations'].values():
    for k in precomp_keys:
      f = config.get(k)
      if f:
        precompiled_related.append(_FixPath(f))
  return precompiled_related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                                list_excluded):
  """Marks excluded files as ExcludedFromBuild in the relevant configurations.

  Arguments:
    p: The project being generated.
    spec: The target dictionary containing the properties of the target.
    excluded_sources: list of files excluded from the build.
    excluded_idl: list of idl files handled by non-native rules.
    list_excluded: whether fully excluded files still appear in the project.
  """
  exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
  # iteritems() is Python 2 only; items() behaves identically here and also
  # works under Python 3.
  for file_name, excluded_configs in exclusions.items():
    if (not list_excluded and
        len(excluded_configs) == len(spec['configurations'])):
      # If we're not listing excluded files, then they won't appear in the
      # project, so don't try to configure them to be excluded.
      pass
    else:
      for config_name, config in excluded_configs:
        p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
                        {'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
exclusions = {}
# Exclude excluded sources from being built.
for f in excluded_sources:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
# Don't do this for ones that are precompiled header related.
if f not in precomped:
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
# If any non-native rules use 'idl' as an extension exclude idl files.
# Exclude them now.
for f in excluded_idl:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
return exclusions
def _AddToolFilesToMSVS(p, spec):
  """Registers every msvs_tool_files entry (rules files) with the project."""
  # Add in tool files (rules).
  tool_files = OrderedSet()
  # iteritems() (with the key discarded) is Python 2 only; values() behaves
  # identically here and also works under Python 3.
  for config in spec['configurations'].values():
    for f in config.get('msvs_tool_files', []):
      tool_files.add(f)
  for f in tool_files:
    p.AddToolFile(f)
def _HandlePreCompiledHeaders(p, sources, spec):
  """Configures precompiled-header usage for the project's sources.

  Pre-compiled header source stubs need a different compiler flag
  (generate precompiled header) and any source file not of the same
  kind (i.e. C vs. C++) as the precompiled header source stub needs
  to have use of precompiled headers disabled.
  """
  extensions_excluded_from_precompile = []
  for config_name, config in spec['configurations'].iteritems():
    source = config.get('msvs_precompiled_source')
    if source:
      source = _FixPath(source)
      # UsePrecompiledHeader=1 for if using precompiled headers.
      tool = MSVSProject.Tool('VCCLCompilerTool',
                              {'UsePrecompiledHeader': '1'})
      p.AddFileConfig(source, _ConfigFullName(config_name, config),
                      {}, tools=[tool])
      basename, extension = os.path.splitext(source)
      # NOTE(review): each configuration that defines a precompiled source
      # overwrites this list, so the last one iterated wins — presumably all
      # configurations use the same kind (C vs. C++) of stub; verify if that
      # assumption matters.
      if extension == '.c':
        extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
      else:
        extensions_excluded_from_precompile = ['.c']

  def DisableForSourceTree(source_tree):
    # Recursively disable precompiled headers (and drop forced includes)
    # for sources whose extension is of the other language kind.
    for source in source_tree:
      if isinstance(source, MSVSProject.Filter):
        DisableForSourceTree(source.contents)
      else:
        basename, extension = os.path.splitext(source)
        if extension in extensions_excluded_from_precompile:
          for config_name, config in spec['configurations'].iteritems():
            tool = MSVSProject.Tool('VCCLCompilerTool',
                                    {'UsePrecompiledHeader': '0',
                                     'ForcedIncludeFiles': '$(NOINHERIT)'})
            p.AddFileConfig(_FixPath(source),
                            _ConfigFullName(config_name, config),
                            {}, tools=[tool])

  # Do nothing if there was no precompiled source.
  if extensions_excluded_from_precompile:
    DisableForSourceTree(sources)
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
  """Queues the spec's actions into actions_to_add."""
  # Don't setup_env every time. When all the actions are run together in one
  # batch file in VS, the PATH will grow too long.
  # Membership in this set means that the cygwin environment has been set up,
  # and does not need to be set up again.
  have_setup_env = set()
  for action in spec.get('actions', []):
    # Attach actions to the gyp file if nothing else is there.
    inputs = action.get('inputs') or [relative_path_of_gyp_file]
    attached_to = inputs[0]
    # Only the first action attached to a given file sets up the environment.
    need_setup_env = attached_to not in have_setup_env
    cmd = _BuildCommandLineForRule(spec, action, has_input_path=False,
                                   do_setup_env=need_setup_env)
    have_setup_env.add(attached_to)
    # Add the action.
    _AddActionStep(actions_to_add,
                   inputs=inputs,
                   outputs=action.get('outputs', []),
                   description=action.get('message', action['action_name']),
                   command=cmd)
def _WriteMSVSUserFile(project_path, version, spec):
# Add run_as and test targets.
if 'run_as' in spec:
run_as = spec['run_as']
action = run_as.get('action', [])
environment = run_as.get('environment', [])
working_directory = run_as.get('working_directory', '.')
elif int(spec.get('test', 0)):
action = ['$(TargetPath)', '--gtest_print_time']
environment = []
working_directory = '.'
else:
return # Nothing to add
# Write out the user file.
user_file = _CreateMSVSUserFile(project_path, version, spec)
for config_name, c_data in spec['configurations'].iteritems():
user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
action, environment, working_directory)
user_file.WriteIfChanged()
def _AddCopies(actions_to_add, spec):
  """Converts the spec's 'copies' entries into deferred action steps."""
  for inputs, outputs, cmd, description in _GetCopies(spec):
    _AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
                   description=description, command=cmd)
def _GetCopies(spec):
  """Returns (inputs, outputs, command, description) tuples for 'copies'."""
  copies = []
  for cpy in spec.get('copies', []):
    destination = cpy['destination']
    for src in cpy.get('files', []):
      dst = os.path.join(destination, os.path.basename(src))
      # _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
      # outputs, so do the same for our generated command line.
      if src.endswith('/'):
        # Directory copy: xcopy the whole tree beneath the destination.
        src_bare = src[:-1]
        base_dir, outer_dir = posixpath.split(src_bare)
        cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
            _FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
        copies.append(([src], ['dummy_copies', dst], cmd,
                       'Copying %s to %s' % (src, dst)))
      else:
        # Single-file copy; create the destination directory first.
        cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
            _FixPath(destination), _FixPath(src), _FixPath(dst))
        copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
  return copies
def _GetPathDict(root, path):
# |path| will eventually be empty (in the recursive calls) if it was initially
# relative; otherwise it will eventually end up as '\', 'D:\', etc.
if not path or path.endswith(os.sep):
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
def _DictsToFolders(base_path, bucket, flat):
  """Recursively convert a tree of dicts into MSVSFolder entries.

  When |flat| is true, sub-folder contents are hoisted to the top level
  instead of being wrapped in MSVSFolder objects.
  """
  children = []
  for folder, contents in bucket.iteritems():
    if type(contents) != dict:
      # Leaf node: an actual project object.
      children.append(contents)
      continue
    subtree = _DictsToFolders(os.path.join(base_path, folder), contents, flat)
    if flat:
      children += subtree
    else:
      children.append(MSVSNew.MSVSFolder(os.path.join(base_path, folder),
                                         name='(' + folder + ')',
                                         entries=subtree))
  return children
def _CollapseSingles(parent, node):
# Recursively explorer the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node:
node[child] = _CollapseSingles(child, node[child])
return node
def _GatherSolutionFolders(sln_projects, project_objects, flat):
  """Build the folder/project hierarchy to embed in a solution file."""
  root = {}
  # Bucket each project into a tree of dicts keyed by path component.
  for project in sln_projects:
    gyp_file, target = gyp.common.ParseQualifiedTarget(project)[0:2]
    path_dict = _GetPathDict(root, os.path.dirname(gyp_file))
    path_dict[target + '.vcproj'] = project_objects[project]
  # Walk down from the top until we hit a folder that has more than one
  # entry.  In practice, this strips the top-level "src/" dir from the
  # hierarchy in the solution.
  while len(root) == 1 and type(root[root.keys()[0]]) == dict:
    root = root[root.keys()[0]]
  # Collapse folders that contain only a same-named project.
  root = _CollapseSingles('', root)
  # Merge buckets until everything is a root entry.
  return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
  """Compute the project file path and fixpath prefix for a target.

  Returns (proj_path, fix_prefix): fix_prefix is the relative path from the
  generator-output copy of the project back to the original project dir, or
  None when no generator_output redirection is in effect.
  """
  default_config = _GetDefaultConfiguration(spec)
  proj_filename = default_config.get('msvs_existing_vcproj')
  if not proj_filename:
    proj_filename = ''.join([spec['target_name'], options.suffix,
                             msvs_version.ProjectExtension()])
  build_file = gyp.common.BuildFile(qualified_target)
  proj_path = os.path.join(os.path.dirname(build_file), proj_filename)
  fix_prefix = None
  if options.generator_output:
    project_dir_path = os.path.dirname(os.path.abspath(proj_path))
    proj_path = os.path.join(options.generator_output, proj_path)
    fix_prefix = gyp.common.RelativePath(project_dir_path,
                                         os.path.dirname(proj_path))
  return proj_path, fix_prefix
def _GetPlatformOverridesOfProject(spec):
  """Map each solution configuration name to its project configuration name.

  Prepares a dict indicating which project configurations are used for which
  solution configurations for this target.
  """
  overrides = {}
  for config_name, config in spec['configurations'].iteritems():
    platform = config.get('msvs_target_platform', _ConfigPlatform(config))
    fixed_name = '%s|%s' % (
        _ConfigBaseName(config_name, _ConfigPlatform(config)), platform)
    overrides[_ConfigFullName(config_name, config)] = fixed_name
  return overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
  """Create a MSVSProject object for the targets found in target list.

  Arguments:
    target_list: the list of targets to generate project objects for.
    target_dicts: the dictionary of specifications.
    options: global generator options.
    msvs_version: the MSVSVersion object.
  Returns:
    A dict of created projects, keyed by qualified target name.
  """
  # NOTE: this function assigns the module-global fixpath_prefix as a side
  # effect; _GenerateProject later reads it via each project object.
  global fixpath_prefix
  # Generate each project.
  projects = {}
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    # Only the 'target' toolset is representable in a .vcproj/.vcxproj.
    if spec['toolset'] != 'target':
      raise GypError(
          'Multiple toolsets not supported in msvs build (target %s)' %
          qualified_target)
    proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
                                                  options, msvs_version)
    guid = _GetGuidOfProject(proj_path, spec)
    overrides = _GetPlatformOverridesOfProject(spec)
    build_file = gyp.common.BuildFile(qualified_target)
    # Create object for this project.
    obj = MSVSNew.MSVSProject(
        proj_path,
        name=spec['target_name'],
        guid=guid,
        spec=spec,
        build_file=build_file,
        config_platform_overrides=overrides,
        fixpath_prefix=fixpath_prefix)
    # Set project toolset if any (MS build only)
    if msvs_version.UsesVcxproj():
      obj.set_msbuild_toolset(
          _GetMsbuildToolsetOfProject(proj_path, spec, msvs_version))
    projects[qualified_target] = obj
  # Set all the dependencies, but not if we are using an external builder like
  # ninja
  for project in projects.values():
    if not project.spec.get('msvs_external_builder'):
      deps = project.spec.get('dependencies', [])
      # Dependencies are recorded as project objects, not qualified names.
      deps = [projects[d] for d in deps]
      project.set_dependencies(deps)
  return projects
def _InitNinjaFlavor(params, target_list, target_dicts):
  """Initialize targets for the ninja flavor.

  This sets up the necessary variables in the targets to generate msvs
  projects that use ninja as an external builder. The variables in the spec
  are only set if they have not been set. This allows individual specs to
  override the default values initialized here.

  Arguments:
    params: Params provided to the generator.
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
  """
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    if spec.get('msvs_external_builder'):
      # The spec explicitly defined an external builder, so don't change it.
      continue
    ninja_exe = spec.get('msvs_path_to_ninja', 'ninja.exe')
    spec['msvs_external_builder'] = 'ninja'
    if not spec.get('msvs_external_builder_out_dir'):
      gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
      gyp_dir = os.path.dirname(gyp_file)
      configuration = '$(Configuration)'
      if params.get('target_arch') == 'x64':
        # 64-bit builds live in a sibling "_x64" output directory.
        configuration += '_x64'
      spec['msvs_external_builder_out_dir'] = os.path.join(
          gyp.common.RelativePath(params['options'].toplevel_dir, gyp_dir),
          ninja_generator.ComputeOutputDir(params),
          configuration)
    if not spec.get('msvs_external_builder_build_cmd'):
      spec['msvs_external_builder_build_cmd'] = [
          ninja_exe, '-C', '$(OutDir)', '$(ProjectName)']
    if not spec.get('msvs_external_builder_clean_cmd'):
      spec['msvs_external_builder_clean_cmd'] = [
          ninja_exe, '-C', '$(OutDir)', '-tclean', '$(ProjectName)']
def CalculateVariables(default_variables, params):
  """Generated variables that require params to be known."""
  generator_flags = params.get('generator_flags', {})
  # Select project file format version (if unset, default to auto detecting).
  msvs_version = MSVSVersion.SelectVisualStudioVersion(
      generator_flags.get('msvs_version', 'auto'))
  # Stash msvs_version for later (so we don't have to probe the system twice).
  params['msvs_version'] = msvs_version
  # Set a variable so conditions can be based on msvs_version.
  default_variables['MSVS_VERSION'] = msvs_version.ShortName()
  # To determine processor word size on Windows, in addition to checking
  # PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCITEW6432 (which
  # contains the actual word size of the system when running thru WOW64).
  arch = os.environ.get('PROCESSOR_ARCHITECTURE', '')
  arch_wow64 = os.environ.get('PROCESSOR_ARCHITEW6432', '')
  if '64' in arch or '64' in arch_wow64:
    default_variables['MSVS_OS_BITS'] = 64
  else:
    default_variables['MSVS_OS_BITS'] = 32
  if gyp.common.GetFlavor(params) == 'ninja':
    default_variables['SHARED_INTERMEDIATE_DIR'] = '$(OutDir)gen'
def PerformBuild(data, configurations, params):
  """Build the requested configurations of every generated solution.

  Invokes devenv.com once per (.sln, configuration) pair.  check_call raises
  CalledProcessError if any invocation exits non-zero.
  """
  options = params['options']
  msvs_version = params['msvs_version']
  # devenv.com is the console-mode entry point of the Visual Studio IDE.
  devenv = os.path.join(msvs_version.path, 'Common7', 'IDE', 'devenv.com')
  for build_file, build_file_dict in data.iteritems():
    (build_file_root, build_file_ext) = os.path.splitext(build_file)
    # Solutions are only produced for .gyp files (not .gypi includes).
    if build_file_ext != '.gyp':
      continue
    sln_path = build_file_root + options.suffix + '.sln'
    if options.generator_output:
      sln_path = os.path.join(options.generator_output, sln_path)
    for config in configurations:
      arguments = [devenv, sln_path, '/Build', config]
      print 'Building [%s]: %s' % (config, arguments)
      # check_call raises on non-zero exit; rtn is therefore always 0.
      rtn = subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
  """Generate .sln and .vcproj files.

  This is the entry point for this generator.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    data: Dictionary containing per .gyp data.
    params: Dict of global generator options and stashed state
        (notably 'msvs_version' set by CalculateVariables).
  """
  global fixpath_prefix
  options = params['options']
  # Get the project file format version back out of where we stashed it in
  # GeneratorCalculatedVariables.
  msvs_version = params['msvs_version']
  generator_flags = params.get('generator_flags', {})
  # Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
  (target_list, target_dicts) = MSVSUtil.ShardTargets(target_list, target_dicts)
  # Optionally use the large PDB workaround for targets marked with
  # 'msvs_large_pdb': 1.
  (target_list, target_dicts) = MSVSUtil.InsertLargePdbShims(
      target_list, target_dicts, generator_default_variables)
  # Optionally configure each spec to use ninja as the external builder.
  if params.get('flavor') == 'ninja':
    _InitNinjaFlavor(params, target_list, target_dicts)
  # Prepare the set of configurations.
  configs = set()
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    for config_name, config in spec['configurations'].iteritems():
      configs.add(_ConfigFullName(config_name, config))
  configs = list(configs)
  # Figure out all the projects that will be generated and their guids
  project_objects = _CreateProjectObjects(target_list, target_dicts, options,
                                          msvs_version)
  # Generate each project.
  missing_sources = []
  for project in project_objects.values():
    # _GenerateProject consults the module-global fixpath_prefix, so it is
    # swapped in per project and cleared again afterwards.
    fixpath_prefix = project.fixpath_prefix
    missing_sources.extend(_GenerateProject(project, options, msvs_version,
                                            generator_flags))
  fixpath_prefix = None
  for build_file in data:
    # Validate build_file extension
    if not build_file.endswith('.gyp'):
      continue
    sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln'
    if options.generator_output:
      sln_path = os.path.join(options.generator_output, sln_path)
    # Get projects in the solution, and their dependents.
    sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
    sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
    # Create folder hierarchy.
    root_entries = _GatherSolutionFolders(
        sln_projects, project_objects, flat=msvs_version.FlatSolution())
    # Create solution.
    sln = MSVSNew.MSVSSolution(sln_path,
                               entries=root_entries,
                               variants=configs,
                               websiteProperties=False,
                               version=msvs_version)
    sln.Write()
  if missing_sources:
    error_message = "Missing input files:\n" + \
                    '\n'.join(set(missing_sources))
    if generator_flags.get('msvs_error_on_missing_sources', False):
      raise GypError(error_message)
    else:
      # Non-fatal by default: just report which inputs could not be found.
      print >> sys.stdout, "Warning: " + error_message
def _GenerateMSBuildFiltersFile(filters_path, source_files,
                                rule_dependencies, extension_to_rule_name):
  """Generate the filters file.

  This file is used by Visual Studio to organize the presentation of source
  files into folders.

  Arguments:
    filters_path: The path of the file to be created.
    source_files: The hierarchical structure of all the sources.
    rule_dependencies: Dependencies as toplevel elements.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
  """
  filter_group = []
  source_group = []
  _AppendFiltersForMSBuild('', source_files, rule_dependencies,
                           extension_to_rule_name, filter_group, source_group)
  if not filter_group:
    # We don't need this filter anymore. Delete the old filter file.
    if os.path.exists(filters_path):
      os.unlink(filters_path)
    return
  content = ['Project',
             {'ToolsVersion': '4.0',
              'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
             },
             ['ItemGroup'] + filter_group,
             ['ItemGroup'] + source_group
            ]
  easy_xml.WriteXmlIfChanged(content, filters_path, pretty=True, win32=True)
def _AppendFiltersForMSBuild(parent_filter_name, sources, rule_dependencies,
                             extension_to_rule_name,
                             filter_group, source_group):
  """Creates the list of filters and sources to be added in the filter file.

  Args:
    parent_filter_name: The name of the filter under which the sources are
        found.
    sources: The hierarchy of filters and sources to process.
    rule_dependencies: Dependencies as toplevel elements.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
    filter_group: The list to which filter entries will be appended.
    source_group: The list to which source entries will be appended.
  """
  for source in sources:
    if not isinstance(source, MSVSProject.Filter):
      # A plain source file: emit an entry of the appropriate element type.
      _, element = _MapFileToMsBuildSourceType(source, rule_dependencies,
                                               extension_to_rule_name)
      entry = [element, {'Include': source}]
      if parent_filter_name:
        # Record which filter (folder) the file is displayed under.
        entry.append(['Filter', parent_filter_name])
      source_group.append(entry)
      continue
    # A sub-filter: compose its fully qualified name.
    if parent_filter_name:
      filter_name = '%s\\%s' % (parent_filter_name, source.name)
    else:
      filter_name = source.name
    filter_group.append(
        ['Filter', {'Include': filter_name},
         ['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]])
    # Recurse into the filter's contents.
    _AppendFiltersForMSBuild(filter_name, source.contents, rule_dependencies,
                             extension_to_rule_name, filter_group,
                             source_group)
def _MapFileToMsBuildSourceType(source, rule_dependencies,
extension_to_rule_name):
"""Returns the group and element type of the source file.
Arguments:
source: The source file name.
extension_to_rule_name: A dictionary mapping file extensions to rules.
Returns:
A pair of (group this file should be part of, the label of element)
"""
_, ext = os.path.splitext(source)
if ext in extension_to_rule_name:
group = 'rule'
element = extension_to_rule_name[ext]
elif ext in ['.cc', '.cpp', '.c', '.cxx']:
group = 'compile'
element = 'ClCompile'
elif ext in ['.h', '.hxx']:
group = 'include'
element = 'ClInclude'
elif ext == '.rc':
group = 'resource'
element = 'ResourceCompile'
elif ext == '.asm':
group = 'masm'
element = 'MASM'
elif ext == '.idl':
group = 'midl'
element = 'Midl'
elif source in rule_dependencies:
group = 'rule_dependency'
element = 'CustomBuild'
else:
group = 'none'
element = 'None'
return (group, element)
def _GenerateRulesForMSBuild(output_dir, options, spec,
                             sources, excluded_sources,
                             props_files_of_rules, targets_files_of_rules,
                             actions_to_add, rule_dependencies,
                             extension_to_rule_name):
  """Emit the .props/.targets/.xml trio for this target's native rules.

  MSBuild rules are implemented using three files: an XML file, a .targets
  file and a .props file.
  See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
  for more details.
  """
  rules = spec.get('rules', [])
  rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
  rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
  msbuild_rules = []
  for rule in rules_native:
    if 'action' not in rule and not rule.get('rule_sources', []):
      # Skip a rule with no action and no inputs.
      continue
    msbuild_rule = MSBuildRule(rule, spec)
    msbuild_rules.append(msbuild_rule)
    rule_dependencies.update(msbuild_rule.additional_dependencies.split(';'))
    extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
  if msbuild_rules:
    base = spec['target_name'] + options.suffix
    props_name = base + '.props'
    targets_name = base + '.targets'
    props_files_of_rules.add(props_name)
    targets_files_of_rules.add(targets_name)
    _GenerateMSBuildRulePropsFile(os.path.join(output_dir, props_name),
                                  msbuild_rules)
    _GenerateMSBuildRuleTargetsFile(os.path.join(output_dir, targets_name),
                                    msbuild_rules)
    _GenerateMSBuildRuleXmlFile(os.path.join(output_dir, base + '.xml'),
                                msbuild_rules)
  if rules_external:
    _GenerateExternalRules(rules_external, output_dir, spec,
                           sources, options, actions_to_add)
  _AdjustSourcesForRules(rules, sources, excluded_sources, True)
class MSBuildRule(object):
  """Used to store information used to generate an MSBuild rule.

  Attributes:
    rule_name: The rule name, sanitized to use in XML.
    target_name: The name of the target.
    after_targets: The name of the AfterTargets element.
    before_targets: The name of the BeforeTargets element.
    depends_on: The name of the DependsOn element.
    compute_output: The name of the ComputeOutput element.
    dirs_to_make: The name of the DirsToMake element.
    inputs: The name of the _inputs element.
    tlog: The name of the _tlog element.
    extension: The extension this rule applies to.
    description: The message displayed when this rule is invoked.
    additional_dependencies: A string listing additional dependencies.
    outputs: The outputs of this rule.
    command: The command used to run the rule.
  """

  def __init__(self, rule, spec):
    self.display_name = rule['rule_name']
    # XML element names may only contain word characters; replace the rest.
    self.rule_name = re.sub(r'\W', '_', self.display_name)
    name = self.rule_name
    # Create the various element names, following the example set by the
    # Visual Studio 2008 to 2010 conversion. I don't know if VS2010
    # is sensitive to the exact names.
    self.target_name = '_' + name
    self.after_targets = name + 'AfterTargets'
    self.before_targets = name + 'BeforeTargets'
    self.depends_on = name + 'DependsOn'
    self.compute_output = 'Compute%sOutput' % name
    self.dirs_to_make = name + 'DirsToMake'
    self.inputs = name + '_inputs'
    self.tlog = name + '_tlog'
    extension = rule['extension']
    # Normalize to a dotted extension.
    if not extension.startswith('.'):
      extension = '.' + extension
    self.extension = extension
    self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
        rule.get('message', name))
    self.additional_dependencies = ';'.join(
        MSVSSettings.ConvertVCMacrosToMSBuild(i)
        for i in _FixPaths(rule.get('inputs', [])))
    self.outputs = ';'.join(
        MSVSSettings.ConvertVCMacrosToMSBuild(o)
        for o in _FixPaths(rule.get('outputs', [])))
    self.command = MSVSSettings.ConvertVCMacrosToMSBuild(
        _BuildCommandLineForRule(spec, rule, has_input_path=True,
                                 do_setup_env=True))
def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
  """Generate the .props file.

  For each rule this emits default Before/AfterTargets hookup and the
  ItemDefinitionGroup carrying the command line, outputs, description and
  additional dependencies.
  """
  content = ['Project',
             {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
  for rule in msbuild_rules:
    content.extend([
        # Default hook points, only when the project hasn't set its own and
        # isn't a Makefile-style project.
        ['PropertyGroup',
         {'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
                       "'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
                                                                 rule.after_targets)
         },
         [rule.before_targets, 'Midl'],
         [rule.after_targets, 'CustomBuild'],
        ],
        # Let the rule participate in IDE "compile selected files" flows.
        ['PropertyGroup',
         [rule.depends_on,
          {'Condition': "'$(ConfigurationType)' != 'Makefile'"},
          '_SelectedFiles;$(%s)' % rule.depends_on
         ],
        ],
        # Per-item defaults for the rule's task invocation.
        ['ItemDefinitionGroup',
         [rule.rule_name,
          ['CommandLineTemplate', rule.command],
          ['Outputs', rule.outputs],
          ['ExecutionDescription', rule.description],
          ['AdditionalDependencies', rule.additional_dependencies],
         ],
        ]
    ])
  easy_xml.WriteXmlIfChanged(content, props_path, pretty=True, win32=True)
def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
  """Generate the .targets file.

  Emits, per rule: an AvailableItemName entry, a UsingTask pointing at the
  generated .xml task schema, the main Target that runs the rule command, a
  PropertyGroup wiring the compute-output target into link/lib input
  computation, and the compute-output Target that pre-creates directories.
  """
  content = ['Project',
             {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
             }
            ]
  # Register the generated .xml property-page schema.
  item_group = [
      'ItemGroup',
      ['PropertyPageSchema',
       {'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
      ]
    ]
  for rule in msbuild_rules:
    item_group.append(
        ['AvailableItemName',
         {'Include': rule.rule_name},
         ['Targets', rule.target_name],
        ])
  content.append(item_group)
  # Each rule's task is materialized from the XAML schema at build time.
  for rule in msbuild_rules:
    content.append(
        ['UsingTask',
         {'TaskName': rule.rule_name,
          'TaskFactory': 'XamlTaskFactory',
          'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
         },
         ['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
        ])
  for rule in msbuild_rules:
    rule_name = rule.rule_name
    # Item-metadata references for the main target's Inputs/Outputs.
    target_outputs = '%%(%s.Outputs)' % rule_name
    target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
                     '$(MSBuildProjectFile)') % (rule_name, rule_name)
    rule_inputs = '%%(%s.Identity)' % rule_name
    # Only outputs with these extensions are fed to the linker/librarian.
    extension_condition = ("'%(Extension)'=='.obj' or "
                           "'%(Extension)'=='.res' or "
                           "'%(Extension)'=='.rsc' or "
                           "'%(Extension)'=='.lib'")
    # When the IDE builds a selection, drop items not in that selection.
    remove_section = [
        'ItemGroup',
        {'Condition': "'@(SelectedFiles)' != ''"},
        [rule_name,
         {'Remove': '@(%s)' % rule_name,
          'Condition': "'%(Identity)' != '@(SelectedFiles)'"
         }
        ]
      ]
    # Collect per-item additional dependencies.
    inputs_section = [
        'ItemGroup',
        [rule.inputs, {'Include': '%%(%s.AdditionalDependencies)' % rule_name}]
      ]
    # Build the tracking-log (_tlog) items used for incremental builds.
    logging_section = [
        'ItemGroup',
        [rule.tlog,
         {'Include': '%%(%s.Outputs)' % rule_name,
          'Condition': ("'%%(%s.Outputs)' != '' and "
                        "'%%(%s.ExcludedFromBuild)' != 'true'" %
                        (rule_name, rule_name))
         },
         ['Source', "@(%s, '|')" % rule_name],
         ['Inputs', "@(%s -> '%%(Fullpath)', ';')" % rule.inputs],
        ],
      ]
    # Progress message shown in the build output.
    message_section = [
        'Message',
        {'Importance': 'High',
         'Text': '%%(%s.ExecutionDescription)' % rule_name
        }
      ]
    # Write .write.1.tlog / .read.1.tlog files for file tracking.
    write_tlog_section = [
        'WriteLinesToFile',
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule.tlog, rule.tlog),
         'File': '$(IntDir)$(ProjectName).write.1.tlog',
         'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
                                                            rule.tlog)
        }
      ]
    read_tlog_section = [
        'WriteLinesToFile',
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule.tlog, rule.tlog),
         'File': '$(IntDir)$(ProjectName).read.1.tlog',
         'Lines': "^%%(%s.Source);%%(%s.Inputs)" % (rule.tlog, rule.tlog)
        }
      ]
    # The actual task invocation for every non-excluded item.
    command_and_input_section = [
        rule_name,
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule_name, rule_name),
         'EchoOff': 'true',
         'StandardOutputImportance': 'High',
         'StandardErrorImportance': 'High',
         'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
         'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
         'Inputs': rule_inputs
        }
      ]
    content.extend([
        ['Target',
         {'Name': rule.target_name,
          'BeforeTargets': '$(%s)' % rule.before_targets,
          'AfterTargets': '$(%s)' % rule.after_targets,
          'Condition': "'@(%s)' != ''" % rule_name,
          'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
                                            rule.compute_output),
          'Outputs': target_outputs,
          'Inputs': target_inputs
         },
         remove_section,
         inputs_section,
         logging_section,
         message_section,
         write_tlog_section,
         read_tlog_section,
         command_and_input_section,
        ],
        ['PropertyGroup',
         ['ComputeLinkInputsTargets',
          '$(ComputeLinkInputsTargets);',
          '%s;' % rule.compute_output
         ],
         ['ComputeLibInputsTargets',
          '$(ComputeLibInputsTargets);',
          '%s;' % rule.compute_output
         ],
        ],
        ['Target',
         {'Name': rule.compute_output,
          'Condition': "'@(%s)' != ''" % rule_name
         },
         ['ItemGroup',
          [rule.dirs_to_make,
           {'Condition': "'@(%s)' != '' and "
            "'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
            'Include': '%%(%s.Outputs)' % rule_name
           }
          ],
          ['Link',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
          ['Lib',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
          ['ImpLib',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
         ],
         # Pre-create output directories so the rule command can just write.
         ['MakeDir',
          {'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
                           rule.dirs_to_make)
          }
         ]
        ],
      ])
  easy_xml.WriteXmlIfChanged(content, targets_path, pretty=True, win32=True)
def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
  """Generate the .xml file: the XAML property-page schema for each rule.

  This drives both the Visual Studio property pages for the rule and the
  XamlTaskFactory task generation referenced from the .targets file.
  """
  content = [
      'ProjectSchemaDefinitions',
      {'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
                 'assembly=Microsoft.Build.Framework'),
       'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
       'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
       'xmlns:transformCallback':
       'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
      }
    ]
  for rule in msbuild_rules:
    content.extend([
        # The Rule element describes the property page for this rule's items.
        ['Rule',
         {'Name': rule.rule_name,
          'PageTemplate': 'tool',
          'DisplayName': rule.display_name,
          'Order': '200'
         },
         ['Rule.DataSource',
          ['DataSource',
           {'Persistence': 'ProjectFile',
            'ItemType': rule.rule_name
           }
          ]
         ],
         ['Rule.Categories',
          ['Category',
           {'Name': 'General'},
           ['Category.DisplayName',
            ['sys:String', 'General'],
           ],
          ],
          ['Category',
           {'Name': 'Command Line',
            'Subtype': 'CommandLine'
           },
           ['Category.DisplayName',
            ['sys:String', 'Command Line'],
           ],
          ],
         ],
         # The rule item itself is the task's Inputs.
         ['StringListProperty',
          {'Name': 'Inputs',
           'Category': 'Command Line',
           'IsRequired': 'true',
           'Switch': ' '
          },
          ['StringListProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'ItemType': rule.rule_name,
             'SourceType': 'Item'
            }
           ]
          ],
         ],
         ['StringProperty',
          {'Name': 'CommandLineTemplate',
           'DisplayName': 'Command Line',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         # "Execute Before" drop-down backed by the project's targets.
         ['DynamicEnumProperty',
          {'Name': rule.before_targets,
           'Category': 'General',
           'EnumProvider': 'Targets',
           'IncludeInCommandLine': 'False'
          },
          ['DynamicEnumProperty.DisplayName',
           ['sys:String', 'Execute Before'],
          ],
          ['DynamicEnumProperty.Description',
           ['sys:String', 'Specifies the targets for the build customization'
            ' to run before.'
           ],
          ],
          ['DynamicEnumProperty.ProviderSettings',
           ['NameValuePair',
            {'Name': 'Exclude',
             'Value': '^%s|^Compute' % rule.before_targets
            }
           ]
          ],
          ['DynamicEnumProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'HasConfigurationCondition': 'true'
            }
           ]
          ],
         ],
         # "Execute After" drop-down, analogous to the one above.
         ['DynamicEnumProperty',
          {'Name': rule.after_targets,
           'Category': 'General',
           'EnumProvider': 'Targets',
           'IncludeInCommandLine': 'False'
          },
          ['DynamicEnumProperty.DisplayName',
           ['sys:String', 'Execute After'],
          ],
          ['DynamicEnumProperty.Description',
           ['sys:String', ('Specifies the targets for the build customization'
                           ' to run after.')
           ],
          ],
          ['DynamicEnumProperty.ProviderSettings',
           ['NameValuePair',
            {'Name': 'Exclude',
             'Value': '^%s|^Compute' % rule.after_targets
            }
           ]
          ],
          ['DynamicEnumProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'ItemType': '',
             'HasConfigurationCondition': 'true'
            }
           ]
          ],
         ],
         ['StringListProperty',
          {'Name': 'Outputs',
           'DisplayName': 'Outputs',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['StringProperty',
          {'Name': 'ExecutionDescription',
           'DisplayName': 'Execution Description',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['StringListProperty',
          {'Name': 'AdditionalDependencies',
           'DisplayName': 'Additional Dependencies',
           'IncludeInCommandLine': 'False',
           'Visible': 'false'
          }
         ],
         ['StringProperty',
          {'Subtype': 'AdditionalOptions',
           'Name': 'AdditionalOptions',
           'Category': 'Command Line'
          },
          ['StringProperty.DisplayName',
           ['sys:String', 'Additional Options'],
          ],
          ['StringProperty.Description',
           ['sys:String', 'Additional Options'],
          ],
         ],
        ],
        # Register the item type and associate the file extension with it.
        ['ItemType',
         {'Name': rule.rule_name,
          'DisplayName': rule.display_name
         }
        ],
        ['FileExtension',
         {'Name': '*' + rule.extension,
          'ContentType': rule.rule_name
         }
        ],
        ['ContentType',
         {'Name': rule.rule_name,
          'DisplayName': '',
          'ItemType': rule.rule_name
         }
        ]
    ])
  easy_xml.WriteXmlIfChanged(content, xml_path, pretty=True, win32=True)
def _GetConfigurationAndPlatform(name, settings):
configuration = name.rsplit('_', 1)[0]
platform = settings.get('msvs_configuration_platform', 'Win32')
return (configuration, platform)
def _GetConfigurationCondition(name, settings):
  """Return the MSBuild Condition string selecting this configuration."""
  configuration, platform = _GetConfigurationAndPlatform(name, settings)
  return (r"'$(Configuration)|$(Platform)'=='%s|%s'" %
          (configuration, platform))
def _GetMSBuildProjectConfigurations(configurations):
  """Build the ProjectConfigurations ItemGroup for an MSBuild project."""
  group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
  for name, settings in sorted(configurations.iteritems()):
    configuration, platform = _GetConfigurationAndPlatform(name, settings)
    group.append(
        ['ProjectConfiguration',
         {'Include': '%s|%s' % (configuration, platform)},
         ['Configuration', configuration],
         ['Platform', platform]])
  return [group]
def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name):
namespace = os.path.splitext(gyp_file_name)[0]
properties = [
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', guid],
['Keyword', 'Win32Proj'],
['RootNamespace', namespace],
['IgnoreWarnCompileDuplicatedFilename', 'true'],
]
]
if os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or \
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64':
properties[0].append(['PreferredToolArchitecture', 'x64'])
if spec.get('msvs_enable_winrt'):
properties[0].append(['DefaultLanguage', 'en-US'])
properties[0].append(['AppContainerApplication', 'true'])
if spec.get('msvs_application_type_revision'):
app_type_revision = spec.get('msvs_application_type_revision')
properties[0].append(['ApplicationTypeRevision', app_type_revision])
else:
properties[0].append(['ApplicationTypeRevision', '8.1'])
if spec.get('msvs_target_platform_version'):
target_platform_version = spec.get('msvs_target_platform_version')
properties[0].append(['WindowsTargetPlatformVersion',
target_platform_version])
if spec.get('msvs_target_platform_minversion'):
target_platform_minversion = spec.get('msvs_target_platform_minversion')
properties[0].append(['WindowsTargetPlatformMinVersion',
target_platform_minversion])
else:
properties[0].append(['WindowsTargetPlatformMinVersion',
target_platform_version])
if spec.get('msvs_enable_winphone'):
properties[0].append(['ApplicationType', 'Windows Phone'])
else:
properties[0].append(['ApplicationType', 'Windows Store'])
return properties
def _GetMSBuildConfigurationDetails(spec, build_file):
  """Collect per-configuration ConfigurationType/CharacterSet properties."""
  properties = {}
  for name, settings in spec['configurations'].iteritems():
    msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file)
    condition = _GetConfigurationCondition(name, settings)
    _AddConditionalProperty(properties, condition, 'ConfigurationType',
                            msbuild_attributes['ConfigurationType'])
    character_set = msbuild_attributes.get('CharacterSet')
    # WinRT projects do not carry a CharacterSet property.
    if character_set and 'msvs_enable_winrt' not in spec:
      _AddConditionalProperty(properties, condition, 'CharacterSet',
                              character_set)
  return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)
def _GetMSBuildLocalProperties(msbuild_toolset):
# Currently the only local property we support is PlatformToolset
properties = {}
if msbuild_toolset:
properties = [
['PropertyGroup', {'Label': 'Locals'},
['PlatformToolset', msbuild_toolset],
]
]
return properties
def _GetMSBuildPropertySheets(configurations):
  """Return the ImportGroup elements pulling in user props sheets.

  When no configuration specifies msbuild_props, a single unconditioned
  ImportGroup is produced; otherwise one conditioned group per configuration,
  each importing its extra props files after the user sheet.
  """
  user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
  props_by_condition = {}
  props_specified = False
  for name, settings in sorted(configurations.iteritems()):
    condition = _GetConfigurationCondition(name, settings)
    if 'msbuild_props' in settings:
      props_by_condition[condition] = _FixPaths(settings['msbuild_props'])
      props_specified = True
    else:
      props_by_condition[condition] = ''
  if not props_specified:
    return [
        ['ImportGroup',
         {'Label': 'PropertySheets'},
         ['Import',
          {'Project': user_props,
           'Condition': "exists('%s')" % user_props,
           'Label': 'LocalAppDataPlatform'
          }
         ]
        ]
       ]
  sheets = []
  for condition, props in props_by_condition.iteritems():
    import_group = [
        'ImportGroup',
        {'Label': 'PropertySheets',
         'Condition': condition
        },
        ['Import',
         {'Project': user_props,
          'Condition': "exists('%s')" % user_props,
          'Label': 'LocalAppDataPlatform'
         }
        ]
       ]
    for props_file in props:
      import_group.append(['Import', {'Project': props_file}])
    sheets.append(import_group)
  return sheets
def _ConvertMSVSBuildAttributes(spec, config, build_file):
  """Convert MSVS-style configuration attributes into their MSBuild form.

  Directory attributes gain a trailing backslash; CharacterSet and
  ConfigurationType are mapped from numeric MSVS values to MSBuild names.
  Unrecognized attributes are dropped with a console warning.
  """
  config_type = _GetMSVSConfigurationType(spec, build_file)
  msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
  msbuild_attributes = {}
  for a in msvs_attributes:
    if a in ['IntermediateDirectory', 'OutputDirectory']:
      # MSBuild expects directory values to end with a backslash.
      directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
      if not directory.endswith('\\'):
        directory += '\\'
      msbuild_attributes[a] = directory
    elif a == 'CharacterSet':
      msbuild_attributes[a] = _ConvertMSVSCharacterSet(msvs_attributes[a])
    elif a == 'ConfigurationType':
      msbuild_attributes[a] = _ConvertMSVSConfigurationType(msvs_attributes[a])
    else:
      print 'Warning: Do not know how to convert MSVS attribute ' + a
  return msbuild_attributes
def _ConvertMSVSCharacterSet(char_set):
if char_set.isdigit():
char_set = {
'0': 'MultiByte',
'1': 'Unicode',
'2': 'MultiByte',
}[char_set]
return char_set
def _ConvertMSVSConfigurationType(config_type):
if config_type.isdigit():
config_type = {
'1': 'Application',
'2': 'DynamicLibrary',
'4': 'StaticLibrary',
'10': 'Utility'
}[config_type]
return config_type
def _GetMSBuildAttributes(spec, config, build_file):
  """Computes MSBuild project-level attributes for one configuration.

  Arguments:
    spec: The target project dict.
    config: The per-configuration settings dict.
    build_file: The path of the gyp file defining the target.
  Returns:
    A dictionary mapping MSBuild attribute names to values, always
    containing 'OutputDirectory', 'IntermediateDirectory' and 'TargetName'.
  """
  if 'msbuild_configuration_attributes' not in config:
    msbuild_attributes = _ConvertMSVSBuildAttributes(spec, config, build_file)
  else:
    # Attributes were given directly in MSBuild form; only the configuration
    # type still needs conversion and defaulting.
    config_type = _GetMSVSConfigurationType(spec, build_file)
    config_type = _ConvertMSVSConfigurationType(config_type)
    msbuild_attributes = config.get('msbuild_configuration_attributes', {})
    msbuild_attributes.setdefault('ConfigurationType', config_type)
  # Directory attributes get a trailing backslash as MSBuild expects.
  output_dir = msbuild_attributes.get('OutputDirectory',
                                      '$(SolutionDir)$(Configuration)')
  msbuild_attributes['OutputDirectory'] = _FixPath(output_dir) + '\\'
  if 'IntermediateDirectory' not in msbuild_attributes:
    intermediate = _FixPath('$(Configuration)') + '\\'
    msbuild_attributes['IntermediateDirectory'] = intermediate
  if 'CharacterSet' in msbuild_attributes:
    msbuild_attributes['CharacterSet'] = _ConvertMSVSCharacterSet(
        msbuild_attributes['CharacterSet'])
  if 'TargetName' not in msbuild_attributes:
    prefix = spec.get('product_prefix', '')
    product_name = spec.get('product_name', '$(ProjectName)')
    target_name = prefix + product_name
    msbuild_attributes['TargetName'] = target_name
  if 'TargetExt' not in msbuild_attributes and 'product_extension' in spec:
    ext = spec.get('product_extension')
    msbuild_attributes['TargetExt'] = '.' + ext

  # An external builder (e.g. ninja) overrides the output directory.
  if spec.get('msvs_external_builder'):
    external_out_dir = spec.get('msvs_external_builder_out_dir', '.')
    msbuild_attributes['OutputDirectory'] = _FixPath(external_out_dir) + '\\'

  # Make sure that 'TargetPath' matches 'Lib.OutputFile' or 'Link.OutputFile'
  # (depending on the tool used) to avoid MSB8012 warning.
  msbuild_tool_map = {
      'executable': 'Link',
      'shared_library': 'Link',
      'loadable_module': 'Link',
      'static_library': 'Lib',
  }
  msbuild_tool = msbuild_tool_map.get(spec['type'])
  if msbuild_tool:
    msbuild_settings = config['finalized_msbuild_settings']
    out_file = msbuild_settings[msbuild_tool].get('OutputFile')
    if out_file:
      msbuild_attributes['TargetPath'] = _FixPath(out_file)
    target_ext = msbuild_settings[msbuild_tool].get('TargetExt')
    if target_ext:
      msbuild_attributes['TargetExt'] = target_ext

  return msbuild_attributes
def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
  """Returns the PropertyGroup of per-configuration global properties.

  Arguments:
    spec: The target project dict.
    configurations: A dict mapping configuration name to configuration dict.
    build_file: The path of the gyp file defining the target.
  Returns:
    A list containing a single ['PropertyGroup', ...] element.
  """
  # TODO(jeanluc) We could optimize out the following and do it only if
  # there are actions.
  # TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
  new_paths = []
  cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
  if cygwin_dirs:
    cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
    new_paths.append(cyg_path)
    # TODO(jeanluc) Change the convention to have both a cygwin_dir and a
    # python_dir.
    python_path = cyg_path.replace('cygwin\\bin', 'python_26')
    new_paths.append(python_path)
    if new_paths:
      new_paths = '$(ExecutablePath);' + ';'.join(new_paths)

  properties = {}
  for (name, configuration) in sorted(configurations.iteritems()):
    condition = _GetConfigurationCondition(name, configuration)
    attributes = _GetMSBuildAttributes(spec, configuration, build_file)
    msbuild_settings = configuration['finalized_msbuild_settings']
    _AddConditionalProperty(properties, condition, 'IntDir',
                            attributes['IntermediateDirectory'])
    _AddConditionalProperty(properties, condition, 'OutDir',
                            attributes['OutputDirectory'])
    _AddConditionalProperty(properties, condition, 'TargetName',
                            attributes['TargetName'])
    # Bug fix: 'TargetExt' used to be added a second time below under an
    # attributes.get() guard that is implied by this one.  The duplicate
    # appended each condition twice, so len(conditions) could never equal
    # num_configurations in _GetMSBuildPropertyGroup and duplicate
    # conditional entries were emitted.  Add it exactly once.
    if 'TargetExt' in attributes:
      _AddConditionalProperty(properties, condition, 'TargetExt',
                              attributes['TargetExt'])

    if attributes.get('TargetPath'):
      _AddConditionalProperty(properties, condition, 'TargetPath',
                              attributes['TargetPath'])

    if new_paths:
      _AddConditionalProperty(properties, condition, 'ExecutablePath',
                              new_paths)
    # The tool named '' holds project-level (global) settings.
    tool_settings = msbuild_settings.get('', {})
    for name, value in sorted(tool_settings.iteritems()):
      formatted_value = _GetValueFormattedForMSBuild('', name, value)
      _AddConditionalProperty(properties, condition, name, formatted_value)
  return _GetMSBuildPropertyGroup(spec, None, properties)
def _AddConditionalProperty(properties, condition, name, value):
"""Adds a property / conditional value pair to a dictionary.
Arguments:
properties: The dictionary to be modified. The key is the name of the
property. The value is itself a dictionary; its key is the value and
the value a list of condition for which this value is true.
condition: The condition under which the named property has the value.
name: The name of the property.
value: The value of the property.
"""
if name not in properties:
properties[name] = {}
values = properties[name]
if value not in values:
values[value] = []
conditions = values[value]
conditions.append(condition)
# Regex for MSVS/MSBuild variable references (i.e. $(FOO)); group 1 captures
# the bare variable name.  Used by _GetMSBuildPropertyGroup's edge detection.
MSVS_VARIABLE_REFERENCE = re.compile(r'\$\(([a-zA-Z_][a-zA-Z0-9_]*)\)')
def _GetMSBuildPropertyGroup(spec, label, properties):
  """Returns a PropertyGroup definition for the specified properties.

  Arguments:
    spec: The target project dict.
    label: An optional label for the PropertyGroup.
    properties: The dictionary to be converted.  The key is the name of the
        property.  The value is itself a dictionary; its key is the value and
        the value a list of condition for which this value is true.
  Returns:
    A list containing a single ['PropertyGroup', ...] element.
  """
  group = ['PropertyGroup']
  if label:
    group.append({'Label': label})
  num_configurations = len(spec['configurations'])
  def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_varible.
    # This happens to be easier in this case, since a variable's
    # definition contains all variables it references in a single string.
    edges = set()
    for value in sorted(properties[node].keys()):
      # Add to edges all $(...) references to variables.
      #
      # Variable references that refer to names not in properties are excluded
      # These can exist for instance to refer built in definitions like
      # $(SolutionDir).
      #
      # Self references are ignored. Self reference is used in a few places to
      # append to the default value. I.e. PATH=$(PATH);other_path
      edges.update(set([v for v in MSVS_VARIABLE_REFERENCE.findall(value)
                        if v in properties and v != node]))
    return edges
  properties_ordered = gyp.common.TopologicallySorted(
      properties.keys(), GetEdges)
  # Walk properties in the reverse of a topological sort on
  # user_of_variable -> used_variable as this ensures variables are
  # defined before they are used.
  # NOTE: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
  for name in reversed(properties_ordered):
    values = properties[name]
    for value, conditions in sorted(values.iteritems()):
      if len(conditions) == num_configurations:
        # If the value is the same all configurations,
        # just add one unconditional entry.
        group.append([name, value])
      else:
        # Otherwise emit one conditional entry per configuration condition.
        for condition in conditions:
          group.append([name, {'Condition': condition}, value])
  return [group]
def _GetMSBuildToolSettingsSections(spec, configurations):
  """Returns one ItemDefinitionGroup section per configuration.

  Each section contains the per-tool settings for that configuration,
  guarded by the configuration's condition.
  """
  groups = []
  for config_name, configuration in sorted(configurations.iteritems()):
    msbuild_settings = configuration['finalized_msbuild_settings']
    condition = _GetConfigurationCondition(config_name, configuration)
    group = ['ItemDefinitionGroup', {'Condition': condition}]
    for tool_name, tool_settings in sorted(msbuild_settings.iteritems()):
      # Skip the tool named '' which is a holder of global settings handled
      # by _GetMSBuildConfigurationGlobalProperties, and tools with no
      # settings at all.
      if not tool_name or not tool_settings:
        continue
      tool = [tool_name]
      for setting_name, setting_value in sorted(tool_settings.iteritems()):
        tool.append([setting_name,
                     _GetValueFormattedForMSBuild(tool_name, setting_name,
                                                  setting_value)])
      group.append(tool)
    groups.append(group)
  return groups
def _FinalizeMSBuildSettings(spec, configuration):
  """Merges gyp-level settings into the configuration's MSBuild settings.

  Mutates `configuration` in place, storing the completed settings dict under
  the 'finalized_msbuild_settings' key.

  Arguments:
    spec: The target project dict.
    configuration: The per-configuration settings dict to finalize.
  """
  if 'msbuild_settings' in configuration:
    converted = False
    msbuild_settings = configuration['msbuild_settings']
    MSVSSettings.ValidateMSBuildSettings(msbuild_settings)
  else:
    # No native MSBuild settings given; convert the MSVS ones.
    converted = True
    msvs_settings = configuration.get('msvs_settings', {})
    msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings)
  include_dirs, midl_include_dirs, resource_include_dirs = \
      _GetIncludeDirs(configuration)
  libraries = _GetLibraries(spec)
  library_dirs = _GetLibraryDirs(configuration)
  out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec, msbuild=True)
  target_ext = _GetOutputTargetExt(spec)
  defines = _GetDefines(configuration)
  if converted:
    # Visual Studio 2010 has TR1
    defines = [d for d in defines if d != '_HAS_TR1=0']
    # Warn of ignored settings
    ignored_settings = ['msvs_tool_files']
    for ignored_setting in ignored_settings:
      value = configuration.get(ignored_setting)
      if value:
        print ('Warning: The automatic conversion to MSBuild does not handle '
               '%s. Ignoring setting of %s' % (ignored_setting, str(value)))

  defines = [_EscapeCppDefineForMSBuild(d) for d in defines]
  disabled_warnings = _GetDisabledWarnings(configuration)
  prebuild = configuration.get('msvs_prebuild')
  postbuild = configuration.get('msvs_postbuild')
  def_file = _GetModuleDefinition(spec)
  precompiled_header = configuration.get('msvs_precompiled_header')

  # Add the information to the appropriate tool
  # TODO(jeanluc) We could optimize and generate these settings only if
  # the corresponding files are found, e.g. don't generate ResourceCompile
  # if you don't have any resources.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'AdditionalIncludeDirectories', include_dirs)
  _ToolAppend(msbuild_settings, 'Midl',
              'AdditionalIncludeDirectories', midl_include_dirs)
  _ToolAppend(msbuild_settings, 'ResourceCompile',
              'AdditionalIncludeDirectories', resource_include_dirs)
  # Add in libraries, note that even for empty libraries, we want this
  # set, to prevent inheriting default libraries from the enviroment.
  _ToolSetOrAppend(msbuild_settings, 'Link', 'AdditionalDependencies',
                   libraries)
  _ToolAppend(msbuild_settings, 'Link', 'AdditionalLibraryDirectories',
              library_dirs)
  if out_file:
    _ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file,
                only_if_unset=True)
  if target_ext:
    _ToolAppend(msbuild_settings, msbuild_tool, 'TargetExt', target_ext,
                only_if_unset=True)
  # Add defines.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'PreprocessorDefinitions', defines)
  _ToolAppend(msbuild_settings, 'ResourceCompile',
              'PreprocessorDefinitions', defines)
  # Add disabled warnings.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'DisableSpecificWarnings', disabled_warnings)
  # Turn on precompiled headers if appropriate.
  if precompiled_header:
    precompiled_header = os.path.split(precompiled_header)[1]
    _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use')
    _ToolAppend(msbuild_settings, 'ClCompile',
                'PrecompiledHeaderFile', precompiled_header)
    _ToolAppend(msbuild_settings, 'ClCompile',
                'ForcedIncludeFiles', [precompiled_header])
  else:
    _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'NotUsing')
  # Turn off WinRT compilation
  _ToolAppend(msbuild_settings, 'ClCompile', 'CompileAsWinRT', 'false')
  # Turn on import libraries if appropriate
  if spec.get('msvs_requires_importlibrary'):
    _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'false')
  # Loadable modules don't generate import libraries;
  # tell dependent projects to not expect one.
  if spec['type'] == 'loadable_module':
    _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true')
  # Set the module definition file if any.
  if def_file:
    _ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file)
  configuration['finalized_msbuild_settings'] = msbuild_settings
  # NOTE: the two appends below still take effect even though they follow the
  # assignment above, because msbuild_settings is the same dict object.
  if prebuild:
    _ToolAppend(msbuild_settings, 'PreBuildEvent', 'Command', prebuild)
  if postbuild:
    _ToolAppend(msbuild_settings, 'PostBuildEvent', 'Command', postbuild)
def _GetValueFormattedForMSBuild(tool_name, name, value):
  """Formats a setting value for emission into MSBuild project XML.

  Lists are joined with ';' (or ' ' for the AdditionalOptions settings of
  ClCompile/Link/Lib).  For settings that VS2010 does not extend
  automatically, a %(name) inheritance reference is appended.  VC macros are
  rewritten to their MSBuild equivalents.

  Arguments:
    tool_name: The MSBuild tool the setting belongs to ('' for global).
    name: The setting name.
    value: The setting value, either a scalar or a list.
  Returns:
    The formatted string value.
  """
  if isinstance(value, list):
    # For some settings, VS2010 does not automatically extends the settings
    # TODO(jeanluc) Is this what we want?
    if name in ['AdditionalIncludeDirectories',
                'AdditionalLibraryDirectories',
                'AdditionalOptions',
                'DelayLoadDLLs',
                'DisableSpecificWarnings',
                'PreprocessorDefinitions']:
      # Bug fix: build a new list instead of mutating the caller's settings
      # list in place; the old `value.append(...)` accumulated duplicate
      # %(...) entries if the same setting was formatted more than once.
      value = value + ['%%(%s)' % name]
    # For most tools, entries in a list should be separated with ';' but some
    # settings use a space. Check for those first.
    exceptions = {
        'ClCompile': ['AdditionalOptions'],
        'Link': ['AdditionalOptions'],
        'Lib': ['AdditionalOptions']}
    if name in exceptions.get(tool_name, []):
      char = ' '
    else:
      char = ';'
    formatted_value = char.join(
        [MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value])
  else:
    formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value)
  return formatted_value
def _VerifySourcesExist(sources, root_dir):
  """Verifies that all source files exist on disk.

  Checks that all regular source files, i.e. not created at run time,
  exist on disk.  Missing files cause needless recompilation but no otherwise
  visible errors.

  Arguments:
    sources: A recursive list of Filter/file names.
    root_dir: The root directory for the relative path names.
  Returns:
    A list of source files that cannot be found on disk.
  """
  missing = []
  for source in sources:
    if isinstance(source, MSVSProject.Filter):
      # Recurse into filter (folder) nodes.
      missing.extend(_VerifySourcesExist(source.contents, root_dir))
    elif '$' not in source:
      # Paths containing '$' hold build variables and are resolved at build
      # time, so they cannot be checked here.
      full_path = os.path.join(root_dir, source)
      if not os.path.exists(full_path):
        missing.append(full_path)
  return missing
def _GetMSBuildSources(spec, sources, exclusions, rule_dependencies,
                       extension_to_rule_name, actions_spec,
                       sources_handled_by_action, list_excluded):
  """Returns the ItemGroup sections listing the project's source files.

  Sources are bucketed by MSBuild element type (none/masm/midl/include/
  compile/resource/rule/rule_dependency); one ItemGroup is emitted per
  non-empty bucket, plus one for custom-build actions if any.
  """
  groups = ['none', 'masm', 'midl', 'include', 'compile', 'resource', 'rule',
            'rule_dependency']
  grouped_sources = dict((g, []) for g in groups)
  _AddSources2(spec, sources, exclusions, grouped_sources,
               rule_dependencies, extension_to_rule_name,
               sources_handled_by_action, list_excluded)
  result = []
  for g in groups:
    if grouped_sources[g]:
      result.append(['ItemGroup'] + grouped_sources[g])
  if actions_spec:
    result.append(['ItemGroup'] + actions_spec)
  return result
def _AddSources2(spec, sources, exclusions, grouped_sources,
                 rule_dependencies, extension_to_rule_name,
                 sources_handled_by_action,
                 list_excluded):
  """Recursively buckets sources into grouped_sources by MSBuild type.

  Walks the (possibly filtered/nested) source list, skipping files handled
  by custom-build actions, and appends each remaining file's element --
  together with its exclusion and precompiled-header details -- to the
  appropriate list in grouped_sources.  Mutates grouped_sources in place.
  """
  extensions_excluded_from_precompile = []
  for source in sources:
    if isinstance(source, MSVSProject.Filter):
      # Recurse into filter (folder) nodes.
      _AddSources2(spec, source.contents, exclusions, grouped_sources,
                   rule_dependencies, extension_to_rule_name,
                   sources_handled_by_action,
                   list_excluded)
    else:
      if not source in sources_handled_by_action:
        detail = []
        excluded_configurations = exclusions.get(source, [])
        if len(excluded_configurations) == len(spec['configurations']):
          # Excluded from every configuration: one unconditional exclusion.
          detail.append(['ExcludedFromBuild', 'true'])
        else:
          # Otherwise exclude only under each matching configuration.
          for config_name, configuration in sorted(excluded_configurations):
            condition = _GetConfigurationCondition(config_name, configuration)
            detail.append(['ExcludedFromBuild',
                           {'Condition': condition},
                           'true'])
        # Add precompile if needed
        for config_name, configuration in spec['configurations'].iteritems():
          precompiled_source = configuration.get('msvs_precompiled_source', '')
          if precompiled_source != '':
            precompiled_source = _FixPath(precompiled_source)
            if not extensions_excluded_from_precompile:
              # If the precompiled header is generated by a C source, we must
              # not try to use it for C++ sources, and vice versa.
              basename, extension = os.path.splitext(precompiled_source)
              if extension == '.c':
                extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
              else:
                extensions_excluded_from_precompile = ['.c']

            if precompiled_source == source:
              # This file creates the precompiled header for this config.
              condition = _GetConfigurationCondition(config_name, configuration)
              detail.append(['PrecompiledHeader',
                             {'Condition': condition},
                             'Create'
                            ])
            else:
              # Turn off precompiled header usage for source files of a
              # different type than the file that generated the
              # precompiled header.
              for extension in extensions_excluded_from_precompile:
                if source.endswith(extension):
                  detail.append(['PrecompiledHeader', ''])
                  detail.append(['ForcedIncludeFiles', ''])

        group, element = _MapFileToMsBuildSourceType(source, rule_dependencies,
                                                     extension_to_rule_name)
        grouped_sources[group].append([element, {'Include': source}] + detail)
def _GetMSBuildProjectReferences(project):
  """Returns an ItemGroup of ProjectReference elements for the dependencies.

  Returns an empty list when the project has no dependencies.
  """
  if not project.dependencies:
    return []
  group = ['ItemGroup']
  for dependency in project.dependencies:
    project_dir = os.path.split(project.path)[0]
    relative_path = gyp.common.RelativePath(dependency.path, project_dir)
    project_ref = ['ProjectReference',
                   {'Include': relative_path},
                   ['Project', dependency.guid],
                   ['ReferenceOutputAssembly', 'false']
                  ]
    for config in dependency.spec.get('configurations', {}).itervalues():
      # If it's disabled in any config, turn it off in the reference.
      if config.get('msvs_2010_disable_uldi_when_referenced', 0):
        project_ref.append(['UseLibraryDependencyInputs', 'false'])
        break
    group.append(project_ref)
  return [group]
def _GenerateMSBuildProject(project, options, version, generator_flags):
  """Generates and writes the .vcxproj file for one project.

  Arguments:
    project: The MSVSNew project object to generate.
    options: Global generator options.
    version: The MSVS version object in use.
    generator_flags: Dict of generator-specific flags.
  Returns:
    A list of source files that could not be found on disk.
  """
  spec = project.spec
  configurations = spec['configurations']
  project_dir, project_file_name = os.path.split(project.path)
  gyp.common.EnsureDirExists(project.path)
  # Prepare list of sources and excluded sources.
  gyp_path = _NormalizedSource(project.build_file)
  # NOTE(review): relative_path_of_gyp_file appears unused in this function --
  # TODO confirm and remove it (and gyp_path) in a follow-up.
  relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)

  gyp_file = os.path.split(project.build_file)[1]
  sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
                                                    gyp_file)
  # Add rules.
  actions_to_add = {}
  props_files_of_rules = set()
  targets_files_of_rules = set()
  rule_dependencies = set()
  extension_to_rule_name = {}
  list_excluded = generator_flags.get('msvs_list_excluded_files', True)

  # Don't generate rules if we are using an external builder like ninja.
  if not spec.get('msvs_external_builder'):
    _GenerateRulesForMSBuild(project_dir, options, spec,
                             sources, excluded_sources,
                             props_files_of_rules, targets_files_of_rules,
                             actions_to_add, rule_dependencies,
                             extension_to_rule_name)
  else:
    rules = spec.get('rules', [])
    _AdjustSourcesForRules(rules, sources, excluded_sources, True)

  sources, excluded_sources, excluded_idl = (
      _AdjustSourcesAndConvertToFilterHierarchy(spec, options,
                                                project_dir, sources,
                                                excluded_sources,
                                                list_excluded, version))

  # Don't add actions if we are using an external builder like ninja.
  if not spec.get('msvs_external_builder'):
    _AddActions(actions_to_add, spec, project.build_file)
    _AddCopies(actions_to_add, spec)

    # NOTE: this stanza must appear after all actions have been decided.
    # Don't excluded sources with actions attached, or they won't run.
    excluded_sources = _FilterActionsFromExcluded(
        excluded_sources, actions_to_add)

  exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
  actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild(
      spec, actions_to_add)

  _GenerateMSBuildFiltersFile(project.path + '.filters', sources,
                              rule_dependencies,
                              extension_to_rule_name)
  missing_sources = _VerifySourcesExist(sources, project_dir)

  for configuration in configurations.itervalues():
    _FinalizeMSBuildSettings(spec, configuration)

  # Add attributes to root element
  import_default_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]]
  import_cpp_props_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]]
  import_cpp_targets_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]]
  import_masm_props_section = [
      ['Import',
        {'Project': r'$(VCTargetsPath)\BuildCustomizations\masm.props'}]]
  import_masm_targets_section = [
      ['Import',
        {'Project': r'$(VCTargetsPath)\BuildCustomizations\masm.targets'}]]
  macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]]

  content = [
      'Project',
      {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003',
       'ToolsVersion': version.ProjectVersion(),
       'DefaultTargets': 'Build'
      }]

  # The ordering of the sections below mirrors the layout MSBuild expects
  # in a .vcxproj file.
  content += _GetMSBuildProjectConfigurations(configurations)
  content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name)
  content += import_default_section
  content += _GetMSBuildConfigurationDetails(spec, project.build_file)
  if spec.get('msvs_enable_winphone'):
    content += _GetMSBuildLocalProperties('v120_wp81')
  else:
    content += _GetMSBuildLocalProperties(project.msbuild_toolset)
  content += import_cpp_props_section
  content += import_masm_props_section
  content += _GetMSBuildExtensions(props_files_of_rules)
  content += _GetMSBuildPropertySheets(configurations)
  content += macro_section
  content += _GetMSBuildConfigurationGlobalProperties(spec, configurations,
                                                      project.build_file)
  content += _GetMSBuildToolSettingsSections(spec, configurations)
  content += _GetMSBuildSources(
      spec, sources, exclusions, rule_dependencies, extension_to_rule_name,
      actions_spec, sources_handled_by_action, list_excluded)
  content += _GetMSBuildProjectReferences(project)
  content += import_cpp_targets_section
  content += import_masm_targets_section
  content += _GetMSBuildExtensionTargets(targets_files_of_rules)

  if spec.get('msvs_external_builder'):
    content += _GetMSBuildExternalBuilderTargets(spec)

  # TODO(jeanluc) File a bug to get rid of runas.  We had in MSVS:
  #   has_run_as = _WriteMSVSUserFile(project.path, version, spec)

  easy_xml.WriteXmlIfChanged(content, project.path, pretty=True, win32=True)

  return missing_sources
def _GetMSBuildExternalBuilderTargets(spec):
  """Return a list of MSBuild targets for external builders.

  The "Build" and "Clean" targets are always generated.  If the spec contains
  'msvs_external_builder_clcompile_cmd', then the "ClCompile" target will also
  be generated, to support building selected C/C++ files.

  Arguments:
    spec: The gyp target spec.
  Returns:
    List of MSBuild 'Target' specs.
  """
  def _MakeTarget(target_name, cmd_key):
    # One ['Target', ...] element whose Exec runs the spec's command line.
    cmd = _BuildCommandLineForRuleRaw(
        spec, spec[cmd_key], False, False, False, False)
    return ['Target', {'Name': target_name}, ['Exec', {'Command': cmd}]]

  targets = [_MakeTarget('Build', 'msvs_external_builder_build_cmd'),
             _MakeTarget('Clean', 'msvs_external_builder_clean_cmd')]
  if spec.get('msvs_external_builder_clcompile_cmd'):
    targets.append(
        _MakeTarget('ClCompile', 'msvs_external_builder_clcompile_cmd'))
  return targets
def _GetMSBuildExtensions(props_files_of_rules):
extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}]
for props_file in props_files_of_rules:
extensions.append(['Import', {'Project': props_file}])
return [extensions]
def _GetMSBuildExtensionTargets(targets_files_of_rules):
targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}]
for targets_file in sorted(targets_files_of_rules):
targets_node.append(['Import', {'Project': targets_file}])
return [targets_node]
def _GenerateActionsForMSBuild(spec, actions_to_add):
  """Add actions accumulated into an actions_to_add, merging as needed.

  Arguments:
    spec: the target project dict
    actions_to_add: dictionary keyed on input name, which maps to a list of
        dicts describing the actions attached to that input file.

  Returns:
    A pair of (action specification, the sources handled by this action).
  """
  sources_handled_by_action = OrderedSet()
  actions_spec = []
  for primary_input, actions in actions_to_add.iteritems():
    # Merge all actions sharing the same primary input into one CustomBuild.
    inputs = OrderedSet()
    outputs = OrderedSet()
    descriptions = []
    commands = []
    for action in actions:
      inputs.update(OrderedSet(action['inputs']))
      outputs.update(OrderedSet(action['outputs']))
      descriptions.append(action['description'])
      cmd = action['command']
      # For most actions, add 'call' so that actions that invoke batch files
      # return and continue executing.  msbuild_use_call provides a way to
      # disable this but I have not seen any adverse effect from doing that
      # for everything.
      if action.get('msbuild_use_call', True):
        cmd = 'call ' + cmd
      commands.append(cmd)
    # Add the custom build action for one input file.
    description = ', and also '.join(descriptions)

    # We can't join the commands simply with && because the command line will
    # get too long. See also _AddActions: cygwin's setup_env mustn't be called
    # for every invocation or the command that sets the PATH will grow too
    # long.
    command = '\r\n'.join([c + '\r\nif %errorlevel% neq 0 exit /b %errorlevel%'
                           for c in commands])
    _AddMSBuildAction(spec,
                      primary_input,
                      inputs,
                      outputs,
                      command,
                      description,
                      sources_handled_by_action,
                      actions_spec)
  return actions_spec, sources_handled_by_action
def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description,
                      sources_handled_by_action, actions_spec):
  """Appends one CustomBuild element for primary_input to actions_spec.

  Also records primary_input in sources_handled_by_action so it is not
  emitted again as a plain source file.
  """
  command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd)
  primary_input = _FixPath(primary_input)
  inputs_array = _FixPaths(inputs)
  outputs_array = _FixPaths(outputs)
  # The primary input is implicit; only list the other inputs.
  additional_inputs = ';'.join(i for i in inputs_array if i != primary_input)
  outputs = ';'.join(outputs_array)
  sources_handled_by_action.add(primary_input)
  action_spec = ['CustomBuild', {'Include': primary_input},
                 # TODO(jeanluc) 'Document' for all or just if as_sources?
                 ['FileType', 'Document'],
                 ['Command', command],
                 ['Message', description],
                 ['Outputs', outputs]
                ]
  if additional_inputs:
    action_spec.append(['AdditionalInputs', additional_inputs])
  actions_spec.append(action_spec)
| mit |
laperry1/android_external_chromium_org | components/crash/tools/generate_breakpad_symbols.py | 43 | 7519 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to generate symbols for a binary suitable for breakpad.
Currently, the tool only supports Linux, Android, and Mac. Support for other
platforms is planned.
"""
import errno
import optparse
import os
import Queue
import re
import shutil
import subprocess
import sys
import threading
CONCURRENT_TASKS=4
def GetCommandOutput(command):
  """Runs the command list, returning its output.

  Runs the given command (which should be a list of one or more strings)
  and returns its standard output; stderr is discarded.

  From chromium_utils.
  """
  devnull = open(os.devnull, 'w')
  try:
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=devnull,
                            bufsize=1)
    output = proc.communicate()[0]
  finally:
    # Bug fix: the devnull handle used to leak on every call.
    devnull.close()
  return output
def GetDumpSymsBinary(build_dir=None):
  """Returns the path to the dump_syms binary inside build_dir.

  Exits the process if the binary is missing or not executable.
  """
  DUMP_SYMS = 'dump_syms'
  dump_syms_bin = os.path.join(os.path.expanduser(build_dir), DUMP_SYMS)
  if not os.access(dump_syms_bin, os.X_OK):
    # A missing dump_syms is unrecoverable: exit instead of returning.
    print 'Cannot find %s.' % DUMP_SYMS
    sys.exit(1)
  return dump_syms_bin
def Resolve(path, exe_path, loader_path, rpaths):
  """Resolve a dyld path.

  @executable_path is replaced with |exe_path|
  @loader_path is replaced with |loader_path|
  @rpath is replaced with the first path in |rpaths| where the referenced file
  is found
  """
  path = path.replace('@loader_path', loader_path)
  path = path.replace('@executable_path', exe_path)
  if '@rpath' not in path:
    return path
  # Try each rpath in order; keep the first substitution that yields an
  # executable file.
  for rpath in rpaths:
    candidate = Resolve(path.replace('@rpath', rpath), exe_path, loader_path,
                        [])
    if os.access(candidate, os.X_OK):
      return candidate
  # No rpath produced an executable file.
  return ''
def GetSharedLibraryDependenciesLinux(binary):
  """Return absolute paths to all shared library dependecies of the binary.

  This implementation assumes that we're running on a Linux system."""
  lib_re = re.compile('\t.* => (.+) \(.*\)$')
  deps = []
  # Parse the `lib => /path (addr)` lines of ldd's output.
  for line in GetCommandOutput(['ldd', binary]).splitlines():
    match = lib_re.match(line)
    if match:
      deps.append(match.group(1))
  return deps
def GetSharedLibraryDependenciesMac(binary, exe_path):
  """Return absolute paths to all shared library dependecies of the binary.

  This implementation assumes that we're running on a Mac system."""
  loader_path = os.path.dirname(binary)
  # First pass: collect the binary's LC_RPATH entries.  In `otool -l` output
  # the path value appears two lines below the 'cmd LC_RPATH' line.
  load_commands = GetCommandOutput(['otool', '-l', binary]).splitlines()
  rpaths = []
  for idx, line in enumerate(load_commands):
    if line.find('cmd LC_RPATH') != -1:
      m = re.match(' *path (.*) \(offset .*\)$', load_commands[idx+2])
      rpaths.append(m.group(1))

  # Second pass: resolve every linked library against those rpaths.
  lib_re = re.compile('\t(.*) \(compatibility .*\)$')
  deps = []
  for line in GetCommandOutput(['otool', '-L', binary]).splitlines():
    m = lib_re.match(line)
    if m:
      dep = Resolve(m.group(1), exe_path, loader_path, rpaths)
      if dep:
        deps.append(os.path.normpath(dep))
  return deps
def GetSharedLibraryDependencies(options, binary, exe_path):
  """Return absolute paths to all shared library dependecies of the binary.

  Dispatches to the Linux or Mac implementation and exits on any other
  platform.  Only dependencies that are executable and live inside
  options.build_dir are returned.
  """
  deps = []
  if sys.platform.startswith('linux'):
    deps = GetSharedLibraryDependenciesLinux(binary)
  elif sys.platform == 'darwin':
    deps = GetSharedLibraryDependenciesMac(binary, exe_path)
  else:
    print "Platform not supported."
    sys.exit(1)

  result = []
  build_dir = os.path.abspath(options.build_dir)
  for dep in deps:
    # Skip system libraries: only symbolize executables from our build dir.
    if (os.access(dep, os.X_OK) and
        os.path.abspath(os.path.dirname(dep)).startswith(build_dir)):
      result.append(dep)
  return result
def mkdir_p(path):
  """Simulates mkdir -p: creates path and parents, tolerating existing dirs."""
  try:
    os.makedirs(path)
  except OSError as e:
    # Only swallow "already exists" when the path really is a directory;
    # re-raise every other failure.
    if e.errno != errno.EEXIST or not os.path.isdir(path):
      raise
def GenerateSymbols(options, binaries):
  """Dumps the symbols of binary and places them in the given directory.

  Spawns options.jobs worker threads that each pull binaries off a queue,
  run dump_syms on them and write the .sym file under
  options.symbols_dir/<module name>/<module id>/.
  """
  queue = Queue.Queue()
  print_lock = threading.Lock()

  def _Worker():
    while True:
      binary = queue.get()

      should_dump_syms = True
      reason = "no reason"

      output_path = os.path.join(
          options.symbols_dir, os.path.basename(binary))
      if os.path.isdir(output_path):
        # Skip if the cached symbol dir is newer than the binary.
        if os.path.getmtime(binary) < os.path.getmtime(output_path):
          should_dump_syms = False
          reason = "symbols are more current than binary"

      if not should_dump_syms:
        if options.verbose:
          with print_lock:
            print "Skipping %s (%s)" % (binary, reason)
        queue.task_done()
        continue

      if options.verbose:
        with print_lock:
          print "Generating symbols for %s" % binary

      if os.path.isdir(output_path):
        # Touch the directory so the freshness check above keeps passing.
        os.utime(output_path, None)

      syms = GetCommandOutput([GetDumpSymsBinary(options.build_dir), '-r',
                               binary])
      # The MODULE header line carries the breakpad module id and name, which
      # determine the symbol file's directory layout.
      module_line = re.match("MODULE [^ ]+ [^ ]+ ([0-9A-F]+) (.*)\n", syms)
      output_path = os.path.join(options.symbols_dir, module_line.group(2),
                                 module_line.group(1))
      mkdir_p(output_path)
      symbol_file = "%s.sym" % module_line.group(2)
      try:
        f = open(os.path.join(output_path, symbol_file), 'w')
        f.write(syms)
        f.close()
      except Exception, e:
        # Not much we can do about this.
        with print_lock:
          print e

      queue.task_done()

  for binary in binaries:
    queue.put(binary)

  # Daemon threads so stray workers don't keep the process alive.
  for _ in range(options.jobs):
    t = threading.Thread(target=_Worker)
    t.daemon = True
    t.start()

  queue.join()
def main():
  """Parses options, validates them and generates breakpad symbols.

  Returns 0 on success, 1 on invalid arguments.
  """
  parser = optparse.OptionParser()
  parser.add_option('', '--build-dir', default='',
                    help='The build output directory.')
  parser.add_option('', '--symbols-dir', default='',
                    help='The directory where to write the symbols file.')
  parser.add_option('', '--binary', default='',
                    help='The path of the binary to generate symbols for.')
  parser.add_option('', '--clear', default=False, action='store_true',
                    help='Clear the symbols directory before writing new '
                         'symbols.')
  parser.add_option('-j', '--jobs', default=CONCURRENT_TASKS, action='store',
                    type='int', help='Number of parallel tasks to run.')
  parser.add_option('-v', '--verbose', action='store_true',
                    help='Print verbose status output.')

  (options, _) = parser.parse_args()

  if not options.symbols_dir:
    print "Required option --symbols-dir missing."
    return 1

  if not options.build_dir:
    print "Required option --build-dir missing."
    return 1

  if not options.binary:
    print "Required option --binary missing."
    return 1

  if not os.access(options.binary, os.X_OK):
    print "Cannot find %s." % options.binary
    return 1

  if options.clear:
    try:
      shutil.rmtree(options.symbols_dir)
    except:
      # Best-effort cleanup: a missing directory is fine.
      pass

  # Build the transitive closure of all dependencies.
  binaries = set([options.binary])
  queue = [options.binary]
  exe_path = os.path.dirname(options.binary)
  while queue:
    deps = GetSharedLibraryDependencies(options, queue.pop(0), exe_path)
    new_deps = set(deps) - binaries
    binaries |= new_deps
    queue.extend(list(new_deps))

  GenerateSymbols(options, binaries)

  return 0


if '__main__' == __name__:
  sys.exit(main())
| bsd-3-clause |
BizzCloud/PosBox | addons/auth_ldap/users_ldap.py | 12 | 10518 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ldap
import logging
from ldap.filter import filter_format
import openerp.exceptions
from openerp import tools
from openerp.osv import fields, osv
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
_logger = logging.getLogger(__name__)
class CompanyLDAP(osv.osv):
    """Per-company LDAP server configuration (model ``res.company.ldap``).

    Each record describes one LDAP server used to authenticate logins and,
    optionally, to auto-create the matching ``res.users`` records.
    """
    _name = 'res.company.ldap'
    _order = 'sequence'
    _rec_name = 'ldap_server'
    def get_ldap_dicts(self, cr, ids=None):
        """
        Retrieve res_company_ldap resources from the database in dictionary
        format.
        :param list ids: Valid ids of model res_company_ldap. If not \
        specified, process all resources (unlike other ORM methods).
        :return: ldap configurations
        :rtype: list of dictionaries
        """
        if ids:
            id_clause = 'AND id IN (%s)'
            args = [tuple(ids)]
        else:
            id_clause = ''
            args = []
        # "user" is double-quoted in the SQL because it is a reserved word.
        cr.execute("""
            SELECT id, company, ldap_server, ldap_server_port, ldap_binddn,
            ldap_password, ldap_filter, ldap_base, "user", create_user,
            ldap_tls
            FROM res_company_ldap
            WHERE ldap_server != '' """ + id_clause + """ ORDER BY sequence
        """, args)
        return cr.dictfetchall()
    def connect(self, conf):
        """
        Connect to an LDAP server specified by an ldap
        configuration dictionary.
        :param dict conf: LDAP configuration
        :return: an LDAP object
        """
        uri = 'ldap://%s:%d' % (conf['ldap_server'],
        conf['ldap_server_port'])
        connection = ldap.initialize(uri)
        if conf['ldap_tls']:
            # Upgrade the plain connection to TLS before any bind happens.
            connection.start_tls_s()
        return connection
    def authenticate(self, conf, login, password):
        """
        Authenticate a user against the specified LDAP server.
        In order to prevent an unintended 'unauthenticated authentication',
        which is an anonymous bind with a valid dn and a blank password,
        check for empty passwords explicitely (:rfc:`4513#section-6.3.1`)
        :param dict conf: LDAP configuration
        :param login: username
        :param password: Password for the LDAP user
        :return: LDAP entry of authenticated user or False
        :rtype: dictionary of attributes
        """
        if not password:
            return False
        entry = False
        # NOTE: "filter" shadows the builtin; kept as-is in this doc pass.
        filter = filter_format(conf['ldap_filter'], (login,))
        try:
            results = self.query(conf, filter)
            # Get rid of (None, attrs) for searchResultReference replies
            results = [i for i in results if i[0]]
            # Require exactly one match to avoid ambiguous authentication.
            if results and len(results) == 1:
                dn = results[0][0]
                conn = self.connect(conf)
                # Bind with the user's own dn/password to verify credentials.
                # NOTE(review): conn is not unbound when simple_bind_s raises.
                conn.simple_bind_s(dn, password)
                conn.unbind()
                entry = results[0]
        except ldap.INVALID_CREDENTIALS:
            return False
        except ldap.LDAPError, e:
            _logger.error('An LDAP exception occurred: %s', e)
        return entry
    def query(self, conf, filter, retrieve_attributes=None):
        """
        Query an LDAP server with the filter argument and scope subtree.
        Allow for all authentication methods of the simple authentication
        method:
        - authenticated bind (non-empty binddn + valid password)
        - anonymous bind (empty binddn + empty password)
        - unauthenticated authentication (non-empty binddn + empty password)
        .. seealso::
        :rfc:`4513#section-5.1` - LDAP: Simple Authentication Method.
        :param dict conf: LDAP configuration
        :param filter: valid LDAP filter
        :param list retrieve_attributes: LDAP attributes to be retrieved. \
        If not specified, return all attributes.
        :return: ldap entries
        :rtype: list of tuples (dn, attrs)
        """
        results = []
        try:
            conn = self.connect(conf)
            # Empty binddn/password falls back to an anonymous bind.
            conn.simple_bind_s(conf['ldap_binddn'] or '',
            conf['ldap_password'] or '')
            results = conn.search_st(conf['ldap_base'], ldap.SCOPE_SUBTREE,
            filter, retrieve_attributes, timeout=60)
            conn.unbind()
        except ldap.INVALID_CREDENTIALS:
            _logger.error('LDAP bind failed.')
        except ldap.LDAPError, e:
            _logger.error('An LDAP exception occurred: %s', e)
        return results
    def map_ldap_attributes(self, cr, uid, conf, login, ldap_entry):
        """
        Compose values for a new resource of model res_users,
        based upon the retrieved ldap entry and the LDAP settings.
        :param dict conf: LDAP configuration
        :param login: the new user's login
        :param tuple ldap_entry: single LDAP result (dn, attrs)
        :return: parameters for a new resource of model res_users
        :rtype: dict
        """
        # The display name comes from the first "cn" attribute of the entry.
        values = { 'name': ldap_entry[1]['cn'][0],
        'login': login,
        'company_id': conf['company']
        }
        return values
    def get_or_create_user(self, cr, uid, conf, login, ldap_entry,
    context=None):
        """
        Retrieve an active resource of model res_users with the specified
        login. Create the user if it is not initially found.
        :param dict conf: LDAP configuration
        :param login: the user's login
        :param tuple ldap_entry: single LDAP result (dn, attrs)
        :return: res_users id
        :rtype: int
        """
        user_id = False
        # Logins are matched and stored in lowercase.
        login = tools.ustr(login.lower())
        cr.execute("SELECT id, active FROM res_users WHERE lower(login)=%s", (login,))
        res = cr.fetchone()
        if res:
            # Only reuse the account when it is still active.
            if res[1]:
                user_id = res[0]
        elif conf['create_user']:
            _logger.debug("Creating new OpenERP user \"%s\" from LDAP" % login)
            user_obj = self.pool['res.users']
            values = self.map_ldap_attributes(cr, uid, conf, login, ldap_entry)
            if conf['user']:
                # Copy the template user so groups/preferences carry over.
                values['active'] = True
                user_id = user_obj.copy(cr, SUPERUSER_ID, conf['user'],
                default=values)
            else:
                user_id = user_obj.create(cr, SUPERUSER_ID, values)
        return user_id
    _columns = {
        'sequence': fields.integer('Sequence'),
        'company': fields.many2one('res.company', 'Company', required=True,
            ondelete='cascade'),
        'ldap_server': fields.char('LDAP Server address', required=True),
        'ldap_server_port': fields.integer('LDAP Server port', required=True),
        'ldap_binddn': fields.char('LDAP binddn',
            help=("The user account on the LDAP server that is used to query "
            "the directory. Leave empty to connect anonymously.")),
        'ldap_password': fields.char('LDAP password',
            help=("The password of the user account on the LDAP server that is "
            "used to query the directory.")),
        'ldap_filter': fields.char('LDAP filter', required=True),
        'ldap_base': fields.char('LDAP base', required=True),
        'user': fields.many2one('res.users', 'Template User',
            help="User to copy when creating new users"),
        'create_user': fields.boolean('Create user',
            help="Automatically create local user accounts for new users authenticating via LDAP"),
        'ldap_tls': fields.boolean('Use TLS',
            help="Request secure TLS/SSL encryption when connecting to the LDAP server. "
            "This option requires a server with STARTTLS enabled, "
            "otherwise all authentication attempts will fail."),
    }
    _defaults = {
        'ldap_server': '127.0.0.1',
        'ldap_server_port': 389,
        'sequence': 10,
        'create_user': True,
    }
class res_company(osv.osv):
    """Extend res.company with its list of LDAP configurations."""
    _inherit = "res.company"
    _columns = {
        # One record per LDAP server; duplicated with the company (copy=True).
        'ldaps': fields.one2many(
            'res.company.ldap', 'company', 'LDAP Parameters', copy=True),
    }
class users(osv.osv):
    """Extend res.users with LDAP fallback authentication."""
    _inherit = "res.users"
    def _login(self, db, login, password):
        """Try the regular login first; fall back to each configured LDAP
        server, auto-creating the local user when the LDAP bind succeeds."""
        user_id = super(users, self)._login(db, login, password)
        if user_id:
            return user_id
        registry = RegistryManager.get(db)
        with registry.cursor() as cr:
            ldap_obj = registry.get('res.company.ldap')
            for conf in ldap_obj.get_ldap_dicts(cr):
                entry = ldap_obj.authenticate(conf, login, password)
                if not entry:
                    continue
                user_id = ldap_obj.get_or_create_user(
                    cr, SUPERUSER_ID, conf, login, entry)
                if user_id:
                    break
        return user_id
    def check_credentials(self, cr, uid, password):
        """Accept either the local password or a successful LDAP bind for the
        user's login; re-raise AccessDenied when both fail."""
        try:
            super(users, self).check_credentials(cr, uid, password)
        except openerp.exceptions.AccessDenied:
            cr.execute('SELECT login FROM res_users WHERE id=%s AND active=TRUE',
                       (int(uid),))
            res = cr.fetchone()
            if not res:
                raise
            ldap_obj = self.pool['res.company.ldap']
            for conf in ldap_obj.get_ldap_dicts(cr):
                if ldap_obj.authenticate(conf, res[0], password):
                    return
            raise
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pascalchevrel/bedrock | tests/functional/firefox/test_all.py | 4 | 3785 | # -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
from pages.firefox.all import FirefoxAllPage
@pytest.mark.smoke
@pytest.mark.nondestructive
def test_firefox_release(base_url, selenium):
    """Firefox release: Windows 64-bit / en-US download link is correct."""
    page = FirefoxAllPage(selenium, base_url).open()
    product = page.select_product('Firefox')
    product.select_platform('Windows 64-bit')
    product.select_language('English (US)')
    assert page.is_desktop_download_button_displayed
    assert page.is_desktop_download_link_valid
    # Bug fix: `'a' and 'b' and 'c' in s` only checks the LAST membership
    # (`in` binds tighter than `and`; non-empty literals are truthy).
    link = page.desktop_download_link
    assert 'product=firefox-latest-ssl' in link
    assert 'os=win64' in link
    assert 'lang=en-US' in link
@pytest.mark.smoke
@pytest.mark.nondestructive
def test_firefox_beta(base_url, selenium):
    """Firefox Beta: macOS / German download link is correct."""
    page = FirefoxAllPage(selenium, base_url).open()
    product = page.select_product('Firefox Beta')
    product.select_platform('macOS')
    product.select_language(u'German — Deutsch')
    assert page.is_desktop_download_button_displayed
    assert page.is_desktop_download_link_valid
    # Bug fix: the original `'a' and 'b' and 'c' in s` only verified the last
    # substring; assert each expected query parameter individually.
    link = page.desktop_download_link
    assert 'product=firefox-beta-latest-ssl' in link
    assert 'os=osx' in link
    assert 'lang=de' in link
@pytest.mark.smoke
@pytest.mark.nondestructive
def test_firefox_developer(base_url, selenium):
    """Firefox Developer Edition: Linux 64-bit / en-US download link."""
    page = FirefoxAllPage(selenium, base_url).open()
    product = page.select_product('Firefox Developer Edition')
    product.select_platform('Linux 64-bit')
    product.select_language('English (US)')
    assert page.is_desktop_download_button_displayed
    assert page.is_desktop_download_link_valid
    # Bug fix: chained `and` with truthy literals only tested the final
    # `in`; check every parameter explicitly.
    link = page.desktop_download_link
    assert 'product=firefox-devedition-latest-ssl' in link
    assert 'os=linux64' in link
    assert 'lang=en-US' in link
@pytest.mark.smoke
@pytest.mark.nondestructive
def test_firefox_nightly(base_url, selenium):
    """Firefox Nightly: Windows 32-bit / German download link is correct."""
    page = FirefoxAllPage(selenium, base_url).open()
    product = page.select_product('Firefox Nightly')
    product.select_platform('Windows 32-bit')
    product.select_language(u'German — Deutsch')
    assert page.is_desktop_download_button_displayed
    assert page.is_desktop_download_link_valid
    # Bug fix: `'a' and 'b' and 'c' in s` only asserted the last membership;
    # test each expected parameter on its own.
    link = page.desktop_download_link
    assert 'product=firefox-nightly-latest-ssl' in link
    assert 'os=win' in link
    assert 'lang=de' in link
@pytest.mark.smoke
@pytest.mark.nondestructive
def test_firefox_esr(base_url, selenium):
    """Firefox ESR: Linux 32-bit / en-US download link is correct."""
    page = FirefoxAllPage(selenium, base_url).open()
    product = page.select_product('Firefox Extended Support Release')
    product.select_platform('Linux 32-bit')
    product.select_language('English (US)')
    assert page.is_desktop_download_button_displayed
    assert page.is_desktop_download_link_valid
    # Bug fix: the chained-`and` assertion only exercised the final `in`;
    # verify all three query parameters.
    link = page.desktop_download_link
    assert 'product=firefox-esr-latest-ssl' in link
    assert 'os=linux' in link
    assert 'lang=en-US' in link
@pytest.mark.smoke
@pytest.mark.nondestructive
def test_firefox_android(base_url, selenium):
    """Selecting Firefox Android shows the Android download button."""
    firefox_all = FirefoxAllPage(selenium, base_url).open()
    firefox_all.select_product('Firefox Android')
    assert firefox_all.is_android_download_button_displayed
@pytest.mark.smoke
@pytest.mark.nondestructive
def test_firefox_android_beta(base_url, selenium):
    """Selecting Firefox Android Beta shows its download button."""
    firefox_all = FirefoxAllPage(selenium, base_url).open()
    firefox_all.select_product('Firefox Android Beta')
    assert firefox_all.is_android_beta_download_button_displayed
@pytest.mark.smoke
@pytest.mark.nondestructive
def test_firefox_android_nightly(base_url, selenium):
    """Selecting Firefox Android Nightly shows its download button."""
    firefox_all = FirefoxAllPage(selenium, base_url).open()
    firefox_all.select_product('Firefox Android Nightly')
    assert firefox_all.is_android_nightly_download_button_displayed
@pytest.mark.smoke
@pytest.mark.nondestructive
def test_firefox_ios(base_url, selenium):
    """Selecting Firefox iOS shows the iOS download button."""
    firefox_all = FirefoxAllPage(selenium, base_url).open()
    firefox_all.select_product('Firefox iOS')
    assert firefox_all.is_ios_download_button_displayed
| mpl-2.0 |
qlands/onadata | onadata/apps/main/tests/test_form_enter_data.py | 5 | 5453 | import os
import re
import requests
from urlparse import urlparse
from time import time
from httmock import urlmatch, HTTMock
from django.test import RequestFactory
from django.core.urlresolvers import reverse
from django.core.validators import URLValidator
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from nose import SkipTest
from onadata.apps.main.views import set_perm, show, qrcode
from onadata.apps.main.models import MetaData
from onadata.apps.logger.views import enter_data
from onadata.libs.utils.viewer_tools import enketo_url
from test_base import TestBase
@urlmatch(netloc=r'(.*\.)?enketo\.ona\.io$')
def enketo_mock(url, request):
    """HTTMock handler simulating a successful Enketo publish (HTTP 201)."""
    resp = requests.Response()
    resp.status_code = 201
    resp._content = '{"url": "https://hmh2a.enketo.ona.io"}'
    return resp
@urlmatch(netloc=r'(.*\.)?enketo\.ona\.io$')
def enketo_error_mock(url, request):
    """HTTMock handler simulating an Enketo error response (HTTP 400)."""
    resp = requests.Response()
    resp.status_code = 400
    resp._content = ('{"message": '
                     '"no account exists for this OpenRosa server"}')
    return resp
class TestFormEnterData(TestBase):
    """Tests for entering form data through the Enketo integration."""
    def setUp(self):
        # Publish a form so every test has an xform + submission to work with.
        TestBase.setUp(self)
        self._create_user_and_login()
        self._publish_transportation_form_and_submit_instance()
        self.perm_url = reverse(set_perm, kwargs={
            'username': self.user.username, 'id_string': self.xform.id_string})
        self.show_url = reverse(show, kwargs={'uuid': self.xform.uuid})
        self.url = reverse(enter_data, kwargs={
            'username': self.user.username,
            'id_string': self.xform.id_string
        })
    def _running_enketo(self, check_url=False):
        # True when an Enketo server is configured (and, if check_url,
        # reachable); used to skip Enketo-dependent tests otherwise.
        if hasattr(settings, 'ENKETO_URL') and \
                (not check_url or self._check_url(settings.ENKETO_URL)):
            return True
        return False
    def test_enketo_remote_server(self):
        if not self._running_enketo():
            raise SkipTest
        with HTTMock(enketo_mock):
            server_url = 'https://testserver.com/bob'
            # Unique form id per run: timestamp with dots replaced.
            form_id = "test_%s" % re.sub(re.compile("\."), "_", str(time()))
            url = enketo_url(server_url, form_id)
            self.assertIsInstance(url, basestring)
            # URLValidator returns None (no exception) for a valid URL.
            self.assertIsNone(URLValidator()(url))
    def _get_grcode_view_response(self):
        # Call the qrcode view directly with an authenticated request.
        factory = RequestFactory()
        request = factory.get('/')
        request.user = self.user
        response = qrcode(
            request, self.user.username, self.xform.id_string)
        return response
    def test_qrcode_view(self):
        with HTTMock(enketo_mock):
            response = self._get_grcode_view_response()
        # Compare against the pre-rendered QR code fixture.
        qrfile = os.path.join(
            self.this_directory, 'fixtures', 'qrcode.response')
        with open(qrfile, 'r') as f:
            data = f.read()
        self.assertContains(response, data.strip(), status_code=200)
    def test_qrcode_view_with_enketo_error(self):
        # Enketo errors should surface as a 400 from the view.
        with HTTMock(enketo_error_mock):
            response = self._get_grcode_view_response()
        self.assertEqual(response.status_code, 400)
    def test_enter_data_redir(self):
        if not self._running_enketo():
            raise SkipTest
        with HTTMock(enketo_mock):
            factory = RequestFactory()
            request = factory.get('/')
            request.user = self.user
            response = enter_data(
                request, self.user.username, self.xform.id_string)
            # make sure response redirect to an enketo site
            enketo_base_url = urlparse(settings.ENKETO_URL).netloc
            redirected_base_url = urlparse(response['Location']).netloc
            # TODO: checking if the form is valid on enketo side
            self.assertIn(enketo_base_url, redirected_base_url)
            self.assertEqual(response.status_code, 302)
    def test_enter_data_no_permission(self):
        # Anonymous users must be rejected with 403.
        response = self.anon.get(self.url)
        self.assertEqual(response.status_code, 403)
    def test_public_with_link_to_share_toggle_on(self):
        # sharing behavior as of 09/13/2012:
        # it requires both data_share and form_share both turned on
        # in order to grant anon access to form uploading
        # TODO: findout 'for_user': 'all' and what it means
        response = self.client.post(self.perm_url, {'for_user': 'all',
                                    'perm_type': 'link'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(MetaData.public_link(self.xform), True)
        # toggle shared on
        self.xform.shared = True
        self.xform.shared_data = True
        self.xform.save()
        response = self.anon.get(self.show_url)
        self.assertEqual(response.status_code, 302)
        if not self._running_enketo():
            raise SkipTest
        with HTTMock(enketo_mock):
            factory = RequestFactory()
            request = factory.get('/')
            request.user = AnonymousUser()
            response = enter_data(
                request, self.user.username, self.xform.id_string)
            self.assertEqual(response.status_code, 302)
    def test_enter_data_non_existent_user(self):
        # Unknown usernames should 404 rather than leak information.
        url = reverse(enter_data, kwargs={
            'username': 'nonexistentuser',
            'id_string': self.xform.id_string
        })
        response = self.anon.get(url)
        self.assertEqual(response.status_code, 404)
| bsd-2-clause |
Drooids/odoo | addons/sales_team/res_config.py | 366 | 1922 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
class sales_team_configuration(osv.TransientModel):
    """Sale settings wizard handling the multi/mono sales-team switch."""
    _name = 'sale.config.settings'
    _inherit = ['sale.config.settings']
    def set_group_multi_salesteams(self, cr, uid, ids, context=None):
        """ This method is automatically called by res_config as it begins
        with set. It is used to implement the 'one group or another'
        behavior. We have to perform some group manipulation by hand
        because in res_config.execute(), set_* methods are called
        after group_*; therefore writing on an hidden res_config file
        could not work.
        If group_multi_salesteams is checked: remove group_mono_salesteams
        from group_user, remove the users. Otherwise, just add
        group_mono_salesteams in group_user.
        The inverse logic about group_multi_salesteams is managed by the
        normal behavior of 'group_multi_salesteams' field.
        """
        def ref(xml_id):
            # Resolve a fully-qualified XML id ("module.xml_id") to its record.
            mod, xml = xml_id.split('.', 1)
            return self.pool['ir.model.data'].get_object(cr, uid, mod, xml, context)
        for obj in self.browse(cr, uid, ids, context=context):
            config_group = ref('base.group_mono_salesteams')
            base_group = ref('base.group_user')
            if obj.group_multi_salesteams:
                # (3, id) unlinks the implied mono group; also drop its users.
                base_group.write({'implied_ids': [(3, config_group.id)]})
                config_group.write({'users': [(3, u.id) for u in base_group.users]})
            else:
                # (4, id) re-links group_mono_salesteams as implied.
                base_group.write({'implied_ids': [(4, config_group.id)]})
        return True
    _columns = {
        'group_multi_salesteams': fields.boolean("Organize Sales activities into multiple Sales Teams",
            implied_group='base.group_multi_salesteams',
            help="""Allows you to use Sales Teams to manage your leads and opportunities."""),
    }
| agpl-3.0 |
wbsavage/shinken | shinken/objects/module.py | 2 | 2515 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from item import Item, Items
from shinken.property import StringProp, ListProp
from shinken.util import strip_and_uniq
from shinken.log import logger
class Module(Item):
    """Configuration item describing one Shinken module instance."""
    id = 1 # zero is always special in database, so we do not take risk here
    my_type = 'module'
    properties = Item.properties.copy()
    properties.update({
        'module_name': StringProp(),
        'module_type': StringProp(),
        'modules': ListProp(default=''),
    })
    macros = {}
    # For debugging purpose only (nice name)
    def get_name(self):
        """Return module_name, used as this item's display name."""
        return self.module_name
class Modules(Items):
    """Collection of Module configuration items."""
    name_property = "module_name"
    inner_class = Module
    def linkify(self):
        # Resolve textual 'modules' lists into Module object references.
        self.linkify_s_by_plug()
    def linkify_s_by_plug(self):
        """Replace each module's comma-separated 'modules' string with the
        matching Module objects, recording a configuration error for any
        unknown name."""
        for s in self:
            new_modules = []
            mods = s.modules.split(',')
            mods = strip_and_uniq(mods)
            for plug_name in mods:
                plug_name = plug_name.strip()
                # don't read void names
                if plug_name == '':
                    continue
                # We are the modules, we search them :)
                plug = self.find_by_name(plug_name)
                if plug is not None:
                    new_modules.append(plug)
                else:
                    err = "[module] unknown %s module from %s" % (plug_name, s.get_name())
                    logger.error(err)
                    s.configuration_errors.append(err)
            s.modules = new_modules
    # We look for contacts property in contacts and
    def explode(self):
        # Modules have nothing to expand.
        pass
| agpl-3.0 |
jc0n/scrapy-webdriver | scrapy_webdriver/selector.py | 5 | 3473 | import re
try:
from scrapy.selector import Selector, XPathSelectorList
except ImportError: # scrapy < 0.20
from scrapy.selector import XPathSelector as Selector, XPathSelectorList
_UNSUPPORTED_XPATH_ENDING = re.compile(r'.*/((@)?([^/()]+)(\(\))?)$')
class WebdriverXPathSelector(Selector):
    """Scrapy selector that works using XPath selectors in a remote browser.
    Based on some code from Marconi Moreto:
    https://github.com/marconi/ghost-selector
    """
    def __init__(self, response=None, webdriver=None, element=None,
        *args, **kwargs):
        kwargs['response'] = response
        super(WebdriverXPathSelector, self).__init__(*args, **kwargs)
        self.response = response
        # Fall back to the response's webdriver when none is given explicitly.
        self.webdriver = webdriver or response.webdriver
        self.element = element
    def _make_result(self, result):
        # Wrap each raw webdriver element in a selector of the same class.
        if type(result) is not list:
            result = [result]
        return [self.__class__(webdriver=self.webdriver, element=e)
            for e in result]
    def select(self, xpath):
        """Return elements using the webdriver `find_elements_by_xpath` method.
        Some XPath features are not supported by the webdriver implementation.
        Namely, selecting text content or attributes:
        - /some/element/text()
        - /some/element/@attribute
        This function offers workarounds for both, so it should be safe to use
        them as you would with HtmlXPathSelector for simple content extraction.
        """
        # Search relative to the current element if any, else the whole page.
        xpathev = self.element if self.element else self.webdriver
        ending = _UNSUPPORTED_XPATH_ENDING.match(xpath)
        atsign = parens = None
        if ending:
            match, atsign, name, parens = ending.groups()
            if atsign:
                # Strip the trailing "/@attr" before querying webdriver.
                xpath = xpath[:-len(name) - 2]
            elif parens and name == 'text':
                # Strip the trailing "/text()" before querying webdriver.
                xpath = xpath[:-len(name) - 3]
        result = self._make_result(xpathev.find_elements_by_xpath(xpath))
        if atsign:
            # Re-apply the stripped attribute selection on each match.
            result = (_NodeAttribute(r.element, name) for r in result)
        elif parens and result and name == 'text':
            # Re-apply the stripped text() selection on each match.
            result = (_TextNode(self.webdriver, r.element) for r in result)
        return XPathSelectorList(result)
    def select_script(self, script, *args):
        """Return elements using JavaScript snippet execution."""
        result = self.webdriver.execute_script(script, *args)
        return XPathSelectorList(self._make_result(result))
    def extract(self):
        """Extract text from selenium element."""
        return self.element.text if self.element else None
class _NodeAttribute(object):
"""Works around webdriver XPath inability to select attributes."""
def __init__(self, element, attribute):
self.element = element
self.attribute = attribute
def extract(self):
return self.element.get_attribute(self.attribute)
class _TextNode(object):
"""Works around webdriver XPath inability to select text nodes.
It's a rather contrived element API implementation, it should probably
be expanded.
"""
JS_FIND_FIRST_TEXT_NODE = ('return arguments[0].firstChild '
'&& arguments[0].firstChild.nodeValue')
def __init__(self, webdriver, element):
self.element = element
self.webdriver = webdriver
def extract(self):
args = (self.JS_FIND_FIRST_TEXT_NODE, self.element)
return self.webdriver.execute_script(*args)
| mit |
ForensicTools/GRREAT-475_2141-Chaigon-Failey-Siebert | client/vfs_handlers/registry.py | 3 | 15249 | #!/usr/bin/env python
"""Implement access to the windows registry."""
import ctypes
import ctypes.wintypes
import exceptions
import os
import stat
import StringIO
import _winreg
from grr.client import vfs
from grr.lib import rdfvalue
from grr.lib import utils
# Difference between 1 Jan 1601 and 1 Jan 1970.
WIN_UNIX_DIFF_MSECS = 11644473600
# KEY_READ = STANDARD_RIGHTS_READ | KEY_QUERY_VALUE |
# KEY_ENUMERATE_SUB_KEYS | KEY_NOTIFY
# Also see: http://msdn.microsoft.com/en-us/library/windows/desktop/
# ms724878(v=vs.85).aspx
KEY_READ = 0x20019
def CanonicalPathToLocalPath(path):
  """Convert a canonical ("/"-separated) registry path to a local one.

  Forward slashes become backslashes and any leading/trailing backslashes
  are removed.
  """
  return path.replace("/", "\\").strip("\\")
# _winreg is broken on Python 2.x and doesn't support unicode registry values.
# We provide some replacement functions here.
advapi32 = ctypes.windll.advapi32
LPDWORD = ctypes.POINTER(ctypes.wintypes.DWORD)
LPBYTE = ctypes.POINTER(ctypes.wintypes.BYTE)
ERROR_SUCCESS = 0
ERROR_MORE_DATA = 234
class FileTime(ctypes.Structure):
  """ctypes mirror of the Win32 FILETIME struct (two 32-bit DWORD halves)."""
  _fields_ = [("dwLowDateTime", ctypes.wintypes.DWORD),
              ("dwHighDateTime", ctypes.wintypes.DWORD)]
RegCloseKey = advapi32.RegCloseKey # pylint: disable=g-bad-name
RegCloseKey.restype = ctypes.c_long
RegCloseKey.argtypes = [ctypes.c_void_p]
RegEnumKeyEx = advapi32.RegEnumKeyExW # pylint: disable=g-bad-name
RegEnumKeyEx.restype = ctypes.c_long
RegEnumKeyEx.argtypes = [ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.c_wchar_p, LPDWORD,
LPDWORD, ctypes.c_wchar_p, LPDWORD,
ctypes.POINTER(FileTime)]
RegEnumValue = advapi32.RegEnumValueW # pylint: disable=g-bad-name
RegEnumValue.restype = ctypes.c_long
RegEnumValue.argtypes = [ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.c_wchar_p, LPDWORD, LPDWORD, LPDWORD, LPBYTE,
LPDWORD]
RegOpenKeyEx = advapi32.RegOpenKeyExW # pylint: disable=g-bad-name
RegOpenKeyEx.restype = ctypes.c_long
RegOpenKeyEx.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p, ctypes.c_ulong,
ctypes.c_ulong, ctypes.POINTER(ctypes.c_void_p)]
RegQueryInfoKey = advapi32.RegQueryInfoKeyW # pylint: disable=g-bad-name
RegQueryInfoKey.restype = ctypes.c_long
RegQueryInfoKey.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p, LPDWORD, LPDWORD,
LPDWORD, LPDWORD, LPDWORD, LPDWORD,
LPDWORD, LPDWORD, LPDWORD,
ctypes.POINTER(FileTime)]
RegQueryValueEx = advapi32.RegQueryValueExW # pylint: disable=g-bad-name
RegQueryValueEx.restype = ctypes.c_long
RegQueryValueEx.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p, LPDWORD, LPDWORD,
LPBYTE, LPDWORD]
class KeyHandle(object):
  """A wrapper class for a registry key handle."""
  def __init__(self, value=0):
    # Wrap an existing raw handle value, or create an empty (null) handle.
    if value:
      self.handle = ctypes.c_void_p(value)
    else:
      self.handle = ctypes.c_void_p()
  def __enter__(self):
    return self
  def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
    self.Close()
    return False
  def Close(self):
    """Close the underlying handle; safe to call multiple times."""
    if not self.handle:
      return
    if RegCloseKey is None:
      return # Globals become None during exit.
    rc = RegCloseKey(self.handle)
    # Null the handle before raising so a failed close is not retried.
    self.handle = ctypes.c_void_p()
    if rc != ERROR_SUCCESS:
      # NOTE(review): raises a fixed error code 2 instead of rc -- confirm.
      raise ctypes.WinError(2)
  def __del__(self):
    self.Close()
def OpenKey(key, sub_key):
  """This calls the Windows OpenKeyEx function in a Unicode safe way."""
  new_key = KeyHandle()
  # Don't use KEY_WOW64_64KEY (0x100) since it breaks on Windows 2000
  rc = RegOpenKeyEx(key.handle, sub_key, 0, KEY_READ,
                    ctypes.cast(ctypes.byref(new_key.handle),
                                ctypes.POINTER(ctypes.c_void_p)))
  if rc != ERROR_SUCCESS:
    # NOTE(review): error code is hardcoded to 2; rc holds the real one.
    raise ctypes.WinError(2)
  return new_key
def CloseKey(key):
  """Close a raw registry handle, raising WindowsError on failure."""
  rc = RegCloseKey(key)
  if rc != ERROR_SUCCESS:
    raise ctypes.WinError(2)
def QueryInfoKey(key):
  """This calls the Windows RegQueryInfoKey function in a Unicode safe way.

  Returns:
    A tuple (number_of_subkeys, number_of_values, last_write_filetime).
  """
  null = LPDWORD()
  num_sub_keys = ctypes.wintypes.DWORD()
  num_values = ctypes.wintypes.DWORD()
  ft = FileTime()
  rc = RegQueryInfoKey(key.handle, ctypes.c_wchar_p(), null, null,
                       ctypes.byref(num_sub_keys), null, null,
                       ctypes.byref(num_values), null, null, null,
                       ctypes.byref(ft))
  if rc != ERROR_SUCCESS:
    raise ctypes.WinError(2)
  # Combine the two 32-bit FILETIME halves into one 64-bit timestamp.
  return (num_sub_keys.value, num_values.value,
          ft.dwLowDateTime | (ft.dwHighDateTime << 32))
def QueryValueEx(key, value_name):
  """This calls the Windows QueryValueEx function in a Unicode safe way.

  Returns:
    A tuple (converted_value, data_type).
  """
  size = 256
  data_type = ctypes.wintypes.DWORD()
  # Grow the buffer (doubling) until the value fits, up to the size cap.
  while True:
    tmp_size = ctypes.wintypes.DWORD(size)
    buf = ctypes.create_string_buffer(size)
    rc = RegQueryValueEx(key.handle, value_name, LPDWORD(),
                         ctypes.byref(data_type),
                         ctypes.cast(buf, LPBYTE), ctypes.byref(tmp_size))
    if rc != ERROR_MORE_DATA:
      break
    # We limit the size here to ~10 MB so the response doesn't get too big.
    if size > 10 * 1024 * 1024:
      raise exceptions.WindowsError("Value too big to be read by GRR.")
    size *= 2
  if rc != ERROR_SUCCESS:
    raise ctypes.WinError(2)
  return (Reg2Py(buf, tmp_size.value, data_type.value), data_type.value)
def EnumKey(key, index):
  """This calls the Windows RegEnumKeyEx function in a Unicode safe way.

  Returns:
    The name of the subkey at the given index.
  """
  # 257 = maximum registry key name length (255) plus room for the NUL.
  buf = ctypes.create_unicode_buffer(257)
  length = ctypes.wintypes.DWORD(257)
  rc = RegEnumKeyEx(key.handle, index,
                    ctypes.cast(buf, ctypes.c_wchar_p),
                    ctypes.byref(length),
                    LPDWORD(), ctypes.c_wchar_p(), LPDWORD(),
                    ctypes.POINTER(FileTime)())
  if rc != 0:
    raise ctypes.WinError(2)
  # length is in characters; strip any trailing NUL.
  return ctypes.wstring_at(buf, length.value).rstrip(u"\x00")
def EnumValue(key, index):
  """This calls the Windows RegEnumValue function in a Unicode safe way.

  Returns:
    A tuple (value_name, converted_data, data_type).
  """
  null = ctypes.POINTER(ctypes.wintypes.DWORD)()
  value_size = ctypes.wintypes.DWORD()
  data_size = ctypes.wintypes.DWORD()
  # First query the key for the maximum value-name and data lengths so the
  # buffers below can be sized correctly.
  rc = RegQueryInfoKey(key.handle, ctypes.c_wchar_p(), null, null, null,
                       null, null, null,
                       ctypes.byref(value_size), ctypes.byref(data_size),
                       null, ctypes.POINTER(FileTime)())
  if rc != ERROR_SUCCESS:
    raise ctypes.WinError(2)
  # Leave room for the terminating NUL.
  value_size.value += 1
  data_size.value += 1
  value = ctypes.create_unicode_buffer(value_size.value)
  # Retry with a doubled data buffer if the value grew between the two calls.
  while True:
    data = ctypes.create_string_buffer(data_size.value)
    tmp_value_size = ctypes.wintypes.DWORD(value_size.value)
    tmp_data_size = ctypes.wintypes.DWORD(data_size.value)
    data_type = ctypes.wintypes.DWORD()
    rc = RegEnumValue(key.handle, index,
                      ctypes.cast(value, ctypes.c_wchar_p),
                      ctypes.byref(tmp_value_size), null,
                      ctypes.byref(data_type),
                      ctypes.cast(data, LPBYTE),
                      ctypes.byref(tmp_data_size))
    if rc != ERROR_MORE_DATA:
      break
    data_size.value *= 2
  if rc != ERROR_SUCCESS:
    raise ctypes.WinError(2)
  return (value.value, Reg2Py(data, tmp_data_size.value, data_type.value),
          data_type.value)
def Reg2Py(data, size, data_type):
  """Convert a raw registry value buffer into the matching Python object.

  DWORDs become ints, string types become unicode (MULTI_SZ a list of
  unicode strings), anything else is returned as raw bytes (or None when
  empty).
  """
  if data_type == _winreg.REG_DWORD:
    if size == 0:
      return 0
    return ctypes.cast(data, ctypes.POINTER(ctypes.c_int)).contents.value
  if data_type in (_winreg.REG_SZ, _winreg.REG_EXPAND_SZ):
    # size is in bytes; wide chars are 2 bytes each.
    return ctypes.wstring_at(data, size // 2).rstrip(u"\x00")
  if data_type == _winreg.REG_MULTI_SZ:
    return ctypes.wstring_at(data, size // 2).rstrip(u"\x00").split(u"\x00")
  if size == 0:
    return None
  return ctypes.string_at(data, size)
class RegistryFile(vfs.VFSHandler):
"""Emulate registry access through the VFS."""
supported_pathtype = rdfvalue.PathSpec.PathType.REGISTRY
auto_register = True
value = None
value_type = _winreg.REG_NONE
hive = None
last_modified = 0
is_directory = True
fd = None
# Maps the registry types to protobuf enums
registry_map = {
_winreg.REG_NONE: rdfvalue.StatEntry.RegistryType.REG_NONE,
_winreg.REG_SZ: rdfvalue.StatEntry.RegistryType.REG_SZ,
_winreg.REG_EXPAND_SZ: rdfvalue.StatEntry.RegistryType.REG_EXPAND_SZ,
_winreg.REG_BINARY: rdfvalue.StatEntry.RegistryType.REG_BINARY,
_winreg.REG_DWORD: rdfvalue.StatEntry.RegistryType.REG_DWORD,
_winreg.REG_DWORD_LITTLE_ENDIAN: (
rdfvalue.StatEntry.RegistryType.REG_DWORD_LITTLE_ENDIAN),
_winreg.REG_DWORD_BIG_ENDIAN: (
rdfvalue.StatEntry.RegistryType.REG_DWORD_BIG_ENDIAN),
_winreg.REG_LINK: rdfvalue.StatEntry.RegistryType.REG_LINK,
_winreg.REG_MULTI_SZ: rdfvalue.StatEntry.RegistryType.REG_MULTI_SZ,
}
  def __init__(self, base_fd, pathspec=None, progress_callback=None):
    """Open the registry key or value designated by the pathspec.

    Raises:
      IOError: if stacked on a non-directory handler, if the hive name is
        unknown, or if the key cannot be opened.
    """
    super(RegistryFile, self).__init__(base_fd, pathspec=pathspec,
                                       progress_callback=progress_callback)
    if base_fd is None:
      self.pathspec.Append(pathspec)
    elif base_fd.IsDirectory():
      # Nest under the base directory handler's path.
      self.pathspec.last.path = utils.JoinPath(self.pathspec.last.path,
                                               pathspec.path)
    else:
      raise IOError("Registry handler can not be stacked on another handler.")
    path_components = filter(None, self.pathspec.last.path.split("/"))
    try:
      # The first component MUST be a hive
      self.hive = getattr(_winreg, path_components[0])
      self.hive = KeyHandle(self.hive)
    except AttributeError:
      raise IOError("Unknown hive name %s" % path_components[0])
    except IndexError:
      # A hive is not specified, we just list all the hives.
      return
    # Normalize the path casing if needed
    self.key_name = "/".join(path_components[1:])
    self.local_path = CanonicalPathToLocalPath(self.key_name)
    try:
      # Maybe its a value
      key_name, value_name = os.path.split(self.local_path)
      with OpenKey(self.hive, key_name) as key:
        self.value, self.value_type = QueryValueEx(key, value_name)
      # We are a value and therefore not a directory.
      self.is_directory = False
    except exceptions.WindowsError:
      try:
        # Try to get the default value for this key
        with OpenKey(self.hive, self.local_path) as key:
          # Check for default value.
          try:
            self.value, self.value_type = QueryValueEx(key, "")
          except exceptions.WindowsError:
            # Empty default value
            self.value = ""
            self.value_type = _winreg.REG_NONE
      except exceptions.WindowsError:
        raise IOError("Unable to open key %s" % self.key_name)
  def Stat(self):
    """Return a StatEntry for this key/value itself (empty child name)."""
    return self._Stat("", self.value, self.value_type)
  def _Stat(self, name, value, value_type):
    """Builds a StatEntry for a child key or value.

    Args:
      name: Child name, appended to our pathspec ("" means this node itself).
      value: The registry value payload, used for st_size and registry_data.
      value_type: A _winreg.REG_* constant or None if there is no value.

    Returns:
      An rdfvalue.StatEntry with pathspec, mode, mtime, size and, when
      value_type is given, the mapped registry type and data.
    """
    response = rdfvalue.StatEntry()
    response_pathspec = self.pathspec.Copy()
    # No matter how we got here, there is no need to do case folding from now on
    # since this is the exact filename casing.
    response_pathspec.path_options = rdfvalue.PathSpec.Options.CASE_LITERAL
    response_pathspec.last.path = utils.JoinPath(
        response_pathspec.last.path, name)
    response.pathspec = response_pathspec
    # Keys present as directories, values as regular files.
    if self.IsDirectory():
      response.st_mode = stat.S_IFDIR
    else:
      response.st_mode = stat.S_IFREG
    response.st_mtime = self.last_modified
    response.st_size = len(utils.SmartStr(value))
    if value_type is not None:
      # Unknown registry types map to 0 rather than raising.
      response.registry_type = self.registry_map.get(value_type, 0)
      response.registry_data = rdfvalue.DataBlob().SetValue(value)
    return response
  def ListNames(self):
    """List the names of all keys and values.

    Yields:
      Subkey names first, then value names. When no hive was specified,
      yields the HKEY_* hive names instead.

    Raises:
      IOError: If the key cannot be enumerated.
    """
    if not self.IsDirectory(): return
    # Handle the special case where no hive is specified and just list the hives
    if self.hive is None:
      for name in dir(_winreg):
        if name.startswith("HKEY_"):
          yield name
      return
    try:
      with OpenKey(self.hive, self.local_path) as key:
        (self.number_of_keys, self.number_of_values,
         self.last_modified) = QueryInfoKey(key)
        # Convert the FILETIME-style timestamp (100ns units) to Unix seconds.
        self.last_modified = self.last_modified / 10000000 - WIN_UNIX_DIFF_MSECS
        # First keys
        for i in range(self.number_of_keys):
          try:
            yield EnumKey(key, i)
          except exceptions.WindowsError:
            # Key disappeared between QueryInfoKey and EnumKey; skip it.
            pass
        # Now Values
        for i in range(self.number_of_values):
          try:
            name, unused_value, unused_value_type = EnumValue(key, i)
            yield name
          except exceptions.WindowsError:
            pass
    except exceptions.WindowsError as e:
      raise IOError("Unable to list key %s: %s" % (self.key_name, e))
  def ListFiles(self):
    """A generator of all keys and values.

    Yields:
      StatEntry objects: subkeys as directories (with their default value
      stored in the stat response when present), then values as regular
      files. When no hive was specified, yields one directory entry per
      HKEY_* hive.

    Raises:
      IOError: If the key cannot be enumerated.
    """
    if not self.IsDirectory(): return
    if self.hive is None:
      # No hive specified - present each HKEY_* hive as a directory.
      for name in dir(_winreg):
        if name.startswith("HKEY_"):
          response = rdfvalue.StatEntry(
              st_mode=stat.S_IFDIR)
          response_pathspec = self.pathspec.Copy()
          response_pathspec.last.path = utils.JoinPath(
              response_pathspec.last.path, name)
          response.pathspec = response_pathspec
          yield response
      return
    try:
      with OpenKey(self.hive, self.local_path) as key:
        (self.number_of_keys, self.number_of_values,
         self.last_modified) = QueryInfoKey(key)
        # Convert the FILETIME-style timestamp (100ns units) to Unix seconds.
        self.last_modified = self.last_modified / 10000000 - WIN_UNIX_DIFF_MSECS
        # First keys - These will look like directories.
        for i in range(self.number_of_keys):
          try:
            name = EnumKey(key, i)
            key_name = utils.JoinPath(self.local_path, name)
            try:
              # Store the default value in the stat response for values.
              with OpenKey(self.hive, key_name) as subkey:
                value, value_type = QueryValueEx(subkey, "")
            except exceptions.WindowsError:
              value, value_type = None, None
            response = self._Stat(name, value, value_type)
            # Keys look like Directories in the VFS.
            response.st_mode = stat.S_IFDIR
            yield response
          except exceptions.WindowsError:
            # Key disappeared mid-enumeration; skip it.
            pass
        # Now Values - These will look like files.
        for i in range(self.number_of_values):
          try:
            name, value, value_type = EnumValue(key, i)
            response = self._Stat(name, value, value_type)
            # Values look like files in the VFS.
            response.st_mode = stat.S_IFREG
            yield response
          except exceptions.WindowsError:
            pass
    except exceptions.WindowsError as e:
      raise IOError("Unable to list key %s: %s" % (self.key_name, e))
  def IsDirectory(self):
    """True when this node is a key (directory), False for a value."""
    return self.is_directory
  def Read(self, length):
    """Reads up to length bytes of the value, via a lazily-created buffer."""
    if not self.fd:
      # Materialize the registry value as a file-like object on first read.
      self.fd = StringIO.StringIO(utils.SmartStr(self.value))
    return self.fd.read(length)
  def Seek(self, offset, whence=0):
    """Seeks within the buffered value data (standard whence semantics)."""
    if not self.fd:
      # Materialize the registry value as a file-like object on first seek.
      self.fd = StringIO.StringIO(utils.SmartStr(self.value))
    return self.fd.seek(offset, whence)
| apache-2.0 |
jetyang2005/elastalert | DBUtils-1.2/DBUtils/Tests/TestSimplePooledPg.py | 1 | 4301 | """Test the SimplePooledPg module.
Note:
We don't test performance here, so the test does not predicate
whether SimplePooledPg actually will help in improving performance or not.
Copyright and credit info:
* This test was contributed by Christoph Zwerschke
"""
import sys
import unittest
__version__ = '1.2'
# This module also serves as a mock object for the pg API module:
sys.modules['pg'] = sys.modules[__name__]
class DB:
    """Mock pg connection object used by the SimplePooledPg tests.

    Records the constructor arguments and counts issued queries; closing
    simply resets the counter.
    """

    def __init__(self, dbname, user):
        self.dbname, self.user = dbname, user
        self.num_queries = 0

    def close(self):
        # Closing the mock connection just resets the query counter.
        self.num_queries = 0

    def query(self):
        self.num_queries = self.num_queries + 1
sys.path.insert(1, '../..')
from DBUtils import SimplePooledPg
class TestSimplePooledPg(unittest.TestCase):
    """Unit tests for SimplePooledPg using the mock pg module above."""

    def my_dbpool(self, maxConnections):
        """Creates a pool of maxConnections to the fake test database."""
        return SimplePooledPg.PooledPg(maxConnections,
            'SimplePooledPgTestDB', 'SimplePooledPgTestUser')

    def test0_check_version(self):
        """Module, class and package versions must all agree."""
        from DBUtils import __version__ as DBUtilsVersion
        self.assertEqual(DBUtilsVersion, __version__)
        self.assertEqual(SimplePooledPg.__version__, __version__)
        self.assertEqual(SimplePooledPg.PooledPg.version, __version__)

    def test1_create_connection(self):
        """A pooled connection exposes the underlying mock DB attributes."""
        dbpool = self.my_dbpool(1)
        db = dbpool.connection()
        self.assertTrue(hasattr(db, 'query'))
        self.assertTrue(hasattr(db, 'num_queries'))
        self.assertEqual(db.num_queries, 0)
        self.assertTrue(hasattr(db, 'dbname'))
        self.assertEqual(db.dbname, 'SimplePooledPgTestDB')
        self.assertTrue(hasattr(db, 'user'))
        self.assertEqual(db.user, 'SimplePooledPgTestUser')
        db.query()
        self.assertEqual(db.num_queries, 1)

    def test2_close_connection(self):
        """Closing returns the connection to the pool without resetting it."""
        dbpool = self.my_dbpool(1)
        db = dbpool.connection()
        self.assertEqual(db.num_queries, 0)
        db.query()
        self.assertEqual(db.num_queries, 1)
        db.close()
        # The wrapper is unusable after close...
        self.assertTrue(not hasattr(db, 'num_queries'))
        # ...but the pooled connection keeps its state for the next borrower.
        db = dbpool.connection()
        self.assertTrue(hasattr(db, 'dbname'))
        self.assertEqual(db.dbname, 'SimplePooledPgTestDB')
        self.assertTrue(hasattr(db, 'user'))
        self.assertEqual(db.user, 'SimplePooledPgTestUser')
        self.assertEqual(db.num_queries, 1)
        db.query()
        self.assertEqual(db.num_queries, 2)

    def test3_two_connections(self):
        """Two borrowers get distinct connections; state follows the pool."""
        dbpool = self.my_dbpool(2)
        db1 = dbpool.connection()
        for i in range(5):
            db1.query()
        db2 = dbpool.connection()
        self.assertNotEqual(db1, db2)
        self.assertNotEqual(db1._con, db2._con)
        for i in range(7):
            db2.query()
        self.assertEqual(db1.num_queries, 5)
        self.assertEqual(db2.num_queries, 7)
        db1.close()
        db1 = dbpool.connection()
        self.assertNotEqual(db1, db2)
        self.assertNotEqual(db1._con, db2._con)
        self.assertTrue(hasattr(db1, 'query'))
        for i in range(3):
            db1.query()
        self.assertEqual(db1.num_queries, 8)
        db2.query()
        self.assertEqual(db2.num_queries, 8)

    def test4_threads(self):
        """With a pool of 2, a third borrower blocks until one is returned."""
        dbpool = self.my_dbpool(2)
        try:
            from Queue import Queue, Empty
        except ImportError:  # Python 3
            from queue import Queue, Empty
        queue = Queue(3)
        def connection():
            queue.put(dbpool.connection())
        from threading import Thread
        # NOTE(review): Thread(...).start() returns None, so these variables
        # are always None; they are never used, so this is harmless.
        thread1 = Thread(target=connection).start()
        thread2 = Thread(target=connection).start()
        thread3 = Thread(target=connection).start()
        # The try/except pairs below accommodate the old Queue.get() signature
        # that did not accept a timeout argument.
        try:
            db1 = queue.get(1, 1)
            db2 = queue.get(1, 1)
        except TypeError:
            db1 = queue.get(1)
            db2 = queue.get(1)
        self.assertNotEqual(db1, db2)
        self.assertNotEqual(db1._con, db2._con)
        try:
            self.assertRaises(Empty, queue.get, 1, 0.1)
        except TypeError:
            self.assertRaises(Empty, queue.get, 0)
        db2.close()
        try:
            db3 = queue.get(1, 1)
        except TypeError:
            db3 = queue.get(1)
        self.assertNotEqual(db1, db3)
        self.assertNotEqual(db1._con, db3._con)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
adityahase/frappe | frappe/integrations/doctype/paytm_settings/paytm_settings.py | 2 | 5857 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import json
import requests
from six.moves.urllib.parse import urlencode
import frappe
from frappe.model.document import Document
from frappe import _
from frappe.utils import get_url, call_hook_method, cint, flt, cstr
from frappe.integrations.utils import create_request_log, create_payment_gateway
from frappe.utils import get_request_site_address
from paytmchecksum import generateSignature, verifySignature
from frappe.utils.password import get_decrypted_password
class PaytmSettings(Document):
	"""Singleton doctype holding the Paytm payment gateway configuration."""

	# Paytm only processes Indian Rupees.
	supported_currencies = ["INR"]

	def validate(self):
		"""Registers the Paytm gateway and fires the enabled hook on save."""
		create_payment_gateway('Paytm')
		call_hook_method('payment_gateway_enabled', gateway='Paytm')

	def validate_transaction_currency(self, currency):
		"""Raises if the transaction currency is not supported by Paytm."""
		if currency not in self.supported_currencies:
			frappe.throw(_("Please select another payment method. Paytm does not support transactions in currency '{0}'").format(currency))

	def get_payment_url(self, **kwargs):
		'''Return payment url with several params'''
		# create unique order id by making it equal to the integration request
		integration_request = create_request_log(kwargs, "Host", "Paytm")
		kwargs.update(dict(order_id=integration_request.name))
		return get_url("./integrations/paytm_checkout?{0}".format(urlencode(kwargs)))
def get_paytm_config():
	''' Returns paytm config '''
	paytm_config = frappe.db.get_singles_dict('Paytm Settings')
	# The merchant key is stored encrypted; decrypt it for signing requests.
	paytm_config.update(dict(merchant_key=get_decrypted_password('Paytm Settings', 'Paytm Settings', 'merchant_key')))

	if cint(paytm_config.staging):
		# Staging/sandbox endpoints.
		paytm_config.update(dict(
			website="WEBSTAGING",
			url='https://securegw-stage.paytm.in/order/process',
			transaction_status_url='https://securegw-stage.paytm.in/order/status',
			industry_type_id='RETAIL'
		))
	else:
		# Production endpoints; website/industry come from the settings doc.
		paytm_config.update(dict(
			url='https://securegw.paytm.in/order/process',
			transaction_status_url='https://securegw.paytm.in/order/status',
		))

	return paytm_config
def get_paytm_params(payment_details, order_id, paytm_config):
	"""Builds the signed parameter dict posted to Paytm's checkout.

	Args:
		payment_details: dict with at least 'payer_email' and 'amount'.
		order_id: unique order identifier (the Integration Request name).
		paytm_config: config dict from get_paytm_config().
	"""
	# initialize a dictionary
	paytm_params = dict()

	# Paytm redirects the browser here after payment completes.
	redirect_uri = get_request_site_address(True) + "/api/method/frappe.integrations.doctype.paytm_settings.paytm_settings.verify_transaction"

	paytm_params.update({
		"MID" : paytm_config.merchant_id,
		"WEBSITE" : paytm_config.website,
		"INDUSTRY_TYPE_ID" : paytm_config.industry_type_id,
		"CHANNEL_ID" : "WEB",
		"ORDER_ID" : order_id,
		"CUST_ID" : payment_details['payer_email'],
		"EMAIL" : payment_details['payer_email'],
		"TXN_AMOUNT" : cstr(flt(payment_details['amount'], 2)),
		"CALLBACK_URL" : redirect_uri,
	})

	# Sign the parameters with the merchant key so Paytm can verify them.
	checksum = generateSignature(paytm_params, paytm_config.merchant_key)

	paytm_params.update({
		"CHECKSUMHASH" : checksum
	})

	return paytm_params
@frappe.whitelist(allow_guest=True)
def verify_transaction(**paytm_params):
	'''Verify checksum for received data in the callback and then verify the transaction'''
	paytm_config = get_paytm_config()
	is_valid_checksum = False

	# Strip frappe's routing key and the signature itself before verifying.
	paytm_params.pop('cmd', None)
	paytm_checksum = paytm_params.pop('CHECKSUMHASH', None)

	if paytm_params and paytm_config and paytm_checksum:
		# Verify checksum
		is_valid_checksum = verifySignature(paytm_params, paytm_config.merchant_key, paytm_checksum)

	# RESPCODE '01' is Paytm's success code; anything else is a failure.
	if is_valid_checksum and paytm_params.get('RESPCODE') == '01':
		verify_transaction_status(paytm_config, paytm_params['ORDERID'])
	else:
		frappe.respond_as_web_page("Payment Failed",
			"Transaction failed to complete. In case of any deductions, deducted amount will get refunded to your account.",
			http_status_code=401, indicator_color='red')
		frappe.log_error("Order unsuccessful. Failed Response:"+cstr(paytm_params), 'Paytm Payment Failed')
def verify_transaction_status(paytm_config, order_id):
	'''Verify transaction completion after checksum has been verified'''
	# Server-to-server status query; signed like the checkout request.
	paytm_params=dict(
		MID=paytm_config.merchant_id,
		ORDERID= order_id
	)

	checksum = generateSignature(paytm_params, paytm_config.merchant_key)
	paytm_params["CHECKSUMHASH"] = checksum

	post_data = json.dumps(paytm_params)
	url = paytm_config.transaction_status_url

	response = requests.post(url, data = post_data, headers = {"Content-type": "application/json"}).json()
	finalize_request(order_id, response)
def finalize_request(order_id, transaction_response):
	"""Marks the Integration Request completed/failed and redirects the user.

	Args:
		order_id: name of the Integration Request created at checkout.
		transaction_response: parsed JSON from Paytm's order/status API.
	"""
	request = frappe.get_doc('Integration Request', order_id)
	transaction_data = frappe._dict(json.loads(request.data))
	redirect_to = transaction_data.get('redirect_to') or None
	redirect_message = transaction_data.get('redirect_message') or None

	if transaction_response['STATUS'] == "TXN_SUCCESS":
		if transaction_data.reference_doctype and transaction_data.reference_docname:
			custom_redirect_to = None
			try:
				# Let the reference document (e.g. Payment Request) react to the
				# successful payment; it may supply its own redirect target.
				custom_redirect_to = frappe.get_doc(transaction_data.reference_doctype,
					transaction_data.reference_docname).run_method("on_payment_authorized", 'Completed')
				request.db_set('status', 'Completed')
			except Exception:
				request.db_set('status', 'Failed')
				frappe.log_error(frappe.get_traceback())

			if custom_redirect_to:
				redirect_to = custom_redirect_to

			redirect_url = '/integrations/payment-success'
	else:
		request.db_set('status', 'Failed')
		redirect_url = '/integrations/payment-failed'

	if redirect_to:
		redirect_url += '?' + urlencode({'redirect_to': redirect_to})
	if redirect_message:
		redirect_url += '&' + urlencode({'redirect_message': redirect_message})

	frappe.local.response['type'] = 'redirect'
	frappe.local.response['location'] = redirect_url
def get_gateway_controller(doctype, docname):
	"""Looks up the gateway controller configured for a document's payment gateway."""
	gateway_name = frappe.get_doc(doctype, docname).payment_gateway
	return frappe.db.get_value("Payment Gateway", gateway_name, "gateway_controller")
kevinlondon/sentry | src/sentry/models/authidentity.py | 25 | 1553 | from __future__ import absolute_import, print_function
from datetime import timedelta
from django.conf import settings
from django.db import models
from django.utils import timezone
from jsonfield import JSONField
from sentry.db.models import FlexibleForeignKey, Model, sane_repr
class AuthIdentity(Model):
    """Links a Sentry user to an identity provided by an SSO AuthProvider."""

    user = FlexibleForeignKey(settings.AUTH_USER_MODEL)
    auth_provider = FlexibleForeignKey('sentry.AuthProvider')
    # Provider-specific identifier for this user (e.g. remote account id).
    ident = models.CharField(max_length=128)
    # Arbitrary provider payload (tokens, profile data, etc.).
    data = JSONField()
    last_verified = models.DateTimeField(default=timezone.now)
    last_synced = models.DateTimeField(default=timezone.now)
    date_added = models.DateTimeField(default=timezone.now)

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_authidentity'
        # One identity per (provider, remote ident) and per (provider, user).
        unique_together = (('auth_provider', 'ident'), ('auth_provider', 'user'))

    __repr__ = sane_repr('user_id', 'auth_provider_id')

    def get_audit_log_data(self):
        """Returns the fields recorded in audit log entries for this identity."""
        return {
            'user_id': self.user_id,
            'data': self.data,
        }

    # TODO(dcramer): we'd like to abstract this so there's a central Role object
    # and it doesnt require two composite db objects to talk to each other
    def is_valid(self, member):
        """Returns True if this identity is still usable for the given member.

        The identity is invalid if the membership is flagged sso:invalid or
        not sso:linked, or if it has not been verified in the last 24 hours.
        """
        if getattr(member.flags, 'sso:invalid'):
            return False
        if not getattr(member.flags, 'sso:linked'):
            return False

        if not self.last_verified:
            return False
        if self.last_verified < timezone.now() - timedelta(hours=24):
            return False
        return True
| bsd-3-clause |
wanghaven/nupic | examples/opf/experiments/multistep/simple_0/description.py | 38 | 1588 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
# The sub-experiment configuration: values here override the base experiment
# description. Empty dicts mean "inherit the base experiment's defaults".
config = \
{ 'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/simple_0.csv'),
  'modelParams': { 'clParams': { },
                   'sensorParams': { 'encoders': { }, 'verbosity': 0},
                   'spParams': { },
                   'tpParams': { }}}

# Merge the base description with our overrides and expose its symbols here.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| agpl-3.0 |
Plain-Andy-legacy/android_external_chromium_org | third_party/boringssl/update_gypi_and_asm.py | 26 | 7225 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can b
# found in the LICENSE file.
"""Enumerates the BoringSSL source in src/ and generates two gypi files:
boringssl.gypi and boringssl_tests.gypi."""
import os
import subprocess
import sys
# OS_ARCH_COMBOS maps from OS and platform to the OpenSSL assembly "style" for
# that platform and the extension used by asm files.
OS_ARCH_COMBOS = [
('linux', 'arm', 'elf', [''], 'S'),
('linux', 'x86', 'elf', ['-fPIC'], 'S'),
('linux', 'x86_64', 'elf', [''], 'S'),
('mac', 'x86', 'macosx', ['-fPIC'], 'S'),
('mac', 'x86_64', 'macosx', [''], 'S'),
('win', 'x86_64', 'masm', [''], 'asm'),
]
# NON_PERL_FILES enumerates assembly files that are not processed by the
# perlasm system.
NON_PERL_FILES = {
('linux', 'arm'): [
'src/crypto/poly1305/poly1305_arm_asm.S',
'src/crypto/chacha/chacha_vec_arm.S',
],
}
FILE_HEADER = """# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is created by update_gypi_and_asm.py. Do not edit manually.
"""
def FindCMakeFiles(directory):
  """Returns list of all CMakeLists.txt files recursively in directory."""
  found = []
  for (root, _, names) in os.walk(directory):
    found.extend(os.path.join(root, name)
                 for name in names if name == 'CMakeLists.txt')
  return found
def NoTests(dent, is_dir):
  """Filter function that can be passed to FindCFiles in order to remove test
  sources."""
  if is_dir:
    # Prune 'test' directories from the walk entirely.
    return dent != 'test'
  # Exclude *test.* files and the example_* programs.
  return not ('test.' in dent or dent.startswith('example_'))
def OnlyTests(dent, is_dir):
  """Filter function that can be passed to FindCFiles in order to remove
  non-test sources."""
  # Descend into every directory; select only *_test.* and example_* files.
  return True if is_dir else (dent.startswith('example_') or '_test.' in dent)
def FindCFiles(directory, filter_func):
  """Recurses through directory and returns a list of paths to all the C source
  files that pass filter_func.

  Args:
    directory: root directory to walk.
    filter_func: callable(name, is_dir) -> bool. Returning False for a file
        excludes it; returning False for a directory prunes it from the walk.
  """
  cfiles = []
  for (path, dirnames, filenames) in os.walk(directory):
    for filename in filenames:
      if filename.endswith('.c') and filter_func(filename, False):
        cfiles.append(os.path.join(path, filename))
    # Prune excluded directories by rewriting the list in place so os.walk
    # skips them. The previous code deleted entries while enumerating the
    # same list, which skips the element following each deletion and could
    # leave consecutive excluded directories in the walk.
    dirnames[:] = [d for d in dirnames if filter_func(d, True)]
  return cfiles
def ExtractPerlAsmFromCMakeFile(cmakefile):
  """Parses the contents of the CMakeLists.txt file passed as an argument and
  returns a list of all the perlasm() directives found in the file."""
  base_dir = os.path.dirname(cmakefile)
  directives = []
  with open(cmakefile) as source:
    for raw_line in source:
      stripped = raw_line.strip()
      if not stripped.startswith('perlasm('):
        continue
      if not stripped.endswith(')'):
        raise ValueError('Bad perlasm line in %s' % cmakefile)
      # Drop "perlasm(" from the start and ")" from the end, then split the
      # remaining whitespace-separated arguments: output, input, extras...
      params = stripped[8:-1].split()
      if len(params) < 2:
        raise ValueError('Bad perlasm line in %s' % cmakefile)
      directives.append({
          'extra_args': params[2:],
          'input': os.path.join(base_dir, params[1]),
          'output': os.path.join(base_dir, params[0]),
      })
  return directives
def ReadPerlAsmOperations():
  """Returns a list of all perlasm() directives found in CMake config files in
  src/."""
  ops = []
  for cmakefile in FindCMakeFiles('src'):
    ops.extend(ExtractPerlAsmFromCMakeFile(cmakefile))
  return ops
def PerlAsm(output_filename, input_filename, perlasm_style, extra_args):
  """Runs the a perlasm script and puts the output into output_filename.

  Args:
    output_filename: path where the generated assembly is written; parent
        directories are created if needed.
    input_filename: the perlasm .pl script to run.
    perlasm_style: OpenSSL assembly "style" passed as the script's first arg.
    extra_args: additional arguments forwarded to the script.
  """
  base_dir = os.path.dirname(output_filename)
  if not os.path.isdir(base_dir):
    os.makedirs(base_dir)
  # The perlasm scripts print the generated assembly to stdout.
  output = subprocess.check_output(
      ['perl', input_filename, perlasm_style] + extra_args)
  with open(output_filename, 'w+') as out_file:
    out_file.write(output)
def ArchForAsmFilename(filename):
  """Returns the architecture that a given asm file should be compiled for
  based on substrings in the filename."""
  # Order matters: 'x86_64' must be tested before the plain 'x86' match.
  if 'x86_64' in filename or 'avx2' in filename:
    return 'x86_64'
  if ('x86' in filename and 'x86_64' not in filename) or '586' in filename:
    return 'x86'
  if 'arm' in filename:
    return 'arm'
  raise ValueError('Unknown arch for asm filename: ' + filename)
def WriteAsmFiles(perlasms):
  """Generates asm files from perlasm directives for each supported OS x
  platform combination.

  Args:
    perlasms: list of dicts from ReadPerlAsmOperations().

  Returns:
    Dict mapping (osname, arch) to the list of generated asm file paths,
    including the hand-written NON_PERL_FILES entries.
  """
  asmfiles = {}
  for osarch in OS_ARCH_COMBOS:
    (osname, arch, perlasm_style, extra_args, asm_ext) = osarch
    key = (osname, arch)
    # Generated files for each platform live under an "<os>-<arch>" directory.
    outDir = '%s-%s' % key
    for perlasm in perlasms:
      filename = os.path.basename(perlasm['input'])
      output = perlasm['output']
      if not output.startswith('src'):
        raise ValueError('output missing src: %s' % output)
      # Rebase "src/..." outputs into the per-platform output directory.
      output = os.path.join(outDir, output[4:])
      output = output.replace('${ASM_EXT}', asm_ext)
      # Only run scripts whose filename matches this platform's architecture.
      if arch == ArchForAsmFilename(filename):
        PerlAsm(output, perlasm['input'], perlasm_style,
                perlasm['extra_args'] + extra_args)
        asmfiles.setdefault(key, []).append(output)
  # Add the assembly files that are not generated by perlasm.
  for (key, non_perl_asm_files) in NON_PERL_FILES.iteritems():
    asmfiles.setdefault(key, []).extend(non_perl_asm_files)
  return asmfiles
def PrintVariableSection(out, name, files):
  """Writes a gyp variable entry named |name| listing |files| in sorted order."""
  out.write("  '%s': [\n" % name)
  for entry in sorted(files):
    out.write("    '%s',\n" % entry)
  out.write("  ],\n")
def main():
  """Regenerates boringssl.gypi and boringssl_tests.gypi from src/.

  Collects non-test C sources for the library gypi, runs perlasm for every
  supported OS/arch combination, and emits one executable target per test
  source into the tests gypi.
  """
  crypto_c_files = FindCFiles(os.path.join('src', 'crypto'), NoTests)
  ssl_c_files = FindCFiles(os.path.join('src', 'ssl'), NoTests)
  with open('boringssl.gypi', 'w+') as gypi:
    gypi.write(FILE_HEADER + '{\n  \'variables\': {\n')
    PrintVariableSection(
        gypi, 'boringssl_lib_sources', crypto_c_files + ssl_c_files)
    perlasms = ReadPerlAsmOperations()
    # One variable per (os, arch), sorted for deterministic output.
    for ((osname, arch), asm_files) in sorted(
        WriteAsmFiles(perlasms).iteritems()):
      PrintVariableSection(gypi, 'boringssl_%s_%s_sources' %
                           (osname, arch), asm_files)
    gypi.write('  }\n}\n')
  test_c_files = FindCFiles(os.path.join('src', 'crypto'), OnlyTests)
  test_c_files += FindCFiles(os.path.join('src', 'ssl'), OnlyTests)
  with open('boringssl_tests.gypi', 'w+') as test_gypi:
    test_gypi.write(FILE_HEADER + '{\n  \'targets\': [\n')
    test_names = []
    for test in sorted(test_c_files):
      test_name = 'boringssl_%s' % os.path.splitext(os.path.basename(test))[0]
      test_gypi.write("""    {
      'target_name': '%s',
      'type': 'executable',
      'dependencies': [
        'boringssl.gyp:boringssl',
      ],
      'sources': [
        '%s',
      ],
    },\n""" % (test_name, test))
      test_names.append(test_name)
    test_names.sort()
    # Also expose the list of test target names as a gyp variable.
    test_gypi.write("""  ],
  'variables': {
    'boringssl_test_targets': [\n""")
    for test in test_names:
      test_gypi.write("""      '%s',\n""" % test)
    test_gypi.write('    ],\n  }\n}\n')
  return 0
| bsd-3-clause |
saknis/upelis | wiki - Copy.py | 1 | 169333 | #!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple Google App Engine wiki application.
The main distinguishing feature is that editing is in a WYSIWYG editor
rather than a text editor with special syntax. This application uses
google.appengine.api.datastore to access the datastore. This is a
lower-level API on which google.appengine.ext.db depends.
"""
__author__ = 'Bret Taylor & Nerijus Terebas'
import cgi
import cgitb
cgitb.enable()
import datetime
import os
import re
import sys
import urllib
import urlparse
import base64
import codecs
import math
from pngcanvas import PNGCanvas
import random
import json
#import wsgiref.handlers
#from google.appengine.ext import webapp
#from google.appengine.ext.webapp.util import run_wsgi_app
import webapp2 as webapp
from webapp2_extras import routes
#import wsgiref.handlers
import traceback
from google.appengine.ext import db
from google.appengine.api import mail
from google.appengine.api import datastore
from google.appengine.api import datastore_types
from google.appengine.api import users
from google.appengine.ext.webapp import template
from google.appengine.api import images
import locale
import gettext
from google.appengine.api import urlfetch
from Picasa import Picasa
from postmarkup import render_bbcode
from UserAdd import UserAdd
from UserAdd import Vartotojai
from Start import Start
from Start import Codeimagereg
from Start import AppVer
from Start import DinCode
import facebookoauth
from facebookoauth import FBUser
import linkedinauth
from linkedinauth import LIUser
import vkauth
from vkauth import VKUser
from upelis_settings import *
_DEBUG = DEBUG
_mailsender=MAILSENDER
_mailrcptto=MAILRCPTTO
cmsname2=CMSNAME
cmspath2=CMSPATH
cmstrans2=CMSTRANS
site1a=SITE1A
site1b=SITE1B
site2a=SITE2A
site2b=SITE2B
sitedown=SITEDOWN
current_locale = CURLOCALE
kalbos=LANGUAGES
kalboseile=LANGUAGESNR
kalbossort = LANGUAGESSORT
locale_path = LOCALEPATH
fileext=FILEEXT
lang=LANG
_kalbhtml = LANGHTML
langdef=lang
lang1 = gettext.translation (cmstrans2, locale_path, [current_locale] , fallback=True)
_ = lang1.ugettext
# Set to true if we want to have our webapp print stack traces, etc
_titauth = TITAUTH
#_titauth = "Nerijus Terebas"
_version=VERSION
#if os.environ['HTTP_HOST']==site1a or os.environ['HTTP_HOST']==site1b or os.environ['HTTP_HOST']==site2a or os.environ['HTTP_HOST']==site2b:
# imgfont = "Ubuntu-B.ttf"
#else:
# imgfont = "VeraSeBd.ttf"
#imgfont = "Ubuntu-B.ttf"
imgopt = DYNABOPT
fbputimgurl2="/static/images/upelis116.jpg"
avatarmaxurl2="/static/images/avatarmax.png"
avatarminurl2="/static/images/avatarmin.png"
g16url="/static/images/g16.png"
fb16url="/static/images/fb16.png"
in16url="/static/images/in16.png"
vk16url="/static/images/vk16.png"
gplusurl="/static/images/plus.ico"
def siteauth():
    # Hard-code the author credit on the canonical deployment hosts; any
    # other host falls back to the configured TITAUTH value.
    if os.environ['HTTP_HOST']==site1a or os.environ['HTTP_HOST']==site1b or os.environ['HTTP_HOST']==site2a or os.environ['HTTP_HOST']==site2b:
        return "Nerijus Terebas"
    else:
        return _titauth
def urlparam(rparameters):
    """Splits a dotted URL tail into its components and resolves the locale.

    The last dot-separated piece is the extension, the one before it the
    language code, and the one before that the "aps" segment; missing pieces
    default to ''. Unknown language codes fall back to the default language.
    """
    pieces = rparameters.split(".")
    pieces.reverse()
    # Pad so that ext/lang/aps always exist even for short inputs.
    pieces.extend(["", "", ""])
    ext, lang, aps = pieces[0], pieces[1], pieces[2]
    if lang in kalbos:
        kalb = kalbos[lang]
    else:
        kalb = kalbos[langdef]
        lang = langdef
    return {
        'ext': ext,
        'lang': lang,
        'kalb': kalb,
        'aps': aps}
def userinfo(pic_key2,utype,lang, fileext):
    """Collects profile data and page URLs for a site user.

    Args:
        pic_key2: lookup key - a userid when utype is truthy, otherwise a
            user (lankytojas) object.
        utype: selects which Vartotojai field to query by.
        lang: language code; unknown codes fall back to the default locale.
        fileext: URL file extension used when building the page links.

    Returns:
        Dict with the user's avatar URL, pseudonym, rendered profile content,
        page URLs and checkbox state strings for the control-panel template.
    """
    if lang in kalbos:
        kalb=kalbos[lang]
    else:
        kalb=kalbos[langdef]
    lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
    _ = lang1.ugettext
    # Defaults used when the user record is missing or the query fails.
    rcomm = False
    rpica = False
    rplus = True
    buvoapp = False
    userid = "0"
    content = ""
    content2 = ""
    pseudonimas = "Anonymous"
    lank = UserNone(email=None, federated_identity=None)
    youtname=""
    vartkey=""
    thubnail=""
    vartot = None
    imagemaxurl = avatarmaxurl2
    userpicapagetext=""
    klaida=False
    errtext = ""
    user = users.get_current_user()
    try:
        if utype:
            buvesapp = db.GqlQuery("SELECT * FROM Vartotojai WHERE userid = :1", pic_key2)
        else:
            buvesapp = db.GqlQuery("SELECT * FROM Vartotojai WHERE lankytojas = :1", pic_key2)
        # The query should match at most one profile; the loop copies its
        # fields into the local defaults above.
        for app in buvesapp:
            rcomm = app.commrodyti
            rpica = app.picarodyti
            rplus = app.plusrodyti
            buvoapp = app.rodyti
            userid = app.userid
            content = render_bbcode(str(app.content))
            content2 = str(app.content)
            pseudonimas = str(app.pseudonimas)
            lank=app.lankytojas
            youtname=app.youtname
            vartkey=app.key()
            thubnail=getphoto(lank.email())
            vartot = db.get(vartkey)
    except:
        # Remember the failure; the error text is surfaced on the page below.
        klaida=True
        errtext = cgi.escape(str(sys.exc_info()[0])) + ' ' + cgi.escape(str(sys.exc_info()[1])) + ' ' + cgi.escape(str(sys.exc_info()[2]))
    # Absolute URLs for the user's sub-pages (comments, gallery, youtube, mail).
    usercommpageurl = ("%s/%s-usercommpage-%s.%s/%s/%s" % (urlhost2(), cmspath2,lang, fileext, pseudonimas, userid))
    userpicapageurl = ("%s/%s-userpicapage-%s.%s/%s/%s" % (urlhost2(), cmspath2,lang, fileext, pseudonimas, userid))
    useryoutpageurl = ("%s/%s-useryoutpage-%s.%s/%s/%s" % (urlhost2(), cmspath2,lang, fileext, pseudonimas, userid))
    usermailformpageurl = ("%s/%s-usermailformpage-%s.%s/%s/%s" % (urlhost2(), cmspath2,lang, fileext, pseudonimas, userid))
    if lank.email():
        usermailformtext = (_("User mail form page link text %(usercppseudonimas)s %(usermailformpageurl)s") % {'usercppseudonimas': pseudonimas,'usermailformpageurl': usermailformpageurl})
    else:
        usermailformtext = "User not Found"
    if klaida:
        userpicapagetext = ("<div>Error: %s</div" % (errtext))
    # Only link the pages the user has opted to show.
    if rcomm:
        userpicapagetext = userpicapagetext + (_("User comm page link text %(usercppseudonimas)s %(usercommpageurl)s") % {'usercppseudonimas': pseudonimas,'usercommpageurl': usercommpageurl})
    if rpica:
        userpicapagetext = userpicapagetext + (_("User pica page link text %(usercppseudonimas)s %(userpicapageurl)s") % {'usercppseudonimas': pseudonimas,'userpicapageurl': userpicapageurl})
    if klaida:
        userpicapagetext = userpicapagetext + "<br />" + errtext
    plusurl=getplius(lank.email())
    if rplus and plusurl:
        usercpplustext = ("<a href=\"%s\"><img src=\"%s\" width=\"32\" height=\"32\" border=\"0\" alt=\"\"></img> <strong>Google Plus</strong></a><br /><br />\n\n" % (plusurl,gplusurl))
        rpluscheck="checked=\"checked\" "
    else:
        usercpplustext = ""
        rpluscheck=" "
    if youtname and len(str(youtname))>0:
        userpicapagetext = userpicapagetext + (_("User yout page link text %(usercppseudonimas)s %(useryoutpageurl)s") % {'usercppseudonimas': pseudonimas,'useryoutpageurl': useryoutpageurl})
    if buvoapp:
        # The user uploaded an avatar; serve it from the app.
        imagemaxurl = ("/%s-userimage/%s/%s" % (cmspath2,pseudonimas, userid))
    if thubnail and not buvoapp:
        # Fall back to the Picasa profile photo, upsized from s144 to s200.
        imagemaxurl = str(thubnail)
        uphoto=imagemaxurl.split("/s144/", 1)
        slasas="/s200/"
        imagemaxurl = slasas.join(uphoto)
    usercpurl = ("/%s-usercontrolpanel-%s.%s" % (cmspath2,lang,fileext))
    userpageend = ("%s/%s/%s" % (fileext,pseudonimas,userid))
    userpageurl = ("%s/%s-userpage-%s.%s/%s/%s" % (urlhost2(),cmspath2, lang, fileext, pseudonimas, userid))
    # Pre-rendered checkbox attributes for the control-panel form.
    if rcomm:
        rcommcheck="checked=\"checked\" "
    else:
        rcommcheck=""
    if rpica:
        rpicacheck="checked=\"checked\" "
    else:
        rpicacheck=""
    if buvoapp:
        buvoappcheck=""
    else:
        buvoappcheck="checked=\"checked\" "
    values = {
        'imagemaxurl': imagemaxurl,
        'userpageend': userpageend,
        'userpicapagetext': userpicapagetext,
        'usercpplustext': usercpplustext,
        'usermailformtext': usermailformtext,
        'usermailformpageurl': usermailformpageurl,
        'useryoutpageurl': useryoutpageurl,
        'userpicapageurl': userpicapageurl,
        'usercommpageurl': usercommpageurl,
        'usercommpageurl': usercommpageurl,
        'usercpurl': usercpurl,
        'pseudonimas': pseudonimas,
        'userid': userid,
        'content': content,
        'content2': content2,
        'youtname': youtname,
        'vartot': vartot,
        'rcomm': rcomm,
        'rpica': rpica,
        'lank': lank,
        'rcommcheck': rcommcheck,
        'rpluscheck': rpluscheck,
        'rpicacheck': rpicacheck,
        'buvoappcheck': buvoappcheck,
        'userpageurl': userpageurl}
    return values
def codekey2():
    """Creates a six-digit captcha code entity and returns its datastore key."""
    codeimg = Codeimagereg()
    codeimg.ipadresas = os.environ['REMOTE_ADDR']
    codeimg.date = datetime.datetime.now()
    # Random six-digit code rendered later as a captcha image.
    code = random.randrange(100000, 999999)
    codeimg.code = "%s" % code
    codeimg.put()
    codekey=codeimg.key()
    return codekey
def urlhost2():
    """Returns the scheme + host prefix for the current request.

    App Engine sets the HTTPS environment variable to "off" for plain HTTP
    requests; anything else means the request arrived over TLS.
    """
    scheme = "http" if os.environ['HTTPS'] == "off" else "https"
    return str(scheme + '://' + os.environ['HTTP_HOST'])
def textloc():
    """Builds a plain-text location report from App Engine geo headers.

    Returns one line per available header (city, country, lat/long), or an
    empty string when none of them are present.
    """
    lines = []
    if 'HTTP_X_APPENGINE_CITY' in os.environ:
        lines.append("%s: %s \n" % ('City', os.environ['HTTP_X_APPENGINE_CITY']))
    if 'HTTP_X_APPENGINE_COUNTRY' in os.environ:
        lines.append("%s: %s \n" % ('Country', os.environ['HTTP_X_APPENGINE_COUNTRY']))
    if 'HTTP_X_APPENGINE_CITYLATLONG' in os.environ:
        # Turn the raw coordinates into a Google Maps link.
        lines.append("%s: http://maps.google.com/maps?q=%s \n" % ('CityLatLong', os.environ['HTTP_X_APPENGINE_CITYLATLONG']))
    return "".join(lines)
def textinfo():
    """Returns a plain-text summary of the requesting client for mail bodies."""
    return ("\n\nRemote Addr: %s\nUser Agent: %s\nLog ID: %s\n" % (
        os.environ['REMOTE_ADDR'],
        os.environ['HTTP_USER_AGENT'],
        os.environ['REQUEST_LOG_ID']))
class BaseRequestHandler(webapp.RequestHandler):
  """Supplies a common template generation function.
  When you call generate(), we augment the template variables supplied with
  the current user in the 'user' variable and the current webapp request
  in the 'request' variable.
  """
  def generate(self, template_name, languag, template_values=None):
    """Render *template_name* translated into *languag* and write it out.

    Args:
      template_name: file name inside the local ``templates`` directory.
      languag: requested UI language code; falls back to ``langdef`` when
        empty or unknown.
      template_values: optional dict of extra template variables, merged
        over the common defaults built here.  (Fixed: previously a mutable
        default argument ``{}`` shared between calls.)
    """
    if template_values is None:
      template_values = {}
    UserAdd().plus()
    Start().first()
    # Resolve the UI language, falling back to the site default.
    if not languag:
      languag = langdef
    if languag in kalbos:
      kalb = kalbos[languag]
    else:
      kalb = kalbos[langdef]
    lang1 = gettext.translation(cmstrans2, locale_path, [kalb], fallback=True)
    _ = lang1.ugettext
    kalb2 = kalb.replace("_", "-")
    # Common template variables available to every page.
    values = {
      'request': self.request,
      'user': users.GetCurrentUser(),
      'fbuser': self.fb_current_user,
      'liuser': self.li_current_user,
      'vkuser': self.vk_current_user,
      'isadmin': users.is_current_user_admin(),
      'self_url': self.request.uri,
      'login_url': users.CreateLoginURL(self.request.uri),
      'logout_url': users.CreateLogoutURL(self.request.uri),
      'fblogin_url': "/auth/login?continue=%s" % (urllib.quote(self.request.uri)),
      'fblogout_url': "/auth/logout?continue=%s" % (urllib.quote(self.request.uri)),
      'lilogin_url': "/liauth/login?continue=%s" % (urllib.quote(self.request.uri)),
      'lilogout_url': "/liauth/logout?continue=%s" % (urllib.quote(self.request.uri)),
      'vklogin_url': "/vkauth/login?continue=%s" % (urllib.quote(self.request.uri)),
      'vklogout_url': "/vkauth/logout?continue=%s" % (urllib.quote(self.request.uri)),
      'application_name': siteauth(),
      'msgtext_logout': _("logout"),
      'msgtext_login': _("login"),
      'msgtext_header': _("header %(cmsname)s") % {'cmsname': cmsname2},
      'gallery': _("Gallery"),
      'kalba': kalb2,
      'cmspath': cmspath2,
      }
    values.update(template_values)
    directory = os.path.dirname(__file__)
    path = os.path.join(directory, os.path.join('templates', template_name))
    self.response.headers['X-Powered-By'] = cmsname2 + '/' + _version
    # Kill switch: a DinCode entity named "start" holds a Python expression
    # that decides whether the site is enabled.
    appon = False
    try:
      codedb = db.GqlQuery("SELECT * FROM DinCode WHERE codename = :1", "start")
      for thiscode in codedb:
        thiscode = thiscode.codetext
        # SECURITY NOTE: eval() of datastore-stored text — anyone able to
        # write DinCode entities can execute arbitrary code here.
        appon = eval(thiscode)
    except Exception:
      appon = False
    if appon:
      self.response.out.write(template.render(path, values, debug=_DEBUG))
    else:
      # Site disabled: serve the "disable" DinCode body, or a fallback.
      disablecode = "<html><body>Disable, swith to on</body></html>"
      try:
        codedb = db.GqlQuery("SELECT * FROM DinCode WHERE codename = :1", "disable")
        for thiscode in codedb:
          disablecode = thiscode.codetext
      except Exception:
        disablecode = "<html><body>Disable, swith to on</body></html>"
      self.response.out.write(disablecode)
  @property
  def fb_current_user(self):
    """Returns the logged in Facebook user, or None if unconnected."""
    if not hasattr(self, "_fb_current_user"):
      self._fb_current_user = None
      user_id = facebookoauth.parse_cookie(self.request.cookies.get("fb_user"))
      if user_id:
        self._fb_current_user = FBUser.get_by_key_name(user_id)
        # Require an explicit "login" flag on the stored FBUser record.
        if not self._fb_current_user or not hasattr(self._fb_current_user, "login") or not self._fb_current_user.login:
          self._fb_current_user = None
    return self._fb_current_user
  @property
  def li_current_user(self):
    """Returns the logged in LinkedIn user, or None if unconnected."""
    if not hasattr(self, "_li_current_user"):
      self._li_current_user = None
      user_id = linkedinauth.parse_cookie(self.request.cookies.get("li_user"))
      if user_id:
        self._li_current_user = LIUser.get_by_key_name(user_id)
    return self._li_current_user
  @property
  def vk_current_user(self):
    """Returns the logged in VKontakte user, or None if unconnected."""
    if not hasattr(self, "_vk_current_user"):
      self._vk_current_user = None
      user_id = vkauth.parse_cookie(self.request.cookies.get("vk_user"))
      if user_id:
        self._vk_current_user = VKUser.get_by_key_name(user_id)
    return self._vk_current_user
class WikiFav(BaseRequestHandler):
  """Serves the site favicon, choosing an alternate icon for the
  site1a/site1b hosts.

  GET and POST previously had byte-identical copy-pasted bodies; POST now
  delegates to get() so the logic exists once.
  """
  def get(self, page_name):
    # Allow clients/proxies to cache the icon for one minute.
    self.response.headers['Cache-Control'] = 'public, max-age=60'
    expires = datetime.datetime.now() + datetime.timedelta(minutes=1)
    self.response.headers['Expires'] = expires.strftime("%a, %d %b %Y %H:%M:%S GMT")
    from bindata import FavIcon
    imagelogo = FavIcon()
    fav = imagelogo.data1
    # The site1a/site1b hosts get the alternate icon blob.
    if os.environ['HTTP_HOST'] == site1a or os.environ['HTTP_HOST'] == site1b:
      fav = imagelogo.data2
    self.response.headers['Content-Type'] = 'image/x-icon'
    self.response.out.write(fav)
  def post(self, page_name):
    # Identical behaviour to GET; delegate instead of duplicating.
    self.get(page_name)
class WikiRedirDown(BaseRequestHandler):
  """Download gateway: signed-in users are redirected to the download
  site; anonymous visitors get a login-prompt page instead."""
  def get(self, rparameters):
    params = urlparam(rparameters)
    lang = params['lang']
    translation = gettext.translation(cmstrans2, locale_path, [params['kalb']], fallback=True)
    _ = translation.ugettext
    # Logged-in users go straight to the download site.
    if users.get_current_user():
      self.response.headers['X-Powered-By'] = cmsname2 + '/' + _version
      self.redirect(sitedown)
      return
    greeting = _("Sign in or register %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}
    greeting = greeting + "<br />" + (_("diferent accounts"))
    page = Page.loadnew("download")
    page.content = "Download - " + _("Login header %(greeting)s") % {'greeting': greeting}
    page2 = Page.load("menu-%s.%s" % (lang, fileext))
    # Build the language-switch link list.
    page3 = Page.loadnew("kalbos")
    prefix = params['aps'] + '.' if len(params['aps']) > 0 else ''
    links = ''
    for name, value in kalbossort:
      links += _kalbhtml % ('download', prefix + name, params['ext'], name, name)
    page3.content = links
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
      })
class RedirN(BaseRequestHandler):
  """Forwards any GET to the same path on the site2b host."""
  def get(self, page_name):
    self.response.headers['X-Powered-By'] = cmsname2 + '/' + _version
    self.redirect('http://%s%s' % (site2b, os.environ['PATH_INFO']))
class RedirN2(BaseRequestHandler):
  """Redirects any GET to the Google homepage."""
  def get(self, page_name):
    target = 'http://www.google.com/'
    self.redirect(target)
class WikiRedirMain(BaseRequestHandler):
  """GET: redirect a legacy page URL to the canonical
  /<cmspath>-<page>-<lang>.<ext> form.  POST: render the environment-dump
  page (admin only).  The dead commented-out duplicate of post() was
  removed.
  """
  def get(self, page_name):
    if not page_name:
      page_name = "MainPage"
    self.response.headers['X-Powered-By'] = cmsname2 + '/' + _version
    # Sanitize: keep only alphanumerics in the page name.
    entitiesRx = re.compile("[^0-9a-zA-Z]")
    page_name = entitiesRx.sub("", page_name)
    # NOTE(review): `lang` here is the module-level default, not a request
    # parameter — confirm that is intended.
    self.redirect('/' + cmspath2 + '-' + page_name + '-' + lang + '.' + fileext)
  def post(self, rparameters):
    """Render the environment page (mirrors WikiEnv.get, for POSTs)."""
    param = urlparam(rparameters)
    ext = param['ext']
    lang = param['lang']
    aps = param['aps']
    kalb = param['kalb']
    lang1 = gettext.translation(cmstrans2, locale_path, [kalb], fallback=True)
    _ = lang1.ugettext
    page = Page.loadnew("env")
    user = users.get_current_user()
    greeting = ''
    if user:
      if users.is_current_user_admin():
        # Dump the CGI environment plus the POSTed form fields.
        items = os.environ.items()
        items.sort()
        for name, value in items:
          aaa = "%s\t= %s <br/>" % (name, value)
          greeting = greeting + aaa
        for field in self.request.arguments():
          aaa = "%s\t= %s <br/>" % (field, self.request.get(field))
          greeting = greeting + aaa
      else:
        greeting = _("Welcome2 %(admin)s %(usernickname)s %(userlogouturl)s") % {'admin': '', 'usernickname': user.nickname(), 'userlogouturl': users.create_logout_url(self.request.uri)}
        greeting = greeting + " " + _("and") + " " + (_("sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)})
    else:
      greeting = _("Sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}
    page.content = _("Enviroment header %(greeting)s") % {'greeting': greeting}
    page_name2 = 'menu' + '-' + lang + '.' + fileext
    page2 = Page.load(page_name2)
    # Language-switch links for this handler.
    page3 = Page.loadnew("kalbos")
    textaps = ''
    if len(aps) > 0:
      textaps = aps + '.'
    text = ''
    for name, value in kalbossort:
      text = text + (_kalbhtml % ('env', textaps + name, ext, name, name))
    page3.content = text
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
      })
class WikiRedir(BaseRequestHandler):
  """Sends both GET and POST requests to the site root."""
  def get(self, page_name):
    self.redirect('/')
  def post(self, page_name):
    # Same behaviour as GET.
    self.get(page_name)
class WikiInstall(BaseRequestHandler):
  """Seeds the datastore with a default MainPage and menu page for every
  configured language (skipping pages that already exist), then
  redirects to the site root."""
  def get(self):
    for name, value in kalbossort:
      lang = name
      kalb = kalbos[lang]
      translation = gettext.translation(cmstrans2, locale_path, [kalb], fallback=True)
      _ = translation.ugettext
      main_text = _("page index html %(cmsname)s %(cmspath)s") % {'cmsname': cmsname2, 'cmspath': cmspath2}
      menu_text = _("page menu html %(cmspath)s") % {'cmspath': cmspath2}
      def exists(page_name):
        # True when a Page entity with this name is already stored.
        query = datastore.Query('Page')
        query['name ='] = page_name
        return len(query.Get(1)) >= 1
      main_name = "MainPage-" + lang + '.' + fileext
      menu_name = "menu-" + lang + '.' + fileext
      if not exists(main_name):
        page = Page.loadnew(main_name)
        page.content = main_text
        page.save()
      if not exists(menu_name):
        page = Page.loadnew(menu_name)
        page.content = menu_text
        page.save()
    self.redirect('/')
class WikiPage(BaseRequestHandler):
  """Our one and only request handler.
  We first determine which page we are editing, using "MainPage" if no
  page is specified in the URI. We then determine the mode we are in (view
  or edit), choosing "view" by default.
  POST requests to this handler handle edit operations, writing the new page
  to the datastore.
  """
  def get(self, page_name, rparameters):
    # Decode the language/extension parameters packed into the URL tail.
    param=urlparam(rparameters)
    ext=param['ext']
    lang=param['lang']
    aps=param['aps']
    kalb=param['kalb']
    lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
    _ = lang1.ugettext
    # Rebuild the absolute request URL and split it into host/path pieces
    # for use in generated links.
    formurl=urlhost2()
    formurl=urlparse.urljoin(formurl, str(self.request.uri))
    o = urlparse.urlparse(formurl)
    urlpath_without_query_string = o.path
    url_without_query_string = o.scheme+"://"+o.netloc+o.path
    url_host = o.scheme+"://"+o.netloc
    # Load the main page by default
    if not page_name:
      page_name = 'MainPage'
    page_name_org = page_name
    rparameters2 = rparameters
    # Sanitize the URL parameters: keep only [0-9a-zA-Z-_.,].
    entitiesRx = re.compile("[^0-9a-zA-Z\x2D\x5F\x2E\x2C]")
    rparameters2 = entitiesRx.sub("", rparameters2)
    # Stored page names are "<name>-<sanitized parameters>".
    page_name = "%s-%s" % (page_name,rparameters2)
    page = Page.load(page_name)
    # page_name2 = 'menu'+'-'+lang+'.'+fileext
    page_name2 = "menu-%s.%s" % (lang,fileext)
    page2 = Page.load(page_name2)
    # Build the language-switch link list shown on every page.
    page3 = Page.loadnew("kalbos")
    textaps=''
    if len(aps)>0:
      textaps=aps+'.'
    text=''
    for name, value in kalbossort:
      text = text + (_kalbhtml % (page_name_org, textaps+name, ext, name, name))
    page3.content = text
    # Page does not exist: anonymous visitors get a plain 404.
    if not page.entity and not users.GetCurrentUser() and not users.is_current_user_admin():
      self.error(404)
      self.response.out.write('Not found')
      return
    # Default to edit for pages that do not yet exist
    if not page.entity:
      mode = 'edit'
    else:
      modes = ['view', 'edit', 'fbputwall']
      mode = self.request.get('mode')
      if not mode in modes:
        mode = 'view'
    # User must be logged in to edit
    if mode == 'edit' and not users.GetCurrentUser() and not users.is_current_user_admin():
      self.redirect(users.CreateLoginURL(self.request.uri))
      return
    # "fbputwall" mode: post this page to the user's Facebook wall and
    # show the result instead of the page content.
    if mode == 'fbputwall':
      greeting = ''
      fb_current_user=self.fb_current_user
      if fb_current_user:
        aaa = _("logged Facebook %(fbprofileurl)s %(fbpicurl)s %(fbname)s %(url)s") % {'fbprofileurl': fb_current_user.profile_url,'fbpicurl': "http://graph.facebook.com/"+fb_current_user.id+"/picture",'fbname': fb_current_user.name,'url': '/auth/logout?continue='+urllib.quote(self.request.uri)}
        from rss import MyHTMLParser,HTMLParser
        parser = HTMLParser()
        parerr = False
        # Extract the page title (first <h1>) for the wall-post name.
        try:
          p = MyHTMLParser()
          p.feed(parser.unescape(page.content))
          pav=p.data[0]
          p.close()
        except:
          pav=_("--- tag h1 not found in page ---")
          parerr = True
        if not parerr:
          message = _("Message from:").encode("utf-8")+"\n"+urlhost2()
          attachment = {}
          attachment['name'] = pav.encode("utf-8")
          attachment['caption'] = os.environ['HTTP_HOST']
          attachment['link'] = urlhost2()+os.environ['PATH_INFO']
          attachment['picture'] = urlhost2()+fbputimgurl2
          attachment['description'] = ' '
          import fb
          obj = self
          aaa=fb.putwall(obj,message,attachment)
        else:
          aaa="<h1>Error</h1>%s" % (pav)
      else:
        aaa = _("not logged Facebook %(url)s") % {'url': '/auth/login?continue='+urllib.quote(self.request.uri)}
      greeting = greeting + aaa
      page.content = "%s" % (greeting)
      # Generate the appropriate template
      self.generate('view.html', lang, {
        'imgshar': False,
        'kalbos': page3,
        'menu': page2,
        'page': page,
        })
      return
    # View mode: append the "Put to Facebook Wall" button to the content.
    if mode == 'view':
      page.content = "%s<p> </p><p><a href=\"%s?mode=fbputwall\"><img src=\"%s/dynab?button_text=%s%s\" border=\"0\" alt=\"%s\"></img></a></p>\n" % (page.content,url_without_query_string,url_host,urllib.quote(_("Put to Facebook Wall").encode("utf-8")),imgopt,_("Put to Facebook Wall"))
    # If commenting is enabled for any social network, list the networks
    # and decide whether the current visitor may post (soccomshowform).
    soccomtext = ""
    soccomshowform = False
    if mode == 'view' and (page.commenablego or page.commenablefb or page.commenableli or page.commenablevk):
      # if hasattr(a, 'property'):
      soccomtext = "<div><h3>"+_("Commenting is turned on with a social networks logins:")
      if page.commenablego:
        soccomtext = soccomtext + " Google"
        user = users.get_current_user()
        if user:
          soccomshowform = True
      if page.commenablefb:
        soccomtext = soccomtext + " FaceBook"
        fb_current_user=self.fb_current_user
        if fb_current_user:
          soccomshowform = True
      if page.commenableli:
        soccomtext = soccomtext + " LinkedIn"
        li_current_user=self.li_current_user
        if li_current_user:
          soccomshowform = True
      if page.commenablevk:
        soccomtext = soccomtext + " VKontakte"
        vk_current_user=self.vk_current_user
        if vk_current_user:
          soccomshowform = True
      soccomtext = soccomtext + "</h3></div>"
      page.content = "%s%s" % (page.content,soccomtext)
    # Comment form, protected by a fresh confirmation-code key.
    soccomtext2 = ""
    if soccomshowform:
      codekey=codekey2()
      soccomtext2 = (_("page Comments form %(commsendurl)s %(commcodekey)s %(commbutsrc)s") % {'commsendurl': urlpath_without_query_string, 'commcodekey': codekey, 'commbutsrc': "src=\""+url_host+"/dynab?button_text="+urllib.quote(_("Submit Comment").encode("utf-8"))+imgopt+"\""})
    if mode == 'view' and (page.commenablego or page.commenablefb or page.commenableli or page.commenablevk):
      page.content = "%s%s<div><p><a href=\"%s?cmd=comments\"><img src=\"%s/dynab?button_text=%s%s\" border=\"0\" alt=\"%s\"></img></a></p></div>\n" % (page.content,soccomtext2,urlpath_without_query_string,url_host,urllib.quote(_("View Comments").encode("utf-8")),imgopt,_("View Comments"))
    # "?cmd=comments": append the paginated comment listing to the page.
    if self.request.get('cmd') == 'comments':
      rcomm = True
      userid = "0"
      content = ""
      pseudonimas = "Anonymous"
      user = users.get_current_user()
      if rcomm:
        yra=False
        wtext=""
        try:
          # Page number from the query string, digits only, default 0.
          pg=self.request.get('pg')
          entitiesRx = re.compile("[^0-9]")
          pg=entitiesRx.sub("", pg)
          if pg:
            pg = int(pg)
          else:
            pg=0
          # Fetch one page (10 comments) plus the total count.
          try:
            query = db.GqlQuery("SELECT * FROM Commentsrec3 WHERE commpage = :1 ORDER BY date DESC", page_name)
            greetings = query.fetch(10,pg*10)
            co=query.count()
          except:
            klaida=True
            co=0
            greetings = []
          # Build the pagination bar (one link per 10 comments).
          i=0
          ii=0
          bbb=""
          while i<=co:
            i=i+10
            if ii == pg:
              bbb=bbb+' '+str(ii)
            else:
              bbb=bbb+(" <a href=\"%s?cmd=comments&pg=%s\">%s</a>" % (urlpath_without_query_string,str(ii),str(ii)))
            ii=ii+1
          wtext=wtext+"<div><hr width=\"70%\"></hr></div>\n<div style=\"text-align: center;\">"+bbb+"</div>\n\n"
          for greeting in greetings:
            wijun = ""
            wdel = ""
            # A comment is visible when flagged visible, or to its author,
            # or to an administrator.
            if greeting.rodyti or (users.GetCurrentUser() and users.get_current_user() == greeting.author) or users.is_current_user_admin():
              if users.is_current_user_admin():
                wdel = _("Comments delete %(commswiturl)s %(commkey)s") % {'commswiturl': '/commswit', 'commkey': greeting.key()}
              if (users.GetCurrentUser() and users.get_current_user() == greeting.author) or users.is_current_user_admin():
                if not greeting.rodyti:
                  wijun = _("Comments show %(commswiturl)s %(commkey)s") % {'commswiturl': '/commswit', 'commkey': greeting.key()}
                else:
                  wijun = _("Comments hidden %(commswiturl)s %(commkey)s") % {'commswiturl': '/commswit', 'commkey': greeting.key()}
              # Resolve the author identity from whichever account kind
              # (site / Facebook / LinkedIn / VKontakte) is attached.
              user3 = greeting.vartot
              user3fb = greeting.vartotfb
              user3li = greeting.vartotli
              user3vk = greeting.vartotvk
              pseudonimas3 = "Anonymous"
              userid3 = '0'
              try:
                userid3 = user3.userid
                pseudonimas3 = user3.pseudonimas
              except:
                klaida=True
              wtext = wtext + "\n<div class=\"comm-container\">\n<div class=\"comm-name\">\n"
              if user3:
                imagemaxurl2 = ("/%s-userimagemin/%s/%s" % (cmspath2,pseudonimas3, userid3))
                userpageurl = ("%s/%s-userpage-%s.%s/%s/%s" % (urlhost2(), cmspath2,lang, fileext, pseudonimas3, userid3))
                wtext = wtext +("<a href=\"%s\"><img src=\"%s\" border=\"0\" alt=\"\"></img></a> <strong><img src=\"%s\" alt=\"\" border=\"0\"></img></strong> <strong>%s</strong><br />\n" % (userpageurl,imagemaxurl2,g16url,pseudonimas3))
              if user3fb:
                userid = user3fb.id
                # pseudonimas3 = user3fb.nickname
                pseudonimas3 = user3fb.name
                imagemaxurl2 = ("http://graph.facebook.com/%s/picture" % (userid))
                userpageurl = ("%s/fbinfo?id=%s" % (urlhost2(),userid))
                wtext = wtext +("<a href=\"%s\"><img src=\"%s\" border=\"0\" alt=\"\"></img></a> <strong><img src=\"%s\" alt=\"\" border=\"0\"></img></strong> <strong>%s</strong><br />\n" % (userpageurl,imagemaxurl2,fb16url,pseudonimas3))
              if user3li:
                userid = user3li.id
                ukey = user3li.key()
                # pseudonimas3 = user3li.nickname
                pseudonimas3 = user3li.name
                imagemaxurl2 = ("%s/liphoto2/%s" % (urlhost2(),ukey))
                userpageurl = user3li.profile_url
                wtext = wtext +("<a href=\"%s\"><img src=\"%s\" border=\"0\" alt=\"\"></img></a> <strong><img src=\"%s\" alt=\"\" border=\"0\"></img></strong> <strong>%s</strong><br />\n" % (userpageurl,imagemaxurl2,in16url,pseudonimas3))
              if user3vk:
                userid = user3vk.id
                ukey = user3vk.key()
                # pseudonimas3 = user3li.nickname
                pseudonimas3 = user3vk.name
                imagemaxurl2 = ("%s/vkphoto/%s" % (urlhost2(),userid))
                userpageurl = user3vk.profile_url
                wtext = wtext +("<a href=\"%s\"><img src=\"%s\" border=\"0\" alt=\"\"></img></a> <strong><img src=\"%s\" alt=\"\" border=\"0\"></img></strong> <strong>%s</strong><br />\n" % (userpageurl,imagemaxurl2,vk16url,pseudonimas3))
              wtext = wtext +("\n</div>\n<div class=\"font-small-gray\">%s</div>\n" % greeting.date.strftime("%a, %d %b %Y %H:%M:%S"))
              # Attached image: link to the large version when present.
              if greeting.avatar:
                if greeting.avatarmax:
                  wtext = wtext + ("<div class=\"font-small-gray\"><a href=\"/commimg?img_id=%s&size=yes\"><img src=\"/commimg?img_id=%s\" alt=\"\"></img></a></div>\n" % (greeting.key(),greeting.key()))
                else:
                  wtext = wtext + ("<div class=\"font-small-gray\"><img src=\"/commimg?img_id=%s\" alt=\"\"></img></div>\n" % greeting.key())
              wtext = wtext + ("\n<div class=\"comm-text\"><div>%s</div></div>\n" % greeting.content)
              # wtext = wtext + "</div><div class=\"clear\"><!-- --></div>\n"
              # edit: wtext = wtext + "\n<div>"+wijun+" " +wdel+"</div>\n\n"
              wtext = wtext + "<div> </div>\n</div>\n"
              yra=True
        except:
          yra=False
          errtext = ''.join(traceback.format_exception(*sys.exc_info())) #cgi.escape(str(sys.exc_info()[0]))
        if yra:
          commtext=("<div>%s</div>\n\t" % (wtext))
        else:
          commtext="<div>comments db error: %s</div>\n\t" % (errtext)
        page.content = "%s%s" % (page.content,commtext)
    # Generate the appropriate template
    self.generate(mode + '.html', lang, {
      'imgshar': False,
      'kalbos': page3,
      'menu': page2,
      'page': page,
      })
  def post(self, page_name, rparameters):
    # Decode language/extension parameters packed into the URL tail.
    param=urlparam(rparameters)
    ext=param['ext']
    lang=param['lang']
    aps=param['aps']
    kalb=param['kalb']
    lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
    _ = lang1.ugettext
    # User must be logged in to edit; comment posts (cmd=pagecomm) are
    # exempt from both login checks.
    if not users.GetCurrentUser() and not self.request.get('cmd') == 'pagecomm':
      # The GET version of this URI is just the view/edit mode, which is a
      # reasonable thing to redirect to
      self.redirect(users.CreateLoginURL(self.request.uri))
      return
    if not users.is_current_user_admin() and not self.request.get('cmd') == 'pagecomm':
      self.redirect(users.CreateLoginURL(self.request.uri))
      return
    if not page_name:
      # NOTE(review): no `return` after this redirect — execution falls
      # through with page_name empty; confirm whether that is intended.
      self.redirect('/')
    # Create or overwrite the page
    page_name = page_name+'-'+rparameters
    page = Page.load(page_name)
    # Comment submission: allowed only when the page enables commenting
    # for a network the visitor is actually signed in with.
    if self.request.get('cmd') == 'pagecomm' and ((page.commenablego and users.get_current_user()) or (page.commenablefb and self.fb_current_user) or (page.commenableli and self.li_current_user) or (page.commenablevk and self.vk_current_user)):
      user = users.get_current_user()
      if user:
        userid = user.user_id()
      else:
        userid = "0"
      fb_current_user=self.fb_current_user
      li_current_user=self.li_current_user
      vk_current_user=self.vk_current_user
      connt=""
      vartot = None
      vartkey=""
      # Look up the site-local user record (Vartotojai) for this user id.
      try:
        buvesapp = db.GqlQuery("SELECT * FROM Vartotojai WHERE userid = :1", userid)
        for app in buvesapp:
          vartkey=app.key()
          vartot = db.get(vartkey)
      except:
        klaida=True
      # Confirmation code issued by codekey2() when the form was rendered.
      try:
        codeimg = db.get(self.request.get("scodeid"))
      except:
        # NOTE(review): if db.get raised, `codeimg` is unbound and the
        # next line raises NameError — verify.
        prn="Error"
      if codeimg and codeimg.code == self.request.get("scode"):
        greeting = Commentsrec3()
        greeting.commpage = page_name
        greeting.vartot = vartot
        greeting.vartotfb = self.fb_current_user
        greeting.vartotli = self.li_current_user
        greeting.vartotvk = self.vk_current_user
        greeting.rodyti = True
        greeting.userid = userid
        greeting.ipadresas = os.environ['REMOTE_ADDR']
        # greeting.laikas = datetime.datetime.now()
        if users.get_current_user():
          greeting.author = users.get_current_user()
        # Escape, render BBCode and cap the comment at 400 characters.
        connt = cgi.escape(self.request.get("content"))
        connt = render_bbcode(connt)
        connt = connt[0:400]
        greeting.content = connt
        # priesduom = self.request.get("img")
        greeting.rname = "anonymous"
        # Optional image attachment: store a large and a thumbnail PNG.
        if self.request.get("img"):
          avatarmax = images.resize(self.request.get("img"), width=600, height=400, output_encoding=images.PNG)
          greeting.avatarmax = db.Blob(avatarmax)
          avatar = images.resize(self.request.get("img"), width=96, height=96, output_encoding=images.PNG)
          greeting.avatar = db.Blob(avatar)
        greeting.put()
        # Notify the site owner about the new comment by e-mail.
        to_addr = _mailrcptto
        user = users.get_current_user()
        if user:
          uname=user.nickname()
          umail=users.get_current_user().email()
        else:
          uname=""
          umail=""
        message = mail.EmailMessage()
        message.subject = os.environ['HTTP_HOST'] + " - comments"
        message.sender = _mailsender
        message.to = to_addr
        q_message = ""
        q_message = q_message + ("\n%s: %s \n%s \n%s \n" % ('Page', str(self.request.uri),str(textinfo()),str(textloc())))
        message.body = (_("Comments mail message %(communame)s %(commumail)s %(commrealname)s %(commmessage)s") % {'communame': uname,'commumail': umail,'commrealname': greeting.rname,'commmessage': greeting.content}) + q_message
        message.send()
    # self.redirect('/'+cmspath2+'-usercommpage-'+lang+'.'+fileext+'/'+pseudonimas+'/'+userid )
    # Regular (admin) edit: store the submitted content.
    if not self.request.get('cmd') == 'pagecomm':
      page.content = self.request.get('content')
      page.save()
    self.redirect(page.view_url())
class WikiExec(BaseRequestHandler):
  """Renders the version/about page ("pasaulis") with CMS credits decoded
  from an embedded base64 blob (contains %s slots for name/version)."""
  def get(self, rparameters):
    params = urlparam(rparameters)
    lang = params['lang']
    translation = gettext.translation(cmstrans2, locale_path, [params['kalb']], fallback=True)
    _ = translation.ugettext
    # values222 = { "name" : "world" }
    page = Page.loadnew("pasaulis")
    page.content = base64.decodestring("PGgxPkhlbGxvPC9oMT48cD5OZXJpamF1cyBUZXJlYmFzIC0gQ01TICIlcyIgLSAlcyAtIGJhc2VkICJjY2N3aWtpIiAoQnJldCBUYXlsb3IpLCAiaW1hZ2Vfc2hhcmluZyIgKEZyZWQgV3VsZmYpPC9wPg==") % (cmsname2,_version)
    page2 = Page.load("menu-%s.%s" % (lang, fileext))
    # Language-switch link list for the "ver" handler.
    page3 = Page.loadnew("kalbos")
    prefix = params['aps'] + '.' if len(params['aps']) > 0 else ''
    links = ''
    for name, value in kalbossort:
      links += _kalbhtml % ('ver', prefix + name, params['ext'], name, name)
    page3.content = links
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
      })
class WikiLogin(BaseRequestHandler):
  """Shows the sign-in / sign-out status page with the appropriate
  greeting for anonymous users, regular users and administrators."""
  def get(self, rparameters):
    params = urlparam(rparameters)
    lang = params['lang']
    translation = gettext.translation(cmstrans2, locale_path, [params['kalb']], fallback=True)
    _ = translation.ugettext
    user = users.get_current_user()
    if user:
      # Single computation covers both the plain and the admin greeting.
      admin = 'Administrator' if users.is_current_user_admin() else ''
      greeting = _("Welcome2 %(admin)s %(usernickname)s %(userlogouturl)s") % {'admin': admin, 'usernickname': user.nickname(), 'userlogouturl': users.create_logout_url(self.request.uri)}
    else:
      greeting = _("Sign in or register %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}
      greeting = greeting + "<br />" + (_("diferent accounts"))
    page = Page.loadnew("login")
    page.content = _("Login header %(greeting)s") % {'greeting': greeting}
    page2 = Page.load("menu-%s.%s" % (lang, fileext))
    # Language-switch links for this handler.
    page3 = Page.loadnew("kalbos")
    prefix = params['aps'] + '.' if len(params['aps']) > 0 else ''
    links = ''
    for name, value in kalbossort:
      links += _kalbhtml % ('login', prefix + name, params['ext'], name, name)
    page3.content = links
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
      })
class WikiEnv(BaseRequestHandler):
  """Environment-dump page: administrators see every CGI environment
  variable; everyone else gets a sign-in prompt."""
  def get(self, rparameters):
    params = urlparam(rparameters)
    lang = params['lang']
    translation = gettext.translation(cmstrans2, locale_path, [params['kalb']], fallback=True)
    _ = translation.ugettext
    page = Page.loadnew("env")
    user = users.get_current_user()
    if user and users.is_current_user_admin():
      # Dump the CGI environment, sorted by variable name.
      greeting = ''
      for name, value in sorted(os.environ.items()):
        greeting += "%s\t= %s <br/>" % (name, value)
    elif user:
      greeting = _("Welcome2 %(admin)s %(usernickname)s %(userlogouturl)s") % {'admin': '', 'usernickname': user.nickname(), 'userlogouturl': users.create_logout_url(self.request.uri)}
      greeting = greeting + " " + _("and") + " " + (_("sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)})
    else:
      greeting = _("Sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}
    page.content = _("Enviroment header %(greeting)s") % {'greeting': greeting}
    page2 = Page.load("menu-%s.%s" % (lang, fileext))
    # Language-switch links for this handler.
    page3 = Page.loadnew("kalbos")
    prefix = params['aps'] + '.' if len(params['aps']) > 0 else ''
    links = ''
    for name, value in kalbossort:
      links += _kalbhtml % ('env', prefix + name, params['ext'], name, name)
    page3.content = links
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
      })
class WikiFB(BaseRequestHandler):
  """Admin-only Facebook connection status page: shows whether a
  Facebook account is linked and offers the connect/disconnect link."""
  def get(self, rparameters):
    params = urlparam(rparameters)
    lang = params['lang']
    translation = gettext.translation(cmstrans2, locale_path, [params['kalb']], fallback=True)
    _ = translation.ugettext
    page = Page.loadnew("fb")
    user = users.get_current_user()
    if user and users.is_current_user_admin():
      fbu = self.fb_current_user
      if fbu:
        greeting = _("logged Facebook %(fbprofileurl)s %(fbpicurl)s %(fbname)s %(url)s") % {'fbprofileurl': fbu.profile_url,'fbpicurl': "http://graph.facebook.com/"+fbu.id+"/picture",'fbname': fbu.name,'url': '/auth/logout?continue='+urllib.quote(self.request.uri)}
      else:
        greeting = _("not logged Facebook %(url)s") % {'url': '/auth/login?continue='+urllib.quote(self.request.uri)}
    elif user:
      greeting = _("Welcome2 %(admin)s %(usernickname)s %(userlogouturl)s") % {'admin': '', 'usernickname': user.nickname(), 'userlogouturl': users.create_logout_url(self.request.uri)}
      greeting = greeting + " " + _("and") + " " + (_("sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)})
    else:
      greeting = _("Sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}
    page.content = _("Facebook header %(greeting)s") % {'greeting': greeting}
    page2 = Page.load("menu-%s.%s" % (lang, fileext))
    # Language-switch links for this handler.
    page3 = Page.loadnew("kalbos")
    prefix = params['aps'] + '.' if len(params['aps']) > 0 else ''
    links = ''
    for name, value in kalbossort:
      links += _kalbhtml % ('fb', prefix + name, params['ext'], name, name)
    page3.content = links
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
      })
class WikiLI(BaseRequestHandler):
  """Admin-only LinkedIn connection status page.

  Fixes a copy-paste defect: the page slug was "fb" (left over from the
  Facebook handler); it is now "li", matching every sibling handler's
  own slug.  The unused ``items = os.environ.items()`` local was removed.
  """
  def get(self, rparameters):
    param=urlparam(rparameters)
    ext=param['ext']
    lang=param['lang']
    aps=param['aps']
    kalb=param['kalb']
    lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
    _ = lang1.ugettext
    # FIX: was Page.loadnew("fb") — copy-paste from the Facebook handler.
    page = Page.loadnew("li")
    user = users.get_current_user()
    greeting = ''
    if user:
      if users.is_current_user_admin():
        li_current_user=self.li_current_user
        if li_current_user:
          aaa = _("logged LinkedIn %(liprofileurl)s %(lipicurl)s %(liname)s %(url)s") % {'liprofileurl': li_current_user.profile_url,'lipicurl': "/liphoto/"+li_current_user.id,'liname': li_current_user.name,'url': '/liauth/logout?continue='+urllib.quote(self.request.uri)}
        else:
          aaa = _("not logged LinkedIn %(url)s") % {'url': '/liauth/login?continue='+urllib.quote(self.request.uri)}
        greeting = greeting + aaa
      else:
        greeting = _("Welcome2 %(admin)s %(usernickname)s %(userlogouturl)s") % {'admin': '', 'usernickname': user.nickname(), 'userlogouturl': users.create_logout_url(self.request.uri)}
        greeting = greeting + " " + _("and") + " " + (_("sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)})
    else:
      greeting = _("Sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}
    page.content = _("LinkedIn header %(greeting)s") % {'greeting': greeting}
    page_name2 = 'menu'+'-'+lang+'.'+fileext
    page2 = Page.load(page_name2)
    # Language-switch links for this handler.
    page3 = Page.loadnew("kalbos")
    textaps=''
    if len(aps)>0:
      textaps=aps+'.'
    text=''
    for name, value in kalbossort:
      text = text + (_kalbhtml % ('li', textaps+name, ext, name, name))
    page3.content = text
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
      })
class WikiVK(BaseRequestHandler):
  """Admin-only VKontakte connection status page.

  Fixes a copy-paste defect: the language-switch links were built for
  handler slug 'li' (left over from the LinkedIn handler); they now use
  'vk'.  The unused ``items = os.environ.items()`` local was removed.
  """
  def get(self, rparameters):
    param=urlparam(rparameters)
    ext=param['ext']
    lang=param['lang']
    aps=param['aps']
    kalb=param['kalb']
    lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
    _ = lang1.ugettext
    page = Page.loadnew("vk")
    user = users.get_current_user()
    greeting = ''
    if user:
      if users.is_current_user_admin():
        vk_current_user=self.vk_current_user
        if vk_current_user:
          aaa = _("logged VKontakte %(vkprofileurl)s %(vkpicurl)s %(vkname)s %(url)s") % {'vkprofileurl': vk_current_user.profile_url,'vkpicurl': "/vkphoto/"+vk_current_user.id,'vkname': vk_current_user.name,'url': '/vkauth/logout?continue='+urllib.quote(self.request.uri)}
        else:
          aaa = _("not logged VKontakte %(url)s") % {'url': '/vkauth/login?continue='+urllib.quote(self.request.uri)}
        greeting = greeting + aaa
      else:
        greeting = _("Welcome2 %(admin)s %(usernickname)s %(userlogouturl)s") % {'admin': '', 'usernickname': user.nickname(), 'userlogouturl': users.create_logout_url(self.request.uri)}
        greeting = greeting + " " + _("and") + " " + (_("sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)})
    else:
      greeting = _("Sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}
    page.content = _("VKontakte header %(greeting)s") % {'greeting': greeting}
    page_name2 = 'menu'+'-'+lang+'.'+fileext
    page2 = Page.load(page_name2)
    # Language-switch links for this handler.
    page3 = Page.loadnew("kalbos")
    textaps=''
    if len(aps)>0:
      textaps=aps+'.'
    text=''
    for name, value in kalbossort:
      # FIX: was 'li' — copy-paste from the LinkedIn handler.
      text = text + (_kalbhtml % ('vk', textaps+name, ext, name, name))
    page3.content = text
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
      })
class WikiAdmin(BaseRequestHandler):
  """Administration landing page: administrators get the admin content
  block; everyone else gets a sign-in prompt."""
  def get(self, rparameters):
    params = urlparam(rparameters)
    lang = params['lang']
    translation = gettext.translation(cmstrans2, locale_path, [params['kalb']], fallback=True)
    _ = translation.ugettext
    page = Page.loadnew("admin")
    user = users.get_current_user()
    if user and users.is_current_user_admin():
      greeting = _("page admin content html %(cmspath)s") % {'cmspath': cmspath2}
    elif user:
      greeting = _("Welcome2 %(admin)s %(usernickname)s %(userlogouturl)s") % {'admin': '', 'usernickname': user.nickname(), 'userlogouturl': users.create_logout_url(self.request.uri)}
      greeting = greeting + " " + _("and") + " " + (_("sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)})
    else:
      greeting = _("Sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}
    page.content = _("Admin header %(greeting)s") % {'greeting': greeting}
    page2 = Page.load("menu-%s.%s" % (lang, fileext))
    # Language-switch links for this handler.
    page3 = Page.loadnew("kalbos")
    prefix = params['aps'] + '.' if len(params['aps']) > 0 else ''
    links = ''
    for name, value in kalbossort:
      links += _kalbhtml % ('admin', prefix + name, params['ext'], name, name)
    page3.content = links
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
      })
class WikiMod(BaseRequestHandler):
  """Serves pluggable "mod" components from the componets/ directory.

  A component named "foo" lives in componets/modfoo.py and must expose a
  class ``modobjfoo`` whose ``cont(handler)`` returns a dict with optional
  keys:
    'cont'  -- HTML body of the component page
    'name'  -- datastore page name to render under
    'title' -- title used when posting to a Facebook wall
    'descr' -- description used when posting to a Facebook wall
    'fpput' -- True to enable the "put to Facebook wall" button
  GET and POST were identical copy-pasted bodies; both now delegate to
  _render_mod() (external interface unchanged).
  """

  def get(self, modname, rparameters):
    self._render_mod(modname, rparameters)

  def post(self, modname, rparameters):
    self._render_mod(modname, rparameters)

  def _render_mod(self, modname, rparameters):
    """Loads the component, builds its page content and renders view.html."""
    param=urlparam(rparameters)
    ext=param['ext']
    lang=param['lang']
    aps=param['aps']
    kalb=param['kalb']
    # Per-request gettext catalog for the visitor's language.
    lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
    _ = lang1.ugettext
    sys.path.append(os.getcwd()+os.path.sep+"componets")
    import importlib
    # Restrict the module name to alphanumerics so arbitrary imports are
    # impossible (the name comes straight from the request URL).
    entitiesRx = re.compile("[^0-9a-zA-Z]")
    modname = entitiesRx.sub("", modname)
    modname2 = 'custommodule'
    # Fix: import once (the original imported in the try, then again after
    # it) and narrow the bare except so SystemExit/KeyboardInterrupt pass.
    try:
      moduleim = importlib.import_module("mod"+modname)
    except Exception:
      moduleim = None  # any import failure -> render the error page below
    if moduleim is not None:
      modmed = getattr(moduleim, "modobj"+modname)
      modresult = modmed().cont(self)
      # Fill in defaults for any keys the component omitted.
      if 'cont' in modresult and len(modresult['cont'])>0:
        modcont = modresult['cont']
      else:
        modcont = "<h1>Custom Compontet \"%s\"</h1>\n" % (modname)
      if 'name' in modresult and len(modresult['name'])>0:
        modname2 = modresult['name']
      else:
        modname2 = 'customcomponent'
      modfpput = ('fpput' in modresult and modresult['fpput']==True)
      if 'title' in modresult and len(modresult['title'])>0:
        modtitle = modresult['title']
      else:
        modtitle = 'Custom Compontet'
      if 'descr' in modresult and len(modresult['descr'])>0:
        moddescr = modresult['descr']
      else:
        moddescr = ' '
      aaa = "%s" % (modcont)
      # Only 'view' and 'fbputwall' are valid modes; anything else -> 'view'.
      modes = ['view', 'fbputwall']
      mode = self.request.get('mode')
      if not mode in modes:
        mode = 'view'
      if mode == 'fbputwall' and modfpput:
        fb_current_user=self.fb_current_user
        if fb_current_user:
          pav = modtitle
          aaa = _("logged Facebook %(fbprofileurl)s %(fbpicurl)s %(fbname)s %(url)s") % {'fbprofileurl': fb_current_user.profile_url,'fbpicurl': "http://graph.facebook.com/"+fb_current_user.id+"/picture",'fbname': fb_current_user.name,'url': '/auth/logout?continue='+urllib.quote(self.request.uri)}
          # Build the wall-post attachment and hand it to the fb helper.
          # (The original's `parerr` flag was always False; the dead error
          # branch and the duplicated `obj = self` were removed.)
          message = _("Message from:").encode("utf-8")+"\n"+urlhost2()
          attachment = {}
          attachment['name'] = pav.encode("utf-8")
          attachment['caption'] = os.environ['HTTP_HOST']
          attachment['link'] = urlhost2()+os.environ['PATH_INFO']
          attachment['picture'] = urlhost2()+fbputimgurl2
          attachment['description'] = moddescr.encode("utf-8")
          import fb
          aaa=fb.putwall(self,message,attachment)
        else:
          aaa = _("not logged Facebook %(url)s") % {'url': '/auth/login?continue='+urllib.quote(self.request.uri)}
      if mode == 'view' and modfpput:
        # Append the "put to Facebook wall" button image/link.
        aaa = "%s<p> </p><p><a href=\"%s%s?mode=fbputwall\"><img src=\"%s/dynab?button_text=%s%s\" border=\"0\" alt=\"%s\"></img></a></p>" % (aaa,urlhost2(),os.environ['PATH_INFO'],urlhost2(),urllib.quote(_("Put to Facebook Wall").encode("utf-8")),imgopt,_("Put to Facebook Wall"))
    else:
      aaa = "<h1>Compontet \"%s\" load error</h1>\n" % (modname)
    page = Page.loadnew(modname2)
    page.content = "%s" % (aaa)
    # Per-language menu page plus the language-switcher link list.
    page_name2 = 'menu'+'-'+lang+'.'+fileext
    page2 = Page.load(page_name2)
    page3 = Page.loadnew("kalbos")
    textaps=''
    if len(aps)>0:
      textaps=aps+'.'
    text=''
    for name, value in kalbossort:
      text = text + (_kalbhtml % ('mod'+modname, textaps+name, ext, name, name))
    page3.content = text
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
    })
class ListDir(BaseRequestHandler):
  """Admin-only directory listing page.

  Lists the entries of the directory named by the ?ls= query parameter.
  SECURITY NOTE(review): the parameter is passed to os.listdir() without
  any normalization, so any path on the instance can be listed -- the
  only guard is the is_current_user_admin() check below; confirm that is
  acceptable.
  """
  def get(self, rparameters):
    # Unpack extension / language / suffix values from the URL parameters.
    param=urlparam(rparameters)
    ext=param['ext']
    lang=param['lang']
    aps=param['aps']
    kalb=param['kalb']
    # Per-request gettext catalog for the visitor's language.
    lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
    _ = lang1.ugettext
    page = Page.loadnew("list")
    user = users.get_current_user()
    greeting = ''
    if user:
      if users.is_current_user_admin():
        # One "<name> <br />" line per directory entry, sorted.
        items = os.listdir(self.request.get("ls"))
        items.sort()
        for name in items:
          aaa = "%s <br />\n" % (name)
          greeting = greeting + aaa
      else:
        greeting = _("Welcome2 %(admin)s %(usernickname)s %(userlogouturl)s") % {'admin': '', 'usernickname': user.nickname(), 'userlogouturl': users.create_logout_url(self.request.uri)}
        greeting = greeting + " " + _("and") + " " + (_("sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)})
    else:
      greeting = _("Sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}
    page.content = _("List header %(greeting)s") % {'greeting': greeting}
    # Per-language menu page plus the language-switcher link list.
    page_name2 = 'menu'+'-'+lang+'.'+fileext
    page2 = Page.load(page_name2)
    page3 = Page.loadnew("kalbos")
    textaps=''
    if len(aps)>0:
      textaps=aps+'.'
    text=''
    for name, value in kalbossort:
      text = text + (_kalbhtml % ('ls', textaps+name, ext, name, name))
    page3.content = text
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
    })
class Greeting(db.Model):
  # Guestbook entry: Google account of the author (None for anonymous),
  # the message text, and the creation timestamp (set automatically).
  author = db.UserProperty()
  content = db.StringProperty(multiline=True)
  date = db.DateTimeProperty(auto_now_add=True)
class SingGuestbook(BaseRequestHandler):
  """Handles guestbook form submissions.

  Signed-in users get their 'content' field stored as a Greeting entity
  and see a confirmation; anonymous visitors see a login prompt.  (The
  original constructed a throwaway Greeting() unconditionally before the
  sign-in check; that redundant instantiation was removed.)
  """
  def post(self, rparameters):
    # Unpack extension / language / suffix values from the URL parameters.
    param=urlparam(rparameters)
    ext=param['ext']
    lang=param['lang']
    aps=param['aps']
    kalb=param['kalb']
    # Per-request gettext catalog for the visitor's language.
    lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
    _ = lang1.ugettext
    aaa=""
    if users.get_current_user():
      # Persist the submitted message attributed to the signed-in user.
      greeting = Greeting()
      greeting.author = users.get_current_user()
      greeting.content = self.request.get('content')
      greeting.put()
      aaa = _("Guestbook 2")
    else:
      aaa = _("Guestbook 1 %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}
    page = Page.loadnew("guestbook")
    page.content = aaa
    page_name2 = 'menu'+'-'+lang+'.'+fileext
    page2 = Page.load(page_name2)
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'menu': page2,
      'page': page,
    })
class WikiGuest(BaseRequestHandler):
  """Guestbook listing page: paginated Greeting entries, 10 per page."""
  def get(self, rparameters):
    # Unpack extension / language / suffix values from the URL parameters.
    param=urlparam(rparameters)
    ext=param['ext']
    lang=param['lang']
    aps=param['aps']
    kalb=param['kalb']
    # Per-request gettext catalog for the visitor's language.
    lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
    _ = lang1.ugettext
    aaa=""
    # Sanitize ?pg= down to digits before using it as a page index.
    pg=self.request.get('pg')
    entitiesRx = re.compile("[^0-9]")
    pg=entitiesRx.sub("", pg)
    if pg:
      pg = int(pg)
    else:
      pg=0
    query = db.GqlQuery("SELECT * "
            "FROM Greeting "
            "ORDER BY date DESC")
    greetings = query.fetch(10,pg*10)
    # query = Greeting.all()
    co=query.count()
    # Build the numeric pager: current page as plain text, others as links.
    i=0
    ii=0
    bbb=""
    while i<=co:
      i=i+10
      if ii == pg:
        bbb=bbb+' '+str(ii)
      else:
        bbb=bbb+' '+"<a href=\""+'/'+cmspath2+'-guestbook-'+lang+'.'+fileext+"?pg="+ str(ii) +"\">"+ str(ii) +"</a>"
      ii=ii+1
    aaa=aaa+"<center>"+bbb+"</center><br />\n"
    for greeting in greetings:
      # Attribution line (named or anonymous), then the escaped message.
      if greeting.author:
        ccc1=''
        ccc1=_("Guestbook 3 %(greetingusernickname)s post") % {'greetingusernickname': greeting.author.nickname()}
        aaa=aaa+ccc1
      else:
        aaa=aaa + _("Guestbook 4 anonymous post")
      aaa=aaa+('<blockquote>%s</blockquote>' %
               cgi.escape(greeting.content))
    # Footer: post form link for signed-in users, login prompt otherwise.
    if users.get_current_user():
      aaa=aaa+(_("Guestbook 5 %(guestsendurl)s") % {'guestsendurl': '/'+cmspath2+'-sing-'+lang+'.'+fileext})
    else:
      ccc2 = ''
      ccc2 = _("Guestbook 6 %(guestuserloginurl)s") % {'guestuserloginurl': users.create_login_url(self.request.uri)}
      aaa=aaa+ccc2
    page = Page.loadnew("guestbook")
    page.content = _("Guestbook header %(guestgreeting)s") % {'guestgreeting': aaa}
    # Per-language menu page plus the language-switcher link list.
    page_name2 = 'menu'+'-'+lang+'.'+fileext
    page2 = Page.load(page_name2)
    page3 = Page.loadnew("kalbos")
    textaps=''
    if len(aps)>0:
      textaps=aps+'.'
    text=''
    for name, value in kalbossort:
      text = text + (_kalbhtml % ('guestbook', textaps+name, ext, name, name))
    page3.content = text
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
    })
class MailForm(BaseRequestHandler):
  """Renders the contact form with a fresh CAPTCHA code key.

  The form body is the "pasl-<lang>" page content followed by a localized
  form snippet that embeds the send URL and the CAPTCHA key; submission is
  handled by MailSend.
  """
  def get(self, rparameters):
    # Unpack extension / language / suffix values from the URL parameters.
    param=urlparam(rparameters)
    ext=param['ext']
    lang=param['lang']
    aps=param['aps']
    kalb=param['kalb']
    # Per-request gettext catalog for the visitor's language.
    lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
    _ = lang1.ugettext
    # Fresh CAPTCHA entity key for this form instance.
    codekey=codekey2()
    page2 = Page.load("pasl-"+lang+'.'+fileext)
    page = Page.loadnew("mailform")
    user = users.get_current_user()
    greeting = ''
    # The login requirement was deliberately disabled (commented out):
    # if user:
    greeting = _("Mail form %(mailsendurl)s %(mailcodekey)s") % {'mailsendurl': '/'+cmspath2+'-sendmail-'+lang+'.'+fileext,'mailcodekey': codekey}
    # else:
    # greeting = "<p><a href=\""+users.create_login_url(self.request.uri)+u"\">Please login</a> with Google account.</p>"
    page.content = u""+page2.content+greeting+""
    # Per-language menu page plus the language-switcher link list.
    page_name2 = 'menu'+'-'+lang+'.'+fileext
    page2 = Page.load(page_name2)
    page3 = Page.loadnew("kalbos")
    textaps=''
    if len(aps)>0:
      textaps=aps+'.'
    text=''
    for name, value in kalbossort:
      text = text + (_kalbhtml % ('mailform', textaps+name, ext, name, name))
    page3.content = text
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
    })
class MailSend(BaseRequestHandler):
  """Sends the contact-form mail after verifying the image CAPTCHA code.

  The form posts the CAPTCHA entity key ('scodeid') and the code typed by
  the visitor ('scode').  On a match the CAPTCHA entity is deleted (one
  use only) and the message is mailed to _mailrcptto; otherwise an error
  page is rendered.
  """
  # @login_required
  def post(self, rparameters):
    # Unpack extension / language / suffix values from the URL parameters.
    param=urlparam(rparameters)
    ext=param['ext']
    lang=param['lang']
    aps=param['aps']
    kalb=param['kalb']
    # Per-request gettext catalog for the visitor's language.
    lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
    _ = lang1.ugettext
    # Bug fix: codeimg must be bound even when db.get() raises (bad or
    # missing key) -- the original except branch only set an unused local,
    # so the `if codeimg` test below raised NameError.
    codeimg = None
    try:
      codeimg = db.get(self.request.get("scodeid"))
    except Exception:
      codeimg = None  # invalid key -> treated as failed verification
    if codeimg and codeimg.code == self.request.get("scode"):
      codeimg.delete()  # one-shot CAPTCHA: consume it on success
      x_zmail = self.request.get("zemail")
      x_subject = self.request.get("zsubject")
      x_realname = self.request.get("zrealname")
      x_message = self.request.get("zmessage")
      to_addr = _mailrcptto
      user = users.get_current_user()
      if user:
        uname=user.nickname()
        umail=users.get_current_user().email()
      else:
        uname=""
        umail=""
      if not mail.is_email_valid(to_addr):
        # NOTE(review): an invalid configured recipient is silently
        # ignored here, matching the original behaviour.
        pass
      message = mail.EmailMessage()
      message.subject = os.environ['HTTP_HOST'] + " maiform - " +x_subject.encode("utf-8")
      message.sender = _mailsender
      message.to = to_addr
      q_uname = uname
      q_umail = umail
      q_zmail = x_zmail
      q_realname = x_realname
      # Append the request URI plus server diagnostics to the visitor text.
      q_message = x_message + ("\n%s: %s \n%s \n%s \n" % ('Page', str(self.request.uri),str(textinfo()),str(textloc())))
      message.body = (_("Mail message %(mailuname)s %(mailumail)s %(mailrealname)s %(mailzmail)s %(mailmessage)s") % {'mailuname': q_uname, 'mailumail': q_umail, 'mailrealname': q_realname, 'mailzmail': q_zmail, 'mailmessage': q_message})
      message.send()
      ptext=_("Mail send OK")
    else:
      ptext=_("Mail send Error")
    page = Page.loadnew("sendmail")
    page.content = ptext
    page_name2 = 'menu'+'-'+lang+'.'+fileext
    page2 = Page.load(page_name2)
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'menu': page2,
      'page': page,
    })
class Page(object):
  """Our abstraction for a Wiki page.

  We handle all datastore operations so that new pages are handled
  seamlessly.  To create or edit a page, just create a Page instance and
  call save().
  """
  def __init__(self, name, entity=None):
    # Wraps an existing datastore entity, or initializes a brand-new page.
    self.name = name
    # NOTE(review): this instance attribute shadows the entity() method
    # defined below, making that method unreachable on instances.
    self.entity = entity
    if entity:
      self.content = entity['content']
      if entity.has_key('user'):
        self.user = entity['user']
      else:
        self.user = None
      self.created = entity['created']
      self.modified = entity['modified']
      self.sitemaprodyti=entity['sitemaprodyti']
      self.rssrodyti=entity['rssrodyti']
      self.sitemapfreq=entity['sitemapfreq']
      self.sitemapprio=entity['sitemapprio']
      # Comment-system flags were added later, so older entities may lack
      # them; default each missing one to False.
      if "commenablego" in entity:
        self.commenablego=entity['commenablego']
      else:
        self.commenablego=False
      if "commenablefb" in entity:
        self.commenablefb=entity['commenablefb']
      else:
        self.commenablefb=False
      if "commenableli" in entity:
        self.commenableli=entity['commenableli']
      else:
        self.commenableli=False
      if "commenablevk" in entity:
        self.commenablevk=entity['commenablevk']
      else:
        self.commenablevk=False
    else:
      # New pages should start out with a simple title to get the user going
      now = datetime.datetime.now()
      if not name=="menu":
        self.content = '<h1>' + cgi.escape(name) + '</h1>'
      # NOTE(review): when name == "menu", self.content is never set here;
      # presumably the caller fills it in -- verify before relying on it.
      self.user = None
      self.created = now
      self.modified = now
      self.rssrodyti=False
      self.sitemaprodyti=False
      self.sitemapfreq='weekly'
      self.sitemapprio='0.5'
      self.commenablego=False
      self.commenablefb=False
      self.commenableli=False
      self.commenablevk=False
  def entity(self):
    # Dead code in practice: __init__ always binds self.entity to the
    # underlying datastore entity (or None), shadowing this method.
    return self.entity
  def edit_url(self):
    # URL of the edit view for this page.
    return '/'+cmspath2+'-' + self.name + '?mode=edit'
  def view_url(self):
    # URL of the read-only view for this page.
    return '/'+cmspath2+'-' + self.name
  def wikified_content(self):
    """Applies our wiki transforms to our content for HTML display.

    All transforms are currently disabled (the list below is empty), so
    this returns the content unchanged.
    """
    transforms = [
      # AutoLink(),
      # WikiWords(),
      # HideReferers(),
    ]
    content = self.content
    for transform in transforms:
      content = transform.run(content)
    return content
  def save(self):
    """Creates or edits this page in the datastore."""
    now = datetime.datetime.now()
    if self.entity:
      entity = self.entity
    else:
      # First save: create the entity and stamp its creation time.
      entity = datastore.Entity('Page')
      entity['name'] = self.name
      entity['created'] = now
    entity['rssrodyti'] = self.rssrodyti
    entity['sitemaprodyti'] = self.sitemaprodyti
    entity['sitemapfreq'] = self.sitemapfreq
    entity['sitemapprio'] = self.sitemapprio
    entity['commenablego'] = self.commenablego
    entity['commenablefb'] = self.commenablefb
    entity['commenableli'] = self.commenableli
    entity['commenablevk'] = self.commenablevk
    entity['content'] = datastore_types.Text(self.content)
    entity['modified'] = now
    # Record the editor if signed in; otherwise drop any stale user field.
    if users.GetCurrentUser():
      entity['user'] = users.GetCurrentUser()
    elif entity.has_key('user'):
      del entity['user']
    datastore.Put(entity)
  @staticmethod
  def loadnew(name):
    # In-memory page that is not backed by (and not looked up in) the
    # datastore until save() is called.
    return Page(name)
  @staticmethod
  def load(name):
    """Loads the page with the given name.

    We always return a Page instance, even if the given name isn't yet in
    the database.  In that case, the Page object will be created when
    save() is called.
    """
    query = datastore.Query('Page')
    query['name ='] = name
    entities = query.Get(1)
    if len(entities) < 1:
      return Page(name)
    else:
      return Page(name, entities[0])
  @staticmethod
  def exists(name):
    """Returns true if the page with the given name exists in the datastore."""
    return Page.load(name).entity
class Transform(object):
  """Base class for regular-expression driven text transforms.

  A subclass supplies two things:
    regexp         -- compiled pattern selecting the spans to rewrite
    replace(match) -- returns the replacement string for one match
  run() rewrites every non-overlapping match, left to right.  Because
  replace() is ordinary code, a transform can be smarter than a plain
  regex substitution -- e.g. look a WikiWord up before deciding whether
  to turn it into a link.
  """
  def run(self, content):
    """Returns *content* with every regexp match rewritten via replace().

    re.sub with a callable replacement applies replace() to each match
    object and splices its return value in verbatim -- the same result
    as manually walking finditer() and joining the pieces.
    """
    return self.regexp.sub(self.replace, content)
class WikiWords(Transform):
  """Turns CamelCase WikiWords into links.

  A word is only linked when a page with that exact name already exists
  in the datastore; unknown WikiWords pass through untouched.
  """
  def __init__(self):
    self.regexp = re.compile(r'[A-Z][a-z]+([A-Z][a-z]+)+')
  def replace(self, match):
    word = match.group(0)
    # Guard clause: leave the text alone unless the page exists.
    if not Page.exists(word):
      return word
    return '<a class="wikiword" href="/%s">%s</a>' % (word, word)
class AutoLink(Transform):
  """Wraps bare http/https URLs in anchor tags.

  The leading character captured by group 1 (anything but a double quote,
  which keeps already-attributed URLs out) is preserved in front of the
  generated link.
  """
  def __init__(self):
    self.regexp = re.compile(r'([^"])\b((http|https)://[^ \t\n\r<>\(\)&"]+' \
                             r'[^ \t\n\r<>\(\)&"\.])')
  def replace(self, match):
    prefix = match.group(1)
    target = match.group(2)
    return '%s<a class="autourl" href="%s">%s</a>' % (prefix, target, target)
class HideReferers(Transform):
  """Routes external hyperlinks through a Google redirect.

  Rewriting href="http..." targets to a google.com/url redirect keeps the
  wiki's URL out of the destination site's referer logs.
  """
  def __init__(self):
    self.regexp = re.compile(r'href="(http[^"]+)"')
  def replace(self, match):
    target = match.group(1)
    # The parse result is not used; the call is retained from the
    # original implementation.
    urlparse.urlparse(target)
    redirect = 'http://www.google.com/url?sa=D&q=' + urllib.quote(target)
    return 'href="%s"' % redirect
class VarId(BaseRequestHandler):
  """One-off maintenance handler: backfills Vartotojai.userid.

  Walks every Vartotojai entity (newest first), copies the Google
  user_id of its 'lankytojas' user property into 'userid', and re-puts
  the entity.  Renders a trivial confirmation page afterwards.
  """
  def get(self, rparameters):
    # Unpack extension / language / suffix values from the URL parameters.
    param=urlparam(rparameters)
    ext=param['ext']
    lang=param['lang']
    aps=param['aps']
    kalb=param['kalb']
    # Per-request gettext catalog for the visitor's language.
    lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
    _ = lang1.ugettext
    query = db.GqlQuery("SELECT * "
            "FROM Vartotojai "
            "ORDER BY laikas DESC")
    # NOTE(review): unpaginated full-table rewrite -- may time out on a
    # large Vartotojai kind.
    for greeting in query:
      greeting.userid=greeting.lankytojas.user_id()
      greeting.put()
    page = Page.loadnew("suradimas")
    page.content = u'<h1>Suradimas</h1>'+""
    # Per-language menu page plus the language-switcher link list.
    page_name2 = 'menu'+'-'+lang+'.'+fileext
    page2 = Page.load(page_name2)
    page3 = Page.loadnew("kalbos")
    textaps=''
    if len(aps)>0:
      textaps=aps+'.'
    text=''
    for name, value in kalbossort:
      text = text + (_kalbhtml % ('searchid', textaps+name, ext, name, name))
    page3.content = text
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
    })
class VartSar(BaseRequestHandler):
  """Admin-only member list: paginated Vartotojai entries, 10 per page."""
  def get(self, rparameters):
    # Unpack extension / language / suffix values from the URL parameters.
    param=urlparam(rparameters)
    ext=param['ext']
    lang=param['lang']
    aps=param['aps']
    kalb=param['kalb']
    # Per-request gettext catalog for the visitor's language.
    lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
    _ = lang1.ugettext
    aaa=""
    # Sanitize ?pg= down to digits before using it as a page index.
    pg=self.request.get('pg')
    entitiesRx = re.compile("[^0-9]")
    pg=entitiesRx.sub("", pg)
    if pg:
      pg = int(pg)
    else:
      pg=0
    query = db.GqlQuery("SELECT * "
            "FROM Vartotojai "
            "ORDER BY laikas DESC")
    greetings = query.fetch(10,pg*10)
    co=query.count()
    # Numeric pager: the current page as plain text, the rest as links.
    i=0
    ii=0
    bbb=""
    while i<=co:
      i=i+10
      if ii == pg:
        bbb=bbb+' '+str(ii)
      else:
        bbb=bbb+' '+"<a href=\"/"+cmspath2+"-memberlist-"+lang+'.'+fileext+"?pg="+ str(ii) +"\">"+ str(ii) +"</a>"
      ii=ii+1
    aaa=aaa+"<center>"+bbb+"</center><br />\n"
    for greeting in greetings:
      buvoapp = greeting.rodyti
      userid = greeting.userid
      content = greeting.content
      pseudonimas = greeting.pseudonimas
      # Prefer the member's uploaded avatar; otherwise the default image.
      if buvoapp:
        imagemaxurl = ("/"+cmspath2+"-userimagemin/%s/%s" % (pseudonimas, userid))
      else:
        imagemaxurl = avatarminurl2
      if greeting.lankytojas:
        # Try the Google-profile photo first, falling back to the avatar.
        thubnail=getphoto(greeting.lankytojas.email())
        if not thubnail:
          thubnail = imagemaxurl
        userpageurl = ("%s/%s-userpage-%s.%s/%s/%s" % (urlhost2(), cmspath2,lang, fileext, pseudonimas, userid))
        userpicaurl = ("%s/picaenable2?user=%s" % (urlhost2(), greeting.lankytojas.email()))
        userplusurl = ("%s/avatar2?user=%s" % (urlhost2(), greeting.lankytojas.email()))
        aaa=aaa+(("<a href=\"%s\"><img src=\"%s\"+ border=\"0\" alt=\"\"></img><img src=\"%s\"+ border=\"0\" alt=\"\"></img><br />\n\n<strong>%s</strong></a> <a href=\"%s\">Plus</a> <a href=\"%s\">Picasa</a><br />google user: <b>%s</b> email: %s") % (userpageurl,imagemaxurl,thubnail,pseudonimas,userplusurl,userpicaurl,greeting.lankytojas.nickname(),greeting.lankytojas.email()))
      else:
        aaa=aaa+''
      # Per-entry metadata: timestamp, IP (linked to the log filter), browser.
      iplink = ("<a href=\"%s/logs3?filter=%s\">%s</a>" % (urlhost2(), greeting.ipadresas,greeting.ipadresas))
      aaa=aaa+(_("Memberlist entry msg %(memlisttime)s %(memlistipaddr)s %(memlistbrowser)s") % {'memlisttime': greeting.laikas, 'memlistipaddr': iplink, 'memlistbrowser': greeting.narsykle})
    # Non-admins never see the list: replace it with a login prompt.
    if not users.get_current_user() or not users.is_current_user_admin():
      aaa = _("Sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}
    page = Page.loadnew("memberlist")
    page.content = _("Memberlist header %(memlistgreeting)s") % {'memlistgreeting': aaa}
    # Per-language menu page plus the language-switcher link list.
    page_name2 = 'menu'+'-'+lang+'.'+fileext
    page2 = Page.load(page_name2)
    page3 = Page.loadnew("kalbos")
    textaps=''
    if len(aps)>0:
      textaps=aps+'.'
    text=''
    for name, value in kalbossort:
      text = text + (_kalbhtml % ('memberlist', textaps+name, ext, name, name))
    page3.content = text
    self.generate('view.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
    })
class VartSarTrumpas(BaseRequestHandler):
  """Compact member list widget: avatar grid (2 columns), 8 per page.

  Rendered with viewicon.html so it can be embedded (e.g. in an iframe);
  each avatar links to the member's user page.
  """
  def get(self, rparameters):
    # Unpack extension / language / suffix values from the URL parameters.
    param=urlparam(rparameters)
    ext=param['ext']
    lang=param['lang']
    aps=param['aps']
    kalb=param['kalb']
    # Per-request gettext catalog for the visitor's language.
    lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
    _ = lang1.ugettext
    aaa=""
    # Sanitize ?pg= down to digits before using it as a page index.
    pg=self.request.get('pg')
    entitiesRx = re.compile("[^0-9]")
    pg=entitiesRx.sub("", pg)
    if pg:
      pg = int(pg)
    else:
      pg=0
    query = db.GqlQuery("SELECT * "
            "FROM Vartotojai "
            "ORDER BY laikas DESC")
    greetings = query.fetch(8,pg*8)
    co=query.count()
    # NOTE(review): the pager string bbb is built but never emitted (its
    # append below is commented out) -- dead code kept for reference.
    i=0
    ii=0
    bbb=""
    while i<=co:
      i=i+8
      if ii == pg:
        bbb=bbb+' '+str(ii)
      else:
        bbb=bbb+' '+"<a href=\"/"+cmspath2+"-memberlistshort-"+lang+'.'+fileext+"?pg="+ str(ii) +"\">"+ str(ii) +"</a>"
      ii=ii+1
    # aaa=aaa+"<center>"+bbb+"</center><br />\n"
    aaa=aaa+"<table cellspacing=\"0\" cellpadding=\"0\">\n"
    # z tracks the column position: 1 opens a row, 2 closes it.
    z = 0
    for greeting in greetings:
      z = z + 1
      if z==1:
        aaa=aaa+"<tr>\n"
      buvoapp = greeting.rodyti
      userid = greeting.userid
      content = greeting.content
      pseudonimas = greeting.pseudonimas
      # Uploaded avatar wins; otherwise the default image.
      if buvoapp:
        imagemaxurl = ("/%s-userimagemin/%s/%s" % (cmspath2,pseudonimas, userid))
      else:
        imagemaxurl = avatarminurl2
      if greeting.lankytojas:
        thubnail=getphoto(greeting.lankytojas.email())
        if thubnail and not buvoapp:
          # Use the profile photo, rewritten from the 144px to the 50px
          # size variant in its URL.
          imagemaxurl = str(thubnail)
          uphoto=imagemaxurl.split("/s144/", 1)
          slasas="/s50/"
          imagemaxurl = slasas.join(uphoto)
        userpageurl = ("%s/%s-userpage-%s.%s/%s/%s" % (urlhost2(), cmspath2 ,lang, fileext, pseudonimas, userid))
        aaa=aaa+(("<td width=\"50\"><a href=\"%s\" target=\"_top\"><img src=\"%s\" border=\"0\" alt=\"%s\"></img></a></td>\n") % (userpageurl,imagemaxurl,pseudonimas))
      else:
        aaa=aaa+''
      if z==2:
        z=0
        aaa=aaa+"\n</tr>"
      # aaa=aaa+(_("Memberlist entry msg %(memlisttime)s %(memlistipaddr)s %(memlistbrowser)s") % {'memlisttime': greeting.laikas, 'memlistipaddr': greeting.ipadresas, 'memlistbrowser': greeting.narsykle})
    # Odd number of entries: pad the dangling row with an empty cell.
    if z==1:
      aaa=aaa+"<td width=\"50\"> </td></tr>"
    # if z==0:
    # aaa=aaa+"\n</tr>"
    aaa=aaa+"\n</table>"
    # if not users.get_current_user() or not users.is_current_user_admin():
    # aaa = _("Sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}
    page = Page.loadnew("memberlist")
    page.content = aaa
    # Per-language menu page plus the language-switcher link list.
    page_name2 = 'menu'+'-'+lang+'.'+fileext
    page2 = Page.load(page_name2)
    page3 = Page.loadnew("kalbos")
    textaps=''
    if len(aps)>0:
      textaps=aps+'.'
    text=''
    for name, value in kalbossort:
      text = text + (_kalbhtml % ('memberlist', textaps+name, ext, name, name))
    page3.content = text
    self.generate('viewicon.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
    })
class FBUserListSort(BaseRequestHandler):
  """Compact Facebook member grid (2 columns), 8 FBUser entities per page.

  Avatars come from the Facebook Graph picture endpoint; each links to
  the local /fbinfo page.  Rendered with viewicon.html for embedding.
  """
  def get(self, rparameters):
    # Unpack extension / language / suffix values from the URL parameters.
    param=urlparam(rparameters)
    ext=param['ext']
    lang=param['lang']
    aps=param['aps']
    kalb=param['kalb']
    # Per-request gettext catalog for the visitor's language.
    lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
    _ = lang1.ugettext
    aaa=""
    # Sanitize ?pg= down to digits before using it as a page index.
    pg=self.request.get('pg')
    entitiesRx = re.compile("[^0-9]")
    pg=entitiesRx.sub("", pg)
    if pg:
      pg = int(pg)
    else:
      pg=0
    query = db.GqlQuery("SELECT * "
            "FROM FBUser "
            "ORDER BY updated DESC")
    greetings = query.fetch(8,pg*8)
    co=query.count()
    # NOTE(review): the pager string bbb is built but never emitted (its
    # append below is commented out) -- dead code kept for reference.
    i=0
    ii=0
    bbb=""
    while i<=co:
      i=i+8
      if ii == pg:
        bbb=bbb+' '+str(ii)
      else:
        bbb=bbb+' '+"<a href=\"/"+cmspath2+"-fbmemberlistshort-"+lang+'.'+fileext+"?pg="+ str(ii) +"\">"+ str(ii) +"</a>"
      ii=ii+1
    # aaa=aaa+"<center>"+bbb+"</center><br />\n"
    aaa=aaa+"<table cellspacing=\"0\" cellpadding=\"0\">\n"
    # z tracks the column position: 1 opens a row, 2 closes it.
    z = 0
    for greeting in greetings:
      z = z + 1
      if z==1:
        aaa=aaa+"<tr>\n"
      userid = greeting.id
      pseudonimas = greeting.nickname
      imagemaxurl = ("http://graph.facebook.com/%s/picture" % (userid))
      if greeting.id:
        # userpageurl = ("http://www.facebook.com/profile.php?id=%s" % (userid))
        userpageurl = ("%s/fbinfo?id=%s" % (urlhost2(),userid))
        aaa=aaa+(("<td width=\"50\"><a href=\"%s\" target=\"_top\"><img src=\"%s\" border=\"0\" alt=\"%s\"></img></a></td>\n") % (userpageurl,imagemaxurl,pseudonimas))
      else:
        aaa=aaa+''
      if z==2:
        z=0
        aaa=aaa+"\n</tr>"
      # aaa=aaa+(_("Memberlist entry msg %(memlisttime)s %(memlistipaddr)s %(memlistbrowser)s") % {'memlisttime': greeting.laikas, 'memlistipaddr': greeting.ipadresas, 'memlistbrowser': greeting.narsykle})
    # Odd number of entries: pad the dangling row with an empty cell.
    if z==1:
      aaa=aaa+"<td width=\"50\"> </td></tr>"
    # if z==0:
    # aaa=aaa+"\n</tr>"
    aaa=aaa+"\n</table>"
    # if not users.get_current_user() or not users.is_current_user_admin():
    # aaa = _("Sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}
    page = Page.loadnew("fbmemberlist")
    page.content = aaa
    # Per-language menu page plus the language-switcher link list.
    page_name2 = 'menu'+'-'+lang+'.'+fileext
    page2 = Page.load(page_name2)
    page3 = Page.loadnew("kalbos")
    textaps=''
    if len(aps)>0:
      textaps=aps+'.'
    text=''
    for name, value in kalbossort:
      text = text + (_kalbhtml % ('fbmemberlist', textaps+name, ext, name, name))
    page3.content = text
    self.generate('viewicon.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
    })
class LIUserListSort(BaseRequestHandler):
  """Compact LinkedIn member grid (2 columns), 8 LIUser entities per page.

  Avatars are served via the local /liphoto2 handler when that module is
  deployed, otherwise the default avatar is used.  Rendered with
  viewicon.html for embedding.
  """
  def get(self, rparameters):
    # Unpack extension / language / suffix values from the URL parameters.
    param=urlparam(rparameters)
    ext=param['ext']
    lang=param['lang']
    aps=param['aps']
    kalb=param['kalb']
    # Per-request gettext catalog for the visitor's language.
    lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
    _ = lang1.ugettext
    aaa=""
    # Sanitize ?pg= down to digits before using it as a page index.
    pg=self.request.get('pg')
    entitiesRx = re.compile("[^0-9]")
    pg=entitiesRx.sub("", pg)
    if pg:
      pg = int(pg)
    else:
      pg=0
    query = db.GqlQuery("SELECT * "
            "FROM LIUser "
            "ORDER BY updated DESC")
    greetings = query.fetch(8,pg*8)
    co=query.count()
    # NOTE(review): the pager string bbb is built but never emitted, and
    # its links point at "fbmemberlistshort" -- presumably copy-pasted
    # from the FB handler; verify the intended route before reviving it.
    i=0
    ii=0
    bbb=""
    while i<=co:
      i=i+8
      if ii == pg:
        bbb=bbb+' '+str(ii)
      else:
        bbb=bbb+' '+"<a href=\"/"+cmspath2+"-fbmemberlistshort-"+lang+'.'+fileext+"?pg="+ str(ii) +"\">"+ str(ii) +"</a>"
      ii=ii+1
    # aaa=aaa+"<center>"+bbb+"</center><br />\n"
    aaa=aaa+"<table cellspacing=\"0\" cellpadding=\"0\">\n"
    # z tracks the column position: 1 opens a row, 2 closes it.
    z = 0
    for greeting in greetings:
      z = z + 1
      if z==1:
        aaa=aaa+"<tr>\n"
      ukey = greeting.key()
      userid = greeting.id
      pseudonimas = greeting.nickname
      profile_url = greeting.profile_url
      liuname = greeting.name
      # Use the local photo proxy only when liphoto2.py is deployed.
      directory = os.path.dirname(__file__)
      pathimg = os.path.join(directory, 'liphoto2.py')
      if os.path.exists(pathimg) and os.path.isfile(pathimg):
        imagemaxurl = ("%s/liphoto2/%s" % (urlhost2(),ukey))
      else:
        imagemaxurl = ("%s%s" % (urlhost2(),avatarminurl2))
      if greeting.id:
        # userpageurl = ("http://www.facebook.com/profile.php?id=%s" % (userid))
        userpageurl = profile_url
        aaa=aaa+(("<td width=\"50\"><a href=\"%s\" target=\"_top\"><img src=\"%s\" border=\"0\" alt=\"%s\"></img></a></td>\n") % (userpageurl,imagemaxurl,pseudonimas))
      else:
        aaa=aaa+''
      if z==2:
        z=0
        aaa=aaa+"\n</tr>"
      # aaa=aaa+(_("Memberlist entry msg %(memlisttime)s %(memlistipaddr)s %(memlistbrowser)s") % {'memlisttime': greeting.laikas, 'memlistipaddr': greeting.ipadresas, 'memlistbrowser': greeting.narsykle})
    # Odd number of entries: pad the dangling row with an empty cell.
    if z==1:
      aaa=aaa+"<td width=\"50\"> </td></tr>"
    # if z==0:
    # aaa=aaa+"\n</tr>"
    aaa=aaa+"\n</table>"
    # if not users.get_current_user() or not users.is_current_user_admin():
    # aaa = _("Sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}
    page = Page.loadnew("limemberlist")
    page.content = aaa
    # Per-language menu page plus the language-switcher link list.
    # NOTE(review): the switcher slug is 'fbmemberlist' here, not a
    # LinkedIn-specific one -- likely another copy-paste; confirm.
    page_name2 = 'menu'+'-'+lang+'.'+fileext
    page2 = Page.load(page_name2)
    page3 = Page.loadnew("kalbos")
    textaps=''
    if len(aps)>0:
      textaps=aps+'.'
    text=''
    for name, value in kalbossort:
      text = text + (_kalbhtml % ('fbmemberlist', textaps+name, ext, name, name))
    page3.content = text
    self.generate('viewicon.html', lang, {
      'imgshar': False,
      'noedit': '1',
      'application_name': siteauth(),
      'kalbos': page3,
      'menu': page2,
      'page': page,
    })
class VKUserListSort(BaseRequestHandler):
 """Renders a paged member list of VK (vkontakte.ru) users, newest first."""
 def get(self, rparameters):
  """Build the member-list HTML and hand it to the template renderer.

  URL parameters (via urlparam): ext, lang, aps, kalb; query-string
  parameter 'pg' selects the page (8 users per page, 2 avatars per row).
  """
  param=urlparam(rparameters)
  ext=param['ext']
  lang=param['lang']
  aps=param['aps']
  kalb=param['kalb']
  # Per-request gettext catalogue for the requested language.
  lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
  _ = lang1.ugettext
  aaa=""
  # Sanitize 'pg' to digits only; default to page 0.
  pg=self.request.get('pg')
  entitiesRx = re.compile("[^0-9]")
  pg=entitiesRx.sub("", pg)
  if pg:
   pg = int(pg)
  else:
   pg=0
  query = db.GqlQuery("SELECT * "
      "FROM VKUser "
      "ORDER BY updated DESC")
  greetings = query.fetch(8,pg*8)
  co=query.count()
  # Pager: one link per page of 8; the current page is rendered as plain text.
  i=0
  ii=0
  bbb=""
  while i<=co:
   i=i+8
   if ii == pg:
    bbb=bbb+' '+str(ii)
   else:
    bbb=bbb+' '+"<a href=\"/"+cmspath2+"-vkmemberlistshort-"+lang+'.'+fileext+"?pg="+ str(ii) +"\">"+ str(ii) +"</a>"
   ii=ii+1
  # aaa=aaa+"<center>"+bbb+"</center><br />\n"
  # Two-column avatar table; z toggles row open (z==1) / close (z==2).
  aaa=aaa+"<table cellspacing=\"0\" cellpadding=\"0\">\n"
  z = 0
  for greeting in greetings:
   z = z + 1
   if z==1:
    aaa=aaa+"<tr>\n"
   userid = greeting.id
   pseudonimas = greeting.nickname
   # Serve the avatar through the /vkphoto handler only when that module
   # is deployed next to this file; otherwise use the default avatar URL.
   directory = os.path.dirname(__file__)
   pathimg = os.path.join(directory, 'vkphoto.py')
   if os.path.exists(pathimg) and os.path.isfile(pathimg):
    imagemaxurl = ("/vkphoto/%s" % (userid))
   else:
    imagemaxurl = ("%s%s" % (urlhost2(),avatarminurl2))
   if greeting.id:
    # userpageurl = ("http://www.facebook.com/profile.php?id=%s" % (userid))
    # userpageurl = ("%s/fbinfo?id=%s" % (urlhost2(),userid))
    userpageurl = greeting.profile_url
    aaa=aaa+(("<td width=\"50\"><a href=\"%s\" target=\"_top\"><img src=\"%s\" border=\"0\" alt=\"%s\"></img></a></td>\n") % (userpageurl,imagemaxurl,pseudonimas))
   else:
    aaa=aaa+''
   if z==2:
    z=0
    aaa=aaa+"\n</tr>"
   # aaa=aaa+(_("Memberlist entry msg %(memlisttime)s %(memlistipaddr)s %(memlistbrowser)s") % {'memlisttime': greeting.laikas, 'memlistipaddr': greeting.ipadresas, 'memlistbrowser': greeting.narsykle})
  # Close a half-filled final row with an empty cell.
  if z==1:
   aaa=aaa+"<td width=\"50\"> </td></tr>"
  # if z==0:
  #  aaa=aaa+"\n</tr>"
  aaa=aaa+"\n</table>"
  # if not users.get_current_user() or not users.is_current_user_admin():
  #  aaa = _("Sign in on Administrator %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}
  page = Page.loadnew("vkmemberlist")
  page.content = aaa
  # Language menu plus per-language links back to this page.
  page_name2 = 'menu'+'-'+lang+'.'+fileext
  page2 = Page.load(page_name2)
  page3 = Page.loadnew("kalbos")
  textaps=''
  if len(aps)>0:
   textaps=aps+'.'
  text=''
  for name, value in kalbossort:
   text = text + (_kalbhtml % ('vkmemberlist', textaps+name, ext, name, name))
  page3.content = text
  self.generate('viewicon.html', lang, {
   'imgshar': False,
   'noedit': '1',
   'application_name': siteauth(),
   'kalbos': page3,
   'menu': page2,
   'page': page,
   })
class SiteDisable(webapp.RequestHandler):
 """Serves the "site disabled" placeholder page for GET and POST.

 The page markup is looked up in the DinCode datastore kind
 (codename 'disable'); if the lookup fails or no entity exists,
 a built-in fallback page is served instead.
 """
 # Fallback markup served when the datastore lookup yields nothing.
 # (Text kept byte-identical to the historical default, typo included.)
 _FALLBACK = "<html><body>Disable, swith to on</body></html>"
 def _disablecode(self):
  """Return the configured disable-page HTML, or the fallback on any error."""
  disablecode = self._FALLBACK
  try:
   codedb = db.GqlQuery("SELECT * FROM DinCode WHERE codename = :1", "disable")
   for thiscode in codedb:
    disablecode = thiscode.codetext
  except Exception:
   # Best effort: any datastore error falls back to the default page.
   disablecode = self._FALLBACK
  return disablecode
 def get(self,pagename):
  self.response.out.write(self._disablecode())
 def post(self,pagename):
  self.response.out.write(self._disablecode())
class HttpError(webapp.RequestHandler):
 """Responds to both GET and POST with a static over-quota notice."""
 # Static body returned for every request handled by this class.
 _MESSAGE = "<html><body>over quota - website flood botnet</body></html>"
 def get(self,pagename):
  self.response.out.write(self._MESSAGE)
 def post(self,pagename):
  self.response.out.write(self._MESSAGE)
class PicaAlbumOn(db.Model):
 """Datastore flag: whether one of a user's Picasa albums is publicly listed."""
 lankytojas = db.UserProperty(required=True)  # "visitor": Google account owning the album
 laikas = db.DateTimeProperty(auto_now_add=True)  # record creation timestamp
 administratorius = db.BooleanProperty()  # True when the owner was an app admin at save time
 ipadresas = db.StringProperty()  # client IP address at submit time
 userid = db.StringProperty()  # Google user_id of the owner
 rodyti = db.BooleanProperty()  # "show": album is visible on the public pica page
 albumname = db.StringProperty()  # Picasa album name this flag applies to
class SpamIP(db.Model):
 """Datastore record tracking an IP address seen by the spam checker."""
 ipadresas = db.StringProperty()  # the IP address in question
 lastserver = db.StringProperty()  # last blacklist server consulted/reporting
 date = db.DateTimeProperty(auto_now_add=True)  # first-seen timestamp
 check = db.BooleanProperty()  # whether the address has been checked
 spamcount = db.StringProperty()  # reported spam count (stored as a string)
 spam = db.BooleanProperty()  # verdict: address considered spam
class Commentsrec(db.Model):
 """A comment/guestbook entry with an optional attached image."""
 # laikas = db.DateTimeProperty(auto_now_add=True)
 author = db.UserProperty()  # Google account of the poster, if signed in
 content = db.StringProperty(multiline=True)  # comment body (BBCode-rendered HTML)
 rname = db.StringProperty(multiline=False)  # display name / nickname
 avatar = db.BlobProperty()  # small (thumbnail) version of the attached image
 avatarmax = db.BlobProperty()  # large version of the attached image
 date = db.DateTimeProperty(auto_now_add=True)  # post timestamp
 ipadresas = db.StringProperty()  # poster's IP address
 rodyti = db.BooleanProperty()  # "show": comment is publicly visible
class Commentsrec2(db.Model):
 """A comment posted on a user's profile page (Vartotojai-based accounts)."""
 # laikas = db.DateTimeProperty(auto_now_add=True)
 vartot = db.ReferenceProperty(Vartotojai, collection_name='komentarai')  # profile owner the comment is attached to
 author = db.UserProperty()  # Google account of the poster, if signed in
 userid = db.StringProperty()  # profile owner's user id (string form)
 content = db.StringProperty(multiline=True)  # comment body (BBCode-rendered HTML)
 rname = db.StringProperty(multiline=False)  # poster's display name / nickname
 avatar = db.BlobProperty()  # small (thumbnail) version of the attached image
 avatarmax = db.BlobProperty()  # large version of the attached image
 date = db.DateTimeProperty(auto_now_add=True)  # post timestamp
 ipadresas = db.StringProperty()  # poster's IP address
 rodyti = db.BooleanProperty()  # "show": comment is publicly visible
class Commentsrec3(db.Model):
 """A page comment that may reference an account from any auth provider.

 Exactly one of the vartot* references is expected to be set, depending
 on whether the poster signed in via Google, Facebook, LinkedIn or VK —
 presumably; verify against the code that writes these records.
 """
 # laikas = db.DateTimeProperty(auto_now_add=True)
 commpage = db.StringProperty()  # identifier of the page the comment belongs to
 vartot = db.ReferenceProperty(Vartotojai, collection_name='komentarai-go')  # Google account reference
 vartotfb = db.ReferenceProperty(FBUser, collection_name='komentarai-fb')  # Facebook account reference
 vartotli = db.ReferenceProperty(LIUser, collection_name='komentarai-li')  # LinkedIn account reference
 vartotvk = db.ReferenceProperty(VKUser, collection_name='komentarai-vk')  # VK account reference
 author = db.UserProperty()  # Google account of the poster, if signed in
 userid = db.StringProperty()  # poster's user id (string form)
 content = db.StringProperty(multiline=True)  # comment body (BBCode-rendered HTML)
 rname = db.StringProperty(multiline=False)  # poster's display name / nickname
 avatar = db.BlobProperty()  # small (thumbnail) version of the attached image
 avatarmax = db.BlobProperty()  # large version of the attached image
 date = db.DateTimeProperty(auto_now_add=True)  # post timestamp
 ipadresas = db.StringProperty()  # poster's IP address
 rodyti = db.BooleanProperty()  # "show": comment is publicly visible
def getplius(useris):
 """Resolve a Picasa username/email to its Google+ profile URL.

 Fetches the user's public Picasa album feed and scrapes the numeric
 account id out of the feed's 'alternate' link.

 Returns:
  'https://plus.google.com/<numeric id>' on success, False on any
  failure (network error, no regex match, unexpected feed markup).
 """
 try:
  f = urllib.urlopen("http://picasaweb.google.com/data/feed/api/user/%s?kind=album" % useris)
  data=f.read()
  # The 'alternate' link in the feed points at picasaweb.google.com/<id>;
  # group(2) captures "google.com/<id>" to build the plus.google.com URL.
  r = re.compile("(alternate.*)(google.com/[\d]*)(\'/)")
  return "https://plus."+r.search(data).group(2)
 except Exception:
  # Best effort: any failure (IOError, AttributeError from a missed
  # regex match, ...) simply reports "no Google+ profile found".
  return False
class UserControl(BaseRequestHandler):
 """User control panel: profile-edit form for the signed-in Google user."""
 def get(self,rparameters):
  """Render the control-panel form, or a sign-in prompt when logged out."""
  # self.response.out.write('<html><head><style>body { text-align: center; font: 11px arial, sans-serif; color: #565656; } .clear { clear:both; } .comm-container { margin-bottom:20px;} .comm-name { font-size:10pt; float:left; width:20%; padding:5px; overflow:hidden; } .comm-text { float:left; line-height:17px; width:70%; padding:5px; padding-top:0px; overflow:hidden; } .font-small-gray { font-size:10pt !important; }</style></head><body>')
  param=urlparam(rparameters)
  ext=param['ext']
  lang=param['lang']
  aps=param['aps']
  kalb=param['kalb']
  # Per-request gettext catalogue for the requested language.
  lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
  _ = lang1.ugettext
  wtext=""
  codekey=codekey2()
  # Defaults used when no profile data is available for the visitor.
  buvoapp = False
  rpica = False
  rplus = True
  rcomm = True
  userid = "0"
  content = ""
  pseudonimas = "Anonymous"
  thubnail = avatarmaxurl2
  thubnail2 = avatarmaxurl2
  eee=""
  rpicacheck = ""
  rcommcheck = ""
  buvoappcheck=""
  youtname=""
  if users.get_current_user():
   user = users.get_current_user()
   userinfo2=userinfo(user ,False,lang,ext)
   # Inject every userinfo field (pseudonimas, userid, imagemaxurl,
   # usercpplustext, content2, rpluscheck, userpageurl, ...) as a local.
   # NOTE(review): exec on dict keys is fragile/unsafe if keys are not
   # trusted identifiers; the names used below exist only via this loop.
   for key, val in userinfo2.items():
    try:
     exec(key + '=val')
    except Exception, e:
     err=''
   usercppicaurl = ("/%s-userpicacontrol-%s.%s" % (cmspath2,lang,fileext))
   eee=str(user.email())
   eee = eee.strip()
   # Fetch the Google profile photo and rescale its URL from s144 to s200.
   try:
    thubnail=getphoto(eee)
    # wtext = wtext + eee+ " "+str(thubnail) +"<br />"
    thubnail2 = str(thubnail)
    uphoto=thubnail2.split("/s144/", 1)
    slasas="/s200/"
    thubnail2 = slasas.join(uphoto)
   except:
    klaida=True
  if users.get_current_user():
   wtext = wtext + _("user control panel header") + ("<br />\n<img src=\"%s\" border=\"0\" alt=\"\"></img><img src=\"%s\" border=\"0\" alt=\"\"></img><br />\n\n%s" % (imagemaxurl,thubnail2,usercpplustext))+(_("User control panel form %(usercpsendurl)s %(usercpcodekey)s %(usercpuserid)s %(usercpcontent)s %(usercppseudonimas)s %(rpluscheck)s %(rpicacheck)s %(buvoappcheck)s %(youtname)s %(rcommcheck)s") % {'usercpsendurl': '/'+cmspath2+'-usercpsubmit-'+lang+'.'+fileext, 'usercpcodekey': codekey, 'usercpuserid': userid, 'usercpcontent': content2,'usercppseudonimas': pseudonimas,'rpluscheck': rpluscheck,'rpicacheck': rpicacheck,'buvoappcheck': buvoappcheck,'youtname': youtname,'rcommcheck': rcommcheck})
   if rpica:
    wtext = wtext + (_("pica control link %(usercppicaurl)s") % {'usercppicaurl': usercppicaurl})
   if userid != "0":
    wtext = wtext + (_("vartotojo puslapis %(usercppseudonimas)s %(userpageurl)s") % {'usercppseudonimas': pseudonimas, 'userpageurl': userpageurl})
  else:
   wtext = wtext + _("user control panel header") + "<br />" + (_("Sign in or register %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)})
  page = Page.loadnew("usercontrolpanel")
  page.content = wtext
  # Language menu plus per-language links back to this page.
  page_name2 = 'menu'+'-'+lang+'.'+fileext
  page2 = Page.load(page_name2)
  page3 = Page.loadnew("kalbos")
  textaps=''
  if len(aps)>0:
   textaps=aps+'.'
  text=''
  for name, value in kalbossort:
   text = text + (_kalbhtml % ('usercontrolpanel', textaps+name, ext, name, name))
  page3.content = text
  self.generate('view.html', lang, {
   'imgshar': False,
   'noedit': '1',
   'application_name': siteauth(),
   'kalbos': page3,
   'menu': page2,
   'page': page,
   })
class UserShowPage(BaseRequestHandler):
 """Public profile page for a user identified by nickname + key in the URL."""
 def get(self,rparameters, pseudonim , pic_key):
  """Render the public profile of the user referenced by pic_key."""
  # self.response.out.write('<html><head><style>body { text-align: center; font: 11px arial, sans-serif; color: #565656; } .clear { clear:both; } .comm-container { margin-bottom:20px;} .comm-name { font-size:10pt; float:left; width:20%; padding:5px; overflow:hidden; } .comm-text { float:left; line-height:17px; width:70%; padding:5px; padding-top:0px; overflow:hidden; } .font-small-gray { font-size:10pt !important; }</style></head><body>')
  param=urlparam(rparameters)
  ext=param['ext']
  lang=param['lang']
  aps=param['aps']
  kalb=param['kalb']
  # Per-request gettext catalogue for the requested language.
  lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
  _ = lang1.ugettext
  wtext=""
  codekey=codekey2()
  userinfo2=userinfo(pic_key, True,lang,ext)
  # Inject userinfo fields (pseudonimas, userid, content, imagemaxurl,
  # usercpplustext, usercpurl, userpicapagetext, usermailformtext,
  # userpageend, ...) as locals. NOTE(review): names below exist only
  # via this exec loop — fragile if the dict keys change.
  for key, val in userinfo2.items():
   try:
    exec(key + '=val')
   except Exception, e:
    err=''
  wtext = wtext + (_("user page header %(pseudonimas)s") % {'pseudonimas': pseudonimas}) + ("<br />\n<img src=\"%s\" border=\"0\" id=\"profile_pic\" alt=\"\"></img><br />\n\n%s" % (imagemaxurl,usercpplustext))+(_("User page %(usercpuserid)s %(usercpcontent)s %(usercppseudonimas)s %(usercpurl)s %(userpicapagetext)s %(usermailformtext)s") % { 'usercpuserid': userid, 'usercpcontent': content,'usercppseudonimas': pseudonimas, 'usercpurl': usercpurl, 'userpicapagetext': userpicapagetext,'usermailformtext': usermailformtext})
  page = Page.loadnew("userpage")
  page.content = wtext
  # Language menu plus per-language links back to this page.
  page_name2 = 'menu'+'-'+lang+'.'+fileext
  page2 = Page.load(page_name2)
  page3 = Page.loadnew("kalbos")
  textaps=''
  if len(aps)>0:
   textaps=aps+'.'
  text=''
  for name, value in kalbossort:
   text = text + (_kalbhtml % ('userpage', textaps+name, userpageend, name, name))
  page3.content = text
  self.generate('view.html', lang, {
   'imgshar': False,
   'noedit': '1',
   'application_name': siteauth(),
   'kalbos': page3,
   'menu': page2,
   'page': page,
   })
class UserYoutPage(BaseRequestHandler):
 """Profile sub-page listing the user's YouTube uploads (GData v2 API)."""
 def get(self,rparameters, pseudonim , pic_key):
  """Render the user's YouTube upload list, or an error placeholder."""
  # self.response.out.write('<html><head><style>body { text-align: center; font: 11px arial, sans-serif; color: #565656; } .clear { clear:both; } .comm-container { margin-bottom:20px;} .comm-name { font-size:10pt; float:left; width:20%; padding:5px; overflow:hidden; } .comm-text { float:left; line-height:17px; width:70%; padding:5px; padding-top:0px; overflow:hidden; } .font-small-gray { font-size:10pt !important; }</style></head><body>')
  param=urlparam(rparameters)
  ext=param['ext']
  lang=param['lang']
  aps=param['aps']
  kalb=param['kalb']
  # Per-request gettext catalogue for the requested language.
  lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
  _ = lang1.ugettext
  wtext=""
  # codekey=codekey2()
  userinfo2=userinfo(pic_key, True,lang,ext)
  # Inject userinfo fields (youtname, pseudonimas, userid, content,
  # imagemaxurl, usercpplustext, usercpurl, userpageurl, userpageend, ...)
  # as locals via exec. NOTE(review): fragile if dict keys change.
  for key, val in userinfo2.items():
   try:
    exec(key + '=val')
   except Exception, e:
    err=''
  usercppicatext=""
  if youtname and len(str(youtname))>0:
   yra=False
   out=""
   # Fetch the user's uploads feed and build an HTML table of videos.
   try:
    if not yra:
     f = urllib.urlopen("https://gdata.youtube.com/feeds/api/users/%s/uploads?v=2&alt=jsonc" % youtname)
     data = json.loads(f.read())
     out=out+"<table>"
     for item in data['data']['items']:
      out=out+"<tr><td>Video Title: </td><td>%s</td></tr>" % (item['title'])
      out=out+"<tr><td>Video Category: </td><td>%s</td></tr>" % (item['category'])
      out=out+"<tr><td>Video ID: </td><td>%s</td></tr>" % (item['id'])
      if item.has_key('rating'):
       out=out+"<tr><td>Video Rating: </td><td>%f</td></tr>" % (item['rating'])
      out=out+"<tr><td>Embed URL: </td><td><a href=\"%s\">link to Youtube</a></td></tr>" % (item['player']['default'])
      out=out+"<tr><td> </td><td> </td></tr>"
     out=out+"</table>"
     yra=True
   except:
    # Best effort: any network/JSON/key error shows the placeholder below.
    yra=False
   if yra:
    usercppicatext=("<div>%s</div>\n\t" % (out))
   else:
    usercppicatext="<div>Youtube not found or error</div>\n\t";
  wtext = wtext + (_("user yout page header %(pseudonimas)s") % {'pseudonimas': pseudonimas}) + ("<br />\n<img src=\"%s\" border=\"0\" id=\"profile_pic\" alt=\"\"></img><br />\n\n%s" % (imagemaxurl,usercpplustext))+(_("User yout page %(usercpuserid)s %(usercpcontent)s %(usercppseudonimas)s %(usercpurl)s %(usercppicatext)s") % { 'usercpuserid': userid, 'usercpcontent': content,'usercppseudonimas': pseudonimas, 'usercpurl': usercpurl, 'usercppicatext': usercppicatext})
  wtext = wtext + (_("vartotojo puslapis %(usercppseudonimas)s %(userpageurl)s") % {'usercppseudonimas': pseudonimas, 'userpageurl': userpageurl})
  page = Page.loadnew("useryoutpage")
  page.content = wtext
  # Language menu plus per-language links back to this page.
  page_name2 = 'menu'+'-'+lang+'.'+fileext
  page2 = Page.load(page_name2)
  page3 = Page.loadnew("kalbos")
  textaps=''
  if len(aps)>0:
   textaps=aps+'.'
  text=''
  for name, value in kalbossort:
   text = text + (_kalbhtml % ('useryoutpage', textaps+name, userpageend, name, name))
  page3.content = text
  self.generate('view.html', lang, {
   'imgshar': False,
   'noedit': '1',
   'application_name': siteauth(),
   'kalbos': page3,
   'menu': page2,
   'page': page,
   })
class UserCommPage(BaseRequestHandler):
 """Profile sub-page listing comments posted on a user's profile, paged."""
 def get(self,rparameters, pseudonim , pic_key):
  """Render the paged comment list (10 per page) plus the submit form."""
  # self.response.out.write('<html><head><style>body { text-align: center; font: 11px arial, sans-serif; color: #565656; } .clear { clear:both; } .comm-container { margin-bottom:20px;} .comm-name { font-size:10pt; float:left; width:20%; padding:5px; overflow:hidden; } .comm-text { float:left; line-height:17px; width:70%; padding:5px; padding-top:0px; overflow:hidden; } .font-small-gray { font-size:10pt !important; }</style></head><body>')
  param=urlparam(rparameters)
  ext=param['ext']
  lang=param['lang']
  aps=param['aps']
  kalb=param['kalb']
  # Per-request gettext catalogue for the requested language.
  lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
  _ = lang1.ugettext
  wtext=""
  userinfo2=userinfo(pic_key, True,lang,ext)
  # Inject userinfo fields (rcomm, vartot, pseudonimas, userid, content,
  # imagemaxurl, usercpplustext, usercpurl, userpageurl, userpageend, ...)
  # as locals via exec. NOTE(review): fragile if dict keys change.
  for key, val in userinfo2.items():
   try:
    exec(key + '=val')
   except Exception, e:
    err=''
  # NOTE(review): if rcomm is falsy, usercppicatext is never assigned and
  # the wtext line after this block raises NameError — confirm rcomm is
  # always present/true for valid profiles.
  if rcomm:
   yra=False
   wtext=""
   try:
    # Sanitize 'pg' to digits only; default to page 0.
    pg=self.request.get('pg')
    entitiesRx = re.compile("[^0-9]")
    pg=entitiesRx.sub("", pg)
    if pg:
     pg = int(pg)
    else:
     pg=0
    try:
     query = db.GqlQuery("SELECT * FROM Commentsrec2 WHERE vartot = :1 ORDER BY date DESC", vartot)
     # query = db.GqlQuery("SELECT * FROM Commentsrec WHERE rodyti = :1, author = :2 ORDER BY date DESC", '1',users.GetCurrentUser())
     greetings = query.fetch(10,pg*10)
     co=query.count()
    except:
     # Datastore error: render an empty comment list.
     klaida=True
     co=0
     greetings = []
    # Pager: one link per page of 10; current page is plain text.
    i=0
    ii=0
    bbb=""
    while i<=co:
     i=i+10
     if ii == pg:
      bbb=bbb+' '+str(ii)
     else:
      bbb=bbb+' '+"<a href=\"/"+cmspath2+"-usercommpage-"+lang+'.'+fileext+'/'+pseudonimas+'/'+userid+"?pg="+ str(ii) +"\">"+ str(ii) +"</a>"
     ii=ii+1
    # page2 = Page.load("atsi-"+lang+'.'+fileext)
    wtext=wtext+"<div><hr width=\"70%\"></hr></div>\n<div><div style=\"text-align: center;\">"+bbb+"</div>\n\n"
    for greeting in greetings:
     wijun = ""
     wdel = ""
     # A comment is rendered when it is public, or the viewer is its
     # author, the profile owner, or a site admin.
     if greeting.rodyti or (users.GetCurrentUser() and users.get_current_user() == greeting.author) or (users.GetCurrentUser() and users.get_current_user() == vartot.lankytojas) or users.is_current_user_admin():
      if users.is_current_user_admin():
       wdel = _("Comments delete %(commswiturl)s %(commkey)s") % {'commswiturl': '/commswit', 'commkey': greeting.key()}
      # Owner/author/admin additionally get a show/hide toggle link.
      if (users.GetCurrentUser() and users.get_current_user() == vartot.lankytojas) or (users.GetCurrentUser() and users.get_current_user() == greeting.author) or users.is_current_user_admin():
       if not greeting.rodyti:
        wijun = _("Comments show %(commswiturl)s %(commkey)s") % {'commswiturl': '/commswit', 'commkey': greeting.key()}
       else:
        wijun = _("Comments hidden %(commswiturl)s %(commkey)s") % {'commswiturl': '/commswit', 'commkey': greeting.key()}
      # Look up the poster's registered nickname/id for avatar + link.
      user3 = greeting.author
      pseudonimas3 = "Anonymous"
      userid3 = '0'
      try:
       buvesapp = db.GqlQuery("SELECT * FROM Vartotojai WHERE lankytojas = :1", user3)
       for app in buvesapp:
        userid3 = app.userid
        pseudonimas3 = app.pseudonimas
      except:
       klaida=True
      imagemaxurl2 = ("/%s-userimagemin/%s/%s" % (cmspath2,pseudonimas3, userid3))
      userpageurl = ("%s/%s-userpage-%s.%s/%s/%s" % (urlhost2(), cmspath2,lang, fileext, pseudonimas3, userid3))
      wtext = wtext + "\n<div class=\"comm-container\">"
      wtext = wtext + "<div class=\"comm-name\">"+("<a href=\"%s\"><img src=\"%s\" alt=\"\" border=\"0\"></img></a>" % (userpageurl,imagemaxurl2))+(' <strong>%s</strong>' % pseudonimas3) +", "+('<div class="font-small-gray">%s</div>' % greeting.date.strftime("%a, %d %b %Y %H:%M:%S"))
      # Attached image: link to the large version when it exists.
      if greeting.avatar:
       if greeting.avatarmax:
        wtext = wtext + ('<div class="font-small-gray"><a href="/commimg?img_id=%s&size=yes"><img src="/commimg?img_id=%s" alt=""></img></a></div>' % (greeting.key(),greeting.key()))
       else:
        wtext = wtext + ('<div class="font-small-gray"><img src="/commimg?img_id=%s" alt=""></img></div>' % greeting.key())
      wtext = wtext + ("</div><div class=\"comm-text\"><div>%s</div></div>\n\n</div><div class=\"clear\"><!-- --></div>\n\n<div>%s %s</div>\n\n<div> </div>\n" % (greeting.content,wijun,wdel))
    # Signed-in visitors get the comment form; others a sign-in link.
    codekey=codekey2()
    if users.GetCurrentUser():
     wtext = wtext + "\n</div>\n<div> </div>\n"+(_("user Comments form %(commsendurl)s %(commcodekey)s") % {'commsendurl': '/'+cmspath2+'-usercommsubmit-'+lang+'.'+fileext+'/'+pseudonimas +'/'+userid, 'commcodekey': codekey})
    else:
     wtext = wtext + "\n</div>\n<div> </div>\n<div>" + (_("Sign in or register %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}) + "</div>"
    yra=True
   except:
    yra=False
   if yra:
    usercppicatext=("<div>%s</div>\n\t" % (wtext))
   else:
    usercppicatext="<div>comments db error</div>\n\t";
  # Rebuild wtext from scratch; the comment list is embedded via
  # usercppicatext inside the translated template string.
  wtext = (_("user comm page header %(pseudonimas)s") % {'pseudonimas': pseudonimas}) + ("<br />\n<img src=\"%s\" border=\"0\" id=\"profile_pic\" alt=\"\"></img><br />\n\n%s" % (imagemaxurl,usercpplustext))+(_("User comm page %(usercpuserid)s %(usercpcontent)s %(usercppseudonimas)s %(usercpurl)s %(usercppicatext)s") % { 'usercpuserid': userid, 'usercpcontent': content,'usercppseudonimas': pseudonimas, 'usercpurl': usercpurl, 'usercppicatext': usercppicatext})
  wtext = wtext + (_("vartotojo puslapis %(usercppseudonimas)s %(userpageurl)s") % {'usercppseudonimas': pseudonimas, 'userpageurl': userpageurl})
  page = Page.loadnew("usercommpage")
  page.content = wtext
  # Language menu plus per-language links back to this page.
  page_name2 = 'menu'+'-'+lang+'.'+fileext
  page2 = Page.load(page_name2)
  page3 = Page.loadnew("kalbos")
  textaps=''
  if len(aps)>0:
   textaps=aps+'.'
  text=''
  for name, value in kalbossort:
   text = text + (_kalbhtml % ('usercommpage', textaps+name, userpageend, name, name))
  page3.content = text
  self.generate('view.html', lang, {
   'imgshar': False,
   'noedit': '1',
   'application_name': siteauth(),
   'kalbos': page3,
   'menu': page2,
   'page': page,
   })
class UserCommSubmit(webapp.RequestHandler):
 """POST target that stores a profile comment (Commentsrec2) and mails the admin."""
 def post(self, rparameters, pseudonim , pic_key):
  """Validate the CAPTCHA code, persist the comment, notify by mail, redirect."""
  param=urlparam(rparameters)
  ext=param['ext']
  lang=param['lang']
  aps=param['aps']
  kalb=param['kalb']
  # Per-request gettext catalogue for the requested language.
  lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
  _ = lang1.ugettext
  userinfo2=userinfo(pic_key,True,lang,ext)
  # Inject userinfo fields (vartot, rcomm, pseudonimas, userid, ...) as
  # locals via exec. NOTE(review): fragile if dict keys change.
  for key, val in userinfo2.items():
   try:
    exec(key + '=val')
   except Exception, e:
    err=''
  connt=""
  # CAPTCHA check: the submitted code must match the stored code entity.
  # NOTE(review): if db.get raises, codeimg stays unbound and the 'if'
  # below raises NameError instead of rejecting the post — confirm.
  try:
   codeimg = db.get(self.request.get("scodeid"))
  except:
   prn="Error"
  if codeimg and codeimg.code == self.request.get("scode") and rcomm:
   greeting = Commentsrec2()
   greeting.vartot = vartot
   greeting.rodyti = True
   greeting.userid = userid
   greeting.ipadresas = os.environ['REMOTE_ADDR']
   # greeting.laikas = datetime.datetime.now()
   if users.get_current_user():
    greeting.author = users.get_current_user()
   # Escape, BBCode-render and truncate the comment body to 400 chars.
   connt = cgi.escape(self.request.get("content"))
   connt = render_bbcode(connt)
   connt = connt[0:400]
   greeting.content = connt
   # priesduom = self.request.get("img")
   greeting.rname = pseudonimas
   # Optional attached image: store a large and a thumbnail rendition.
   if self.request.get("img"):
    avatarmax = images.resize(self.request.get("img"), width=600, height=400, output_encoding=images.PNG)
    greeting.avatarmax = db.Blob(avatarmax)
    avatar = images.resize(self.request.get("img"), width=96, height=96, output_encoding=images.PNG)
    greeting.avatar = db.Blob(avatar)
   greeting.put()
   # Notify the site admin about the new comment by e-mail.
   to_addr = _mailrcptto
   user = users.get_current_user()
   if user:
    uname=user.nickname()
    umail=users.get_current_user().email()
   else:
    uname=""
    umail=""
   message = mail.EmailMessage()
   message.subject = os.environ['HTTP_HOST'] + " - comments"
   # message.subject = "www"
   message.sender = _mailsender
   message.to = to_addr
   q_message = ("\n%s: %s \n%s \n%s \n" % ('Page', str(self.request.uri),str(textinfo()),str(textloc())))
   message.body = (_("Comments mail message %(communame)s %(commumail)s %(commrealname)s %(commmessage)s") % {'communame': uname,'commumail': umail,'commrealname': greeting.rname,'commmessage': greeting.content}) + q_message
   message.send()
  self.redirect('/'+cmspath2+'-usercommpage-'+lang+'.'+fileext+'/'+pseudonimas+'/'+userid )
class UserPicaPage(BaseRequestHandler):
 """Profile sub-page listing the user's enabled Picasa albums/photos."""
 def get(self,rparameters, pseudonim , pic_key):
  """Render the album list, or one album's photos when ?album= is given."""
  # self.response.out.write('<html><head><style>body { text-align: center; font: 11px arial, sans-serif; color: #565656; } .clear { clear:both; } .comm-container { margin-bottom:20px;} .comm-name { font-size:10pt; float:left; width:20%; padding:5px; overflow:hidden; } .comm-text { float:left; line-height:17px; width:70%; padding:5px; padding-top:0px; overflow:hidden; } .font-small-gray { font-size:10pt !important; }</style></head><body>')
  param=urlparam(rparameters)
  ext=param['ext']
  lang=param['lang']
  aps=param['aps']
  kalb=param['kalb']
  # Per-request gettext catalogue for the requested language.
  lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
  _ = lang1.ugettext
  wtext=""
  codekey=codekey2()
  userinfo2=userinfo(pic_key, True,lang,ext)
  # Inject userinfo fields (rpica, lank, userid, pseudonimas, content,
  # imagemaxurl, usercpplustext, usercpurl, userpicapageurl, userpageurl,
  # userpageend, ...) as locals via exec. NOTE(review): fragile; also if
  # rpica is falsy and usercppicatext is not in the dict, the wtext line
  # below raises NameError — confirm.
  for key, val in userinfo2.items():
   try:
    exec(key + '=val')
   except Exception, e:
    err=''
  if rpica:
   # Map album name -> publicly-visible flag from PicaAlbumOn records.
   albumbuvo = {}
   buves_album = db.GqlQuery("SELECT * FROM PicaAlbumOn WHERE userid = :1", userid)
   for albumb in buves_album:
    albumname=albumb.albumname
    albumbuvo[albumname]=albumb.rodyti
   user2 = lank.email()
   album = self.request.get("album")
   yra=False
   out=""
   try:
    if not self.request.get("album"):
     # No album selected: list the user's enabled albums.
     f = urllib.urlopen("http://picasaweb.google.com/data/feed/api/user/%s?kind=album" % user2)
     list = Picasa().albums(f.read())
     # self.response.out.write("<xmp>")
     # self.response.out.write(list)
     # self.response.out.write("</xmp>")
     out=out+"<table>"
     for name in list.keys():
      album = list[name]
      if albumbuvo.has_key(name) and albumbuvo[name]: #albumname in albumbuvo:
       out=out+("<tr><td><img src=\"%s\" border=\"0\" alt=\"%s\"></img></td><td><a href=\"%s?album=%s\">%s</a></td><td>%s</td></tr>" % (album.thumbnail,album.title,userpicapageurl,name,name,album.title))
       # pass
     out=out+"<tr><td colspan=\"3\"></td></tr></table>"
     # pass
     yra=True
    else:
     # Album selected: list its photos.
     f = urllib.urlopen("http://picasaweb.google.com/data/feed/api/user/%s/album/%s?kind=photo" % (user2,album))
     list = Picasa().photos(f.read())
     out=out+"<table>"
     for photo in list:
      out=out+("<tr><td><img src=\"%s\" border=\"0\" alt=\"%s\"></img></td><td><a href=\"%s\">%s</a></td><td>%s</td></tr>" % (photo.thumbnail,photo.title,photo.webpage,photo.title, photo.getDatetime() ))
      # pass
     out=out+"<tr><td colspan=\"3\"></td></tr></table>"
     # self.response.out.write("Please login");
     yra=True
   except:
    # Best effort: any fetch/parse error shows the placeholder below.
    yra=False
   if yra:
    usercppicatext=("<div>%s</div>\n\t" % (out))
   else:
    usercppicatext="<div>Picasa info not found or error</div>\n\t";
  wtext = wtext + (_("user pica page header %(pseudonimas)s") % {'pseudonimas': pseudonimas}) + ("<br />\n<img src=\"%s\" border=\"0\" id=\"profile_pic\" alt=\"\"></img><br />\n\n%s" % (imagemaxurl,usercpplustext))+(_("User pica page %(usercpuserid)s %(usercpcontent)s %(usercppseudonimas)s %(usercpurl)s %(usercppicatext)s") % { 'usercpuserid': userid, 'usercpcontent': content,'usercppseudonimas': pseudonimas, 'usercpurl': usercpurl, 'usercppicatext': usercppicatext})
  wtext = wtext + (_("vartotojo puslapis %(usercppseudonimas)s %(userpageurl)s") % {'usercppseudonimas': pseudonimas, 'userpageurl': userpageurl})
  page = Page.loadnew("userpicapage")
  page.content = wtext
  # Language menu plus per-language links back to this page.
  page_name2 = 'menu'+'-'+lang+'.'+fileext
  page2 = Page.load(page_name2)
  page3 = Page.loadnew("kalbos")
  textaps=''
  if len(aps)>0:
   textaps=aps+'.'
  text=''
  for name, value in kalbossort:
   text = text + (_kalbhtml % ('userpicapage', textaps+name, userpageend, name, name))
  page3.content = text
  self.generate('view.html', lang, {
   'imgshar': False,
   'noedit': '1',
   'application_name': siteauth(),
   'kalbos': page3,
   'menu': page2,
   'page': page,
   })
class UserPicaControl(BaseRequestHandler):
def get(self,rparameters):
# self.response.out.write('<html><head><style>body { text-align: center; font: 11px arial, sans-serif; color: #565656; } .clear { clear:both; } .comm-container { margin-bottom:20px;} .comm-name { font-size:10pt; float:left; width:20%; padding:5px; overflow:hidden; } .comm-text { float:left; line-height:17px; width:70%; padding:5px; padding-top:0px; overflow:hidden; } .font-small-gray { font-size:10pt !important; }</style></head><body>')
param=urlparam(rparameters)
ext=param['ext']
lang=param['lang']
aps=param['aps']
kalb=param['kalb']
lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
_ = lang1.ugettext
wtext=""
codekey=codekey2()
user = users.get_current_user()
userinfo2=userinfo(user,False,lang,ext)
for key, val in userinfo2.items():
try:
exec(key + '=val')
except Exception, e:
err=''
usercppicaurl = ("/%s-userpicacontrol-%s.%s" % (cmspath2,lang,fileext))
usercppicasubmiturl = ("/%s-userpicasubmit-%s.%s" % (cmspath2,lang,fileext))
usercppicatext = ""
if rpica:
albumbuvo = {}
buves_album = db.GqlQuery("SELECT * FROM PicaAlbumOn WHERE lankytojas = :1", user)
for albumb in buves_album:
albumname=albumb.albumname
albumbuvo[albumname]=albumb.rodyti
user2 = lank.email()
album = self.request.get("album")
yra=False
out=""
namelist =""
errtext =""
buvoappcheck=""
try:
if not self.request.get("album"):
f = urllib.urlopen("http://picasaweb.google.com/data/feed/api/user/%s?kind=album" % user2)
list = Picasa().albums(f.read())
out=out+"<table><form method=\"POST\" action=\""+usercppicasubmiturl+"\">"
for name in list.keys():
album = list[name]
if albumbuvo.has_key(name) and albumbuvo[name]: #albumname in albumbuvo:
buvoappcheck="checked=\"yes\""
out=out+("<tr><td><img src=\"%s\" border=\"0\" alt=\"%s\"></img></td><td><a href=\"%s?album=%s\">%s</a></td><td>%s</td><td><input type=\"checkbox\" name=\"photoalbum\" value=\"%s\" %s></td></tr>" % (album.thumbnail,album.title,usercppicaurl,name,name,album.title,name,buvoappcheck))
namelist = namelist + "||" + str(base64.urlsafe_b64encode(str(name)))
out=out+"<tr><td colspan=\"4\"><input type=\"hidden\" name=\"namelist\" value=\""+str(namelist)+"\" ><input type=\"submit\"></td></tr></form></table>"
yra=True
else:
f = urllib.urlopen("http://picasaweb.google.com/data/feed/api/user/%s/album/%s?kind=photo" % (user2,album))
list = Picasa().photos(f.read())
out=out+"<table>"
for photo in list:
out=out+("<tr><td><img src=\"%s\" border=\"0\" alt=\"%s\"></img></td><td><a href=\"%s\">%s</a></td><td>%s</td></tr>" % (photo.thumbnail,photo.title,photo.webpage,photo.title, photo.getDatetime() ))
out=out+"<tr><td colspan=\"3\"></td></tr></table>"
yra=True
except:
errtext = cgi.escape(str(sys.exc_info()[0]))
yra=False
if yra:
usercppicatext=("<div>%s</div>\n\t" % (out))
else:
usercppicatext="<div>Picasa info not found or error " + errtext +"</div>\n\t";
wtext = wtext + _("user pica control panel header") + ("<br />\n<img src=\"%s\" border=\"0\" id=\"profile_pic\" alt=\"\"></img><br />\n\n%s" % (imagemaxurl,usercpplustext))+(_("User pica page %(usercpuserid)s %(usercpcontent)s %(usercppseudonimas)s %(usercpurl)s %(usercppicatext)s") % { 'usercpuserid': userid, 'usercpcontent': content,'usercppseudonimas': pseudonimas, 'usercpurl': usercpurl, 'usercppicatext': usercppicatext})
page = Page.loadnew("userpage")
page.content = wtext
page_name2 = 'menu'+'-'+lang+'.'+fileext
page2 = Page.load(page_name2)
page3 = Page.loadnew("kalbos")
textaps=''
if len(aps)>0:
textaps=aps+'.'
text=''
for name, value in kalbossort:
text = text + (_kalbhtml % ('userpage', textaps+name, userpageend, name, name))
page3.content = text
self.generate('view.html', lang, {
'imgshar': False,
'noedit': '1',
'application_name': siteauth(),
'kalbos': page3,
'menu': page2,
'page': page,
})
class UserControlSend(webapp.RequestHandler):
 """POST target that saves the signed-in user's profile edits (Vartotojai)."""
 def post(self, rparameters):
  """Validate the CAPTCHA code, update the Vartotojai record, mail the admin."""
  param=urlparam(rparameters)
  ext=param['ext']
  lang=param['lang']
  aps=param['aps']
  kalb=param['kalb']
  # Per-request gettext catalogue for the requested language.
  lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
  _ = lang1.ugettext
  cont=""
  # CAPTCHA check: the submitted code must match the stored code entity.
  # NOTE(review): if db.get raises, codeimg stays unbound and the 'if'
  # below raises NameError instead of rejecting the post — confirm.
  try:
   codeimg = db.get(self.request.get("scodeid"))
  except:
   prn="Error"
  if codeimg and codeimg.code == self.request.get("scode"):
   user = users.get_current_user()
   klaida=False
   try:
    buves_vart = db.GqlQuery("SELECT * FROM Vartotojai WHERE lankytojas = :1", user)
    for vart in buves_vart:
     vart.ipadresas = os.environ['REMOTE_ADDR']
     vart.narsykle = os.environ['HTTP_USER_AGENT']
     # greeting.laikas = datetime.datetime.now()
     if users.get_current_user():
      vart.lankytojas = users.get_current_user()
     # Escape and truncate the free-text profile content to 2000 chars.
     cont = self.request.get("content")
     cont = cgi.escape(cont)
     vart.content = (cont)[0:2000]
     # priesduom = self.request.get("img")
     vart.pseudonimas = "Anonymous"
     # Optional avatar upload: store a 50x50 and a 200x200 rendition.
     if self.request.get("img"):
      avatarmin = images.resize(self.request.get("img"), width=50, height=50, output_encoding=images.PNG)
      vart.avatarmin = db.Blob(avatarmin)
      avatarmax = images.resize(self.request.get("img"), width=200, height=200, output_encoding=images.PNG)
      vart.avatarmax = db.Blob(avatarmax)
     vart.rodyti = True
     # Nickname: strip everything but alphanumerics, max 30 chars.
     if self.request.get("rname"):
      entitiesRx = re.compile("[^0-9a-zA-Z]")
      rnametext = cgi.escape(self.request.get("rname"))
      rnametext = entitiesRx.sub("", rnametext)
      vart.pseudonimas = rnametext[0:30]
     # YouTube account name: same sanitization, max 50 chars.
     if self.request.get("youtname"):
      entitiesRx = re.compile("[^0-9a-zA-Z]")
      ynametext = cgi.escape(self.request.get("youtname"))
      ynametext = entitiesRx.sub("", ynametext)
      vart.youtname = ynametext[0:50]
     # Feature toggles from the form's checkboxes.
     if self.request.get("globphoto"):
      vart.rodyti = False
     if self.request.get("picasaen"):
      vart.picarodyti = True
     else:
      vart.picarodyti = False
     if self.request.get("plusen"):
      vart.plusrodyti = True
     else:
      vart.plusrodyti = False
     if self.request.get("commen"):
      vart.commrodyti = True
     else:
      vart.commrodyti = False
     vart.put()
   except:
    # Remember the exception type so it can be shown to the user below.
    errtext = cgi.escape(str(sys.exc_info()[0]))
    klaida=True
   # Notify the site admin about the profile edit by e-mail.
   # NOTE(review): if the query matched no record, 'vart' is unbound and
   # message.body below raises NameError — confirm a record always exists.
   to_addr = _mailrcptto
   user = users.get_current_user()
   if user:
    uname=user.nickname()
    umail=users.get_current_user().email()
   else:
    uname=""
    umail=""
   message = mail.EmailMessage()
   message.subject = os.environ['HTTP_HOST'] + " - user page edit"
   # message.subject = "www"
   message.sender = _mailsender
   message.to = to_addr
   q_message = ("\n%s: %s \n%s \n%s \n" % ('Page', str(self.request.uri),str(textinfo()),str(textloc())))
   message.body = (_("Comments mail message %(communame)s %(commumail)s %(commrealname)s %(commmessage)s") % {'communame': uname,'commumail': umail,'commrealname': vart.pseudonimas,'commmessage': vart.content}) + q_message
   message.send()
   if klaida:
    self.response.out.write("""%s <br />\n""" % (errtext))
   else:
    self.redirect('/'+cmspath2+'-usercontrolpanel-'+lang+'.'+fileext)
class UserPicaControlSend(webapp.RequestHandler):
 # POST target of the Picasa-album visibility form on the user control
 # panel: records, per album, whether the signed-in user wants it shown.
 def post(self, rparameters):
  """Sync PicaAlbumOn records with the submitted album checkboxes.

  The form posts "namelist" (||-separated, urlsafe-base64-encoded album
  names) and one "photoalbum" value per checked album.  Existing records
  for the user are refreshed in place; albums without a record get a new
  PicaAlbumOn entity.  An admin notification mail is then sent and the
  browser is redirected back to the control panel page.
  """
  param=urlparam(rparameters)
  ext=param['ext']
  lang=param['lang']
  aps=param['aps']
  kalb=param['kalb']
  lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
  _ = lang1.ugettext
  user = users.get_current_user()
  now = datetime.datetime.now()
  buvo = False
  if user:
# try:
   if user:
    photoal=[]
    namelist2=[]
    albumbuvo = {}
    albumname = ""
# form = cgi.FieldStorage()
# item = form.getvalue("photoalbum")
# if form["namelist"].value:
# namelist=form["namelist"].value
# namelist2=namelist.split("||")
# if isinstance(item, list):
# for item in form.getlist("photoalbum"):
# photoal.append(item)
# else:
# photoa = form.getfirst("photoalbum", "")
# photoal.append(photoa)
    namelist=self.request.POST['namelist']
    namelist2=namelist.split("||")
    photoal=self.request.POST.getall('photoalbum')
    # Default every known album to hidden; checked boxes flip to True.
    for albumn2 in namelist2:
     if len(albumn2)>0:
      albumname = base64.urlsafe_b64decode(str(albumn2))
      albumbuvo[albumname]=False
# self.response.out.write("namelist:" +albumname+ "<br />\n")
    for albumn2 in photoal:
     albumbuvo[albumn2]=True
# self.response.out.write("photoal:" +albumn2+ "<br />\n")
    # NOTE(review): alias, not a copy -- popping from albumbuvo2 below
    # also mutates albumbuvo (harmless here: albumbuvo is not read
    # again after this point).
    albumbuvo2=albumbuvo
    buves_album = db.GqlQuery("SELECT * FROM PicaAlbumOn WHERE lankytojas = :1", user)
    # Refresh existing records; pop handled names so that only albums
    # seen for the first time remain for the creation loop below.
    for albumb in buves_album:
     albumb.ipadresas = os.environ['REMOTE_ADDR']
     albumb.narsykle = os.environ['HTTP_USER_AGENT']
     albumb.laikas = datetime.datetime.now()
     albumb.userid = user.user_id()
     if users.is_current_user_admin():
      albumb.administratorius = True
     else:
      albumb.administratorius = False
     albumname=str(albumb.albumname)
     if albumbuvo.has_key(albumname): #albumname in albumbuvo:
      albumb.rodyti = albumbuvo[albumname]
# self.response.out.write("buvo:" +albumname+" "+str(albumbuvo[albumname])+ "<br />\n")
      albumbuvo2.pop(albumname)
     albumb.put()
    # Create records for albums that had none yet.
    for albumn in albumbuvo2.keys():
     album = PicaAlbumOn(lankytojas=user)
     album.albumname=albumn
     album.ipadresas = os.environ['REMOTE_ADDR']
     album.narsykle = os.environ['HTTP_USER_AGENT']
     album.laikas = datetime.datetime.now()
     album.userid = user.user_id()
     if users.is_current_user_admin():
      album.administratorius = True
     else:
      album.administratorius = False
     album.rodyti = albumbuvo2[albumn]
# self.response.out.write("naujas:" +albumn+" "+str(albumbuvo2[albumn])+ "<br />\n")
     album.put()
    buvo = True
# except:
# klaida=True
  # Notify the site admin about the change (sent unconditionally).
  to_addr = _mailrcptto
  user = users.get_current_user()
  if user:
   uname=user.nickname()
   umail=users.get_current_user().email()
  else:
   uname=""
   umail=""
  message = mail.EmailMessage()
  message.subject = os.environ['HTTP_HOST'] + " - add albums"
# message.subject = "www"
  message.sender = _mailsender
  message.to = to_addr
  q_message = ("\n%s: %s \n%s \n%s \n" % ('Page', str(self.request.uri),str(textinfo()),str(textloc())))
  message.body = (_("Comments mail message %(communame)s %(commumail)s %(commrealname)s %(commmessage)s") % {'communame': uname,'commumail': umail,'commrealname': '','commmessage': ''}) + q_message
  message.send()
  self.redirect('/'+cmspath2+'-userpicacontrol-'+lang+'.'+fileext)
class UserMailFormPage(BaseRequestHandler):
 # Renders the "send mail to this user" contact-form page.
 def get(self, rparameters, pseudonim , pic_key):
  """Show a contact form for the user identified by pic_key.

  userinfo() returns a dict of display values; the exec loop below
  injects every key (pseudonimas, userid, userpageurl, usercpurl,
  imagemaxurl, usercpplustext, content, lank, ...) as a local variable.
  This depends on Python 2 exec semantics and will not work on
  Python 3.  A CAPTCHA key (codekey) is embedded in the form so that
  UserMailSend can verify the submission.
  """
  param=urlparam(rparameters)
  ext=param['ext']
  lang=param['lang']
  aps=param['aps']
  kalb=param['kalb']
  lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
  _ = lang1.ugettext
  codekey=codekey2()
  userinfo2=userinfo(pic_key,True,lang,ext)
  # Inject every userinfo entry into the local namespace (Python 2 only).
  for key, val in userinfo2.items():
   try:
    exec(key + '=val')
   except Exception, e:
    err=''
  usersendmailurl = ("/%s-usersendmail-%s.%s/%s/%s" % (cmspath2,lang, fileext, pseudonimas, userid))
  userpagetext = (_("vartotojo puslapis %(usercppseudonimas)s %(userpageurl)s") % {'usercppseudonimas': pseudonimas, 'userpageurl': userpageurl})
  # lank is a users.User-like object; only real accounts expose email().
  if hasattr(lank, 'email'):
   plusurl=getplius(lank.email())
  else:
   plusurl=None
  wtext = (_("user mailform header %(pseudonimas)s") % {'pseudonimas': pseudonimas}) + "<br />" + "<img src=\""+imagemaxurl+"\" border=\"0\" id=\"profile_pic\" alt=\"\"></img><br />\n\n"+usercpplustext+(_("User mailform %(usercpuserid)s %(usercpcontent)s %(usercppseudonimas)s %(usercpurl)s %(userpicapagetext)s") % { 'usercpuserid': userid, 'usercpcontent': content,'usercppseudonimas': pseudonimas, 'usercpurl': usercpurl, 'userpicapagetext': userpagetext})
  page = Page.loadnew("usermailformpage")
  user = users.get_current_user()
  greeting = ''
  # The form itself is only shown to signed-in visitors for an existing
  # target user; otherwise a not-found note or a sign-in link is shown.
  if user and hasattr(lank, 'email'):
   greeting = _("User Mail form %(mailsendurl)s %(mailcodekey)s") % {'mailsendurl': usersendmailurl,'mailcodekey': codekey}
  elif not hasattr(lank, 'email'):
   greeting = "\t\n<div> </div>\t\n<div>User not Found</div>"
  else:
   greeting = "\t\n<div> </div>\t\n<div>" + (_("Sign in or register %(userloginurl)s") % {'userloginurl': users.create_login_url(self.request.uri)}) + "</div>"
  page.content = u""+ wtext +greeting+""
  page_name2 = 'menu'+'-'+lang+'.'+fileext
  page2 = Page.load(page_name2)
  page3 = Page.loadnew("kalbos")
  textaps=''
  if len(aps)>0:
   textaps=aps+'.'
  text=''
  # Build the language-switch link block.
  for name, value in kalbossort:
   text = text + (_kalbhtml % ('usermailformpage', textaps+name, userpageend, name, name))
  page3.content = text
  self.generate('view.html', lang, {
   'imgshar': False,
   'noedit': '1',
   'application_name': siteauth(),
   'kalbos': page3,
   'menu': page2,
   'page': page,
  })
class UserMailSend(BaseRequestHandler):
# @login_required
def post(self, rparameters, pseudonim , pic_key):
parts = rparameters.split(".")
param=urlparam(rparameters)
ext=param['ext']
lang=param['lang']
aps=param['aps']
kalb=param['kalb']
lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
_ = lang1.ugettext
try:
codeimg = db.get(self.request.get("scodeid"))
except:
prn="Error"
# codeimg = db.get(self.request.get("scodeid"))
if codeimg and codeimg.code == self.request.get("scode") and users.GetCurrentUser():
userinfo2=userinfo(pic_key,True,lang,ext)
for key, val in userinfo2.items():
try:
exec(key + '=val')
except Exception, e:
err=''
userpicapagetext=""
codeimg.delete()
x_zmail = lank.email()
x_subject = self.request.get("zsubject")
x_realname = self.request.get("zrealname")
x_message = self.request.get("zmessage")
to_addr = _mailrcptto
user = users.get_current_user()
if user:
uname=user.nickname()
umail=users.get_current_user().email()
else:
uname=""
umail=""
if not mail.is_email_valid(to_addr):
# Return an error message...
pass
message = mail.EmailMessage()
message.subject = x_subject.encode("utf-8")
# message.subject = "www"
message.sender = users.get_current_user().email()
if lank.email():
message.to = lank.email()
else:
message.to = to_addr
# q_uname = uname.encode("utf-8")
# q_umail = umail.encode("utf-8")
# q_zmail = x_zmail.encode("utf-8")
# q_realname = x_realname.encode("utf-8")
# q_message = x_message.encode("utf-8")
q_uname = ''
q_umail = ''
q_zmail = ''
q_realname = ''
q_message = ''
q_uname = uname
q_umail = umail
q_zmail = x_zmail
q_realname = x_realname
q_message = x_message + ("\n%s: %s \n%s \n%s \n" % ('Page', str(self.request.uri),str(textinfo()),str(textloc())))
message.body = (_("Mail message %(mailuname)s %(mailumail)s %(mailrealname)s %(mailzmail)s %(mailmessage)s") % {'mailuname': q_uname, 'mailumail': q_umail, 'mailrealname': q_realname, 'mailzmail': q_zmail, 'mailmessage': q_message})
message.body = message.body + ("\n\nMail page: %s" % (userpageurl))
message.send()
ptext=_("Mail send OK")
else:
ptext=_("Mail send Error")
page = Page.loadnew("sendmail")
page.content = ptext
page_name2 = 'menu'+'-'+lang+'.'+fileext
page2 = Page.load(page_name2)
self.generate('view.html', lang, {
'imgshar': False,
'noedit': '1',
'application_name': siteauth(),
'menu': page2,
'page': page,
})
class Comments(BaseRequestHandler):
 # Public comments page: paginated listing plus the submission form.
 def get(self,rparameters):
  """Render 10 comments per page (?pg=N) with moderation links.

  Hidden comments are listed only for their author or a site admin;
  admins additionally get a delete link.  The page ends with the
  CAPTCHA-protected comment form that posts to SubmitComments.
  """
# self.response.out.write('<html><head><style>body { text-align: center; font: 11px arial, sans-serif; color: #565656; } .clear { clear:both; } .comm-container { margin-bottom:20px;} .comm-name { font-size:10pt; float:left; width:20%; padding:5px; overflow:hidden; } .comm-text { float:left; line-height:17px; width:70%; padding:5px; padding-top:0px; overflow:hidden; } .font-small-gray { font-size:10pt !important; }</style></head><body>')
  param=urlparam(rparameters)
  ext=param['ext']
  lang=param['lang']
  aps=param['aps']
  kalb=param['kalb']
  lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
  _ = lang1.ugettext
  wtext=""
  # Page number: keep digits only, default to page 0.
  pg=self.request.get('pg')
  entitiesRx = re.compile("[^0-9]")
  pg=entitiesRx.sub("", pg)
  if pg:
   pg = int(pg)
  else:
   pg=0
  try:
   query = db.GqlQuery("SELECT * FROM Commentsrec ORDER BY date DESC")
# query = db.GqlQuery("SELECT * FROM Commentsrec WHERE rodyti = :1, author = :2 ORDER BY date DESC", '1',users.GetCurrentUser())
   greetings = query.fetch(10,pg*10)
   co=query.count()
  except:
   klaida=True
   co=0
   greetings = []
  # Build the pager: one link per block of 10 comments; the current
  # page is rendered as plain text.
  i=0
  ii=0
  bbb=""
  while i<=co:
   i=i+10
   if ii == pg:
    bbb=bbb+' '+str(ii)
   else:
    bbb=bbb+' '+"<a href=\"/"+cmspath2+"-comments-"+lang+'.'+fileext+"?pg="+ str(ii) +"\">"+ str(ii) +"</a>"
   ii=ii+1
  page2 = Page.load("atsi-"+lang+'.'+fileext)
  wtext=wtext+page2.content+"\n<div><div style=\"text-align: center;\">"+bbb+"</div>\n\n"
  for greeting in greetings:
   wijun = ""
   wdel = ""
   # Visibility: public comments for everyone; hidden comments only for
   # their author or an admin, who also get show/hide (and delete) links.
   if greeting.rodyti or (users.GetCurrentUser() and users.get_current_user() == greeting.author) or users.is_current_user_admin():
    if users.is_current_user_admin():
     wdel = _("Comments delete %(commswiturl)s %(commkey)s") % {'commswiturl': '/commswit', 'commkey': greeting.key()}
    if (users.GetCurrentUser() and users.get_current_user() == greeting.author) or users.is_current_user_admin():
     if not greeting.rodyti:
      wijun = _("Comments show %(commswiturl)s %(commkey)s") % {'commswiturl': '/commswit', 'commkey': greeting.key()}
     else:
      wijun = _("Comments hidden %(commswiturl)s %(commkey)s") % {'commswiturl': '/commswit', 'commkey': greeting.key()}
    wtext = wtext + "\n<div class=\"comm-container\">"
    wtext = wtext + "<div class=\"comm-name\">"+('<strong>%s</strong>' % greeting.rname) +", "+('<div class="font-small-gray">%s</div>' % greeting.date.strftime("%a, %d %b %Y %H:%M:%S"))
    if greeting.avatar:
     if greeting.avatarmax:
      wtext = wtext + ('<div class="font-small-gray"><a href="/commimg?img_id=%s&size=yes"><img src="/commimg?img_id=%s" alt=""></img></a></div>' % (greeting.key(),greeting.key()))
     else:
      wtext = wtext + ('<div class="font-small-gray"><img src="/commimg?img_id=%s" alt=""></img></div>' % greeting.key())
    wtext = wtext + ("</div><div class=\"comm-text\"><div>%s</div></div>\n\n</div><div class=\"clear\"><!-- --></div>\n\n<div>%s %s</div>\n\n<div> </div>\n" % (greeting.content,wijun,wdel))
  codekey=codekey2()
  wtext = wtext + "\n</div>\n"+(_("Comments form %(commsendurl)s %(commcodekey)s") % {'commsendurl': '/'+cmspath2+'-commsubmit-'+lang+'.'+fileext, 'commcodekey': codekey})
  page = Page.loadnew("comments")
  page.content = wtext
  page_name2 = 'menu'+'-'+lang+'.'+fileext
  page2 = Page.load(page_name2)
  page3 = Page.loadnew("kalbos")
  textaps=''
  if len(aps)>0:
   textaps=aps+'.'
  text=''
  for name, value in kalbossort:
   text = text + (_kalbhtml % ('comments', textaps+name, ext, name, name))
  page3.content = text
  self.generate('view.html', lang, {
   'imgshar': False,
   'noedit': '1',
   'application_name': siteauth(),
   'kalbos': page3,
   'menu': page2,
   'page': page,
  })
class AvatarErr(object):
 """Fallback avatar holder used when a comment image lookup fails.

 Mirrors the avatar/avatarmax attributes of a Commentsrec entity but
 serves the bundled placeholder picture from bindata (JPEG data).
 """
 from bindata import PictureErr
 avatar=PictureErr.thumbnail_data  # small placeholder image bytes
 avatarmax=PictureErr.data  # full-size placeholder image bytes
class CommentsImage(webapp.RequestHandler):
 """Streams a comment's avatar image, or a bundled placeholder."""
 def get(self):
  # Resolve the datastore record; any failure (bad or missing key)
  # swaps in the static placeholder, which is JPEG rather than PNG.
  try:
   record = db.get(self.request.get("img_id"))
   mimetype = "png"
  except:
   record = AvatarErr()
   mimetype = "jpeg"
  # The "size" query parameter selects the large variant.
  attr = 'avatarmax' if self.request.get("size") else 'avatar'
  if hasattr(record, attr):
   self.response.headers['Content-Type'] = "image/%s" % mimetype
   self.response.out.write(getattr(record, attr))
  else:
   self.response.out.write("No image")
class UserShowImageMin(webapp.RequestHandler):
 # Serves a user's small (50x50) avatar, falling back to the Picasa
 # profile photo and finally to the site default avatar URL.
 def get(self, pseudonim , pic_key):
  """Write the small avatar for the user whose userid is pic_key.

  Preference order: the stored avatarmin blob (only when the profile
  is public, i.e. rodyti is True), then a 50px rendition of the Picasa
  profile thumbnail, then a redirect to the default avatar.
  """
  buvoapp = False
  imagemaxurl = ""
  lank = UserNone(email=None, federated_identity=None)
  try:
   buvesapp = db.GqlQuery("SELECT * FROM Vartotojai WHERE userid = :1", pic_key)
   for app in buvesapp:
    avatarmin=app.avatarmin
    buvoapp = app.rodyti
    lank=app.lankytojas
  except:
   klaida=True
  thubnail=getphoto(lank.email())
  if buvoapp:
   self.response.headers['Content-Type'] = "image/png"
   self.response.out.write(avatarmin)
  elif thubnail:
   # Picasa thumbnails embed a size path segment (/s144/); swap it
   # for /s50/ to request the small rendition.
   imagemaxurl = str(thubnail)
   uphoto=imagemaxurl.split("/s144/", 1)
   slasas="/s50/"
   imagemaxurl = slasas.join(uphoto)
   self.response.set_status(302)
   self.response.headers['Location'] = imagemaxurl
  else:
   self.response.set_status(302)
   self.response.headers['Location'] = avatarminurl2
   self.response.out.write("No image " +pic_key)
class UserShowImageMax(webapp.RequestHandler):
 """Serves a user's large avatar by userid, or redirects to a default."""
 def get(self, pseudonim , pic_key):
  # Look up the Vartotojai record whose userid matches pic_key and
  # remember its stored PNG plus the profile's public flag (rodyti).
  show = False
  image_data = ""
  try:
   records = db.GqlQuery("SELECT * FROM Vartotojai WHERE userid = :1", pic_key)
   for record in records:
    image_data = record.avatarmax
    show = record.rodyti
  except:
   klaida=True
  if not show:
   # Hidden or missing profile: 302 to the site-wide default avatar.
   self.response.set_status(302)
   self.response.headers['Location'] = avatarmaxurl2
   self.response.out.write("No image " + pic_key)
  else:
   self.response.headers['Content-Type'] = "image/png"
   self.response.out.write(image_data)
class SwitComments(webapp.RequestHandler):
 # Toggles (show/hide) or deletes a single comment, then redirects back
 # to the page the comment belongs to.
 def get(self):
  """Handle /commswit?id=<comment key>&show=yes|no|del.

  Permitted for the comment's author, the owner of the user page the
  comment was left on, or a site admin; deletion is admin-only.
  """
  userid = "0"
  pseudonimas = "Anonymous"
  lank = ""
  vartkey=""
  user = users.get_current_user()
  usercomm = False
  url='/comments'
  try:
   # Resolve the current visitor's Vartotojai record and decide whether
   # the comment hangs off this visitor's own user page (usercomm).
   buvesapp = db.GqlQuery("SELECT * FROM Vartotojai WHERE lankytojas = :1", user)
   for app in buvesapp:
    userid = app.userid
    pseudonimas = str(app.pseudonimas)
    lank=app.lankytojas
    vartkey=app.key()
   vartot = db.get(vartkey)
   comm = db.get(self.request.get("id"))
   kname=comm.kind()
   vartot_comm=comm.vartot
   vartot_comm_key=vartot_comm.key()
   vartot_comm_vartot=db.get(vartot_comm_key)
   if (userid == vartot_comm_vartot.userid):
    usercomm = True
  except:
   klaida=True
  try:
   # comm/kname may be unbound when the lookup above failed; the broad
   # except below also swallows the resulting NameError.
   if ((users.GetCurrentUser() and users.get_current_user() == comm.author) or (usercomm) or users.is_current_user_admin()) and ((kname == 'Commentsrec') or (kname == 'Commentsrec2')):
    if self.request.get("show")=="del" and users.is_current_user_admin():
     comm.delete()
    if self.request.get("show")=="yes":
     comm.rodyti=True
     comm.put()
    if self.request.get("show")=="no":
     comm.rodyti=False
     comm.put()
    # Redirect target depends on which comment kind was toggled.
    if kname == 'Commentsrec':
     url='/comments'
    if kname == 'Commentsrec2':
     userid=comm.userid
     rname=comm.rname
     url='/'+cmspath2+'-usercommpage-'+langdef+'.'+fileext+'/'+rname+'/'+userid
  except:
   klaida=True
  self.redirect(url)
class SubmitComments(webapp.RequestHandler):
 # POST target of the public comments form rendered by Comments.
 def post(self, rparameters):
  """Validate the CAPTCHA and store a new Commentsrec.

  The comment body is HTML-escaped and BBCode-rendered; submissions
  containing raw HTML are rejected (htmlerr stays True, nothing is
  saved).  The poster's IP is logged into SpamIP, an admin
  notification mail is sent, and the browser is redirected back to
  the comments page.
  """
  param=urlparam(rparameters)
  ext=param['ext']
  lang=param['lang']
  aps=param['aps']
  kalb=param['kalb']
  lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
  _ = lang1.ugettext
  connt=""
  # Bug fix: codeimg must be bound even when the lookup raises,
  # otherwise the "if codeimg" test below raises NameError.
  codeimg = None
  try:
   codeimg = db.get(self.request.get("scodeid"))
  except:
   codeimg = None
  if codeimg and codeimg.code == self.request.get("scode"):
   # One-shot CAPTCHA: consume the code so it cannot be replayed.
   codeimg.delete()
   greeting = Commentsrec()
   greeting.rodyti = True
   greeting.ipadresas = os.environ['REMOTE_ADDR']
   if users.get_current_user():
    greeting.author = users.get_current_user()
   # Reject content that changes under HTML escaping (i.e. contained
   # markup); only clean text is BBCode-rendered and stored.
   htmlerr=True
   connt = self.request.get("content")
   connt2 = cgi.escape(connt)
   if connt==connt2:
    htmlerr=False
    connt = render_bbcode(connt)
   connt = connt[0:400]
   greeting.content = connt
   greeting.rname = "Anonymous"
   if self.request.get("img"):
    try:
     avatar = images.resize(self.request.get("img"), width=96, height=96, output_encoding=images.PNG)
     greeting.avatar = db.Blob(avatar)
     avatarmax = images.resize(self.request.get("img"), width=600, height=400, output_encoding=images.PNG)
     greeting.avatarmax = db.Blob(avatarmax)
    except:
     avatarerr="Error"
   if self.request.get("rname"):
    greeting.rname = cgi.escape(self.request.get("rname")[0:60])
   if not htmlerr:
    greeting.put()
   # Record the sender's IP for spam tracking (first sighting only).
   buvoip = False
   try:
    ipaddr = os.environ['REMOTE_ADDR']
    if True:
     try:
      buvesip = db.GqlQuery("SELECT * FROM SpamIP WHERE ipadresas = :1", ipaddr)
      for app in buvesip:
       buvoip = True
     except:
      klaida=True
     if not buvoip:
      app = SpamIP(ipadresas=ipaddr)
      app.date = datetime.datetime.now()
      app.lastserver = '0'
      app.check = False
      app.spamcount = '0'
      app.spam = False
      app.put()
   except:
    klaida=True
   # Notify the site admin about the new comment.
   to_addr = _mailrcptto
   user = users.get_current_user()
   if user:
    uname=user.nickname()
    umail=users.get_current_user().email()
   else:
    uname=""
    umail=""
   message = mail.EmailMessage()
   message.subject = os.environ['HTTP_HOST'] + " - comments" + (" %s %s %s") % (codeimg.code,self.request.get("scode"),htmlerr)
   message.sender = _mailsender
   message.to = to_addr
   q_message = ("\n%s: %s \n%s \n%s \n" % ('Page', str(self.request.uri),str(textinfo()),str(textloc())))
   message.body = (_("Comments mail message %(communame)s %(commumail)s %(commrealname)s %(commmessage)s") % {'communame': uname,'commumail': umail,'commrealname': greeting.rname,'commmessage': greeting.content}) + q_message
   message.send()
  self.redirect('/'+cmspath2+'-comments-'+lang+'.'+fileext)
class SiteMapControl(BaseRequestHandler):
 # Admin page: per-Page sitemap / RSS / comment-provider settings form.
 def get(self,rparameters):
  """Render the sitemap control form (one row per Page entity).

  For each datastore Page the form offers an include-in-sitemap
  checkbox, a changefreq select, a priority field, an include-in-RSS
  checkbox and four comment-provider checkboxes.  The form posts to
  SiteMapControl2.  Non-admin visitors get an empty form.
  """
# self.response.out.write('<html><head><style>body { text-align: center; font: 11px arial, sans-serif; color: #565656; } .clear { clear:both; } .comm-container { margin-bottom:20px;} .comm-name { font-size:10pt; float:left; width:20%; padding:5px; overflow:hidden; } .comm-text { float:left; line-height:17px; width:70%; padding:5px; padding-top:0px; overflow:hidden; } .font-small-gray { font-size:10pt !important; }</style></head><body>')
  param=urlparam(rparameters)
  ext=param['ext']
  lang=param['lang']
  aps=param['aps']
  kalb=param['kalb']
  lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
  _ = lang1.ugettext
  wtext=""
  buvoappcheck=""
  user = users.get_current_user()
  usercpsmurl = ("/%s-sitemapcp2-%s.%s" % (cmspath2,lang,fileext))
  out=("<table><form method=\"POST\" action=\"%s\">\n" % (usercpsmurl))
  # Sitemap <changefreq> values keyed by display order.
  freqvalues = {
   '1': 'always',
   '2': 'hourly',
   '3': 'daily',
   '4': 'weekly',
   '5': 'monthly',
   '6': 'yearly',
   '7': 'never'
  }
  selecttext=""
  namelist = ''
  if users.is_current_user_admin():
   sitemapbuvo = {}
   query = datastore.Query('Page')
   entities = query.Get(1000)
   namelist = ''
   for entity in entities:
    # Defaults for entities that predate these properties.
    sitemaprodyti=True
    rssprodyti=True
    sitemapfreqkey='weekly'
    buvosmcheck=""
    buvorsscheck=""
    commenablegocheck=""
    commenablefbcheck=""
    commenablelicheck=""
    commenablevkcheck=""
    ename="---"
    pagekey=entity.key()
    sitemapprio='0.5'
    if 'name' in entity.keys():
     ename=entity['name']
    if 'sitemapfreq' in entity.keys():
     sitemapfreqkey=entity['sitemapfreq']
    if 'sitemapprio' in entity.keys():
     sitemapprio=entity['sitemapprio']
    if 'sitemaprodyti' in entity.keys():
     if entity['sitemaprodyti']:
      sitemaprodyti=entity['sitemaprodyti']
      buvosmcheck="checked=\"yes\""
    if 'rssrodyti' in entity.keys():
     if entity['rssrodyti']:
      # NOTE(review): assigns sitemaprodyti, not rssprodyti -- looks
      # like a copy-paste slip; harmless since only buvorsscheck is
      # read below.
      sitemaprodyti=entity['rssrodyti']
      buvorsscheck="checked=\"yes\""
    if 'commenablego' in entity.keys():
     if entity['commenablego']:
      commenablegocheck="checked=\"checked\""
    if 'commenablefb' in entity.keys():
     if entity['commenablefb']:
      commenablefbcheck="checked=\"checked\""
    if 'commenableli' in entity.keys():
     if entity['commenableli']:
      commenablelicheck="checked=\"checked\""
    if 'commenablevk' in entity.keys():
     if entity['commenablevk']:
      commenablevkcheck="checked=\"checked\""
    # Build the changefreq <select>, pre-selecting the stored value.
    selecttext=("<select name=\"freq_%s\">" % (ename))
    for fname in sorted(freqvalues.iterkeys()):
     freqvalue = freqvalues[fname]
     selecttextyes=""
# if cmp(int(fname),int(sitemapfreqkey))==0:
     if freqvalue==sitemapfreqkey:
      selecttextyes="selected=\"selected\""
     selecttext=selecttext+("<option %s>%s</option>" % (selecttextyes,freqvalue))
    selecttext=selecttext+"</select>\n"
    out=out+("<tr><td>%s</td><td><input type=\"checkbox\" name=\"sitemaprodyti\" value=\"%s\" %s></td><td>%s</td><td><input type=\"text\" size=\"4\" name=\"prio_%s\" value=\"%s\" > RSS <input type=\"checkbox\" name=\"rssrodyti\" value=\"%s\" %s></td></tr>\n" % (ename,ename,buvosmcheck,selecttext,ename,sitemapprio,ename,buvorsscheck))
    out=out+("<tr><td> </td><td colspan=\"3\"> Google comments <input type=\"checkbox\" name=\"commenablego\" value=\"%s\" %s> Facebook comments <input type=\"checkbox\" name=\"commenablefb\" value=\"%s\" %s> LinkedIn comments <input type=\"checkbox\" name=\"commenableli\" value=\"%s\" %s> VKontakte comments <input type=\"checkbox\" name=\"commenablevk\" value=\"%s\" %s></td></td></tr>\n" % (ename,commenablegocheck,ename,commenablefbcheck,ename,commenablelicheck,ename,commenablevkcheck))
    # Accumulate the encoded page-name list for the hidden field.
    namelist = namelist + "||" + str(base64.urlsafe_b64encode(str(ename)))
   out=out+"<tr><td colspan=\"4\"><input type=\"hidden\" name=\"namelist\" value=\""+str(namelist)+"\" ></td></tr>\n"
   out=out+"<tr><td></td><td></td><td></td><td><input type=\"submit\"></td></tr>\n"
   out=out+"</form></table>\n"
   wtext = out
  page = Page.loadnew("sitemapcp")
  page.content = wtext
  page_name2 = 'menu'+'-'+lang+'.'+fileext
  page2 = Page.load(page_name2)
  page3 = Page.loadnew("kalbos")
  textaps=''
  if len(aps)>0:
   textaps=aps+'.'
  text=''
  for name, value in kalbossort:
   text = text + (_kalbhtml % ('sitemapcp', textaps+name, ext, name, name))
  page3.content = text
  self.generate('view.html', lang, {
   'imgshar': False,
   'noedit': '1',
   'application_name': siteauth(),
   'kalbos': page3,
   'menu': page2,
   'page': page,
  })
class SiteMapControl2(webapp.RequestHandler):
 # POST target of SiteMapControl: writes the form back onto Page entities.
 def post(self, rparameters):
  """Persist sitemap/RSS/comment settings for every listed Page.

  "namelist" carries all page names (||-separated, urlsafe-base64
  encoded); each checkbox group posts one value per checked page.
  Admin only; always redirects back to the control page.
  """
  param=urlparam(rparameters)
  ext=param['ext']
  lang=param['lang']
  aps=param['aps']
  kalb=param['kalb']
  lang1 = gettext.translation (cmstrans2, locale_path, [kalb] , fallback=True)
  _ = lang1.ugettext
  user = users.get_current_user()
  now = datetime.datetime.now()
  buvo = False
  if user:
# try:
   if users.is_current_user_admin():
    photoal=[]
    namelist2=[]
    albumbuvo = {}
    rssbuvo = {}
    commenablegobuvo = {}
    commenablefbbuvo = {}
    commenablelibuvo = {}
    commenablevkbuvo = {}
    albumname = ""
# form = cgi.FieldStorage()
# item = form.getvalue("sitemaprodyti")
# if form["namelist"].value:
# namelist=form["namelist"].value
# namelist2=namelist.split("||")
# if isinstance(item, list):
# for item in form.getlist("sitemaprodyti"):
# photoal.append(item)
# else:
# photoa = form.getfirst("sitemaprodyti", "")
# photoal.append(photoa)
    namelist=self.request.POST['namelist']
    namelist2=namelist.split("||")
    photoal=self.request.POST.getall('sitemaprodyti')
    rssal=self.request.POST.getall('rssrodyti')
    commenablegoal=self.request.POST.getall('commenablego')
    commenablefbal=self.request.POST.getall('commenablefb')
    commenablelial=self.request.POST.getall('commenableli')
    commenablevkal=self.request.POST.getall('commenablevk')
    # Default every page to False, then flip the posted (checked) ones.
    for albumn2 in namelist2:
     if len(albumn2)>0:
      albumname = base64.urlsafe_b64decode(str(albumn2))
      albumbuvo[albumname]=False
      rssbuvo[albumname]=False
      commenablegobuvo[albumname]=False
      commenablefbbuvo[albumname]=False
      commenablelibuvo[albumname]=False
      commenablevkbuvo[albumname]=False
# self.response.out.write("namelist:" +albumname+ "<br />\n")
    for albumn2 in photoal:
     albumbuvo[albumn2]=True
    for albumn2 in rssal:
     rssbuvo[albumn2]=True
    for albumn2 in commenablegoal:
     commenablegobuvo[albumn2]=True
    for albumn2 in commenablefbal:
     commenablefbbuvo[albumn2]=True
    for albumn2 in commenablelial:
     commenablelibuvo[albumn2]=True
    for albumn2 in commenablevkal:
     commenablevkbuvo[albumn2]=True
# self.response.out.write("photoal:" +albumn2+ "<br />\n")
    # NOTE(review): aliases, not copies -- pop() below also mutates the
    # originals; harmless since neither is read again afterwards.
    albumbuvo2=albumbuvo
    rssbuvo2=rssbuvo
    query = datastore.Query('Page')
    entities = query.Get(1000)
    for albumb in entities:
     albumname=str(albumb['name'])
     if albumbuvo.has_key(albumname): #albumname in albumbuvo:
      albumb['sitemaprodyti'] = albumbuvo[albumname]
      albumb['rssrodyti'] = rssbuvo[albumname]
      albumb['commenablego'] = commenablegobuvo[albumname]
      albumb['commenablefb'] = commenablefbbuvo[albumname]
      albumb['commenableli'] = commenablelibuvo[albumname]
      albumb['commenablevk'] = commenablevkbuvo[albumname]
      albumb['sitemapfreq'] = self.request.POST["freq_"+albumname]
      albumb['sitemapprio'] = self.request.POST["prio_"+albumname]
# self.response.out.write("buvo:" +albumname+" "+str(albumbuvo[albumname])+ "<br />\n")
      albumbuvo2.pop(albumname)
     datastore.Put(albumb)
  self.redirect('/'+cmspath2+'-sitemapcp-'+lang+'.'+fileext)
class CodeImage(webapp.RequestHandler):
 """Draws the 6-digit CAPTCHA code referenced by ?id=<key> as a PNG.

 Each digit glyph comes from a 9x15 bitmap font and is rotated by a
 random angle before being plotted onto a PNGCanvas, to resist OCR.
 """
 def get(self):
  # Bitmap font: ten digit glyphs (0-9), each splot x sauks = 9x15
  # pixels, flattened into one 0/1 list in digit order.  (A second,
  # unused 14x14 font table that was immediately overwritten has been
  # removed as dead code.)
  pix = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,1,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,1,0,1,1,0,0,0,1,1,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,1,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,0,1,1,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,1,0,1,1,1,0,0,0,1,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,1,1,0,1,1,1,0,0,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1
,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,1,1,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,1,1,1,0,0,0,1,1,1,0,1,1,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,1,1,0,0,1,1,0,0,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
  krast=5  # outer border, px
  starp=2  # gap between digit cells, px
  splot = 9  # glyph width, px
  sauks = 15  # glyph height, px
  nn=6  # number of digits drawn
  # A cell must fit the glyph's diagonal so any rotation stays inside.
  istr=int(math.sqrt((splot/2)*(splot/2)+(sauks/2)*(sauks/2)))
  splot2 = istr*2
  sauks2 = istr*2
  plot2=2*krast + nn*splot2 + (nn-1)*starp
  auks2=2*krast + sauks2
  img = PNGCanvas(plot2, auks2, [0xff, 0xfa, 0xcd, 0xff])  # pale-yellow background
  try:
   codeimg = db.get(self.request.get("id"))
   kodas=codeimg.code
  except:
   kodas="000000"  # unknown or expired id: render a dummy code
  for i in range(0, 6):
   sx2 = "%s" % kodas[i:i+1]
   try:
    sx = int(sx2)
   except:
    sx=0
   # Random rotation between -45 and +40 degrees in 5-degree steps.
   alfa=((random.randrange(0, 90 , 5)-45)*math.pi)/180
   for y in range(1, sauks):
    for x in range(1, splot):
     # Index of this pixel inside digit sx's glyph.
     nr = sx*(splot*sauks)+(y-1)*splot+x-1
     # Rotate (x, y) about the glyph centre by alfa, then translate
     # into this digit's cell on the canvas.
     xcor=x-splot/2 -1
     ycor=y-sauks/2-1
     istrs=math.sqrt(xcor*xcor+ycor*ycor)
     alfa1=math.atan2(ycor,xcor)
     xcornew=istrs*math.cos(alfa1+alfa)
     ycornew=istrs*math.sin(alfa1+alfa)
     xx=int(krast+i*(starp+splot2)+splot2/2+1+xcornew)
     yy=int(krast+sauks2/2+1+ycornew)
     if pix[nr]==1:
      img.point(xx, yy, [0, 0, 0, 0xff])
  self.response.headers['Content-Type'] = "image/png"
  self.response.out.write(img.dump())
class UserNone(object):
    """Lightweight user record exposing read-only accessors.

    NOTE(review): appears to mimic the google.appengine.api.users.User
    accessor API for anonymous/placeholder users -- confirm against callers.
    """

    # Class-level fallbacks; __init__ always assigns the instance attributes.
    __user_id = None
    __federated_identity = None
    __federated_provider = None

    def __init__(self, email=None, _auth_domain=None,
                 _user_id=None, federated_identity=None, federated_provider=None,
                 _strict_mode=True):
        # A missing email is normalised to ''; a falsy _user_id (e.g. the
        # empty string) is normalised to None.  _strict_mode is accepted
        # only for signature compatibility and is ignored.
        self.__email = '' if email is None else email
        self.__auth_domain = _auth_domain
        self.__user_id = _user_id or None
        self.__federated_identity = federated_identity
        self.__federated_provider = federated_provider

    def nickname(self):
        # The e-mail doubles as the display name.
        return self.__email

    def email(self):
        return self.__email

    def user_id(self):
        return self.__user_id

    def auth_domain(self):
        return self.__auth_domain

    def federated_identity(self):
        return self.__federated_identity

    def federated_provider(self):
        return self.__federated_provider
def getphoto(useris):
"""Return the thumbnail URL of *useris*'s Picasa profile photo, or False.

Fetches the user's album feed and looks for an album whose name starts
with 'ProfilePhotos' or equals 'Profile_photosActive'; the first photo's
thumbnail wins.  Any network/parse error yields False.
NOTE(review): indentation was lost in this copy of the file, so the
intended nesting of the two trailing `break` statements is ambiguous --
verify against the original source before changing this logic.
"""
yra=False
try:
f = urllib.urlopen("http://picasaweb.google.com/data/feed/api/user/%s?kind=album" % useris)
# NOTE: `list` shadows the builtin; kept as-is in this doc-only pass.
list = Picasa().albums(f.read())
for name in list.keys():
album = list[name]
if name.find('ProfilePhotos') == 0:
f = urllib.urlopen("http://picasaweb.google.com/data/feed/api/user/%s/album/%s?kind=photo" % (useris,name))
list = Picasa().photos(f.read())
for photo in list:
phototurim = photo.thumbnail #photo.webpage
yra=True
break
break
if name == "Profile_photosActive":
f = urllib.urlopen("http://picasaweb.google.com/data/feed/api/user/%s/album/%s?kind=photo" % (useris,"Profile_photosActive"))
list = Picasa().photos(f.read())
for photo in list:
phototurim = photo.thumbnail #photo.webpage
yra=True
break
break
# Bare except: any feed/HTTP/parse failure is treated as "no photo".
except:
yra=False
if yra:
return phototurim
else:
return False
def mainold():
"""Legacy per-process setup routine.

NOTE(review): indentation was lost when this file was extracted, so the
exact extent of mainold()'s body versus the module-level code below is
ambiguous -- confirm against the original source.
"""
# Best-effort purge of CAPTCHA image records older than 15 minutes.
try:
imgcodes = db.GqlQuery("SELECT * FROM Codeimagereg WHERE date < :1", datetime.datetime.now() + datetime.timedelta(minutes=-15))
for imgcode in imgcodes:
imgcode.delete()
except:
klaida=True
# Flag requests arriving on the canonical upe.lt host names for redirect.
redir = False
if os.environ['HTTP_HOST']=='www.upe.lt' or os.environ['HTTP_HOST']=='lt.upe.lt' or os.environ['HTTP_HOST']=='us.upe.lt' or os.environ['HTTP_HOST']=='upe.lt':
redir = True
redir2 = False
if os.environ['HTTP_HOST']=='google5353c7992b3833b7.nerij.us':
redir2 = True
# Record this deployed application version in the AppVer datastore kind,
# refreshing its "last seen" timestamp when it already exists.
buvoapp = False
try:
thisappver = os.environ['CURRENT_VERSION_ID']
thisappid = os.environ['APPLICATION_ID']
thisappsoftver = os.environ['SERVER_SOFTWARE']
thishostname = os.environ['DEFAULT_VERSION_HOSTNAME']
if True:
try:
buvesapp = db.GqlQuery("SELECT * FROM AppVer WHERE appver = :1", thisappver)
for app in buvesapp:
app.timelast = datetime.datetime.now()
app.put()
buvoapp = True
except:
klaida=True
# db.put(buves_vart)
if not buvoapp:
app = AppVer(appver=thisappver)
# NOTE(review): `now` is not defined anywhere in this view --
# presumably datetime.datetime.now() bound elsewhere; verify.
app.timestart = now
app.timelast = now
app.enable = False
app.appsoftver = thisappsoftver
app.appid = thisappid
app.hostname = thishostname
app.put()
except:
klaida=True
# Evaluate the "start" kill-switch expression stored in the datastore.
# NOTE(review): eval() of datastore-held text executes arbitrary code --
# anyone able to write DinCode rows controls this application.
try:
codedb = db.GqlQuery("SELECT * FROM DinCode WHERE codename = :1", "start")
for thiscode in codedb:
thiscode = thiscode.codetext
appon = eval(thiscode)
except:
appon=False
def handle_500(request, response, exception):
"""webapp2-style 500 handler: e-mails the environment dump plus the
current traceback to the configured admin address, then renders a
minimal error page (the details are deliberately not shown to the user,
see the commented-out writes below)."""
greeting = ''
# NOTE(review): Python 2 idiom -- os.environ.items() is a list here so
# .sort() works; under Python 3 this would raise AttributeError.
items = os.environ.items()
items.sort()
for name, value in items:
aaa = "%s\t= %s\n" % (name, value)
greeting = greeting + aaa
lines = ''.join(traceback.format_exception(*sys.exc_info()))
message = mail.EmailMessage()
message.subject = os.environ['HTTP_HOST'] + " - Error500 - " + os.environ['REQUEST_ID_HASH']
message.sender = _mailsender
message.to = _mailrcptto
message.body = "%s\n\n%s" % (greeting,lines)
message.send()
response.write("<html><body><h1>Internal Server Error 500</h1>\n<xmp>")
# response.write("%s\n\n" % (greeting))
# response.write(cgi.escape(lines, quote=True))
response.write("</xmp></body></html>")
#applicationdisable = webapp.WSGIApplication([('/(.*)', SiteDisable),], debug=_DEBUG)
#applicationredir = webapp.WSGIApplication([('/(.*)', RedirN),], debug=_DEBUG)
#applicationredir2 = webapp.WSGIApplication([('/(.*)', RedirN2),], debug=_DEBUG)
# Primary route table.  The DomainRoute entry redirects the upe.lt host
# aliases; the remaining tuples map URL patterns (most of them prefixed by
# the configurable cmspath2 segment) to their handler classes.
url_map1 = [
routes.DomainRoute(r'<:(upe\.lt|lt\.upe\.lt|us\.upe\.lt|www\.upe\.lt)>', [
webapp.Route('/(.*)', handler=RedirN),
]),
('/install', WikiInstall),
('/'+cmspath2+'-env-(.*)', WikiEnv),
('/'+cmspath2+'-fb-(.*)', WikiFB),
('/'+cmspath2+'-li-(.*)', WikiLI),
('/'+cmspath2+'-vk-(.*)', WikiVK),
('/'+cmspath2+'-ver-(.*)', WikiExec),
('/'+cmspath2+'-login-(.*)', WikiLogin),
('/'+cmspath2+'-admin-(.*)', WikiAdmin),
('/'+cmspath2+'-mod(.*)-(.*)', WikiMod),
('/'+cmspath2+'-lietuvos(.*)-(.*)', WikiMod),
('/'+cmspath2+'-sitemapcp-(.*)', SiteMapControl),
('/'+cmspath2+'-sitemapcp2-(.*)', SiteMapControl2),
('/'+cmspath2+'-memberlistshort-(.*)', VartSarTrumpas),
('/'+cmspath2+'-fbmemberlistshort-(.*)', FBUserListSort),
('/'+cmspath2+'-limemberlistshort-(.*)', LIUserListSort),
('/'+cmspath2+'-vkmemberlistshort-(.*)', VKUserListSort),
('/'+cmspath2+'-memberlist-(.*)', VartSar),
('/'+cmspath2+'-usercontrolpanel-(.*)', UserControl),
('/'+cmspath2+'-usercpsubmit-(.*)', UserControlSend),
('/'+cmspath2+'-userpicacontrol-(.*)', UserPicaControl),
('/'+cmspath2+'-userpicasubmit-(.*)', UserPicaControlSend),
('/'+cmspath2+'-userpicapage-(.*)/([-\w]+)/([0-9_]+)', UserPicaPage),
('/'+cmspath2+'-useryoutpage-(.*)/([-\w]+)/([0-9_]+)', UserYoutPage),
('/'+cmspath2+'-usercommpage-(.*)/([-\w]+)/([0-9_]+)', UserCommPage),
('/'+cmspath2+'-usercommsubmit-(.*)/([-\w]+)/([0-9_]+)', UserCommSubmit),
('/'+cmspath2+'-usermailformpage-(.*)/([-\w]+)/([0-9_]+)', UserMailFormPage),
('/'+cmspath2+'-usersendmail-(.*)/([-\w]+)/([0-9_]+)', UserMailSend),
('/'+cmspath2+'-userpage-(.*)/([-\w]+)/([0-9_]+)', UserShowPage),
('/'+cmspath2+'-userimagemin/([-\w]+)/([0-9_]+)', UserShowImageMin),
('/'+cmspath2+'-userimage/([-\w]+)/([0-9_]+)', UserShowImageMax),
('/'+cmspath2+'-comments-(.*)', Comments),
('/'+cmspath2+'-atsiliepimai-(.*)', Comments),
('/'+cmspath2+'-commsubmit-(.*)', SubmitComments),
('/'+cmspath2+'-mailform-(.*)', MailForm),
('/'+cmspath2+'-siustilaiska-(.*)', MailForm),
('/'+cmspath2+'-sendmail-(.*)', MailSend),
# ('/'+cmspath2+'-searchid-(.*)', VarId),
('/commswit', SwitComments),
('/commimg', CommentsImage),
('/codeimg', CodeImage),
('/(.*)favicon.ico', WikiFav),
('/'+cmspath2+'-guestbook-(.*)', WikiGuest),
('/'+cmspath2+'-sveciai-(.*)', WikiGuest),
('/'+cmspath2+'-sing-(.*)', SingGuestbook),
('/'+cmspath2+'-ls-(.*)', ListDir),
('/'+cmspath2+'-download-(.*)', WikiRedirDown),
# ('/redir.php/(.*)', WikiRedir),
# ('/redir(.*)', WikiRedir),
]
#url_map2 = [('/'+cmspath2+'-pic-(.*)', ImageSharingAlbumIndex),
# ('/'+cmspath2+'-picnew-(.*)', ImageSharingAlbumCreate),
# ('/'+cmspath2+'-picalbum-(.*)/([-\w]+)', ImageSharingAlbumView),
# ('/'+cmspath2+'-picupload-(.*)/([-\w]+)', ImageSharingUploadImage),
# ('/'+cmspath2+'-picshowimage-(.*)/([-\w]+)', ImageSharingShowImage),
# ('/'+cmspath2+'-pic(thumbnail|image)-(.*)/([-\w]+)', ImageSharingServeImage),
# ('/'+cmspath2+'-picsearch-(.*)', ImageSharingSearch)]
url_map = []
url_map.extend(url_map1)
#url_map.extend(url_map2)
# The generic wiki-page route and the '/(.*)' catch-all must stay last --
# routes are matched in order.
url_map.extend([('/'+cmspath2+'-([-\w]+)-(.*)', WikiPage),('/(.*)', WikiRedirMain)])
app = webapp.WSGIApplication(url_map, debug=_DEBUG)
app.error_handlers[500] = handle_500
# wsgiref.handlers.CGIHandler().run(application)
# if redir:
# applicationredir.run()
# exit(0)
# if redir2:
# applicationredir2.run()
# exit(0)
# if appon:
# app.run()
# exit(0)
# else:
# applicationdisable.run()
# exit(0)
#if __name__ == '__main__':
# main()
#if __name__ == '__main__':
# try:
# main()
# except:
# applicationerror = webapp.WSGIApplication([('/(.*)', HttpError),], debug=_DEBUG)
# run_wsgi_app(applicationerror)
# exit(0)
| lgpl-2.1 |
kiith-sa/QGIS | python/plugins/processing/script/scripts/Extract_raster_values_to_CSV.py | 4 | 3177 | ##[Example scripts]=group
##Input_raster=raster
##Input_vector=vector
##Transform_vector_to_raster_CRS=boolean
##Output_table=output table
# QGIS Processing user script (Python 2): samples the value of every raster
# band at each point of the input vector layer and writes one row per
# attribute/band column to a CSV table.  The `Input_*`/`Output_table`
# variables and the `progress` object are injected by the Processing
# framework (declared by the ## lines above).
import os
from osgeo import gdal, ogr, osr
from processing.core.TableWriter import TableWriter
from processing.core.GeoAlgorithmExecutionException import \
GeoAlgorithmExecutionException
from processing.tools.raster import *
raster = gdal.Open(Input_raster)
rasterBaseName = os.path.splitext(os.path.basename(Input_raster))[0]
bandCount = raster.RasterCount
rasterXSize = raster.RasterXSize
rasterYSize = raster.RasterYSize
geoTransform = raster.GetGeoTransform()
rasterCRS = osr.SpatialReference()
rasterCRS.ImportFromWkt(raster.GetProjectionRef())
vector = ogr.Open(Input_vector, False)
layer = vector.GetLayer(0)
featureCount = layer.GetFeatureCount()
if featureCount == 0:
raise GeoAlgorithmExecutionException(
'There are no features in input vector.')
vectorCRS = layer.GetSpatialRef()
# First pass: one column per vector attribute, header = field name.
columns = []
featureDefn = layer.GetLayerDefn()
for i in xrange(featureDefn.GetFieldCount()):
fieldDefn = featureDefn.GetFieldDefn(i)
columns.append([fieldDefn.GetNameRef()])
layer.ResetReading()
feature = layer.GetNextFeature()
while feature is not None:
for i in xrange(featureDefn.GetFieldCount()):
fieldDefn = featureDefn.GetFieldDefn(i)
if fieldDefn.GetType() == ogr.OFTInteger:
columns[i].append(feature.GetFieldAsInteger(i))
elif fieldDefn.GetType() == ogr.OFTReal:
columns[i].append(feature.GetFieldAsDouble(i))
else:
columns[i].append(feature.GetFieldAsString(i))
feature = layer.GetNextFeature()
current = 0
total = bandCount + featureCount * bandCount
if Transform_vector_to_raster_CRS:
coordTransform = osr.CoordinateTransformation(vectorCRS, rasterCRS)
if coordTransform is None:
raise GeoAlgorithmExecutionException(
'Error while creating coordinate transformation.')
# Band columns are named "<first 8 chars of raster name>_<band number>".
columnName = rasterBaseName[:8]
for i in xrange(bandCount):
current += 1
# NOTE(review): int(current * total) grows far past 100 -- presumably
# int(100 * current / total) was intended; confirm before fixing.
progress.setPercentage(int(current * total))
rasterBand = raster.GetRasterBand(i + 1)
data = rasterBand.ReadAsArray()
layer.ResetReading()
feature = layer.GetNextFeature()
col = []
col.append(columnName + '_' + str(i + 1))
while feature is not None:
current += 1
progress.setPercentage(int(current * total))
geometry = feature.GetGeometryRef()
x = geometry.GetX()
y = geometry.GetY()
if Transform_vector_to_raster_CRS:
pnt = coordTransform.TransformPoint(x, y, 0)
x = pnt[0]
y = pnt[1]
# NOTE(review): gdal.Dataset has no mapToPixel() method -- this looks
# like it should be the module-level mapToPixel(x, y, geoTransform)
# helper from processing.tools.raster (imported via * above, but
# shadow-bound here by the `raster` dataset variable); verify upstream.
(rX, rY) = raster.mapToPixel(x, y, geoTransform)
# NOTE(review): bounds check uses > rather than >=, and does not
# reject negative pixel indices -- possible off-by-one / wrap-around.
if rX > rasterXSize or rY > rasterYSize:
feature = layer.GetNextFeature()
continue
value = data[rY, rX]
col.append(value)
feature = layer.GetNextFeature()
rasterBand = None
columns.append(col)
# Release GDAL/OGR handles before writing the output table.
raster = None
vector.Destroy()
writer = TableWriter(Output_table, 'utf-8', [])
# Transpose: columns -> one record per row index, reusing the row buffer.
row = []
for i in xrange(len(columns[0])):
for col in columns:
row.append(col[i])
writer.addRecord(row)
row[:] = []
| gpl-2.0 |
zhukaixy/kbengine | kbe/src/lib/python/Lib/ctypes/macholib/dyld.py | 152 | 4907 | """
dyld emulation
"""
import os
from ctypes.macholib.framework import framework_info
from ctypes.macholib.dylib import dylib_info
from itertools import *
__all__ = [
'dyld_find', 'framework_find',
'framework_info', 'dylib_info',
]
# These are the defaults as per man dyld(1)
#
# Framework directories searched when DYLD_FALLBACK_FRAMEWORK_PATH is unset.
DEFAULT_FRAMEWORK_FALLBACK = [
os.path.expanduser("~/Library/Frameworks"),
"/Library/Frameworks",
"/Network/Library/Frameworks",
"/System/Library/Frameworks",
]
# Library directories searched when DYLD_FALLBACK_LIBRARY_PATH is unset.
DEFAULT_LIBRARY_FALLBACK = [
os.path.expanduser("~/lib"),
"/usr/local/lib",
"/lib",
"/usr/lib",
]
def dyld_env(env, var):
    """Return the colon-separated entries of *var* as a list.

    *env* is the mapping to consult (os.environ when None); an unset
    variable yields the empty list.
    """
    environment = os.environ if env is None else env
    value = environment.get(var)
    return [] if value is None else value.split(':')
def dyld_image_suffix(env=None):
    """Return DYLD_IMAGE_SUFFIX from *env* (os.environ when None), or None."""
    environment = os.environ if env is None else env
    return environment.get('DYLD_IMAGE_SUFFIX')
def dyld_framework_path(env=None):
"""DYLD_FRAMEWORK_PATH from *env* (default os.environ) as a list."""
return dyld_env(env, 'DYLD_FRAMEWORK_PATH')
def dyld_library_path(env=None):
"""DYLD_LIBRARY_PATH as a list of directories."""
return dyld_env(env, 'DYLD_LIBRARY_PATH')
def dyld_fallback_framework_path(env=None):
"""DYLD_FALLBACK_FRAMEWORK_PATH as a list of directories."""
return dyld_env(env, 'DYLD_FALLBACK_FRAMEWORK_PATH')
def dyld_fallback_library_path(env=None):
"""DYLD_FALLBACK_LIBRARY_PATH as a list of directories."""
return dyld_env(env, 'DYLD_FALLBACK_LIBRARY_PATH')
def dyld_image_suffix_search(iterator, env=None):
    """For a potential path iterator, add DYLD_IMAGE_SUFFIX semantics.

    When the suffix is set, each candidate is preceded by its suffixed
    variant (inserted before '.dylib' when present, appended otherwise);
    without a suffix the iterator is returned untouched.
    """
    suffix = dyld_image_suffix(env)
    if suffix is None:
        return iterator

    def _expanded(candidates=iterator, sfx=suffix):
        ext = '.dylib'
        for candidate in candidates:
            if candidate.endswith(ext):
                yield candidate[:-len(ext)] + sfx + ext
            else:
                yield candidate + sfx
            yield candidate

    return _expanded()
def dyld_override_search(name, env=None):
    """Yield candidates from the DYLD override environment variables.

    DYLD_FRAMEWORK_PATH is consulted first (only when *name* parses as a
    framework path), then DYLD_LIBRARY_PATH using just the basename of
    *name*.  Yields nothing when neither variable is set.
    """
    fw = framework_info(name)
    if fw is not None:
        for directory in dyld_framework_path(env):
            yield os.path.join(directory, fw['name'])
    base = os.path.basename(name)
    for directory in dyld_library_path(env):
        yield os.path.join(directory, base)
def dyld_executable_path_search(name, executable_path=None):
    """Yield the expansion of an '@executable_path/' name, if applicable.

    Yields nothing unless *name* uses the prefix and *executable_path*
    is known.
    """
    prefix = '@executable_path/'
    if executable_path is not None and name.startswith(prefix):
        yield os.path.join(executable_path, name[len(prefix):])
def dyld_default_search(name, env=None):
    """Yield the default dyld candidates for *name*.

    Order: the name itself, the DYLD_FALLBACK_* directories from *env*,
    then the hard-wired defaults -- but the defaults only when the
    corresponding fallback variable is unset.
    """
    yield name

    fw = framework_info(name)
    fallback_frameworks = dyld_fallback_framework_path(env)
    if fw is not None:
        for directory in fallback_frameworks:
            yield os.path.join(directory, fw['name'])

    base = os.path.basename(name)
    fallback_libraries = dyld_fallback_library_path(env)
    for directory in fallback_libraries:
        yield os.path.join(directory, base)

    if fw is not None and not fallback_frameworks:
        for directory in DEFAULT_FRAMEWORK_FALLBACK:
            yield os.path.join(directory, fw['name'])
    if not fallback_libraries:
        for directory in DEFAULT_LIBRARY_FALLBACK:
            yield os.path.join(directory, base)
def dyld_find(name, executable_path=None, env=None):
    """Find a library or framework using dyld semantics.

    Tries the override, @executable_path and default search phases (each
    expanded by DYLD_IMAGE_SUFFIX) and returns the first existing file;
    raises ValueError when nothing matches.
    """
    candidates = chain(
        dyld_override_search(name, env),
        dyld_executable_path_search(name, executable_path),
        dyld_default_search(name, env),
    )
    for candidate in dyld_image_suffix_search(candidates, env):
        if os.path.isfile(candidate):
            return candidate
    raise ValueError("dylib %s could not be found" % (name,))
def framework_find(fn, executable_path=None, env=None):
    """
    Find a framework using dyld semantics in a very loose manner.

    Will take input such as:
        Python
        Python.framework
        Python.framework/Versions/Current

    Raises ValueError (the error from the first lookup) when neither the
    name as given nor its '.framework'-expanded form can be found.
    """
    # BUGFIX: the original did `except ValueError as e: pass` and later
    # `raise e`.  Under Python 3 the except target is unbound at the end of
    # the except block, so the final `raise e` crashed with NameError
    # instead of propagating ValueError.  Keep the exception in an outer
    # variable instead (mirrors the upstream CPython fix).
    error = None
    try:
        return dyld_find(fn, executable_path=executable_path, env=env)
    except ValueError as e:
        error = e
    # Expand bare names: "Python" -> "Python.framework/Python",
    # "Python.framework/Versions/Current" -> ".../Current/Python".
    fmwk_index = fn.rfind('.framework')
    if fmwk_index == -1:
        fmwk_index = len(fn)
        fn += '.framework'
    fn = os.path.join(fn, os.path.basename(fn[:fmwk_index]))
    try:
        return dyld_find(fn, executable_path=executable_path, env=env)
    except ValueError:
        # Re-raise the original error so the caller sees the name as given.
        raise error
    finally:
        error = None  # break the traceback reference cycle
def test_dyld_find():
"""Smoke test; only passes on macOS where these system paths exist."""
# NOTE(review): `env` is created but never passed to dyld_find below, so
# the real process environment is consulted -- possibly unintended.
env = {}
assert dyld_find('libSystem.dylib') == '/usr/lib/libSystem.dylib'
assert dyld_find('System.framework/System') == '/System/Library/Frameworks/System.framework/System'
if __name__ == '__main__':
test_dyld_find()
| lgpl-3.0 |
ycaihua/kbengine | kbe/src/lib/python/Doc/includes/mp_newtype.py | 52 | 1940 | from multiprocessing import freeze_support
from multiprocessing.managers import BaseManager, BaseProxy
import operator
##
class Foo:
    """Demo class whose methods are exposed through manager proxies."""

    def f(self):
        print('you called Foo.f()')

    def g(self):
        print('you called Foo.g()')

    def _h(self):
        # Underscore prefix: hidden from the default proxy exposure, made
        # reachable only via an explicit `exposed` registration.
        print('you called Foo._h()')
# A simple generator function
def baz():
    """Yield the squares of 0 through 9."""
    for n in range(10):
        yield n * n
# Proxy type for generator objects
class GeneratorProxy(BaseProxy):
    """Proxy forwarding the iterator protocol to a remote generator."""

    # Only __next__ is marshalled across the connection.
    _exposed_ = ['__next__']

    def __iter__(self):
        # The proxy itself is the iterator.
        return self

    def __next__(self):
        return self._callmethod('__next__')
# Function to return the operator module
def get_operator_module():
"""Return the stdlib operator module (shared via the manager proxy)."""
return operator
##
class MyManager(BaseManager):
"""Custom manager onto which the types below are registered."""
pass
# register the Foo class; make `f()` and `g()` accessible via proxy
MyManager.register('Foo1', Foo)
# register the Foo class; make `g()` and `_h()` accessible via proxy
MyManager.register('Foo2', Foo, exposed=('g', '_h'))
# register the generator function baz; use `GeneratorProxy` to make proxies
MyManager.register('baz', baz, proxytype=GeneratorProxy)
# register get_operator_module(); make public functions accessible via proxy
MyManager.register('operator', get_operator_module)
##
def test():
"""Exercise the registered proxies: the two Foo views with different
exposed-method sets, a remote generator, and the shared operator module.
Starts a manager server process as a side effect."""
manager = MyManager()
manager.start()
print('-' * 20)
f1 = manager.Foo1()
f1.f()
f1.g()
# Foo1 uses the default exposure: public methods only, no _h.
assert not hasattr(f1, '_h')
assert sorted(f1._exposed_) == sorted(['f', 'g'])
print('-' * 20)
f2 = manager.Foo2()
f2.g()
f2._h()
# Foo2 was registered with exposed=('g', '_h'), so f is unreachable.
assert not hasattr(f2, 'f')
assert sorted(f2._exposed_) == sorted(['g', '_h'])
print('-' * 20)
it = manager.baz()
for i in it:
print('<%d>' % i, end=' ')
print()
print('-' * 20)
op = manager.operator()
print('op.add(23, 45) =', op.add(23, 45))
print('op.pow(2, 94) =', op.pow(2, 94))
print('op._exposed_ =', op._exposed_)
##
if __name__ == '__main__':
# freeze_support() is a no-op except in frozen Windows executables, where
# it must run before any manager/child processes are started.
freeze_support()
test()
| lgpl-3.0 |
kdaniels/cobbler | cobbler/autoinstall_manager.py | 16 | 11460 | import os
from cobbler import autoinstallgen
from cobbler import clogger
from cobbler import utils
from cobbler.cexceptions import CX
TEMPLATING_ERROR = 1
KICKSTART_ERROR = 2
class AutoInstallationManager:
"""
Manage automatic installation templates, snippets and final files
"""
# NOTE: Python 2 code (basestring, bare except clauses).  Indentation was
# stripped from this copy of the file; review notes below flag suspected
# defects to confirm against upstream rather than silent behaviour facts.
def __init__(self, collection_mgr, logger=None):
"""
Constructor
@param CollectionManager collection_mgr collection manager
@param Logger logger logger
"""
self.collection_mgr = collection_mgr
self.snippets_base_dir = self.collection_mgr.settings().autoinstall_snippets_dir
self.templates_base_dir = self.collection_mgr.settings().autoinstall_templates_dir
self.autoinstallgen = autoinstallgen.AutoInstallationGen(self.collection_mgr)
if logger is None:
logger = clogger.Logger()
self.logger = logger
def validate_autoinstall_template_file_path(self, autoinstall, for_item=True, new_autoinstall=False):
"""
Validate the automatic installation template's relative file path.
@param: str autoinstall automatic installation template relative file path
@param: bool for_item (enable/disable special handling for Item objects)
@param: bool new_autoinstall (when set to true new filenames are allowed)
@returns str automatic installation template relative file path
"""
if not isinstance(autoinstall, basestring):
raise CX("Invalid input, autoinstall must be a string")
else:
autoinstall = autoinstall.strip()
if autoinstall == "":
# empty autoinstall is allowed (interactive installations)
return autoinstall
if for_item is True:
# this autoinstall value has special meaning for Items
# other callers of this function have no use for this
if autoinstall == "<<inherit>>":
return autoinstall
# Rejecting ".." blocks path traversal out of templates_base_dir.
if autoinstall.find("..") != -1:
raise CX("Invalid automatic installation template file location %s, it must not contain .." % autoinstall)
autoinstall_path = "%s/%s" % (self.templates_base_dir, autoinstall)
if not os.path.isfile(autoinstall_path) and not new_autoinstall:
raise CX("Invalid automatic installation template file location %s, file not found" % autoinstall_path)
return autoinstall
def get_autoinstall_templates(self):
"""
Get automatic OS installation templates
@return list automatic installation templates
"""
# Walks templates_base_dir and returns sorted paths relative to it.
files = []
for root, dirnames, filenames in os.walk(self.templates_base_dir):
for filename in filenames:
rel_root = root[len(self.templates_base_dir) + 1:]
if rel_root:
rel_path = "%s/%s" % (rel_root, filename)
else:
rel_path = filename
files.append(rel_path)
files.sort()
return files
def read_autoinstall_template(self, file_path):
"""
Read an automatic OS installation template
@param str file_path automatic installation template relative file path
@return str automatic installation template content
"""
file_path = self.validate_autoinstall_template_file_path(file_path, for_item=False)
file_full_path = "%s/%s" % (self.templates_base_dir, file_path)
fileh = open(file_full_path, "r")
data = fileh.read()
fileh.close()
return data
def write_autoinstall_template(self, file_path, data):
"""
Write an automatic OS installation template
@param str file_path automatic installation template relative file path
@param str data automatic installation template content
"""
file_path = self.validate_autoinstall_template_file_path(file_path, for_item=False, new_autoinstall=True)
file_full_path = "%s/%s" % (self.templates_base_dir, file_path)
# Bare except: any mkdir failure (including pre-existing dir handling
# inside utils.mkdir) aborts via utils.die.
try:
utils.mkdir(os.path.dirname(file_full_path))
except:
utils.die(self.logger, "unable to create directory for automatic OS installation template at %s" % file_path)
fileh = open(file_full_path, "w+")
fileh.write(data)
fileh.close()
return True
def remove_autoinstall_template(self, file_path):
"""
Remove an automatic OS installation template
@param str file_path automatic installation template relative file path
"""
file_path = self.validate_autoinstall_template_file_path(file_path, for_item=False)
file_full_path = "%s/%s" % (self.templates_base_dir, file_path)
if not self.is_autoinstall_in_use(file_path):
os.remove(file_full_path)
else:
utils.die(self.logger, "attempt to delete in-use file")
def validate_autoinstall_snippet_file_path(self, snippet, new_snippet=False):
"""
Validate the snippet's relative file path.
@param: str snippet automatic installation snippet relative file path
@param: bool new_snippet (when set to true new filenames are allowed)
@returns: str snippet or CX
"""
if not isinstance(snippet, basestring):
raise CX("Invalid input, snippet must be a string")
else:
snippet = snippet.strip()
# Rejecting ".." blocks path traversal out of snippets_base_dir.
if snippet.find("..") != -1:
raise CX("Invalid automated installation snippet file location %s, it must not contain .." % snippet)
snippet_path = "%s/%s" % (self.snippets_base_dir, snippet)
if not os.path.isfile(snippet_path) and not new_snippet:
raise CX("Invalid automated installation snippet file location %s, file not found" % snippet_path)
return snippet
def get_autoinstall_snippets(self):
"""Return the sorted snippet paths relative to snippets_base_dir."""
files = []
for root, dirnames, filenames in os.walk(self.snippets_base_dir):
for filename in filenames:
rel_root = root[len(self.snippets_base_dir) + 1:]
if rel_root:
rel_path = "%s/%s" % (rel_root, filename)
else:
rel_path = filename
files.append(rel_path)
files.sort()
return files
def read_autoinstall_snippet(self, file_path):
"""Return the content of the snippet at relative *file_path*."""
file_path = self.validate_autoinstall_snippet_file_path(file_path)
file_full_path = "%s/%s" % (self.snippets_base_dir, file_path)
fileh = open(file_full_path, "r")
data = fileh.read()
fileh.close()
return data
def write_autoinstall_snippet(self, file_path, data):
"""Write *data* to the snippet at relative *file_path*, creating
parent directories as needed."""
file_path = self.validate_autoinstall_snippet_file_path(file_path, new_snippet=True)
file_full_path = "%s/%s" % (self.snippets_base_dir, file_path)
try:
utils.mkdir(os.path.dirname(file_full_path))
except:
utils.die(self.logger, "unable to create directory for automatic OS installation snippet at %s" % file_path)
fileh = open(file_full_path, "w+")
fileh.write(data)
fileh.close()
def remove_autoinstall_snippet(self, file_path):
"""Delete the snippet at relative *file_path*."""
file_path = self.validate_autoinstall_snippet_file_path(file_path)
# NOTE(review): suspected bug -- this removes the *relative* path (i.e.
# a file relative to the current working directory) instead of
# "%s/%s" % (self.snippets_base_dir, file_path); compare with
# remove_autoinstall_template above and confirm against upstream.
os.remove(file_path)
return True
def is_autoinstall_in_use(self, name):
"""Return True when any profile or system references template *name*."""
for x in self.collection_mgr.profiles():
if x.autoinstall is not None and x.autoinstall == name:
return True
for x in self.collection_mgr.systems():
if x.autoinstall is not None and x.autoinstall == name:
return True
return False
def generate_autoinstall(self, profile=None, system=None):
"""Render the final automatic installation file for *system* (takes
precedence) or *profile*; returns None when both are None."""
if system is not None:
return self.autoinstallgen.generate_autoinstall_for_system(system)
elif profile is not None:
return self.autoinstallgen.generate_autoinstall_for_profile(profile)
def log_autoinstall_validation_errors(self, errors_type, errors):
"""
Log automatic installation file errors
@param int errors_type validation errors type
"""
if errors_type == TEMPLATING_ERROR:
self.logger.warning("Potential templating errors:")
for error in errors:
(line, col) = error["lineCol"]
line -= 1  # we add some lines to the template data, so numbering is off
self.logger.warning("Unknown variable found at line %d, column %d: '%s'" % (line, col, error["rawCode"]))
elif errors_type == KICKSTART_ERROR:
self.logger.warning("Kickstart validation errors: %s" % errors[0])
def validate_autoinstall_file(self, obj, is_profile):
"""
Validate automatic installation file used by a system/profile
@param Item obj system/profile
@param bool is_profile if obj is a profile
@return [bool, int, list] list with validation result, errors type and list of errors
"""
last_errors = []
blended = utils.blender(self.collection_mgr.api, False, obj)
# get automatic installation template
autoinstall = blended["autoinstall"]
if autoinstall is None or autoinstall == "":
self.logger.info("%s has no automatic installation template set, skipping" % obj.name)
return [True, None, None]
# generate automatic installation file
os_version = blended["os_version"]
self.logger.info("----------------------------")
self.logger.debug("osversion: %s" % os_version)
if is_profile:
self.generate_autoinstall(profile=obj)
else:
self.generate_autoinstall(system=obj)
last_errors = self.autoinstallgen.get_last_errors()
if len(last_errors) > 0:
return [False, TEMPLATING_ERROR, last_errors]
# NOTE(review): the success path falls through and implicitly returns
# None, yet callers unpack a 3-element list -- upstream returns
# [True, None, None] here; confirm this copy is not truncated.
def validate_autoinstall_files(self, logger=None):
"""
Determine if Cobbler automatic OS installation files will be accepted by
corresponding Linux distribution installers. The presence of an error
does not imply that the automatic installation file is bad, only that
the possibility exists. Automatic installation file validators are not
available for all automatic installation file types and on all operating
systems in which Cobbler may be installed.
@param Logger logger logger
@return bool if all automatic installation files are valid
"""
# NOTE(review): two suspected bugs here -- (1) overall_success is never
# initialised, so an all-valid run raises UnboundLocalError at the final
# check; (2) the profile loop sets overall_success = True on *failure*
# while the system loop sets False.  Upstream initialises it to True and
# sets False in both branches; confirm before fixing.
for x in self.collection_mgr.profiles():
(success, errors_type, errors) = self.validate_autoinstall_file(x, True)
if not success:
overall_success = True
if len(errors) > 0:
self.log_autoinstall_validation_errors(errors_type, errors)
for x in self.collection_mgr.systems():
(success, errors_type, errors) = self.validate_autoinstall_file(x, False)
if not success:
overall_success = False
if len(errors) > 0:
self.log_autoinstall_validation_errors(errors_type, errors)
if not overall_success:
self.logger.warning("*** potential errors detected in automatic installation files ***")
else:
self.logger.info("*** all automatic installation files seem to be ok ***")
return overall_success
| gpl-2.0 |
RockySteveJobs/python-for-android | python3-alpha/python3-src/Lib/test/test_descr.py | 46 | 148012 | import builtins
import sys
import types
import math
import unittest
from copy import deepcopy
from test import support
class OperatorsTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
# Map operator name -> surface syntax.  Lowercase values (divmod, cmp)
# are spelled as function calls; the rest are infix symbols.  Entries
# with no matching __dunder__ (e.g. cmp under Python 3) are simply
# skipped later by number_operators()'s hasattr() check.
self.binops = {
'add': '+',
'sub': '-',
'mul': '*',
'div': '/',
'divmod': 'divmod',
'pow': '**',
'lshift': '<<',
'rshift': '>>',
'and': '&',
'xor': '^',
'or': '|',
'cmp': 'cmp',
'lt': '<',
'le': '<=',
'eq': '==',
'ne': '!=',
'gt': '>',
'ge': '>=',
}
# Expand each entry into a full expression over operands a and b.
for name, expr in list(self.binops.items()):
if expr.islower():
expr = expr + "(a, b)"
else:
expr = 'a %s b' % expr
self.binops[name] = expr
# Same for the unary operators, over a single operand a.
self.unops = {
'pos': '+',
'neg': '-',
'abs': 'abs',
'invert': '~',
'int': 'int',
'float': 'float',
'oct': 'oct',
'hex': 'hex',
}
for name, expr in list(self.unops.items()):
if expr.islower():
expr = expr + "(a)"
else:
expr = '%s a' % expr
self.unops[name] = expr
def unop_test(self, a, res, expr="len(a)", meth="__len__"):
"""Check a unary operation three ways: eval of the surface syntax,
the function looked up on the defining class, and the bound method."""
d = {'a': a}
self.assertEqual(eval(expr, d), res)
t = type(a)
m = getattr(t, meth)
# Find method in parent class
while meth not in t.__dict__:
t = t.__bases__[0]
# in some implementations (e.g. PyPy), 'm' can be a regular unbound
# method object; the getattr() below obtains its underlying function.
self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
self.assertEqual(m(a), res)
bm = getattr(a, meth)
self.assertEqual(bm(), res)
def binop_test(self, a, b, res, expr="a+b", meth="__add__"):
"""Like unop_test, but for a binary operation on operands a and b."""
d = {'a': a, 'b': b}
# XXX Hack so this passes before 2.3 when -Qnew is specified.
if meth == "__div__" and 1/2 == 0.5:
meth = "__truediv__"
if meth == '__divmod__': pass
self.assertEqual(eval(expr, d), res)
t = type(a)
m = getattr(t, meth)
# Walk up to the class that actually defines the method.
while meth not in t.__dict__:
t = t.__bases__[0]
# in some implementations (e.g. PyPy), 'm' can be a regular unbound
# method object; the getattr() below obtains its underlying function.
self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
self.assertEqual(m(a, b), res)
bm = getattr(a, meth)
self.assertEqual(bm(b), res)
def sliceop_test(self, a, b, c, res, expr="a[b:c]", meth="__getitem__"):
"""Check slicing a[b:c] via eval, the class function (passed an
explicit slice object), and the bound method."""
d = {'a': a, 'b': b, 'c': c}
self.assertEqual(eval(expr, d), res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
# in some implementations (e.g. PyPy), 'm' can be a regular unbound
# method object; the getattr() below obtains its underlying function.
self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
self.assertEqual(m(a, slice(b, c)), res)
bm = getattr(a, meth)
self.assertEqual(bm(slice(b, c)), res)
def setop_test(self, a, b, res, stmt="a+=b", meth="__iadd__"):
"""Check an in-place binary operation; each variant works on a deep
copy of a so the mutations do not accumulate."""
d = {'a': deepcopy(a), 'b': b}
exec(stmt, d)
self.assertEqual(d['a'], res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
# in some implementations (e.g. PyPy), 'm' can be a regular unbound
# method object; the getattr() below obtains its underlying function.
self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
d['a'] = deepcopy(a)
m(d['a'], b)
self.assertEqual(d['a'], res)
d['a'] = deepcopy(a)
bm = getattr(d['a'], meth)
bm(b)
self.assertEqual(d['a'], res)
def set2op_test(self, a, b, c, res, stmt="a[b]=c", meth="__setitem__"):
"""Check a two-argument mutating operation (item assignment), again on
deep copies of a."""
d = {'a': deepcopy(a), 'b': b, 'c': c}
exec(stmt, d)
self.assertEqual(d['a'], res)
t = type(a)
m = getattr(t, meth)
while meth not in t.__dict__:
t = t.__bases__[0]
# in some implementations (e.g. PyPy), 'm' can be a regular unbound
# method object; the getattr() below obtains its underlying function.
self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
d['a'] = deepcopy(a)
m(d['a'], b, c)
self.assertEqual(d['a'], res)
d['a'] = deepcopy(a)
bm = getattr(d['a'], meth)
bm(b, c)
self.assertEqual(d['a'], res)
def setsliceop_test(self, a, b, c, d, res, stmt="a[b:c]=d", meth="__setitem__"):
"""Check slice assignment a[b:c] = d, passing an explicit slice object
to the direct-call variants."""
dictionary = {'a': deepcopy(a), 'b': b, 'c': c, 'd': d}
exec(stmt, dictionary)
self.assertEqual(dictionary['a'], res)
t = type(a)
while meth not in t.__dict__:
t = t.__bases__[0]
m = getattr(t, meth)
# in some implementations (e.g. PyPy), 'm' can be a regular unbound
# method object; the getattr() below obtains its underlying function.
self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
dictionary['a'] = deepcopy(a)
m(dictionary['a'], slice(b, c), d)
self.assertEqual(dictionary['a'], res)
dictionary['a'] = deepcopy(a)
bm = getattr(dictionary['a'], meth)
bm(slice(b, c), d)
self.assertEqual(dictionary['a'], res)
def test_lists(self):
# Testing list operations...
# Asserts are within individual test methods
# Drives the generic helpers above over the built-in list protocol.
self.binop_test([1], [2], [1,2], "a+b", "__add__")
self.binop_test([1,2,3], 2, 1, "b in a", "__contains__")
self.binop_test([1,2,3], 4, 0, "b in a", "__contains__")
self.binop_test([1,2,3], 1, 2, "a[b]", "__getitem__")
self.sliceop_test([1,2,3], 0, 2, [1,2], "a[b:c]", "__getitem__")
self.setop_test([1], [2], [1,2], "a+=b", "__iadd__")
self.setop_test([1,2], 3, [1,2,1,2,1,2], "a*=b", "__imul__")
self.unop_test([1,2,3], 3, "len(a)", "__len__")
self.binop_test([1,2], 3, [1,2,1,2,1,2], "a*b", "__mul__")
self.binop_test([1,2], 3, [1,2,1,2,1,2], "b*a", "__rmul__")
self.set2op_test([1,2], 1, 3, [1,3], "a[b]=c", "__setitem__")
self.setsliceop_test([1,2,3,4], 1, 3, [5,6], [1,5,6,4], "a[b:c]=d",
"__setitem__")
def test_dicts(self):
# Testing dict operations...
self.binop_test({1:2,3:4}, 1, 1, "b in a", "__contains__")
self.binop_test({1:2,3:4}, 2, 0, "b in a", "__contains__")
self.binop_test({1:2,3:4}, 1, 2, "a[b]", "__getitem__")
# The three iteration spellings below (keys(), iter(d), d.__iter__(),
# dict.__iter__(d)) must all visit the same keys in the same order.
d = {1:2, 3:4}
l1 = []
for i in list(d.keys()):
l1.append(i)
l = []
for i in iter(d):
l.append(i)
self.assertEqual(l, l1)
l = []
for i in d.__iter__():
l.append(i)
self.assertEqual(l, l1)
l = []
for i in dict.__iter__(d):
l.append(i)
self.assertEqual(l, l1)
d = {1:2, 3:4}
self.unop_test(d, 2, "len(a)", "__len__")
# repr() of a dict must round-trip through eval().
self.assertEqual(eval(repr(d), {}), d)
self.assertEqual(eval(d.__repr__(), {}), d)
self.set2op_test({1:2,3:4}, 2, 3, {1:2,2:3,3:4}, "a[b]=c",
"__setitem__")
# Tests for unary and binary operators
def number_operators(self, a, b, skip=[]):
"""Run every applicable entry of self.binops/self.unops over operands
a and b, skipping names in *skip* and names whose __dunder__ the type
does not implement."""
dict = {'a': a, 'b': b}
for name, expr in list(self.binops.items()):
if name not in skip:
name = "__%s__" % name
if hasattr(a, name):
res = eval(expr, dict)
self.binop_test(a, b, res, expr, name)
for name, expr in list(self.unops.items()):
if name not in skip:
name = "__%s__" % name
if hasattr(a, name):
res = eval(expr, dict)
self.unop_test(a, res, expr, name)
def test_ints(self):
    """Integer operator slots, plus regressions for historical 2.2-era
    bugs around __bool__ and NotImplemented handling."""
    # Testing int operations...
    self.number_operators(100, 3)
    # The following crashes in Python 2.2
    self.assertEqual((1).__bool__(), 1)
    self.assertEqual((0).__bool__(), 0)
    # This returns 'NotImplemented' in Python 2.2
    class C(int):
        def __add__(self, other):
            return NotImplemented
    self.assertEqual(C(5), 5)
    # Both operands returning NotImplemented must surface as TypeError,
    # not leak the NotImplemented singleton to the caller.
    try:
        C() + ""
    except TypeError:
        pass
    else:
        self.fail("NotImplemented should have caused TypeError")
def test_floats(self):
    """Run the generic numeric-operator checks against floats."""
    # Testing float operations...
    self.number_operators(100.0, 3.0)
def test_complexes(self):
    """Complex operator slots (ordering/rounding ops don't apply), and
    a complex subclass with __slots__ carrying extra state (prec) plus
    a customized repr()."""
    # Testing complex operations...
    self.number_operators(100.0j, 3.0j, skip=['lt', 'le', 'gt', 'ge',
                                              'int', 'float',
                                              'divmod', 'mod'])
    class Number(complex):
        __slots__ = ['prec']
        def __new__(cls, *args, **kwds):
            result = complex.__new__(cls, *args)
            result.prec = kwds.get('prec', 12)
            return result
        def __repr__(self):
            prec = self.prec
            # Suppress a zero component, mirroring complex.__repr__.
            if self.imag == 0.0:
                return "%.*g" % (prec, self.real)
            if self.real == 0.0:
                return "%.*gj" % (prec, self.imag)
            return "(%.*g+%.*gj)" % (prec, self.real, prec, self.imag)
        __str__ = __repr__
    a = Number(3.14, prec=6)
    self.assertEqual(repr(a), "3.14")
    self.assertEqual(a.prec, 6)
    # Constructing from another Number does not copy its precision; the
    # keyword (or the default 12) wins.
    a = Number(a, prec=2)
    self.assertEqual(repr(a), "3.1")
    self.assertEqual(a.prec, 2)
    a = Number(234.5)
    self.assertEqual(repr(a), "234.5")
    self.assertEqual(a.prec, 12)
def test_explicit_reverse_methods(self):
# see issue 9930
self.assertEqual(complex.__radd__(3j, 4.0), complex(4.0, 3.0))
self.assertEqual(float.__rsub__(3.0, 1), -2.0)
@support.impl_detail("the module 'xxsubtype' is internal")
def test_spam_lists(self):
    """Run the list-protocol checks against xxsubtype.spamlist, a
    C-implemented list subtype, then verify Python-level subclassing
    of it (including its getstate/setstate extras)."""
    # Testing spamlist operations...
    import copy, xxsubtype as spam
    def spamlist(l, memo=None):
        import xxsubtype as spam
        return spam.spamlist(l)
    # This is an ugly hack: teach deepcopy how to copy a spamlist so
    # the *op_test helpers (which deepcopy their arguments) work.
    copy._deepcopy_dispatch[spam.spamlist] = spamlist
    self.binop_test(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+b",
                    "__add__")
    self.binop_test(spamlist([1,2,3]), 2, 1, "b in a", "__contains__")
    self.binop_test(spamlist([1,2,3]), 4, 0, "b in a", "__contains__")
    self.binop_test(spamlist([1,2,3]), 1, 2, "a[b]", "__getitem__")
    self.sliceop_test(spamlist([1,2,3]), 0, 2, spamlist([1,2]), "a[b:c]",
                      "__getitem__")
    self.setop_test(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+=b",
                    "__iadd__")
    self.setop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*=b",
                    "__imul__")
    self.unop_test(spamlist([1,2,3]), 3, "len(a)", "__len__")
    self.binop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*b",
                    "__mul__")
    self.binop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "b*a",
                    "__rmul__")
    self.set2op_test(spamlist([1,2]), 1, 3, spamlist([1,3]), "a[b]=c",
                     "__setitem__")
    self.setsliceop_test(spamlist([1,2,3,4]), 1, 3, spamlist([5,6]),
                         spamlist([1,5,6,4]), "a[b:c]=d", "__setitem__")
    # Test subclassing
    class C(spam.spamlist):
        def foo(self): return 1
    a = C()
    self.assertEqual(a, [])
    self.assertEqual(a.foo(), 1)
    a.append(100)
    self.assertEqual(a, [100])
    self.assertEqual(a.getstate(), 0)
    a.setstate(42)
    self.assertEqual(a.getstate(), 42)
@support.impl_detail("the module 'xxsubtype' is internal")
def test_spam_dicts(self):
    """Run the dict-protocol checks against xxsubtype.spamdict, a
    C-implemented dict subtype, then verify Python-level subclassing
    of it (including its getstate/setstate extras)."""
    # Testing spamdict operations...
    import copy, xxsubtype as spam
    def spamdict(d, memo=None):
        import xxsubtype as spam
        sd = spam.spamdict()
        for k, v in list(d.items()):
            sd[k] = v
        return sd
    # This is an ugly hack: teach deepcopy how to copy a spamdict so
    # the *op_test helpers (which deepcopy their arguments) work.
    copy._deepcopy_dispatch[spam.spamdict] = spamdict
    self.binop_test(spamdict({1:2,3:4}), 1, 1, "b in a", "__contains__")
    self.binop_test(spamdict({1:2,3:4}), 2, 0, "b in a", "__contains__")
    self.binop_test(spamdict({1:2,3:4}), 1, 2, "a[b]", "__getitem__")
    d = spamdict({1:2,3:4})
    l1 = []
    for i in list(d.keys()):
        l1.append(i)
    # All three iteration spellings must agree with list(d.keys()).
    l = []
    for i in iter(d):
        l.append(i)
    self.assertEqual(l, l1)
    l = []
    for i in d.__iter__():
        l.append(i)
    self.assertEqual(l, l1)
    l = []
    for i in type(spamdict({})).__iter__(d):
        l.append(i)
    self.assertEqual(l, l1)
    straightd = {1:2, 3:4}
    spamd = spamdict(straightd)
    self.unop_test(spamd, 2, "len(a)", "__len__")
    self.unop_test(spamd, repr(straightd), "repr(a)", "__repr__")
    self.set2op_test(spamdict({1:2,3:4}), 2, 3, spamdict({1:2,2:3,3:4}),
                     "a[b]=c", "__setitem__")
    # Test subclassing
    class C(spam.spamdict):
        def foo(self): return 1
    a = C()
    self.assertEqual(list(a.items()), [])
    self.assertEqual(a.foo(), 1)
    a['foo'] = 'bar'
    self.assertEqual(list(a.items()), [('foo', 'bar')])
    self.assertEqual(a.getstate(), 0)
    a.setstate(100)
    self.assertEqual(a.getstate(), 100)
class ClassPropertiesAndMethods(unittest.TestCase):
def test_python_dicts(self):
# Testing Python subclass of dict...
self.assertTrue(issubclass(dict, dict))
self.assertIsInstance({}, dict)
d = dict()
self.assertEqual(d, {})
self.assertTrue(d.__class__ is dict)
self.assertIsInstance(d, dict)
class C(dict):
state = -1
def __init__(self_local, *a, **kw):
if a:
self.assertEqual(len(a), 1)
self_local.state = a[0]
if kw:
for k, v in list(kw.items()):
self_local[v] = k
def __getitem__(self, key):
return self.get(key, 0)
def __setitem__(self_local, key, value):
self.assertIsInstance(key, type(0))
dict.__setitem__(self_local, key, value)
def setstate(self, state):
self.state = state
def getstate(self):
return self.state
self.assertTrue(issubclass(C, dict))
a1 = C(12)
self.assertEqual(a1.state, 12)
a2 = C(foo=1, bar=2)
self.assertEqual(a2[1] == 'foo' and a2[2], 'bar')
a = C()
self.assertEqual(a.state, -1)
self.assertEqual(a.getstate(), -1)
a.setstate(0)
self.assertEqual(a.state, 0)
self.assertEqual(a.getstate(), 0)
a.setstate(10)
self.assertEqual(a.state, 10)
self.assertEqual(a.getstate(), 10)
self.assertEqual(a[42], 0)
a[42] = 24
self.assertEqual(a[42], 24)
N = 50
for i in range(N):
a[i] = C()
for j in range(N):
a[i][j] = i*j
for i in range(N):
for j in range(N):
self.assertEqual(a[i][j], i*j)
def test_python_lists(self):
# Testing Python subclass of list...
class C(list):
def __getitem__(self, i):
if isinstance(i, slice):
return i.start, i.stop
return list.__getitem__(self, i) + 100
a = C()
a.extend([0,1,2])
self.assertEqual(a[0], 100)
self.assertEqual(a[1], 101)
self.assertEqual(a[2], 102)
self.assertEqual(a[100:200], (100,200))
def test_metaclass(self):
    """Metaclass machinery: the metaclass= keyword, custom __new__ on a
    type subclass, a non-type metaclass, cooperating metaclasses
    (autosuper/autoproperty), type.__init__ call counts, and derived-
    metaclass computation."""
    # Testing metaclasses...
    class C(metaclass=type):
        def __init__(self):
            self.__state = 0
        def getstate(self):
            return self.__state
        def setstate(self, state):
            self.__state = state
    a = C()
    self.assertEqual(a.getstate(), 0)
    a.setstate(10)
    self.assertEqual(a.getstate(), 10)
    class _metaclass(type):
        def myself(cls): return cls
    class D(metaclass=_metaclass):
        pass
    self.assertEqual(D.myself(), D)
    d = D()
    self.assertEqual(d.__class__, D)
    # A metaclass __new__ can mutate the namespace before type creation.
    class M1(type):
        def __new__(cls, name, bases, dict):
            dict['__spam__'] = 1
            return type.__new__(cls, name, bases, dict)
    class C(metaclass=M1):
        pass
    self.assertEqual(C.__spam__, 1)
    c = C()
    self.assertEqual(c.__spam__, 1)
    # A metaclass need not subclass type at all: M2 instances act as
    # "classes" by implementing __call__ to build instances by hand.
    class _instance(object):
        pass
    class M2(object):
        @staticmethod
        def __new__(cls, name, bases, dict):
            self = object.__new__(cls)
            self.name = name
            self.bases = bases
            self.dict = dict
            return self
        def __call__(self):
            it = _instance()
            # Early binding of methods
            for key in self.dict:
                if key.startswith("__"):
                    continue
                setattr(it, key, self.dict[key].__get__(it, self))
            return it
    class C(metaclass=M2):
        def spam(self):
            return 42
    self.assertEqual(C.name, 'C')
    self.assertEqual(C.bases, ())
    self.assertIn('spam', C.dict)
    c = C()
    self.assertEqual(c.spam(), 42)
    # More metaclass examples
    class autosuper(type):
        # Automatically add __super to the class
        # This trick only works for dynamic classes
        def __new__(metaclass, name, bases, dict):
            cls = super(autosuper, metaclass).__new__(metaclass,
                                                      name, bases, dict)
            # Name mangling for __super removes leading underscores
            while name[:1] == "_":
                name = name[1:]
            if name:
                name = "_%s__super" % name
            else:
                name = "__super"
            setattr(cls, name, super(cls))
            return cls
    class A(metaclass=autosuper):
        def meth(self):
            return "A"
    class B(A):
        def meth(self):
            return "B" + self.__super.meth()
    class C(A):
        def meth(self):
            return "C" + self.__super.meth()
    class D(C, B):
        def meth(self):
            return "D" + self.__super.meth()
    self.assertEqual(D().meth(), "DCBA")
    class E(B, C):
        def meth(self):
            return "E" + self.__super.meth()
    self.assertEqual(E().meth(), "EBCA")
    class autoproperty(type):
        # Automatically create property attributes when methods
        # named _get_x and/or _set_x are found
        def __new__(metaclass, name, bases, dict):
            hits = {}
            for key, val in dict.items():
                if key.startswith("_get_"):
                    key = key[5:]
                    get, set = hits.get(key, (None, None))
                    get = val
                    hits[key] = get, set
                elif key.startswith("_set_"):
                    key = key[5:]
                    get, set = hits.get(key, (None, None))
                    set = val
                    hits[key] = get, set
            for key, (get, set) in hits.items():
                dict[key] = property(get, set)
            return super(autoproperty, metaclass).__new__(metaclass,
                                                          name, bases, dict)
    class A(metaclass=autoproperty):
        def _get_x(self):
            return -self.__x
        def _set_x(self, x):
            self.__x = -x
    a = A()
    self.assertTrue(not hasattr(a, "x"))
    a.x = 12
    self.assertEqual(a.x, 12)
    # The setter stored the negated value under the mangled name.
    self.assertEqual(a._A__x, -12)
    class multimetaclass(autoproperty, autosuper):
        # Merge of multiple cooperating metaclasses
        pass
    class A(metaclass=multimetaclass):
        def _get_x(self):
            return "A"
    class B(A):
        def _get_x(self):
            return "B" + self.__super._get_x()
    class C(A):
        def _get_x(self):
            return "C" + self.__super._get_x()
    class D(C, B):
        def _get_x(self):
            return "D" + self.__super._get_x()
    self.assertEqual(D().x, "DCBA")
    # Make sure type(x) doesn't call x.__class__.__init__
    class T(type):
        counter = 0
        def __init__(self, *args):
            T.counter += 1
    class C(metaclass=T):
        pass
    self.assertEqual(T.counter, 1)
    a = C()
    self.assertEqual(type(a), C)
    self.assertEqual(T.counter, 1)
    class C(object): pass
    c = C()
    try: c()
    except TypeError: pass
    else: self.fail("calling object w/o call method should raise "
                    "TypeError")
    # Testing code to find most derived baseclass
    class A(type):
        def __new__(*args, **kwargs):
            return type.__new__(*args, **kwargs)
    class B(object):
        pass
    class C(object, metaclass=A):
        pass
    # The most derived metaclass of D is A rather than type.
    class D(B, C):
        pass
def test_module_subclasses(self):
    """Subclassing the module type: attribute hooks fire in order, and
    a class cannot inherit from both ModuleType and str (conflicting
    instance layouts)."""
    # Testing Python subclass of module...
    log = []
    MT = type(sys)
    class MM(MT):
        def __init__(self, name):
            MT.__init__(self, name)
        def __getattribute__(self, name):
            log.append(("getattr", name))
            return MT.__getattribute__(self, name)
        def __setattr__(self, name, value):
            log.append(("setattr", name, value))
            MT.__setattr__(self, name, value)
        def __delattr__(self, name):
            log.append(("delattr", name))
            MT.__delattr__(self, name)
    a = MM("a")
    a.foo = 12
    x = a.foo
    del a.foo
    self.assertEqual(log, [("setattr", "foo", 12),
                           ("getattr", "foo"),
                           ("delattr", "foo")])
    # http://python.org/sf/1174712
    try:
        class Module(types.ModuleType, str):
            pass
    except TypeError:
        pass
    else:
        self.fail("inheriting from ModuleType and str at the same time "
                  "should fail")
def test_multiple_inheritance(self):
    """Multiple inheritance from dict plus a plain class: both halves
    of the instance keep working; also SF #442833 (method resolution
    for __int__/foo across a Node/list mix)."""
    # Testing multiple inheritance...
    class C(object):
        def __init__(self):
            self.__state = 0
        def getstate(self):
            return self.__state
        def setstate(self, state):
            self.__state = state
    a = C()
    self.assertEqual(a.getstate(), 0)
    a.setstate(10)
    self.assertEqual(a.getstate(), 10)
    class D(dict, C):
        def __init__(self):
            # Initialize both bases explicitly (type({}) is dict).
            type({}).__init__(self)
            C.__init__(self)
    d = D()
    self.assertEqual(list(d.keys()), [])
    d["hello"] = "world"
    self.assertEqual(list(d.items()), [("hello", "world")])
    self.assertEqual(d["hello"], "world")
    self.assertEqual(d.getstate(), 0)
    d.setstate(10)
    self.assertEqual(d.getstate(), 10)
    self.assertEqual(D.__mro__, (D, dict, C, object))
    # SF bug #442833
    class Node(object):
        def __int__(self):
            return int(self.foo())
        def foo(self):
            return "23"
    class Frag(Node, list):
        def foo(self):
            return "42"
    self.assertEqual(Node().__int__(), 23)
    self.assertEqual(int(Node()), 23)
    self.assertEqual(Frag().__int__(), 42)
    self.assertEqual(int(Frag()), 42)
def test_diamond_inheritence(self):
    """Diamond hierarchies resolve via the C3 MRO; inconsistent base
    orderings must raise TypeError.  (The method name keeps its
    historical misspelling so the test id stays stable.)"""
    # Testing multiple inheritance special cases...
    class A(object):
        def spam(self): return "A"
    self.assertEqual(A().spam(), "A")
    class B(A):
        def boo(self): return "B"
        def spam(self): return "B"
    self.assertEqual(B().spam(), "B")
    self.assertEqual(B().boo(), "B")
    class C(A):
        def boo(self): return "C"
    self.assertEqual(C().spam(), "A")
    self.assertEqual(C().boo(), "C")
    class D(B, C): pass
    self.assertEqual(D().spam(), "B")
    self.assertEqual(D().boo(), "B")
    self.assertEqual(D.__mro__, (D, B, C, A, object))
    class E(C, B): pass
    self.assertEqual(E().spam(), "B")
    self.assertEqual(E().boo(), "C")
    self.assertEqual(E.__mro__, (E, C, B, A, object))
    # MRO order disagreement: D wants B before C, E wants C before B,
    # so no consistent linearization exists for F or G.
    try:
        class F(D, E): pass
    except TypeError:
        pass
    else:
        self.fail("expected MRO order disagreement (F)")
    try:
        class G(E, D): pass
    except TypeError:
        pass
    else:
        self.fail("expected MRO order disagreement (G)")
# see thread python-dev/2002-October/029035.html
def test_ex5_from_c3_switch(self):
# Testing ex5 from C3 switch discussion...
class A(object): pass
class B(object): pass
class C(object): pass
class X(A): pass
class Y(A): pass
class Z(X,B,Y,C): pass
self.assertEqual(Z.__mro__, (Z, X, B, Y, A, C, object))
# see "A Monotonic Superclass Linearization for Dylan",
# by Kim Barrett et al. (OOPSLA 1996)
def test_monotonicity(self):
# Testing MRO monotonicity...
class Boat(object): pass
class DayBoat(Boat): pass
class WheelBoat(Boat): pass
class EngineLess(DayBoat): pass
class SmallMultihull(DayBoat): pass
class PedalWheelBoat(EngineLess,WheelBoat): pass
class SmallCatamaran(SmallMultihull): pass
class Pedalo(PedalWheelBoat,SmallCatamaran): pass
self.assertEqual(PedalWheelBoat.__mro__,
(PedalWheelBoat, EngineLess, DayBoat, WheelBoat, Boat, object))
self.assertEqual(SmallCatamaran.__mro__,
(SmallCatamaran, SmallMultihull, DayBoat, Boat, object))
self.assertEqual(Pedalo.__mro__,
(Pedalo, PedalWheelBoat, EngineLess, SmallCatamaran,
SmallMultihull, DayBoat, WheelBoat, Boat, object))
# see "A Monotonic Superclass Linearization for Dylan",
# by Kim Barrett et al. (OOPSLA 1996)
def test_consistency_with_epg(self):
# Testing consistency with EPG...
class Pane(object): pass
class ScrollingMixin(object): pass
class EditingMixin(object): pass
class ScrollablePane(Pane,ScrollingMixin): pass
class EditablePane(Pane,EditingMixin): pass
class EditableScrollablePane(ScrollablePane,EditablePane): pass
self.assertEqual(EditableScrollablePane.__mro__,
(EditableScrollablePane, ScrollablePane, EditablePane, Pane,
ScrollingMixin, EditingMixin, object))
def test_mro_disagreement(self):
    """Duplicate bases and inconsistent hierarchies must raise
    TypeError; on CPython the message has a stable, checkable prefix."""
    # Testing error messages for MRO disagreement...
    mro_err_msg = """Cannot create a consistent method resolution
order (MRO) for bases """
    def raises(exc, expected, callable, *args):
        # Assert callable(*args) raises exc; only check the message
        # prefix on CPython, where the wording is known.
        try:
            callable(*args)
        except exc as msg:
            # the exact msg is generally considered an impl detail
            if support.check_impl_detail():
                if not str(msg).startswith(expected):
                    self.fail("Message %r, expected %r" %
                              (str(msg), expected))
        else:
            self.fail("Expected %s" % exc)
    class A(object): pass
    class B(A): pass
    class C(object): pass
    # Test some very simple errors
    raises(TypeError, "duplicate base class A",
           type, "X", (A, A), {})
    raises(TypeError, mro_err_msg,
           type, "X", (A, B), {})
    raises(TypeError, mro_err_msg,
           type, "X", (A, C, B), {})
    # Test a slightly more complex error
    class GridLayout(object): pass
    class HorizontalGrid(GridLayout): pass
    class VerticalGrid(GridLayout): pass
    class HVGrid(HorizontalGrid, VerticalGrid): pass
    class VHGrid(VerticalGrid, HorizontalGrid): pass
    raises(TypeError, mro_err_msg,
           type, "ConfusedGrid", (HVGrid, VHGrid), {})
def test_object_class(self):
    """Bare object() instances have no __dict__ and reject attribute
    assignment; an empty subclass restores the instance __dict__."""
    # Testing object class...
    a = object()
    self.assertEqual(a.__class__, object)
    self.assertEqual(type(a), object)
    b = object()
    self.assertNotEqual(a, b)    # distinct instances compare unequal
    self.assertFalse(hasattr(a, "foo"))
    try:
        a.foo = 12
    except (AttributeError, TypeError):
        pass
    else:
        self.fail("object() should not allow setting a foo attribute")
    self.assertFalse(hasattr(object(), "__dict__"))
    class Cdict(object):
        pass
    x = Cdict()
    self.assertEqual(x.__dict__, {})
    x.foo = 1
    self.assertEqual(x.foo, 1)
    self.assertEqual(x.__dict__, {'foo': 1})
def test_slots(self):
    """__slots__ basics: attribute storage without a __dict__, slot-name
    validation, name mangling, single-string slots, inheritance, and
    absence of reference leaks (plain, cyclic, and lookup)."""
    # Testing __slots__...
    class C0(object):
        __slots__ = []
    x = C0()
    self.assertFalse(hasattr(x, "__dict__"))
    self.assertFalse(hasattr(x, "foo"))
    class C1(object):
        __slots__ = ['a']
    x = C1()
    self.assertFalse(hasattr(x, "__dict__"))
    self.assertFalse(hasattr(x, "a"))   # slot exists but is unset
    x.a = 1
    self.assertEqual(x.a, 1)
    x.a = None
    self.assertEqual(x.a, None)
    del x.a
    self.assertFalse(hasattr(x, "a"))
    class C3(object):
        __slots__ = ['a', 'b', 'c']
    x = C3()
    self.assertFalse(hasattr(x, "__dict__"))
    self.assertFalse(hasattr(x, 'a'))
    self.assertFalse(hasattr(x, 'b'))
    self.assertFalse(hasattr(x, 'c'))
    x.a = 1
    x.b = 2
    x.c = 3
    self.assertEqual(x.a, 1)
    self.assertEqual(x.b, 2)
    self.assertEqual(x.c, 3)
    class C4(object):
        """Validate name mangling"""
        __slots__ = ['__a']
        def __init__(self, value):
            self.__a = value
        def get(self):
            return self.__a
    x = C4(5)
    self.assertFalse(hasattr(x, '__dict__'))
    self.assertFalse(hasattr(x, '__a'))
    self.assertEqual(x.get(), 5)
    try:
        x.__a = 6
    except AttributeError:
        pass
    else:
        self.fail("Double underscored names not mangled")
    # Make sure slot names are proper identifiers
    try:
        class C(object):
            __slots__ = [None]
    except TypeError:
        pass
    else:
        self.fail("[None] slots not caught")
    try:
        class C(object):
            __slots__ = ["foo bar"]
    except TypeError:
        pass
    else:
        self.fail("['foo bar'] slots not caught")
    try:
        class C(object):
            __slots__ = ["foo\0bar"]
    except TypeError:
        pass
    else:
        self.fail("['foo\\0bar'] slots not caught")
    try:
        class C(object):
            __slots__ = ["1"]
    except TypeError:
        pass
    else:
        self.fail("['1'] slots not caught")
    try:
        class C(object):
            __slots__ = [""]
    except TypeError:
        pass
    else:
        self.fail("[''] slots not caught")
    class C(object):
        __slots__ = ["a", "a_b", "_a", "A0123456789Z"]
    # XXX(nnorwitz): was there supposed to be something tested
    # from the class above?
    # Test a single string is not expanded as a sequence.
    class C(object):
        __slots__ = "abc"
    c = C()
    c.abc = 5
    self.assertEqual(c.abc, 5)
    # Test unicode slot names
    # Test a single unicode string is not expanded as a sequence.
    class C(object):
        __slots__ = "abc"
    c = C()
    c.abc = 5
    self.assertEqual(c.abc, 5)
    # _unicode_to_string used to modify slots in certain circumstances
    slots = ("foo", "bar")
    class C(object):
        __slots__ = slots
    x = C()
    x.foo = 5
    self.assertEqual(x.foo, 5)
    self.assertTrue(type(slots[0]) is str)
    # this used to leak references
    try:
        class C(object):
            __slots__ = [chr(128)]
    except (TypeError, UnicodeEncodeError):
        pass
    else:
        # FIX: was `raise TestFailed(...)`, a leftover legacy helper
        # name; use self.fail for consistency with every branch above.
        self.fail("[chr(128)] slots not caught")
    # Test leaks
    class Counted(object):
        counter = 0    # counts the number of instances alive
        def __init__(self):
            Counted.counter += 1
        def __del__(self):
            Counted.counter -= 1
    class C(object):
        __slots__ = ['a', 'b', 'c']
    x = C()
    x.a = Counted()
    x.b = Counted()
    x.c = Counted()
    self.assertEqual(Counted.counter, 3)
    del x
    support.gc_collect()
    self.assertEqual(Counted.counter, 0)
    class D(C):
        pass
    x = D()
    x.a = Counted()
    x.z = Counted()   # D has a __dict__, so arbitrary names work
    self.assertEqual(Counted.counter, 2)
    del x
    support.gc_collect()
    self.assertEqual(Counted.counter, 0)
    class E(D):
        __slots__ = ['e']
    x = E()
    x.a = Counted()
    x.z = Counted()
    x.e = Counted()
    self.assertEqual(Counted.counter, 3)
    del x
    support.gc_collect()
    self.assertEqual(Counted.counter, 0)
    # Test cyclical leaks [SF bug 519621]
    class F(object):
        __slots__ = ['a', 'b']
    s = F()
    s.a = [Counted(), s]
    self.assertEqual(Counted.counter, 1)
    s = None
    support.gc_collect()
    self.assertEqual(Counted.counter, 0)
    # Test lookup leaks [SF bug 572567]
    import gc
    if hasattr(gc, 'get_objects'):
        class G(object):
            def __eq__(self, other):
                return False
        g = G()
        orig_objects = len(gc.get_objects())
        for i in range(10):
            g==g
        new_objects = len(gc.get_objects())
        self.assertEqual(orig_objects, new_objects)
    class H(object):
        __slots__ = ['a', 'b']
        def __init__(self):
            self.a = 1
            self.b = 2
        def __del__(self_):
            # Slots must still be readable during finalization.
            self.assertEqual(self_.a, 1)
            self.assertEqual(self_.b, 2)
    with support.captured_output('stderr') as s:
        h = H()
        del h
    self.assertEqual(s.getvalue(), '')
    class X(object):
        __slots__ = "a"
    with self.assertRaises(AttributeError):
        del X().a   # deleting an unset slot
def test_slots_special(self):
    """'__dict__' and '__weakref__' may themselves appear in __slots__,
    individually or combined via multiple inheritance (either base
    order)."""
    # Testing __dict__ and __weakref__ in __slots__...
    class D(object):
        __slots__ = ["__dict__"]
    a = D()
    self.assertTrue(hasattr(a, "__dict__"))
    self.assertFalse(hasattr(a, "__weakref__"))
    a.foo = 42
    self.assertEqual(a.__dict__, {"foo": 42})
    class W(object):
        __slots__ = ["__weakref__"]
    a = W()
    self.assertTrue(hasattr(a, "__weakref__"))
    self.assertFalse(hasattr(a, "__dict__"))
    try:
        a.foo = 42
    except AttributeError:
        pass
    else:
        self.fail("shouldn't be allowed to set a.foo")
    class C1(W, D):
        __slots__ = []
    a = C1()
    self.assertTrue(hasattr(a, "__dict__"))
    self.assertTrue(hasattr(a, "__weakref__"))
    a.foo = 42
    self.assertEqual(a.__dict__, {"foo": 42})
    class C2(D, W):
        __slots__ = []
    a = C2()
    self.assertTrue(hasattr(a, "__dict__"))
    self.assertTrue(hasattr(a, "__weakref__"))
    a.foo = 42
    self.assertEqual(a.__dict__, {"foo": 42})
def test_slots_descriptor(self):
    """Issue #2115: a slot descriptor must really type-check the given
    object, even when isinstance() is fooled by ABC registration."""
    # Issue2115: slot descriptors did not correctly check
    # the type of the given object
    import abc
    class MyABC(metaclass=abc.ABCMeta):
        __slots__ = "a"
    class Unrelated(object):
        pass
    MyABC.register(Unrelated)
    u = Unrelated()
    self.assertIsInstance(u, MyABC)
    # This used to crash
    self.assertRaises(TypeError, MyABC.a.__set__, u, 3)
def test_dynamics(self):
    """Attributes and special methods added to a class after creation
    take effect immediately, on the class, its subclasses, and
    already-created instances."""
    # Testing class attribute propagation...
    class D(object):
        pass
    class E(D):
        pass
    class F(D):
        pass
    D.foo = 1
    self.assertEqual(D.foo, 1)
    # Test that dynamic attributes are inherited
    self.assertEqual(E.foo, 1)
    self.assertEqual(F.foo, 1)
    # Test dynamic instances
    class C(object):
        pass
    a = C()
    self.assertFalse(hasattr(a, "foobar"))
    C.foobar = 2
    self.assertEqual(a.foobar, 2)
    C.method = lambda self: 42
    self.assertEqual(a.method(), 42)
    # Special methods patched onto the class are picked up too.
    C.__repr__ = lambda self: "C()"
    self.assertEqual(repr(a), "C()")
    C.__int__ = lambda self: 100
    self.assertEqual(int(a), 100)
    self.assertEqual(a.foobar, 2)
    self.assertFalse(hasattr(a, "spam"))
    def mygetattr(self, name):
        if name == "spam":
            return "spam"
        raise AttributeError
    C.__getattr__ = mygetattr
    self.assertEqual(a.spam, "spam")
    a.new = 12
    self.assertEqual(a.new, 12)
    def mysetattr(self, name, value):
        if name == "spam":
            raise AttributeError
        return object.__setattr__(self, name, value)
    C.__setattr__ = mysetattr
    try:
        a.spam = "not spam"
    except AttributeError:
        pass
    else:
        self.fail("expected AttributeError")
    self.assertEqual(a.spam, "spam")
    class D(C):
        pass
    d = D()
    d.foo = 1
    self.assertEqual(d.foo, 1)
    # Test handling of int*seq and seq*int
    class I(int):
        pass
    self.assertEqual("a"*I(2), "aa")
    self.assertEqual(I(2)*"a", "aa")
    self.assertEqual(2*I(3), 6)
    self.assertEqual(I(3)*2, 6)
    self.assertEqual(I(3)*I(2), 6)
    # Test comparison of classes with dynamic metaclasses
    class dynamicmetaclass(type):
        pass
    class someclass(metaclass=dynamicmetaclass):
        pass
    self.assertNotEqual(someclass, object)
def test_errors(self):
    """Illegal class statements must raise TypeError: incompatible
    instance layouts, a non-type base, bad __slots__ values, and
    unrelated metaclasses."""
    # Testing errors...
    try:
        class C(list, dict):
            pass
    except TypeError:
        pass
    else:
        self.fail("inheritance from both list and dict should be illegal")
    try:
        class C(object, None):
            pass
    except TypeError:
        pass
    else:
        self.fail("inheritance from non-type should be illegal")
    # NOTE(review): Classic appears unused below -- presumably a
    # leftover from the Python 2 version of this test.
    class Classic:
        pass
    try:
        class C(type(len)):
            pass
    except TypeError:
        pass
    else:
        self.fail("inheritance from CFunction should be illegal")
    try:
        class C(object):
            __slots__ = 1
    except TypeError:
        pass
    else:
        self.fail("__slots__ = 1 should be illegal")
    try:
        class C(object):
            __slots__ = [1]
    except TypeError:
        pass
    else:
        self.fail("__slots__ = [1] should be illegal")
    class M1(type):
        pass
    class M2(type):
        pass
    class A1(object, metaclass=M1):
        pass
    class A2(object, metaclass=M2):
        pass
    try:
        class B(A1, A2):
            pass
    except TypeError:
        pass
    else:
        self.fail("finding the most derived metaclass should have failed")
def test_classmethods(self):
    """classmethod(): binding via class and instance, inheritance, the
    descriptor protocol (SF #528132), super() interaction (SF #535444),
    non-callable wrapping, and keyword-arg rejection."""
    # Testing class methods...
    class C(object):
        def foo(*a): return a
        goo = classmethod(foo)
    c = C()
    self.assertEqual(C.goo(1), (C, 1))
    self.assertEqual(c.goo(1), (C, 1))
    self.assertEqual(c.foo(1), (c, 1))
    class D(C):
        pass
    d = D()
    # The bound class is the class the lookup went through, not C.
    self.assertEqual(D.goo(1), (D, 1))
    self.assertEqual(d.goo(1), (D, 1))
    self.assertEqual(d.foo(1), (d, 1))
    self.assertEqual(D.foo(d, 1), (d, 1))
    # Test for a specific crash (SF bug 528132)
    def f(cls, arg): return (cls, arg)
    ff = classmethod(f)
    self.assertEqual(ff.__get__(0, int)(42), (int, 42))
    # With no owner argument, the class is taken from type(obj).
    self.assertEqual(ff.__get__(0)(42), (int, 42))
    # Test super() with classmethods (SF bug 535444)
    self.assertEqual(C.goo.__self__, C)
    self.assertEqual(D.goo.__self__, D)
    self.assertEqual(super(D,D).goo.__self__, D)
    self.assertEqual(super(D,d).goo.__self__, D)
    self.assertEqual(super(D,D).goo(), (D,))
    self.assertEqual(super(D,d).goo(), (D,))
    # Verify that a non-callable will raise
    meth = classmethod(1).__get__(1)
    self.assertRaises(TypeError, meth)
    # Verify that classmethod() doesn't allow keyword args
    try:
        classmethod(f, kw=1)
    except TypeError:
        pass
    else:
        self.fail("classmethod shouldn't accept keyword args")
@support.impl_detail("the module 'xxsubtype' is internal")
def test_classmethods_in_c(self):
    """C-implemented classmethods (xxsubtype.spamlist.classmeth) must
    receive the class plus pass-through *args/**kwargs, whether called
    via the class or an instance."""
    # Testing C-based class methods...
    import xxsubtype as spam
    a = (1, 2, 3)
    d = {'abc': 123}
    x, a1, d1 = spam.spamlist.classmeth(*a, **d)
    self.assertEqual(x, spam.spamlist)
    self.assertEqual(a, a1)
    self.assertEqual(d, d1)
    x, a1, d1 = spam.spamlist().classmeth(*a, **d)
    self.assertEqual(x, spam.spamlist)
    self.assertEqual(a, a1)
    self.assertEqual(d, d1)
def test_staticmethods(self):
# Testing static methods...
class C(object):
def foo(*a): return a
goo = staticmethod(foo)
c = C()
self.assertEqual(C.goo(1), (1,))
self.assertEqual(c.goo(1), (1,))
self.assertEqual(c.foo(1), (c, 1,))
class D(C):
pass
d = D()
self.assertEqual(D.goo(1), (1,))
self.assertEqual(d.goo(1), (1,))
self.assertEqual(d.foo(1), (d, 1))
self.assertEqual(D.foo(d, 1), (d, 1))
@support.impl_detail("the module 'xxsubtype' is internal")
def test_staticmethods_in_c(self):
    """C-implemented staticmethods (xxsubtype.spamlist.staticmeth) get
    no implicit first argument, whether accessed via the class or an
    instance, and pass *args/**kwargs through unchanged."""
    # Testing C-based static methods...
    import xxsubtype as spam
    a = (1, 2, 3)
    d = {"abc": 123}
    x, a1, d1 = spam.spamlist.staticmeth(*a, **d)
    self.assertEqual(x, None)
    self.assertEqual(a, a1)
    self.assertEqual(d, d1)
    # BUG FIX: this result was previously unpacked into 'd2' while the
    # assertion below re-checked the stale 'd1' from the class-access
    # call, so the kwargs round-trip through instance access was never
    # actually verified.
    x, a1, d1 = spam.spamlist().staticmeth(*a, **d)
    self.assertEqual(x, None)
    self.assertEqual(a, a1)
    self.assertEqual(d, d1)
def test_classic(self):
    """'Classic'-style class statements (no explicit base) create
    ordinary new-style classes in Python 3; classmethod binding and
    function sharing behave identically."""
    # Testing classic classes...
    class C:
        def foo(*a): return a
        goo = classmethod(foo)
    c = C()
    self.assertEqual(C.goo(1), (C, 1))
    self.assertEqual(c.goo(1), (C, 1))
    self.assertEqual(c.foo(1), (c, 1))
    class D(C):
        pass
    d = D()
    self.assertEqual(D.goo(1), (D, 1))
    self.assertEqual(d.goo(1), (D, 1))
    self.assertEqual(d.foo(1), (d, 1))
    self.assertEqual(D.foo(d, 1), (d, 1))
    class E: # *not* subclassing from C
        foo = C.foo
    self.assertEqual(E().foo.__func__, C.foo) # i.e., unbound
    self.assertTrue(repr(C.foo.__get__(C())).startswith("<bound method "))
def test_compattr(self):
    """A hand-rolled property-like descriptor (a get/set/delete triple)
    defined and wired up entirely inside the class body."""
    # Testing computed attributes...
    class C(object):
        class computed_attribute(object):
            def __init__(self, get, set=None, delete=None):
                self.__get = get
                self.__set = set
                self.__delete = delete
            def __get__(self, obj, type=None):
                return self.__get(obj)
            def __set__(self, obj, value):
                return self.__set(obj, value)
            def __delete__(self, obj):
                return self.__delete(obj)
        def __init__(self):
            self.__x = 0
        def __get_x(self):
            # Reading the attribute also post-increments it, so two
            # successive reads differ by one (verified below).
            x = self.__x
            self.__x = x+1
            return x
        def __set_x(self, x):
            self.__x = x
        def __delete_x(self):
            del self.__x
        # Private names are mangled consistently throughout the class
        # body, so these bare references resolve to the defs above.
        x = computed_attribute(__get_x, __set_x, __delete_x)
    a = C()
    self.assertEqual(a.x, 0)
    self.assertEqual(a.x, 1)
    a.x = 10
    self.assertEqual(a.x, 10)
    self.assertEqual(a.x, 11)
    del a.x
    self.assertEqual(hasattr(a, 'x'), 0)
def test_newslots(self):
# Testing __new__ slot override...
class C(list):
def __new__(cls):
self = list.__new__(cls)
self.foo = 1
return self
def __init__(self):
self.foo = self.foo + 2
a = C()
self.assertEqual(a.foo, 3)
self.assertEqual(a.__class__, C)
class D(C):
pass
b = D()
self.assertEqual(b.foo, 3)
self.assertEqual(b.__class__, D)
def test_altmro(self):
    """mro() overrides on a metaclass: a perverse-but-consistent custom
    order is honored; invalid mro() return values raise TypeError."""
    # Testing mro() and overriding it...
    class A(object):
        def f(self): return "A"
    class B(A):
        pass
    class C(A):
        def f(self): return "C"
    class D(B, C):
        pass
    self.assertEqual(D.mro(), [D, B, C, A, object])
    self.assertEqual(D.__mro__, (D, B, C, A, object))
    self.assertEqual(D().f(), "C")
    class PerverseMetaType(type):
        def mro(cls):
            # Reverse the standard C3 order -- lookups then start at
            # object and end at the class itself.
            L = type.mro(cls)
            L.reverse()
            return L
    class X(D,B,C,A, metaclass=PerverseMetaType):
        pass
    self.assertEqual(X.__mro__, (object, A, C, B, D, X))
    self.assertEqual(X().f(), "A")
    try:
        class _metaclass(type):
            def mro(self):
                return [self, dict, object]
        class X(object, metaclass=_metaclass):
            pass
        # In CPython, the class creation above already raises
        # TypeError, as a protection against the fact that
        # instances of X would segfault it. In other Python
        # implementations it would be ok to let the class X
        # be created, but instead get a clean TypeError on the
        # __setitem__ below.
        x = object.__new__(X)
        x[5] = 6
    except TypeError:
        pass
    else:
        self.fail("devious mro() return not caught")
    try:
        class _metaclass(type):
            def mro(self):
                return [1]
        class X(object, metaclass=_metaclass):
            pass
    except TypeError:
        pass
    else:
        self.fail("non-class mro() return not caught")
    try:
        class _metaclass(type):
            def mro(self):
                return 1
        class X(object, metaclass=_metaclass):
            pass
    except TypeError:
        pass
    else:
        self.fail("non-sequence mro() return not caught")
def test_overloading(self):
    """Selective overriding of attribute and item hooks; slice syntax
    passes slice objects through the same __getitem__/__setitem__/
    __delitem__ hooks as plain indices."""
    # Testing operator overloading...
    class B(object):
        "Intermediate class because object doesn't have a __setattr__"
    class C(B):
        def __getattr__(self, name):
            if name == "foo":
                return ("getattr", name)
            else:
                raise AttributeError
        def __setattr__(self, name, value):
            # Intercept only "foo"; everything else goes to the base,
            # which is how self.setattr below can be stored at all.
            if name == "foo":
                self.setattr = (name, value)
            else:
                return B.__setattr__(self, name, value)
        def __delattr__(self, name):
            if name == "foo":
                self.delattr = name
            else:
                return B.__delattr__(self, name)
        def __getitem__(self, key):
            return ("getitem", key)
        def __setitem__(self, key, value):
            self.setitem = (key, value)
        def __delitem__(self, key):
            self.delitem = key
    a = C()
    self.assertEqual(a.foo, ("getattr", "foo"))
    a.foo = 12
    self.assertEqual(a.setattr, ("foo", 12))
    del a.foo
    self.assertEqual(a.delattr, "foo")
    self.assertEqual(a[12], ("getitem", 12))
    a[12] = 21
    self.assertEqual(a.setitem, (12, 21))
    del a[12]
    self.assertEqual(a.delitem, 12)
    # Slice syntax reaches the same hooks, with a slice object as key.
    self.assertEqual(a[0:10], ("getitem", slice(0, 10)))
    a[0:10] = "foo"
    self.assertEqual(a.setitem, (slice(0, 10), "foo"))
    del a[0:10]
    self.assertEqual(a.delitem, (slice(0, 10)))
def test_methods(self):
    """Method objects: a plain function assigned to a class re-binds
    per instance, a bound method stored as a class attribute stays
    frozen to its original instance, and __func__ exposes the
    underlying function."""
    # Testing methods...
    class C(object):
        def __init__(self, x):
            self.x = x
        def foo(self):
            return self.x
    c1 = C(1)
    self.assertEqual(c1.foo(), 1)
    class D(C):
        boo = C.foo    # plain function: binds to the accessing instance
        goo = c1.foo   # bound method: permanently tied to c1
    d2 = D(2)
    self.assertEqual(d2.foo(), 2)
    self.assertEqual(d2.boo(), 2)
    self.assertEqual(d2.goo(), 1)
    class E(object):
        foo = C.foo
    self.assertEqual(E().foo.__func__, C.foo) # i.e., unbound
    self.assertTrue(repr(C.foo.__get__(C(1))).startswith("<bound method "))
    def test_special_method_lookup(self):
        # The lookup of special methods bypasses __getattr__ and
        # __getattribute__, but they still can be descriptors.
        # Stand-in implementations for the special methods exercised below.
        def run_context(manager):
            with manager:
                pass
        def iden(self):
            return self
        def hello(self):
            return b"hello"
        def empty_seq(self):
            return []
        def zero(self):
            return 0
        def complex_num(self):
            return 1j
        def stop(self):
            raise StopIteration
        def return_true(self, thing=None):
            return True
        def do_isinstance(obj):
            return isinstance(int, obj)
        def do_issubclass(obj):
            return issubclass(int, obj)
        def do_dict_missing(checker):
            class DictSub(checker.__class__, dict):
                pass
            self.assertEqual(DictSub()["hi"], 4)
        def some_number(self_, key):
            self.assertEqual(key, "hi")
            return 4
        def swallow(*args): pass
        def format_impl(self, spec):
            return "hello"
        # It would be nice to have every special method tested here, but I'm
        # only listing the ones I can remember outside of typeobject.c, since it
        # does it right.
        # Each entry: (special method name, driver that triggers its implicit
        # lookup, implementation to install, attribute names the driver may
        # fetch via normal lookup, extra class attributes the driver needs).
        specials = [
            ("__bytes__", bytes, hello, set(), {}),
            ("__reversed__", reversed, empty_seq, set(), {}),
            ("__length_hint__", list, zero, set(),
             {"__iter__" : iden, "__next__" : stop}),
            ("__sizeof__", sys.getsizeof, zero, set(), {}),
            ("__instancecheck__", do_isinstance, return_true, set(), {}),
            ("__missing__", do_dict_missing, some_number,
             set(("__class__",)), {}),
            ("__subclasscheck__", do_issubclass, return_true,
             set(("__bases__",)), {}),
            ("__enter__", run_context, iden, set(), {"__exit__" : swallow}),
            ("__exit__", run_context, swallow, set(), {"__enter__" : iden}),
            ("__complex__", complex, complex_num, set(), {}),
            ("__format__", format, format_impl, set(), {}),
            ("__floor__", math.floor, zero, set(), {}),
            ("__trunc__", math.trunc, zero, set(), {}),
            ("__ceil__", math.ceil, zero, set(), {}),
            ("__dir__", dir, empty_seq, set(), {}),
            ]
        # Checker fails the test if regular attribute lookup is ever used for
        # a special method; `ok` (bound per-iteration below) whitelists the
        # attributes the driver is allowed to fetch.
        class Checker(object):
            def __getattr__(self, attr, test=self):
                test.fail("__getattr__ called with {0}".format(attr))
            def __getattribute__(self, attr, test=self):
                if attr not in ok:
                    test.fail("__getattribute__ called with {0}".format(attr))
                return object.__getattribute__(self, attr)
        # Descriptor that records (in `record`) that its __get__ ran.
        class SpecialDescr(object):
            def __init__(self, impl):
                self.impl = impl
            def __get__(self, obj, owner):
                record.append(1)
                return self.impl.__get__(obj, owner)
        class MyException(Exception):
            pass
        # Descriptor whose __get__ always raises, to check error propagation.
        class ErrDescr(object):
            def __get__(self, obj, owner):
                raise MyException
        for name, runner, meth_impl, ok, env in specials:
            # 1) Plain implementation: must be found on the type without
            #    consulting __getattr__/__getattribute__.
            class X(Checker):
                pass
            for attr, obj in env.items():
                setattr(X, attr, obj)
            setattr(X, name, meth_impl)
            runner(X())
            # 2) Descriptor implementation: __get__ must still be invoked.
            record = []
            class X(Checker):
                pass
            for attr, obj in env.items():
                setattr(X, attr, obj)
            setattr(X, name, SpecialDescr(meth_impl))
            runner(X())
            self.assertEqual(record, [1], name)
            # 3) Raising descriptor: the exception must propagate unchanged.
            class X(Checker):
                pass
            for attr, obj in env.items():
                setattr(X, attr, obj)
            setattr(X, name, ErrDescr())
            try:
                runner(X())
            except MyException:
                pass
            else:
                self.fail("{0!r} didn't raise".format(name))
    def test_specials(self):
        # Testing special operators...
        # Test operators like __hash__ for which a built-in default exists
        # Test the default behavior for static classes
        class C(object):
            def __getitem__(self, i):
                if 0 <= i < 10: return i
                raise IndexError
        c1 = C()
        c2 = C()
        # Default truth value: any instance is true.
        self.assertTrue(not not c1) # What?
        self.assertNotEqual(id(c1), id(c2))
        # Default __hash__ exists and is usable.
        hash(c1)
        hash(c2)
        # Default equality is identity-based.
        self.assertEqual(c1, c1)
        self.assertTrue(c1 != c2)
        self.assertTrue(not c1 != c1)
        self.assertTrue(not c1 == c2)
        # Note that the module name appears in str/repr, and that varies
        # depending on whether this test is run standalone or from a framework.
        self.assertTrue(str(c1).find('C object at ') >= 0)
        self.assertEqual(str(c1), repr(c1))
        # `in` falls back to iteration via __getitem__.
        self.assertNotIn(-1, c1)
        for i in range(10):
            self.assertIn(i, c1)
        self.assertNotIn(10, c1)
        # Test the default behavior for dynamic classes
        class D(object):
            def __getitem__(self, i):
                if 0 <= i < 10: return i
                raise IndexError
        d1 = D()
        d2 = D()
        self.assertTrue(not not d1)
        self.assertNotEqual(id(d1), id(d2))
        hash(d1)
        hash(d2)
        self.assertEqual(d1, d1)
        self.assertNotEqual(d1, d2)
        self.assertTrue(not d1 != d1)
        self.assertTrue(not d1 == d2)
        # Note that the module name appears in str/repr, and that varies
        # depending on whether this test is run standalone or from a framework.
        self.assertTrue(str(d1).find('D object at ') >= 0)
        self.assertEqual(str(d1), repr(d1))
        self.assertNotIn(-1, d1)
        for i in range(10):
            self.assertIn(i, d1)
        self.assertNotIn(10, d1)
        # Test overridden behavior
        # Proxy delegates every special operation to the wrapped object.
        class Proxy(object):
            def __init__(self, x):
                self.x = x
            def __bool__(self):
                return not not self.x
            def __hash__(self):
                return hash(self.x)
            def __eq__(self, other):
                return self.x == other
            def __ne__(self, other):
                return self.x != other
            def __ge__(self, other):
                return self.x >= other
            def __gt__(self, other):
                return self.x > other
            def __le__(self, other):
                return self.x <= other
            def __lt__(self, other):
                return self.x < other
            def __str__(self):
                return "Proxy:%s" % self.x
            def __repr__(self):
                return "Proxy(%r)" % self.x
            def __contains__(self, value):
                return value in self.x
        p0 = Proxy(0)
        p1 = Proxy(1)
        p_1 = Proxy(-1)
        self.assertFalse(p0)
        self.assertTrue(not not p1)
        self.assertEqual(hash(p0), hash(0))
        self.assertEqual(p0, p0)
        self.assertNotEqual(p0, p1)
        self.assertTrue(not p0 != p0)
        self.assertEqual(not p0, p1)
        self.assertTrue(p0 < p1)
        self.assertTrue(p0 <= p1)
        self.assertTrue(p1 > p0)
        self.assertTrue(p1 >= p0)
        self.assertEqual(str(p0), "Proxy:0")
        self.assertEqual(repr(p0), "Proxy(0)")
        p10 = Proxy(range(10))
        self.assertNotIn(-1, p10)
        for i in range(10):
            self.assertIn(i, p10)
        self.assertNotIn(10, p10)
def test_weakrefs(self):
# Testing weak references...
import weakref
class C(object):
pass
c = C()
r = weakref.ref(c)
self.assertEqual(r(), c)
del c
support.gc_collect()
self.assertEqual(r(), None)
del r
class NoWeak(object):
__slots__ = ['foo']
no = NoWeak()
try:
weakref.ref(no)
except TypeError as msg:
self.assertTrue(str(msg).find("weak reference") >= 0)
else:
self.fail("weakref.ref(no) should be illegal")
class Weak(object):
__slots__ = ['foo', '__weakref__']
yes = Weak()
r = weakref.ref(yes)
self.assertEqual(r(), yes)
del yes
support.gc_collect()
self.assertEqual(r(), None)
del r
    def test_properties(self):
        # Testing property...
        class C(object):
            def getx(self):
                return self.__x
            def setx(self, value):
                self.__x = value
            def delx(self):
                del self.__x
            x = property(getx, setx, delx, doc="I'm the x property.")
        a = C()
        self.assertFalse(hasattr(a, "x"))
        a.x = 42
        # The accessors store through the name-mangled attribute _C__x.
        self.assertEqual(a._C__x, 42)
        self.assertEqual(a.x, 42)
        del a.x
        self.assertFalse(hasattr(a, "x"))
        self.assertFalse(hasattr(a, "_C__x"))
        # The descriptor protocol can also be driven explicitly.
        C.x.__set__(a, 100)
        self.assertEqual(C.x.__get__(a), 100)
        C.x.__delete__(a)
        self.assertFalse(hasattr(a, "x"))
        # Inspect the raw property object stored in the class dict.
        raw = C.__dict__['x']
        self.assertIsInstance(raw, property)
        attrs = dir(raw)
        self.assertIn("__doc__", attrs)
        self.assertIn("fget", attrs)
        self.assertIn("fset", attrs)
        self.assertIn("fdel", attrs)
        self.assertEqual(raw.__doc__, "I'm the x property.")
        self.assertTrue(raw.fget is C.__dict__['getx'])
        self.assertTrue(raw.fset is C.__dict__['setx'])
        self.assertTrue(raw.fdel is C.__dict__['delx'])
        # All property attributes must be read-only.
        for attr in "__doc__", "fget", "fset", "fdel":
            try:
                setattr(raw, attr, 42)
            except AttributeError as msg:
                if str(msg).find('readonly') < 0:
                    self.fail("when setting readonly attr %r on a property, "
                              "got unexpected AttributeError msg %r" % (attr, str(msg)))
            else:
                self.fail("expected AttributeError from trying to set readonly %r "
                          "attr on a property" % attr)
        # An exception raised inside a property getter must propagate, even
        # when the getter is triggered implicitly (here: by iteration).
        class D(object):
            __getitem__ = property(lambda s: 1/0)
        d = D()
        try:
            for i in d:
                str(i)
        except ZeroDivisionError:
            pass
        else:
            self.fail("expected ZeroDivisionError from bad property")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_properties_doc_attrib(self):
class E(object):
def getter(self):
"getter method"
return 0
def setter(self_, value):
"setter method"
pass
prop = property(getter)
self.assertEqual(prop.__doc__, "getter method")
prop2 = property(fset=setter)
self.assertEqual(prop2.__doc__, None)
def test_testcapi_no_segfault(self):
# this segfaulted in 2.5b2
try:
import _testcapi
except ImportError:
pass
else:
class X(object):
p = property(_testcapi.test_with_docstring)
    def test_properties_plus(self):
        # property(doc=...) creates an accessor-less placeholder; the
        # getter/setter/deleter decorators then fill in accessors while
        # preserving the original docstring.
        class C(object):
            foo = property(doc="hello")
            @foo.getter
            def foo(self):
                return self._foo
            @foo.setter
            def foo(self, value):
                self._foo = abs(value)
            @foo.deleter
            def foo(self):
                del self._foo
        c = C()
        self.assertEqual(C.foo.__doc__, "hello")
        self.assertFalse(hasattr(c, "foo"))
        c.foo = -42
        self.assertTrue(hasattr(c, '_foo'))
        self.assertEqual(c._foo, 42)
        self.assertEqual(c.foo, 42)
        del c.foo
        self.assertFalse(hasattr(c, '_foo'))
        self.assertFalse(hasattr(c, "foo"))
        # A subclass can replace just one accessor of an inherited property.
        class D(C):
            @C.foo.deleter
            def foo(self):
                try:
                    del self._foo
                except AttributeError:
                    pass
        d = D()
        d.foo = 24
        self.assertEqual(d.foo, 24)
        del d.foo
        del d.foo
        # When the same accessor is decorated twice, the last one wins: the
        # RuntimeError-raising setter below is replaced by the abs() setter.
        class E(object):
            @property
            def foo(self):
                return self._foo
            @foo.setter
            def foo(self, value):
                raise RuntimeError
            @foo.setter
            def foo(self, value):
                self._foo = abs(value)
            @foo.deleter
            def foo(self, value=None):
                del self._foo
        e = E()
        e.foo = -42
        self.assertEqual(e.foo, 42)
        del e.foo
        class F(E):
            @E.foo.deleter
            def foo(self):
                del self._foo
            @foo.setter
            def foo(self, value):
                self._foo = max(0, value)
        f = F()
        f.foo = -10
        self.assertEqual(f.foo, 0)
        del f.foo
def test_dict_constructors(self):
# Testing dict constructor ...
d = dict()
self.assertEqual(d, {})
d = dict({})
self.assertEqual(d, {})
d = dict({1: 2, 'a': 'b'})
self.assertEqual(d, {1: 2, 'a': 'b'})
self.assertEqual(d, dict(list(d.items())))
self.assertEqual(d, dict(iter(d.items())))
d = dict({'one':1, 'two':2})
self.assertEqual(d, dict(one=1, two=2))
self.assertEqual(d, dict(**d))
self.assertEqual(d, dict({"one": 1}, two=2))
self.assertEqual(d, dict([("two", 2)], one=1))
self.assertEqual(d, dict([("one", 100), ("two", 200)], **d))
self.assertEqual(d, dict(**d))
for badarg in 0, 0, 0j, "0", [0], (0,):
try:
dict(badarg)
except TypeError:
pass
except ValueError:
if badarg == "0":
# It's a sequence, and its elements are also sequences (gotta
# love strings <wink>), but they aren't of length 2, so this
# one seemed better as a ValueError than a TypeError.
pass
else:
self.fail("no TypeError from dict(%r)" % badarg)
else:
self.fail("no TypeError from dict(%r)" % badarg)
try:
dict({}, {})
except TypeError:
pass
else:
self.fail("no TypeError from dict({}, {})")
class Mapping:
# Lacks a .keys() method; will be added later.
dict = {1:2, 3:4, 'a':1j}
try:
dict(Mapping())
except TypeError:
pass
else:
self.fail("no TypeError from dict(incomplete mapping)")
Mapping.keys = lambda self: list(self.dict.keys())
Mapping.__getitem__ = lambda self, i: self.dict[i]
d = dict(Mapping())
self.assertEqual(d, Mapping.dict)
# Init from sequence of iterable objects, each producing a 2-sequence.
class AddressBookEntry:
def __init__(self, first, last):
self.first = first
self.last = last
def __iter__(self):
return iter([self.first, self.last])
d = dict([AddressBookEntry('Tim', 'Warsaw'),
AddressBookEntry('Barry', 'Peters'),
AddressBookEntry('Tim', 'Peters'),
AddressBookEntry('Barry', 'Warsaw')])
self.assertEqual(d, {'Barry': 'Warsaw', 'Tim': 'Peters'})
d = dict(zip(range(4), range(1, 5)))
self.assertEqual(d, dict([(i, i+1) for i in range(4)]))
# Bad sequence lengths.
for bad in [('tooshort',)], [('too', 'long', 'by 1')]:
try:
dict(bad)
except ValueError:
pass
else:
self.fail("no ValueError from dict(%r)" % bad)
    def test_dir(self):
        # Testing dir() ...
        junk = 12
        self.assertEqual(dir(), ['junk', 'self'])
        del junk
        # Just make sure these don't blow up!
        # (the repeated 2 is a 2to3 leftover from separate int/long cases)
        for arg in 2, 2, 2j, 2e0, [2], "2", b"2", (2,), {2:2}, type, self.test_dir:
            dir(arg)
        # Test dir on new-style classes.  Since these have object as a
        # base class, a lot more gets sucked in.
        def interesting(strings):
            return [s for s in strings if not s.startswith('_')]
        class C(object):
            Cdata = 1
            def Cmethod(self): pass
        cstuff = ['Cdata', 'Cmethod']
        self.assertEqual(interesting(dir(C)), cstuff)
        c = C()
        self.assertEqual(interesting(dir(c)), cstuff)
        ## self.assertIn('__self__', dir(C.Cmethod))
        # Instance attributes show up in dir(instance) too.
        c.cdata = 2
        c.cmethod = lambda self: 0
        self.assertEqual(interesting(dir(c)), cstuff + ['cdata', 'cmethod'])
        ## self.assertIn('__self__', dir(c.Cmethod))
        class A(C):
            Adata = 1
            def Amethod(self): pass
        astuff = ['Adata', 'Amethod'] + cstuff
        self.assertEqual(interesting(dir(A)), astuff)
        ## self.assertIn('__self__', dir(A.Amethod))
        a = A()
        self.assertEqual(interesting(dir(a)), astuff)
        a.adata = 42
        a.amethod = lambda self: 3
        self.assertEqual(interesting(dir(a)), astuff + ['adata', 'amethod'])
        ## self.assertIn('__self__', dir(a.Amethod))
        # Try a module subclass.
        class M(type(sys)):
            pass
        minstance = M("m")
        minstance.b = 2
        minstance.a = 1
        names = [x for x in dir(minstance) if x not in ["__name__", "__doc__"]]
        self.assertEqual(names, ['a', 'b'])
        # A __dict__ that is not an actual dict: dir() may reject it with
        # TypeError (tolerated, hence the bare pass with no else-clause).
        class M2(M):
            def getdict(self):
                return "Not a dict!"
            __dict__ = property(getdict)
        m2instance = M2("m2")
        m2instance.b = 2
        m2instance.a = 1
        self.assertEqual(m2instance.__dict__, "Not a dict!")
        try:
            dir(m2instance)
        except TypeError:
            pass
        # Two essentially featureless objects, just inheriting stuff from
        # object.
        self.assertEqual(dir(NotImplemented), dir(Ellipsis))
        if support.check_impl_detail():
            # None differs in PyPy: it has a __nonzero__
            self.assertEqual(dir(None), dir(Ellipsis))
        # Nasty test case for proxied objects
        class Wrapper(object):
            def __init__(self, obj):
                self.__obj = obj
            def __repr__(self):
                return "Wrapper(%s)" % repr(self.__obj)
            def __getitem__(self, key):
                return Wrapper(self.__obj[key])
            def __len__(self):
                return len(self.__obj)
            def __getattr__(self, name):
                return Wrapper(getattr(self.__obj, name))
        # Even __class__ is proxied here; dir() must cope.
        class C(object):
            def __getclass(self):
                return Wrapper(type(self))
            __class__ = property(__getclass)
        dir(C()) # This used to segfault
def test_supers(self):
# Testing super...
class A(object):
def meth(self, a):
return "A(%r)" % a
self.assertEqual(A().meth(1), "A(1)")
class B(A):
def __init__(self):
self.__super = super(B, self)
def meth(self, a):
return "B(%r)" % a + self.__super.meth(a)
self.assertEqual(B().meth(2), "B(2)A(2)")
class C(A):
def meth(self, a):
return "C(%r)" % a + self.__super.meth(a)
C._C__super = super(C)
self.assertEqual(C().meth(3), "C(3)A(3)")
class D(C, B):
def meth(self, a):
return "D(%r)" % a + super(D, self).meth(a)
self.assertEqual(D().meth(4), "D(4)C(4)B(4)A(4)")
# Test for subclassing super
class mysuper(super):
def __init__(self, *args):
return super(mysuper, self).__init__(*args)
class E(D):
def meth(self, a):
return "E(%r)" % a + mysuper(E, self).meth(a)
self.assertEqual(E().meth(5), "E(5)D(5)C(5)B(5)A(5)")
class F(E):
def meth(self, a):
s = self.__super # == mysuper(F, self)
return "F(%r)[%s]" % (a, s.__class__.__name__) + s.meth(a)
F._F__super = mysuper(F)
self.assertEqual(F().meth(6), "F(6)[mysuper]E(6)D(6)C(6)B(6)A(6)")
# Make sure certain errors are raised
try:
super(D, 42)
except TypeError:
pass
else:
self.fail("shouldn't allow super(D, 42)")
try:
super(D, C())
except TypeError:
pass
else:
self.fail("shouldn't allow super(D, C())")
try:
super(D).__get__(12)
except TypeError:
pass
else:
self.fail("shouldn't allow super(D).__get__(12)")
try:
super(D).__get__(C())
except TypeError:
pass
else:
self.fail("shouldn't allow super(D).__get__(C())")
# Make sure data descriptors can be overridden and accessed via super
# (new feature in Python 2.3)
class DDbase(object):
def getx(self): return 42
x = property(getx)
class DDsub(DDbase):
def getx(self): return "hello"
x = property(getx)
dd = DDsub()
self.assertEqual(dd.x, "hello")
self.assertEqual(super(DDsub, dd).x, 42)
# Ensure that super() lookup of descriptor from classmethod
# works (SF ID# 743627)
class Base(object):
aProp = property(lambda self: "foo")
class Sub(Base):
@classmethod
def test(klass):
return super(Sub,klass).aProp
self.assertEqual(Sub.test(), Base.aProp)
# Verify that super() doesn't allow keyword args
try:
super(Base, kw=1)
except TypeError:
pass
else:
self.assertEqual("super shouldn't accept keyword args")
    def test_basic_inheritance(self):
        # Testing inheritance from basic types...
        # int subclass that customizes repr and keeps its type under +.
        class hexint(int):
            def __repr__(self):
                return hex(self)
            def __add__(self, other):
                return hexint(int.__add__(self, other))
            # (Note that overriding __radd__ doesn't work,
            # because the int type gets first dibs.)
        self.assertEqual(repr(hexint(7) + 9), "0x10")
        self.assertEqual(repr(hexint(1000) + 7), "0x3ef")
        a = hexint(12345)
        self.assertEqual(a, 12345)
        self.assertEqual(int(a), 12345)
        self.assertTrue(int(a).__class__ is int)
        self.assertEqual(hash(a), hash(12345))
        # Arithmetic on a subclass returns plain int unless overridden.
        self.assertTrue((+a).__class__ is int)
        self.assertTrue((a >> 0).__class__ is int)
        self.assertTrue((a << 0).__class__ is int)
        self.assertTrue((hexint(0) << 12).__class__ is int)
        self.assertTrue((hexint(0) >> 12).__class__ is int)
        class octlong(int):
            __slots__ = []
            def __str__(self):
                return oct(self)
            def __add__(self, other):
                return self.__class__(super(octlong, self).__add__(other))
            __radd__ = __add__
        self.assertEqual(str(octlong(3) + 5), "0o10")
        # (Note that overriding __radd__ here only seems to work
        # because the example uses a short int left argument.)
        self.assertEqual(str(5 + octlong(3000)), "0o5675")
        a = octlong(12345)
        self.assertEqual(a, 12345)
        self.assertEqual(int(a), 12345)
        self.assertEqual(hash(a), hash(12345))
        self.assertTrue(int(a).__class__ is int)
        self.assertTrue((+a).__class__ is int)
        self.assertTrue((-a).__class__ is int)
        self.assertTrue((-octlong(0)).__class__ is int)
        self.assertTrue((a >> 0).__class__ is int)
        self.assertTrue((a << 0).__class__ is int)
        self.assertTrue((a - 0).__class__ is int)
        self.assertTrue((a * 1).__class__ is int)
        self.assertTrue((a ** 1).__class__ is int)
        self.assertTrue((a // 1).__class__ is int)
        self.assertTrue((1 * a).__class__ is int)
        self.assertTrue((a | 0).__class__ is int)
        self.assertTrue((a ^ 0).__class__ is int)
        self.assertTrue((a & -1).__class__ is int)
        self.assertTrue((octlong(0) << 12).__class__ is int)
        self.assertTrue((octlong(0) >> 12).__class__ is int)
        self.assertTrue(abs(octlong(0)).__class__ is int)
        # Because octlong overrides __add__, we can't check the absence of +0
        # optimizations using octlong.
        class longclone(int):
            pass
        a = longclone(1)
        self.assertTrue((a + 0).__class__ is int)
        self.assertTrue((0 + a).__class__ is int)
        # Check that negative clones don't segfault
        a = longclone(-1)
        self.assertEqual(a.__dict__, {})
        self.assertEqual(int(a), -1) # check that PyNumber_Long() copies the sign bit
        class precfloat(float):
            __slots__ = ['prec']
            def __init__(self, value=0.0, prec=12):
                self.prec = int(prec)
            def __repr__(self):
                return "%.*g" % (self.prec, self)
        self.assertEqual(repr(precfloat(1.1)), "1.1")
        a = precfloat(12345)
        self.assertEqual(a, 12345.0)
        self.assertEqual(float(a), 12345.0)
        self.assertTrue(float(a).__class__ is float)
        self.assertEqual(hash(a), hash(12345.0))
        self.assertTrue((+a).__class__ is float)
        class madcomplex(complex):
            def __repr__(self):
                return "%.17gj%+.17g" % (self.imag, self.real)
        a = madcomplex(-3, 4)
        self.assertEqual(repr(a), "4j-3")
        base = complex(-3, 4)
        self.assertEqual(base.__class__, complex)
        self.assertEqual(a, base)
        self.assertEqual(complex(a), base)
        self.assertEqual(complex(a).__class__, complex)
        a = madcomplex(a) # just trying another form of the constructor
        self.assertEqual(repr(a), "4j-3")
        self.assertEqual(a, base)
        self.assertEqual(complex(a), base)
        self.assertEqual(complex(a).__class__, complex)
        self.assertEqual(hash(a), hash(base))
        self.assertEqual((+a).__class__, complex)
        self.assertEqual((a + 0).__class__, complex)
        self.assertEqual(a + 0, base)
        self.assertEqual((a - 0).__class__, complex)
        self.assertEqual(a - 0, base)
        self.assertEqual((a * 1).__class__, complex)
        self.assertEqual(a * 1, base)
        self.assertEqual((a / 1).__class__, complex)
        self.assertEqual(a / 1, base)
        # tuple subclass with a memoizing reverse; the cache lives on the
        # instance, so the subclass is no longer truly immutable.
        class madtuple(tuple):
            _rev = None
            def rev(self):
                if self._rev is not None:
                    return self._rev
                L = list(self)
                L.reverse()
                self._rev = self.__class__(L)
                return self._rev
        a = madtuple((1,2,3,4,5,6,7,8,9,0))
        self.assertEqual(a, (1,2,3,4,5,6,7,8,9,0))
        self.assertEqual(a.rev(), madtuple((0,9,8,7,6,5,4,3,2,1)))
        self.assertEqual(a.rev().rev(), madtuple((1,2,3,4,5,6,7,8,9,0)))
        for i in range(512):
            t = madtuple(range(i))
            u = t.rev()
            v = u.rev()
            self.assertEqual(v, t)
        a = madtuple((1,2,3,4,5))
        self.assertEqual(tuple(a), (1,2,3,4,5))
        self.assertTrue(tuple(a).__class__ is tuple)
        self.assertEqual(hash(a), hash((1,2,3,4,5)))
        self.assertTrue(a[:].__class__ is tuple)
        self.assertTrue((a * 1).__class__ is tuple)
        self.assertTrue((a * 0).__class__ is tuple)
        self.assertTrue((a + ()).__class__ is tuple)
        a = madtuple(())
        self.assertEqual(tuple(a), ())
        self.assertTrue(tuple(a).__class__ is tuple)
        self.assertTrue((a + a).__class__ is tuple)
        self.assertTrue((a * 0).__class__ is tuple)
        self.assertTrue((a * 1).__class__ is tuple)
        self.assertTrue((a * 2).__class__ is tuple)
        self.assertTrue(a[:].__class__ is tuple)
        class madstring(str):
            _rev = None
            def rev(self):
                if self._rev is not None:
                    return self._rev
                L = list(self)
                L.reverse()
                self._rev = self.__class__("".join(L))
                return self._rev
        s = madstring("abcdefghijklmnopqrstuvwxyz")
        self.assertEqual(s, "abcdefghijklmnopqrstuvwxyz")
        self.assertEqual(s.rev(), madstring("zyxwvutsrqponmlkjihgfedcba"))
        self.assertEqual(s.rev().rev(), madstring("abcdefghijklmnopqrstuvwxyz"))
        for i in range(256):
            s = madstring("".join(map(chr, range(i))))
            t = s.rev()
            u = t.rev()
            self.assertEqual(u, s)
        s = madstring("12345")
        self.assertEqual(str(s), "12345")
        self.assertTrue(str(s).__class__ is str)
        # All the built-in str operations must return plain str, not the
        # subclass, and must compare/hash equal to the base value.
        base = "\x00" * 5
        s = madstring(base)
        self.assertEqual(s, base)
        self.assertEqual(str(s), base)
        self.assertTrue(str(s).__class__ is str)
        self.assertEqual(hash(s), hash(base))
        self.assertEqual({s: 1}[base], 1)
        self.assertEqual({base: 1}[s], 1)
        self.assertTrue((s + "").__class__ is str)
        self.assertEqual(s + "", base)
        self.assertTrue(("" + s).__class__ is str)
        self.assertEqual("" + s, base)
        self.assertTrue((s * 0).__class__ is str)
        self.assertEqual(s * 0, "")
        self.assertTrue((s * 1).__class__ is str)
        self.assertEqual(s * 1, base)
        self.assertTrue((s * 2).__class__ is str)
        self.assertEqual(s * 2, base + base)
        self.assertTrue(s[:].__class__ is str)
        self.assertEqual(s[:], base)
        self.assertTrue(s[0:0].__class__ is str)
        self.assertEqual(s[0:0], "")
        self.assertTrue(s.strip().__class__ is str)
        self.assertEqual(s.strip(), base)
        self.assertTrue(s.lstrip().__class__ is str)
        self.assertEqual(s.lstrip(), base)
        self.assertTrue(s.rstrip().__class__ is str)
        self.assertEqual(s.rstrip(), base)
        identitytab = {}
        self.assertTrue(s.translate(identitytab).__class__ is str)
        self.assertEqual(s.translate(identitytab), base)
        self.assertTrue(s.replace("x", "x").__class__ is str)
        self.assertEqual(s.replace("x", "x"), base)
        self.assertTrue(s.ljust(len(s)).__class__ is str)
        self.assertEqual(s.ljust(len(s)), base)
        self.assertTrue(s.rjust(len(s)).__class__ is str)
        self.assertEqual(s.rjust(len(s)), base)
        self.assertTrue(s.center(len(s)).__class__ is str)
        self.assertEqual(s.center(len(s)), base)
        self.assertTrue(s.lower().__class__ is str)
        self.assertEqual(s.lower(), base)
        class madunicode(str):
            _rev = None
            def rev(self):
                if self._rev is not None:
                    return self._rev
                L = list(self)
                L.reverse()
                self._rev = self.__class__("".join(L))
                return self._rev
        u = madunicode("ABCDEF")
        self.assertEqual(u, "ABCDEF")
        self.assertEqual(u.rev(), madunicode("FEDCBA"))
        self.assertEqual(u.rev().rev(), madunicode("ABCDEF"))
        base = "12345"
        u = madunicode(base)
        self.assertEqual(str(u), base)
        self.assertTrue(str(u).__class__ is str)
        self.assertEqual(hash(u), hash(base))
        self.assertEqual({u: 1}[base], 1)
        self.assertEqual({base: 1}[u], 1)
        self.assertTrue(u.strip().__class__ is str)
        self.assertEqual(u.strip(), base)
        self.assertTrue(u.lstrip().__class__ is str)
        self.assertEqual(u.lstrip(), base)
        self.assertTrue(u.rstrip().__class__ is str)
        self.assertEqual(u.rstrip(), base)
        self.assertTrue(u.replace("x", "x").__class__ is str)
        self.assertEqual(u.replace("x", "x"), base)
        self.assertTrue(u.replace("xy", "xy").__class__ is str)
        self.assertEqual(u.replace("xy", "xy"), base)
        self.assertTrue(u.center(len(u)).__class__ is str)
        self.assertEqual(u.center(len(u)), base)
        self.assertTrue(u.ljust(len(u)).__class__ is str)
        self.assertEqual(u.ljust(len(u)), base)
        self.assertTrue(u.rjust(len(u)).__class__ is str)
        self.assertEqual(u.rjust(len(u)), base)
        self.assertTrue(u.lower().__class__ is str)
        self.assertEqual(u.lower(), base)
        self.assertTrue(u.upper().__class__ is str)
        self.assertEqual(u.upper(), base)
        self.assertTrue(u.capitalize().__class__ is str)
        self.assertEqual(u.capitalize(), base)
        self.assertTrue(u.title().__class__ is str)
        self.assertEqual(u.title(), base)
        self.assertTrue((u + "").__class__ is str)
        self.assertEqual(u + "", base)
        self.assertTrue(("" + u).__class__ is str)
        self.assertEqual("" + u, base)
        self.assertTrue((u * 0).__class__ is str)
        self.assertEqual(u * 0, "")
        self.assertTrue((u * 1).__class__ is str)
        self.assertEqual(u * 1, base)
        self.assertTrue((u * 2).__class__ is str)
        self.assertEqual(u * 2, base + base)
        self.assertTrue(u[:].__class__ is str)
        self.assertEqual(u[:], base)
        self.assertTrue(u[0:0].__class__ is str)
        self.assertEqual(u[0:0], "")
        # A trivial list subclass supports all mutating list operations.
        class sublist(list):
            pass
        a = sublist(range(5))
        self.assertEqual(a, list(range(5)))
        a.append("hello")
        self.assertEqual(a, list(range(5)) + ["hello"])
        a[5] = 5
        self.assertEqual(a, list(range(6)))
        a.extend(range(6, 20))
        self.assertEqual(a, list(range(20)))
        a[-5:] = []
        self.assertEqual(a, list(range(15)))
        del a[10:15]
        self.assertEqual(len(a), 10)
        self.assertEqual(a, list(range(10)))
        self.assertEqual(list(a), list(range(10)))
        self.assertEqual(a[0], 0)
        self.assertEqual(a[9], 9)
        self.assertEqual(a[-10], 0)
        self.assertEqual(a[-1], 9)
        self.assertEqual(a[:5], list(range(5)))
        # The block below subclassed the py2-only `file` type; it was
        # disabled in the Python 3 port and is kept for reference.
##         class CountedInput(file):
##             """Counts lines read by self.readline().
##
##             self.lineno is the 0-based ordinal of the last line read, up to
##             a maximum of one greater than the number of lines in the file.
##
##             self.ateof is true if and only if the final "" line has been read,
##             at which point self.lineno stops incrementing, and further calls
##             to readline() continue to return "".
##             """
##
##             lineno = 0
##             ateof = 0
##             def readline(self):
##                 if self.ateof:
##                     return ""
##                 s = file.readline(self)
##                 # Next line works too.
##                 # s = super(CountedInput, self).readline()
##                 self.lineno += 1
##                 if s == "":
##                     self.ateof = 1
##                 return s
##
##         f = file(name=support.TESTFN, mode='w')
##         lines = ['a\n', 'b\n', 'c\n']
##         try:
##             f.writelines(lines)
##             f.close()
##             f = CountedInput(support.TESTFN)
##             for (i, expected) in zip(range(1, 5) + [4], lines + 2 * [""]):
##                 got = f.readline()
##                 self.assertEqual(expected, got)
##                 self.assertEqual(f.lineno, i)
##                 self.assertEqual(f.ateof, (i > len(lines)))
##             f.close()
##         finally:
##             try:
##                 f.close()
##             except:
##                 pass
##             support.unlink(support.TESTFN)
def test_keywords(self):
# Testing keyword args to basic type constructors ...
self.assertEqual(int(x=1), 1)
self.assertEqual(float(x=2), 2.0)
self.assertEqual(int(x=3), 3)
self.assertEqual(complex(imag=42, real=666), complex(666, 42))
self.assertEqual(str(object=500), '500')
self.assertEqual(str(object=b'abc', errors='strict'), 'abc')
self.assertEqual(tuple(sequence=range(3)), (0, 1, 2))
self.assertEqual(list(sequence=(0, 1, 2)), list(range(3)))
# note: as of Python 2.3, dict() no longer has an "items" keyword arg
for constructor in (int, float, int, complex, str, str,
tuple, list):
try:
constructor(bogus_keyword_arg=1)
except TypeError:
pass
else:
self.fail("expected TypeError from bogus keyword argument to %r"
% constructor)
    def test_str_subclass_as_dict_key(self):
        # Testing a str subclass used as a dict key ...
        class cistr(str):
            """Subclass of str that computes __eq__ case-insensitively.

            Also computes a hash code of the string in canonical form.
            """
            def __init__(self, value):
                # Cache the lowercased form and its hash once, at creation.
                self.canonical = value.lower()
                self.hashcode = hash(self.canonical)
            def __eq__(self, other):
                if not isinstance(other, cistr):
                    other = cistr(other)
                return self.canonical == other.canonical
            def __hash__(self):
                return self.hashcode
        self.assertEqual(cistr('ABC'), 'abc')
        self.assertEqual('aBc', cistr('ABC'))
        self.assertEqual(str(cistr('ABC')), 'ABC')
        # Dict lookups must go through the case-insensitive __eq__/__hash__.
        d = {cistr('one'): 1, cistr('two'): 2, cistr('tHree'): 3}
        self.assertEqual(d[cistr('one')], 1)
        self.assertEqual(d[cistr('tWo')], 2)
        self.assertEqual(d[cistr('THrEE')], 3)
        self.assertIn(cistr('ONe'), d)
        self.assertEqual(d.get(cistr('thrEE')), 3)
def test_classic_comparisons(self):
# Testing classic comparisons...
class classic:
pass
for base in (classic, int, object):
class C(base):
def __init__(self, value):
self.value = int(value)
def __eq__(self, other):
if isinstance(other, C):
return self.value == other.value
if isinstance(other, int) or isinstance(other, int):
return self.value == other
return NotImplemented
def __ne__(self, other):
if isinstance(other, C):
return self.value != other.value
if isinstance(other, int) or isinstance(other, int):
return self.value != other
return NotImplemented
def __lt__(self, other):
if isinstance(other, C):
return self.value < other.value
if isinstance(other, int) or isinstance(other, int):
return self.value < other
return NotImplemented
def __le__(self, other):
if isinstance(other, C):
return self.value <= other.value
if isinstance(other, int) or isinstance(other, int):
return self.value <= other
return NotImplemented
def __gt__(self, other):
if isinstance(other, C):
return self.value > other.value
if isinstance(other, int) or isinstance(other, int):
return self.value > other
return NotImplemented
def __ge__(self, other):
if isinstance(other, C):
return self.value >= other.value
if isinstance(other, int) or isinstance(other, int):
return self.value >= other
return NotImplemented
c1 = C(1)
c2 = C(2)
c3 = C(3)
self.assertEqual(c1, 1)
c = {1: c1, 2: c2, 3: c3}
for x in 1, 2, 3:
for y in 1, 2, 3:
for op in "<", "<=", "==", "!=", ">", ">=":
self.assertTrue(eval("c[x] %s c[y]" % op) ==
eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
self.assertTrue(eval("c[x] %s y" % op) ==
eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
self.assertTrue(eval("x %s c[y]" % op) ==
eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
def test_rich_comparisons(self):
# Testing rich comparisons...
class Z(complex):
pass
z = Z(1)
self.assertEqual(z, 1+0j)
self.assertEqual(1+0j, z)
class ZZ(complex):
def __eq__(self, other):
try:
return abs(self - other) <= 1e-6
except:
return NotImplemented
zz = ZZ(1.0000003)
self.assertEqual(zz, 1+0j)
self.assertEqual(1+0j, zz)
class classic:
pass
for base in (classic, int, object, list):
class C(base):
def __init__(self, value):
self.value = int(value)
def __cmp__(self_, other):
self.fail("shouldn't call __cmp__")
def __eq__(self, other):
if isinstance(other, C):
return self.value == other.value
if isinstance(other, int) or isinstance(other, int):
return self.value == other
return NotImplemented
def __ne__(self, other):
if isinstance(other, C):
return self.value != other.value
if isinstance(other, int) or isinstance(other, int):
return self.value != other
return NotImplemented
def __lt__(self, other):
if isinstance(other, C):
return self.value < other.value
if isinstance(other, int) or isinstance(other, int):
return self.value < other
return NotImplemented
def __le__(self, other):
if isinstance(other, C):
return self.value <= other.value
if isinstance(other, int) or isinstance(other, int):
return self.value <= other
return NotImplemented
def __gt__(self, other):
if isinstance(other, C):
return self.value > other.value
if isinstance(other, int) or isinstance(other, int):
return self.value > other
return NotImplemented
def __ge__(self, other):
if isinstance(other, C):
return self.value >= other.value
if isinstance(other, int) or isinstance(other, int):
return self.value >= other
return NotImplemented
c1 = C(1)
c2 = C(2)
c3 = C(3)
self.assertEqual(c1, 1)
c = {1: c1, 2: c2, 3: c3}
for x in 1, 2, 3:
for y in 1, 2, 3:
for op in "<", "<=", "==", "!=", ">", ">=":
self.assertTrue(eval("c[x] %s c[y]" % op) == eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
self.assertTrue(eval("c[x] %s y" % op) == eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
self.assertTrue(eval("x %s c[y]" % op) == eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
def test_descrdoc(self):
# Testing descriptor doc strings...
from _io import FileIO
def check(descr, what):
self.assertEqual(descr.__doc__, what)
check(FileIO.closed, "True if the file is closed") # getset descriptor
check(complex.real, "the real part of a complex number") # member descriptor
def test_doc_descriptor(self):
# Testing __doc__ descriptor...
# SF bug 542984
class DocDescr(object):
def __get__(self, object, otype):
if object:
object = object.__class__.__name__ + ' instance'
if otype:
otype = otype.__name__
return 'object=%s; type=%s' % (object, otype)
class OldClass:
__doc__ = DocDescr()
class NewClass(object):
__doc__ = DocDescr()
self.assertEqual(OldClass.__doc__, 'object=None; type=OldClass')
self.assertEqual(OldClass().__doc__, 'object=OldClass instance; type=OldClass')
self.assertEqual(NewClass.__doc__, 'object=None; type=NewClass')
self.assertEqual(NewClass().__doc__, 'object=NewClass instance; type=NewClass')
    def test_set_class(self):
        """__class__ assignment is allowed only between classes with
        compatible instance layouts; everything else must raise TypeError."""
        # Testing __class__ assignment...
        class C(object): pass
        class D(object): pass
        class E(object): pass
        class F(D, E): pass
        # Any pair of plain object subclasses is layout-compatible.
        for cls in C, D, E, F:
            for cls2 in C, D, E, F:
                x = cls()
                x.__class__ = cls2
                self.assertTrue(x.__class__ is cls2)
                x.__class__ = cls
                self.assertTrue(x.__class__ is cls)
        def cant(x, C):
            # Assert that neither assigning C to x.__class__ nor deleting
            # x.__class__ is permitted.
            try:
                x.__class__ = C
            except TypeError:
                pass
            else:
                self.fail("shouldn't allow %r.__class__ = %r" % (x, C))
            try:
                delattr(x, "__class__")
            except (TypeError, AttributeError):
                pass
            else:
                self.fail("shouldn't allow del %r.__class__" % x)
        # Incompatible layouts (builtins, immutables, object itself).
        cant(C(), list)
        cant(list(), C)
        cant(C(), 1)
        cant(C(), object)
        cant(object(), list)
        cant(list(), object)
        class Int(int): __slots__ = []
        cant(2, Int)
        cant(Int(), int)
        cant(True, int)
        cant(2, bool)
        o = object()
        cant(o, type(1))
        cant(o, type(None))
        del o
        # __slots__ layouts: same slot names (in any order) are compatible;
        # different names, counts, or added __dict__/__weakref__ are not.
        class G(object):
            __slots__ = ["a", "b"]
        class H(object):
            __slots__ = ["b", "a"]
        class I(object):
            __slots__ = ["a", "b"]
        class J(object):
            __slots__ = ["c", "b"]
        class K(object):
            __slots__ = ["a", "b", "d"]
        class L(H):
            __slots__ = ["e"]
        class M(I):
            __slots__ = ["e"]
        class N(J):
            __slots__ = ["__weakref__"]
        class P(J):
            __slots__ = ["__dict__"]
        class Q(J):
            pass
        class R(J):
            __slots__ = ["__dict__", "__weakref__"]
        for cls, cls2 in ((G, H), (G, I), (I, H), (Q, R), (R, Q)):
            x = cls()
            x.a = 1
            x.__class__ = cls2
            self.assertTrue(x.__class__ is cls2,
                   "assigning %r as __class__ for %r silently failed" % (cls2, x))
            self.assertEqual(x.a, 1)
            x.__class__ = cls
            self.assertTrue(x.__class__ is cls,
                   "assigning %r as __class__ for %r silently failed" % (cls, x))
            self.assertEqual(x.a, 1)
        for cls in G, J, K, L, M, N, P, R, list, Int:
            for cls2 in G, J, K, L, M, N, P, R, list, Int:
                if cls is cls2:
                    continue
                cant(cls(), cls2)
        # Issue5283: when __class__ changes in __del__, the wrong
        # type gets DECREF'd.
        class O(object):
            pass
        class A(object):
            def __del__(self):
                self.__class__ = O
        l = [A() for x in range(100)]
        del l
    def test_set_dict(self):
        """__dict__ assignment: allowed on plain instances, rejected on
        classes and modules; exceptions' __dict__ is replaceable."""
        # Testing __dict__ assignment...
        class C(object): pass
        a = C()
        a.__dict__ = {'b': 1}
        self.assertEqual(a.b, 1)
        def cant(x, dict):
            # Assert that x.__dict__ cannot be rebound to `dict`.
            try:
                x.__dict__ = dict
            except (AttributeError, TypeError):
                pass
            else:
                self.fail("shouldn't allow %r.__dict__ = %r" % (x, dict))
        cant(a, None)
        cant(a, [])
        cant(a, 1)
        del a.__dict__ # Deleting __dict__ is allowed
        class Base(object):
            pass
        def verify_dict_readonly(x):
            """
            x has to be an instance of a class inheriting from Base.
            """
            cant(x, {})
            try:
                del x.__dict__
            except (AttributeError, TypeError):
                pass
            else:
                self.fail("shouldn't allow del %r.__dict__" % x)
            dict_descr = Base.__dict__["__dict__"]
            try:
                dict_descr.__set__(x, {})
            except (AttributeError, TypeError):
                pass
            else:
                self.fail("dict_descr allowed access to %r's dict" % x)
        # Classes don't allow __dict__ assignment and have readonly dicts
        class Meta1(type, Base):
            pass
        class Meta2(Base, type):
            pass
        class D(object, metaclass=Meta1):
            pass
        class E(object, metaclass=Meta2):
            pass
        for cls in C, D, E:
            verify_dict_readonly(cls)
            class_dict = cls.__dict__
            try:
                class_dict["spam"] = "eggs"
            except TypeError:
                pass
            else:
                self.fail("%r's __dict__ can be modified" % cls)
        # Modules also disallow __dict__ assignment
        class Module1(types.ModuleType, Base):
            pass
        class Module2(Base, types.ModuleType):
            pass
        for ModuleType in Module1, Module2:
            mod = ModuleType("spam")
            verify_dict_readonly(mod)
            # The module dict itself remains writable, though.
            mod.__dict__["spam"] = "eggs"
        # Exception's __dict__ can be replaced, but not deleted
        # (at least not any more than regular exception's __dict__ can
        # be deleted; on CPython it is not the case, whereas on PyPy they
        # can, just like any other new-style instance's __dict__.)
        def can_delete_dict(e):
            try:
                del e.__dict__
            except (TypeError, AttributeError):
                return False
            else:
                return True
        class Exception1(Exception, Base):
            pass
        class Exception2(Base, Exception):
            pass
        for ExceptionType in Exception, Exception1, Exception2:
            e = ExceptionType()
            e.__dict__ = {"a": 1}
            self.assertEqual(e.a, 1)
            # Deletability must merely match plain exceptions' behavior.
            self.assertEqual(can_delete_dict(e), can_delete_dict(ValueError()))
    def test_pickles(self):
        """Pickling and deep-copying of new-style classes and instances,
        including __getnewargs__, __getstate__/__setstate__ and hybrid
        classic/new-style inheritance."""
        # Testing pickling and copying new-style classes and objects...
        import pickle
        def sorteditems(d):
            # Deterministic ordering for dict comparison.
            L = list(d.items())
            L.sort()
            return L
        # The classes must be module-level globals so pickle can find
        # them by qualified name.
        global C
        class C(object):
            def __init__(self, a, b):
                super(C, self).__init__()
                self.a = a
                self.b = b
            def __repr__(self):
                return "C(%r, %r)" % (self.a, self.b)
        global C1
        class C1(list):
            def __new__(cls, a, b):
                return super(C1, cls).__new__(cls)
            def __getnewargs__(self):
                return (self.a, self.b)
            def __init__(self, a, b):
                self.a = a
                self.b = b
            def __repr__(self):
                return "C1(%r, %r)<%r>" % (self.a, self.b, list(self))
        global C2
        class C2(int):
            def __new__(cls, a, b, val=0):
                return super(C2, cls).__new__(cls, val)
            def __getnewargs__(self):
                return (self.a, self.b, int(self))
            def __init__(self, a, b, val=0):
                self.a = a
                self.b = b
            def __repr__(self):
                return "C2(%r, %r)<%r>" % (self.a, self.b, int(self))
        global C3
        class C3(object):
            def __init__(self, foo):
                self.foo = foo
            def __getstate__(self):
                return self.foo
            def __setstate__(self, foo):
                self.foo = foo
        global C4classic, C4
        class C4classic: # classic
            pass
        class C4(C4classic, object): # mixed inheritance
            pass
        # Exercise protocols 0 and 1.
        for bin in 0, 1:
            for cls in C, C1, C2:
                s = pickle.dumps(cls, bin)
                cls2 = pickle.loads(s)
                self.assertTrue(cls2 is cls)
            a = C1(1, 2); a.append(42); a.append(24)
            b = C2("hello", "world", 42)
            s = pickle.dumps((a, b), bin)
            x, y = pickle.loads(s)
            self.assertEqual(x.__class__, a.__class__)
            self.assertEqual(sorteditems(x.__dict__), sorteditems(a.__dict__))
            self.assertEqual(y.__class__, b.__class__)
            self.assertEqual(sorteditems(y.__dict__), sorteditems(b.__dict__))
            self.assertEqual(repr(x), repr(a))
            self.assertEqual(repr(y), repr(b))
            # Test for __getstate__ and __setstate__ on new style class
            u = C3(42)
            s = pickle.dumps(u, bin)
            v = pickle.loads(s)
            self.assertEqual(u.__class__, v.__class__)
            self.assertEqual(u.foo, v.foo)
            # Test for picklability of hybrid class
            u = C4()
            u.foo = 42
            s = pickle.dumps(u, bin)
            v = pickle.loads(s)
            self.assertEqual(u.__class__, v.__class__)
            self.assertEqual(u.foo, v.foo)
        # Testing copy.deepcopy()
        import copy
        for cls in C, C1, C2:
            cls2 = copy.deepcopy(cls)
            self.assertTrue(cls2 is cls)
        a = C1(1, 2); a.append(42); a.append(24)
        b = C2("hello", "world", 42)
        x, y = copy.deepcopy((a, b))
        self.assertEqual(x.__class__, a.__class__)
        self.assertEqual(sorteditems(x.__dict__), sorteditems(a.__dict__))
        self.assertEqual(y.__class__, b.__class__)
        self.assertEqual(sorteditems(y.__dict__), sorteditems(b.__dict__))
        self.assertEqual(repr(x), repr(a))
        self.assertEqual(repr(y), repr(b))
def test_pickle_slots(self):
# Testing pickling of classes with __slots__ ...
import pickle
# Pickling of classes with __slots__ but without __getstate__ should fail
# (if using protocol 0 or 1)
global B, C, D, E
class B(object):
pass
for base in [object, B]:
class C(base):
__slots__ = ['a']
class D(C):
pass
try:
pickle.dumps(C(), 0)
except TypeError:
pass
else:
self.fail("should fail: pickle C instance - %s" % base)
try:
pickle.dumps(C(), 0)
except TypeError:
pass
else:
self.fail("should fail: pickle D instance - %s" % base)
# Give C a nice generic __getstate__ and __setstate__
class C(base):
__slots__ = ['a']
def __getstate__(self):
try:
d = self.__dict__.copy()
except AttributeError:
d = {}
for cls in self.__class__.__mro__:
for sn in cls.__dict__.get('__slots__', ()):
try:
d[sn] = getattr(self, sn)
except AttributeError:
pass
return d
def __setstate__(self, d):
for k, v in list(d.items()):
setattr(self, k, v)
class D(C):
pass
# Now it should work
x = C()
y = pickle.loads(pickle.dumps(x))
self.assertEqual(hasattr(y, 'a'), 0)
x.a = 42
y = pickle.loads(pickle.dumps(x))
self.assertEqual(y.a, 42)
x = D()
x.a = 42
x.b = 100
y = pickle.loads(pickle.dumps(x))
self.assertEqual(y.a + y.b, 142)
# A subclass that adds a slot should also work
class E(C):
__slots__ = ['b']
x = E()
x.a = 42
x.b = "foo"
y = pickle.loads(pickle.dumps(x))
self.assertEqual(y.a, x.a)
self.assertEqual(y.b, x.b)
    def test_binary_operator_override(self):
        """Binary-operator overrides on an int subclass take effect for both
        left and right operands, including three-argument pow()."""
        # Testing overrides of binary operations...
        class I(int):
            def __repr__(self):
                return "I(%r)" % int(self)
            def __add__(self, other):
                return I(int(self) + int(other))
            __radd__ = __add__
            def __pow__(self, other, mod=None):
                if mod is None:
                    return I(pow(int(self), int(other)))
                else:
                    return I(pow(int(self), int(other), int(mod)))
            def __rpow__(self, other, mod=None):
                if mod is None:
                    # pow() accepts None as the third argument.
                    return I(pow(int(other), int(self), mod))
                else:
                    return I(pow(int(other), int(self), int(mod)))
        self.assertEqual(repr(I(1) + I(2)), "I(3)")
        self.assertEqual(repr(I(1) + 2), "I(3)")
        self.assertEqual(repr(1 + I(2)), "I(3)")
        self.assertEqual(repr(I(2) ** I(3)), "I(8)")
        self.assertEqual(repr(2 ** I(3)), "I(8)")
        self.assertEqual(repr(I(2) ** 3), "I(8)")
        self.assertEqual(repr(pow(I(2), I(3), I(5))), "I(3)")
        # Merely defining __eq__ on a str subclass must not blow up;
        # the class is deliberately not instantiated here.
        class S(str):
            def __eq__(self, other):
                return self.lower() == other.lower()
    def test_subclass_propagation(self):
        """Adding, overriding, and deleting slot methods (__hash__,
        __getattribute__, __getattr__) on base classes must propagate to
        existing subclasses and instances in MRO order."""
        # Testing propagation of slot functions to subclasses...
        class A(object):
            pass
        class B(A):
            pass
        class C(A):
            pass
        class D(B, C):
            pass
        d = D()
        orig_hash = hash(d) # related to id(d) in platform-dependent ways
        # D's MRO is D, B, C, A: the most-derived definition wins.
        A.__hash__ = lambda self: 42
        self.assertEqual(hash(d), 42)
        C.__hash__ = lambda self: 314
        self.assertEqual(hash(d), 314)
        B.__hash__ = lambda self: 144
        self.assertEqual(hash(d), 144)
        D.__hash__ = lambda self: 100
        self.assertEqual(hash(d), 100)
        # Setting __hash__ = None makes the type unhashable; deleting it
        # re-exposes the next definition along the MRO.
        D.__hash__ = None
        self.assertRaises(TypeError, hash, d)
        del D.__hash__
        self.assertEqual(hash(d), 144)
        B.__hash__ = None
        self.assertRaises(TypeError, hash, d)
        del B.__hash__
        self.assertEqual(hash(d), 314)
        C.__hash__ = None
        self.assertRaises(TypeError, hash, d)
        del C.__hash__
        self.assertEqual(hash(d), 42)
        A.__hash__ = None
        self.assertRaises(TypeError, hash, d)
        del A.__hash__
        self.assertEqual(hash(d), orig_hash)
        d.foo = 42
        d.bar = 42
        self.assertEqual(d.foo, 42)
        self.assertEqual(d.bar, 42)
        # __getattribute__ intercepts all lookups.
        def __getattribute__(self, name):
            if name == "foo":
                return 24
            return object.__getattribute__(self, name)
        A.__getattribute__ = __getattribute__
        self.assertEqual(d.foo, 24)
        self.assertEqual(d.bar, 42)
        # __getattr__ only fires when normal lookup fails.
        def __getattr__(self, name):
            if name in ("spam", "foo", "bar"):
                return "hello"
            raise AttributeError(name)
        B.__getattr__ = __getattr__
        self.assertEqual(d.spam, "hello")
        self.assertEqual(d.foo, 24)
        self.assertEqual(d.bar, 42)
        del A.__getattribute__
        self.assertEqual(d.foo, 42)
        del d.foo
        self.assertEqual(d.foo, "hello")
        self.assertEqual(d.bar, 42)
        del B.__getattr__
        try:
            d.foo
        except AttributeError:
            pass
        else:
            self.fail("d.foo should be undefined now")
        # Test a nasty bug in recurse_down_subclasses()
        class A(object):
            pass
        class B(A):
            pass
        del B
        support.gc_collect()
        A.__setitem__ = lambda *a: None # crash
    def test_buffer_inheritance(self):
        """The buffer interface is inherited by bytes subclasses but must not
        appear on int subclasses (SF bug #470040)."""
        # Testing that buffer interface is inherited ...
        import binascii
        # SF bug [#470040] ParseTuple t# vs subclasses.
        class MyBytes(bytes):
            pass
        base = b'abc'
        m = MyBytes(base)
        # b2a_hex uses the buffer interface to get its argument's value, via
        # PyArg_ParseTuple 't#' code.
        self.assertEqual(binascii.b2a_hex(m), binascii.b2a_hex(base))
        class MyInt(int):
            pass
        m = MyInt(42)
        try:
            binascii.b2a_hex(m)
            self.fail('subclass of int should not have a buffer interface')
        except TypeError:
            pass
    def test_str_of_str_subclass(self):
        """__str__/__repr__ overridden on a str subclass are honored by
        str(), repr() and print()."""
        # Testing __str__ defined in subclass of str ...
        import binascii
        import io
        class octetstring(str):
            def __str__(self):
                # Hex-encode the character's ASCII bytes.
                return binascii.b2a_hex(self.encode('ascii')).decode("ascii")
            def __repr__(self):
                return self + " repr"
        o = octetstring('A')
        self.assertEqual(type(o), octetstring)
        self.assertEqual(type(str(o)), str)
        self.assertEqual(type(repr(o)), str)
        self.assertEqual(ord(o), 0x41)
        self.assertEqual(str(o), '41')
        self.assertEqual(repr(o), 'A repr')
        self.assertEqual(o.__str__(), '41')
        self.assertEqual(o.__repr__(), 'A repr')
        capture = io.StringIO()
        # Calling str() or not exercises different internal paths.
        print(o, file=capture)
        print(str(o), file=capture)
        self.assertEqual(capture.getvalue(), '41\n41\n')
        capture.close()
    def test_keyword_arguments(self):
        """Keyword arguments are accepted by __call__ and __init__."""
        # Testing keyword arguments to __init__, __call__...
        def f(a): return a
        self.assertEqual(f.__call__(a=42), 42)
        a = []
        # NOTE(review): list.__init__ taking a `sequence` keyword is
        # implementation-specific (CPython 3 rejects keywords here);
        # presumably this file targets an interpreter that accepts it.
        list.__init__(a, sequence=[0, 1, 2])
        self.assertEqual(a, [0, 1, 2])
def test_recursive_call(self):
# Testing recursive __call__() by setting to instance of class...
class A(object):
pass
A.__call__ = A()
try:
A()()
except RuntimeError:
pass
else:
self.fail("Recursion limit should have been reached for __call__()")
    def test_delete_hook(self):
        """__del__ runs when the last reference goes away, and `del x[i]` on
        a class without __delitem__ raises TypeError."""
        # Testing __del__ hook...
        log = []
        class C(object):
            def __del__(self):
                log.append(1)
        c = C()
        self.assertEqual(log, [])
        del c
        # Non-refcounting implementations need an explicit collection.
        support.gc_collect()
        self.assertEqual(log, [1])
        class D(object): pass
        d = D()
        try: del d[0]
        except TypeError: pass
        else: self.fail("invalid del() didn't raise TypeError")
def test_hash_inheritance(self):
# Testing hash of mutable subclasses...
class mydict(dict):
pass
d = mydict()
try:
hash(d)
except TypeError:
pass
else:
self.fail("hash() of dict subclass should fail")
class mylist(list):
pass
d = mylist()
try:
hash(d)
except TypeError:
pass
else:
self.fail("hash() of list subclass should fail")
    def test_str_operations(self):
        """Assorted str operations raise the documented exception types for
        invalid operands and format strings."""
        try: 'a' + 5
        except TypeError: pass
        else: self.fail("'' + 5 doesn't raise TypeError")
        try: ''.split('')
        except ValueError: pass
        else: self.fail("''.split('') doesn't raise ValueError")
        try: ''.join([0])
        except TypeError: pass
        else: self.fail("''.join([0]) doesn't raise TypeError")
        try: ''.rindex('5')
        except ValueError: pass
        else: self.fail("''.rindex('5') doesn't raise ValueError")
        try: '%(n)s' % None
        except TypeError: pass
        else: self.fail("'%(n)s' % None doesn't raise TypeError")
        try: '%(n' % {}
        except ValueError: pass
        else: self.fail("'%(n' % {} '' doesn't raise ValueError")
        # '%*s' needs an int width plus a value, so a lone string fails.
        try: '%*s' % ('abc')
        except TypeError: pass
        else: self.fail("'%*s' % ('abc') doesn't raise TypeError")
        try: '%*.*s' % ('abc', 5)
        except TypeError: pass
        else: self.fail("'%*.*s' % ('abc', 5) doesn't raise TypeError")
        try: '%s' % (1, 2)
        except TypeError: pass
        else: self.fail("'%s' % (1, 2) doesn't raise TypeError")
        try: '%' % None
        except ValueError: pass
        else: self.fail("'%' % None doesn't raise ValueError")
        self.assertEqual('534253'.isdigit(), 1)
        self.assertEqual('534253x'.isdigit(), 0)
        self.assertEqual('%c' % 5, '\x05')
        self.assertEqual('%c' % '5', '5')
def test_deepcopy_recursive(self):
# Testing deepcopy of recursive objects...
class Node:
pass
a = Node()
b = Node()
a.b = b
b.a = a
z = deepcopy(a) # This blew up before
def test_unintialized_modules(self):
# Testing uninitialized module objects...
from types import ModuleType as M
m = M.__new__(M)
str(m)
self.assertEqual(hasattr(m, "__name__"), 0)
self.assertEqual(hasattr(m, "__file__"), 0)
self.assertEqual(hasattr(m, "foo"), 0)
self.assertFalse(m.__dict__) # None or {} are both reasonable answers
m.foo = 1
self.assertEqual(m.__dict__, {"foo": 1})
    def test_funny_new(self):
        """__new__ may return an arbitrary object or an instance of a
        different class; __init__ is only invoked for instances of cls."""
        # Testing __new__ returning something unexpected...
        class C(object):
            def __new__(cls, arg):
                # str -> unrelated list; int -> instance of subclass D;
                # anything else -> normal instance of cls.
                if isinstance(arg, str): return [1, 2, 3]
                elif isinstance(arg, int): return object.__new__(D)
                else: return object.__new__(cls)
        class D(C):
            def __init__(self, arg):
                self.foo = arg
        self.assertEqual(C("1"), [1, 2, 3])
        self.assertEqual(D("1"), [1, 2, 3])
        d = D(None)
        self.assertEqual(d.foo, None)
        d = C(1)
        self.assertIsInstance(d, D)
        self.assertEqual(d.foo, 1)
        d = D(1)
        self.assertIsInstance(d, D)
        self.assertEqual(d.foo, 1)
def test_imul_bug(self):
# Testing for __imul__ problems...
# SF bug 544647
class C(object):
def __imul__(self, other):
return (self, other)
x = C()
y = x
y *= 1.0
self.assertEqual(y, (x, 1.0))
y = x
y *= 2
self.assertEqual(y, (x, 2))
y = x
y *= 3
self.assertEqual(y, (x, 3))
y = x
y *= 1<<100
self.assertEqual(y, (x, 1<<100))
y = x
y *= None
self.assertEqual(y, (x, None))
y = x
y *= "foo"
self.assertEqual(y, (x, "foo"))
    def test_copy_setstate(self):
        """copy.copy() and copy.deepcopy() must call __setstate__ with the
        value returned from __getstate__."""
        # Testing that copy.*copy() correctly uses __setstate__...
        import copy
        class C(object):
            def __init__(self, foo=None):
                self.foo = foo
                self.__foo = foo
            def setfoo(self, foo=None):
                self.foo = foo
            def getfoo(self):
                return self.__foo
            def __getstate__(self):
                return [self.foo]
            def __setstate__(self_, lst):
                # First parameter is named self_ so the closure can still
                # reach the enclosing test's `self` for assertions.
                self.assertEqual(len(lst), 1)
                self_.__foo = self_.foo = lst[0]
        a = C(42)
        a.setfoo(24)
        self.assertEqual(a.foo, 24)
        self.assertEqual(a.getfoo(), 42)
        # Both copies restore state via __setstate__, so the private
        # attribute now matches foo.
        b = copy.copy(a)
        self.assertEqual(b.foo, 24)
        self.assertEqual(b.getfoo(), 24)
        b = copy.deepcopy(a)
        self.assertEqual(b.foo, 24)
        self.assertEqual(b.getfoo(), 24)
    def test_slices(self):
        """Slicing via [:n], slice objects and explicit __getitem__ /
        __setitem__ agree for str, tuple, list and their subclasses."""
        # Testing cases with slices and overridden __getitem__ ...
        # Strings
        self.assertEqual("hello"[:4], "hell")
        self.assertEqual("hello"[slice(4)], "hell")
        self.assertEqual(str.__getitem__("hello", slice(4)), "hell")
        class S(str):
            def __getitem__(self, x):
                return str.__getitem__(self, x)
        self.assertEqual(S("hello")[:4], "hell")
        self.assertEqual(S("hello")[slice(4)], "hell")
        self.assertEqual(S("hello").__getitem__(slice(4)), "hell")
        # Tuples
        self.assertEqual((1,2,3)[:2], (1,2))
        self.assertEqual((1,2,3)[slice(2)], (1,2))
        self.assertEqual(tuple.__getitem__((1,2,3), slice(2)), (1,2))
        class T(tuple):
            def __getitem__(self, x):
                return tuple.__getitem__(self, x)
        self.assertEqual(T((1,2,3))[:2], (1,2))
        self.assertEqual(T((1,2,3))[slice(2)], (1,2))
        self.assertEqual(T((1,2,3)).__getitem__(slice(2)), (1,2))
        # Lists
        self.assertEqual([1,2,3][:2], [1,2])
        self.assertEqual([1,2,3][slice(2)], [1,2])
        self.assertEqual(list.__getitem__([1,2,3], slice(2)), [1,2])
        class L(list):
            def __getitem__(self, x):
                return list.__getitem__(self, x)
        self.assertEqual(L([1,2,3])[:2], [1,2])
        self.assertEqual(L([1,2,3])[slice(2)], [1,2])
        self.assertEqual(L([1,2,3]).__getitem__(slice(2)), [1,2])
        # Now do lists and __setitem__
        a = L([1,2,3])
        a[slice(1, 3)] = [3,2]
        self.assertEqual(a, [1,3,2])
        a[slice(0, 2, 1)] = [3,1]
        self.assertEqual(a, [3,1,2])
        a.__setitem__(slice(1, 3), [2,1])
        self.assertEqual(a, [3,2,1])
        a.__setitem__(slice(0, 2, 1), [2,3])
        self.assertEqual(a, [2,3,1])
    def test_subtype_resurrection(self):
        """An instance resurrected by its own __del__ must survive
        finalization without corrupting GC state (2.2.1 regression)."""
        # Testing resurrection of new-style instance...
        class C(object):
            container = []
            def __del__(self):
                # resurrect the instance
                C.container.append(self)
        c = C()
        c.attr = 42
        # The most interesting thing here is whether this blows up, due to
        # flawed GC tracking logic in typeobject.c's call_finalizer() (a 2.2.1
        # bug).
        del c
        # If that didn't blow up, it's also interesting to see whether clearing
        # the last container slot works: that will attempt to delete c again,
        # which will cause c to get appended back to the container again
        # "during" the del. (On non-CPython implementations, however, __del__
        # is typically not called again.)
        support.gc_collect()
        self.assertEqual(len(C.container), 1)
        del C.container[-1]
        if support.check_impl_detail():
            support.gc_collect()
            self.assertEqual(len(C.container), 1)
            self.assertEqual(C.container[-1].attr, 42)
        # Make c mortal again, so that the test framework with -l doesn't report
        # it as a leak.
        del C.__del__
def test_slots_trash(self):
# Testing slot trash...
# Deallocating deeply nested slotted trash caused stack overflows
class trash(object):
__slots__ = ['x']
def __init__(self, x):
self.x = x
o = None
for i in range(50000):
o = trash(o)
del o
    def test_slots_multiple_inheritance(self):
        """Multiple inheritance mixing a slotted base with a plain base must
        not crash, and the plain base keeps providing __dict__/__weakref__
        (SF bug 575229)."""
        # SF bug 575229, multiple inheritance w/ slots dumps core
        class A(object):
            __slots__=()
        class B(object):
            pass
        class C(A,B) :
            __slots__=()
        if support.check_impl_detail():
            self.assertEqual(C.__basicsize__, B.__basicsize__)
        self.assertTrue(hasattr(C, '__dict__'))
        self.assertTrue(hasattr(C, '__weakref__'))
        # Attribute assignment works because B contributes a __dict__.
        C().x = 2
def test_rmul(self):
# Testing correct invocation of __rmul__...
# SF patch 592646
class C(object):
def __mul__(self, other):
return "mul"
def __rmul__(self, other):
return "rmul"
a = C()
self.assertEqual(a*2, "mul")
self.assertEqual(a*2.2, "mul")
self.assertEqual(2*a, "rmul")
self.assertEqual(2.2*a, "rmul")
def test_ipow(self):
# Testing correct invocation of __ipow__...
# [SF bug 620179]
class C(object):
def __ipow__(self, other):
pass
a = C()
a **= 2
    def test_mutable_bases(self):
        """__bases__ reassignment: valid reassignments propagate methods and
        attributes to subclasses; deletion, empty tuples, cycles and
        duplicates must be rejected."""
        # Testing mutable bases...
        # stuff that should work:
        class C(object):
            pass
        class C2(object):
            def __getattribute__(self, attr):
                if attr == 'a':
                    return 2
                else:
                    return super(C2, self).__getattribute__(attr)
            def meth(self):
                return 1
        class D(C):
            pass
        class E(D):
            pass
        d = D()
        e = E()
        D.__bases__ = (C,)
        D.__bases__ = (C2,)
        # Existing instances of D and E now see C2's methods and its
        # __getattribute__ hook.
        self.assertEqual(d.meth(), 1)
        self.assertEqual(e.meth(), 1)
        self.assertEqual(d.a, 2)
        self.assertEqual(e.a, 2)
        self.assertEqual(C2.__subclasses__(), [D])
        try:
            del D.__bases__
        except (TypeError, AttributeError):
            pass
        else:
            self.fail("shouldn't be able to delete .__bases__")
        try:
            D.__bases__ = ()
        except TypeError as msg:
            # An empty tuple must be rejected, but with the right message.
            if str(msg) == "a new-style class can't have only classic bases":
                self.fail("wrong error message for .__bases__ = ()")
        else:
            self.fail("shouldn't be able to set .__bases__ to ()")
        try:
            D.__bases__ = (D,)
        except TypeError:
            pass
        else:
            # actually, we'll have crashed by here...
            self.fail("shouldn't be able to create inheritance cycles")
        try:
            D.__bases__ = (C, C)
        except TypeError:
            pass
        else:
            self.fail("didn't detect repeated base classes")
        try:
            D.__bases__ = (E,)
        except TypeError:
            pass
        else:
            self.fail("shouldn't be able to create inheritance cycles")
    def test_builtin_bases(self):
        """Every builtin type's __bases__ can be read without crashing
        (issue #5787), and layout-incompatible __bases__ reassignments are
        rejected for builtins and their subclasses."""
        # Make sure all the builtin types can have their base queried without
        # segfaulting. See issue #5787.
        builtin_types = [tp for tp in builtins.__dict__.values()
                         if isinstance(tp, type)]
        for tp in builtin_types:
            object.__getattribute__(tp, "__bases__")
            if tp is not object:
                self.assertEqual(len(tp.__bases__), 1, tp)
        class L(list):
            pass
        class C(object):
            pass
        class D(C):
            pass
        try:
            L.__bases__ = (dict,)
        except TypeError:
            pass
        else:
            self.fail("shouldn't turn list subclass into dict subclass")
        try:
            list.__bases__ = (dict,)
        except TypeError:
            pass
        else:
            self.fail("shouldn't be able to assign to list.__bases__")
        try:
            D.__bases__ = (C, list)
        except TypeError:
            pass
        else:
            assert 0, "best_base calculation found wanting"
    def test_mutable_bases_with_failing_mro(self):
        """If a metaclass's mro() raises while __bases__ is being reassigned,
        the exception propagates and already-adjusted subclass MROs are
        rolled back."""
        # Testing mutable bases with failing mro...
        class WorkOnce(type):
            # mro() succeeds on the first call (class creation) and raises
            # on the second (triggered by the __bases__ reassignment).
            def __new__(self, name, bases, ns):
                self.flag = 0
                return super(WorkOnce, self).__new__(WorkOnce, name, bases, ns)
            def mro(self):
                if self.flag > 0:
                    raise RuntimeError("bozo")
                else:
                    self.flag += 1
                    return type.mro(self)
        class WorkAlways(type):
            def mro(self):
                # this is here to make sure that .mro()s aren't called
                # with an exception set (which was possible at one point).
                # An error message will be printed in a debug build.
                # What's a good way to test for this?
                return type.mro(self)
        class C(object):
            pass
        class C2(object):
            pass
        class D(C):
            pass
        class E(D):
            pass
        class F(D, metaclass=WorkOnce):
            pass
        class G(D, metaclass=WorkAlways):
            pass
        # Immediate subclasses have their mro's adjusted in alphabetical
        # order, so E's will get adjusted before adjusting F's fails. We
        # check here that E's gets restored.
        E_mro_before = E.__mro__
        D_mro_before = D.__mro__
        try:
            D.__bases__ = (C2,)
        except RuntimeError:
            self.assertEqual(E.__mro__, E_mro_before)
            self.assertEqual(D.__mro__, D_mro_before)
        else:
            self.fail("exception not propagated")
    def test_mutable_bases_catch_mro_conflict(self):
        """Reassigning __bases__ must be rejected when it would make a
        grandchild's MRO inconsistent."""
        # Testing mutable bases catch mro conflict...
        class A(object):
            pass
        class B(object):
            pass
        class C(A, B):
            pass
        class D(A, B):
            pass
        class E(C, D):
            pass
        try:
            # Reversing C's bases conflicts with D's (A, B) order in E's MRO.
            C.__bases__ = (B, A)
        except TypeError:
            pass
        else:
            self.fail("didn't catch MRO conflict")
def test_mutable_names(self):
# Testing mutable names...
class C(object):
pass
# C.__module__ could be 'test_descr' or '__main__'
mod = C.__module__
C.__name__ = 'D'
self.assertEqual((C.__module__, C.__name__), (mod, 'D'))
C.__name__ = 'D.E'
self.assertEqual((C.__module__, C.__name__), (mod, 'D.E'))
    def test_subclass_right_op(self):
        """When the right operand is a proper subclass of the left operand's
        class, its __r<op>__ takes priority over the left's __<op>__."""
        # Testing correct dispatch of subclass overloading __r<op>__...
        # This code tests various cases where right-dispatch of a subclass
        # should be preferred over left-dispatch of a base class.
        # Case 1: subclass of int; this tests code in abstract.c::binary_op1()
        class B(int):
            def __floordiv__(self, other):
                return "B.__floordiv__"
            def __rfloordiv__(self, other):
                return "B.__rfloordiv__"
        self.assertEqual(B(1) // 1, "B.__floordiv__")
        self.assertEqual(1 // B(1), "B.__rfloordiv__")
        # Case 2: subclass of object; this is just the baseline for case 3
        class C(object):
            def __floordiv__(self, other):
                return "C.__floordiv__"
            def __rfloordiv__(self, other):
                return "C.__rfloordiv__"
        self.assertEqual(C() // 1, "C.__floordiv__")
        self.assertEqual(1 // C(), "C.__rfloordiv__")
        # Case 3: subclass of new-style class; here it gets interesting
        class D(C):
            def __floordiv__(self, other):
                return "D.__floordiv__"
            def __rfloordiv__(self, other):
                return "D.__rfloordiv__"
        self.assertEqual(D() // C(), "D.__floordiv__")
        self.assertEqual(C() // D(), "D.__rfloordiv__")
        # Case 4: this didn't work right in 2.2.2 and 2.3a1
        # E inherits C's methods unchanged, so no right-dispatch priority.
        class E(C):
            pass
        self.assertEqual(E.__rfloordiv__, C.__rfloordiv__)
        self.assertEqual(E() // 1, "C.__floordiv__")
        self.assertEqual(1 // E(), "C.__rfloordiv__")
        self.assertEqual(E() // C(), "C.__floordiv__")
        self.assertEqual(C() // E(), "C.__floordiv__") # This one would fail
    @support.impl_detail("testing an internal kind of method object")
    def test_meth_class_get(self):
        """The classmethod descriptor behind METH_CLASS C methods binds
        correctly via __get__ and rejects invalid owner arguments."""
        # Testing __get__ method of METH_CLASS C methods...
        # Full coverage of descrobject.c::classmethod_get()
        # Baseline
        arg = [1, 2, 3]
        res = {1: None, 2: None, 3: None}
        self.assertEqual(dict.fromkeys(arg), res)
        self.assertEqual({}.fromkeys(arg), res)
        # Now get the descriptor
        descr = dict.__dict__["fromkeys"]
        # More baseline using the descriptor directly
        self.assertEqual(descr.__get__(None, dict)(arg), res)
        self.assertEqual(descr.__get__({})(arg), res)
        # Now check various error cases
        try:
            descr.__get__(None, None)
        except TypeError:
            pass
        else:
            self.fail("shouldn't have allowed descr.__get__(None, None)")
        try:
            descr.__get__(42)
        except TypeError:
            pass
        else:
            self.fail("shouldn't have allowed descr.__get__(42)")
        try:
            descr.__get__(None, 42)
        except TypeError:
            pass
        else:
            self.fail("shouldn't have allowed descr.__get__(None, 42)")
        try:
            # int is not a dict subclass, so binding to it must fail.
            descr.__get__(None, int)
        except TypeError:
            pass
        else:
            self.fail("shouldn't have allowed descr.__get__(None, int)")
    def test_isinst_isclass(self):
        """isinstance() works through a __getattribute__-based proxy for
        classic and new-style classes and their subclasses."""
        # Testing proxy isinstance() and isclass()...
        class Proxy(object):
            def __init__(self, obj):
                self.__obj = obj
            def __getattribute__(self, name):
                # Forward everything except the name-mangled attribute.
                if name.startswith("_Proxy__"):
                    return object.__getattribute__(self, name)
                else:
                    return getattr(self.__obj, name)
        # Test with a classic class
        class C:
            pass
        a = C()
        pa = Proxy(a)
        self.assertIsInstance(a, C)  # Baseline
        self.assertIsInstance(pa, C) # Test
        # Test with a classic subclass
        class D(C):
            pass
        a = D()
        pa = Proxy(a)
        self.assertIsInstance(a, C)  # Baseline
        self.assertIsInstance(pa, C) # Test
        # Test with a new-style class
        class C(object):
            pass
        a = C()
        pa = Proxy(a)
        self.assertIsInstance(a, C)  # Baseline
        self.assertIsInstance(pa, C) # Test
        # Test with a new-style subclass
        class D(C):
            pass
        a = D()
        pa = Proxy(a)
        self.assertIsInstance(a, C)  # Baseline
        self.assertIsInstance(pa, C) # Test
    def test_proxy_super(self):
        """super() works when the unbound method is called with a proxy
        object whose __class__ is forwarded to the wrapped instance."""
        # Testing super() for a proxy object...
        class Proxy(object):
            def __init__(self, obj):
                self.__obj = obj
            def __getattribute__(self, name):
                if name.startswith("_Proxy__"):
                    return object.__getattribute__(self, name)
                else:
                    return getattr(self.__obj, name)
        class B(object):
            def f(self):
                return "B.f"
        class C(B):
            def f(self):
                return super(C, self).f() + "->C.f"
        obj = C()
        p = Proxy(obj)
        # Call C.f unbound with the proxy standing in for self.
        self.assertEqual(C.__dict__["f"](p), "B.f->C.f")
    def test_carloverre(self):
        """object.__setattr__/__delattr__ must refuse to bypass a built-in
        type's attribute protection (Carlo Verre's hack)."""
        # Testing prohibition of Carlo Verre's hack...
        try:
            object.__setattr__(str, "foo", 42)
        except TypeError:
            pass
        else:
            self.fail("Carlo Verre __setattr__ succeeded!")
        try:
            object.__delattr__(str, "lower")
        except TypeError:
            pass
        else:
            self.fail("Carlo Verre __delattr__ succeeded!")
    def test_weakref_segfault(self):
        """Dereferencing a weakref from __del__ while the referent is being
        torn down must not segfault (SF 742911)."""
        # Testing weakref segfault...
        # SF 742911
        import weakref
        class Provoker:
            def __init__(self, referrent):
                self.ref = weakref.ref(referrent)
            def __del__(self):
                # Runs during teardown of the referent's attribute dict.
                x = self.ref()
        class Oops(object):
            pass
        o = Oops()
        o.whatever = Provoker(o)
        del o
def test_wrapper_segfault(self):
# SF 927248: deeply nested wrappers could cause stack overflow
f = lambda:None
for i in range(1000000):
f = f.__call__
f = None
    def test_file_fault(self):
        """print() must cope with sys.stdout being swapped out by the very
        __getattr__ lookup it performs."""
        # Testing sys.stdout is changed in getattr...
        test_stdout = sys.stdout
        class StdoutGuard:
            def __getattr__(self, attr):
                # Restore the real stdout, then blow up on any access.
                sys.stdout = sys.__stdout__
                raise RuntimeError("Premature access to sys.stdout.%s" % attr)
        sys.stdout = StdoutGuard()
        try:
            print("Oops!")
        except RuntimeError:
            pass
        finally:
            sys.stdout = test_stdout
    def test_vicious_descriptor_nonsense(self):
        """Attribute lookup must survive an instance-dict key whose __eq__
        deletes the class attribute mid-lookup (potential segfault)."""
        # Testing vicious_descriptor_nonsense...
        # A potential segfault spotted by Thomas Wouters in mail to
        # python-dev 2003-04-17, turned into an example & fixed by Michael
        # Hudson just less than four months later...
        class Evil(object):
            def __hash__(self):
                # Collide with the string key 'attr'.
                return hash('attr')
            def __eq__(self, other):
                # Side effect: remove the class-level descriptor during
                # the dict probe.
                del C.attr
                return 0
        class Descr(object):
            def __get__(self, ob, type=None):
                return 1
        class C(object):
            attr = Descr()
        c = C()
        c.__dict__[Evil()] = 0
        self.assertEqual(c.attr, 1)
        # this makes a crash more likely:
        support.gc_collect()
        self.assertEqual(hasattr(c, 'attr'), False)
def test_init(self):
# SF 1155938
class Foo(object):
def __init__(self):
return 10
try:
Foo()
except TypeError:
pass
else:
self.fail("did not test __init__() for None return")
    def test_method_wrapper(self):
        """Method-wrapper objects support equality, __name__, __doc__ and
        hashing (reflection added in 2.5)."""
        # Testing method-wrapper objects...
        # <type 'method-wrapper'> did not support any reflection before 2.5
        # XXX should methods really support __eq__?
        l = []
        self.assertEqual(l.__add__, l.__add__)
        self.assertEqual(l.__add__, [].__add__)
        self.assertTrue(l.__add__ != [5].__add__)
        self.assertTrue(l.__add__ != l.__mul__)
        self.assertTrue(l.__add__.__name__ == '__add__')
        if hasattr(l.__add__, '__self__'):
            # CPython
            self.assertTrue(l.__add__.__self__ is l)
            self.assertTrue(l.__add__.__objclass__ is list)
        else:
            # Python implementations where [].__add__ is a normal bound method
            self.assertTrue(l.__add__.im_self is l)
            self.assertTrue(l.__add__.im_class is list)
        self.assertEqual(l.__add__.__doc__, list.__add__.__doc__)
        # A wrapper bound to an unhashable object is itself unhashable.
        try:
            hash(l.__add__)
        except TypeError:
            pass
        else:
            self.fail("no TypeError from hash([].__add__)")
        t = ()
        t += (7,)
        self.assertEqual(t.__add__, (7,).__add__)
        self.assertEqual(hash(t.__add__), hash((7,).__add__))
    def test_not_implemented(self):
        """Every binary special method returning NotImplemented must make
        the operation (and its in-place form) raise TypeError."""
        # Testing NotImplemented...
        # all binary methods should be able to return a NotImplemented
        import operator
        def specialmethod(self, other):
            return NotImplemented
        def check(expr, x, y):
            # Evaluate expr with the given operands; TypeError is required.
            try:
                exec(expr, {'x': x, 'y': y, 'operator': operator})
            except TypeError:
                pass
            else:
                self.fail("no TypeError from %r" % (expr,))
        N1 = sys.maxsize + 1    # might trigger OverflowErrors instead of
                                # TypeErrors
        N2 = sys.maxsize       # if sizeof(int) < sizeof(long), might trigger
                               #   ValueErrors instead of TypeErrors
        # NOTE(review): '__div__' is not a special method in Python 3
        # ('x / y' uses __truediv__), so that entry passes only because the
        # operation fails anyway — presumably kept from the Python 2 version.
        for name, expr, iexpr in [
                ('__add__',      'x + y',                   'x += y'),
                ('__sub__',      'x - y',                   'x -= y'),
                ('__mul__',      'x * y',                   'x *= y'),
                ('__truediv__',  'operator.truediv(x, y)',  None),
                ('__floordiv__', 'operator.floordiv(x, y)', None),
                ('__div__',      'x / y',                   'x /= y'),
                ('__mod__',      'x % y',                   'x %= y'),
                ('__divmod__',   'divmod(x, y)',            None),
                ('__pow__',      'x ** y',                  'x **= y'),
                ('__lshift__',   'x << y',                  'x <<= y'),
                ('__rshift__',   'x >> y',                  'x >>= y'),
                ('__and__',      'x & y',                   'x &= y'),
                ('__or__',       'x | y',                   'x |= y'),
                ('__xor__',      'x ^ y',                   'x ^= y')]:
            rname = '__r' + name[2:]
            A = type('A', (), {name: specialmethod})
            a = A()
            check(expr, a, a)
            check(expr, a, N1)
            check(expr, a, N2)
            if iexpr:
                check(iexpr, a, a)
                check(iexpr, a, N1)
                check(iexpr, a, N2)
                iname = '__i' + name[2:]
                C = type('C', (), {iname: specialmethod})
                c = C()
                check(iexpr, c, a)
                check(iexpr, c, N1)
                check(iexpr, c, N2)
def test_assign_slice(self):
# ceval.c's assign_slice used to check for
# tp->tp_as_sequence->sq_slice instead of
# tp->tp_as_sequence->sq_ass_slice
class C(object):
def __setitem__(self, idx, value):
self.value = value
c = C()
c[1:2] = 3
self.assertEqual(c.value, 3)
    def test_set_and_no_get(self):
        # See
        # http://mail.python.org/pipermail/python-dev/2010-January/095637.html
        class Descr(object):
            # Data descriptor that defines __set__ but no __get__.
            def __init__(self, name):
                self.name = name

            def __set__(self, obj, value):
                obj.__dict__[self.name] = value
        descr = Descr("a")

        class X(object):
            a = descr

        x = X()
        # With no __get__, attribute lookup returns the descriptor itself...
        self.assertIs(x.a, descr)
        x.a = 42
        # ...but after __set__ populated the instance dict, the instance
        # value wins over the set-only descriptor.
        self.assertEqual(x.a, 42)

        # Also check type_getattro for correctness.
        class Meta(type):
            pass

        class X(object):
            # NOTE(review): '__metaclass__' is Python 2 syntax and has no
            # effect on Python 3 -- confirm whether 'class X(metaclass=Meta)'
            # was intended.
            __metaclass__ = Meta
        X.a = 42
        Meta.a = Descr("a")
        self.assertEqual(X.a, 42)
    def test_getattr_hooks(self):
        # issue 4230
        class Descriptor(object):
            counter = 0

            def __get__(self, obj, objtype=None):
                # Each descriptor access hands back a fresh getter that
                # counts its invocations and always fails.
                def getter(name):
                    self.counter += 1
                    raise AttributeError(name)
                return getter
        descr = Descriptor()

        class A(object):
            __getattribute__ = descr

        class B(object):
            __getattr__ = descr

        class C(object):
            __getattribute__ = descr
            __getattr__ = descr
        self.assertRaises(AttributeError, getattr, A(), "attr")
        self.assertEqual(descr.counter, 1)
        self.assertRaises(AttributeError, getattr, B(), "attr")
        self.assertEqual(descr.counter, 2)
        # C triggers both hooks: __getattribute__ raises, then __getattr__.
        self.assertRaises(AttributeError, getattr, C(), "attr")
        self.assertEqual(descr.counter, 4)

        import gc

        class EvilGetattribute(object):
            # This used to segfault
            def __getattr__(self, name):
                raise AttributeError(name)

            def __getattribute__(self, name):
                # Delete the fallback hook mid-lookup and force garbage
                # collection to try to provoke the old crash.
                del EvilGetattribute.__getattr__
                for i in range(5):
                    gc.collect()
                raise AttributeError(name)
        self.assertRaises(AttributeError, getattr, EvilGetattribute(), "attr")
    def test_abstractmethods(self):
        # type pretends not to have __abstractmethods__.
        self.assertRaises(AttributeError, getattr, type, "__abstractmethods__")

        class meta(type):
            pass
        self.assertRaises(AttributeError, getattr, meta, "__abstractmethods__")

        class X(object):
            pass
        # Deleting the attribute on a class that never had it must fail too.
        with self.assertRaises(AttributeError):
            del X.__abstractmethods__
    def test_proxy_call(self):
        class FakeStr:
            # Lies about its class but is not actually a str instance.
            __class__ = str

        fake_str = FakeStr()
        # isinstance() reads __class__
        self.assertTrue(isinstance(fake_str, str))

        # call a method descriptor
        with self.assertRaises(TypeError):
            str.split(fake_str)

        # call a slot wrapper descriptor
        with self.assertRaises(TypeError):
            str.__add__(fake_str, "abc")
def test_repr_as_str(self):
# Issue #11603: crash or infinite loop when rebinding __str__ as
# __repr__.
class Foo:
pass
Foo.__repr__ = Foo.__str__
foo = Foo()
str(foo)
class DictProxyTests(unittest.TestCase):
    """Checks on the read-only proxy returned as a class __dict__."""

    def setUp(self):
        # A minimal class whose __dict__ the tests below inspect.
        class C(object):
            def meth(self):
                pass
        self.C = C

    def test_iter_keys(self):
        # Testing dict-proxy keys...
        it = self.C.__dict__.keys()
        self.assertNotIsInstance(it, list)
        keys = list(it)
        keys.sort()
        # NOTE(review): this exact attribute list predates Python 3.3
        # (no '__qualname__') -- confirm against the target interpreter.
        self.assertEqual(keys, ['__dict__', '__doc__', '__module__',
                                '__weakref__', 'meth'])

    def test_iter_values(self):
        # Testing dict-proxy values...
        it = self.C.__dict__.values()
        self.assertNotIsInstance(it, list)
        values = list(it)
        # One value per key checked in test_iter_keys.
        self.assertEqual(len(values), 5)

    def test_iter_items(self):
        # Testing dict-proxy iteritems...
        it = self.C.__dict__.items()
        self.assertNotIsInstance(it, list)
        keys = [item[0] for item in it]
        keys.sort()
        self.assertEqual(keys, ['__dict__', '__doc__', '__module__',
                                '__weakref__', 'meth'])

    def test_dict_type_with_metaclass(self):
        # Testing type of __dict__ when metaclass set...
        class B(object):
            pass

        class M(type):
            pass

        class C(metaclass=M):
            # In 2.3a1, C.__dict__ was a real dict rather than a dict proxy
            pass
        self.assertEqual(type(C.__dict__), type(B.__dict__))

    def test_repr(self):
        # Testing dict_proxy.__repr__
        dict_ = {k: v for k, v in self.C.__dict__.items()}
        # NOTE(review): the proxy repr prefix became 'mappingproxy' in
        # Python 3.3 -- 'dict_proxy' only matches 3.2-era builds.
        self.assertEqual(repr(self.C.__dict__), 'dict_proxy({!r})'.format(dict_))
class PTypesLongInitTest(unittest.TestCase):
    # This is in its own TestCase so that it can be run before any other tests.

    def test_pytype_long_ready(self):
        # Testing SF bug 551412 ...
        # This dumps core when SF bug 551412 isn't fixed --
        # but only when test_descr.py is run separately.
        # (That can't be helped -- as soon as PyType_Ready()
        # is called for PyLong_Type, the bug is gone.)
        class UserLong(object):
            def __pow__(self, *args):
                pass
        try:
            # Three-argument pow with a user-defined middle operand used
            # to crash before PyLong_Type was readied.
            pow(0, UserLong(), 0)
        except:
            pass

        # Another segfault only when run early
        # (before PyType_Ready(tuple) is called)
        type.mro(tuple)
def test_main():
    """Run all local test cases, with PTypesLongInitTest first."""
    support.run_unittest(PTypesLongInitTest, OperatorsTest,
                         ClassPropertiesAndMethods, DictProxyTests)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
RuthAngus/granola | granola/download.py | 1 | 1294 | # Downloading Kepler light curves
import os
import pandas as pd
import kplr
import kepler_data as kd
def get_lc(id, KPLR_DIR="/Users/ruthangus/.kplr/data/lightcurves"):
    """
    Download (if necessary) and load a Kepler light curve.

    Parameters
    ----------
    id : int or str
        Kepler Input Catalog id; zero-padded to 9 digits for the cache path.
    KPLR_DIR : str
        Directory where kplr caches downloaded light curves.

    Returns
    -------
    x, y, yerr : arrays
        Time (shifted so the series starts at zero), flux and flux
        uncertainty, as returned by kepler_data.load_kepler_data.
    """
    kid = str(int(id)).zfill(9)
    path = os.path.join(KPLR_DIR, "{}".format(kid))
    if not os.path.exists(path):
        # Only hit the network when the light curves are not cached yet.
        client = kplr.API()
        star = client.star(kid)
        print("Downloading LC...")
        star.get_light_curves(fetch=True, short_cadence=False)
    # Load once, whether or not we just downloaded (the original
    # duplicated this call in both branches of the if/else).
    x, y, yerr = kd.load_kepler_data(path)
    x -= x[0]
    return x, y, yerr
if __name__ == "__main__":
    DATA_DIR = "/Users/ruthangus/projects/granola/granola/data"

    # load KIC-TGAS
    data = pd.read_csv(os.path.join(DATA_DIR, "kic_tgas.csv"))

    # cut on temperature and logg
    # (multiplying the boolean arrays acts as an element-wise AND)
    m = (data.teff.values < 6250) * (4 < data.logg.values)
    data = data.iloc[m]

    # Fetch light curves for targets 275-399 of the filtered catalogue.
    for i, kic in enumerate(data.kepid.values[275:400]):
        print(kic, i, "of", len(data.kepid.values[275:400]))
        x, y, yerr = get_lc(kic)
| mit |
phdowling/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). LSHForest index
have sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundreds of dimensions. Higher dimensional datasets tends to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5

# Initialize the range of `n_samples` (log-spaced index sizes)
# NOTE(review): np.int is deprecated/removed in recent NumPy releases;
# plain int would be required on modern versions.
n_samples_values = np.logspace(np.log10(n_samples_min),
                               np.log10(n_samples_max),
                               n_steps).astype(np.int)

# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
                         n_features=n_features, centers=n_centers, shuffle=True,
                         random_state=0)
# First n_queries points are the query set; the remainder gets indexed.
queries = all_data[:n_queries]
index_data = all_data[n_queries:]

# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
    X = index_data[:n_samples]
    # Initialize LSHForest for queries of a single neighbor
    lshf = LSHForest(n_estimators=20, n_candidates=200,
                     n_neighbors=10).fit(X)
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
                            n_neighbors=10).fit(X)
    time_approx = []
    time_exact = []
    accuracy = []

    for i in range(n_iter):
        # pick one query at random to study query time variability in LSHForest
        query = queries[rng.randint(0, n_queries)]

        # Time the exact brute-force query...
        t0 = time.time()
        exact_neighbors = nbrs.kneighbors(query, return_distance=False)
        time_exact.append(time.time() - t0)

        # ...and the approximate LSHForest query on the same point.
        t0 = time.time()
        approx_neighbors = lshf.kneighbors(query, return_distance=False)
        time_approx.append(time.time() - t0)

        # Fraction of approximate neighbors that are in the exact set.
        accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())

    average_time_exact = np.mean(time_exact)
    average_time_approx = np.mean(time_approx)
    # Per-iteration speedup, so we can also report its spread.
    speedup = np.array(time_exact) / np.array(time_approx)
    average_speedup = np.mean(speedup)
    mean_accuracy = np.mean(accuracy)
    std_accuracy = np.std(accuracy)
    print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
          "accuracy: %0.2f +/-%0.2f" %
          (n_samples, average_time_exact, average_time_approx, average_speedup,
           mean_accuracy, std_accuracy))

    accuracies.append(mean_accuracy)
    std_accuracies.append(std_accuracy)
    average_times_exact.append(average_time_exact)
    average_times_approx.append(average_time_approx)
    std_times_approx.append(np.std(time_approx))
    average_speedups.append(average_speedup)
    std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
             fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
         label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
          "nearest neighbors queries")

# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
             fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")

# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
# Leave headroom above 1.0 so the error bars remain visible.
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")

plt.show()
| bsd-3-clause |
zsjohny/jumpserver | apps/terminal/migrations/0016_commandstorage_replaystorage.py | 3 | 2483 | # Generated by Django 2.2.5 on 2019-11-22 10:07
import common.fields.model
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Auto-generated: adds external storage configuration models
    # (CommandStorage, ReplayStorage) for terminal command and replay
    # records. Do not hand-edit applied migrations.

    dependencies = [
        ('terminal', '0015_auto_20190923_1529'),
    ]

    operations = [
        migrations.CreateModel(
            name='CommandStorage',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('created_by', models.CharField(blank=True, max_length=32, null=True, verbose_name='Created by')),
                ('date_created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Date created')),
                ('date_updated', models.DateTimeField(auto_now=True, verbose_name='Date updated')),
                ('name', models.CharField(max_length=32, unique=True, verbose_name='Name')),
                ('type', models.CharField(choices=[('null', 'Null'), ('server', 'Server'), ('es', 'Elasticsearch')], default='server', max_length=16, verbose_name='Type')),
                # NOTE(review): default={} is a shared mutable default;
                # Django convention is default=dict. Left untouched because
                # altering an applied migration changes recorded state.
                ('meta', common.fields.model.EncryptJsonDictTextField(default={})),
                ('comment', models.TextField(blank=True, default='', max_length=128, verbose_name='Comment')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ReplayStorage',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('created_by', models.CharField(blank=True, max_length=32, null=True, verbose_name='Created by')),
                ('date_created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Date created')),
                ('date_updated', models.DateTimeField(auto_now=True, verbose_name='Date updated')),
                ('name', models.CharField(max_length=32, unique=True, verbose_name='Name')),
                ('type', models.CharField(choices=[('null', 'Null'), ('server', 'Server'), ('s3', 'S3'), ('ceph', 'Ceph'), ('swift', 'Swift'), ('oss', 'OSS'), ('azure', 'Azure')], default='server', max_length=16, verbose_name='Type')),
                ('meta', common.fields.model.EncryptJsonDictTextField(default={})),
                ('comment', models.TextField(blank=True, default='', max_length=128, verbose_name='Comment')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| gpl-2.0 |
sharad1126/owtf | framework/wrappers/set/spear_phishing.py | 3 | 2650 | #!/usr/bin/env python
'''
Description:
This is the handler for the Social Engineering Toolkit (SET) trying to overcome the limitations of set-automate
'''
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.lib.general import *
import time
SCRIPT_DELAY = 2
class SpearPhishing(BaseComponent):
    """Drives SET spear-phishing scripts non-interactively via set-automate."""

    COMPONENT_NAME = "spear_phishing"

    def __init__(self, set):
        # NOTE: parameter 'set' shadows the builtin; kept for API compatibility.
        self.register_in_service_locator()
        self.config = self.get_component("config")
        self.error_handler = self.get_component("error_handler")
        self.set = set

    def Run(self, Args, PluginInfo):
        # Open an interactive SET session, run each phishing script in
        # order, and return their concatenated output.
        Output = ''
        if self.Init(Args):
            self.set.Open({
                'ConnectVia': self.config.GetResources('OpenSET')
                , 'InitialCommands': None
                , 'ExitMethod': Args['ISHELL_EXIT_METHOD']
                , 'CommandsBeforeExit': Args['ISHELL_COMMANDS_BEFORE_EXIT']
                , 'CommandsBeforeExitDelim': Args['ISHELL_COMMANDS_BEFORE_EXIT_DELIM']
            }, PluginInfo)
            if Args['PHISHING_CUSTOM_EXE_PAYLOAD_DIR']:  # Prepend directory to payload
                Args['PHISHING_CUSTOM_EXE_PAYLOAD'] = Args['PHISHING_CUSTOM_EXE_PAYLOAD_DIR'] + "/" + Args[
                    'PHISHING_CUSTOM_EXE_PAYLOAD']
            for Script in self.GetSETScripts(Args):
                cprint("Running SET script: " + Script)
                Output += self.set.RunScript(Script, Args, Debug=False)
                # Give SET time to settle between scripts.
                cprint("Sleeping " + str(SCRIPT_DELAY) + " seconds..")
                time.sleep(int(SCRIPT_DELAY))
            # Output += self.set.RunScript(self.SETScript, Args, Debug=False)
            self.set.Close(PluginInfo)
        return Output

    def GetSETScripts(self, Args):
        # Scripts run in this order: start phishing, build payload, send email.
        return [
            Args['PHISHING_SCRIPT_DIR'] + "/start_phishing.set"
            , Args['PHISHING_SCRIPT_DIR'] + "/payload_" + Args['PHISHING_PAYLOAD'] + ".set"
            , Args['PHISHING_SCRIPT_DIR'] + "/send_email_smtp.set"
        ]

    def InitPaths(self, Args):
        # Abort the framework early if any mandatory template/target path
        # or SET script is missing from the filesystem.
        MandatoryPaths = self.config.GetAsList(
            ['TOOL_SET_DIR', '_PDF_TEMPLATE', '_WORD_TEMPLATE', '_EMAIL_TARGET'])
        if not PathsExist(MandatoryPaths) or not PathsExist(self.GetSETScripts(Args)):
            self.error_handler.FrameworkAbort("USER ERROR: Some mandatory paths were not found your filesystem", 'user')
            return False
        return True

    def Init(self, Args):
        # Returns True only when all mandatory paths are present.
        if not self.InitPaths(Args):
            return False
        return True
| bsd-3-clause |
0x46616c6b/ansible | lib/ansible/modules/cloud/amazon/lambda_event.py | 21 | 14656 | #!/usr/bin/python
# (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: lambda_event
short_description: Creates, updates or deletes AWS Lambda function event mappings.
description:
- This module allows the management of AWS Lambda function event source mappings such as DynamoDB and Kinesis stream
events via the Ansible framework. These event source mappings are relevant only in the AWS Lambda pull model, where
AWS Lambda invokes the function.
It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda
function itself and M(lambda_alias) to manage function aliases.
version_added: "2.2"
author: Pierre Jodouin (@pjodouin), Ryan Brown (@ryansb)
options:
lambda_function_arn:
description:
- The name or ARN of the lambda function.
required: true
aliases: ['function_name', 'function_arn']
state:
description:
- Describes the desired state.
required: true
default: "present"
choices: ["present", "absent"]
alias:
description:
- Name of the function alias. Mutually exclusive with C(version).
required: true
version:
description:
- Version of the Lambda function. Mutually exclusive with C(alias).
required: false
event_source:
description:
- Source of the event that triggers the lambda function.
required: false
default: stream
choices: ['stream']
source_params:
description:
- Sub-parameters required for event source.
- I(== stream event source ==)
- C(source_arn) The Amazon Resource Name (ARN) of the Kinesis or DynamoDB stream that is the event source.
- C(enabled) Indicates whether AWS Lambda should begin polling the event source. Default is True.
- C(batch_size) The largest number of records that AWS Lambda will retrieve from your event source at the
time of invoking your function. Default is 100.
- C(starting_position) The position in the stream where AWS Lambda should start reading.
Choices are TRIM_HORIZON or LATEST.
required: true
requirements:
- boto3
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
---
# Example that creates a lambda event notification for a DynamoDB stream
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: DynamoDB stream event mapping
lambda_event:
state: "{{ state | default('present') }}"
event_source: stream
function_name: "{{ function_name }}"
alias: Dev
source_params:
source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457
enabled: True
batch_size: 100
starting_position: TRIM_HORIZON
- name: Show source event
debug:
var: lambda_stream_events
'''
RETURN = '''
---
lambda_stream_events:
description: list of dictionaries returned by the API describing stream event mappings
returned: success
type: list
'''
import sys
try:
import boto3
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
# ---------------------------------------------------------------------------------------------------
#
# Helper Functions & classes
#
# ---------------------------------------------------------------------------------------------------
class AWSConnection:
    """
    Create the connection object and client objects as required.
    """

    def __init__(self, ansible_obj, resources, use_boto3=True):
        # :param ansible_obj: AnsibleModule used for config and error reporting
        # :param resources: list of boto3 client names to create
        try:
            self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)

            self.resource_client = dict()
            if not resources:
                resources = ['lambda']

            # 'iam' is always added so the account id can be looked up below.
            resources.append('iam')

            for resource in resources:
                aws_connect_kwargs.update(dict(region=self.region,
                                               endpoint=self.endpoint,
                                               conn_type='client',
                                               resource=resource
                                               ))
                self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)

            # if region is not provided, then get default profile/session region
            if not self.region:
                self.region = self.resource_client['lambda'].meta.region_name

        except (ClientError, ParamValidationError, MissingParametersError) as e:
            ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))

        # set account ID (best effort: blank when the ARN cannot be parsed)
        try:
            self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
        except (ClientError, ValueError, KeyError, IndexError):
            self.account_id = ''

    def client(self, resource='lambda'):
        # Return the cached boto3 client for the given resource name.
        return self.resource_client[resource]
def pc(key):
    """
    Convert a snake_case key to its PascalCase equivalent.

    For example, 'this_function_name' becomes 'ThisFunctionName'.

    :param key: snake_case string
    :return: PascalCase string
    """
    return "".join(part.capitalize() for part in key.split('_'))
def ordered_obj(obj):
    """
    Recursively order a nested structure for comparison purposes.

    Dicts become sorted lists of (key, ordered value) pairs and lists
    are sorted, so structurally-equal objects compare equal.

    :param obj: arbitrary (possibly nested) object
    :return: canonically ordered equivalent
    """
    if isinstance(obj, dict):
        return sorted((key, ordered_obj(value)) for key, value in obj.items())
    if isinstance(obj, list):
        return sorted(ordered_obj(item) for item in obj)
    return obj
def set_api_sub_params(params):
    """
    Sets module sub-parameters to those expected by the boto3 API.

    Keys are converted to PascalCase via pc(); entries with falsy
    values are dropped, matching the original behavior.

    :param params: dict of snake_case sub-parameters
    :return: dict keyed by PascalCase names
    """
    return {pc(name): value for name, value in params.items() if value}
def validate_params(module, aws):
    """
    Performs basic parameter validation.

    Expands a bare function name into a full ARN (using the connection's
    region and account id) and appends the version/alias qualifier.
    Mutates module.params['lambda_function_arn'] in place; fails the
    module on invalid input.

    :param module: AnsibleModule instance
    :param aws: AWSConnection providing region and account_id
    :return: None
    """
    function_name = module.params['lambda_function_arn']

    # validate function name (letters, digits, '_', '-' and ':' for ARNs)
    if not re.search('^[\w\-:]+$', function_name):
        module.fail_json(
            msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
        )
    if len(function_name) > 64:
        module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))

    # check if 'function_name' needs to be expanded in full ARN format
    if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'):
        function_name = module.params['lambda_function_arn']
        module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name)

    # append ':<version-or-alias>' when one was requested
    qualifier = get_qualifier(module)
    if qualifier:
        function_arn = module.params['lambda_function_arn']
        module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)

    return
def get_qualifier(module):
    """
    Returns the function qualifier as a version or alias or None.

    A positive ``version`` parameter takes precedence over ``alias``.

    :param module: object exposing a ``params`` mapping
    :return: str qualifier, or None when neither is set
    """
    params = module.params
    if params['version'] > 0:
        return str(params['version'])
    if params['alias']:
        return str(params['alias'])
    return None
# ---------------------------------------------------------------------------------------------------
#
# Lambda Event Handlers
#
# This section defines a lambda_event_X function where X is an AWS service capable of initiating
# the execution of a Lambda function (pull only).
#
# ---------------------------------------------------------------------------------------------------
def lambda_event_stream(module, aws):
    """
    Adds, updates or deletes lambda stream (DynamoDb, Kinesis) event notifications.

    Idempotent: compares the requested state against the existing event
    source mapping and only calls create/update/delete when needed.
    Honors Ansible check mode (reports 'changed' without calling AWS).

    :param module: AnsibleModule carrying validated parameters
    :param aws: AWSConnection providing the boto3 'lambda' client
    :return: snake_cased dict with 'changed' flag and mapping facts
    """
    client = aws.client('lambda')
    facts = dict()
    changed = False

    current_state = 'absent'
    state = module.params['state']

    api_params = dict(FunctionName=module.params['lambda_function_arn'])

    # check if required sub-parameters are present and valid
    source_params = module.params['source_params']

    source_arn = source_params.get('source_arn')
    if source_arn:
        api_params.update(EventSourceArn=source_arn)
    else:
        module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.")

    # check if optional sub-parameters are valid, if present
    batch_size = source_params.get('batch_size')
    if batch_size:
        try:
            source_params['batch_size'] = int(batch_size)
        except ValueError:
            module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size']))

    # optional boolean value needs special treatment as not present does not imply False
    source_param_enabled = module.boolean(source_params.get('enabled', 'True'))

    # check if event mapping exist
    try:
        facts = client.list_event_source_mappings(**api_params)['EventSourceMappings']
        if facts:
            current_state = 'present'
    except ClientError as e:
        module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e))

    if state == 'present':
        if current_state == 'absent':
            # Mapping does not exist yet: create it.
            starting_position = source_params.get('starting_position')
            if starting_position:
                api_params.update(StartingPosition=starting_position)
            else:
                module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.")

            if source_arn:
                api_params.update(Enabled=source_param_enabled)
            if source_params.get('batch_size'):
                api_params.update(BatchSize=source_params.get('batch_size'))

            try:
                if not module.check_mode:
                    facts = client.create_event_source_mapping(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e))

        else:
            # current_state is 'present': update only if something differs.
            api_params = dict(FunctionName=module.params['lambda_function_arn'])
            current_mapping = facts[0]
            api_params.update(UUID=current_mapping['UUID'])
            mapping_changed = False

            # check if anything changed
            if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']:
                api_params.update(BatchSize=source_params['batch_size'])
                mapping_changed = True

            if source_param_enabled is not None:
                if source_param_enabled:
                    # 'Enabling' counts as enabled to avoid redundant updates.
                    if current_mapping['State'] not in ('Enabled', 'Enabling'):
                        api_params.update(Enabled=True)
                        mapping_changed = True
                else:
                    if current_mapping['State'] not in ('Disabled', 'Disabling'):
                        api_params.update(Enabled=False)
                        mapping_changed = True

            if mapping_changed:
                try:
                    if not module.check_mode:
                        facts = client.update_event_source_mapping(**api_params)
                    changed = True
                except (ClientError, ParamValidationError, MissingParametersError) as e:
                    module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e))

    else:
        if current_state == 'present':
            # remove the stream event mapping
            api_params = dict(UUID=facts[0]['UUID'])
            try:
                if not module.check_mode:
                    facts = client.delete_event_source_mapping(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e))

    return camel_dict_to_snake_dict(dict(changed=changed, events=facts))
def main():
    """Module entry point: parse arguments and dispatch to the event handler."""
    this_module = sys.modules[__name__]

    source_choices = ["stream"]

    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=False, default='present', choices=['present', 'absent']),
            # NOTE(review): required=True combined with a default is
            # contradictory for Ansible argument specs -- confirm intent.
            lambda_function_arn=dict(required=True, default=None, aliases=['function_name', 'function_arn']),
            event_source=dict(required=False, default="stream", choices=source_choices),
            source_params=dict(type='dict', required=True, default=None),
            alias=dict(required=False, default=None),
            version=dict(type='int', required=False, default=0),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['alias', 'version']],
        required_together=[]
    )

    # validate dependencies
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module.')

    aws = AWSConnection(module, ['lambda'])

    validate_params(module, aws)

    # Dispatch to lambda_event_<source> in this module (e.g. lambda_event_stream).
    this_module_function = getattr(this_module, 'lambda_event_{}'.format(module.params['event_source'].lower()))

    results = this_module_function(module, aws)

    module.exit_json(**results)
# ansible import module(s) kept at ~eof as recommended
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
nickhand/nbodykit | nbodykit/algorithms/pair_counters/mocksurvey.py | 2 | 8301 | from .base import PairCountBase, verify_input_sources
import numpy
import logging
class SurveyDataPairCount(PairCountBase):
r"""
Count (weighted) pairs of objects from a survey data catalog
as a function of :math:`r`, :math:`(r,\mu)`, :math:`(r_p, \pi)`, or
:math:`\theta` using the :mod:`Corrfunc` package.
See the Notes below for the allowed coordinate dimensions.
The default weighting scheme uses the product of the weights for each
object in a pair.
Results are computed when the class is inititalized. See the documenation
of :func:`~SurveyDataPairCount.run` for the attributes storing the
results.
.. note::
The algorithm expects the positions of particles from a survey catalog
be the sky coordinates, right ascension and declination, and redshift.
To compute pair counts in a simulation box using Cartesian
coordinates, see :class:`~nbodykit.algorithms.SimulationBoxPairCount`.
.. warning::
The right ascension and declination columns should be specified
in degrees.
Parameters
----------
mode : '1d', '2d', 'projected', 'angular'
compute pair counts as a function of the specified coordinate basis;
see the Notes section below for specifics
first : CatalogSource
the first source of particles, providing the 'Position' column
edges : array_like
the separation bin edges along the first coordinate dimension;
depending on ``mode``, the options are :math:`r`, :math:`r_p`, or
:math:`\theta`. Expected units for distances are :math:`\mathrm{Mpc}/h`
and degrees for angles. Length of nbins+1
cosmo : :class:`~nbodykit.cosmology.cosmology.Cosmology`, optional
the cosmology instance used to convert redshift into comoving distance;
this is required for all cases except ``mode='angular'``
second : CatalogSource, optional
the second source of particles to cross-correlate
Nmu : int, optional
the number of :math:`\mu` bins, ranging from 0 to 1; requred if
``mode='2d'``
pimax : float, optional
The maximum separation along the line-of-sight when ``mode='projected'``.
Distances along the :math:`\pi` direction are binned with unit
depth. For instance, if ``pimax=40``, then 40 bins will be created
along the :math:`\pi` direction.
ra : str, optional
the name of the column in the source specifying the
right ascension coordinates in units of degrees; default is 'RA'
dec : str, optional
the name of the column in the source specifying the declination
coordinates; default is 'DEC'
redshift : str, optional
the name of the column in the source specifying the redshift
coordinates; default is 'Redshift'
weight : str, optional
the name of the column in the source specifying the object weights
show_progress : bool, optional
if ``True``, perform the pair counting calculation in 10 iterations,
logging the progress after each iteration; this is useful for
understanding the scaling of the code
domain_factor : int, optional
the integer value by which to oversubscribe the domain decomposition
mesh before balancing loads; this number can affect the distribution
of loads on the ranks -- an optimal value will lead to balanced loads
**config : key/value pairs
additional keywords to pass to the :mod:`Corrfunc` function
Notes
-----
This class can compute pair counts using several different coordinate
choices, based on the value of the input argument ``mode``. The choices
are:
* ``mode='1d'`` : compute pairs as a function of the 3D separation :math:`r`
* ``mode='2d'`` : compute pairs as a function of the 3D separation :math:`r`
and the cosine of the angle to the line-of-sight, :math:`\mu`
* ``mode='projected'`` : compute pairs as a function of distance perpendicular
and parallel to the line-of-sight, :math:`r_p` and :math:`\pi`
* ``mode='angular'`` : compute pairs as a function of angle on the sky, :math:`\theta`
"""
logger = logging.getLogger('SurveyDataPairCount')
    def __init__(self, mode, first, edges, cosmo=None, second=None,
                 Nmu=None, pimax=None,
                 ra='RA', dec='DEC', redshift='Redshift', weight='Weight',
                 show_progress=False, domain_factor=4,
                 **config):

        # verify the input sources expose the needed columns
        # (redshift only matters when distances must be computed)
        required_cols = [ra, dec, weight]
        if mode != 'angular': required_cols.append(redshift)
        verify_input_sources(first, second, None, required_cols, inspect_boxsize=False)

        # init the base class (this verifies input arguments)
        PairCountBase.__init__(self, mode, edges, first, second, Nmu, pimax, weight, show_progress)

        # need cosmology if not angular!
        # (redshift -> comoving distance conversion requires it)
        if mode != 'angular' and cosmo is None:
            raise ValueError("'cosmo' keyword is required when 'mode' is not 'angular'")

        # save the meta-data for reproducibility / serialization
        self.attrs['cosmo'] = cosmo
        self.attrs['weight'] = weight
        self.attrs['ra'] = ra
        self.attrs['dec'] = dec
        self.attrs['redshift'] = redshift
        self.attrs['config'] = config
        self.attrs['domain_factor'] = domain_factor

        # run the algorithm (results stored on self.pairs)
        self.run()
    def run(self):
        """
        Calculate the pair counts of a survey data catalog.

        This adds the following attribute:

        - :attr:`SurveyDataPairCount.pairs`

        self.pairs.attrs['total_wnpairs']: The total of wnpairs.

        Attributes
        ----------
        pairs : :class:`~nbodykit.binned_statistic.BinnedStatistic`
            a BinnedStatistic object holding the pair count results.
            The coordinate grid will be ``(r,)``, ``(r,mu)``, ``(rp, pi)``,
            or ``(theta,)`` when ``mode`` is '1d', '2d', 'projected', 'angular',
            respectively.

            The BinnedStatistic stores the following variables:

            - ``r``, ``rp``, or ``theta`` : the mean separation value in the bin
            - ``npairs``: the number of pairs in the bin
            - ``wnpairs``: the weighted npairs in the bin; each pair
              contributes the product of the individual weight values
        """
        from .domain import decompose_survey_data

        # setup
        mode = self.attrs['mode']
        first, second = self.first, self.second
        attrs = self.attrs.copy()

        # '1d' counts are computed internally as a '2d' count with one mu bin
        # (squeezed out at the end of this method)
        Nmu = 1 if mode == '1d' else attrs['Nmu']

        # compute the max cartesian distance for smoothing: the decomposition
        # must be padded by the largest separation probed so that every
        # possible pair ends up on the same rank
        smoothing = numpy.max(attrs['edges'])
        if mode == 'projected':
            # maximum 3D distance corresponding to (rp_max, pimax)
            smoothing = numpy.sqrt(smoothing**2 + attrs['pimax']**2)
        elif mode == 'angular':
            # chord length on the unit sphere subtending the max angle (degrees)
            smoothing = 2 * numpy.sin(0.5 * numpy.deg2rad(smoothing))

        # do a domain decomposition on the data
        (pos1, w1), (pos2, w2) = decompose_survey_data(first, second, attrs,
                                                       self.logger, smoothing,
                                                       angular=(mode=='angular'),
                                                       domain_factor=attrs['domain_factor'])

        # get the Corrfunc callable based on mode
        if attrs['mode'] in ['1d', '2d']:
            from .corrfunc.mocks import DDsmu_mocks
            func = DDsmu_mocks(attrs['edges'], Nmu, comm=self.comm, show_progress=attrs['show_progress'])
        elif attrs['mode'] == 'projected':
            from .corrfunc.mocks import DDrppi_mocks
            func = DDrppi_mocks(attrs['edges'], attrs['pimax'], comm=self.comm, show_progress=attrs['show_progress'])
        elif attrs['mode'] == 'angular':
            from .corrfunc.mocks import DDtheta_mocks
            func = DDtheta_mocks(attrs['edges'], comm=self.comm, show_progress=attrs['show_progress'])

        # do the calculation
        self.pairs = func(pos1, w1, pos2, w2, **attrs['config'])

        # total weighted number of pairs (computed by the base class)
        self.pairs.attrs['total_wnpairs'] = self.attrs['total_wnpairs']

        # squeeze the result if '1d' (single mu bin was used)
        if mode == '1d':
            self.pairs = self.pairs.squeeze(dim='mu')
| gpl-3.0 |
xyproto/b-tk.core | Testing/Python/AcquisitionUnitConverterTest.py | 4 | 7026 | import btk
import unittest
import _TDDConfigure
class AcquisitionUnitConverterTest(unittest.TestCase):
    """Tests for btk.btkAcquisitionUnitConverter: unit defaults, point/moment
    rescaling to meters, analog-channel conversion, and conversion of an
    acquisition read from a C3D file."""

    def test_NoInputNoConversion(self):
        # With no input and no requested units, the output reports the
        # converter's default units.
        uc = btk.btkAcquisitionUnitConverter()
        uc.Update()
        output = uc.GetOutput()
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Marker), 'mm')
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Angle), 'deg')
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Force), 'N')
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Moment), 'Nmm')
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Power), 'W')

    def test_NoInputConversionToMeter(self):
        # Requesting units without any input acquisition leaves the output
        # at the default units (there is nothing to convert).
        uc = btk.btkAcquisitionUnitConverter()
        uc.SetUnit(btk.btkAcquisitionUnitConverter.Length, 'm')
        uc.SetUnit(btk.btkAcquisitionUnitConverter.Moment, 'Nm')
        uc.Update()
        output = uc.GetOutput()
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Marker), 'mm')
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Angle), 'deg')
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Force), 'N')
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Moment), 'Nmm')
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Power), 'W')

    def test_ConversionToMeter(self):
        # Build an acquisition with 2 points over 5 frames: one marker
        # (default type) and one moment, then convert mm -> m and Nmm -> Nm.
        input = btk.btkAcquisition()
        input.Init(2,5)
        p1 = input.GetPoint(0)
        a = p1.GetValues()
        a[0,0] = 1234.56; a[1,0] = 34.65; a[2,0] = 98.08; a[3,0] = 987.12; a[4,0] = 654.12
        p1.SetValues(a)
        p2 = input.GetPoint(1)
        p2.SetType(btk.btkPoint.Moment)
        b = p2.GetValues()
        b[0,0] = 1234.56; b[1,0] = 1134.65; b[2,0] = 1000.54; b[3,0] = 987.12; b[4,0] = 1435.896
        p2.SetValues(b)
        uc = btk.btkAcquisitionUnitConverter()
        uc.SetInput(input)
        uc.SetUnit(btk.btkAcquisitionUnitConverter.Length, 'm')
        uc.SetUnit(btk.btkAcquisitionUnitConverter.Moment, 'Nm')
        uc.Update()
        output = uc.GetOutput()
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Marker), 'm')
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Angle), 'deg')
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Force), 'N')
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Moment), 'Nm')
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Power), 'W')
        # Values must be scaled by 1/1000 for both the marker and the moment.
        p1 = output.GetPoint(0)
        a = p1.GetValues()
        p2 = output.GetPoint(1)
        b = p2.GetValues()
        self.assertAlmostEqual(a[0,0], 1.23456, 10);
        self.assertAlmostEqual(a[1,0], 0.03465, 10);
        self.assertAlmostEqual(a[2,0], 0.09808, 10);
        self.assertAlmostEqual(a[3,0], 0.98712, 10);
        self.assertAlmostEqual(a[4,0], 0.65412, 10)
        self.assertAlmostEqual(b[0,0], 1.23456, 10)
        self.assertAlmostEqual(b[1,0], 1.13465, 10)
        self.assertAlmostEqual(b[2,0], 1.00054, 10)
        self.assertAlmostEqual(b[3,0], 0.98712, 10)
        self.assertAlmostEqual(b[4,0], 1.435896, 10)

    def test_ConversionForAnalog(self):
        # Two analog channels over 5 frames: one in 'Nmm' (must be rescaled)
        # and one with the default unit (must be untouched).
        input = btk.btkAcquisition()
        input.Init(0,5,2)
        a1 = input.GetAnalog(0)
        a1.SetUnit('Nmm')
        a = a1.GetValues()
        a[0,0] = 1234.56; a[1,0] = 34.65; a[2,0] = 98.08; a[3,0] = 987.12; a[4,0] = 654.12
        a1.SetValues(a)
        a2 = input.GetAnalog(1)
        b = a2.GetValues()
        b[0,0] = 1.56; b[1,0] = 1.65; b[2,0] = 1.54; b[3,0] = 0.98712; b[4,0] = 1.8964;
        a2.SetValues(b)
        uc = btk.btkAcquisitionUnitConverter()
        uc.SetInput(input)
        uc.SetUnit(btk.btkAcquisitionUnitConverter.Length, 'm')
        uc.SetUnit(btk.btkAcquisitionUnitConverter.Moment, 'Nm')
        uc.Update()
        output = uc.GetOutput()
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Marker), 'm')
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Angle), 'deg')
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Force), 'N')
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Moment), 'Nm')
        self.assertEqual(output.GetPointUnit(btk.btkPoint.Power), 'W')
        a1 = output.GetAnalog(0)
        a = a1.GetValues()
        a2 = output.GetAnalog(1)
        b = a2.GetValues()
        # NOTE(review): a[0] here is a length-1 row of the values array, not a
        # scalar; assertAlmostEqual relies on implicit scalar conversion of a
        # one-element array -- confirm this holds for the numpy version used.
        self.assertAlmostEqual(a[0], 1.23456, 10)
        self.assertAlmostEqual(a[1], 0.03465, 10)
        self.assertAlmostEqual(a[2], 0.09808, 10)
        self.assertAlmostEqual(a[3], 0.98712, 10)
        self.assertAlmostEqual(a[4], 0.65412, 10)
        self.assertAlmostEqual(b[0], 1.5600, 10)
        self.assertAlmostEqual(b[1], 1.6500, 10)
        self.assertAlmostEqual(b[2], 1.5400, 10)
        self.assertAlmostEqual(b[3], 0.98712, 10)
        self.assertAlmostEqual(b[4], 1.8964, 10)

    def test_ConversionFromFile(self):
        # Round-trip a real C3D file and verify that per-type scale factors
        # are applied to points, analogs, and relevant metadata, while frame
        # counts and frequencies are preserved.
        reader = btk.btkAcquisitionFileReader()
        reader.SetFilename(_TDDConfigure.C3DFilePathIN + 'sample09/PlugInC3D.c3d')
        acq = reader.GetOutput()
        uc = btk.btkAcquisitionUnitConverter()
        uc.SetInput(acq)
        uc.SetUnit(btk.btkAcquisitionUnitConverter.Length, 'm')
        uc.SetUnit(btk.btkAcquisitionUnitConverter.Angle, 'rad')
        uc.SetUnit(btk.btkAcquisitionUnitConverter.Moment, 'Nm')
        uc.Update()
        acq2 = uc.GetOutput()
        self.assertEqual(acq2.GetPointUnit(btk.btkPoint.Scalar), 'm')
        self.assertEqual(acq.GetFirstFrame(), acq2.GetFirstFrame())
        self.assertEqual(acq.GetPointFrequency(), acq2.GetPointFrequency())
        self.assertEqual(acq.GetPointNumber(), acq2.GetPointNumber())
        self.assertEqual(acq.GetPointFrameNumber(), acq2.GetPointFrameNumber())
        self.assertEqual(acq.GetAnalogFrequency(), acq2.GetAnalogFrequency())
        self.assertEqual(acq.GetAnalogNumber(), acq2.GetAnalogNumber())
        for j in range(0,acq.GetPointNumber()):
            # Per-type scale: mm->m and Nmm->Nm are 1/1000, deg->rad is
            # pi/180 (= 1.745329251994e-02); everything else is unscaled.
            s = 1.0
            t = acq.GetPoint(j).GetType()
            if (t == btk.btkPoint.Marker) or (t == btk.btkPoint.Scalar):
                s = 0.001
            elif (t == btk.btkPoint.Angle):
                s = 1.745329251994e-02
            elif (t == btk.btkPoint.Moment):
                s = 0.001
            for i in range(1,50):
                self.assertAlmostEqual(acq.GetPoint(j).GetValues()[i,0] * s, acq2.GetPoint(j).GetValues()[i,0], 10)
                self.assertAlmostEqual(acq.GetPoint(j).GetValues()[i,1] * s, acq2.GetPoint(j).GetValues()[i,1], 10)
                self.assertAlmostEqual(acq.GetPoint(j).GetValues()[i,2] * s, acq2.GetPoint(j).GetValues()[i,2], 10)
        for j in range(0,acq.GetAnalogNumber()):
            s = 1.0
            unit = acq.GetAnalog(j).GetUnit()
            if (unit == 'Nmm'):
                s = 0.001
            for i in range(1,50):
                self.assertAlmostEqual(acq.GetAnalog(j).GetValues()[i] * s, acq2.GetAnalog(j).GetValues()[i], 10)
        # The MARKER_DIAMETER metadata is a length and must be rescaled too.
        self.assertAlmostEqual(acq.GetMetaData().GetChild('SEG').GetChild('MARKER_DIAMETER').GetInfo().ToDouble(0) * 0.001,
                               acq2.GetMetaData().GetChild('SEG').GetChild('MARKER_DIAMETER').GetInfo().ToDouble(0), 5)
| bsd-3-clause |
benthomasson/fsm-designer | app/fsm_designer/features/steps/diff.py | 1 | 1393 |
import os

from behave import given, when, then

import fsm_designer.cli
from code_generation import cd
HERE = os.path.dirname(__file__)
@given(u'two empty fsm designs')
def step_impl(context):
    # Store the paths of two design fixtures on the behave context; they are
    # consumed by the "finding differences" step below.
    # NOTE(review): `given` is not imported at the top of this file (only
    # `when` and `then` are) -- confirm the behave import includes it.
    context.designA = os.path.join(HERE, 'X')
    context.designB = os.path.join(HERE, 'Y')


@given(u'two simple fsm designs')
def step_impl(context):
    context.designA = os.path.join(HERE, 'x.yml')
    context.designB = os.path.join(HERE, 'y.yml')


@given(u'two different fsm designs')
def step_impl(context):
    context.designA = os.path.join(HERE, 'x.yml')
    context.designB = os.path.join(HERE, 'x2.yml')


@given(u'two fsm designs with different transitions')
def step_impl(context):
    context.designA = os.path.join(HERE, 'x.yml')
    context.designB = os.path.join(HERE, 'x3.yml')
@when(u'finding differences')
def step_impl(context):
    # Run the CLI 'diff' command from inside the scratch directory and store
    # its integer return value on the context for the @then steps.
    try:
        with cd(context.tempdir):
            context.result = fsm_designer.cli.main('diff {0} {1}'.format(context.designA, context.designB).split())
    # Python 2 except syntax: this file targets py2 (e.message below).
    except SystemExit, e:
        # Dump everything we know about the unexpected exit before failing.
        print(dir(e))
        print(e.code)
        print(e.message)
        print(e.args)
        raise AssertionError('SystemExit')
@then(u'their should be no differences between the designs.')
def step_impl(context):
    """Assert the diff command reported no differences (exit status 0)."""
    # The step text above (including its 'their' typo) must match the feature
    # files exactly, so it is deliberately left unchanged.
    # Compare with '==', not 'is': identity of small integers is a CPython
    # caching detail and is not guaranteed for values returned by the CLI.
    assert context.result == 0
@then(u'their should be differences between the designs.')
def step_impl(context):
    """Assert the diff command reported differences (exit status 1)."""
    # The step text above (including its 'their' typo) must match the feature
    # files exactly, so it is deliberately left unchanged.
    # Compare with '==', not 'is': integer identity is an implementation
    # detail of CPython's small-int cache.
    assert context.result == 1
| gpl-2.0 |
bright-sparks/chromium-spacewalk | tools/telemetry/telemetry/page/actions/action_runner.py | 42 | 25683 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from telemetry.page.actions.javascript_click import ClickElementAction
from telemetry.page.actions.loop import LoopAction
from telemetry.page.actions.navigate import NavigateAction
from telemetry.page.actions.pinch import PinchAction
from telemetry.page.actions.play import PlayAction
from telemetry.page.actions.repaint_continuously import (
RepaintContinuouslyAction)
from telemetry.page.actions.scroll import ScrollAction
from telemetry.page.actions.scroll_bounce import ScrollBounceAction
from telemetry.page.actions.seek import SeekAction
from telemetry.page.actions.swipe import SwipeAction
from telemetry.page.actions.tap import TapAction
from telemetry.page.actions.wait import WaitForElementAction
from telemetry.web_perf import timeline_interaction_record
class ActionRunner(object):
  """Runs high-level page actions (navigation, gestures, media control)
  against a telemetry tab.

  Each public method wraps one of the page action classes imported above and
  executes it via _RunAction. BeginInteraction/BeginGestureInteraction mark
  labeled time periods on the timeline for metric computation."""

  def __init__(self, tab, skip_waits=False):
    # skip_waits makes Wait() and RepaintContinuously() return immediately;
    # useful for fast dry runs of a page set.
    self._tab = tab
    self._skip_waits = skip_waits

  def _RunAction(self, action):
    """Runs a page action: notifies it, then executes it against the tab."""
    action.WillRunAction(self._tab)
    action.RunAction(self._tab)

  def BeginInteraction(self, label, is_fast=False, is_smooth=False,
                       is_responsive=False, repeatable=False):
    """Marks the beginning of an interaction record.

    An interaction record is a labeled time period containing
    interaction that developers care about. Each set of metrics
    specified in flags will be calculated for this time period. The
    End() method in the returned object must be called once to mark
    the end of the timeline.

    Args:
      label: A label for this particular interaction. This can be any
          user-defined string, but must not contain '/'.
      is_fast: Whether to measure how fast the browser completes necessary work
          for this interaction record. See fast_metric.py for details.
      is_smooth: Whether to check for smoothness metrics for this interaction.
      is_responsive: Whether to check for responsiveness metrics for
          this interaction.
      repeatable: Whether other interactions may use the same logical name
          as this interaction. All interactions with the same logical name must
          have the same flags.
    """
    flags = []
    if is_fast:
      flags.append(timeline_interaction_record.IS_FAST)
    if is_smooth:
      flags.append(timeline_interaction_record.IS_SMOOTH)
    if is_responsive:
      flags.append(timeline_interaction_record.IS_RESPONSIVE)
    if repeatable:
      flags.append(timeline_interaction_record.REPEATABLE)

    # NOTE(review): Interaction's first parameter is named action_runner but a
    # tab is passed here; it works because both expose ExecuteJavaScript.
    interaction = Interaction(self._tab, label, flags)
    interaction.Begin()
    return interaction

  def BeginGestureInteraction(self, label, is_fast=False, is_smooth=False,
                              is_responsive=False, repeatable=False):
    """Marks the beginning of a gesture-based interaction record.

    This is similar to normal interaction record, but it will
    auto-narrow the interaction time period to only include the
    synthetic gesture event output by Chrome. This is typically use to
    reduce noise in gesture-based analysis (e.g., analysis for a
    swipe/scroll).

    The interaction record label will be prepended with 'Gesture_'.

    Args:
      label: A label for this particular interaction. This can be any
          user-defined string, but must not contain '/'.
      is_fast: Whether to measure how fast the browser completes necessary work
          for this interaction record. See fast_metric.py for details.
      is_smooth: Whether to check for smoothness metrics for this interaction.
      is_responsive: Whether to check for responsiveness metrics for
          this interaction.
      repeatable: Whether other interactions may use the same logical name
          as this interaction. All interactions with the same logical name must
          have the same flags.
    """
    return self.BeginInteraction('Gesture_' + label, is_fast, is_smooth,
                                 is_responsive, repeatable)

  def NavigateToPage(self, page, timeout_in_seconds=60):
    """Navigate to the given page.

    Args:
      page: page is an instance of page.Page
      timeout_in_seconds: The timeout in seconds (default to 60).
    """
    # Local files are served through the browser's HTTP server, so the
    # target-side URL differs from page.url in that case.
    if page.is_file:
      target_side_url = self._tab.browser.http_server.UrlOf(page.file_path_url)
    else:
      target_side_url = page.url
    self._RunAction(NavigateAction(
        url=target_side_url,
        script_to_evaluate_on_commit=page.script_to_evaluate_on_commit,
        timeout_in_seconds=timeout_in_seconds))

  def WaitForNavigate(self, timeout_in_seconds_seconds=60):
    """Waits for an in-flight navigation to complete and the document to
    become interactive (or better)."""
    self._tab.WaitForNavigate(timeout_in_seconds_seconds)
    self._tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()

  def ReloadPage(self):
    """Reloads the page."""
    self._tab.ExecuteJavaScript('window.location.reload()')
    self._tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()

  def ExecuteJavaScript(self, statement):
    """Executes a given JavaScript expression. Does not return the result.

    Example: runner.ExecuteJavaScript('var foo = 1;');

    Args:
      statement: The statement to execute (provided as string).

    Raises:
      EvaluationException: The statement failed to execute.
    """
    self._tab.ExecuteJavaScript(statement)

  def EvaluateJavaScript(self, expression):
    """Returns the evaluation result of the given JavaScript expression.

    The evaluation results must be convertible to JSON. If the result
    is not needed, use ExecuteJavaScript instead.

    Example: num = runner.EvaluateJavaScript('document.location.href')

    Args:
      expression: The expression to evaluate (provided as string).

    Raises:
      EvaluationException: The statement expression failed to execute
          or the evaluation result can not be JSON-ized.
    """
    return self._tab.EvaluateJavaScript(expression)

  def Wait(self, seconds):
    """Wait for the number of seconds specified.

    Args:
      seconds: The number of seconds to wait.
    """
    # Honored only when the runner was not constructed with skip_waits.
    if not self._skip_waits:
      time.sleep(seconds)

  def WaitForJavaScriptCondition(self, condition, timeout_in_seconds=60):
    """Wait for a JavaScript condition to become true.

    Example: runner.WaitForJavaScriptCondition('window.foo == 10');

    Args:
      condition: The JavaScript condition (as string).
      timeout_in_seconds: The timeout in seconds (default to 60).
    """
    self._tab.WaitForJavaScriptExpression(condition, timeout_in_seconds)

  def WaitForElement(self, selector=None, text=None, element_function=None,
                     timeout_in_seconds=60):
    """Wait for an element to appear in the document.

    The element may be selected via selector, text, or element_function.
    Only one of these arguments must be specified.

    Args:
      selector: A CSS selector describing the element.
      text: The element must contains this exact text.
      element_function: A JavaScript function (as string) that is used
          to retrieve the element. For example:
          '(function() { return foo.element; })()'.
      timeout_in_seconds: The timeout in seconds (default to 60).
    """
    self._RunAction(WaitForElementAction(
        selector=selector, text=text, element_function=element_function,
        timeout_in_seconds=timeout_in_seconds))

  def TapElement(self, selector=None, text=None, element_function=None):
    """Tap an element.

    The element may be selected via selector, text, or element_function.
    Only one of these arguments must be specified.

    Args:
      selector: A CSS selector describing the element.
      text: The element must contains this exact text.
      element_function: A JavaScript function (as string) that is used
          to retrieve the element. For example:
          '(function() { return foo.element; })()'.
    """
    self._RunAction(TapAction(
        selector=selector, text=text, element_function=element_function))

  def ClickElement(self, selector=None, text=None, element_function=None):
    """Click an element.

    The element may be selected via selector, text, or element_function.
    Only one of these arguments must be specified.

    Args:
      selector: A CSS selector describing the element.
      text: The element must contains this exact text.
      element_function: A JavaScript function (as string) that is used
          to retrieve the element. For example:
          '(function() { return foo.element; })()'.
    """
    self._RunAction(ClickElementAction(
        selector=selector, text=text, element_function=element_function))

  def PinchPage(self, left_anchor_ratio=0.5, top_anchor_ratio=0.5,
                scale_factor=None, speed_in_pixels_per_second=800):
    """Perform the pinch gesture on the page.

    It computes the pinch gesture automatically based on the anchor
    coordinate and the scale factor. The scale factor is the ratio of
    of the final span and the initial span of the gesture.

    Args:
      left_anchor_ratio: The horizontal pinch anchor coordinate of the
          gesture, as a ratio of the visible bounding rectangle for
          document.body.
      top_anchor_ratio: The vertical pinch anchor coordinate of the
          gesture, as a ratio of the visible bounding rectangle for
          document.body.
      scale_factor: The ratio of the final span to the initial span.
          The default scale factor is
          3.0 / (window.outerWidth/window.innerWidth).
      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
    """
    self._RunAction(PinchAction(
        left_anchor_ratio=left_anchor_ratio, top_anchor_ratio=top_anchor_ratio,
        scale_factor=scale_factor,
        speed_in_pixels_per_second=speed_in_pixels_per_second))

  def PinchElement(self, selector=None, text=None, element_function=None,
                   left_anchor_ratio=0.5, top_anchor_ratio=0.5,
                   scale_factor=None, speed_in_pixels_per_second=800):
    """Perform the pinch gesture on an element.

    It computes the pinch gesture automatically based on the anchor
    coordinate and the scale factor. The scale factor is the ratio of
    of the final span and the initial span of the gesture.

    Args:
      selector: A CSS selector describing the element.
      text: The element must contains this exact text.
      element_function: A JavaScript function (as string) that is used
          to retrieve the element. For example:
          'function() { return foo.element; }'.
      left_anchor_ratio: The horizontal pinch anchor coordinate of the
          gesture, as a ratio of the visible bounding rectangle for
          the element.
      top_anchor_ratio: The vertical pinch anchor coordinate of the
          gesture, as a ratio of the visible bounding rectangle for
          the element.
      scale_factor: The ratio of the final span to the initial span.
          The default scale factor is
          3.0 / (window.outerWidth/window.innerWidth).
      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
    """
    self._RunAction(PinchAction(
        selector=selector, text=text, element_function=element_function,
        left_anchor_ratio=left_anchor_ratio, top_anchor_ratio=top_anchor_ratio,
        scale_factor=scale_factor,
        speed_in_pixels_per_second=speed_in_pixels_per_second))

  def ScrollPage(self, left_start_ratio=0.5, top_start_ratio=0.5,
                 direction='down', distance=None, distance_expr=None,
                 speed_in_pixels_per_second=800, use_touch=False):
    """Perform scroll gesture on the page.

    You may specify distance or distance_expr, but not both. If
    neither is specified, the default scroll distance is variable
    depending on direction (see scroll.js for full implementation).

    Args:
      left_start_ratio: The horizontal starting coordinate of the
          gesture, as a ratio of the visible bounding rectangle for
          document.body.
      top_start_ratio: The vertical starting coordinate of the
          gesture, as a ratio of the visible bounding rectangle for
          document.body.
      direction: The direction of scroll, either 'left', 'right',
          'up', or 'down'
      distance: The distance to scroll (in pixel).
      distance_expr: A JavaScript expression (as string) that can be
          evaluated to compute scroll distance. Example:
          'window.scrollTop' or '(function() { return crazyMath(); })()'.
      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
      use_touch: Whether scrolling should be done with touch input.
    """
    self._RunAction(ScrollAction(
        left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
        direction=direction, distance=distance, distance_expr=distance_expr,
        speed_in_pixels_per_second=speed_in_pixels_per_second,
        use_touch=use_touch))

  def ScrollElement(self, selector=None, text=None, element_function=None,
                    left_start_ratio=0.5, top_start_ratio=0.5,
                    direction='down', distance=None, distance_expr=None,
                    speed_in_pixels_per_second=800, use_touch=False):
    """Perform scroll gesture on the element.

    The element may be selected via selector, text, or element_function.
    Only one of these arguments must be specified.

    You may specify distance or distance_expr, but not both. If
    neither is specified, the default scroll distance is variable
    depending on direction (see scroll.js for full implementation).

    Args:
      selector: A CSS selector describing the element.
      text: The element must contains this exact text.
      element_function: A JavaScript function (as string) that is used
          to retrieve the element. For example:
          'function() { return foo.element; }'.
      left_start_ratio: The horizontal starting coordinate of the
          gesture, as a ratio of the visible bounding rectangle for
          the element.
      top_start_ratio: The vertical starting coordinate of the
          gesture, as a ratio of the visible bounding rectangle for
          the element.
      direction: The direction of scroll, either 'left', 'right',
          'up', or 'down'
      distance: The distance to scroll (in pixel).
      distance_expr: A JavaScript expression (as string) that can be
          evaluated to compute scroll distance. Example:
          'window.scrollTop' or '(function() { return crazyMath(); })()'.
      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
      use_touch: Whether scrolling should be done with touch input.
    """
    self._RunAction(ScrollAction(
        selector=selector, text=text, element_function=element_function,
        left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
        direction=direction, distance=distance, distance_expr=distance_expr,
        speed_in_pixels_per_second=speed_in_pixels_per_second,
        use_touch=use_touch))

  def ScrollBouncePage(self, left_start_ratio=0.5, top_start_ratio=0.5,
                       direction='down', distance=100,
                       overscroll=10, repeat_count=10,
                       speed_in_pixels_per_second=400):
    """Perform scroll bounce gesture on the page.

    This gesture scrolls the page by the number of pixels specified in
    distance, in the given direction, followed by a scroll by
    (distance + overscroll) pixels in the opposite direction.
    The above gesture is repeated repeat_count times.

    Args:
      left_start_ratio: The horizontal starting coordinate of the
          gesture, as a ratio of the visible bounding rectangle for
          document.body.
      top_start_ratio: The vertical starting coordinate of the
          gesture, as a ratio of the visible bounding rectangle for
          document.body.
      direction: The direction of scroll, either 'left', 'right',
          'up', or 'down'
      distance: The distance to scroll (in pixel).
      overscroll: The number of additional pixels to scroll back, in
          addition to the givendistance.
      repeat_count: How often we want to repeat the full gesture.
      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
    """
    self._RunAction(ScrollBounceAction(
        left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
        direction=direction, distance=distance,
        overscroll=overscroll, repeat_count=repeat_count,
        speed_in_pixels_per_second=speed_in_pixels_per_second))

  def ScrollBounceElement(self, selector=None, text=None, element_function=None,
                          left_start_ratio=0.5, top_start_ratio=0.5,
                          direction='down', distance=100,
                          overscroll=10, repeat_count=10,
                          speed_in_pixels_per_second=400):
    """Perform scroll bounce gesture on the element.

    This gesture scrolls on the element by the number of pixels specified in
    distance, in the given direction, followed by a scroll by
    (distance + overscroll) pixels in the opposite direction.
    The above gesture is repeated repeat_count times.

    Args:
      selector: A CSS selector describing the element.
      text: The element must contains this exact text.
      element_function: A JavaScript function (as string) that is used
          to retrieve the element. For example:
          'function() { return foo.element; }'.
      left_start_ratio: The horizontal starting coordinate of the
          gesture, as a ratio of the visible bounding rectangle for
          document.body.
      top_start_ratio: The vertical starting coordinate of the
          gesture, as a ratio of the visible bounding rectangle for
          document.body.
      direction: The direction of scroll, either 'left', 'right',
          'up', or 'down'
      distance: The distance to scroll (in pixel).
      overscroll: The number of additional pixels to scroll back, in
          addition to the givendistance.
      repeat_count: How often we want to repeat the full gesture.
      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
    """
    self._RunAction(ScrollBounceAction(
        selector=selector, text=text, element_function=element_function,
        left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
        direction=direction, distance=distance,
        overscroll=overscroll, repeat_count=repeat_count,
        speed_in_pixels_per_second=speed_in_pixels_per_second))

  def SwipePage(self, left_start_ratio=0.5, top_start_ratio=0.5,
                direction='left', distance=100, speed_in_pixels_per_second=800):
    """Perform swipe gesture on the page.

    Args:
      left_start_ratio: The horizontal starting coordinate of the
          gesture, as a ratio of the visible bounding rectangle for
          document.body.
      top_start_ratio: The vertical starting coordinate of the
          gesture, as a ratio of the visible bounding rectangle for
          document.body.
      direction: The direction of swipe, either 'left', 'right',
          'up', or 'down'
      distance: The distance to swipe (in pixel).
      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
    """
    self._RunAction(SwipeAction(
        left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
        direction=direction, distance=distance,
        speed_in_pixels_per_second=speed_in_pixels_per_second))

  def SwipeElement(self, selector=None, text=None, element_function=None,
                   left_start_ratio=0.5, top_start_ratio=0.5,
                   direction='left', distance=100,
                   speed_in_pixels_per_second=800):
    """Perform swipe gesture on the element.

    The element may be selected via selector, text, or element_function.
    Only one of these arguments must be specified.

    Args:
      selector: A CSS selector describing the element.
      text: The element must contains this exact text.
      element_function: A JavaScript function (as string) that is used
          to retrieve the element. For example:
          'function() { return foo.element; }'.
      left_start_ratio: The horizontal starting coordinate of the
          gesture, as a ratio of the visible bounding rectangle for
          the element.
      top_start_ratio: The vertical starting coordinate of the
          gesture, as a ratio of the visible bounding rectangle for
          the element.
      direction: The direction of swipe, either 'left', 'right',
          'up', or 'down'
      distance: The distance to swipe (in pixel).
      speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
    """
    self._RunAction(SwipeAction(
        selector=selector, text=text, element_function=element_function,
        left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
        direction=direction, distance=distance,
        speed_in_pixels_per_second=speed_in_pixels_per_second))

  def PlayMedia(self, selector=None,
                playing_event_timeout_in_seconds=0,
                ended_event_timeout_in_seconds=0):
    """Invokes the "play" action on media elements (such as video).

    Args:
      selector: A CSS selector describing the element. If none is
          specified, play the first media element on the page. If the
          selector matches more than 1 media element, all of them will
          be played.
      playing_event_timeout_in_seconds: Maximum waiting time for the "playing"
          event (dispatched when the media begins to play) to be fired.
          0 means do not wait.
      ended_event_timeout_in_seconds: Maximum waiting time for the "ended"
          event (dispatched when playback completes) to be fired.
          0 means do not wait.

    Raises:
      TimeoutException: If the maximum waiting time is exceeded.
    """
    self._RunAction(PlayAction(
        selector=selector,
        playing_event_timeout_in_seconds=playing_event_timeout_in_seconds,
        ended_event_timeout_in_seconds=ended_event_timeout_in_seconds))

  def SeekMedia(self, seconds, selector=None, timeout_in_seconds=0,
                log_time=True, label=''):
    """Performs a seek action on media elements (such as video).

    Args:
      seconds: The media time to seek to.
      selector: A CSS selector describing the element. If none is
          specified, seek the first media element on the page. If the
          selector matches more than 1 media element, all of them will
          be seeked.
      timeout_in_seconds: Maximum waiting time for the "seeked" event
          (dispatched when the seeked operation completes) to be
          fired.  0 means do not wait.
      log_time: Whether to log the seek time for the perf
          measurement. Useful when performing multiple seek.
      label: A suffix string to name the seek perf measurement.

    Raises:
      TimeoutException: If the maximum waiting time is exceeded.
    """
    self._RunAction(SeekAction(
        seconds=seconds, selector=selector,
        timeout_in_seconds=timeout_in_seconds,
        log_time=log_time, label=label))

  def LoopMedia(self, loop_count, selector=None, timeout_in_seconds=None):
    """Loops a media playback.

    Args:
      loop_count: The number of times to loop the playback.
      selector: A CSS selector describing the element. If none is
          specified, loop the first media element on the page. If the
          selector matches more than 1 media element, all of them will
          be looped.
      timeout_in_seconds: Maximum waiting time for the looped playback to
          complete. 0 means do not wait. None (the default) means to
          wait loop_count * 60 seconds.

    Raises:
      TimeoutException: If the maximum waiting time is exceeded.
    """
    self._RunAction(LoopAction(
        loop_count=loop_count, selector=selector,
        timeout_in_seconds=timeout_in_seconds))

  def ForceGarbageCollection(self):
    """Forces JavaScript garbage collection on the page."""
    self._tab.CollectGarbage()

  def PauseInteractive(self):
    """Pause the page execution and wait for terminal interaction.

    This is typically used for debugging. You can use this to pause
    the page execution and inspect the browser state before
    continuing.
    """
    # raw_input: this file targets Python 2.
    raw_input("Interacting... Press Enter to continue.")

  def RepaintContinuously(self, seconds):
    """Continuously repaints the visible content.

    It does this by requesting animation frames until the given number
    of seconds have elapsed AND at least three RAFs have been
    fired. Times out after max(60, self.seconds), if less than three
    RAFs were fired."""
    self._RunAction(RepaintContinuouslyAction(
        seconds=0 if self._skip_waits else seconds))
class Interaction(object):
  """A labeled interaction record on the timeline.

  Begin() and End() bracket the record by emitting console.time /
  console.timeEnd calls whose argument encodes the label and flags."""

  def __init__(self, action_runner, label, flags):
    # NOTE(review): ActionRunner.BeginInteraction actually passes a tab here,
    # not an ActionRunner; this works because both expose ExecuteJavaScript.
    assert action_runner
    assert label
    assert isinstance(flags, list)
    self._action_runner = action_runner
    self._label = label
    self._flags = flags
    self._started = False

  def _JavaScriptMarker(self):
    """Returns the timeline marker string for this label/flags pair."""
    return timeline_interaction_record.GetJavaScriptMarker(
        self._label, self._flags)

  def Begin(self):
    """Starts the record; must be called exactly once before End()."""
    assert not self._started
    self._started = True
    self._action_runner.ExecuteJavaScript(
        'console.time("%s");' % self._JavaScriptMarker())

  def End(self):
    """Ends a started record; Begin() may then be called again."""
    assert self._started
    self._started = False
    self._action_runner.ExecuteJavaScript(
        'console.timeEnd("%s");' % self._JavaScriptMarker())
asce1062/MAHDI-ROM_external_skia | tools/sanitize_source_files.py | 176 | 4852 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module that sanitizes source files with specified modifiers."""
import commands
import os
import sys
_FILE_EXTENSIONS_TO_SANITIZE = ['cpp', 'h', 'c', 'gyp', 'gypi']
_SUBDIRS_TO_IGNORE = ['.git', '.svn', 'third_party']
def SanitizeFilesWithModifiers(directory, file_modifiers, line_modifiers):
  """Sanitizes source files with the specified file and line modifiers.

  Recursively walks `directory`. Each file whose extension is listed in
  _FILE_EXTENSIONS_TO_SANITIZE is run through every line modifier (one
  line at a time) and then every file modifier (whole content); the file
  is rewritten in place only when a modifier actually changed something.
  Subdirectories named in _SUBDIRS_TO_IGNORE are not descended into.

  Args:
    directory: string - The directory which will be recursively traversed to
        find source files to apply modifiers to.
    file_modifiers: list - file-modification methods which should be applied to
        the complete file content (Eg: EOFOneAndOnlyOneNewlineAdder).
    line_modifiers: list - line-modification methods which should be applied to
        lines in a file (Eg: TabReplacer).
  """
  for item in os.listdir(directory):
    full_item_path = os.path.join(directory, item)

    if os.path.isfile(full_item_path):  # Item is a file.

      # Only sanitize files with extensions we care about.
      if (len(full_item_path.split('.')) > 1 and
          full_item_path.split('.')[-1] in _FILE_EXTENSIONS_TO_SANITIZE):
        f = file(full_item_path)  # NOTE: file() is Python 2 only.
        try:
          lines = f.readlines()
        finally:
          f.close()

        new_lines = []  # Collect changed lines here.
        line_number = 0  # Keeps track of line numbers in the source file.
        write_to_file = False  # File is written to only if this flag is set.

        # Run the line modifiers for each line in this file.
        for line in lines:
          original_line = line
          line_number += 1

          for modifier in line_modifiers:
            line = modifier(line, full_item_path, line_number)
            if original_line != line:
              write_to_file = True

          new_lines.append(line)

        # Run the file modifiers.
        old_content = ''.join(lines)
        new_content = ''.join(new_lines)
        for modifier in file_modifiers:
          new_content = modifier(new_content, full_item_path)
        if new_content != old_content:
          write_to_file = True

        # Write modifications to the file.
        if write_to_file:
          f = file(full_item_path, 'w')
          try:
            f.write(new_content)
          finally:
            f.close()
          print 'Made changes to %s' % full_item_path

    elif item not in _SUBDIRS_TO_IGNORE:
      # Item is a directory recursively call the method.
      SanitizeFilesWithModifiers(full_item_path, file_modifiers, line_modifiers)
############## Line Modification methods ##############
def TrailingWhitespaceRemover(line, file_path, line_number):
  """Strips out trailing whitespaces from the specified line.

  Always re-terminates with a single LF, so `line` is assumed to be a
  complete line as produced by readlines().
  """
  stripped_line = line.rstrip() + '\n'
  if line != stripped_line:
    print 'Removing trailing whitespace in %s:%s' % (file_path, line_number)
  return stripped_line
def CrlfReplacer(line, file_path, line_number):
  """Replaces CRLF with LF."""
  # Logs only when a replacement will actually happen.
  if '\r\n' in line:
    print 'Replacing CRLF with LF in %s:%s' % (file_path, line_number)
  return line.replace('\r\n', '\n')
def TabReplacer(line, file_path, line_number):
"""Replaces Tabs with 4 whitespaces."""
if '\t' in line:
print 'Replacing Tab with whitespace in %s:%s' % (file_path, line_number)
return line.replace('\t', ' ')
############## File Modification methods ##############
def CopywriteChecker(file_content, unused_file_path):
  """Ensures that the copywrite information is correct.

  Currently a no-op placeholder (sic: "copywrite" is a long-standing typo
  for "copyright"); it returns the content unchanged.
  """
  # TODO(rmistry): Figure out the legal implications of changing old copyright
  # headers.
  return file_content
def EOFOneAndOnlyOneNewlineAdder(file_content, file_path):
  """Adds one and only one LF at the end of the file."""
  # Trigger when the content does not end in '\n' OR ends in more than
  # one (the [-2:-1] slice checks the second-to-last character safely).
  if file_content and (file_content[-1] != '\n' or file_content[-2:-1] == '\n'):
    file_content = file_content.rstrip()
    file_content += '\n'
    print 'Added exactly one newline to %s' % file_path
  return file_content
def SvnEOLChecker(file_content, file_path):
  """Sets svn:eol-style property to LF."""
  # NOTE(review): file_path is interpolated into shell commands unquoted;
  # paths containing spaces or shell metacharacters would break. Assumed
  # safe because paths come from the local checkout — verify.
  output = commands.getoutput(
      'svn propget svn:eol-style %s' % file_path)
  if output != 'LF':
    print 'Setting svn:eol-style property to LF in %s' % file_path
    os.system('svn ps svn:eol-style LF %s' % file_path)
  return file_content
#######################################################

if '__main__' == __name__:
  # Sanitize the current working directory in place. Note that
  # SanitizeFilesWithModifiers returns None, so sys.exit(None) always
  # yields exit status 0 when the walk completes.
  sys.exit(SanitizeFilesWithModifiers(
      os.getcwd(),
      file_modifiers=[
          CopywriteChecker,
          EOFOneAndOnlyOneNewlineAdder,
          SvnEOLChecker,
      ],
      line_modifiers=[
          CrlfReplacer,
          TabReplacer,
          TrailingWhitespaceRemover,
      ],
  ))
| bsd-3-clause |
civisanalytics/ansible-modules-core | cloud/amazon/ec2_elb.py | 33 | 13968 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_elb
short_description: De-registers or registers instances from EC2 ELBs
description:
- This module de-registers or registers an AWS EC2 instance from the ELBs
that it belongs to.
- Returns fact "ec2_elbs" which is a list of elbs attached to the instance
if state=absent is passed as an argument.
- Will be marked changed when called only if there are ELBs found to operate on.
version_added: "1.2"
author: "John Jarvis (@jarv)"
options:
state:
description:
- register or deregister the instance
required: true
choices: ['present', 'absent']
instance_id:
description:
- EC2 Instance ID
required: true
ec2_elbs:
description:
- List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
required: false
default: None
enable_availability_zone:
description:
- Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already
been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB.
required: false
default: yes
choices: [ "yes", "no" ]
wait:
description:
- Wait for instance registration or deregistration to complete successfully before returning.
required: false
default: yes
choices: [ "yes", "no" ]
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
wait_timeout:
description:
- Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
required: false
default: 0
version_added: "1.6"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = """
# basic pre_task and post_task example
pre_tasks:
- name: Gathering ec2 facts
action: ec2_facts
- name: Instance De-register
local_action:
module: ec2_elb
instance_id: "{{ ansible_ec2_instance_id }}"
state: absent
roles:
- myrole
post_tasks:
- name: Instance Register
local_action:
module: ec2_elb
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
state: present
with_items: ec2_elbs
"""
import time

try:
    # boto is an optional dependency; its absence is reported from main()
    # via fail_json rather than crashing at import time.
    import boto
    import boto.ec2
    import boto.ec2.autoscale
    import boto.ec2.elb
    from boto.regioninfo import RegionInfo
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
class ElbManager:
    """Handles EC2 instance ELB registration and de-registration."""

    def __init__(self, module, instance_id=None, ec2_elbs=None,
                 region=None, **aws_connect_params):
        """Resolve the ELBs for instance_id (or the given ec2_elbs names).

        module: AnsibleModule used for fail_json error reporting.
        aws_connect_params: extra keyword args forwarded to connect_to_aws.
        """
        self.module = module
        self.instance_id = instance_id
        self.region = region
        self.aws_connect_params = aws_connect_params
        self.lbs = self._get_instance_lbs(ec2_elbs)
        # Becomes True as soon as any ELB is (or may have been) modified.
        self.changed = False

    def deregister(self, wait, timeout):
        """De-register the instance from all ELBs and wait for the ELB
        to report it out-of-service"""
        for lb in self.lbs:
            initial_state = self._get_instance_health(lb)
            if initial_state is None:
                # Instance isn't registered with this load
                # balancer. Ignore it and try the next one.
                continue

            lb.deregister_instances([self.instance_id])

            # The ELB is changing state in some way. Either an instance that's
            # InService is moving to OutOfService, or an instance that's
            # already OutOfService is being deregistered.
            self.changed = True

            if wait:
                self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout)

    def register(self, wait, enable_availability_zone, timeout):
        """Register the instance for all ELBs and wait for the ELB
        to report the instance in-service"""
        for lb in self.lbs:
            initial_state = self._get_instance_health(lb)

            if enable_availability_zone:
                self._enable_availailability_zone(lb)

            lb.register_instances([self.instance_id])

            if wait:
                self._await_elb_instance_state(lb, 'InService', initial_state, timeout)
            else:
                # We cannot assume no change was made if we don't wait
                # to find out.
                self.changed = True

    def exists(self, lbtest):
        """Return True if an ELB named `lbtest` is among self.lbs."""
        found = False
        for lb in self.lbs:
            if lb.name == lbtest:
                found = True
                break
        return found

    def _enable_availailability_zone(self, lb):
        """Enable the current instance's availability zone in the provided lb.
        Returns True if the zone was enabled or False if no change was made.
        lb: load balancer

        (sic: the method-name typo is kept for backward compatibility.)
        """
        instance = self._get_instance()
        if instance.placement in lb.availability_zones:
            return False

        lb.enable_zones(zones=instance.placement)

        # If successful, the new zone will have been added to
        # lb.availability_zones.
        return instance.placement in lb.availability_zones

    def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
        """Wait (polling once per second) for an ELB to change state.
        lb: load balancer
        awaited_state : state to poll for (string)

        Fails the module when the instance becomes invalid, or when the
        out-of-service reason is "Instance" and `timeout` has elapsed.
        """
        wait_timeout = time.time() + timeout
        while True:
            instance_state = self._get_instance_health(lb)

            if not instance_state:
                msg = ("The instance %s could not be put in service on %s."
                       " Reason: Invalid Instance")
                self.module.fail_json(msg=msg % (self.instance_id, lb))

            if instance_state.state == awaited_state:
                # Check the current state against the initial state, and only set
                # changed if they are different.
                if (initial_state is None) or (instance_state.state != initial_state.state):
                    self.changed = True
                break
            elif self._is_instance_state_pending(instance_state):
                # If it's pending, we'll skip further checks and continue waiting.
                pass
            elif (awaited_state == 'InService'
                  and instance_state.reason_code == "Instance"
                  and time.time() >= wait_timeout):
                # If the reason_code for the instance being out of service is
                # "Instance" this indicates a failure state, e.g. the instance
                # has failed a health check or the ELB does not have the
                # instance's availability zone enabled. The exact reason why is
                # described in InstanceState.description.
                msg = ("The instance %s could not be put in service on %s."
                       " Reason: %s")
                self.module.fail_json(msg=msg % (self.instance_id,
                                                 lb,
                                                 instance_state.description))
            time.sleep(1)

    def _is_instance_state_pending(self, instance_state):
        """
        Determines whether the instance_state is "pending", meaning there is
        an operation under way to bring it in service.
        """
        # This is messy, because AWS provides no way to distinguish between
        # an instance that is OutOfService because it's pending vs. OutOfService
        # because it's failing health checks. So we're forced to analyze the
        # description, which is likely to be brittle.
        return (instance_state and 'pending' in instance_state.description)

    def _get_instance_health(self, lb):
        """
        Check instance health; returns the status object, or None when the
        ELB reports InvalidInstance (instance not registered with that lb).
        """
        try:
            status = lb.get_instance_health([self.instance_id])[0]
        except boto.exception.BotoServerError as e:
            if e.error_code == 'InvalidInstance':
                return None
            else:
                raise
        return status

    def _get_instance_lbs(self, ec2_elbs=None):
        """Returns a list of ELBs attached to self.instance_id
        ec2_elbs: an optional list of elb names that will be used
                  for elb lookup instead of returning what elbs
                  are attached to self.instance_id"""
        if not ec2_elbs:
            # Fall back to the instance's auto scaling group membership.
            ec2_elbs = self._get_auto_scaling_group_lbs()

        try:
            elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            self.module.fail_json(msg=str(e))

        elbs = []
        marker = None
        while True:
            try:
                # Page through all load balancers using the marker token.
                newelbs = elb.get_all_load_balancers(marker=marker)
                marker = newelbs.next_marker
                elbs.extend(newelbs)
                if not marker:
                    break
            except TypeError:
                # Older versions of boto do not allow for params.
                elbs = elb.get_all_load_balancers()
                break

        if ec2_elbs:
            lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
        else:
            lbs = []
            for lb in elbs:
                for info in lb.instances:
                    if self.instance_id == info.id:
                        lbs.append(lb)
        return lbs

    def _get_auto_scaling_group_lbs(self):
        """Returns a list of ELBs associated with self.instance_id
           indirectly through its auto scaling group membership"""
        try:
            asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            self.module.fail_json(msg=str(e))

        asg_instances = asg.get_all_autoscaling_instances([self.instance_id])
        if len(asg_instances) > 1:
            self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.")

        if not asg_instances:
            asg_elbs = []
        else:
            asg_name = asg_instances[0].group_name

            asgs = asg.get_all_groups([asg_name])
            # BUG FIX: previously this tested len(asg_instances) != 1, which
            # was already guaranteed true here; the intent (per the error
            # message) is to verify exactly one auto scaling *group* returned.
            if len(asgs) != 1:
                self.module.fail_json(msg="Illegal state, expected one auto scaling group.")

            asg_elbs = asgs[0].load_balancers

        return asg_elbs

    def _get_instance(self):
        """Returns a boto.ec2.InstanceObject for self.instance_id"""
        try:
            ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            self.module.fail_json(msg=str(e))
        return ec2.get_only_instances(instance_ids=[self.instance_id])[0]
def main():
    """Module entry point: parse arguments and (de)register the instance."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state={'required': True, 'choices': ['present', 'absent']},
        instance_id={'required': True},
        ec2_elbs={'default': None, 'required': False, 'type': 'list'},
        enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
        wait={'required': False, 'default': True, 'type': 'bool'},
        # BUG FIX: the key was misspelled 'requred', which AnsibleModule
        # silently ignored. 'choices' for state is added to match the
        # documented options (fails fast on invalid values).
        wait_timeout={'required': False, 'default': 0, 'type': 'int'}
    )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if not region:
        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")

    ec2_elbs = module.params['ec2_elbs']
    wait = module.params['wait']
    enable_availability_zone = module.params['enable_availability_zone']
    timeout = module.params['wait_timeout']

    # NOTE(review): this check can never trigger — 'ec2_elbs' is always a
    # key of module.params (default None). It is kept as-is because making
    # it test for None would break registration via the auto scaling group
    # fallback in ElbManager._get_instance_lbs.
    if module.params['state'] == 'present' and 'ec2_elbs' not in module.params:
        module.fail_json(msg="ELBs are required for registration")

    instance_id = module.params['instance_id']
    elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params)

    if ec2_elbs is not None:
        for elb in ec2_elbs:
            if not elb_man.exists(elb):
                msg = "ELB %s does not exist" % elb
                module.fail_json(msg=msg)

    if module.params['state'] == 'present':
        elb_man.register(wait, enable_availability_zone, timeout)
    elif module.params['state'] == 'absent':
        elb_man.deregister(wait, timeout)

    ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
    ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)

    module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# Standard Ansible module entry-point guard.
if __name__ == '__main__':
    main()
| gpl-3.0 |
tinybit/lsd | foreign/jsoncpp-0.5.0/test/rununittests.py | 249 | 2507 | import sys
import os
import os.path
import subprocess
from glob import glob
import optparse
VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes'
class TestProxy(object):
    """Launches the test_lib_json executable, optionally under valgrind."""

    def __init__( self, test_exe_path, use_valgrind=False ):
        # Store an absolute, normalized path so the subprocess can be
        # launched regardless of the current working directory.
        self.test_exe_path = os.path.normpath( os.path.abspath( test_exe_path ) )
        self.use_valgrind = use_valgrind

    def run( self, options ):
        """Run the test executable with `options` appended.

        Returns (True, stdout) when the process exits with status 0,
        otherwise (False, stdout); stderr is folded into stdout.
        """
        if self.use_valgrind:
            cmd = VALGRIND_CMD.split()
        else:
            cmd = []
        cmd.extend( [self.test_exe_path, '--test-auto'] + options )
        process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
        stdout = process.communicate()[0]
        if process.returncode:
            return False, stdout
        return True, stdout
def runAllTests( exe_path, use_valgrind=False ):
    """Run each unit test reported by `exe_path --list-tests` individually.

    Prints a PASS/FAIL line per test plus a summary; returns 0 when all
    tests passed, 1 otherwise. (Python 2 script: uses print statements.)
    """
    test_proxy = TestProxy( exe_path, use_valgrind=use_valgrind )
    status, test_names = test_proxy.run( ['--list-tests'] )
    if not status:
        print >> sys.stderr, "Failed to obtain unit tests list:\n" + test_names
        return 1
    # One test name per line of the executable's output.
    test_names = [name.strip() for name in test_names.strip().split('\n')]
    failures = []
    for name in test_names:
        print 'TESTING %s:' % name,
        succeed, result = test_proxy.run( ['--test', name] )
        if succeed:
            print 'OK'
        else:
            failures.append( (name, result) )
            print 'FAILED'
    failed_count = len(failures)
    pass_count = len(test_names) - failed_count
    if failed_count:
        # Dump the captured output of every failed test before the summary.
        print
        for name, result in failures:
            print result
        print '%d/%d tests passed (%d failure(s))' % (
            pass_count, len(test_names), failed_count)
        return 1
    else:
        print 'All %d tests passed' % len(test_names)
        return 0
def main():
    """Parse command-line options and run the full unit-test suite.

    Exits the process with runAllTests' status code.
    """
    from optparse import OptionParser
    parser = OptionParser( usage="%prog [options] <path to test_lib_json.exe>" )
    parser.add_option("--valgrind",
        action="store_true", dest="valgrind", default=False,
        help="run all the tests using valgrind to detect memory leaks")
    parser.enable_interspersed_args()
    options, args = parser.parse_args()

    if len(args) != 1:
        # parser.error exits with status 2; the sys.exit(1) below is
        # effectively unreachable but kept as written.
        parser.error( 'Must provides at least path to test_lib_json executable.' )
        sys.exit( 1 )

    exit_code = runAllTests( args[0], use_valgrind=options.valgrind )
    sys.exit( exit_code )
# Script entry point.
if __name__ == '__main__':
    main()
| bsd-2-clause |
jose36/jmdl3 | servers/videoweed.py | 34 | 4492 | # -*- coding: iso-8859-1 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para videoweed
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re, urlparse, urllib, urllib2
import os
from core import scrapertools
from core import logger
from core import config
# Returns an array of possible video url's from the page_url
def get_video_url( page_url , premium = False , user="" , password="" , video_password="" ):
    """Resolve direct media URLs for a videoweed file page.

    Scrapes the flash player parameters from page_url and queries the
    player API for the real .flv location. premium/user/password/
    video_password belong to the common connector interface and are
    unused by this host.

    Returns a list of [label, url] entries (empty when nothing matched).
    """
    logger.info("[videoweed.py] get_video_url(page_url='%s')" % page_url)

    headers = []
    headers.append( [ "User-Agent" , "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.94 Safari/537.36" ] )
    headers.append( [ "Accept-Encoding","gzip,deflate,sdch" ] )
    data = scrapertools.cache_page(page_url,headers=headers)
    logger.info("data="+data)

    file_parameter = scrapertools.find_single_match(data,'flashvars\.file="([^"]+)"')
    logger.info("file_parameter="+file_parameter)

    filekey_parameter = scrapertools.find_single_match(data,'flashvars.filekey\="([^"]+)"')
    logger.info("filekey_parameter="+filekey_parameter)

    if filekey_parameter=="":
        # Fallback for pages that store the key in the "fkz" variable.
        filekey_parameter = scrapertools.find_single_match(data,'fkz="([^"]+)"')
        logger.info("filekey_parameter="+filekey_parameter)

    # The API expects '.' and '-' in the key percent-encoded, e.g.
    # 88%2E0%2E189%2E203%2Dd3cb0515a1ed66e5b297da999ed23b42%2D
    filekey_parameter = filekey_parameter.replace(".","%2E")
    filekey_parameter = filekey_parameter.replace("-","%2D")
    logger.info("filekey_parameter="+filekey_parameter)

    # http://www.videoweed.es/api/player.api.php?cid=undefined&cid2=undefined&file=31f8c26a80d23&cid3=undefined&key=...&numOfErrors=0&user=undefined&pass=undefined
    parameters="cid=undefined&cid2=undefined&file="+file_parameter+"&cid3=undefined&key="+filekey_parameter+"&numOfErrors=0&user=undefined&pass=undefined"
    url = "http://www.videoweed.es/api/player.api.php?"+parameters
    headers.append(["Referer",page_url])
    data = scrapertools.cache_page(url,headers=headers)
    logger.info(data)

    patron = 'url=(.*?)&title='
    matches = re.compile(patron).findall(data)
    scrapertools.printMatches(matches)

    video_urls = []
    # BUG FIX: the previous code indexed matches[0] unconditionally and
    # raised IndexError whenever the API response contained no url= field.
    if matches:
        logger.info(matches[0])
        video_urls.append( [".flv [videoweed]",matches[0]] )

    return video_urls
# Finds videos hosted on this server within the given text/page source.
def find_videos(data):
    """Scan `data` (HTML/text) for videoweed links.

    Handles three layouts: plain file URLs, embed.videoweed URLs, and the
    rep="/rep2.php?vw=..." attribute variant. Returns a deduplicated list
    of [title, url, server] triples.
    """
    encontrados = set()
    devuelve = []

    # Plain file links, e.g. http://www.videoweed.es/file/abc123
    patronvideos = '(http://www.videoweed.[a-z]+/file/[a-zA-Z0-9]+)'
    logger.info("[videoweed.py] find_videos #"+patronvideos+"#")
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    for match in matches:
        titulo = "[videoweed]"
        url = match
        if url not in encontrados:
            logger.info(" url="+url)
            devuelve.append( [ titulo , url , 'videoweed' ] )
            encontrados.add(url)
        else:
            logger.info(" url duplicada="+url)

    # Videoweed embed format (as used by islapeliculas), e.g.
    # http://embed.videoweed.com/embed.php?v=h56ts9bh1vat8
    patronvideos = "(http://embed.videoweed.*?)&"
    logger.info("[videoweed.py] find_videos #"+patronvideos+"#")
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    for match in matches:
        titulo = "[videoweed]"
        url = match
        if url not in encontrados:
            logger.info(" url="+url)
            devuelve.append( [ titulo , url , 'videoweed' ] )
            encontrados.add(url)
        else:
            logger.info(" url duplicada="+url)

    # rep attribute variant, e.g. rep="/rep2.php?vw=wuogenrzatq40&t=18&c=13"
    patronvideos = 'src="" rep="([^"]+)" width="([^"]+)" height="([^"]+)"'
    logger.info("[videoweed.py] find_videos #"+patronvideos+"#")
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    for match in matches:
        titulo = "[videoweed]"
        url = match[0]
        # Rewrite the relative rep2.php link into a canonical file URL.
        url = url.replace("/rep2.php?vw=","http://www.videoweed.es/file/")
        if url not in encontrados:
            logger.info(" url="+url)
            devuelve.append( [ titulo , url , 'videoweed' ] )
            encontrados.add(url)
        else:
            logger.info(" url duplicada="+url)

    return devuelve
def test():
video_urls = get_video_url("http://www.videoweed.es/file/57dd5d423d39c")
return len(video_urls)>0 | gpl-2.0 |
40223226/W17test2 | static/Brython3.1.1-20150328-091302/Lib/binascii.py | 620 | 24585 | """A pure Python implementation of binascii.
Rather slow and buggy in corner cases.
PyPy provides an RPython version too.
"""
# borrowed from https://bitbucket.org/pypy/pypy/src/f2bf94943a41/lib_pypy/binascii.py
class Error(Exception):
    """Raised on malformed input (mirrors the C binascii.Error)."""
    pass
class Done(Exception):
    """Internal control-flow exception: hqx ':' terminator was reached."""
    pass
class Incomplete(Exception):
    """Mirrors binascii.Incomplete (not raised in the code visible here)."""
    pass
def a2b_uu(s):
    """Decode one uuencoded line.

    The first character encodes the payload length; the rest is read in
    4-character groups, each yielding up to 3 bytes. Raises Error on
    characters outside the uu alphabet or on trailing garbage.
    """
    if not s:
        return ''

    length = (ord(s[0]) - 0x20) % 64

    def quadruplets_gen(s):
        # Yield four character codes at a time; a short tail is padded
        # with spaces and terminates the generator.
        # NOTE(review): the pad literal below looks whitespace-mangled;
        # upstream pads with enough spaces to complete a quadruplet —
        # verify against the original source.
        while s:
            try:
                yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
            except IndexError:
                s += ' '
                yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
                return
            s = s[4:]

    try:
        result = [''.join(
            [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)),
             chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)),
             chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f))
             ]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())]
    except ValueError:
        raise Error('Illegal char')
    result = ''.join(result)
    trailingdata = result[length:]
    # Anything beyond the declared length must be NUL padding.
    if trailingdata.strip('\x00'):
        raise Error('Trailing garbage')
    result = result[:length]
    if len(result) < length:
        result += ((length - len(result)) * '\x00')
    return bytes(result, __BRYTHON__.charset)
def b2a_uu(s):
    """Uuencode a chunk of at most 45 characters.

    The output line starts with a character encoding the payload length,
    followed by 4-character groups (3 input bytes each, zero-padded at
    the tail) and a newline. Raises Error for chunks longer than 45.
    """
    length = len(s)
    if length > 45:
        raise Error('At most 45 bytes at once')

    # Zero-pad to a multiple of three so every group is complete.
    padded = s + '\0' * ((3 - length % 3) % 3)

    encoded = []
    for start in range(0, len(padded), 3):
        a, b, c = (ord(ch) for ch in padded[start:start + 3])
        encoded.append(chr(0x20 + ((a >> 2) & 0x3F)))
        encoded.append(chr(0x20 + (((a << 4) | ((b >> 4) & 0xF)) & 0x3F)))
        encoded.append(chr(0x20 + (((b << 2) | ((c >> 6) & 0x3)) & 0x3F)))
        encoded.append(chr(0x20 + (c & 0x3F)))

    return chr(ord(' ') + (length & 0o77)) + ''.join(encoded) + '\n'
# Decode table: base64 alphabet character -> 6-bit value. The '='
# padding character maps to 0; characters absent here are invalid.
table_a2b_base64 = {
    'A': 0,
    'B': 1,
    'C': 2,
    'D': 3,
    'E': 4,
    'F': 5,
    'G': 6,
    'H': 7,
    'I': 8,
    'J': 9,
    'K': 10,
    'L': 11,
    'M': 12,
    'N': 13,
    'O': 14,
    'P': 15,
    'Q': 16,
    'R': 17,
    'S': 18,
    'T': 19,
    'U': 20,
    'V': 21,
    'W': 22,
    'X': 23,
    'Y': 24,
    'Z': 25,
    'a': 26,
    'b': 27,
    'c': 28,
    'd': 29,
    'e': 30,
    'f': 31,
    'g': 32,
    'h': 33,
    'i': 34,
    'j': 35,
    'k': 36,
    'l': 37,
    'm': 38,
    'n': 39,
    'o': 40,
    'p': 41,
    'q': 42,
    'r': 43,
    's': 44,
    't': 45,
    'u': 46,
    'v': 47,
    'w': 48,
    'x': 49,
    'y': 50,
    'z': 51,
    '0': 52,
    '1': 53,
    '2': 54,
    '3': 55,
    '4': 56,
    '5': 57,
    '6': 58,
    '7': 59,
    '8': 60,
    '9': 61,
    '+': 62,
    '/': 63,
    '=': 0,
}
def a2b_base64(s):
    """Decode a base64 string (liberal parser) to bytes.

    Whitespace and characters outside the alphabet are skipped; '='
    padding terminates decoding once at least two quad positions were
    consumed. Raises Error('Incorrect padding') when bits are left over.
    """
    if not isinstance(s, (str, bytes)):
        raise TypeError("expected string, got %r" % (s,))
    s = s.rstrip()
    # clean out all invalid characters, this also strips the final '=' padding
    # check for correct padding

    def next_valid_char(s, pos):
        # First alphabet character after `pos` (used to decide whether a
        # '=' at quad position 2 is followed by the second '='), or None.
        for i in range(pos + 1, len(s)):
            c = s[i]
            if c < '\x7f':
                try:
                    table_a2b_base64[c]
                    return c
                except KeyError:
                    pass
        return None

    quad_pos = 0
    leftbits = 0   # number of decoded bits not yet flushed to a byte
    leftchar = 0   # accumulator holding those bits
    res = []
    for i, c in enumerate(s):
        if isinstance(c, int):
            c = chr(c)
        if c > '\x7f' or c == '\n' or c == '\r' or c == ' ':
            continue
        if c == '=':
            # '=' is only meaningful as padding after >= 2 quad chars.
            if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='):
                continue
            else:
                leftbits = 0
                break
        try:
            next_c = table_a2b_base64[c]
        except KeyError:
            continue
        quad_pos = (quad_pos + 1) & 0x03
        leftchar = (leftchar << 6) | next_c
        leftbits += 6
        if leftbits >= 8:
            leftbits -= 8
            res.append((leftchar >> leftbits & 0xff))
            leftchar &= ((1 << leftbits) - 1)
    if leftbits != 0:
        raise Error('Incorrect padding')

    return bytes(''.join([chr(i) for i in res]), __BRYTHON__.charset)
# Encode table: 6-bit value (0..63) -> base64 alphabet character.
table_b2a_base64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"\
                   "0123456789+/"
def b2a_base64(s):
    """Base64-encode s, append '=' padding and a trailing newline.

    NOTE(review): the tail handling calls ord(final[0]) while the full
    groups are read as raw items and the generator pads with b'\\0\\0' —
    so str vs bytes input behave inconsistently. Presumably written for
    Brython's str-centric runtime; verify before reuse.
    """
    length = len(s)
    final_length = length % 3

    def triples_gen(s):
        # Yield three items at a time; a short tail is zero-padded and
        # terminates the generator.
        while s:
            try:
                yield s[0], s[1], s[2]
            except IndexError:
                s += b'\0\0'
                yield s[0], s[1], s[2]
                return
            s = s[3:]

    a = triples_gen(s[ :length - final_length])

    result = [''.join(
        [table_b2a_base64[( A >> 2 ) & 0x3F],
         table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F],
         table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F],
         table_b2a_base64[( C ) & 0x3F]])
        for A, B, C in a]

    final = s[length - final_length:]
    if final_length == 0:
        snippet = ''
    elif final_length == 1:
        # One leftover byte -> two chars plus '=='.
        a = ord(final[0])
        snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \
                  table_b2a_base64[(a << 4 ) & 0x3F] + '=='
    else:
        # Two leftover bytes -> three chars plus '='.
        a = ord(final[0])
        b = ord(final[1])
        snippet = table_b2a_base64[(a >> 2) & 0x3F] + \
                  table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \
                  table_b2a_base64[(b << 2) & 0x3F] + '='
    return bytes(''.join(result) + snippet + '\n', __BRYTHON__.charset)
def a2b_qp(s, header=False):
    """Decode a quoted-printable string to bytes.

    header=True additionally maps '_' to a space (RFC 2047 encoded-word
    rules). Soft line breaks ('=' before CR/LF) are removed; unrecognized
    '=' escapes are passed through as a literal '='.
    """
    inp = 0
    odata = []
    while inp < len(s):
        if s[inp] == '=':
            inp += 1
            if inp >= len(s):
                # Dangling '=' at end of input: drop it.
                break
            # Soft line breaks
            if (s[inp] == '\n') or (s[inp] == '\r'):
                if s[inp] != '\n':
                    while inp < len(s) and s[inp] != '\n':
                        inp += 1
                if inp < len(s):
                    inp += 1
            elif s[inp] == '=':
                # broken case from broken python qp
                odata.append('=')
                inp += 1
            elif (inp + 1 < len(s) and s[inp] in hex_numbers
                  and s[inp + 1] in hex_numbers):
                # BUG FIX: the bounds guard (inp + 1 < len(s)) is new; the
                # old code indexed s[inp + 1] unconditionally and raised
                # IndexError on input ending in '=' plus one hex digit.
                ch = chr(int(s[inp:inp+2], 16))
                inp += 2
                odata.append(ch)
            else:
                odata.append('=')
        elif header and s[inp] == '_':
            odata.append(' ')
            inp += 1
        else:
            odata.append(s[inp])
            inp += 1
    return bytes(''.join(odata), __BRYTHON__.charset)
def b2a_qp(data, quotetabs=False, istext=True, header=False):
    """Quoted-printable-encode `data`, wrapping lines at 76 characters.

    quotetabs=True means that tab and space characters are always
    quoted.
    istext=False means that \r and \n are treated as regular characters
    header=True encodes space characters with '_' and requires
    real '_' characters to be quoted.
    """
    MAXLINESIZE = 76

    # See if this string is using CRLF line ends
    lf = data.find('\n')
    crlf = lf > 0 and data[lf-1] == '\r'
    inp = 0
    linelen = 0
    odata = []
    while inp < len(data):
        c = data[inp]
        # The big disjunction below lists every case that must be emitted
        # as an =XX escape (non-ASCII, '=', leading '.', control chars,
        # trailing whitespace, optionally tabs/spaces and CR/LF).
        if (c > '~' or
            c == '=' or
            (header and c == '_') or
            (c == '.' and linelen == 0 and (inp+1 == len(data) or
                                            data[inp+1] == '\n' or
                                            data[inp+1] == '\r')) or
            (not istext and (c == '\r' or c == '\n')) or
            ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or
            (c <= ' ' and c != '\r' and c != '\n' and
             (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))):
            linelen += 3
            if linelen >= MAXLINESIZE:
                # Soft line break before the escape would overflow the line.
                odata.append('=')
                if crlf: odata.append('\r')
                odata.append('\n')
                linelen = 3
            odata.append('=' + two_hex_digits(ord(c)))
            inp += 1
        else:
            if (istext and
                (c == '\n' or (inp+1 < len(data) and c == '\r' and
                               data[inp+1] == '\n'))):
                linelen = 0
                # Protect against whitespace on end of line
                if (len(odata) > 0 and
                    (odata[-1] == ' ' or odata[-1] == '\t')):
                    ch = ord(odata[-1])
                    odata[-1] = '='
                    odata.append(two_hex_digits(ch))

                if crlf: odata.append('\r')
                odata.append('\n')
                if c == '\r':
                    inp += 2
                else:
                    inp += 1
            else:
                # Soft-break before a plain character that would overflow.
                if (inp + 1 < len(data) and
                    data[inp+1] != '\n' and
                    (linelen + 1) >= MAXLINESIZE):
                    odata.append('=')
                    if crlf: odata.append('\r')
                    odata.append('\n')
                    linelen = 0

                linelen += 1
                if header and c == ' ':
                    c = '_'
                odata.append(c)
                inp += 1
    return ''.join(odata)
# Uppercase hex digits, indexed by nibble value; shared by the helpers
# below.
hex_numbers = '0123456789ABCDEF'


def hex(n):
    """Return the uppercase hex representation of integer n, no '0x'
    prefix (e.g. hex(255) == 'FF', hex(-16) == '-10').

    Note: intentionally shadows the builtin hex() within this module.
    """
    if n == 0:
        return '0'

    if n < 0:
        n = -n
        sign = '-'
    else:
        sign = ''
    arr = []

    def hex_gen(n):
        """ Yield a nibble at a time, least-significant first. """
        while n:
            yield n % 0x10
            # BUG FIX: was true division ('/'), which under Python 3 turns
            # n into a float, producing float nibbles and breaking the loop.
            n = n // 0x10

    for nibble in hex_gen(n):
        arr = [hex_numbers[nibble]] + arr
    return sign + ''.join(arr)


def two_hex_digits(n):
    """Return exactly two uppercase hex digits for 0 <= n <= 255."""
    # BUG FIX: was hex_numbers[n / 0x10]; under Python 3 a float index
    # raises TypeError.
    return hex_numbers[n // 0x10] + hex_numbers[n % 0x10]


def strhex_to_int(s):
    """Parse an uppercase hex string into an int.

    Raises ValueError (from str.index) on characters outside
    '0123456789ABCDEF' — lowercase digits are not accepted.
    """
    i = 0
    for c in s:
        i = i * 0x10 + hex_numbers.index(c)
    return i
# binhex4 (hqx) alphabet: 6-bit value (0..63) -> character.
hqx_encoding = '!"#$%&\'()*+,-012345689@ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr'

# Sentinels stored in table_a2b_hqx; chosen above 0x3F so they cannot
# collide with real 6-bit values.
DONE = 0x7f  # the ':' data terminator was reached
SKIP = 0x7e  # whitespace: skip silently
FAIL = 0x7d  # character is not part of the hqx alphabet
# Decode table for a2b_hqx, indexed by ord(character): either a 6-bit
# value, or one of the DONE/SKIP/FAIL sentinels defined above.
table_a2b_hqx = [
    #^@    ^A    ^B    ^C    ^D    ^E    ^F    ^G
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    #\b    \t    \n    ^K    ^L    \r    ^N    ^O
    FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL,
    #^P    ^Q    ^R    ^S    ^T    ^U    ^V    ^W
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    #^X    ^Y    ^Z    ^[    ^\    ^]    ^^    ^_
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    #      !     "     #     $     %     &     '
    FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
    #(     )     *     +     ,     -     .     /
    0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL,
    #0     1     2     3     4     5     6     7
    0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL,
    #8     9     :     ;     <     =     >     ?
    0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL,
    #@     A     B     C     D     E     F     G
    0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
    #H     I     J     K     L     M     N     O
    0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL,
    #P     Q     R     S     T     U     V     W
    0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL,
    #X     Y     Z     [     \     ]     ^     _
    0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL,
    #`     a     b     c     d     e     f     g
    0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL,
    #h     i     j     k     l     m     n     o
    0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL,
    #p     q     r     s     t     u     v     w
    0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL,
    #x     y     z     {     |     }     ~     ^?
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    # 0x80-0xFF: all high-bit characters are invalid.
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
]
def a2b_hqx(s):
    """Decode binhex4 (hqx) encoded text.

    Returns (decoded_string, done) where done == 1 iff the ':' data
    terminator was seen. Raises Error('Illegal character') on characters
    outside the hqx alphabet; whitespace (LF/CR) is skipped.
    """
    result = []

    def quadruples_gen(s):
        # Yield lists of up to four 6-bit values; raises Done when the
        # ':' terminator is reached (after yielding the partial group).
        t = []
        for c in s:
            res = table_a2b_hqx[ord(c)]
            if res == SKIP:
                continue
            elif res == FAIL:
                raise Error('Illegal character')
            elif res == DONE:
                yield t
                raise Done
            else:
                t.append(res)
            if len(t) == 4:
                yield t
                t = []
        yield t

    done = 0
    try:
        for snippet in quadruples_gen(s):
            length = len(snippet)
            # 4 values -> 3 bytes; 3 -> 2 bytes; 2 -> 1 byte; 0/1 -> none.
            if length == 4:
                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
                result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
                result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3])))
            elif length == 3:
                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
                result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
            elif length == 2:
                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
    except Done:
        done = 1
    except Error:
        raise
    return (''.join(result), done)
    # should this return a bytes object?
    #return (bytes(''.join(result), __BRYTHON__.charset), done)
def b2a_hqx(s):
    """Encode a string into the binhex4 (hqx) 6-bit alphabet.

    Every full 3-character group produces 4 output characters; a 2- or
    1-character tail produces 3 or 2 characters respectively. No run
    length encoding or terminator is applied here.
    """
    out = []
    for start in range(0, len(s), 3):
        group = [ord(ch) for ch in s[start:start + 3]]
        a = group[0]
        out.append(hqx_encoding[(a & 0xfc) >> 2])
        if len(group) == 1:
            out.append(hqx_encoding[(a & 0x03) << 4])
            continue
        b = group[1]
        out.append(hqx_encoding[((a & 0x03) << 4) | ((b & 0xf0) >> 4)])
        if len(group) == 2:
            out.append(hqx_encoding[(b & 0x0f) << 2])
            continue
        c = group[2]
        out.append(hqx_encoding[((b & 0x0f) << 2) | ((c & 0xc0) >> 6)])
        out.append(hqx_encoding[c & 0x3f])
    return ''.join(out)
# CRC-16/CCITT lookup table (polynomial 0x1021, MSB-first), as used by
# the binhex4 format; indexed by one byte at a time in crc_hqx.
crctab_hqx = [
    0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
    0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
    0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
    0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
    0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
    0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
    0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
    0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
    0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
    0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
    0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
    0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
    0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
    0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
    0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
    0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
    0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
    0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
    0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
    0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
    0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
    0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
    0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
    0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
    0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
    0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
    0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
    0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
    0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
    0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
    0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
    0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
]


def crc_hqx(s, crc):
    """Update the CRC-16/CCITT value *crc* with the characters of *s*
    and return the new 16-bit CRC (same contract as binascii.crc_hqx)."""
    for code in map(ord, s):
        crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ code]
    return crc
def rlecode_hqx(s):
    """Run-length encode *s* using the binhex4 RLE scheme.

    A run of 4-255 identical characters is emitted as the character
    followed by '\\x90' and the run length; '\\x90' itself is always
    escaped as '\\x90\\x00'.  Unlike CPython's binascii, this
    implementation run-length encodes '\\x90' characters as well.
    """
    if not s:
        return ''
    pieces = []

    def flush(ch, n):
        # Emit one finished run: n copies of ch.
        literal = ['\x90', '\x00'] if ch == '\x90' else [ch]
        if n < 4:
            # Short runs are cheaper written out literally.
            pieces.extend(literal * n)
        else:
            pieces.extend(literal + ['\x90', chr(n)])

    # A sentinel different from the last character forces the final run
    # to be flushed by the loop itself.
    sentinel = '?' if s[-1] == '!' else '!'
    run_char = s[0]
    run_len = 1
    for ch in s[1:] + sentinel:
        if ch == run_char and run_len < 255:
            run_len += 1
        else:
            flush(run_char, run_len)
            run_char, run_len = ch, 1
    return ''.join(pieces)
def rledecode_hqx(s):
    """Expand binhex4 run-length encoding.

    '\\x90' is the RLE marker: '\\x90' + chr(n) with n > 0 means the
    preceding character appears n times in total, while '\\x90\\x00'
    is a literal '\\x90'.
    NOTE(review): a marker at the very start or end of *s* raises
    IndexError here (CPython raises binascii.Error/Incomplete), and
    after a literal '\\x90' the "previous character" is not advanced to
    trailing literal text - confirm against CPython before relying on it.
    """
    chunks = s.split('\x90')
    out = [chunks[0]]
    previous = chunks[0]
    for chunk in chunks[1:]:
        repeat = ord(chunk[0])
        if repeat:
            # The first occurrence was already emitted; add the rest.
            out.append(previous[-1] * (repeat - 1))
            previous = chunk
        else:
            # '\x90\x00' decodes to a literal '\x90'.
            out.append('\x90')
            previous = '\x90'
        out.append(chunk[1:])
    return ''.join(out)
# Standard CRC-32 (IEEE 802.3, polynomial 0xEDB88320 reflected) lookup
# table, indexed one byte at a time in crc32 below.
crc_32_tab = [
    0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
    0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
    0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
    0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
    0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
    0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
    0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
    0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
    0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
    0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
    0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
    0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
    0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
    0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
    0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
    0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
    0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
    0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
    0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
    0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
    0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
    0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
    0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
    0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
    0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
    0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
    0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
    0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
    0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
    0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
    0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
    0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
    0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
    0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
    0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
    0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
    0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
    0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
    0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
    0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
    0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
    0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
    0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
    0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
    0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
    0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
    0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
    0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
    0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
    0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
    0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
    0x2d02ef8d
]


def crc32(s, crc=0):
    """Compute the CRC-32 checksum of string *s*, starting from *crc*.

    Returns the checksum as a *signed* 32-bit integer, matching the
    Python 2 ``zlib.crc32`` / ``binascii.crc32`` convention (values with
    the high bit set come back negative).
    """
    crc = ~int(crc) & 0xffffffff
    for c in s:
        # Table-driven update, one byte per step; (crc >> 8) zero-fills
        # on the left because crc is kept non-negative.
        crc = crc_32_tab[(crc ^ ord(c)) & 0xff] ^ (crc >> 8)
    result = crc ^ 0xffffffff
    # Map the unsigned 32-bit value into the signed range.  The original
    # tested `result > 2**31`, which left exactly 2**31 unconverted;
    # every value with the high bit set must become negative.
    if result >= 2**31:
        result -= 2**32
    return result
def b2a_hex(s):
    """Return the hexadecimal representation of *s*: two lowercase hex
    digits per input character (low byte of each character's code)."""
    # '%02x' reproduces the original per-nibble arithmetic; masking with
    # 0xff keeps only the low byte, exactly as the nibble shifts did.
    return ''.join('%02x' % (ord(ch) & 0xff) for ch in s)


hexlify = b2a_hex
# Maps an ASCII code point (0-127) to its hex-digit value (0-15), or -1
# for any character that is not a hexadecimal digit.  Both upper- and
# lower-case a-f are accepted; a2b_hex indexes this table per character.
table_hex = [
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
     0, 1, 2, 3,  4, 5, 6, 7,  8, 9,-1,-1, -1,-1,-1,-1,
    -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1
]
def a2b_hex(t):
    """Decode a string of hex digits into bytes.

    Raises TypeError for an odd-length input or for characters that are
    not hexadecimal digits (checked pair by pair, left to right).
    """
    decoded = []
    pos = 0
    while pos < len(t):
        if pos + 1 >= len(t):
            # A lone trailing digit means the input length was odd.
            raise TypeError('Odd-length string')
        hi = table_hex[ord(t[pos])]
        lo = table_hex[ord(t[pos + 1])]
        if hi < 0 or lo < 0:
            raise TypeError('Non-hexadecimal digit found')
        decoded.append(chr((hi << 4) + lo))
        pos += 2
    return bytes(''.join(decoded), __BRYTHON__.charset)


unhexlify = a2b_hex
| gpl-3.0 |
brianmhunt/SIWorldMap | werkzeug/testsuite/contrib/cache.py | 12 | 4277 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.cache
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the cache system
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import time
import unittest
import tempfile
import shutil
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib import cache
try:
import redis
except ImportError:
redis = None
class SimpleCacheTestCase(WerkzeugTestCase):
    """Tests for the in-memory SimpleCache backend."""

    def test_get_dict(self):
        """get_dict returns each requested key mapped to its stored value."""
        simple = cache.SimpleCache()
        simple.set('a', 'a')
        simple.set('b', 'b')
        fetched = simple.get_dict('a', 'b')
        for key in ('a', 'b'):
            assert key in fetched
            assert fetched[key] == key

    def test_set_many(self):
        """set_many accepts both a mapping and an iterable of pairs."""
        simple = cache.SimpleCache()
        simple.set_many({0: 0, 1: 1, 2: 4})
        assert simple.get(2) == 4
        simple.set_many((i, i * i) for i in xrange(3))
        assert simple.get(2) == 4
class FileSystemCacheTestCase(WerkzeugTestCase):
    """Tests for the filesystem-backed cache.

    Each test creates its own temporary directory and is responsible for
    removing it again; cleanup is done in ``finally`` blocks so a failing
    assertion cannot leak the directory (previously only test_set_get
    cleaned up on failure).
    """

    def test_set_get(self):
        """Values survive a set/get round trip through the filesystem."""
        tmp_dir = tempfile.mkdtemp()
        try:
            c = cache.FileSystemCache(cache_dir=tmp_dir)
            for i in range(3):
                c.set(str(i), i * i)
            for i in range(3):
                result = c.get(str(i))
                assert result == i * i
        finally:
            shutil.rmtree(tmp_dir)

    def test_filesystemcache_prune(self):
        """Writing past the threshold prunes the cache directory down to
        at most ``threshold`` files."""
        THRESHOLD = 13
        tmp_dir = tempfile.mkdtemp()
        try:
            c = cache.FileSystemCache(cache_dir=tmp_dir, threshold=THRESHOLD)
            for i in range(2 * THRESHOLD):
                c.set(str(i), i)
            cache_files = os.listdir(tmp_dir)
        finally:
            shutil.rmtree(tmp_dir)
        assert len(cache_files) <= THRESHOLD

    def test_filesystemcache_clear(self):
        """clear() removes every cache file from the directory."""
        tmp_dir = tempfile.mkdtemp()
        try:
            c = cache.FileSystemCache(cache_dir=tmp_dir)
            c.set('foo', 'bar')
            cache_files = os.listdir(tmp_dir)
            assert len(cache_files) == 1
            c.clear()
            cache_files = os.listdir(tmp_dir)
            assert len(cache_files) == 0
        finally:
            shutil.rmtree(tmp_dir)
class RedisCacheTestCase(WerkzeugTestCase):
    """Integration tests for the Redis cache backend.

    These require a reachable Redis server; the suite() factory only
    registers this case when the ``redis`` client library is importable.
    """

    def make_cache(self):
        # A key prefix keeps test keys from colliding with other data in
        # a shared Redis instance.
        return cache.RedisCache(key_prefix='werkzeug-test-case:')

    def teardown(self):
        # Remove every key written under the test prefix after each test.
        self.make_cache().clear()

    def test_compat(self):
        """Values written directly through the raw redis client (bypassing
        the cache's serialization) are still readable via the cache."""
        c = self.make_cache()
        c._client.set(c.key_prefix + 'foo', 'Awesome')
        self.assert_equal(c.get('foo'), 'Awesome')
        # Numeric-looking raw strings come back as integers.
        c._client.set(c.key_prefix + 'foo', '42')
        self.assert_equal(c.get('foo'), 42)

    def test_get_set(self):
        # Non-string values (here a list) round-trip through serialization.
        c = self.make_cache()
        c.set('foo', ['bar'])
        assert c.get('foo') == ['bar']

    def test_get_many(self):
        # get_many preserves the requested key order in its result list.
        c = self.make_cache()
        c.set('foo', ['bar'])
        c.set('spam', 'eggs')
        assert c.get_many('foo', 'spam') == [['bar'], 'eggs']

    def test_set_many(self):
        c = self.make_cache()
        c.set_many({'foo': 'bar', 'spam': ['eggs']})
        assert c.get('foo') == 'bar'
        assert c.get('spam') == ['eggs']

    def test_expire(self):
        # Timing-based: a 1-second timeout plus a 2-second sleep must
        # expire the key.  Can be flaky on a heavily loaded machine.
        c = self.make_cache()
        c.set('foo', 'bar', 1)
        time.sleep(2)
        assert c.get('foo') is None

    def test_add(self):
        c = self.make_cache()
        # sanity check that add() works like set()
        c.add('foo', 'bar')
        assert c.get('foo') == 'bar'
        # add() must NOT overwrite an existing key.
        c.add('foo', 'qux')
        assert c.get('foo') == 'bar'

    def test_delete(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        assert c.get('foo') == 'bar'
        c.delete('foo')
        assert c.get('foo') is None

    def test_delete_many(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        c.add('spam', 'eggs')
        c.delete_many('foo', 'spam')
        assert c.get('foo') is None
        assert c.get('spam') is None

    def test_inc_dec(self):
        # inc/dec adjust stored integers atomically on the server side.
        c = self.make_cache()
        c.set('foo', 1)
        assert c.inc('foo') == 2
        assert c.dec('foo') == 1
        c.delete('foo')
def suite():
    """Assemble the cache test suite.  Redis tests are included only
    when the redis client library could be imported."""
    cases = [SimpleCacheTestCase, FileSystemCacheTestCase]
    if redis is not None:
        cases.append(RedisCacheTestCase)
    combined = unittest.TestSuite()
    for case in cases:
        combined.addTest(unittest.makeSuite(case))
    return combined
| mit |
dimagol/trex-core | scripts/external_libs/elasticsearch/elasticsearch/client/cat.py | 2 | 17303 | from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
class CatClient(NamespacedClient):
    """Client for the Elasticsearch "cat" family of APIs.

    Each method forwards a ``GET`` request for one ``/_cat/...``
    endpoint through ``self.transport.perform_request``; the keyword
    arguments accepted by each ``@query_params`` decorator are passed
    along as query-string parameters.
    """

    @query_params('h', 'help', 'local', 'master_timeout', 'v')
    def aliases(self, name=None, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-alias.html>`_

        :arg name: A comma-separated list of alias names to return
        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg local: Return local information, do not retrieve the state from
            master node (default: false)
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', _make_path('_cat',
            'aliases', name), params=params)

    @query_params('bytes', 'h', 'help', 'local', 'master_timeout', 'v')
    def allocation(self, node_id=None, params=None):
        """
        Allocation provides a snapshot of how shards have located around the
        cluster and the state of disk usage.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-allocation.html>`_

        :arg node_id: A comma-separated list of node IDs or names to limit the
            returned information
        :arg bytes: The unit in which to display byte values, valid choices are:
            'b', 'k', 'kb', 'm', 'mb', 'g', 'gb', 't', 'tb', 'p', 'pb'
        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg local: Return local information, do not retrieve the state from
            master node (default: false)
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', _make_path('_cat',
            'allocation', node_id), params=params)

    @query_params('h', 'help', 'local', 'master_timeout', 'v')
    def count(self, index=None, params=None):
        """
        Count provides quick access to the document count of the entire cluster,
        or individual indices.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-count.html>`_

        :arg index: A comma-separated list of index names to limit the returned
            information
        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg local: Return local information, do not retrieve the state from
            master node (default: false)
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', _make_path('_cat',
            'count', index), params=params)

    @query_params('h', 'help', 'local', 'master_timeout', 'ts', 'v')
    def health(self, params=None):
        """
        health is a terse, one-line representation of the same information from
        :meth:`~elasticsearch.client.cluster.ClusterClient.health` API
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-health.html>`_

        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg local: Return local information, do not retrieve the state from
            master node (default: false)
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg ts: Set to false to disable timestamping, default True
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', '/_cat/health',
            params=params)

    @query_params('help')
    def help(self, params=None):
        """
        A simple help for the cat api.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat.html>`_

        :arg help: Return help information, default False
        """
        return self.transport.perform_request('GET', '/_cat', params=params)

    @query_params('bytes', 'format', 'h', 'health', 'help', 'local',
        'master_timeout', 'pri', 'v')
    def indices(self, index=None, params=None):
        """
        The indices command provides a cross-section of each index.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-indices.html>`_

        :arg index: A comma-separated list of index names to limit the returned
            information
        :arg bytes: The unit in which to display byte values, valid choices are:
            'b', 'k', 'm', 'g'
        :arg h: Comma-separated list of column names to display
        :arg health: A health status ("green", "yellow", or "red" to filter only
            indices matching the specified health status, default None, valid
            choices are: 'green', 'yellow', 'red'
        :arg help: Return help information, default False
        :arg local: Return local information, do not retrieve the state from
            master node (default: false)
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg pri: Set to true to return stats only for primary shards, default
            False
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', _make_path('_cat',
            'indices', index), params=params)

    @query_params('h', 'help', 'local', 'master_timeout', 'v')
    def master(self, params=None):
        """
        Displays the master's node ID, bound IP address, and node name.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-master.html>`_

        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg local: Return local information, do not retrieve the state from
            master node (default: false)
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', '/_cat/master',
            params=params)

    @query_params('h', 'help', 'local', 'master_timeout', 'v')
    def nodes(self, params=None):
        """
        The nodes command shows the cluster topology.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodes.html>`_

        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg local: Return local information, do not retrieve the state from
            master node (default: false)
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', '/_cat/nodes',
            params=params)

    @query_params('bytes', 'h', 'help', 'master_timeout', 'v')
    def recovery(self, index=None, params=None):
        """
        recovery is a view of shard replication.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-recovery.html>`_

        :arg index: A comma-separated list of index names to limit the returned
            information
        :arg bytes: The unit in which to display byte values, valid choices are:
            'b', 'k', 'kb', 'm', 'mb', 'g', 'gb', 't', 'tb', 'p', 'pb'
        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', _make_path('_cat',
            'recovery', index), params=params)

    @query_params('h', 'help', 'local', 'master_timeout', 'v')
    def shards(self, index=None, params=None):
        """
        The shards command is the detailed view of what nodes contain which shards.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-shards.html>`_

        :arg index: A comma-separated list of index names to limit the returned
            information
        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg local: Return local information, do not retrieve the state from
            master node (default: false)
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', _make_path('_cat',
            'shards', index), params=params)

    @query_params('h', 'help', 'v')
    def segments(self, index=None, params=None):
        """
        The segments command is the detailed view of Lucene segments per index.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-segments.html>`_

        :arg index: A comma-separated list of index names to limit the returned
            information
        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', _make_path('_cat',
            'segments', index), params=params)

    @query_params('h', 'help', 'local', 'master_timeout', 'v')
    def pending_tasks(self, params=None):
        """
        pending_tasks provides the same information as the
        :meth:`~elasticsearch.client.cluster.ClusterClient.pending_tasks` API
        in a convenient tabular format.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-pending-tasks.html>`_

        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg local: Return local information, do not retrieve the state from
            master node (default: false)
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', '/_cat/pending_tasks',
            params=params)

    @query_params('h', 'help', 'local', 'master_timeout', 'size',
        'thread_pool_patterns', 'v')
    def thread_pool(self, thread_pools=None, params=None):
        """
        Get information about thread pools.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-thread-pool.html>`_

        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg local: Return local information, do not retrieve the state from
            master node (default: false)
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg size: The multiplier in which to display values, valid choices are:
            '', 'k', 'm', 'g', 't', 'p'
        :arg thread_pool_patterns: A comma-separated list of regular-expressions
            to filter the thread pools in the output
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', _make_path('_cat',
            'thread_pool', thread_pools), params=params)

    @query_params('bytes', 'h', 'help', 'local', 'master_timeout', 'v')
    def fielddata(self, fields=None, params=None):
        """
        Shows information about currently loaded fielddata on a per-node basis.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-fielddata.html>`_

        :arg fields: A comma-separated list of fields to return the fielddata
            size
        :arg bytes: The unit in which to display byte values, valid choices are:
            'b', 'k', 'kb', 'm', 'mb', 'g', 'gb', 't', 'tb', 'p', 'pb'
        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg local: Return local information, do not retrieve the state from
            master node (default: false)
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', _make_path('_cat',
            'fielddata', fields), params=params)

    @query_params('h', 'help', 'local', 'master_timeout', 'v')
    def plugins(self, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-plugins.html>`_

        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg local: Return local information, do not retrieve the state from
            master node (default: false)
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', '/_cat/plugins',
            params=params)

    @query_params('h', 'help', 'local', 'master_timeout', 'v')
    def nodeattrs(self, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodeattrs.html>`_

        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg local: Return local information, do not retrieve the state from
            master node (default: false)
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', '/_cat/nodeattrs',
            params=params)

    @query_params('h', 'help', 'local', 'master_timeout', 'v')
    def repositories(self, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-repositories.html>`_

        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg local: Return local information, do not retrieve the state from
            master node, default False
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', '/_cat/repositories',
            params=params)

    @query_params('h', 'help', 'ignore_unavailable', 'master_timeout', 'v')
    def snapshots(self, repository=None, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-snapshots.html>`_

        :arg repository: Name of repository from which to fetch the snapshot
            information
        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg ignore_unavailable: Set to true to ignore unavailable snapshots,
            default False
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', _make_path('_cat',
            'snapshots', repository), params=params)

    @query_params('actions', 'detailed', 'format', 'h', 'help', 'node_id',
        'parent_node', 'parent_task', 'v')
    def tasks(self, params=None):
        """
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html>`_

        :arg actions: A comma-separated list of actions that should be returned.
            Leave empty to return all.
        :arg detailed: Return detailed task information (default: false)
        :arg format: a short version of the Accept header, e.g. json, yaml
        :arg h: Comma-separated list of column names to display
        :arg help: Return help information, default False
        :arg node_id: A comma-separated list of node IDs or names to limit the
            returned information; use `_local` to return information from the
            node you're connecting to, leave empty to get information from all
            nodes
        :arg parent_node: Return tasks with specified parent node.
        :arg parent_task: Return tasks with specified parent task id. Set to -1
            to return all.
        :arg v: Verbose mode. Display column headers, default False
        """
        return self.transport.perform_request('GET', '/_cat/tasks',
            params=params)
| apache-2.0 |
julen/translate | translate/convert/po2rc.py | 25 | 4848 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2006,2008-2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert Gettext PO localization files back to Windows Resource (.rc) files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/rc2po.html
for examples and usage instructions.
"""
from translate.convert import convert
from translate.storage import po, rc
class rerc:
    """Re-assembles a Windows .rc file from a template .rc plus the
    translations held in a PO store (the po2rc back-conversion).

    NOTE: Python 2 code - convertblock relies on the ``unicode`` builtin
    and works on byte strings encoded with ``self.charset``.
    """

    def __init__(self, templatefile, charset="utf-8", lang=None, sublang=None):
        # templatefile: file object of the original (source-language) .rc
        # charset: used both to parse the template and to encode output
        # lang / sublang: values written into the output LANGUAGE statement
        self.templatefile = templatefile
        self.templatestore = rc.rcfile(templatefile, encoding=charset)
        self.inputdict = {}  # maps unit location -> encoded translated string
        self.charset = charset
        self.lang = lang
        self.sublang = sublang

    def convertstore(self, inputstore, includefuzzy=False):
        """Return the template's blocks with translations substituted in.

        For UTF-8 output the blocks are wrapped in ``#pragma code_page``
        directives so the resource compiler decodes them correctly."""
        self.makestoredict(inputstore, includefuzzy)
        outputblocks = []
        for block in self.templatestore.blocks:
            outputblocks.append(self.convertblock(block))
        if self.charset == "utf-8":
            outputblocks.insert(0, "#pragma code_page(65001)\n")
            outputblocks.append("#pragma code_page(default)")
        return outputblocks

    def makestoredict(self, store, includefuzzy=False):
        """Build self.inputdict, mapping each unit location to its
        RC-escaped, charset-encoded translation."""
        for unit in store.units:
            if includefuzzy or not unit.isfuzzy():
                for location in unit.getlocations():
                    rcstring = unit.target
                    if len(rcstring.strip()) == 0:
                        # Untranslated entry: fall back to the source text.
                        rcstring = unit.source
                    self.inputdict[location] = rc.escape_to_rc(rcstring).encode(self.charset)

    def convertblock(self, block):
        """Return *block* with every translatable value replaced by its
        translation from self.inputdict."""
        newblock = block
        if isinstance(newblock, unicode):
            newblock = newblock.encode('utf-8')
        if newblock.startswith("LANGUAGE"):
            # Rewrite the LANGUAGE statement for the target language.
            return "LANGUAGE %s, %s" % (self.lang, self.sublang)
        for unit in self.templatestore.units:
            location = unit.getlocations()[0]
            if location in self.inputdict:
                if self.inputdict[location] != unit.match.groupdict()['value']:
                    # Splice the translated value into the matched source text,
                    # then substitute that whole match inside the block.
                    newmatch = unit.match.group().replace(unit.match.groupdict()['value'],
                                                          self.inputdict[location])
                    newblock = newblock.replace(unit.match.group(), newmatch)
        if isinstance(newblock, unicode):
            newblock = newblock.encode(self.charset)
        return newblock
def convertrc(inputfile, outputfile, templatefile, includefuzzy=False,
              charset=None, lang=None, sublang=None, outputthreshold=None):
    """Convert a PO file back into a Windows .rc file, using
    *templatefile* as the layout template.

    Returns False when the store's completeness is below
    *outputthreshold*, otherwise writes the result and returns 1.
    Raises ValueError when *lang* or *templatefile* is missing.
    """
    store = po.pofile(inputfile)
    if not convert.should_output_store(store, outputthreshold):
        return False
    if not lang:
        raise ValueError("must specify a target language")
    if templatefile is None:
        raise ValueError("must have template file for rc files")
    convertor = rerc(templatefile, charset, lang, sublang)
    outputfile.writelines(convertor.convertstore(store, includefuzzy))
    return 1
def main(argv=None):
    """Command-line entry point: configure and run the po->rc converter."""
    formats = {("po", "rc"): ("rc", convertrc)}
    parser = convert.ConvertOptionParser(formats, usetemplates=True,
                                         description=__doc__)
    default_charset = "utf-8"
    default_sublang = "SUBLANG_DEFAULT"
    parser.add_option("", "--charset", dest="charset", default=default_charset,
                      help="charset to use to decode the RC files (default: %s)" % default_charset,
                      metavar="CHARSET")
    parser.add_option("-l", "--lang", dest="lang", default=None,
                      help="LANG entry", metavar="LANG")
    parser.add_option("", "--sublang", dest="sublang", default=default_sublang,
                      help="SUBLANG entry (default: %s)" % default_sublang,
                      metavar="SUBLANG")
    # Forward these option values straight through to convertrc().
    for option_name in ("charset", "lang", "sublang"):
        parser.passthrough.append(option_name)
    parser.add_threshold_option()
    parser.add_fuzzy_option()
    parser.run(argv)
| gpl-2.0 |
AgainFaster/django-wombat-authenticator | wombat_authenticator/migrations/0001_initial.py | 1 | 5385 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the ``wombat_authenticator_wombattoken`` table and its
        unique (store, token) constraint."""
        # Adding model 'WombatToken'
        db.create_table('wombat_authenticator_wombattoken', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('store', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('token', self.gf('django.db.models.fields.CharField')(max_length=50)),
            # One token row per user; reachable as user.api_auth_token.
            ('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='api_auth_token', unique=True, to=orm['auth.User'])),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('wombat_authenticator', ['WombatToken'])

        # Adding unique constraint on 'WombatToken', fields ['store', 'token']
        db.create_unique('wombat_authenticator_wombattoken', ['store', 'token'])
def backwards(self, orm):
# Removing unique constraint on 'WombatToken', fields ['store', 'token']
db.delete_unique('wombat_authenticator_wombattoken', ['store', 'token'])
# Deleting model 'WombatToken'
db.delete_table('wombat_authenticator_wombattoken')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '72'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'wombat_authenticator.wombattoken': {
'Meta': {'unique_together': "(('store', 'token'),)", 'object_name': 'WombatToken'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'store': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'api_auth_token'", 'unique': 'True', 'to': "orm['auth.User']"})
}
}
complete_apps = ['wombat_authenticator'] | bsd-3-clause |
kurokochin/blog-ricky | accounts/views.py | 11 | 1477 | from django.contrib.auth import (
authenticate,
get_user_model,
login,
logout,
)
from django.shortcuts import render, redirect
from .forms import UserLoginForm, UserRegisterForm
def login_view(request):
    """Authenticate a user from the login form and redirect.

    Redirects to the ``next`` query parameter when present, otherwise to
    the site root.  On invalid form input the login form is re-rendered.
    """
    print(request.user.is_authenticated())
    # Renamed from ``next``: do not shadow the builtin of the same name.
    next_url = request.GET.get('next')
    title = "Login"
    form = UserLoginForm(request.POST or None)
    if form.is_valid():
        username = form.cleaned_data.get("username")
        password = form.cleaned_data.get('password')
        # NOTE(review): authenticate() may return None on bad credentials;
        # presumably UserLoginForm validates them in clean() -- confirm.
        user = authenticate(username=username, password=password)
        login(request, user)
        if next_url:
            return redirect(next_url)
        return redirect("/")
    return render(request, "form.html", {"form": form, "title": title})
def register_view(request):
    """Create a user from the registration form, log them in and redirect.

    Redirects to the ``next`` query parameter when present, otherwise to
    the site root.  On invalid form input the registration form is
    re-rendered.
    """
    print(request.user.is_authenticated())
    # Renamed from ``next``: do not shadow the builtin of the same name.
    next_url = request.GET.get('next')
    title = "Register"
    form = UserRegisterForm(request.POST or None)
    if form.is_valid():
        user = form.save(commit=False)
        password = form.cleaned_data.get('password')
        user.set_password(password)  # hash the password before persisting
        user.save()
        # Re-authenticate so the session backend attribute is populated.
        new_user = authenticate(username=user.username, password=password)
        login(request, new_user)
        if next_url:
            return redirect(next_url)
        return redirect("/")
    context = {
        "form": form,
        "title": title
    }
    return render(request, "form.html", context)
def logout_view(request):
    """Log the current user out and redirect to the site root."""
    logout(request)
    return redirect("/")
veger/ansible | lib/ansible/utils/hashing.py | 112 | 3173 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
# Note, sha1 is the only hash algorithm compatible with python2.4 and with
# FIPS-140 mode (as of 11-2014)
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
# Backwards compat only
try:
from hashlib import md5 as _md5
except ImportError:
try:
from md5 import md5 as _md5
except ImportError:
# Assume we're running in FIPS mode here
_md5 = None
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes
def secure_hash_s(data, hash_func=sha1):
    ''' Return a secure hash hex digest of data. '''
    hasher = hash_func()
    hasher.update(to_bytes(data, errors='surrogate_or_strict'))
    return hasher.hexdigest()
def secure_hash(filename, hash_func=sha1):
    ''' Return a secure hash hex digest of local file, None if file is not present or a directory. '''
    # Convert once and reuse: the original mixed errors='strict' and
    # errors='surrogate_or_strict' for the very same filename.
    b_filename = to_bytes(filename, errors='surrogate_or_strict')
    if not os.path.exists(b_filename) or os.path.isdir(b_filename):
        return None
    digest = hash_func()
    blocksize = 64 * 1024
    try:
        # Context manager guarantees the handle is closed even when a
        # read fails mid-file (the original leaked it in that case).
        with open(b_filename, 'rb') as infile:
            block = infile.read(blocksize)
            while block:
                digest.update(block)
                block = infile.read(blocksize)
    except IOError as e:
        raise AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
    return digest.hexdigest()
# The checksum algorithm must match with the algorithm in ShellModule.checksum() method
checksum = secure_hash      # file checksum (sha1 by default)
checksum_s = secure_hash_s  # in-memory string/bytes checksum (sha1 by default)
#
# Backwards compat functions. Some modules include md5s in their return values
# Continue to support that for now. As of ansible-1.8, all of those modules
# should also return "checksum" (sha1 for now)
# Do not use md5 unless it is needed for:
# 1) Optional backwards compatibility
# 2) Compliance with a third party protocol
#
# MD5 will not work on systems which are FIPS-140-2 compliant.
#
def md5s(data):
    """Return the MD5 hex digest of *data*; raise when MD5 is unavailable."""
    if _md5 is None:
        raise ValueError('MD5 not available. Possibly running in FIPS mode')
    return secure_hash_s(data, _md5)
def md5(filename):
    """Return the MD5 hex digest of *filename*; raise when MD5 is unavailable."""
    if _md5 is None:
        raise ValueError('MD5 not available. Possibly running in FIPS mode')
    return secure_hash(filename, _md5)
alsmirn/adist | extinction/arenou.py | 1 | 13341 | """
Copyright (c) 2009, Alexey Smirnov
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Saint-Petersburg State University nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY ALEXEY SMIRNOV ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL ALEXEY SMIRNOV BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# 199 plates, 4 coefficient vectors (alpha, beta, r0, gamma).
# Extinction model coefficients per sky plate; index 0 of each vector is a
# filler so that plate numbers 1..199 index the vectors directly.
ARENOU_FACTORS = (
    # Alpha coefficients
    (float(), # hack: fill zero position in list
    2.22534, 3.35436, 2.77637, 4.44190, 4.46685, 7.63699, 2.43412, 3.34481,
    1.40733, 1.64466, 2.12696, 2.34636, 2.77060, 1.96533, 1.93622, 1.05414,
    1.39990, 2.73481, 2.99784, 3.23802, 1.72679, 1.88890, 1.98973, 1.49901,
    0.90091, 1.94200,-0.37804,-0.15710, 3.20162, 1.95079, 1.91200, 2.50487,
    2.44394, 2.82440, 3.84362, 0.60365, 0.58307, 2.03861, 1.14271, 0.79908,
    0.94260, 1.66398, 1.08760, 1.20087, 1.13147, 1.97804, 1.40086, 2.06355,
    1.59260, 1.45589, 2.56438, 3.24095, 2.95627, 1.85158, 1.60770, 0.69920,
    1.36189, 0.33179, 1.70303, 1.97414, 1.07407, 1.69495, 1.51449, 1.87894,
    1.43670, 6.84802, 4.16321, 0.78135, 0.85535, 0.52521, 0.88376, 0.42228,
    0.71318, 0.99606, 0.91519, 0.85791, 1.44226, 2.55486, 3.18047, 2.11235,
    1.75884, 1.97257, 1.41497, 1.17795, 2.62556, 3.14461, 4.26624, 2.54447,
    2.27030, 1.34359, 1.76327, 2.20666, 1.50130, 2.43965, 3.35775, 2.60621,
    2.90112, 2.55377, 3.12598, 3.66930, 2.15465, 1.82465, 1.76269, 1.06085,
    1.21333, 0.58326, 0.74200, 0.67520, 0.62609, 0.61415, 0.58108, 0.68352,
    0.61747, 1.06827, 1.53631, 1.94300, 1.22185, 1.79429, 2.29545, 2.07408,
    2.94213, 3.04627, 3.78033, 2.18119, 1.45372, 1.05051, 0.48416, 0.61963,
    4.40348, 2.50938, 0.44180, 3.96084, 2.53335, 2.03760, 1.06946, 0.86348,
    0.30117, 0.75171, 1.97427, 1.25208, 0.89448, 0.81141, 0.83781, 1.10600,
    1.37040, 1.77590, 1.20865, 2.28830, 3.26278, 2.58100, 6.23279, 4.47693,
    1.22938, 0.84291, 0.23996, 0.40062, 0.56898,-0.95721,-0.19051, 2.31305,
    1.39169, 1.59418, 1.57082, 1.95998, 2.59567, 5.30273, 2.93960, 1.65864,
    1.71831, 1.33617,-0.31330, 1.51984,-0.50758, 1.25864, 1.54243, 2.72258,
    2.81545, 2.23818, 1.38587, 2.28570, 1.36385, 0.05943, 1.40171, 0.14718,
    0.57124, 3.69891, 1.19568, 0.69443, 1.11811, 1.10427,-0.42211, 0.87576,
    1.27477, 1.19512, 0.97581, 0.54379,-0.85054, 0.74347, 0.77310),
    # Beta coefficients
    (float(), # hack: fill zero position in list
    -6.00212,-14.74567, -9.62706,-19.92097,-26.07305,-46.10856,-8.69913,
    -13.93228,-3.43418, -3.97380, -6.05682, -8.17784, -9.52310,-5.63251,
    -13.31757,-2.36540, -1.35325,-11.70266,-11.64272,-11.63810,-6.05085,
    -5.51861, -4.86206, -3.75837, -1.30459, -6.26833, 10.77372, 5.03190,
    -10.59297,-4.73280, -4.97640, -8.63106, -9.17612, -4.78217,-8.04690,
    0.07893, -0.21053, -4.40843, -1.35635, 1.48074, 8.16346, 0.26775,
    -1.02443, -2.45407, -1.87916, -2.92838, -1.12403, -3.68278,-2.18754,
    -1.90598, -2.31586, -2.78217, -2.57422, -0.67716, 0.35279,-0.09146,
    -1.05290, 0.37629, -0.75246, -1.59784, -0.40066, -1.00071,-0.08441,
    -0.73314, 0.67706, -5.06864, -5.80016, -0.27826, 0.20848, 0.65726,
    -0.44519, -0.26304, -0.67229, -0.70103, -0.39690, -0.29115,-1.09775,
    -1.68293, -2.69796, -1.77506, -1.38574, 1.55545, -1.05722,-0.95012,
    -1.11097, -1.01140, -1.61242, -0.12771, -0.68720, -0.05416,-0.26407,
    -0.41651, -0.09855, -0.82128, -1.16400, -0.68687, -0.97988,-0.71214,
    -1.21437, -2.29731, -0.70690, -0.60223, -0.35945, -0.14211,-0.23225,
    -0.06097, -0.19293, -0.21041, -0.25312, -0.13788, 0.01195,-0.10743,
    0.02675, -0.26290, -0.36833, -0.71445, -0.40185, -0.48657,-0.84096,
    -0.64745, -2.09258, 7.71159, -3.91956, -2.4050, -0.49522,-1.01704,
    -0.27182, 0.41697, -2.95611, -0.56541, 1.58923, -3.37374,-0.40541,
    -0.66317, -0.87395, -0.65870, -0.16136, -0.57143, -2.02654,-1.47763,
    -0.43870, -0.51001, -0.44138, -0.86263, -1.02779, -1.26951,-0.70679,
    -1.71890, -0.94181, -1.69237,-10.30384, -7.28366, -1.19030,-1.59338,
    0.06304, -1.75628, -0.53331, 11.69217, 1.45670, -7.82531,-1.72984,
    -1.28296, -1.97295, -3.26159, -4.84133, -7.43033, -6.48049,-9.99317,
    -7.25286,-10.39799, 1.35622, -8.69502, 4.73320,-12.59627,-3.76065,
    -7.47806, -5.52139, 0.81772, -9.06536, -9.88812, -8.10127,-1.08126,
    -3.21783, 3.92670, -4.30242,-19.62204, -0.45043, -0.27600, 0.71179,
    -2.37654, 5.24037, -4.38033, -4.98307, -6.58464, -4.89869,-0.84403,
    13.01249, -1.39825, -4.4500),
    # r0: distance (kiloparsecs) at which the quadratic law switches to
    # the linear extension used beyond the plate's fitted range.
    (float(), # hack: fill zero position in list
    0.052, 0.035, 0.042, 0.025, 0.026, 0.014, 0.050, 0.035, 0.091, 0.074,
    0.056, 0.052, 0.145, 0.174, 0.073, 0.223, 0.252, 0.117, 0.129, 0.139,
    0.143, 0.171, 0.205, 0.199, 0.329, 0.155, 0.210, 0.294, 0.151, 0.206,
    0.192, 0.145, 0.133, 0.295, 0.239, 0.523, 0.523, 0.231, 0.421, 0.523,
    0.441, 0.523, 0.523, 0.245, 0.301, 0.338, 0.523, 0.280, 0.364, 0.382,
    0.554, 0.582, 0.574, 1.152, 0.661, 0.952, 0.647, 1.152, 1.132, 0.618,
    1.152, 0.847, 0.897, 1.152, 0.778, 0.676, 0.359, 1.152, 1.152, 1.152,
    0.993, 0.803, 0.530, 0.710, 1.152, 1.152, 0.657, 0.759, 0.589, 0.595,
    0.635, 0.634, 0.669, 0.620, 1.182, 1.555, 1.323, 1.300, 1.652, 2.000,
    2.000, 2.000, 1.475, 1.485, 0.841, 1.897, 1.480, 1.793, 1.287, 0.799,
    1.524, 1.515, 2.000, 2.000, 2.000, 2.000, 1.923, 1.604, 1.237, 2.000,
    2.000, 2.000, 2.000, 2.000, 2.000, 1.360, 1.520, 1.844, 1.365, 1.602,
    0.703, 0.355, 0.482, 0.453, 1.152, 0.516, 0.891, 1.152, 0.745, 1.152,
    0.949, 0.587, 1.152, 1.152, 0.612, 0.655, 0.933, 0.658, 0.487, 0.424,
    1.019, 0.795, 0.949, 0.641, 0.667, 0.699, 0.855, 0.666, 1.152, 0.763,
    0.302, 0.307, 0.516, 0.265, 0.523, 0.114, 0.523, 0.240, 0.376, 0.148,
    0.402, 0.523, 0.398, 0.300, 0.268, 0.357, 0.227, 0.083, 0.118, 0.064,
    0.329, 0.087, 0.250, 0.050, 0.205, 0.182, 0.255, 0.329, 0.076, 0.116,
    0.084, 0.027, 0.218, 0.252, 0.066, 0.094, 0.252, 0.153, 0.085, 0.123,
    0.184, 0.100, 0.128, 0.091, 0.100, 0.207, 0.126, 0.207, 0.08),
    # Gamma coefficients
    (float(), # hack: fill zero position in list
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.08336, 0.0, 0.0,
    0.12839, 0.16258, 0.0, 0.0, 0.12847, 0.17698, 0.08567,0.0,0.0,
    0.42624, 0.0, 0.60387, 0.0, 0.0, 0.0, 0.06013, 0.0, 0.20994, 0.01323,
    0.01961, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00043, 0.03264, 0.03339,
    0.00340, 0.0, 0.0, 0.0, 0.04928, 0.0, 0.0, 0.0, 0.0, 0.01959,0.00298,
    0.0, 0.0, 0.0, 0.15298, 0.33473, 0.14017, 0.20730, 0.08052, 0.0, 0.0,
    0.36962, 0.07459, 0.16602, 0.14437, 0.26859, 0.07661, 0.00849, 0.0,
    0.0, 0.02960, 0.15643, 0.07354, 0.0, 0.0, 0.12750, 0.05490, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.08639, 0.47171, 0.0, 0.0, 0.0, 0.34109, 0.0, 0.0,
    0.29230, 0.09089, 0.07495, 0.00534, 0.0, 0.31600, 0.0, 0.03505,
    0.02820, 0.03402, 0.05608, 0.06972, 0.02902, 0.22887, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
# Sky partition into plates: maps a galactic-latitude band
# (b_min, b_max) to a tuple of (longitude upper bound, plate number)
# pairs; the pairs are ordered by increasing longitude, so the first
# pair whose bound exceeds l identifies the plate.
PLATES_DESCRIPTION = {
    (-90, -60): (
        ( 29, 1), ( 57, 2), ( 85, 3), (110, 4), (150, 5), (180, 6),
        (210, 7), (240, 8), (270, 9), (300, 10), (330, 11), (360, 12)),
    (-60, -45): (
        ( 30, 13), ( 60, 14), (110, 15), (180, 16), (210, 17), (240, 18),
        (270, 19), (300, 20), (330, 21), (360, 22)),
    (-45, -30): (
        ( 30, 23), ( 60, 24), ( 90, 25), (120, 26), (160, 27), (200, 28),
        (235, 29), (265, 30), (300, 31), (330, 32), (360, 33)),
    (-30, -15): (
        ( 20, 34), ( 40, 35), ( 80, 36), (100, 37), (120, 38), (140, 39),
        (160, 40), (180, 41), (200, 42), (220, 43), (240, 44), (260, 45),
        (280, 46), (300, 47), (320, 48), (340, 49), (360, 50)),
    (-15, -5): (
        ( 10, 51), ( 20, 52), ( 30, 53), ( 40, 54), ( 50, 55), ( 60, 56),
        ( 80, 57), ( 90, 58), (100, 59), (110, 60), (120, 61), (130, 62),
        (140, 63), (150, 64), (160, 65), (180, 66), (190, 67), (200, 68),
        (210, 69), (220, 70), (230, 71), (240, 72), (250, 73), (260, 74),
        (270, 75), (280, 76), (290, 77), (300, 78), (310, 79), (320, 80),
        (330, 81), (340, 82), (350, 83), (360, 84)),
    (-5, 5): (
        ( 10, 85), ( 20, 86), ( 30, 87), ( 40, 88), ( 50, 89), ( 60, 90),
        ( 70, 91), ( 80, 92), ( 90, 93), (100, 94), (110, 95), (120, 96),
        (130, 97), (140, 98), (150, 99), (160, 100), (170, 101), (180, 102),
        (190, 103), (200, 104), (210, 105), (220, 106), (230, 107), (240, 108),
        (250, 109), (260, 110), (270, 111), (280, 112), (290, 113), (300, 114),
        (310, 115), (320, 116), (330, 117), (340, 118), (350, 119),
        (360, 120)),
    (5, 15): (
        ( 10, 121), ( 30, 122), ( 40, 123), ( 50, 124), ( 60, 125), ( 70, 126),
        ( 80, 127), ( 90, 128), (100, 129), (120, 130), (130, 131), (140, 132),
        (160, 133), (170, 134), (200, 135), (210, 136), (230, 137), (240, 138),
        (250, 139), (260, 140), (270, 141), (280, 142), (290, 143), (300, 144),
        (310, 145), (320, 146), (330, 147), (340, 148), (350, 149),
        (360, 150)),
    (15, 30): (
        ( 20, 151), ( 40, 152), ( 60, 153), ( 80, 154), (100, 155), (140, 156),
        (180, 157), (200, 158), (220, 159), (240, 160), (260, 161), (280, 162),
        (300, 163), (320, 164), (340, 165), (360, 166)),
    (30, 45):(
        ( 20, 167), ( 50, 168), ( 80, 169), (110, 170), (160, 171), (190, 172),
        (220, 173), (250, 174), (280, 175), (320, 176), (340, 177),
        (360, 178)),
    (45, 60): (
        ( 60, 179), ( 90, 180), (110, 181), (170, 182), (200, 183), (230, 184),
        (290, 185), (330, 186), (360, 187)),
    (60, 90): (
        ( 30, 188), ( 60, 189), ( 90, 190), (120, 191), (150, 192), (180, 193),
        (210, 194), (240, 195), (270, 196), (300, 197), (330, 198),
        (360, 199))}
def __get_plate_no(b, l):
    """Return the plate identifier covering the direction (l, b).

    @param l: galactic longitude.
    @type l: float.
    @param b: galactic latitude,
    @type b: float.
    @return plate_no: unique identifier of plate in (l, b) direction.
    @type plate_no: integer.
    @author: Alexey Smirnov
    """
    plate_no = 0
    for (b_low, b_high), sectors in PLATES_DESCRIPTION.items():
        if not (b_low <= b < b_high):
            continue
        l_low = 0.0
        # The tuple unpacking rebinds plate_no on every sector, so on
        # fall-through the last plate of the matching band is returned
        # (identical to the original behaviour for l at the upper edge).
        for l_high, plate_no in sectors:
            if l_low <= l < l_high:
                return plate_no
            l_low = l_high
    return plate_no
def av_arenou(r, l, b):
    """
    @param r: distance in parsecs.
    @param l: galactic longitude.
    @param b: galactic latitude.
    @return a_v: full extinction value in visual band.
    @author: Alexey Smirnov
    @note: computations are based on next paper results: "A tridimensional
    model of the galactic interstellar extinction",
    Arenou, F.; Grenon, M.; Gomez, A.,
    Astronomy and Astrophysics (ISSN 0004-6361), vol. 258, no. 1, p. 104-111.
    """
    plate_no = __get_plate_no(b, l)
    alpha = ARENOU_FACTORS[0][plate_no]
    beta = ARENOU_FACTORS[1][plate_no]
    r0 = ARENOU_FACTORS[2][plate_no]
    gamma = ARENOU_FACTORS[3][plate_no]
    r_kpc = r / 1000.0  # the model coefficients are fitted in kiloparsecs
    if r_kpc < 0.0:
        return 0.0
    if r_kpc < r0:
        # Quadratic law inside the plate's fitted range.
        a_v = alpha * r_kpc + beta * r_kpc ** 2
    else:
        # Linear extension beyond r0.
        a_v = alpha * r0 + beta * r0 ** 2 + gamma * (r_kpc - r0)
    # Some beta coefficients are negative and can drive the quadratic
    # below zero; clamp to a non-negative extinction.
    return max(a_v, 0.0)
| bsd-3-clause |
SophieIPP/ipp-macro-series-parser | ipp_macro_series_parser/denombrements_fiscaux/agregats_ipp.py | 1 | 36244 | # -*- coding: utf-8 -*-
# TAXIPP -- A French microsimulation model
# By: IPP <taxipp@ipp.eu>
#
# Copyright (C) 2012, 2013, 2014, 2015 IPP
# https://github.com/taxipp
#
# This file is part of TAXIPP.
#
# TAXIPP is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# TAXIPP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import collections
import numpy
import os
import pandas
import pkg_resources
from py_expression_eval import Parser
from ipp_macro_series_parser.config import Config
from ipp_macro_series_parser.denombrements_fiscaux.parsers import (
get_denombrements_fiscaux_data_frame)
from ipp_macro_series_parser.data_extraction import get_or_construct_value
# Locate configuration files relative to the installed package distribution.
config_parser = Config(
    config_files_directory = os.path.join(pkg_resources.get_distribution('ipp-macro-series-parser').location)
    )
def update_index_by_variable_name_appearing_in_formula(index_by_variable_name, formula):
parser_formula = Parser()
try:
expr = parser_formula.parse(formula)
except Exception, e:
print formula
raise(e)
formula_variables = expr.variables()
components = dict(
(formula_variable, {'code': formula_variable}) for formula_variable in formula_variables
)
index_by_variable_name.update(components)
return index_by_variable_name
def create_index_by_variable_name(formula_by_variable_name, level_2_formula_by_variable_name = None):
    """Build the variable index consumed by get_or_construct_value.

    Every variable with a non-empty formula gets an entry, plus one entry
    per elementary variable appearing inside its formula(s).  Level-2
    formulas are then layered on top, overriding same-named entries.
    """
    index_by_variable_name = {}
    for variable_name, formula in formula_by_variable_name.iteritems():
        if not formula:
            continue
        index_by_variable_name[variable_name] = {
            'code': None,
            'formula': formula,
            }
        # A formula is either a single expression string or a list of
        # dated dicts each carrying a 'formula' key; normalise to a list.
        pieces = formula if isinstance(formula, list) else [dict(formula = formula)]
        for piece in pieces:
            index_by_variable_name = update_index_by_variable_name_appearing_in_formula(
                index_by_variable_name, piece['formula'])
    if level_2_formula_by_variable_name is not None:
        for variable_name, formula in level_2_formula_by_variable_name.iteritems():
            index_by_variable_name[variable_name] = dict(formula = formula)
    return index_by_variable_name
def build_aggregates(raw_data, formula_by_variable_name, level_2_formula_by_variable_name = None, years = None,
fill_value = numpy.NaN):
assert years is not None
aggregates = None
index_by_variable_name = create_index_by_variable_name(formula_by_variable_name, level_2_formula_by_variable_name)
for variable_name in formula_by_variable_name.keys() + level_2_formula_by_variable_name.keys():
serie, formula = get_or_construct_value(
raw_data, variable_name, index_by_variable_name, years = years, fill_value = fill_value)
serie = serie.reset_index().drop_duplicates().set_index('year')
assert not numpy.any(serie.index.duplicated()), 'Duplicated index for {} : {}'.format(
variable_name, serie)
if aggregates is None:
aggregates = serie
else:
try:
aggregates = pandas.concat([aggregates, serie], axis = 1, verify_integrity = True)
except Exception, e:
print "aggregates", aggregates
print "serie", serie
raise(e)
return aggregates
formula_by_variable_name = dict(
## Salaires
salaires_imposables = [
dict(
start = 1990,
end = 2004,
formula = 'f1aj + f1bj + f1cj + f1dj + f1ej + + f1fj',
),
dict(
start = 2005,
end = 2006,
formula = 'f1aj + f1bj + f1cj + f1dj + f1ej',
),
dict(
start = 2007,
end = 2013,
formula = 'f1aj + f1bj + f1cj + f1dj',
),
dict(
start = 2014,
end = 2015,
formula = 'f1aj + f1bj + f1cj + f1dj',
),
],
heures_supplementaires = [
dict(
start = 2007,
end = 2013,
formula = ' + f1au + f1bu + f1cu + f1du', # les heures sup effectuées en 2012 payées en 2013 ...
),
],
## Bénéfices agricoles
benefices_agricoles_forfait_exoneres = 'f5hn + f5in + f5jn', # frag_exon
benefices_agricoles_forfait_imposables = 'f5ho + f5io + f5jo', # frag_impo
benefices_agricoles_reels_exoneres = 'f5hb + f5ib + f5jb', # arag_exon
benefices_agricoles_reels_imposables = [
dict(
start = 1990,
end = 2005,
formula = 'f5hc + f5ic + f5jc + f5hd + f5id + f5jd'
),
dict(
start = 2006,
end = 2013,
formula = 'f5hc + f5ic + f5jc'
),
], # arag_impg TODO: check last values in openfisca
benefices_agricoles_reels_deficits = 'f5hf + f5if + f5jf', # arag_defi
benefices_agricoles_reels_sans_cga_exoneres = 'f5hh + f5ih + f5jh', # nrag_exon
benefices_agricoles_reels_sans_cga_imposables = [
dict(
start = 1990,
end = 2005,
formula = 'f5hi + f5ii + f5ji + f5hj + f5ij + f5jj',
),
dict(
start = 2006,
end = 2013,
formula = 'f5hi + f5ii + f5ji',
),
], # nrag_impg TODO: check last values in openfisca
# TODO voir années antérieures à 2006
benefices_agricoles_reels_sans_cga_deficits = 'f5hl + f5il + f5jl', # nrag_defi
# TODO: benefices_agricoles_ = 'f5hm + f5im + f5jm', # nrag_ajag
## Bénéfices industriels et commerciaux professionnels (déclaration complémentaire, cadres 5B)
benefices_industriels_commerciaux_professionnels_micro_entreprise_vente = 'f5ko + f5lo + f5mo', # mbic_impv
# TODO erreur car 2 fois la mm ligne
benefices_industriels_commerciaux_professionnels_micro_entreprise_services = 'f5kp + f5lp + f5mp', # mbic_imps
benefices_industriels_commerciaux_professionnels_reels_exoneres = 'f5kb + f5lb + f5mb', # mbic_imps
benefices_industriels_commerciaux_professionnels_reels_imposables_normal = [
dict(
start = 2003,
end = 2009,
formula = 'f5kc + f5lc + f5mc', # abic_impn
),
],
benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie = [
dict(
start = 2003,
end = 2009,
formula = 'f5kd + f5ld + f5md', # abic_imps
),
],
benefices_industriels_commerciaux_professionnels_reels_imposables_normal_et_simplifie = [
dict(
start = 2010,
end = 2014,
formula = 'f5kc + f5lc + f5mc', # abic_impn
),
],
benefices_industriels_commerciaux_professionnels_reels_exoneres_sans_cga = 'f5kh + f5lh + f5mh', # nbic_exon
benefices_industriels_commerciaux_professionnels_reels_imposables_normal_sans_cga = 'f5ki + f5li + f5mi', # nbic_impn
benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie_sans_cga = 'f5kj + f5lj + f5mj', # nbic_mvct
deficits_industriels_commerciaux_professionnels_normal = [
dict(
start = 2006, # au moins
end = 2009,
formula = 'f5kf + f5lf + f5mf', # abic_defn
),
],
deficits_industriels_commerciaux_professionnels_simplifie = [
dict(
start = 2006, # au moins
end = 2009,
formula = 'f5kg + f5lg + f5mg', # abic_defs
),
],
deficits_industriels_commerciaux_professionnels_normal_et_simplifie = [
dict(
start = 2010,
end = 2014, # au moins
formula = 'f5kf + f5lf + f5mf',
)
],
deficits_industriels_commerciaux_professionnels_normal_sans_cga = [
dict(
start = 2006, # au moins
end = 2009,
formula = 'f5kl + f5ll + f5ml', # nbic_defn
),
],
deficits_industriels_commerciaux_professionnels_simplifie_sans_cga = [
dict(
start = 2006, # au moins
end = 2009,
formula = 'f5km + f5lm + f5mm',
),
],
deficits_industriels_commerciaux_professionnels_normal_et_simplifie_sans_cga = [
dict(
start = 2010,
end = 2014, # au moins
formula = 'f5kl + f5ll + f5ml',
),
],
# deficits_industriels_commerciaux_professionnels_locations = [
## dict(
## start = 1990,
## end = 2008,
## formula = 'f5km + f5lm + f5mm',
## ),
# dict(
# start = 2009,
# end = 2014,
# formula = 'f5qa + f5ra + f5sa',
# ),
# ], # nbic_defs
## Bénéfices industriels et commerciaux non professionnels (déclaration complémentaire, cadres 5C)
benefices_industriels_commerciaux_non_professionnels_micro_entreprise_exoneres = 'f5nn + f5on + f5pn', # macc_exon
benefices_industriels_commerciaux_non_professionnels_micro_entreprise_vente = 'f5no + f5oo + f5po', # macc_impv
benefices_industriels_commerciaux_non_professionnels_micro_entreprise_services = 'f5np + f5op + f5op', # macc_impS
benefices_industriels_commerciaux_non_professionnels_reels_exoneres = 'f5nb + f5ob + f5pb', # aacc_exon
# benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal = 'f5nc + f5nc + f5nc', # aacc_impn # TODO: normal si 3 fois la même chose ?
benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal = [
dict(
start = 2003,
end = 2009,
formula = 'f5nc + f5oc + f5pc', # aacc_impn
),
],
# benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie = 'f5nd + f5nd + f5nd', # aacc_imps TODO: ceci avant 2010 mais après locations meublées pro, # TODO: normal si 3 fois la même chose ?
benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie = [
dict(
start = 2003,
end = 2009,
formula = 'f5nd + f5od + f5pd', # aacc_imps TODO: ceci avant 2010 mais après locations meublées pro,
),
],
benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal_et_simplifie = [
dict(
start = 2010,
end = 2014,
formula = 'f5nc + f5oc + f5pc',
),
],
benefices_industriels_commerciaux_non_professionnels_reels_exoneres_sans_cga = 'f5nh + f5oh + f5ph', # nacc_exon
benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal_sans_cga = 'f5ni + f5ni + f5ni', # nacc_impn
benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie_sans_cga = 'f5nj + f5nj + f5nj', # nacc_meup TODO: ceci avant 2012 mais après locations déjà soumises aux prélèvements sociaux,
deficits_industriels_commerciaux_non_professionnels_normal = [ #'f5nf + f5of + f5pf', # aacc_defn
dict(
start = 2002, # au moins
end = 2009,
formula = 'f5nf + f5of + f5pf',
),
],
deficits_industriels_commerciaux_non_professionnels_simplifie = [ #'f5ng + f5og + f5pg', # aacc_gits
dict(
start = 2002, # au moins
end = 2009,
formula = 'f5ng + f5og + f5pg',
),
],
deficits_industriels_commerciaux_non_professionnels_normal_sans_cga = [
dict(
start = 2002, # au moins
end = 2009,
formula = 'f5nl + f5ol + f5pl',
),
],
deficits_industriels_commerciaux_non_professionnels_simplifie_sans_cga = [
dict(
start = 2002, # au moins
end = 2009,
formula = 'f5nm + f5om + f5pm',
),
],
deficits_industriels_commerciaux_non_professionnels_normal_et_simplifie = [
dict(
start = 2010,
end = 2014, # au moins
formula = 'f5nf + f5of + f5pf',
),
],
deficits_industriels_commerciaux_non_professionnels_normal_et_simplifie_sans_cga = [
dict(
start = 2010,
end = 2014, # au moins
formula = 'f5nl + f5ol + f5pl',
),
],
# deficits_industriels_commerciaux_non_professionnels_sans_cga = 'f5nl + f5ol + f5pl', # nacc_defn
# TODO: Locations déjà soumises aux prélèvements sociaux sans CGA (régime du bénéfice réel)
# deficits_industriels_commerciaux_non_professionnels_locations = 'f5ny + f5oy + f5py',
# - Détails Bénéfices non commerciaux professionnels (déclaration complémentaire, cadres 5D)
benefices_non_commerciaux_professionnels_micro_entreprise_imposables = 'f5hq + f5iq + f5jq', # mbnc_impo
benefices_non_commerciaux_professionnels_declaration_controlee = 'f5qc + f5rc + f5sc', #
benefices_non_commerciaux_professionnels_declaration_controlee_sans_cga = 'f5qi + f5ri + f5si', #
deficits_non_commerciaux_professionnels_declaration_controlee = 'f5qe + f5re + f5se', #
deficits_non_commerciaux_professionnels_declaration_controlee_sans_cga = 'f5qk + f5rk + f5sk', #
# - Détails Bénéfices non commerciaux non professionnels (déclaration complémentaire, cadres 5E)
benefices_non_commerciaux_non_professionnels_micro_entreprise_imposables = 'f5ku + f5lu + f5mu',
benefices_non_commerciaux_non_professionnels_declaration_controlee = 'f5jg + f5rf + f5sf',
benefices_non_commerciaux_non_professionnels_declaration_controlee_sans_cga = 'f5sn + f5ns + f5os',
deficits_non_commerciaux_non_professionnels_declaration_controlee = 'f5jj + f5rg + f5sg',
deficits_non_commerciaux_non_professionnels_declaration_controlee_sans_cga = 'f5sp + f5nu + f5ou',
# - Revenus fonciers
revenus_fonciers_regime_normal = 'f4ba', # f4ba
revenus_fonciers_micro_foncier = 'f4be', # f4be
# Missing financier (tout est par case dans of)
# - Déficits des années antérieures non encore déduits
# Missing foncier
# - rentes viagères : 'f1aw', 'f1bw', 'f1cw', 'f1dw'
# - Déficits : 'f4bb', et suivantes... 'f4bd'
# Missing plus value
# - Gains de levée d'options sur titres 'f1tv' 'f1uv' 'f1tw' 'f1uw' 'f1tx' 'f1ux'
# Missing revenus_de_remplacement
# - chomeur_longue_duree: 'f1ai', 'f1bi', 'f1ci', 'f1di'
# Missing salarie
# - sal_pen_exo_etr (start = 2013, 1ac, 1bc, 1cc, 1cd)
frais_reels = [
dict(
end = 2014,
start = 2005,
formula = 'f1ak + f1bk + f1ck + f1dk',
),
dict(
end = 2004,
start = 2004,
formula = 'f1ak + f1bk + f1ck + f1dk + f1ek',
),
dict(
start = 2003,
end = 2003,
formula = 'f1ak + f1bk + f1ck + f1dk + f1ek + f1fk',
),
],
# - hsup (f1au, f1bu, f1cu, f1du, f1eu) start 2007
# -
allocations_chomage = [
dict(
start = 2007,
end = 2013,
formula = 'f1ap + f1bp + f1cp + f1dp',
),
dict(
start = 2005,
end = 2006,
formula = 'f1ap + f1bp + f1cp + f1dp + f1ep',
),
dict(
start = 2000,
end = 2004,
formula = 'f1ap + f1bp + f1cp + f1dp + f1ep + f1fp',
),
], # choi
#
pensions_de_retraite = [
dict(
start = 2007,
end = 2013,
formula = 'f1as + f1bs + f1cs + f1ds',
),
dict(
start = 2005,
end = 2006,
formula = 'f1as + f1bs + f1cs + f1ds + f1es',
),
dict(
start = 2000,
end = 2004,
formula = 'f1as + f1bs + f1cs + f1ds + f1es + f1fs',
),
], # rsti
dividendes_imposes_au_bareme = 'f2dc + f2fu', # 'f2dc + f2fu' non agrégés
interet_imposes_au_bareme = 'f2ts + f2go + f2tr', # non agrégés
assurances_vie_imposees_au_bareme = 'f2ch', # non agrégés
dividendes_imposes_au_prelevement_liberatoire = 'f2da',
interets_imposes_au_prelevement_liberatoire = 'f2ee',
assurances_vie_imposees_au_prelevement_liberatoire = 'f2dh',
plus_values_mobilieres_regime_normal = 'f3vg',
plus_values_mobilieres_stock_options = 'f3vf + f3vi', # PV stock options 1, stock options 2, TODO Différencier ?
plus_values_mobilieres_retraite_dirigeant = 'f3va', # TODO f3vb ?
plus_values_professionnelles_regime_normal = [
dict(
start = 2007, # au moins
end = 2009,
formula = 'f5hz + f5iz + f5jz', # TODO: ceci n'est valable qu'avant 2010
),
dict(
start = 2010,
end = 2013, # DONE
formula = 'f5hx + f5ix + f5jx + f5he + f5ie + f5je + f5kq + f5lq + f5ke + f5le + f5me + f5nq + f5oq + f5pq + f5ne + f5oe + f5pe + f5hr + f5ir + f5jr + f5qd + f5rd + f5sd + f5kv + f5lv + f5mv + f5so + f5nt', # + f5mq + f5ot
),
],
plus_values_professionnelles_retraite_dirigeant = 'f5hg + f5ig',
revenus_distribues_pea_exoneres = [
dict(
start = 2009,
end = 2009,
formula = 'f2gr',
),
],
pensions_alimentaires_percues = 'f1ao + f1bo + f1co + f1do + f1eo + f1fo', # pensions_alimentaires_percues
pensions_alimentaires_verses = 'f6gi + f6gj + f6el + f6em + f6gp + f6gu + f6dd',
)
# Second-level aggregation formulas: each entry combines first-level aggregates
# (built from raw tax-form cells in formula_by_variable_name) into economic
# aggregates. A value is either a single formula string, or a list of
# dict(start, end, formula) entries when the formula changed across fiscal years.
level_2_formula_by_variable_name = dict(
    salaires = 'salaires_imposables + heures_supplementaires',
    revenus_d_activite_non_salariee = 'benefices_agricoles + benefices_industriels_commerciaux + benefices_non_commerciaux',  # + revenus_activite_non_salariee_exoneres',
    # TODO get parameters from openfisca legislation
    benefices_agricoles = 'benefices_agricoles_bruts - 0.5 * deficits_agricoles',
    benefices_agricoles_bruts = 'benefices_agricoles_forfait_imposables + benefices_agricoles_reels_imposables + 1.25 * benefices_agricoles_reels_sans_cga_imposables',  # TODO get parameters from openfisca legislation
    deficits_agricoles = 'benefices_agricoles_reels_deficits + benefices_agricoles_reels_sans_cga_deficits',
    # Bénéfices industriels et commerciaux
    benefices_industriels_commerciaux = 'benefices_industriels_commerciaux_professionnels + benefices_industriels_commerciaux_non_professionnels',
    benefices_industriels_commerciaux_bruts = 'benefices_industriels_commerciaux_professionnels_bruts + benefices_industriels_commerciaux_non_professionnels_bruts',
    deficits_industriels_commerciaux = 'deficits_industriels_commerciaux_professionnels + deficits_industriels_commerciaux_non_professionnels',
    # - Bénéfices industriels et commerciaux professionnels
    benefices_industriels_commerciaux_professionnels = 'benefices_industriels_commerciaux_professionnels_bruts - 0.5 * deficits_industriels_commerciaux_professionnels',
    benefices_industriels_commerciaux_professionnels_bruts = 'benefices_industriels_commerciaux_professionnels_micro_entreprise + benefices_industriels_commerciaux_professionnels_reels',
    benefices_industriels_commerciaux_professionnels_micro_entreprise = '(1 - 0.71) * benefices_industriels_commerciaux_professionnels_micro_entreprise_vente + (1 - 0.5) * benefices_industriels_commerciaux_professionnels_micro_entreprise_services',  # TODO check and use legislation parameters
    benefices_industriels_commerciaux_professionnels_reels = 'benefices_industriels_commerciaux_professionnels_reels_avec_cga + benefices_industriels_commerciaux_professionnels_reels_sans_cga',
    benefices_industriels_commerciaux_professionnels_reels_avec_cga = 'benefices_industriels_commerciaux_professionnels_reels_imposables_normal + benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie',
    benefices_industriels_commerciaux_professionnels_reels_sans_cga = '1.25 * (benefices_industriels_commerciaux_professionnels_reels_imposables_normal_sans_cga + benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie_sans_cga)',  # TODO check and use legislation
    deficits_industriels_commerciaux_professionnels = [
        dict(
            start = 2006,
            end = 2009,
            formula = 'deficits_industriels_commerciaux_professionnels_normal + deficits_industriels_commerciaux_professionnels_simplifie + deficits_industriels_commerciaux_professionnels_normal_sans_cga + deficits_industriels_commerciaux_professionnels_simplifie_sans_cga',
            ),
        dict(
            start = 2010,
            end = 2014,
            formula = 'deficits_industriels_commerciaux_professionnels_normal_et_simplifie + deficits_industriels_commerciaux_professionnels_normal_et_simplifie_sans_cga'
            ),
        ],
    # - Bénéfices industriels et commerciaux non professionnels (déclaration complémentaire, cadres 5C)
    benefices_industriels_commerciaux_non_professionnels = 'benefices_industriels_commerciaux_non_professionnels_bruts - 0.5 * deficits_industriels_commerciaux_non_professionnels',
    benefices_industriels_commerciaux_non_professionnels_bruts = 'benefices_industriels_commerciaux_non_professionnels_micro_entreprise + benefices_industriels_commerciaux_non_professionnels_reels',
    benefices_industriels_commerciaux_non_professionnels_micro_entreprise = '(1 - 0.71) * benefices_industriels_commerciaux_non_professionnels_micro_entreprise_vente + (1 - 0.5) * benefices_industriels_commerciaux_non_professionnels_micro_entreprise_services',  # TODO check and use legislation parameters
    benefices_industriels_commerciaux_non_professionnels_reels = 'benefices_industriels_commerciaux_non_professionnels_reels_avec_cga + benefices_industriels_commerciaux_non_professionnels_reels_sans_cga',
    benefices_industriels_commerciaux_non_professionnels_reels_avec_cga = [
        dict(
            start = 2003,
            end = 2009,
            formula = 'benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal + benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie',
            ),
        dict(
            start = 2010,
            end = 2014,
            formula = 'benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal_et_simplifie',
            ),
        ],
    benefices_industriels_commerciaux_non_professionnels_reels_sans_cga = '1.25 * (benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal_sans_cga + benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie_sans_cga)',  # TODO check and use legislation
    # Bénéfices non commerciaux
    benefices_non_commerciaux = 'benefices_non_commerciaux_professionnels + benefices_non_commerciaux_non_professionnels',
    benefices_non_commerciaux_bruts = 'benefices_non_commerciaux_professionnels_bruts + benefices_non_commerciaux_non_professionnels_bruts',
    deficits_non_commerciaux = 'deficits_non_commerciaux_professionnels + deficits_non_commerciaux_non_professionnels',
    # NOTE(review): this "industriels et commerciaux" entry sits inside the
    # "Bénéfices non commerciaux" section — presumably only an ordering
    # accident (dict kwargs are order-insensitive here), but confirm.
    deficits_industriels_commerciaux_non_professionnels = [
        dict(
            start = 2002,  # au moins
            end = 2009,
            formula = 'deficits_industriels_commerciaux_non_professionnels_normal + deficits_industriels_commerciaux_non_professionnels_simplifie + deficits_industriels_commerciaux_non_professionnels_normal_sans_cga + deficits_industriels_commerciaux_non_professionnels_simplifie_sans_cga'
            ),
        dict(
            start = 2010,
            end = 2014,  # au moins
            formula = 'deficits_industriels_commerciaux_non_professionnels_normal_et_simplifie + deficits_industriels_commerciaux_non_professionnels_normal_et_simplifie_sans_cga',
            ),
        ],
    # - Bénéfices non commerciaux professionnels (déclaration complémentaire, cadres 5D)
    benefices_non_commerciaux_professionnels = 'benefices_non_commerciaux_professionnels_bruts - 0.5 * deficits_non_commerciaux_professionnels',
    benefices_non_commerciaux_professionnels_bruts = '(1 - 0.34) * benefices_non_commerciaux_professionnels_micro_entreprise_imposables + benefices_non_commerciaux_professionnels_declaration_controlee + 1.25 * benefices_non_commerciaux_professionnels_declaration_controlee_sans_cga',
    deficits_non_commerciaux_professionnels = 'deficits_non_commerciaux_professionnels_declaration_controlee + deficits_non_commerciaux_professionnels_declaration_controlee_sans_cga',
    # - Bénéfices non commerciaux non professionnels (déclaration complémentaire, cadres 5E)
    benefices_non_commerciaux_non_professionnels = 'benefices_non_commerciaux_non_professionnels_bruts - 0.5 * deficits_non_commerciaux_non_professionnels',
    benefices_non_commerciaux_non_professionnels_bruts = '(1 - 0.34) * benefices_non_commerciaux_non_professionnels_micro_entreprise_imposables + benefices_non_commerciaux_non_professionnels_declaration_controlee + 1.25 * benefices_non_commerciaux_non_professionnels_declaration_controlee_sans_cga',
    deficits_non_commerciaux_non_professionnels = 'deficits_non_commerciaux_non_professionnels_declaration_controlee + deficits_non_commerciaux_non_professionnels_declaration_controlee_sans_cga',
    # Revenus Fonciers
    revenus_fonciers = 'revenus_fonciers_regime_normal + revenus_fonciers_micro_foncier',
    revenus_de_remplacement = 'pensions_de_retraite + allocations_chomage',
    revenus_financiers_hors_plus_values = 'revenus_imposes_au_bareme + revenus_imposes_au_prelevement_liberatoire',
    revenus_financiers = 'revenus_imposes_au_bareme + revenus_imposes_au_prelevement_liberatoire + plus_values',
    plus_values = 'plus_values_mobilieres + plus_values_professionnelles',
    plus_values_mobilieres = 'plus_values_mobilieres_regime_normal + plus_values_mobilieres_stock_options + plus_values_mobilieres_retraite_dirigeant',  # analysis:ignore
    plus_values_professionnelles = 'plus_values_professionnelles_regime_normal + plus_values_professionnelles_retraite_dirigeant',  # analysis:ignore
    revenus_imposes_au_bareme = 'dividendes_imposes_au_bareme + interet_imposes_au_bareme + assurances_vie_imposees_au_bareme',  # analysis:ignore
    revenus_imposes_au_prelevement_liberatoire = 'dividendes_imposes_au_prelevement_liberatoire + interets_imposes_au_prelevement_liberatoire + assurances_vie_imposees_au_prelevement_liberatoire',  # analysis:ignore
    )
#
#raw_data = get_denombrements_fiscaux_data_frame(years = [2010], fill_value = 0)
#aggregates = build_aggregates(
# raw_data,
# formula_by_variable_name,
# level_2_formula_by_variable_name = level_2_formula_by_variable_name,
# years = [2010],
# fill_value = numpy.NaN,
# )
def build_irpp_tables(years = None, fill_value = numpy.NaN):
    """Build the IRPP summary tables for the given fiscal years.

    Parameters
    ----------
    years : list of int (required)
        Fiscal years to aggregate over.
    fill_value : scalar, default numpy.NaN
        Value used by ``build_aggregates`` for missing cells.

    Returns
    -------
    collections.OrderedDict mapping table name ('irpp_1' .. 'irpp_4') to a
    column-subset of the aggregates frame.
    """
    assert years is not None
    assert isinstance(years, list)
    denombrements = get_denombrements_fiscaux_data_frame(years = years, fill_value = 0)
    aggregates = build_aggregates(
        denombrements,
        formula_by_variable_name,
        level_2_formula_by_variable_name = level_2_formula_by_variable_name,
        years = years,
        fill_value = fill_value,
        )
    # Column selection per published table; order matters for presentation.
    columns_by_table_name = collections.OrderedDict([
        # 1. Tableau IRPP1: Les revenus figurant dans les déclarations de revenus
        # TODO: add 'revenus_d_activite_non_salariee', 'ba', 'bic', 'bnc',
        # 'revenus_activite_non_salariee_exoneres'
        ('irpp_1', [
            'salaires',
            'salaires_imposables',
            'heures_supplementaires',
            'revenus_de_remplacement',
            'pensions_de_retraite',
            'allocations_chomage',
            'revenus_fonciers',
            'revenus_fonciers_regime_normal',
            'revenus_fonciers_micro_foncier',
            'revenus_financiers',
            'frais_reels',
            'pensions_alimentaires_percues',
            ]),
        # 2. Tableau IRPP2: Détails des revenus financiers (intérêts, dividendes,
        # plus-values) figurant dans les déclations de revenus (imposition au
        # barème, au prélèvement forfaitaire libératoire (PL) et plus-values)
        ('irpp_2', [
            'revenus_imposes_au_bareme',
            'dividendes_imposes_au_bareme',
            'interet_imposes_au_bareme',
            'assurances_vie_imposees_au_bareme',
            'revenus_imposes_au_prelevement_liberatoire',
            'dividendes_imposes_au_prelevement_liberatoire',
            'interets_imposes_au_prelevement_liberatoire',
            'assurances_vie_imposees_au_prelevement_liberatoire',
            'plus_values',
            'revenus_financiers',
            'revenus_financiers_hors_plus_values',
            ]),
        # 3. Tableau IRPP3: Plus-values mobilières et professionnelles
        ('irpp_3', [
            'plus_values',
            'plus_values_mobilieres',
            'plus_values_mobilieres_regime_normal',
            'plus_values_mobilieres_stock_options',
            'plus_values_mobilieres_retraite_dirigeant',
            'plus_values_professionnelles',
            'plus_values_professionnelles_regime_normal',
            'plus_values_professionnelles_retraite_dirigeant',
            ]),
        # 4. Revenus d'activité non salariée (TODO: 'bnc',
        # 'revenus_activite_non_salariee_exoneres'; a detailed 'irpp_5_a'
        # table of benefices_agricoles components is still to be added)
        ('irpp_4', [
            'revenus_d_activite_non_salariee',
            'benefices_agricoles',
            'benefices_agricoles_bruts',
            'deficits_agricoles',
            'benefices_industriels_commerciaux',
            'benefices_industriels_commerciaux_bruts',
            'deficits_industriels_commerciaux',
            ]),
        ])
    return collections.OrderedDict(
        (table_name, aggregates[columns])
        for table_name, columns in columns_by_table_name.items()
        )
# Mapping from the aggregate names used in this module to the corresponding
# OpenFisca variable codes. Empty strings mark codes still to be filled in;
# commented-out entries are aggregates with no single OpenFisca counterpart.
of_name_by_irpp_table_name = dict(
    salaires_imposables = 'salaire_imposable',
    heures_supplementaires = 'hsup',
    benefices_agricoles_forfait_exoneres = 'frag_exon',
    benefices_agricoles_forfait_imposables = 'frag_impo',
    benefices_agricoles_reels_exoneres = 'arag_exon',
    benefices_agricoles_reels_sans_cga_deficits = 'nrag_defi',
    benefices_agricoles_reels_imposables = 'arag_impg',
    benefices_agricoles_reels_deficits = 'arag_defi',
    benefices_agricoles_reels_sans_cga_exoneres = 'nrag_exon',
    # NOTE(review): 'arag_defi' duplicates ..._reels_deficits above; the
    # nrag_* naming pattern suggests this should be 'nrag_impg' — confirm.
    benefices_agricoles_reels_sans_cga_imposables = 'arag_defi',
    benefices_industriels_commerciaux_professionnels_micro_entreprise_vente = 'mbic_impv',
    benefices_industriels_commerciaux_professionnels_micro_entreprise_services = 'mbic_imps',
    # NOTE(review): same code as ..._micro_entreprise_services above; the
    # abic_*/mbic_* pattern suggests an *_exon code here — confirm.
    benefices_industriels_commerciaux_professionnels_reels_exoneres = 'mbic_imps',
    benefices_industriels_commerciaux_professionnels_reels_imposables_normal = 'abic_impn',
    benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie = 'abic_imps',
    benefices_industriels_commerciaux_professionnels_reels_exoneres_sans_cga = 'nbic_exon',
    benefices_industriels_commerciaux_professionnels_reels_imposables_normal_sans_cga = 'nbic_impn',
    benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie_sans_cga = 'nbic_mvct',
    deficits_industriels_commerciaux_professionnels_normal = 'abic_defn',
    deficits_industriels_commerciaux_professionnels_simplifie = 'abic_defs',
    deficits_industriels_commerciaux_professionnels_sans_cga = 'nbic_defn',
    deficits_industriels_commerciaux_professionnels_locations = 'nbic_defs',
    benefices_industriels_commerciaux_non_professionnels_micro_entreprise_exoneres = 'macc_exon',
    benefices_industriels_commerciaux_non_professionnels_micro_entreprise_vente = 'macc_impv',
    # NOTE(review): capital 'S' in 'macc_impS' breaks the lowercase pattern
    # of every other code — verify against the OpenFisca variable name.
    benefices_industriels_commerciaux_non_professionnels_micro_entreprise_services = 'macc_impS',
    benefices_industriels_commerciaux_non_professionnels_reels_exoneres = 'aacc_exon',
    benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal = 'aacc_impn',
    benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie = 'aacc_imps',
    benefices_industriels_commerciaux_non_professionnels_reels_exoneres_sans_cga = 'nacc_exon',
    benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal_sans_cga = 'nacc_impn',
    benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie_sans_cga = 'nacc_meup',
    deficits_industriels_commerciaux_non_professionnels_normal = 'aacc_defn',
    deficits_industriels_commerciaux_non_professionnels_simplifie = 'aacc_gits',
    deficits_industriels_commerciaux_non_professionnels_sans_cga = 'nacc_defn',
    benefices_non_commerciaux_professionnels_micro_entreprise_imposables = 'mbnc_impo',
    benefices_non_commerciaux_professionnels_declaration_controlee = '',
    benefices_non_commerciaux_professionnels_declaration_controlee_sans_cga = '',
    deficits_non_commerciaux_professionnels_declaration_controlee = '',
    deficits_non_commerciaux_professionnels_declaration_controlee_sans_cga = '',
    benefices_non_commerciaux_non_professionnels_micro_entreprise_imposables = '',
    benefices_non_commerciaux_non_professionnels_declaration_controlee = '',
    benefices_non_commerciaux_non_professionnels_declaration_controlee_sans_cga = '',
    revenus_fonciers_regime_normal = 'f4ba',  # f4ba
    revenus_fonciers_micro_foncier = 'f4be',  # f4be
    allocations_chomage = 'cho',
    pensions_de_retraite = 'rst',
    # dividendes_imposes_au_bareme = 'f2dc + f2fu', # 'f2dc + f2fu' non agrégés
    # interet_imposes_au_bareme = 'f2ts + f2go + f2tr', # non agrégés
    assurances_vie_imposees_au_bareme = 'f2ch',  # non agrégés
    dividendes_imposes_au_prelevement_liberatoire = 'f2da',
    interets_imposes_au_prelevement_liberatoire = 'f2ee',
    assurances_vie_imposees_au_prelevement_liberatoire = 'f2dh',
    plus_values_mobilieres_regime_normal = 'f3vg',
    # plus_values_mobilieres_stock_options = 'f3vf + f3vi', # PV stock options 1, stock options 2, TODO Différencier ?
    plus_values_mobilieres_retraite_dirigeant = 'f3va',  # TODO f3vb ?
    # plus_values_professionnelles_regime_normal = 'f5hz + f5iz + f5jz', # TODO: ceci n'est valable qu'avant 2010
    # plus_values_professionnelles_retraite_dirigeant = 'f5hg + f5ig',
    revenus_distribues_pea_exoneres = 'f2gr',
    pensions_alimentaires_percues = 'pensions_alimentaires_percues',  # pensions_alimentaires_percues
    # pensions_alimentaires_versess = 'f6gi + f6gj + f6el + f6em + f6gp + f6gu + f6dd',
    )
if __name__ == '__main__':
    # Wrap range() in list(): build_irpp_tables asserts isinstance(years, list),
    # which fails on Python 3 where range() is a lazy sequence, not a list.
    # (Harmless on Python 2, where list(range(...)) is a copy.)
    data_frame_by_irpp_table_name = build_irpp_tables(years = list(range(2008, 2013)), fill_value = 0)
| gpl-3.0 |
simontakite/sysadmin | pythonscripts/learningPython/decoall-deco-any3.py | 1 | 1855 | # Class decorator factory: apply any decorator to all methods of a class
from types import FunctionType
from decotools import tracer, timer
def decorateAll(decorator):
    """Class decorator factory: apply *decorator* to all methods of a class.

    Returns a class decorator that wraps every plain function found in the
    class's own __dict__ with *decorator* and rebinds it via setattr (not by
    writing to __dict__ directly). Inherited methods, staticmethod/classmethod
    objects, and non-function attributes are left untouched. The class object
    itself is returned unchanged (same identity, mutated in place).
    """
    def DecoDecorate(aClass):
        # Snapshot the items first: setattr() mutates aClass.__dict__ while
        # we would otherwise still be iterating its live view, which is
        # fragile (dict views must not observe concurrent mutation).
        for attr, attrval in list(aClass.__dict__.items()):
            if type(attrval) is FunctionType:
                setattr(aClass, attr, decorator(attrval))  # Not __dict__
        return aClass
    return DecoDecorate
#@decorateAll(tracer) # Use a class decorator
#@decorateAll(tracer(timer(label='@@')))
#@decorateAll(timer(label='@@')(tracer)) # Times applying the tracer!
#@decorateAll(tracer(timer(label='@@'))) # Traces applying the timer!
#@decorateAll(tracer) # Traces onCall wrapper, times methods
#@decorateAll(timer(label='@@'))
#@decorateAll(timer(label='@@'))
#@decorateAll(tracer) # Times onCall wrapper, traces methods
@decorateAll(timer(label='@@'))
@decorateAll(tracer)  # inner decorator traces each method; outer times the traced wrapper
class Person:
    """Simple record used to exercise the stacked class decorators."""

    def __init__(self, name, pay):
        self.name = name
        self.pay = pay

    def giveRaise(self, percent):
        # Grow pay by the given fraction (e.g. .10 means a 10% raise).
        self.pay = self.pay * (1.0 + percent)

    def lastName(self):
        # Last whitespace-separated component of the full name.
        return self.name.rsplit()[-1]
# Demo: create two Person records and exercise the decorated methods.
bob, sue = (Person(full_name, salary)
            for full_name, salary in [('Bob Smith', 50000), ('Sue Jones', 100000)])
print(bob.name, sue.name)
sue.giveRaise(.10)
print('%.2f' % sue.pay)
print(bob.lastName(), sue.lastName())
# If using timer: total time per method
# print('-'*40)
# print('%.5f' % Person.__init__.alltime)
# print('%.5f' % Person.giveRaise.alltime)
# print('%.5f' % Person.lastName.alltime)
""" | gpl-2.0 |
sserrot/champion_relationships | venv/Lib/site-packages/zmq/utils/garbage.py | 2 | 5935 | """Garbage collection thread for representing zmq refcount of Python objects
used in zero-copy sends.
"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import atexit
import struct
from os import getpid
from collections import namedtuple
from threading import Thread, Event, Lock
import warnings
import zmq
# (obj, event) pair tracked per zero-copy send; event (which may be None)
# is set once libzmq signals it is done with the buffer.
gcref = namedtuple('gcref', ['obj', 'event'])


class GarbageCollectorThread(Thread):
    """Thread in which garbage collection actually happens."""

    def __init__(self, gc):
        super(GarbageCollectorThread, self).__init__()
        self.gc = gc  # the owning GarbageCollector
        self.daemon = True  # never block interpreter exit
        self.pid = getpid()  # remembered to detect a fork later
        self.ready = Event()  # set once the PULL socket is bound (or on failure)

    def run(self):
        # detect fork at beginning of the thread
        # (getpid may be None during interpreter teardown)
        if getpid is None or getpid() != self.pid:
            self.ready.set()
            return
        try:
            s = self.gc.context.socket(zmq.PULL)
            s.linger = 0
            s.bind(self.gc.url)
        finally:
            # signal readiness even if bind failed, so start() never hangs
            self.ready.set()

        while True:
            # detect fork
            if getpid is None or getpid() != self.pid:
                return
            msg = s.recv()
            if msg == b'DIE':
                break
            # 4-byte message -> 32-bit size_t ('L'); otherwise 64-bit ('Q')
            fmt = 'L' if len(msg) == 4 else 'Q'
            key = struct.unpack(fmt, msg)[0]
            tup = self.gc.refs.pop(key, None)
            if tup and tup.event:
                tup.event.set()
            del tup  # drop the last reference so the tracked object can be freed
        s.close()
class GarbageCollector(object):
    """PyZMQ Garbage Collector

    Used for representing the reference held by libzmq during zero-copy sends.

    This object holds a dictionary, keyed by Python id,
    of the Python objects whose memory are currently in use by zeromq.

    When zeromq is done with the memory, it sends a message on an inproc PUSH socket
    containing the packed size_t (32 or 64-bit unsigned int),
    which is the key in the dict.

    When the PULL socket in the gc thread receives that message,
    the reference is popped from the dict,
    and any tracker events that should be signaled fire.
    """

    # class-level defaults; real values are assigned per instance in __init__
    refs = None
    _context = None
    _lock = None
    url = "inproc://pyzmq.gc.01"

    def __init__(self, context=None):
        super(GarbageCollector, self).__init__()
        self.refs = {}
        self.pid = None
        self.thread = None
        self._context = context
        self._lock = Lock()
        self._stay_down = False
        self._push = None
        self._push_mutex = None
        atexit.register(self._atexit)

    @property
    def context(self):
        # Lazily create a Context the first time it is needed.
        if self._context is None:
            if Thread.__module__.startswith('gevent'):
                # gevent has monkey-patched Thread, use green Context
                from zmq import green
                self._context = green.Context()
            else:
                self._context = zmq.Context()
        return self._context

    @context.setter
    def context(self, ctx):
        # Replacing the context while the gc thread runs requires stopping it;
        # warn if there are still outstanding refs that would be dropped.
        if self.is_alive():
            if self.refs:
                warnings.warn("Replacing gc context while gc is running", RuntimeWarning)
            self.stop()
        self._context = ctx

    def _atexit(self):
        """atexit callback

        sets _stay_down flag so that gc doesn't try to start up again in other atexit handlers
        """
        self._stay_down = True
        self.stop()

    def stop(self):
        """stop the garbage-collection thread"""
        if not self.is_alive():
            return
        self._stop()

    def _stop(self):
        # Tell the gc thread to exit via its own socket protocol.
        push = self.context.socket(zmq.PUSH)
        push.connect(self.url)
        push.send(b'DIE')
        push.close()
        if self._push:
            self._push.close()
        self._push = None
        self._push_mutex = None

        self.thread.join()
        self.context.term()
        self.refs.clear()
        # goes through the property setter (thread is stopped, so it just
        # clears _context)
        self.context = None

    @property
    def _push_socket(self):
        """The PUSH socket for use in the zmq message destructor callback.
        """
        # Re-create the socket if the gc thread was (re)started or it was
        # never created.
        if not self.is_alive() or self._push is None:
            self._push = self.context.socket(zmq.PUSH)
            self._push.connect(self.url)
        return self._push

    def start(self):
        """Start a new garbage collection thread.

        Creates a new zmq Context used for garbage collection.
        Under most circumstances, this will only be called once per process.
        """
        if self.thread is not None and self.pid != getpid():
            # It's re-starting, must free earlier thread's context
            # since a fork probably broke it
            self._stop()
        self.pid = getpid()
        self.refs = {}
        self.thread = GarbageCollectorThread(self)
        self.thread.start()
        # wait until the thread's PULL socket is bound (or bind failed)
        self.thread.ready.wait()

    def is_alive(self):
        """Is the garbage collection thread currently running?

        Includes checks for process shutdown or fork.
        """
        if (getpid is None or
                getpid() != self.pid or
                self.thread is None or
                not self.thread.is_alive()
                ):
            return False
        return True

    def store(self, obj, event=None):
        """store an object and (optionally) event for zero-copy"""
        if not self.is_alive():
            if self._stay_down:
                # interpreter is exiting; refuse to start a new thread
                return 0
            # safely start the gc thread
            # use lock and double check,
            # so we don't start multiple threads
            with self._lock:
                if not self.is_alive():
                    self.start()
        tup = gcref(obj, event)
        theid = id(tup)
        self.refs[theid] = tup
        return theid

    def __del__(self):
        if not self.is_alive():
            return
        try:
            self.stop()
        except Exception as e:
            raise (e)


# module-level singleton used by pyzmq's zero-copy send machinery
gc = GarbageCollector()
| mit |
NeCTAR-RC/heat | contrib/docker/docker/resources/docker_container.py | 1 | 9891 | #
# Copyright (c) 2013 Docker, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.engine import properties
from heat.engine import resource
from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging
logger = logging.getLogger(__name__)
DOCKER_INSTALLED = False
# conditionally import so tests can work without having the dependency
# satisfied
try:
import docker
DOCKER_INSTALLED = True
except ImportError:
docker = None
class DockerContainer(resource.Resource):
properties_schema = {
'docker_endpoint': properties.Schema(
properties.Schema.STRING,
_('Docker daemon endpoint (by default the local docker daemon '
'will be used)'),
default=None
),
'hostname': properties.Schema(
properties.Schema.STRING,
_('Hostname of the container'),
default=''
),
'user': properties.Schema(
properties.Schema.STRING,
_('Username or UID'),
default=''
),
'memory': properties.Schema(
properties.Schema.INTEGER,
_('Memory limit (Bytes)'),
default=0
),
'attach_stdin': properties.Schema(
properties.Schema.BOOLEAN,
_('Attach to the the process\' standard input'),
default=False
),
'attach_stdout': properties.Schema(
properties.Schema.BOOLEAN,
_('Attach to the process\' standard output'),
default=True
),
'attach_stderr': properties.Schema(
properties.Schema.BOOLEAN,
_('Attach to the process\' standard error'),
default=True
),
'port_specs': properties.Schema(
properties.Schema.LIST,
_('TCP/UDP ports mapping'),
default=None
),
'privileged': properties.Schema(
properties.Schema.BOOLEAN,
_('Enable extended privileges'),
default=False
),
'tty': properties.Schema(
properties.Schema.BOOLEAN,
_('Allocate a pseudo-tty'),
default=False
),
'open_stdin': properties.Schema(
properties.Schema.BOOLEAN,
_('Open stdin'),
default=False
),
'stdin_once': properties.Schema(
properties.Schema.BOOLEAN,
_('If true, close stdin after the 1 attached client disconnects'),
default=False
),
'env': properties.Schema(
properties.Schema.LIST,
_('Set environment variables'),
default=None
),
'cmd': properties.Schema(
properties.Schema.LIST,
_('Command to run after spawning the container'),
default=[]
),
'dns': properties.Schema(
properties.Schema.LIST,
_('Set custom dns servers'),
default=None
),
'image': properties.Schema(
properties.Schema.STRING,
_('Image name')
),
'volumes': properties.Schema(
properties.Schema.MAP,
_('Create a bind mount'),
default={}
),
'volumes_from': properties.Schema(
properties.Schema.STRING,
_('Mount all specified volumes'),
default=''
),
}
attributes_schema = {
'info': _('Container info'),
'network_info': _('Container network info'),
'network_ip': _('Container ip address'),
'network_gateway': _('Container ip gateway'),
'network_tcp_ports': _('Container TCP ports'),
'network_udp_ports': _('Container UDP ports'),
'logs': _('Container logs'),
'logs_head': _('Container first logs line'),
'logs_tail': _('Container last logs line')
}
def get_client(self):
client = None
if DOCKER_INSTALLED:
endpoint = self.properties.get('docker_endpoint')
if endpoint:
client = docker.Client(endpoint)
else:
client = docker.Client()
return client
def _parse_networkinfo_ports(self, networkinfo):
tcp = []
udp = []
for port, info in networkinfo['Ports'].iteritems():
p = port.split('/')
if not info or len(p) != 2 or 'HostPort' not in info[0]:
continue
port = info[0]['HostPort']
if p[1] == 'tcp':
tcp.append(port)
elif p[1] == 'udp':
udp.append(port)
return (','.join(tcp), ','.join(udp))
def _container_networkinfo(self, client, resource_id):
info = client.inspect_container(self.resource_id)
networkinfo = info['NetworkSettings']
ports = self._parse_networkinfo_ports(networkinfo)
networkinfo['TcpPorts'] = ports[0]
networkinfo['UdpPorts'] = ports[1]
return networkinfo
def _resolve_attribute(self, name):
if not self.resource_id:
return
if name == 'info':
client = self.get_client()
return client.inspect_container(self.resource_id)
if name == 'network_info':
client = self.get_client()
networkinfo = self._container_networkinfo(client, self.resource_id)
return networkinfo
if name == 'network_ip':
client = self.get_client()
networkinfo = self._container_networkinfo(client, self.resource_id)
return networkinfo['IPAddress']
if name == 'network_gateway':
client = self.get_client()
networkinfo = self._container_networkinfo(client, self.resource_id)
return networkinfo['Gateway']
if name == 'network_tcp_ports':
client = self.get_client()
networkinfo = self._container_networkinfo(client, self.resource_id)
return networkinfo['TcpPorts']
if name == 'network_udp_ports':
client = self.get_client()
networkinfo = self._container_networkinfo(client, self.resource_id)
return networkinfo['UdpPorts']
if name == 'logs':
client = self.get_client()
logs = client.logs(self.resource_id)
return logs
if name == 'logs_head':
client = self.get_client()
logs = client.logs(self.resource_id)
return logs.split('\n')[0]
if name == 'logs_tail':
client = self.get_client()
logs = client.logs(self.resource_id)
return logs.split('\n').pop()
def handle_create(self):
args = {
'image': self.properties['image'],
'command': self.properties['cmd'],
'hostname': self.properties['hostname'],
'user': self.properties['user'],
'stdin_open': self.properties['open_stdin'],
'tty': self.properties['tty'],
'mem_limit': self.properties['memory'],
'ports': self.properties['port_specs'],
'environment': self.properties['env'],
'dns': self.properties['dns'],
'volumes': self.properties['volumes'],
'volumes_from': self.properties['volumes_from'],
}
client = self.get_client()
result = client.create_container(**args)
container_id = result['Id']
self.resource_id_set(container_id)
kwargs = {}
if self.properties['privileged']:
kwargs['privileged'] = True
client.start(container_id, **kwargs)
return container_id
def _get_container_status(self, container_id):
client = self.get_client()
info = client.inspect_container(container_id)
return info['State']
def check_create_complete(self, container_id):
status = self._get_container_status(container_id)
return status['Running']
def handle_delete(self):
if self.resource_id is None:
return
client = self.get_client()
client.kill(self.resource_id)
return self.resource_id
def check_delete_complete(self, container_id):
status = self._get_container_status(container_id)
return (not status['Running'])
def handle_suspend(self):
if not self.resource_id:
return
client = self.get_client()
client.stop(self.resource_id)
return self.resource_id
def check_suspend_complete(self, container_id):
status = self._get_container_status(container_id)
return (not status['Running'])
def handle_resume(self):
if not self.resource_id:
return
client = self.get_client()
client.start(self.resource_id)
return self.resource_id
def check_resume_complete(self, container_id):
status = self._get_container_status(container_id)
return status['Running']
def resource_mapping():
    """Map the Heat resource type name to its implementing class."""
    return {'DockerInc::Docker::Container': DockerContainer}
def available_resource_mapping():
    """Expose the resource mapping only when the docker library is importable."""
    if not DOCKER_INSTALLED:
        logger.warn(_("Docker plug-in loaded, but docker lib not installed."))
        return {}
    return resource_mapping()
| apache-2.0 |
kcguo/daily | Alpha/baidu.py | 1 | 1028 | from selenium import webdriver
import time
import os
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
password = os.getenv('USERPWD')
chrome_options = webdriver.ChromeOptions()
#chrome_options.add_argument('--headless')
#chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--no-sandbox')
chrome_options.binary_location = r'C:\Program Files (x86)\Google\Chrome\Application\chrome.exe'
driver = webdriver.Chrome('C:\chromedriver', chrome_options=chrome_options)
#driver = webdriver.Chrome('C:\chromedriver')
driver.get('https://www.baidu.com')
print driver.page_source
#saveas = ActionChains(driver).key_down(Keys.CONTROL)\
# .send_keys('s').key_up(Keys.CONTROL)
#saveas.perform()
body = driver.find_element_by_tag_name("body")
print body
body.send_keys(Keys.CONTROL + 's')
ActionChains(driver).key_down(Keys.CONTROL, body).send_keys('s').key_up(Keys.CONTROL)
time.sleep(10)
driver.close()
| gpl-3.0 |
jamesblunt/edx-platform | common/lib/xmodule/xmodule/poll_module.py | 146 | 7498 | """Poll module is ungraded xmodule used by students to
to do set of polls.
On the client side we show:
If the student has not yet answered - Question with set of choices.
If the student has answered - Question with statistics for each answer.
"""
import cgi
import json
import logging
from copy import deepcopy
from collections import OrderedDict
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.stringify import stringify_children
from xmodule.mako_module import MakoModuleDescriptor
from xmodule.xml_module import XmlDescriptor
from xblock.fields import Scope, String, Dict, Boolean, List
log = logging.getLogger(__name__)
class PollFields(object):
    """Field declarations shared by the poll XModule and its descriptor."""
    # Name of poll to use in links to this poll
    display_name = String(help="Display name for this module", scope=Scope.settings)
    # Per-student flag/answer (Scope.user_state).
    voted = Boolean(help="Whether this student has voted on the poll", scope=Scope.user_state, default=False)
    poll_answer = String(help="Student answer", scope=Scope.user_state, default='')
    # Aggregated vote counts, shared across all students (user_state_summary).
    poll_answers = Dict(help="Poll answers from all students", scope=Scope.user_state_summary)
    # List of answers, in the form {'id': 'some id', 'text': 'the answer text'}
    answers = List(help="Poll answers from xml", scope=Scope.content, default=[])
    question = String(help="Poll question", scope=Scope.content, default='')
class PollModule(PollFields, XModule):
    """Poll Module"""
    js = {
      'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee')],
      'js': [
        resource_string(__name__, 'js/src/poll/poll.js'),
        resource_string(__name__, 'js/src/poll/poll_main.js')
      ]
    }
    css = {'scss': [resource_string(__name__, 'css/poll/display.scss')]}
    js_module_name = "Poll"
    def handle_ajax(self, dispatch, data):
        """Ajax handler.

        Args:
            dispatch: string request slug -- either an answer id (a vote),
                'get_state', or 'reset_poll'.
            data: dict request data parameters

        Returns:
            json string
        """
        if dispatch in self.poll_answers and not self.voted:
            # A vote: dispatch is a known answer id and this student has
            # not voted yet.
            # FIXME: fix this, when xblock will support mutable types.
            # Now we use this hack.
            temp_poll_answers = self.poll_answers
            temp_poll_answers[dispatch] += 1
            self.poll_answers = temp_poll_answers
            self.voted = True
            self.poll_answer = dispatch
            return json.dumps({'poll_answers': self.poll_answers,
                               'total': sum(self.poll_answers.values()),
                               'callback': {'objectName': 'Conditional'}
                               })
        elif dispatch == 'get_state':
            return json.dumps({'poll_answer': self.poll_answer,
                               'poll_answers': self.poll_answers,
                               'total': sum(self.poll_answers.values())
                               })
        elif dispatch == 'reset_poll' and self.voted and \
                self.descriptor.xml_attributes.get('reset', 'True').lower() != 'false':
            # Undo this student's vote (only allowed unless reset='false'
            # in the xml attributes).
            self.voted = False
            # FIXME: fix this, when xblock will support mutable types.
            # Now we use this hack.
            temp_poll_answers = self.poll_answers
            temp_poll_answers[self.poll_answer] -= 1
            self.poll_answers = temp_poll_answers
            self.poll_answer = ''
            return json.dumps({'status': 'success'})
        else:  # return error message
            return json.dumps({'error': 'Unknown Command!'})
    def get_html(self):
        """Renders parameters to template."""
        params = {
            'element_id': self.location.html_id(),
            'element_class': self.location.category,
            'ajax_url': self.system.ajax_url,
            'configuration_json': self.dump_poll(),
        }
        self.content = self.system.render_template('poll.html', params)
        return self.content
    def dump_poll(self):
        """Dump poll information.

        Returns:
            string - Serialize json.
        """
        # FIXME: hack for resolving caching `default={}` during definition
        # poll_answers field
        if self.poll_answers is None:
            self.poll_answers = {}
        answers_to_json = OrderedDict()
        # FIXME: fix this, when xblock support mutable types.
        # Now we use this hack.
        temp_poll_answers = self.poll_answers
        # Fill self.poll_answers, prepare data for template context.
        for answer in self.answers:
            # Set default count for answer = 0.
            if answer['id'] not in temp_poll_answers:
                temp_poll_answers[answer['id']] = 0
            answers_to_json[answer['id']] = cgi.escape(answer['text'])
        self.poll_answers = temp_poll_answers
        return json.dumps({
            'answers': answers_to_json,
            'question': cgi.escape(self.question),
            # to show answered poll after reload:
            'poll_answer': self.poll_answer,
            'poll_answers': self.poll_answers if self.voted else {},
            'total': sum(self.poll_answers.values()) if self.voted else 0,
            'reset': str(self.descriptor.xml_attributes.get('reset', 'true')).lower()
        })
class PollDescriptor(PollFields, MakoModuleDescriptor, XmlDescriptor):
    """Descriptor for the poll XModule: (de)serializes poll XML to/from fields."""
    _tag_name = 'poll_question'
    _child_tag_name = 'answer'
    module_class = PollModule
    @classmethod
    def definition_from_xml(cls, xml_object, system):
        """Pull out the data into dictionary.

        Args:
            xml_object: xml from file.
            system: `system` object.

        Returns:
            (definition, children) - tuple
            definition - dict:
                {
                    'answers': <List of answers>,
                    'question': <Question string>
                }
        """
        # Check for presense of required tags in xml.
        if len(xml_object.xpath(cls._child_tag_name)) == 0:
            raise ValueError("Poll_question definition must include \
                at least one 'answer' tag")
        xml_object_copy = deepcopy(xml_object)
        answers = []
        for element_answer in xml_object_copy.findall(cls._child_tag_name):
            answer_id = element_answer.get('id', None)
            if answer_id:
                answers.append({
                    'id': answer_id,
                    'text': stringify_children(element_answer)
                })
            # Remove each <answer> so only the question markup remains.
            xml_object_copy.remove(element_answer)
        definition = {
            'answers': answers,
            'question': stringify_children(xml_object_copy)
        }
        children = []
        return (definition, children)
    def definition_to_xml(self, resource_fs):
        """Return an xml element representing to this definition."""
        poll_str = u'<{tag_name}>{text}</{tag_name}>'.format(
            tag_name=self._tag_name, text=self.question)
        xml_object = etree.fromstring(poll_str)
        xml_object.set('display_name', self.display_name)
        def add_child(xml_obj, answer):
            # Serialize one answer dict back to an <answer id="..."> element.
            child_str = u'<{tag_name} id="{id}">{text}</{tag_name}>'.format(
                tag_name=self._child_tag_name, id=answer['id'],
                text=answer['text'])
            child_node = etree.fromstring(child_str)
            xml_object.append(child_node)
        for answer in self.answers:
            add_child(xml_object, answer)
        return xml_object
| agpl-3.0 |
whitequark/foundry | vendor/lit/tests/shtest-format.py | 2 | 2341 | # Check the various features of the ShTest format.
#
# RUN: not %{lit} -j 1 -v %{inputs}/shtest-format > %t.out
# RUN: FileCheck < %t.out %s
#
# END.
# CHECK: -- Testing:
# CHECK: FAIL: shtest-format :: external_shell/fail.txt
# CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail.txt' FAILED ***
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: line 1: failed test output on stdout
# CHECK-NEXT: line 2: failed test output on stdout
# CHECK: Command Output (stderr):
# CHECK-NEXT: --
# CHECK-NEXT: cat: does-not-exist: No such file or directory
# CHECK: --
# CHECK: FAIL: shtest-format :: external_shell/fail_with_bad_encoding.txt
# CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail_with_bad_encoding.txt' FAILED ***
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: a line with bad encoding:
# CHECK: --
# CHECK: PASS: shtest-format :: external_shell/pass.txt
# CHECK: FAIL: shtest-format :: fail.txt
# CHECK-NEXT: *** TEST 'shtest-format :: fail.txt' FAILED ***
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK-NEXT: printf "line 1
# CHECK-NEXT: false
# CHECK-NEXT: --
# CHECK-NEXT: Exit Code: 1
#
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: Command 0: "printf"
# CHECK-NEXT: Command 0 Result: 0
# CHECK-NEXT: Command 0 Output:
# CHECK-NEXT: line 1: failed test output on stdout
# CHECK-NEXT: line 2: failed test output on stdout
# CHECK: UNRESOLVED: shtest-format :: no-test-line.txt
# CHECK: PASS: shtest-format :: pass.txt
# CHECK: UNSUPPORTED: shtest-format :: requires-missing.txt
# CHECK: PASS: shtest-format :: requires-present.txt
# CHECK: UNSUPPORTED: shtest-format :: unsupported_dir/some-test.txt
# CHECK: XFAIL: shtest-format :: xfail-feature.txt
# CHECK: XFAIL: shtest-format :: xfail-target.txt
# CHECK: XFAIL: shtest-format :: xfail.txt
# CHECK: XPASS: shtest-format :: xpass.txt
# CHECK: Testing Time
# CHECK: Unexpected Passing Tests (1)
# CHECK: shtest-format :: xpass.txt
# CHECK: Failing Tests (3)
# CHECK: shtest-format :: external_shell/fail.txt
# CHECK: shtest-format :: external_shell/fail_with_bad_encoding.txt
# CHECK: shtest-format :: fail.txt
# CHECK: Expected Passes : 3
# CHECK: Expected Failures : 3
# CHECK: Unsupported Tests : 2
# CHECK: Unresolved Tests : 1
# CHECK: Unexpected Passes : 1
# CHECK: Unexpected Failures: 3
| mit |
scs/uclinux | user/python/python-2.4.4/Lib/aifc.py | 12 | 33330 | """Stuff to parse AIFF-C and AIFF files.
Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.
An AIFF-C file has the following structure.
+-----------------+
| FORM |
+-----------------+
| <size> |
+----+------------+
| | AIFC |
| +------------+
| | <chunks> |
| | . |
| | . |
| | . |
+----+------------+
An AIFF file has the string "AIFF" instead of "AIFC".
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data. The size field does not include
the size of the 8 byte header.
The following chunk types are recognized.
FVER
<version number of AIFF-C defining document> (AIFF-C only).
MARK
<# of markers> (2 bytes)
list of markers:
<marker ID> (2 bytes, must be > 0)
<position> (4 bytes)
<marker name> ("pstring")
COMM
<# of channels> (2 bytes)
<# of sound frames> (4 bytes)
<size of the samples> (2 bytes)
<sampling frequency> (10 bytes, IEEE 80-bit extended
floating point)
in AIFF-C files only:
<compression type> (4 bytes)
<human-readable version of compression type> ("pstring")
SSND
<offset> (4 bytes, not used by this program)
<blocksize> (4 bytes, not used by this program)
<sound data>
A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.
Usage.
Reading AIFF files:
f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for AIFF files)
getcompname() -- returns human-readable version of
compression type ('not compressed' for AIFF files)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- get the list of marks in the audio file or None
if there are no marks
getmark(id) -- get mark with the specified id (raises an error
if the mark does not exist)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.
Writing AIFF files:
f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
aiff() -- create an AIFF file (AIFF-C default)
aifc() -- create an AIFF-C file
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
setmark(id, pos, name)
-- add specified mark to the list of marks
tell() -- return current position in output file (useful
in combination with setmark())
writeframesraw(data)
                -- write audio frames without patching up the
                   file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, except possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
Marks can be added anytime. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written. This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
"""
import struct
import __builtin__
__all__ = ["Error","open","openfp"]
class Error(Exception):
    """Raised for malformed AIFF/AIFF-C input or misuse of this module's API."""
    pass
_AIFC_version = 0xA2805140L     # Version 1 of AIFF-C
# Chunk types that are recognized but deliberately skipped when reading.
_skiplist = 'COMT', 'INST', 'MIDI', 'AESD', \
            'APPL', 'NAME', 'AUTH', '(c) ', 'ANNO'
def _read_long(file):
try:
return struct.unpack('>l', file.read(4))[0]
except struct.error:
raise EOFError
def _read_ulong(file):
try:
return struct.unpack('>L', file.read(4))[0]
except struct.error:
raise EOFError
def _read_short(file):
try:
return struct.unpack('>h', file.read(2))[0]
except struct.error:
raise EOFError
def _read_string(file):
length = ord(file.read(1))
if length == 0:
data = ''
else:
data = file.read(length)
if length & 1 == 0:
dummy = file.read(1)
return data
_HUGE_VAL = 1.79769313486231e+308 # See <limits.h>
def _read_float(f): # 10 bytes
    """Read an IEEE 754 80-bit extended float: a 16-bit sign+exponent word
    followed by two 32-bit mantissa words (big endian)."""
    expon = _read_short(f) # 2 bytes
    sign = 1
    if expon < 0:
        # The sign lives in the top bit of the exponent word.
        sign = -1
        expon = expon + 0x8000
    himant = _read_ulong(f) # 4 bytes
    lomant = _read_ulong(f) # 4 bytes
    if expon == himant == lomant == 0:
        f = 0.0
    elif expon == 0x7FFF:
        # Infinity (or NaN) -- approximated by the largest double.
        f = _HUGE_VAL
    else:
        expon = expon - 16383
        # Combine the two 32-bit halves into the 64-bit mantissa and rescale.
        f = (himant * 0x100000000L + lomant) * pow(2.0, expon - 63)
    return sign * f
def _write_short(f, x):
f.write(struct.pack('>h', x))
def _write_long(f, x):
f.write(struct.pack('>L', x))
def _write_string(f, s):
f.write(chr(len(s)))
f.write(s)
if len(s) & 1 == 0:
f.write(chr(0))
def _write_float(f, x):
    """Write x as an IEEE 754 80-bit extended float: 16-bit sign+exponent
    word followed by two 32-bit mantissa words (big endian)."""
    import math
    if x < 0:
        sign = 0x8000
        x = x * -1
    else:
        sign = 0
    if x == 0:
        expon = 0
        himant = 0
        lomant = 0
    else:
        fmant, expon = math.frexp(x)
        if expon > 16384 or fmant >= 1: # Infinity or NaN
            expon = sign|0x7FFF
            himant = 0
            lomant = 0
        else: # Finite
            expon = expon + 16382
            if expon < 0: # denormalized
                fmant = math.ldexp(fmant, expon)
                expon = 0
            expon = expon | sign
            # Split the 64-bit mantissa into two 32-bit halves.
            fmant = math.ldexp(fmant, 32)
            fsmant = math.floor(fmant)
            himant = long(fsmant)
            fmant = math.ldexp(fmant - fsmant, 32)
            fsmant = math.floor(fmant)
            lomant = long(fsmant)
    _write_short(f, expon)
    _write_long(f, himant)
    _write_long(f, lomant)
from chunk import Chunk
class Aifc_read:
    """Reader for AIFF and AIFF-C files.

    Parses the FORM container and its COMM/SSND/FVER/MARK chunks on
    construction; audio data is then read lazily via readframes().
    """
    # Variables used in this class:
    #
    # These variables are available to the user though appropriate
    # methods of this class:
    # _file -- the open file with methods read(), close(), and seek()
    #       set through the __init__() method
    # _nchannels -- the number of audio channels
    #       available through the getnchannels() method
    # _nframes -- the number of audio frames
    #       available through the getnframes() method
    # _sampwidth -- the number of bytes per audio sample
    #       available through the getsampwidth() method
    # _framerate -- the sampling frequency
    #       available through the getframerate() method
    # _comptype -- the AIFF-C compression type ('NONE' if AIFF)
    #       available through the getcomptype() method
    # _compname -- the human-readable AIFF-C compression type
    #       available through the getcomptype() method
    # _markers -- the marks in the audio file
    #       available through the getmarkers() and getmark()
    #       methods
    # _soundpos -- the position in the audio stream
    #       available through the tell() method, set through the
    #       setpos() method
    #
    # These variables are used internally only:
    # _version -- the AIFF-C version number
    # _decomp -- the decompressor from builtin module cl
    # _comm_chunk_read -- 1 iff the COMM chunk has been read
    # _aifc -- 1 iff reading an AIFF-C file
    # _ssnd_seek_needed -- 1 iff positioned correctly in audio
    #       file for readframes()
    # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
    # _framesize -- size of one frame in the file
    def initfp(self, file):
        """Parse the FORM container from an open file object and catalogue
        its chunks; raises Error on malformed input."""
        self._version = 0
        self._decomp = None
        self._convert = None
        self._markers = []
        self._soundpos = 0
        self._file = Chunk(file)
        if self._file.getname() != 'FORM':
            raise Error, 'file does not start with FORM id'
        formdata = self._file.read(4)
        if formdata == 'AIFF':
            self._aifc = 0
        elif formdata == 'AIFC':
            self._aifc = 1
        else:
            raise Error, 'not an AIFF or AIFF-C file'
        self._comm_chunk_read = 0
        while 1:
            self._ssnd_seek_needed = 1
            try:
                chunk = Chunk(self._file)
            except EOFError:
                break
            chunkname = chunk.getname()
            if chunkname == 'COMM':
                self._read_comm_chunk(chunk)
                self._comm_chunk_read = 1
            elif chunkname == 'SSND':
                self._ssnd_chunk = chunk
                # Skip the 8-byte offset/blocksize prefix of SSND.
                dummy = chunk.read(8)
                self._ssnd_seek_needed = 0
            elif chunkname == 'FVER':
                self._version = _read_ulong(chunk)
            elif chunkname == 'MARK':
                self._readmark(chunk)
            elif chunkname in _skiplist:
                pass
            else:
                raise Error, 'unrecognized chunk type '+chunk.chunkname
            chunk.skip()
        if not self._comm_chunk_read or not self._ssnd_chunk:
            raise Error, 'COMM chunk and/or SSND chunk missing'
        if self._aifc and self._decomp:
            import cl
            # Configure the SGI compression-library decompressor to match
            # the parameters read from the COMM chunk.
            params = [cl.ORIGINAL_FORMAT, 0,
                      cl.BITS_PER_COMPONENT, self._sampwidth * 8,
                      cl.FRAME_RATE, self._framerate]
            if self._nchannels == 1:
                params[1] = cl.MONO
            elif self._nchannels == 2:
                params[1] = cl.STEREO_INTERLEAVED
            else:
                raise Error, 'cannot compress more than 2 channels'
            self._decomp.SetParams(params)
    def __init__(self, f):
        """f is a filename or an open file object (opened for binary read)."""
        if type(f) == type(''):
            f = __builtin__.open(f, 'rb')
        # else, assume it is an open file object already
        self.initfp(f)
    #
    # User visible methods.
    #
    def getfp(self):
        """Return the underlying Chunk wrapping the whole FORM."""
        return self._file
    def rewind(self):
        """Rewind to the beginning of the audio stream."""
        self._ssnd_seek_needed = 1
        self._soundpos = 0
    def close(self):
        """Close the instance (make it unusable)."""
        if self._decomp:
            self._decomp.CloseDecompressor()
            self._decomp = None
        self._file = None
    def tell(self):
        """Return the current position in frames (not file bytes)."""
        return self._soundpos
    def getnchannels(self):
        return self._nchannels
    def getnframes(self):
        return self._nframes
    def getsampwidth(self):
        return self._sampwidth
    def getframerate(self):
        return self._framerate
    def getcomptype(self):
        return self._comptype
    def getcompname(self):
        return self._compname
##  def getversion(self):
##      return self._version
    def getparams(self):
        """Return (nchannels, sampwidth, framerate, nframes, comptype, compname)."""
        return self.getnchannels(), self.getsampwidth(), \
              self.getframerate(), self.getnframes(), \
              self.getcomptype(), self.getcompname()
    def getmarkers(self):
        """Return the list of (id, pos, name) markers, or None if there are none."""
        if len(self._markers) == 0:
            return None
        return self._markers
    def getmark(self, id):
        """Return the marker with the given id; raise Error if it does not exist."""
        for marker in self._markers:
            if id == marker[0]:
                return marker
        raise Error, 'marker %r does not exist' % (id,)
    def setpos(self, pos):
        """Seek to the specified frame position."""
        if pos < 0 or pos > self._nframes:
            raise Error, 'position not in range'
        self._soundpos = pos
        self._ssnd_seek_needed = 1
    def readframes(self, nframes):
        """Return at most nframes frames of (possibly decompressed) audio data."""
        if self._ssnd_seek_needed:
            self._ssnd_chunk.seek(0)
            dummy = self._ssnd_chunk.read(8)
            pos = self._soundpos * self._framesize
            if pos:
                self._ssnd_chunk.seek(pos + 8)
            self._ssnd_seek_needed = 0
        if nframes == 0:
            return ''
        data = self._ssnd_chunk.read(nframes * self._framesize)
        if self._convert and data:
            data = self._convert(data)
        self._soundpos = self._soundpos + len(data) / (self._nchannels * self._sampwidth)
        return data
    #
    # Internal methods.
    #
    def _decomp_data(self, data):
        # Decompress via the SGI compression library (cl).
        import cl
        dummy = self._decomp.SetParam(cl.FRAME_BUFFER_SIZE,
                          len(data) * 2)
        return self._decomp.Decompress(len(data) / self._nchannels,
                           data)
    def _ulaw2lin(self, data):
        # Convert u-law samples to 16-bit linear.
        import audioop
        return audioop.ulaw2lin(data, 2)
    def _adpcm2lin(self, data):
        # Convert G.722 ADPCM samples to 16-bit linear, keeping codec state.
        import audioop
        if not hasattr(self, '_adpcmstate'):
            # first time
            self._adpcmstate = None
        data, self._adpcmstate = audioop.adpcm2lin(data, 2,
                               self._adpcmstate)
        return data
    def _read_comm_chunk(self, chunk):
        """Parse the COMM chunk and set up any needed decompression converter."""
        self._nchannels = _read_short(chunk)
        self._nframes = _read_long(chunk)
        self._sampwidth = (_read_short(chunk) + 7) / 8
        self._framerate = int(_read_float(chunk))
        self._framesize = self._nchannels * self._sampwidth
        if self._aifc:
            #DEBUG: SGI's soundeditor produces a bad size :-(
            kludge = 0
            if chunk.chunksize == 18:
                kludge = 1
                print 'Warning: bad COMM chunk size'
                chunk.chunksize = 23
            #DEBUG end
            self._comptype = chunk.read(4)
            #DEBUG start
            if kludge:
                length = ord(chunk.file.read(1))
                if length & 1 == 0:
                    length = length + 1
                chunk.chunksize = chunk.chunksize + length
                chunk.file.seek(-1, 1)
            #DEBUG end
            self._compname = _read_string(chunk)
            if self._comptype != 'NONE':
                if self._comptype == 'G722':
                    try:
                        import audioop
                    except ImportError:
                        pass
                    else:
                        self._convert = self._adpcm2lin
                        self._framesize = self._framesize / 4
                    return
                # for ULAW and ALAW try Compression Library
                try:
                    import cl
                except ImportError:
                    if self._comptype == 'ULAW':
                        try:
                            import audioop
                            self._convert = self._ulaw2lin
                            self._framesize = self._framesize / 2
                            return
                        except ImportError:
                            pass
                    raise Error, 'cannot read compressed AIFF-C files'
                if self._comptype == 'ULAW':
                    scheme = cl.G711_ULAW
                    self._framesize = self._framesize / 2
                elif self._comptype == 'ALAW':
                    scheme = cl.G711_ALAW
                    self._framesize = self._framesize / 2
                else:
                    raise Error, 'unsupported compression type'
                self._decomp = cl.OpenDecompressor(scheme)
                self._convert = self._decomp_data
        else:
            self._comptype = 'NONE'
            self._compname = 'not compressed'
    def _readmark(self, chunk):
        """Parse the MARK chunk into the self._markers list."""
        nmarkers = _read_short(chunk)
        # Some files appear to contain invalid counts.
        # Cope with this by testing for EOF.
        try:
            for i in range(nmarkers):
                id = _read_short(chunk)
                pos = _read_long(chunk)
                name = _read_string(chunk)
                if pos or name:
                    # some files appear to have
                    # dummy markers consisting of
                    # a position 0 and name ''
                    self._markers.append((id, pos, name))
        except EOFError:
            print 'Warning: MARK chunk contains only',
            print len(self._markers),
            if len(self._markers) == 1: print 'marker',
            else: print 'markers',
            print 'instead of', nmarkers
class Aifc_write:
# Variables used in this class:
#
# These variables are user settable through appropriate methods
# of this class:
# _file -- the open file with methods write(), close(), tell(), seek()
# set through the __init__() method
# _comptype -- the AIFF-C compression type ('NONE' in AIFF)
# set through the setcomptype() or setparams() method
# _compname -- the human-readable AIFF-C compression type
# set through the setcomptype() or setparams() method
# _nchannels -- the number of audio channels
# set through the setnchannels() or setparams() method
# _sampwidth -- the number of bytes per audio sample
# set through the setsampwidth() or setparams() method
# _framerate -- the sampling frequency
# set through the setframerate() or setparams() method
# _nframes -- the number of audio frames written to the header
# set through the setnframes() or setparams() method
# _aifc -- whether we're writing an AIFF-C file or an AIFF file
# set through the aifc() method, reset through the
# aiff() method
#
# These variables are used internally only:
# _version -- the AIFF-C version number
# _comp -- the compressor from builtin module cl
# _nframeswritten -- the number of audio frames actually written
# _datalength -- the size of the audio samples written to the header
# _datawritten -- the size of the audio samples actually written
    def __init__(self, f):
        """f is a filename or an open file object (opened for binary write)."""
        if type(f) == type(''):
            filename = f
            f = __builtin__.open(f, 'wb')
        else:
            # else, assume it is an open file object already
            filename = '???'
        self.initfp(f)
        # A '.aiff' filename selects plain AIFF; anything else writes AIFF-C.
        if filename[-5:] == '.aiff':
            self._aifc = 0
        else:
            self._aifc = 1
    def initfp(self, file):
        """Reset all writer state for the given open file object."""
        self._file = file
        self._version = _AIFC_version
        self._comptype = 'NONE'
        self._compname = 'not compressed'
        self._comp = None
        self._convert = None
        self._nchannels = 0
        self._sampwidth = 0
        self._framerate = 0
        self._nframes = 0
        self._nframeswritten = 0
        self._datawritten = 0
        self._datalength = 0
        self._markers = []
        self._marklength = 0
        self._aifc = 1      # AIFF-C is default
    def __del__(self):
        # Ensure the header gets patched up even if the caller forgot close().
        if self._file:
            self.close()
#
# User visible methods.
#
    def aiff(self):
        """Select plain AIFF output; illegal after writing has started."""
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        self._aifc = 0
    def aifc(self):
        """Select AIFF-C output (the default); illegal after writing has started."""
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        self._aifc = 1
    def setnchannels(self, nchannels):
        """Set the number of audio channels (must be >= 1)."""
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        if nchannels < 1:
            raise Error, 'bad # of channels'
        self._nchannels = nchannels
    def getnchannels(self):
        """Return the number of channels; raise Error if not yet set."""
        if not self._nchannels:
            raise Error, 'number of channels not set'
        return self._nchannels
    def setsampwidth(self, sampwidth):
        """Set the sample width in bytes (1 through 4)."""
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        if sampwidth < 1 or sampwidth > 4:
            raise Error, 'bad sample width'
        self._sampwidth = sampwidth
    def getsampwidth(self):
        """Return the sample width in bytes; raise Error if not yet set."""
        if not self._sampwidth:
            raise Error, 'sample width not set'
        return self._sampwidth
    def setframerate(self, framerate):
        """Set the sampling frequency (must be > 0)."""
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        if framerate <= 0:
            raise Error, 'bad frame rate'
        self._framerate = framerate
    def getframerate(self):
        """Return the sampling frequency; raise Error if not yet set."""
        if not self._framerate:
            raise Error, 'frame rate not set'
        return self._framerate
    def setnframes(self, nframes):
        """Pre-declare the number of frames (lets the header be written final)."""
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        self._nframes = nframes
    def getnframes(self):
        """Return the number of frames actually written so far."""
        return self._nframeswritten
    def setcomptype(self, comptype, compname):
        """Set compression type (one of NONE/ULAW/ALAW/G722) and its human-readable name."""
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
            raise Error, 'unsupported compression type'
        self._comptype = comptype
        self._compname = compname
    def getcomptype(self):
        """Return the compression type code."""
        return self._comptype
    def getcompname(self):
        """Return the human-readable compression name."""
        return self._compname
## def setversion(self, version):
## if self._nframeswritten:
## raise Error, 'cannot change parameters after starting to write'
## self._version = version
    def setparams(self, (nchannels, sampwidth, framerate, nframes, comptype, compname)):
        """Set all parameters at once from a 6-tuple (as returned by getparams())."""
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
            raise Error, 'unsupported compression type'
        self.setnchannels(nchannels)
        self.setsampwidth(sampwidth)
        self.setframerate(framerate)
        self.setnframes(nframes)
        self.setcomptype(comptype, compname)
    def getparams(self):
        """Return (nchannels, sampwidth, framerate, nframes, comptype, compname)."""
        if not self._nchannels or not self._sampwidth or not self._framerate:
            raise Error, 'not all parameters set'
        return self._nchannels, self._sampwidth, self._framerate, \
              self._nframes, self._comptype, self._compname
    def setmark(self, id, pos, name):
        """Add, or replace in place, the marker with the given id (id > 0, pos >= 0)."""
        if id <= 0:
            raise Error, 'marker ID must be > 0'
        if pos < 0:
            raise Error, 'marker position must be >= 0'
        if type(name) != type(''):
            raise Error, 'marker name must be a string'
        for i in range(len(self._markers)):
            if id == self._markers[i][0]:
                self._markers[i] = id, pos, name
                return
        self._markers.append((id, pos, name))
    def getmark(self, id):
        """Return the (id, pos, name) marker tuple; raise Error if absent."""
        for marker in self._markers:
            if id == marker[0]:
                return marker
        raise Error, 'marker %r does not exist' % (id,)
    def getmarkers(self):
        """Return the list of markers, or None if there are none."""
        if len(self._markers) == 0:
            return None
        return self._markers
    def tell(self):
        """Return the current position in frames (useful with setmark())."""
        return self._nframeswritten
    def writeframesraw(self, data):
        """Write audio frames without patching up the header sizes."""
        self._ensure_header_written(len(data))
        # Frame count is derived from the uncompressed input length.
        nframes = len(data) / (self._sampwidth * self._nchannels)
        if self._convert:
            data = self._convert(data)
        self._file.write(data)
        self._nframeswritten = self._nframeswritten + nframes
        self._datawritten = self._datawritten + len(data)
    def writeframes(self, data):
        """Write audio frames and patch the header if the sizes changed."""
        self.writeframesraw(data)
        if self._nframeswritten != self._nframes or \
              self._datalength != self._datawritten:
            self._patchheader()
    def close(self):
        """Flush pending data, write markers, patch header sizes, detach the file."""
        self._ensure_header_written(0)
        if self._datawritten & 1:
            # quick pad to even size
            self._file.write(chr(0))
            self._datawritten = self._datawritten + 1
        self._writemarkers()
        if self._nframeswritten != self._nframes or \
              self._datalength != self._datawritten or \
              self._marklength:
            self._patchheader()
        if self._comp:
            self._comp.CloseCompressor()
            self._comp = None
        self._file.flush()
        self._file = None
#
# Internal methods.
#
    def _comp_data(self, data):
        # Compress via the SGI compression library (cl).
        import cl
        dummy = self._comp.SetParam(cl.FRAME_BUFFER_SIZE, len(data))
        dummy = self._comp.SetParam(cl.COMPRESSED_BUFFER_SIZE, len(data))
        return self._comp.Compress(self._nframes, data)
    def _lin2ulaw(self, data):
        # Convert 16-bit linear samples to u-law.
        import audioop
        return audioop.lin2ulaw(data, 2)
    def _lin2adpcm(self, data):
        # Convert 16-bit linear samples to ADPCM, keeping codec state between calls.
        import audioop
        if not hasattr(self, '_adpcmstate'):
            self._adpcmstate = None
        data, self._adpcmstate = audioop.lin2adpcm(data, 2,
                               self._adpcmstate)
        return data
    def _ensure_header_written(self, datasize):
        """Validate parameters and write the file header before the first frames."""
        if not self._nframeswritten:
            if self._comptype in ('ULAW', 'ALAW'):
                if not self._sampwidth:
                    self._sampwidth = 2
                if self._sampwidth != 2:
                    raise Error, 'sample width must be 2 when compressing with ULAW or ALAW'
            if self._comptype == 'G722':
                if not self._sampwidth:
                    self._sampwidth = 2
                if self._sampwidth != 2:
                    raise Error, 'sample width must be 2 when compressing with G7.22 (ADPCM)'
            if not self._nchannels:
                raise Error, '# channels not specified'
            if not self._sampwidth:
                raise Error, 'sample width not specified'
            if not self._framerate:
                raise Error, 'sampling rate not specified'
            self._write_header(datasize)
    def _init_compression(self):
        """Select the sample converter (audioop or SGI cl) for the chosen comptype."""
        if self._comptype == 'G722':
            self._convert = self._lin2adpcm
            return
        try:
            import cl
        except ImportError:
            if self._comptype == 'ULAW':
                try:
                    import audioop
                    self._convert = self._lin2ulaw
                    return
                except ImportError:
                    pass
            raise Error, 'cannot write compressed AIFF-C files'
        if self._comptype == 'ULAW':
            scheme = cl.G711_ULAW
        elif self._comptype == 'ALAW':
            scheme = cl.G711_ALAW
        else:
            raise Error, 'unsupported compression type'
        self._comp = cl.OpenCompressor(scheme)
        params = [cl.ORIGINAL_FORMAT, 0,
              cl.BITS_PER_COMPONENT, self._sampwidth * 8,
              cl.FRAME_RATE, self._framerate,
              cl.FRAME_BUFFER_SIZE, 100,
              cl.COMPRESSED_BUFFER_SIZE, 100]
        if self._nchannels == 1:
            params[1] = cl.MONO
        elif self._nchannels == 2:
            params[1] = cl.STEREO_INTERLEAVED
        else:
            raise Error, 'cannot compress more than 2 channels'
        self._comp.SetParams(params)
        # the compressor produces a header which we ignore
        dummy = self._comp.Compress(0, '')
        self._convert = self._comp_data
    def _write_header(self, initlength):
        # Emit the FORM container plus the FVER/COMM chunks and the start
        # of the SSND chunk.  Several length fields are estimates at this
        # point; _patchheader() rewrites them when the file is closed.
        if self._aifc and self._comptype != 'NONE':
            self._init_compression()
        self._file.write('FORM')
        if not self._nframes:
            # estimate the frame count from the first data chunk
            # (Python 2 '/' is integer division here)
            self._nframes = initlength / (self._nchannels * self._sampwidth)
        self._datalength = self._nframes * self._nchannels * self._sampwidth
        if self._datalength & 1:
            # chunks are padded to an even byte count
            self._datalength = self._datalength + 1
        if self._aifc:
            if self._comptype in ('ULAW', 'ALAW'):
                # u-law/a-law compress 2:1
                self._datalength = self._datalength / 2
                if self._datalength & 1:
                    self._datalength = self._datalength + 1
            elif self._comptype == 'G722':
                # G.722 ADPCM compresses 4:1 (rounded up)
                self._datalength = (self._datalength + 3) / 4
                if self._datalength & 1:
                    self._datalength = self._datalength + 1
        # remember the offsets of the patchable length fields
        self._form_length_pos = self._file.tell()
        commlength = self._write_form_length(self._datalength)
        if self._aifc:
            self._file.write('AIFC')
            self._file.write('FVER')
            _write_long(self._file, 4)
            _write_long(self._file, self._version)
        else:
            self._file.write('AIFF')
        self._file.write('COMM')
        _write_long(self._file, commlength)
        _write_short(self._file, self._nchannels)
        self._nframes_pos = self._file.tell()
        _write_long(self._file, self._nframes)
        _write_short(self._file, self._sampwidth * 8)
        _write_float(self._file, self._framerate)
        if self._aifc:
            self._file.write(self._comptype)
            _write_string(self._file, self._compname)
        self._file.write('SSND')
        self._ssnd_length_pos = self._file.tell()
        _write_long(self._file, self._datalength + 8)
        _write_long(self._file, 0)
        _write_long(self._file, 0)
    def _write_form_length(self, datalength):
        # Write the total FORM chunk length; return the COMM chunk body
        # length (which differs between AIFF and AIFF-C).
        if self._aifc:
            # 18 fixed COMM bytes + 4-byte compression type + a
            # Pascal-style compression name (1 length byte + text)
            commlength = 18 + 5 + len(self._compname)
            if commlength & 1:
                # pad to an even length
                commlength = commlength + 1
            # 'FVER' tag + chunk size + version timestamp = 12 bytes
            verslength = 12
        else:
            commlength = 18
            verslength = 0
        _write_long(self._file, 4 + verslength + self._marklength + \
                    8 + commlength + 16 + datalength)
        return commlength
    def _patchheader(self):
        # Called on close(): seek back and fix up the header length and
        # frame-count fields now that the real amount written is known.
        curpos = self._file.tell()
        if self._datawritten & 1:
            # pad the sound data to an even byte count
            datalength = self._datawritten + 1
            self._file.write(chr(0))
        else:
            datalength = self._datawritten
        if datalength == self._datalength and \
           self._nframes == self._nframeswritten and \
           self._marklength == 0:
            # header already matches what was written; nothing to patch
            self._file.seek(curpos, 0)
            return
        self._file.seek(self._form_length_pos, 0)
        dummy = self._write_form_length(datalength)
        self._file.seek(self._nframes_pos, 0)
        _write_long(self._file, self._nframeswritten)
        self._file.seek(self._ssnd_length_pos, 0)
        _write_long(self._file, datalength + 8)
        # restore the file position and cache the patched values
        self._file.seek(curpos, 0)
        self._nframes = self._nframeswritten
        self._datalength = datalength
    def _writemarkers(self):
        # Write the MARK chunk; skipped entirely when no markers were set.
        if len(self._markers) == 0:
            return
        self._file.write('MARK')
        # chunk body length: 2-byte marker count plus, per marker,
        # 2-byte id + 4-byte position + Pascal-string name
        length = 2
        for marker in self._markers:
            id, pos, name = marker
            length = length + len(name) + 1 + 6
            if len(name) & 1 == 0:
                # Pascal strings are padded to an even total length
                length = length + 1
        _write_long(self._file, length)
        self._marklength = length + 8
        _write_short(self._file, len(self._markers))
        for marker in self._markers:
            id, pos, name = marker
            _write_short(self._file, id)
            _write_long(self._file, pos)
            _write_string(self._file, name)
def open(f, mode=None):
    """Open an AIFF/AIFF-C file and return a reader or writer object.

    f may be a filename or an open file object; when mode is omitted it
    is taken from the file object's mode attribute, defaulting to 'rb'.
    """
    if mode is None:
        if hasattr(f, 'mode'):
            mode = f.mode
        else:
            mode = 'rb'
    if mode in ('r', 'rb'):
        return Aifc_read(f)
    elif mode in ('w', 'wb'):
        return Aifc_write(f)
    else:
        raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
openfp = open # B/W compatibility
if __name__ == '__main__':
    # Command-line smoke test: print an AIFF file's parameters and, when
    # a second argument is given, copy the audio into that file.
    import sys
    if not sys.argv[1:]:
        sys.argv.append('/usr/demos/data/audio/bach.aiff')
    fn = sys.argv[1]
    f = open(fn, 'r')
    print "Reading", fn
    print "nchannels =", f.getnchannels()
    print "nframes =", f.getnframes()
    print "sampwidth =", f.getsampwidth()
    print "framerate =", f.getframerate()
    print "comptype =", f.getcomptype()
    print "compname =", f.getcompname()
    if sys.argv[2:]:
        gn = sys.argv[2]
        print "Writing", gn
        g = open(gn, 'w')
        g.setparams(f.getparams())
        # copy the audio in 1024-frame chunks
        while 1:
            data = f.readframes(1024)
            if not data:
                break
            g.writeframes(data)
        g.close()
        f.close()
        print "Done."
| gpl-2.0 |
tsufiev/horizon | openstack_dashboard/dashboards/project/access_and_security/security_groups/urls.py | 65 | 1367 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.access_and_security.\
security_groups import views
# URL routes for the security-groups panel: create, detail, add-rule and
# update views, each (except create) keyed by the security group's id.
urlpatterns = patterns(
    '',
    url(r'^create/$', views.CreateView.as_view(), name='create'),
    url(r'^(?P<security_group_id>[^/]+)/$',
        views.DetailView.as_view(),
        name='detail'),
    url(r'^(?P<security_group_id>[^/]+)/add_rule/$',
        views.AddRuleView.as_view(),
        name='add_rule'),
    url(r'^(?P<security_group_id>[^/]+)/update/$',
        views.UpdateView.as_view(),
        name='update')
)
| apache-2.0 |
SohKai/ChronoLogger | web/flask/lib/python2.7/site-packages/werkzeug/_internal.py | 301 | 13713 | # -*- coding: utf-8 -*-
"""
werkzeug._internal
~~~~~~~~~~~~~~~~~~
This module provides internally used helpers and constants.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import string
import inspect
from weakref import WeakKeyDictionary
from datetime import datetime, date
from itertools import chain
from werkzeug._compat import iter_bytes, text_type, BytesIO, int_to_byte, \
range_type, to_native
_logger = None
_empty_stream = BytesIO()
_signature_cache = WeakKeyDictionary()
_epoch_ord = date(1970, 1, 1).toordinal()
_cookie_params = set((b'expires', b'path', b'comment',
b'max-age', b'secure', b'httponly',
b'version'))
_legal_cookie_chars = (string.ascii_letters +
string.digits +
u"!#$%&'*+-.^_`|~:").encode('ascii')
_cookie_quoting_map = {
b',' : b'\\054',
b';' : b'\\073',
b'"' : b'\\"',
b'\\' : b'\\\\',
}
for _i in chain(range_type(32), range_type(127, 256)):
_cookie_quoting_map[int_to_byte(_i)] = ('\\%03o' % _i).encode('latin1')
_octal_re = re.compile(b'\\\\[0-3][0-7][0-7]')
_quote_re = re.compile(b'[\\\\].')
_legal_cookie_chars_re = b'[\w\d!#%&\'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]'
_cookie_re = re.compile(b"""(?x)
(?P<key>[^=]+)
\s*=\s*
(?P<val>
"(?:[^\\\\"]|\\\\.)*" |
(?:.*?)
)
\s*;
""")
class _Missing(object):
    """Sentinel type; its single instance `_missing` marks a missing value."""
    def __repr__(self):
        return 'no value'
    def __reduce__(self):
        # pickle back to the module-level singleton
        return '_missing'
_missing = _Missing()
def _get_environ(obj):
env = getattr(obj, 'environ', obj)
assert isinstance(env, dict), \
'%r is not a WSGI environment (has to be a dict)' % type(obj).__name__
return env
def _log(type, message, *args, **kwargs):
    """Log into the internal werkzeug logger."""
    # `type` is the logger method name ('info', 'warning', ...).
    global _logger
    if _logger is None:
        import logging
        _logger = logging.getLogger('werkzeug')
        # Only set up a default log handler if the
        # end-user application didn't set anything up.
        if not logging.root.handlers and _logger.level == logging.NOTSET:
            _logger.setLevel(logging.INFO)
            handler = logging.StreamHandler()
            _logger.addHandler(handler)
    getattr(_logger, type)(message.rstrip(), *args, **kwargs)
def _parse_signature(func):
    """Return a signature object for the function."""
    # bound/unbound methods: inspect the underlying function (Python 2)
    if hasattr(func, 'im_func'):
        func = func.im_func
    # if we have a cached validator for this function, return it
    parse = _signature_cache.get(func)
    if parse is not None:
        return parse
    # inspect the function signature and collect all the information
    positional, vararg_var, kwarg_var, defaults = inspect.getargspec(func)
    defaults = defaults or ()
    arg_count = len(positional)
    # build (name, has_default, default) triples for each positional arg
    arguments = []
    for idx, name in enumerate(positional):
        if isinstance(name, list):
            raise TypeError('cannot parse functions that unpack tuples '
                            'in the function signature')
        try:
            default = defaults[idx - arg_count]
        except IndexError:
            param = (name, False, None)
        else:
            param = (name, True, default)
        arguments.append(param)
    arguments = tuple(arguments)
    def parse(args, kwargs):
        # Map (args, kwargs) onto the signature collected above; returns
        # (new_args, kwargs, missing, extra, extra_positional,
        #  arguments, vararg_var, kwarg_var).
        new_args = []
        missing = []
        extra = {}
        # consume as many arguments as positional as possible
        for idx, (name, has_default, default) in enumerate(arguments):
            try:
                new_args.append(args[idx])
            except IndexError:
                try:
                    new_args.append(kwargs.pop(name))
                except KeyError:
                    if has_default:
                        new_args.append(default)
                    else:
                        missing.append(name)
            else:
                if name in kwargs:
                    # same name given both positionally and by keyword
                    extra[name] = kwargs.pop(name)
        # handle extra arguments
        extra_positional = args[arg_count:]
        if vararg_var is not None:
            new_args.extend(extra_positional)
            extra_positional = ()
        # NOTE(review): 'not kwarg_var is not None' is equivalent to
        # 'kwarg_var is None' (collect leftovers only when the function
        # has no **kwargs catch-all) -- confirm that is the intent.
        if kwargs and not kwarg_var is not None:
            extra.update(kwargs)
            kwargs = {}
        return new_args, kwargs, missing, extra, extra_positional, \
               arguments, vararg_var, kwarg_var
    _signature_cache[func] = parse
    return parse
def _date_to_unix(arg):
    """Converts a timetuple, integer or datetime object into the seconds from
    epoch in utc.
    """
    if isinstance(arg, datetime):
        arg = arg.utctimetuple()
    elif isinstance(arg, (int, long, float)):
        # NOTE(review): `long` only exists on Python 2; this branch would
        # raise NameError on Python 3 -- confirm intended support matrix.
        return int(arg)
    year, month, day, hour, minute, second = arg[:6]
    # days since the Unix epoch, then fold in the time-of-day fields
    days = date(year, month, 1).toordinal() - _epoch_ord + day - 1
    hours = days * 24 + hour
    minutes = hours * 60 + minute
    seconds = minutes * 60 + second
    return seconds
class _DictAccessorProperty(object):
    """Baseclass for `environ_property` and `header_property`."""
    # Subclasses provide lookup(obj) returning the backing dict.
    read_only = False
    def __init__(self, name, default=None, load_func=None, dump_func=None,
                 read_only=None, doc=None):
        # name: key in the underlying dict; load_func/dump_func convert
        # between the stored value and the Python-level value.
        self.name = name
        self.default = default
        self.load_func = load_func
        self.dump_func = dump_func
        if read_only is not None:
            self.read_only = read_only
        self.__doc__ = doc
    def __get__(self, obj, type=None):
        # class-level access returns the descriptor itself
        if obj is None:
            return self
        storage = self.lookup(obj)
        if self.name not in storage:
            return self.default
        rv = storage[self.name]
        if self.load_func is not None:
            try:
                rv = self.load_func(rv)
            except (ValueError, TypeError):
                # unparseable stored values fall back to the default
                rv = self.default
        return rv
    def __set__(self, obj, value):
        if self.read_only:
            raise AttributeError('read only property')
        if self.dump_func is not None:
            value = self.dump_func(value)
        self.lookup(obj)[self.name] = value
    def __delete__(self, obj):
        if self.read_only:
            raise AttributeError('read only property')
        # deleting a missing key is a no-op
        self.lookup(obj).pop(self.name, None)
    def __repr__(self):
        return '<%s %s>' % (
            self.__class__.__name__,
            self.name
        )
def _cookie_quote(b):
    # Quote a cookie value (bytes): characters outside the legal set are
    # escaped via _cookie_quoting_map and the whole value is wrapped in
    # double quotes; fully-legal values are returned unchanged.
    buf = bytearray()
    all_legal = True
    _lookup = _cookie_quoting_map.get
    _push = buf.extend
    for char in iter_bytes(b):
        if char not in _legal_cookie_chars:
            all_legal = False
            char = _lookup(char, char)
        _push(char)
    if all_legal:
        return bytes(buf)
    return bytes(b'"' + buf + b'"')
def _cookie_unquote(b):
    # Inverse of _cookie_quote: strip surrounding double quotes and decode
    # backslash escapes (both \" style and \ooo octal escapes).
    if len(b) < 2:
        return b
    if b[:1] != b'"' or b[-1:] != b'"':
        # not a quoted value; return unchanged
        return b
    b = b[1:-1]
    i = 0
    n = len(b)
    rv = bytearray()
    _push = rv.extend
    while 0 <= i < n:
        # locate the next escape of either kind
        o_match = _octal_re.search(b, i)
        q_match = _quote_re.search(b, i)
        if not o_match and not q_match:
            # no more escapes: copy the remainder verbatim
            rv.extend(b[i:])
            break
        j = k = -1
        if o_match:
            j = o_match.start(0)
        if q_match:
            k = q_match.start(0)
        if q_match and (not o_match or k < j):
            # backslash-quoted single character, e.g. \" -> "
            _push(b[i:k])
            _push(b[k + 1:k + 2])
            i = k + 2
        else:
            # octal escape, e.g. \073 -> ';'
            _push(b[i:j])
            rv.append(int(b[j + 1:j + 4], 8))
            i = j + 4
    return bytes(rv)
def _cookie_parse_impl(b):
    """Lowlevel cookie parsing facility that operates on bytes."""
    i = 0
    n = len(b)
    while i < n:
        # the appended b';' lets _cookie_re terminate the final pair
        match = _cookie_re.search(b + b';', i)
        if not match:
            break
        key = match.group('key').strip()
        value = match.group('val')
        i = match.end(0)
        # Ignore parameters. We have no interest in them.
        if key.lower() not in _cookie_params:
            yield _cookie_unquote(key), _cookie_unquote(value)
def _encode_idna(domain):
    """Encode a domain (text or bytes) to ASCII bytes, using IDNA if needed."""
    # If we're given bytes, make sure they fit into ASCII
    if not isinstance(domain, text_type):
        domain.decode('ascii')  # validation only; raises on non-ASCII
        return domain
    # Otherwise check if it's already ascii, then return
    try:
        return domain.encode('ascii')
    except UnicodeError:
        pass
    # Otherwise encode each part separately
    parts = domain.split('.')
    for idx, part in enumerate(parts):
        parts[idx] = part.encode('idna')
    return b'.'.join(parts)
def _decode_idna(domain):
    """Decode an IDNA/punycode domain (text or bytes) to a unicode string."""
    # If the input is a string try to encode it to ascii to
    # do the idna decoding. if that fails because of an
    # unicode error, then we already have a decoded idna domain
    if isinstance(domain, text_type):
        try:
            domain = domain.encode('ascii')
        except UnicodeError:
            return domain
    # Decode each part separately. If a part fails, try to
    # decode it with ascii and silently ignore errors. This makes
    # most sense because the idna codec does not have error handling
    parts = domain.split(b'.')
    for idx, part in enumerate(parts):
        try:
            parts[idx] = part.decode('idna')
        except UnicodeError:
            parts[idx] = part.decode('ascii', 'ignore')
    return '.'.join(parts)
def _make_cookie_domain(domain):
if domain is None:
return None
domain = _encode_idna(domain)
if b':' in domain:
domain = domain.split(b':', 1)[0]
if b'.' in domain:
return domain
raise ValueError(
'Setting \'domain\' for a cookie on a server running localy (ex: '
'localhost) is not supportted by complying browsers. You should '
'have something like: \'127.0.0.1 localhost dev.localhost\' on '
'your hosts file and then point your server to run on '
'\'dev.localhost\' and also set \'domain\' for \'dev.localhost\''
)
def _easteregg(app=None):
"""Like the name says. But who knows how it works?"""
def bzzzzzzz(gyver):
import base64
import zlib
return zlib.decompress(base64.b64decode(gyver)).decode('ascii')
gyver = u'\n'.join([x + (77 - len(x)) * u' ' for x in bzzzzzzz(b'''
eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m
9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz
4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY
jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc
q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5
jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317
8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ
v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r
XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu
LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk
iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI
tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE
1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI
GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN
Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A
QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG
8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu
jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz
DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8
MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4
GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i
RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi
Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c
NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS
pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ
sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm
p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ
krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/
nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt
mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p
7f2zLkGNv8b191cD/3vs9Q833z8t''').splitlines()])
def easteregged(environ, start_response):
def injecting_start_response(status, headers, exc_info=None):
headers.append(('X-Powered-By', 'Werkzeug'))
return start_response(status, headers, exc_info)
if app is not None and environ.get('QUERY_STRING') != 'macgybarchakku':
return app(environ, injecting_start_response)
injecting_start_response('200 OK', [('Content-Type', 'text/html')])
return [(u'''
<!DOCTYPE html>
<html>
<head>
<title>About Werkzeug</title>
<style type="text/css">
body { font: 15px Georgia, serif; text-align: center; }
a { color: #333; text-decoration: none; }
h1 { font-size: 30px; margin: 20px 0 10px 0; }
p { margin: 0 0 30px 0; }
pre { font: 11px 'Consolas', 'Monaco', monospace; line-height: 0.95; }
</style>
</head>
<body>
<h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1>
<p>the Swiss Army knife of Python web development.</p>
<pre>%s\n\n\n</pre>
</body>
</html>''' % gyver).encode('latin1')]
return easteregged
| mit |
abaditsegay/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/copy_reg.py | 442 | 6800 | """Helper to provide extensibility for pickle/cPickle.
This is only useful to add pickle support for extension types defined in
C, not for instances of user-defined classes.
"""
from types import ClassType as _ClassType
__all__ = ["pickle", "constructor",
"add_extension", "remove_extension", "clear_extension_cache"]
dispatch_table = {}
def pickle(ob_type, pickle_function, constructor_ob=None):
    """Register pickle_function as the reduction function for ob_type."""
    # old-style (classic) classes are handled by pickle itself
    if type(ob_type) is _ClassType:
        raise TypeError("copy_reg is not intended for use with classes")
    if not hasattr(pickle_function, '__call__'):
        raise TypeError("reduction functions must be callable")
    dispatch_table[ob_type] = pickle_function
    # The constructor_ob function is a vestige of safe for unpickling.
    # There is no reason for the caller to pass it anymore.
    if constructor_ob is not None:
        constructor(constructor_ob)
def constructor(object):
    """Validate that *object* is usable as an unpickling constructor."""
    if hasattr(object, '__call__'):
        return
    raise TypeError("constructors must be callable")
# Example: provide pickling support for complex numbers.
try:
    complex
except NameError:
    # interpreter built without complex support
    pass
else:
    def pickle_complex(c):
        # reduce a complex to its constructor plus (real, imag) args
        return complex, (c.real, c.imag)
    pickle(complex, pickle_complex, complex)
# Support for pickling new-style objects
def _reconstructor(cls, base, state):
if base is object:
obj = object.__new__(cls)
else:
obj = base.__new__(cls, state)
if base.__init__ != object.__init__:
base.__init__(obj, state)
return obj
_HEAPTYPE = 1<<9
# Python code for object.__reduce_ex__ for protocols 0 and 1
def _reduce_ex(self, proto):
    """Fallback implementation of object.__reduce_ex__ for protocols 0/1."""
    assert proto < 2
    # find the closest statically-allocated (non-heap, usually built-in)
    # base type in the MRO; that determines how to rebuild the object
    for base in self.__class__.__mro__:
        if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
            break
    else:
        base = object # not really reachable
    if base is object:
        state = None
    else:
        if base is self.__class__:
            raise TypeError, "can't pickle %s objects" % base.__name__
        # capture the built-in base's value (e.g. int(self))
        state = base(self)
    args = (self.__class__, base, state)
    try:
        getstate = self.__getstate__
    except AttributeError:
        if getattr(self, "__slots__", None):
            raise TypeError("a class that defines __slots__ without "
                            "defining __getstate__ cannot be pickled")
        try:
            dict = self.__dict__
        except AttributeError:
            dict = None
    else:
        dict = getstate()
    if dict:
        return _reconstructor, args, dict
    else:
        return _reconstructor, args
# Helper for __reduce_ex__ protocol 2
def __newobj__(cls, *args):
    # Recreate an object via cls.__new__; pickled by name in protocol 2.
    return cls.__new__(cls, *args)
def _slotnames(cls):
    """Return a list of slot names for a given class.

    This needs to find slots defined by the class and its bases, so we
    can't simply return the __slots__ attribute.  We must walk down
    the Method Resolution Order and concatenate the __slots__ of each
    class found there.  (This assumes classes don't modify their
    __slots__ attribute to misrepresent their slots after the class is
    defined.)
    """
    # Get the value from a cache in the class if possible
    names = cls.__dict__.get("__slotnames__")
    if names is not None:
        return names
    # Not cached -- calculate the value
    names = []
    if not hasattr(cls, "__slots__"):
        # This class has no slots
        pass
    else:
        # Slots found -- gather slot names from all base classes
        for c in cls.__mro__:
            if "__slots__" in c.__dict__:
                slots = c.__dict__['__slots__']
                # if class has a single slot, it can be given as a string
                if isinstance(slots, basestring):
                    slots = (slots,)
                for name in slots:
                    # special descriptors
                    if name in ("__dict__", "__weakref__"):
                        continue
                    # mangled names
                    elif name.startswith('__') and not name.endswith('__'):
                        # private slots are stored under their mangled name
                        names.append('_%s%s' % (c.__name__, name))
                    else:
                        names.append(name)
    # Cache the outcome in the class if at all possible
    try:
        cls.__slotnames__ = names
    except:
        pass # But don't die if we can't
    return names
# A registry of extension codes. This is an ad-hoc compression
# mechanism. Whenever a global reference to <module>, <name> is about
# to be pickled, the (<module>, <name>) tuple is looked up here to see
# if it is a registered extension code for it. Extension codes are
# universal, so that the meaning of a pickle does not depend on
# context. (There are also some codes reserved for local use that
# don't have this restriction.) Codes are positive ints; 0 is
# reserved.
_extension_registry = {} # key -> code
_inverted_registry = {} # code -> key
_extension_cache = {} # code -> object
# Don't ever rebind those names: cPickle grabs a reference to them when
# it's initialized, and won't see a rebinding.
def add_extension(module, name, code):
    """Register an extension code."""
    code = int(code)
    # valid codes are positive 31-bit integers; 0 is reserved
    if not 1 <= code <= 0x7fffffff:
        raise ValueError, "code out of range"
    key = (module, name)
    if (_extension_registry.get(key) == code and
        _inverted_registry.get(code) == key):
        return # Redundant registrations are benign
    if key in _extension_registry:
        raise ValueError("key %s is already registered with code %s" %
                         (key, _extension_registry[key]))
    if code in _inverted_registry:
        raise ValueError("code %s is already in use for key %s" %
                         (code, _inverted_registry[code]))
    # keep the forward and inverse mappings in sync
    _extension_registry[key] = code
    _inverted_registry[code] = key
def remove_extension(module, name, code):
    """Unregister an extension code.  For testing only."""
    key = (module, name)
    # the pair must be registered consistently in both directions
    if (_extension_registry.get(key) != code or
        _inverted_registry.get(code) != key):
        raise ValueError("key %s is not registered with code %s" %
                         (key, code))
    del _extension_registry[key]
    del _inverted_registry[code]
    # drop any cached object for this code as well
    _extension_cache.pop(code, None)
def clear_extension_cache():
    """Drop all cached extension objects (the registries are untouched)."""
    _extension_cache.clear()
# Standard extension code assignments
# Reserved ranges
# First Last Count Purpose
# 1 127 127 Reserved for Python standard library
# 128 191 64 Reserved for Zope
# 192 239 48 Reserved for 3rd parties
# 240 255 16 Reserved for private use (will never be assigned)
# 256 Inf Inf Reserved for future assignment
# Extension codes are assigned by the Python Software Foundation.
| apache-2.0 |
edisonlz/fruit | web_project/base/site-packages/django/contrib/gis/tests/utils.py | 114 | 1698 | from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
# function that will pass a test.
def pass_test(*args):
    """No-op stand-in used to skip tests on unsupported spatial backends."""
    return None
def no_backend(test_func, backend):
    "Use this decorator to disable test on specified backend."
    # Compare the short engine name (last dotted component) of the default
    # database against `backend`; substitute the no-op test when it matches.
    if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1] == backend:
        return pass_test
    else:
        return test_func
# Decorators to disable entire test functions for specific
# spatial backends.
# Each returns a no-op test when the default backend matches its name.
def no_oracle(func): return no_backend(func, 'oracle')
def no_postgis(func): return no_backend(func, 'postgis')
def no_mysql(func): return no_backend(func, 'mysql')
def no_spatialite(func): return no_backend(func, 'spatialite')
# Shortcut booleans to omit only portions of tests.
_default_db = settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1]
oracle = _default_db == 'oracle'
postgis = _default_db == 'postgis'
mysql = _default_db == 'mysql'
spatialite = _default_db == 'spatialite'
HAS_SPATIALREFSYS = True
# Import the backend-specific SpatialRefSys model for the default
# database; fall back to None when no spatial backend is configured.
if oracle and 'gis' in settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE']:
    from django.contrib.gis.db.backends.oracle.models import SpatialRefSys
elif postgis:
    from django.contrib.gis.db.backends.postgis.models import SpatialRefSys
elif spatialite:
    from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys
else:
    HAS_SPATIALREFSYS = False
    SpatialRefSys = None
def has_spatial_db():
    """Return True when every configured database uses a GeoDjango backend."""
    # All databases must have spatial backends to run GeoDjango tests.
    spatial_dbs = [name for name, db_dict in settings.DATABASES.items()
                   if db_dict['ENGINE'].startswith('django.contrib.gis')]
    return len(spatial_dbs) == len(settings.DATABASES)
HAS_SPATIAL_DB = has_spatial_db()
| apache-2.0 |
skykiny/shadowsocks | utils/autoban.py | 1033 | 2156 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import argparse
if __name__ == '__main__':
    # Read a shadowsocks log stream on stdin and drop traffic from IPs
    # that repeatedly fail the protocol handshake.
    parser = argparse.ArgumentParser(description='See README')
    parser.add_argument('-c', '--count', default=3, type=int,
                        help='with how many failure times it should be '
                             'considered as an attack')
    config = parser.parse_args()
    ips = {}  # failure count per source IP
    banned = set()  # IPs that already received an iptables DROP rule
    for line in sys.stdin:
        # shadowsocks logs this message on malformed handshakes
        if 'can not parse header when' in line:
            ip = line.split()[-1].split(':')[0]
            if ip not in ips:
                ips[ip] = 1
                print(ip)
                sys.stdout.flush()
            else:
                ips[ip] += 1
            if ip not in banned and ips[ip] >= config.count:
                banned.add(ip)
                # NOTE(review): `ip` is parsed from untrusted log text and
                # interpolated into a shell command -- consider validating
                # it as an IP address before passing it to os.system.
                cmd = 'iptables -A INPUT -s %s -j DROP' % ip
                print(cmd, file=sys.stderr)
                sys.stderr.flush()
                os.system(cmd)
| apache-2.0 |
virgree/odoo | openerp/cli/__init__.py | 135 | 2016 | import logging
import sys
import os
import openerp
from openerp import tools
from openerp.modules import module
_logger = logging.getLogger(__name__)
commands = {}
class CommandType(type):
    """Metaclass that auto-registers Command subclasses in `commands`."""
    def __init__(cls, name, bases, attrs):
        super(CommandType, cls).__init__(name, bases, attrs)
        # NOTE(review): getattr(cls, name, ...) looks up an attribute
        # *named after the class* (e.g. 'Help'), which normally does not
        # exist, so the lowercased class name is used; was
        # getattr(cls, 'name', ...) intended?  Confirm upstream.
        name = getattr(cls, name, cls.__name__.lower())
        cls.name = name
        if name != 'command':
            commands[name] = cls
class Command(object):
    """Subclass this class to define new openerp subcommands """
    __metaclass__ = CommandType
    def run(self, args):
        # Override in subclasses; `args` is the remaining argv list.
        pass
class Help(Command):
    """Display the list of available commands"""
    def run(self, args):
        print "Available commands:\n"
        # align command names on the longest registered name
        padding = max([len(k) for k in commands.keys()]) + 2
        for k, v in commands.items():
            # the class docstring doubles as the one-line description
            print " %s%s" % (k.ljust(padding, ' '), v.__doc__ or '')
        print "\nUse '%s <command> --help' for individual command help." % sys.argv[0].split(os.path.sep)[-1]
import server
import deploy
import scaffold
import start
def main():
    """Entry point: dispatch sys.argv to a registered Command subclass."""
    args = sys.argv[1:]
    # The only shared option is '--addons-path=' needed to discover additional
    # commands from modules
    if len(args) > 1 and args[0].startswith('--addons-path=') and not args[1].startswith("-"):
        # parse only the addons-path, do not setup the logger...
        tools.config._parse_config([args[0]])
        args = args[1:]
    # Default legacy command
    command = "server"
    # Subcommand discovery
    if len(args) and not args[0].startswith("-"):
        # importing the addon modules lets them register extra commands
        logging.disable(logging.CRITICAL)
        for m in module.get_modules():
            m = 'openerp.addons.' + m
            __import__(m)
            #try:
            #except Exception, e:
            # raise
            # print e
        logging.disable(logging.NOTSET)
        command = args[0]
        args = args[1:]
    if command in commands:
        o = commands[command]()
        o.run(args)
# vim:et:ts=4:sw=4:
| agpl-3.0 |
jairideout/scikit-bio | skbio/diversity/_base.py | 6 | 3274 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from skbio.tree import DuplicateNodeError, MissingNodeError
def _validate_counts_vector(counts, suppress_cast=False):
"""Validate and convert input to an acceptable counts vector type.
Note: may not always return a copy of `counts`!
"""
counts = np.asarray(counts)
if not suppress_cast:
counts = counts.astype(int, casting='safe', copy=False)
if counts.ndim != 1:
raise ValueError("Only 1-D vectors are supported.")
elif (counts < 0).any():
raise ValueError("Counts vector cannot contain negative values.")
return counts
def _validate_counts_vectors(*args, **kwargs):
    """Validate one or more counts vectors, requiring equal lengths."""
    # py2-compatible way to accept a keyword argument alongside *args
    # (derived from SO answer: http://stackoverflow.com/a/15302038/3424666)
    suppress_cast = kwargs.pop('suppress_cast', False)
    validated = [_validate_counts_vector(counts, suppress_cast)
                 for counts in args]
    if len(set(len(counts) for counts in args)) > 1:
        raise ValueError("Input vectors u_counts and v_counts must be of "
                         "equal length.")
    return validated
def _validate_otu_ids_and_tree(counts, otu_ids, tree):
    """Validate that counts, otu_ids and tree are mutually consistent.

    Raises ValueError, DuplicateNodeError or MissingNodeError on the
    first inconsistency found; returns None when everything checks out.
    """
    # all otu_ids are unique
    # len(otu_ids) == len(counts)
    len_otu_ids = len(otu_ids)
    set_otu_ids = set(otu_ids)
    if len_otu_ids != len(set_otu_ids):
        raise ValueError("OTU IDs vector cannot contain duplicated ids.")
    if len(counts) != len_otu_ids:
        raise ValueError("OTU IDs vector must be the same length as counts "
                         "vector(s).")
    # the tree is rooted
    if len(tree.root().children) > 2:
        # this is an imperfect check for whether the tree is rooted or not.
        # can this be improved?
        raise ValueError("Tree must be rooted.")
    # all nodes (except the root node) have corresponding branch lengths
    # all tip names in tree are unique
    # all otu_ids correspond to tip names in tree
    branch_lengths = []
    tip_names = []
    for e in tree.traverse():
        if not e.is_root():
            branch_lengths.append(e.length)
        if e.is_tip():
            tip_names.append(e.name)
    set_tip_names = set(tip_names)
    if len(tip_names) != len(set_tip_names):
        raise DuplicateNodeError("All tip names must be unique.")
    # a missing branch length shows up as None on the node
    if np.array([l is None for l in branch_lengths]).any():
        raise ValueError("All non-root nodes in tree must have a branch "
                         "length.")
    missing_tip_names = set_otu_ids - set_tip_names
    if missing_tip_names != set():
        raise MissingNodeError("All otu_ids must be present as tip names in "
                               "tree. Tree is missing tips with names: %s"
                               % " ".join(missing_tip_names))
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.