repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
ecosoft-odoo/odoo | addons/website_gengo/__init__.py | 316 | 1024 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
import models
| agpl-3.0 |
maxlikely/scikit-learn | sklearn/datasets/svmlight_format.py | 1 | 13250 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: Simple BSD.
from bz2 import BZ2File
from contextlib import closing
import gzip
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..utils import atleast2d_or_csr
def load_svmlight_file(f, n_features=None, dtype=np.float64,
                       multilabel=False, zero_based="auto", query_id=False):
    """Load a dataset in svmlight / libsvm format into a sparse CSR matrix.

    The svmlight format is text based, with one sample per line. Zero
    valued features are not stored, which makes the format well suited to
    sparse datasets. The first element of each line holds the target
    variable to predict. It is the default format of both the svmlight
    and the libsvm command line programs.

    Parsing a text based source can be expensive. When working repeatedly
    on the same dataset, it is recommended to wrap this loader with
    joblib.Memory.cache to store a memmapped backup of the CSR results of
    the first call and benefit from the near instantaneous loading of
    memmapped structures for the subsequent calls.

    This implementation is naive: it does allocate too much memory and is
    slow since written in python. On large datasets it is recommended to
    use an optimized loader such as:
    https://github.com/mblondel/svmlight-loader

    In case the file contains pairwise preference constraints (known as
    "qid" in the svmlight format) these are ignored unless the query_id
    parameter is set to True. These constraints can be used to constrain
    the combination of samples when using pairwise loss functions (as is
    the case in some learning to rank problems) so that only pairs with
    the same query_id value are considered.

    Parameters
    ----------
    f: {str, file-like, int}
        (Path to) a file to load. If a path ends in ".gz" or ".bz2", it
        will be uncompressed on the fly. If an integer is passed, it is
        assumed to be a file descriptor. A file-like or file descriptor
        will not be closed by this function. A file-like object must be
        opened in binary mode.

    n_features: int or None
        The number of features to use. If None, it will be inferred. This
        argument is useful to load several files that are subsets of a
        bigger sliced dataset: each subset might not have examples of
        every feature, hence the inferred shape might vary from one
        slice to another.

    multilabel: boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

    zero_based: boolean or "auto", optional
        Whether column indices in f are zero-based (True) or one-based
        (False). If column indices are one-based, they are transformed to
        zero-based to match Python/NumPy conventions. If set to "auto", a
        heuristic check is applied to determine this from the file
        contents. Both kinds of files occur "in the wild", but they are
        unfortunately not self-identifying. Using "auto" or True should
        always be safe.

    query_id: boolean, defaults to False
        If True, will return the query_id array for each file.

    Returns
    -------
    X: scipy.sparse matrix of shape (n_samples, n_features)

    y: ndarray of shape (n_samples,), or, in the multilabel case, a list
       of tuples of length n_samples.

    query_id: array of shape (n_samples,)
       query_id for each sample. Only returned when query_id is set to
       True.

    See also
    --------
    load_svmlight_files: similar function for loading multiple files in
        this format, enforcing the same number of features/columns on all
        of them.
    """
    # Delegate to the multi-file loader with a one-element list, then
    # flatten its flat result list into the documented tuple.
    loaded = load_svmlight_files([f], n_features, dtype, multilabel,
                                 zero_based, query_id)
    return tuple(loaded)
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
return gzip.open(f, "rb")
elif ext == ".bz2":
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
    """Parse *f* with the fast C loader, opening (and closing) it if it
    is a path rather than an already-open file-like object."""
    if hasattr(f, "read"):
        return _load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
    # XXX remove closing when Python 2.7+/3.1+ required
    with closing(_gen_open(f)) as stream:
        return _load_svmlight_file(stream, dtype, multilabel, zero_based,
                                   query_id)
def load_svmlight_files(files, n_features=None, dtype=np.float64,
                        multilabel=False, zero_based="auto", query_id=False):
    """Load dataset from multiple files in SVMlight format

    This function is equivalent to mapping load_svmlight_file over a list of
    files, except that the results are concatenated into a single, flat list
    and the samples vectors are constrained to all have the same number of
    features.

    In case the file contains a pairwise preference constraint (known
    as "qid" in the svmlight format) these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
    when using pairwise loss functions (as is the case in some
    learning to rank problems) so that only pairs with the same
    query_id value are considered.

    Parameters
    ----------
    files : iterable over {str, file-like, int}
        (Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
        be uncompressed on the fly. If an integer is passed, it is assumed to
        be a file descriptor. File-likes and file descriptors will not be
        closed by this function. File-like objects must be opened in binary
        mode.

    n_features: int or None
        The number of features to use. If None, it will be inferred from the
        maximum column index occurring in any of the files.

    multilabel: boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

    zero_based: boolean or "auto", optional
        Whether column indices in f are zero-based (True) or one-based
        (False). If column indices are one-based, they are transformed to
        zero-based to match Python/NumPy conventions.
        If set to "auto", a heuristic check is applied to determine this from
        the file contents. Both kinds of files occur "in the wild", but they
        are unfortunately not self-identifying. Using "auto" or True should
        always be safe.

    query_id: boolean, defaults to False
        If True, will return the query_id array for each file.

    Returns
    -------
    [X1, y1, ..., Xn, yn]
    where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
    If query_id is set to True, this will return instead [X1, y1, q1,
    ..., Xn, yn, qn] where (Xi, yi, qi) is the result from
    load_svmlight_file(files[i])

    Rationale
    ---------
    When fitting a model to a matrix X_train and evaluating it against a
    matrix X_test, it is essential that X_train and X_test have the same
    number of features (X_train.shape[1] == X_test.shape[1]). This may not
    be the case if you load the files individually with load_svmlight_file.

    See also
    --------
    load_svmlight_file
    """
    # Each element of r is the raw parser output for one file; the
    # unpacking loop below shows the layout:
    # (data, indices, indptr, labels, query_values).
    r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
         for f in files]

    # One-based heuristic: if every file's smallest column index is > 0,
    # assume the files are one-based and shift down to zero-based.
    # (``and`` binds tighter than ``or`` here, which is the intent.)
    if (zero_based is False
        or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
        for ind in r:
            indices = ind[1]
            # In-place shift, so the arrays stored in r are updated too.
            indices -= 1

    if n_features is None:
        # Infer matrix width from the largest column index in any file.
        n_features = max(ind[1].max() for ind in r) + 1

    result = []
    for data, indices, indptr, y, query_values in r:
        shape = (indptr.shape[0] - 1, n_features)
        X = sp.csr_matrix((data, indices, indptr), shape)
        X.sort_indices()
        result += X, y
        if query_id:
            result.append(query_values)

    return result
def _dump_svmlight(X, y, f, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype == np.float64:
value_pattern = u"%d:%0.16e"
else:
value_pattern = u"%d:%f"
if y.dtype.kind == 'i':
line_pattern = u"%d"
else:
line_pattern = u"%f"
if query_id is not None:
line_pattern += u" qid:%d"
line_pattern += u" %s\n"
if comment:
f.write("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__)
f.write("# Column indices are %s-based\n" % ["zero", "one"][one_based])
f.write("#\n")
f.writelines("# %s\n" % line for line in comment.splitlines())
for i in range(X.shape[0]):
s = " ".join([value_pattern % (j + one_based, X[i, j])
for j in X[i].nonzero()[is_sp]])
if query_id is not None:
feat = (y[i], query_id[i], s)
else:
feat = (y[i], s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None):
    """Dump the dataset in svmlight / libsvm file format.

    This format is a text-based format, with one sample per line. It does
    not store zero valued features hence is suitable for sparse dataset.
    The first element of each line can be used to store a target variable
    to predict.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape = [n_samples]
        Target values.

    f : string or file-like in binary mode
        If string, specifies the path that will contain the data.
        If file-like, data will be written to f. f should be opened in binary
        mode.

    zero_based : boolean, optional
        Whether column indices should be written zero-based (True) or one-based
        (False).

    comment : string, optional
        Comment to insert at the top of the file. This should be either a
        Unicode string, which will be encoded as UTF-8, or an ASCII byte
        string.
        If a comment is given, then it will be preceded by one that identifies
        the file as having been dumped by scikit-learn. Note that not all
        tools grok comments in SVMlight files.

    query_id : array-like, shape = [n_samples]
        Array containing pairwise preference constraints (qid in svmlight
        format).
    """
    if comment is not None:
        # Convert comment string to list of lines in UTF-8.
        # If a byte string is passed, then check whether it's ASCII;
        # if a user wants to get fancy, they'll have to decode themselves.
        # Avoid mention of str and unicode types for Python 3.x compat.
        if isinstance(comment, bytes):
            comment.decode("ascii")     # just for the exception
        else:
            comment = comment.encode("utf-8")
        # NOTE(review): ``comment`` is bytes at this point, so on Python 3
        # this str-in-bytes test raises TypeError instead of detecting a
        # NUL byte; it only behaves as intended on Python 2 -- confirm the
        # supported Python versions.
        if "\0" in comment:
            raise ValueError("comment string contains NUL byte")

    y = np.asarray(y)
    if y.ndim != 1:
        raise ValueError("expected y of shape (n_samples,), got %r"
                         % (y.shape,))

    Xval = atleast2d_or_csr(X)
    if Xval.shape[0] != y.shape[0]:
        raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
                         " %r and %r instead." % (Xval.shape[0], y.shape[0]))

    # We had some issues with CSR matrices with unsorted indices (e.g. #1501),
    # so sort them here, but first make sure we don't modify the user's X.
    # TODO We can do this cheaper; sorted_indices copies the whole matrix.
    if Xval is X and hasattr(Xval, "sorted_indices"):
        # atleast2d_or_csr returned X itself: take a sorted copy.
        X = Xval.sorted_indices()
    else:
        # Xval is already a fresh object; safe to sort in place.
        X = Xval
        if hasattr(X, "sort_indices"):
            X.sort_indices()

    if query_id is not None:
        # One qid per sample, written alongside the label.
        query_id = np.asarray(query_id)
        if query_id.shape[0] != y.shape[0]:
            raise ValueError("expected query_id of shape (n_samples,), got %r"
                             % (query_id.shape,))

    one_based = not zero_based

    if hasattr(f, "write"):
        _dump_svmlight(X, y, f, one_based, comment, query_id)
    else:
        with open(f, "wb") as f:
            _dump_svmlight(X, y, f, one_based, comment, query_id)
| bsd-3-clause |
mhotwagner/abackend | abackend-env/lib/python3.5/site-packages/pip/_vendor/lockfile/mkdirlockfile.py | 478 | 3098 | from __future__ import absolute_import, division
import time
import os
import sys
import errno
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class MkdirLockFile(LockBase):
    """Lock file by creating a directory.

    The lock is represented by the existence of the directory
    ``self.lock_file``; a uniquely named marker file inside it records
    which host/thread/process owns the lock.
    """
    def __init__(self, path, threaded=True, timeout=None):
        """
        >>> lock = MkdirLockFile('somefile')
        >>> lock = MkdirLockFile('somefile', threaded=False)
        """
        LockBase.__init__(self, path, threaded, timeout)
        # Lock file itself is a directory.  Place the unique file name into
        # it.
        self.unique_name = os.path.join(self.lock_file,
                                        "%s.%s%s" % (self.hostname,
                                                     self.tname,
                                                     self.pid))

    def acquire(self, timeout=None):
        """Acquire the lock, polling until it is free or time runs out.

        Timeout semantics:
          * None -- use ``self.timeout`` (and if that is None, wait forever)
          * <= 0 -- raise AlreadyLocked immediately if the lock is held
          * > 0  -- wait up to that many seconds, then raise LockTimeout
        """
        # BUG FIX: the original used
        # ``timeout = timeout is not None and timeout or self.timeout``,
        # which silently replaced an explicit timeout of 0 (fail
        # immediately) with the instance default, because 0 is falsy.
        if timeout is None:
            timeout = self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        # Poll at a tenth of the timeout, with a 0.1 s default when
        # waiting indefinitely.
        if timeout is None:
            wait = 0.1
        else:
            wait = max(0, timeout / 10)

        while True:
            try:
                os.mkdir(self.lock_file)
            except OSError:
                err = sys.exc_info()[1]
                if err.errno == errno.EEXIST:
                    # Already locked.
                    if os.path.exists(self.unique_name):
                        # Already locked by me.
                        return
                    if timeout is not None and time.time() > end_time:
                        if timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            # Someone else has the lock.
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    time.sleep(wait)
                else:
                    # Couldn't create the lock for some other reason
                    raise LockFailed("failed to create %s" % self.lock_file)
            else:
                # We got the directory; drop our ownership marker inside it.
                open(self.unique_name, "wb").close()
                return

    def release(self):
        """Release the lock; raise NotLocked / NotMyLock when we don't
        actually hold it."""
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        elif not os.path.exists(self.unique_name):
            raise NotMyLock("%s is locked, but not by me" % self.path)
        os.unlink(self.unique_name)
        os.rmdir(self.lock_file)

    def is_locked(self):
        """Return True if anyone (including us) holds the lock."""
        return os.path.exists(self.lock_file)

    def i_am_locking(self):
        """Return True if this instance holds the lock."""
        return (self.is_locked() and
                os.path.exists(self.unique_name))

    def break_lock(self):
        """Forcibly remove the lock, regardless of who owns it."""
        if os.path.exists(self.lock_file):
            for name in os.listdir(self.lock_file):
                os.unlink(os.path.join(self.lock_file, name))
            os.rmdir(self.lock_file)
| mit |
jlspyaozhongkai/Uter | third_party_backup/Python-2.7.9/Demo/turtle/tdemo_tree.py | 4 | 1410 | #!/usr/bin/env python
""" turtle-example-suite:
tdemo_tree.py
Displays a 'breadth-first-tree' - in contrast
to the classical Logo tree drawing programs,
which use a depth-first-algorithm.
Uses:
(1) a tree-generator, where the drawing is
quasi the side-effect, whereas the generator
always yields None.
(2) Turtle-cloning: At each branching point
the current pen is cloned. So in the end
there are 1024 turtles.
"""
from turtle import Turtle, mainloop
from time import clock
def tree(plist, l, a, f):
    """Generate a breadth-first tree drawing as a side effect.

    plist -- list of pens (one per branch tip at the current level)
    l     -- length of the branch to draw at this level
    a     -- half of the angle between two child branches
    f     -- factor by which the branch is shortened from level to level

    Every pen is cloned at each branching point, so the pen population
    doubles per level.  The generator yields None at each step of the
    deeper levels; recursion stops once branches are 3 units or shorter.
    """
    if l > 3:
        next_level = []
        for pen in plist:
            pen.forward(l)
            twin = pen.clone()
            pen.left(a)
            twin.right(a)
            next_level.extend((pen, twin))
        for _ in tree(next_level, l * f, a, f):
            yield None
def maketree():
    """Create and position the root pen, then drive the tree generator
    to completion and report how many turtles exist afterwards."""
    pen = Turtle()
    pen.setundobuffer(None)
    pen.hideturtle()
    pen.speed(0)
    pen.tracer(30, 0)
    # Point upwards and move to the bottom of the screen before drawing.
    pen.left(90)
    pen.penup()
    pen.forward(-210)
    pen.pendown()
    drawing = tree([pen], 200, 65, 0.6375)
    for _ in drawing:
        pass
    print(len(pen.getscreen().turtles()))
def main():
    """Time the whole tree drawing and return a report string."""
    start = clock()
    maketree()
    elapsed = clock() - start
    return "done: %.2f sec." % elapsed
if __name__ == "__main__":
    # Run the demo standalone: draw the tree, print the elapsed time,
    # then hand control to the Tk event loop so the window stays open.
    msg = main()
    print msg
    mainloop()
| gpl-3.0 |
pixelated/pixelated-user-agent | service/pixelated/resources/users.py | 2 | 1200 | #
# Copyright (c) 2016 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
from pixelated.resources import respond_json_deferred, BaseResource, respond_json, UnAuthorizedResource
from twisted.web import server
class UsersResource(BaseResource):
    """Leaf resource reporting the number of online user sessions.

    Only admin requests get the count; everyone else receives the
    unauthorized response.
    """
    isLeaf = True

    def __init__(self, services_factory):
        BaseResource.__init__(self, services_factory)

    def render_GET(self, request):
        # Guard clause: non-admins are rejected up front.
        if not self.is_admin(request):
            return UnAuthorizedResource().render_GET(request)
        session_count = self._services_factory.online_sessions()
        return respond_json({"count": session_count}, request)
| agpl-3.0 |
danielballan/scikit-xray | skbeam/core/fitting/background.py | 12 | 8569 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# @author: Li Li (lili@bnl.gov) #
# created on 07/16/2014 #
# #
# Original code: #
# @author: Mirna Lerotic, 2nd Look Consulting #
# http://www.2ndlookconsulting.com/ #
# Copyright (c) 2013, Stefan Vogt, Argonne National Laboratory #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import scipy.signal
import numpy as np
# Tuning knobs for snip_method: smoothing-window size (con_val) and
# iteration count (iter_num), with and without spectral binning.
_defaults = {'con_val_no_bin': 3,
             'con_val_bin': 5,
             'iter_num_no_bin': 3,
             'iter_num_bin': 5}
def snip_method(spectrum,
e_off, e_lin, e_quad,
xmin=0, xmax=4096, epsilon=2.96,
width=0.5, decrease_factor=np.sqrt(2),
spectral_binning=None,
con_val=None,
iter_num=None,
width_threshold=0.5):
"""
use snip algorithm to obtain background
Parameters
----------
spectrum : array
intensity spectrum
e_off : float
energy calibration, such as e_off + e_lin * energy + e_quad * energy^2
e_lin : float
energy calibration, such as e_off + e_lin * energy + e_quad * energy^2
e_quad : float
energy calibration, such as e_off + e_lin * energy + e_quad * energy^2
xmin : float, optional
smallest index to define the range
xmax : float, optional
largest index to define the range
epsilon : float, optional
energy to create a hole-electron pair
for Ge 2.96, for Si 3.61 at 300K
needs to double check this value
width : int, optional
window size to adjust how much to shift background
decrease_factor : float, optional
gradually decrease of window size, default as sqrt(2)
spectral_binning : float, optional
bin the data into different size
con_val : int, optional
size of scipy.signal.boxcar to convolve the spectrum.
Default value is controlled by the keys `con_val_no_bin`
and `con_val_bin` in the defaults dictionary, depending
on if spectral_binning is used or not
iter_num : int, optional
Number of iterations.
Default value is controlled by the keys `iter_num_no_bin`
and `iter_num_bin` in the defaults dictionary, depending
on if spectral_binning is used or not
width_threshold : float, optional
stop point of the algorithm
Returns
-------
background : array
output results with peak removed
References
----------
.. [1] C.G. Ryan etc, "SNIP, a statistics-sensitive background
treatment for the quantitative analysis of PIXE spectra in
geoscience applications", Nuclear Instruments and Methods in
Physics Research Section B, vol. 34, 1998.
"""
# clean input a bit
if con_val is None:
if spectral_binning is None:
con_val = _defaults['con_val_no_bin']
else:
con_val = _defaults['con_val_bin']
if iter_num is None:
if spectral_binning is None:
iter_num = _defaults['iter_num_no_bin']
else:
iter_num = _defaults['iter_num_bin']
background = np.array(spectrum)
n_background = background.size
energy = np.arange(n_background, dtype=np.float)
if spectral_binning is not None:
energy = energy * spectral_binning
energy = e_off + energy * e_lin + energy**2 * e_quad
# transfer from std to fwhm
std_fwhm = 2 * np.sqrt(2 * np.log(2))
tmp = (e_off / std_fwhm)**2 + energy * epsilon * e_lin
tmp[tmp < 0] = 0
fwhm = std_fwhm * np.sqrt(tmp)
# smooth the background
s = scipy.signal.boxcar(con_val)
# For background remove, we only care about the central parts
# where there are peaks. On the boundary part, we don't care
# the accuracy so much. But we need to pay attention to edge
# effects in general convolution.
A = s.sum()
background = scipy.signal.convolve(background, s, mode='same')/A
window_p = width * fwhm / e_lin
if spectral_binning is not None and spectral_binning > 0:
window_p = window_p/2.
background = np.log(np.log(background + 1) + 1)
index = np.arange(n_background)
# FIRST SNIPPING
for j in range(iter_num):
lo_index = np.clip(index - window_p,
np.max([xmin, 0]),
np.min([xmax, n_background - 1]))
hi_index = np.clip(index + window_p,
np.max([xmin, 0]),
np.min([xmax, n_background - 1]))
temp = (background[lo_index.astype(np.int)] +
background[hi_index.astype(np.int)]) / 2.
bg_index = background > temp
background[bg_index] = temp[bg_index]
current_width = window_p
max_current_width = np.amax(current_width)
while max_current_width >= width_threshold:
lo_index = np.clip(index - current_width,
np.max([xmin, 0]),
np.min([xmax, n_background - 1]))
hi_index = np.clip(index + current_width,
np.max([xmin, 0]),
np.min([xmax, n_background - 1]))
temp = (background[lo_index.astype(np.int)] +
background[hi_index.astype(np.int)]) / 2.
bg_index = background > temp
background[bg_index] = temp[bg_index]
# decrease the width and repeat
current_width = current_width / decrease_factor
max_current_width = np.amax(current_width)
background = np.exp(np.exp(background) - 1) - 1
inf_ind = np.where(~np.isfinite(background))
background[inf_ind] = 0.0
return background
| bsd-3-clause |
LokiCoder/Sick-Beard | lib/hachoir_parser/audio/itunesdb.py | 90 | 17106 | """
iPod iTunesDB parser.
Documentation:
- http://ipodlinux.org/ITunesDB
Author: Romain HERAULT
Creation date: 19 august 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
UInt8, UInt16, UInt32, UInt64, TimestampMac32,
String, Float32, NullBytes, Enum)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.tools import humanDuration
from lib.hachoir_core.text_handler import displayHandler, filesizeHandler
# Maps iTunesDB sort-order codes to the column a playlist is sorted by
# (codes 2 and 19 are unknown / undocumented).
list_order={
    1 : "playlist order (manual sort order)",
    2 : "???",
    3 : "songtitle",
    4 : "album",
    5 : "artist",
    6 : "bitrate",
    7 : "genre",
    8 : "kind",
    9 : "date modified",
    10 : "track number",
    11 : "size",
    12 : "time",
    13 : "year",
    14 : "sample rate",
    15 : "comment",
    16 : "date added",
    17 : "equalizer",
    18 : "composer",
    19 : "???",
    20 : "play count",
    21 : "last played",
    22 : "disc number",
    23 : "my rating",
    24 : "release date",
    25 : "BPM",
    26 : "grouping",
    27 : "category",
    28 : "description",
    29 : "show",
    30 : "season",
    31 : "episode number"
    }
class DataObject(FieldSet):
    """A "mhod" (Data Object) record: a typed payload attached to track
    and playlist items -- most commonly a UTF-16 string such as a title,
    artist or file location."""
    # Maps the mhod "type" field to the kind of payload it carries.
    type_name={
        1:"Title",
        2:"Location",
        3:"Album",
        4:"Artist",
        5:"Genre",
        6:"Filetype",
        7:"EQ Setting",
        8:"Comment",
        9:"Category",
        12:"Composer",
        13:"Grouping",
        14:"Description text",
        15:"Podcast Enclosure URL",
        16:"Podcast RSS URL",
        17:"Chapter data",
        18:"Subtitle",
        19:"Show (for TV Shows only)",
        20:"Episode",
        21:"TV Network",
        50:"Smart Playlist Data",
        51:"Smart Playlist Rules",
        52:"Library Playlist Index",
        100:"Column info",
    }
    # Sort ordering used by type-52 (Library Playlist Index) objects.
    mhod52_sort_index_type_name={
        3:"Title",
        4:"Album, then Disk/Tracknumber, then Title",
        5:"Artist, then Album, then Disc/Tracknumber, then Title",
        7:"Genre, then Artist, then Album, then Disc/Tracknumber, then Title",
        8:"Composer, then Title"
    }
    def __init__(self, *args, **kw):
        FieldSet.__init__(self, *args, **kw)
        # Record size in bits, taken from the entry_length field (bytes).
        self._size = self["entry_length"].value *8
    def createFields(self):
        """Yield the mhod header fields, then the payload according to the
        "type" field, then padding up to entry_length."""
        yield String(self, "header_id", 4, "Data Object Header Markup (\"mhod\")", charset="ISO-8859-1")
        yield UInt32(self, "header_length", "Header Length")
        yield UInt32(self, "entry_length", "Entry Length")
        yield Enum(UInt32(self, "type", "type"),self.type_name)
        # Types 1-14 carry a length-prefixed UTF-16-LE string.
        if(self["type"].value<15):
            yield UInt32(self, "unknown[]")
            yield UInt32(self, "unknown[]")
            yield UInt32(self, "position", "Position")
            yield UInt32(self, "length", "String Length in bytes")
            yield UInt32(self, "unknown[]")
            yield UInt32(self, "unknown[]")
            yield String(self, "string", self["length"].value, "String Data", charset="UTF-16-LE")
        # Types 15-16 (podcast URLs) are UTF-8 filling the rest of the record.
        elif (self["type"].value<17):
            yield UInt32(self, "unknown[]")
            yield UInt32(self, "unknown[]")
            yield String(self, "string", self._size/8-self["header_length"].value, "String Data", charset="UTF-8")
        # Type 52: a pre-sorted table of indices into the track list.
        elif (self["type"].value == 52):
            yield UInt32(self, "unknown[]", "unk1")
            yield UInt32(self, "unknown[]", "unk2")
            yield Enum(UInt32(self, "sort_index_type", "Sort Index Type"),self.mhod52_sort_index_type_name)
            yield UInt32(self, "entry_count", "Entry Count")
            indexes_size = self["entry_count"].value*4
            padding_offset = self["entry_length"].value - indexes_size
            padding = self.seekByte(padding_offset, "header padding")
            if padding:
                yield padding
            for i in xrange(self["entry_count"].value):
                yield UInt32(self, "index["+str(i)+"]", "Index of the "+str(i)+"nth mhit")
        else:
            # Unknown payload types: skip over the header wholesale.
            padding = self.seekByte(self["header_length"].value, "header padding")
            if padding:
                yield padding
        padding = self.seekBit(self._size, "entry padding")
        if padding:
            yield padding
class TrackItem(FieldSet):
    """A "mhit" (Track Item) record: the fixed-layout metadata of one
    track, followed by its string data objects (mhod children)."""
    # Interpretation of the first "extended type" byte.
    x1_type_name={
        0:"AAC or CBR MP3",
        1:"VBR MP3"
    }
    # Interpretation of the second "extended type" byte.
    x2_type_name={
        0:"AAC",
        1:"MP3"
    }
    # Media type codes used by video-capable iPods.
    media_type_name={
        0x00:"Audio/Video",
        0x01:"Audio",
        0x02:"Video",
        0x04:"Podcast",
        0x06:"Video Podcast",
        0x08:"Audiobook",
        0x20:"Music Video",
        0x40:"TV Show",
        0X60:"TV Show (Music lists)",
    }
    def __init__(self, *args, **kw):
        FieldSet.__init__(self, *args, **kw)
        # Record size in bits, taken from the entry_length field (bytes).
        self._size = self["entry_length"].value *8
    def createFields(self):
        """Yield the fixed mhit layout, header padding, then one
        DataObject per attached string."""
        yield String(self, "header_id", 4, "Track Item Header Markup (\"mhit\")", charset="ISO-8859-1")
        yield UInt32(self, "header_length", "Header Length")
        yield UInt32(self, "entry_length", "Entry Length")
        yield UInt32(self, "string_number", "Number of Strings")
        yield UInt32(self, "unique_id", "Unique ID")
        yield UInt32(self, "visible_tag", "Visible Tag")
        yield String(self, "file_type", 4, "File Type")
        yield Enum(UInt8(self, "x1_type", "Extended Type 1"),self.x1_type_name)
        yield Enum(UInt8(self, "x2_type", "Extended type 2"),self.x2_type_name)
        yield UInt8(self, "compilation_flag", "Compilation Flag")
        yield UInt8(self, "rating", "Rating")
        yield TimestampMac32(self, "added_date", "Date when the item was added")
        yield filesizeHandler(UInt32(self, "size", "Track size in bytes"))
        yield displayHandler(UInt32(self, "length", "Track length in milliseconds"), humanDuration)
        yield UInt32(self, "track_number", "Number of this track")
        yield UInt32(self, "total_track", "Total number of tracks")
        yield UInt32(self, "year", "Year of the track")
        yield UInt32(self, "bitrate", "Bitrate")
        yield UInt32(self, "samplerate", "Sample Rate")
        yield UInt32(self, "volume", "volume")
        yield UInt32(self, "start_time", "Start playing at, in milliseconds")
        yield UInt32(self, "stop_time", "Stop playing at, in milliseconds")
        yield UInt32(self, "soundcheck", "SoundCheck preamp")
        yield UInt32(self, "playcount_1", "Play count of the track")
        yield UInt32(self, "playcount_2", "Play count of the track (identical to playcount_1)")
        yield UInt32(self, "last_played_time", "Time the song was last played")
        yield UInt32(self, "disc_number", "disc number in multi disc sets")
        yield UInt32(self, "total_discs", "Total number of discs in the disc set")
        yield UInt32(self, "userid", "User ID in the DRM scheme")
        yield TimestampMac32(self, "last_modified", "Time of the last modification of the track")
        yield UInt32(self, "bookmark_time", "Bookmark time for AudioBook")
        yield UInt64(self, "dbid", "Unique DataBase ID for the song (identical in mhit and in mhii)")
        yield UInt8(self, "checked", "song is checked")
        yield UInt8(self, "application_rating", "Last Rating before change")
        yield UInt16(self, "BPM", "BPM of the track")
        yield UInt16(self, "artwork_count", "number of artworks fo this item")
        yield UInt16(self, "unknown[]")
        yield UInt32(self, "artwork_size", "Total size of artworks in bytes")
        yield UInt32(self, "unknown[]")
        yield Float32(self, "sample_rate_2", "Sample Rate express in float")
        yield UInt32(self, "released_date", "Date of release in Music Store or in Podcast")
        yield UInt32(self, "unknown[]")
        yield UInt32(self, "unknown[]")
        yield UInt32(self, "unknown[]")
        yield UInt32(self, "unknown[]")
        yield UInt32(self, "unknown[]")
        yield UInt8(self, "has_artwork", "0x01 for track with artwork, 0x02 otherwise")
        yield UInt8(self, "skip_wen_shuffling", "Skip that track when shuffling")
        yield UInt8(self, "remember_playback_position", "Remember playback position")
        yield UInt8(self, "flag4", "Flag 4")
        yield UInt64(self, "dbid2", "Unique DataBase ID for the song (identical as above)")
        yield UInt8(self, "lyrics_flag", "Lyrics Flag")
        yield UInt8(self, "movie_file_flag", "Movie File Flag")
        yield UInt8(self, "played_mark", "Track has been played")
        yield UInt8(self, "unknown[]")
        yield UInt32(self, "unknown[]")
        yield UInt32(self, "unknown[]")
        yield UInt32(self, "sample_count", "Number of samples in the song (only for WAV and AAC files)")
        yield UInt32(self, "unknown[]")
        yield UInt32(self, "unknown[]")
        yield UInt32(self, "unknown[]")
        yield UInt32(self, "unknown[]")
        yield Enum(UInt32(self, "media_type", "Media Type for video iPod"),self.media_type_name)
        yield UInt32(self, "season_number", "Season Number")
        yield UInt32(self, "episode_number", "Episode Number")
        yield UInt32(self, "unknown[]")
        yield UInt32(self, "unknown[]")
        yield UInt32(self, "unknown[]")
        yield UInt32(self, "unknown[]")
        yield UInt32(self, "unknown[]")
        yield UInt32(self, "unknown[]")
        # Skip any remaining header bytes before the mhod children.
        padding = self.seekByte(self["header_length"].value, "header padding")
        if padding:
            yield padding
        #while ((self.stream.readBytes(0, 4) == 'mhod') and ((self.current_size/8) < self["entry_length"].value)):
        for i in xrange(self["string_number"].value):
            yield DataObject(self, "data[]")
        padding = self.seekBit(self._size, "entry padding")
        if padding:
            yield padding
class TrackList(FieldSet):
    """iTunesDB "mhlt" section: header followed by every TrackItem in the
    database."""
    def createFields(self):
        # Field order matches the on-disk layout; do not reorder the yields.
        yield String(self, "header_id", 4, "Track List Header Markup (\"mhlt\")", charset="ISO-8859-1")
        yield UInt32(self, "header_length", "Header Length")
        yield UInt32(self, "track_number", "Number of Tracks")
        # Skip any unparsed bytes up to the declared header size.
        padding = self.seekByte(self["header_length"].value, "header padding")
        if padding:
            yield padding
        for i in xrange(self["track_number"].value):
            yield TrackItem(self, "track[]")
class PlaylistItem(FieldSet):
    """iTunesDB "mhip" entry: one track reference inside a playlist, plus its
    child data objects (MHODs)."""
    def __init__(self, *args, **kw):
        FieldSet.__init__(self, *args, **kw)
        # The entry declares its own total size; entry_length is in bytes,
        # hachoir field sizes are in bits.
        self._size = self["entry_length"].value * 8
    def createFields(self):
        # Field order matches the on-disk layout; do not reorder the yields.
        yield String(self, "header_id", 4, "Playlist Item Header Markup (\"mhip\")", charset="ISO-8859-1")
        yield UInt32(self, "header_length", "Header Length")
        yield UInt32(self, "entry_length", "Entry Length")
        yield UInt32(self, "data_object_child_count", "Number of Child Data Objects")
        yield UInt32(self, "podcast_grouping_flag", "Podcast Grouping Flag")
        yield UInt32(self, "group_id", "Group ID")
        yield UInt32(self, "track_id", "Track ID")
        yield TimestampMac32(self, "timestamp", "Song Timestamp")
        yield UInt32(self, "podcast_grouping_ref", "Podcast Grouping Reference")
        # Skip any unparsed bytes up to the declared header size.
        padding = self.seekByte(self["header_length"].value, "header padding")
        if padding:
            yield padding
        for i in xrange(self["data_object_child_count"].value):
            yield DataObject(self, "mhod[]")
class Playlist(FieldSet):
    """iTunesDB "mhyp" entry: one playlist (or podcast list) header followed
    by its string MHODs and its PlaylistItem children."""

    # Meaning of the one-byte "type" field: there is exactly one master
    # playlist per database.
    is_master_pl_name={
        0:"Regular playlist",
        1:"Master playlist"
    }
    # Meaning of the "is_podcast" field.
    is_podcast_name={
        0:"Normal Playlist List",
        1:"Podcast Playlist List"
    }
    # Meaning of the "sort_order" field: which iTunes column the playlist is
    # sorted on.
    list_sort_order_name={
        1:"Manual Sort Order",
        2:"???",
        3:"Song Title",
        4:"Album",
        5:"Artist",
        6:"Bitrate",
        7:"Genre",
        8:"Kind",
        9:"Date Modified",
        10:"Track Number",
        11:"Size",
        12:"Time",
        13:"Year",
        14:"Sample Rate",
        15:"Comment",
        16:"Date Added",
        17:"Equalizer",
        18:"Composer",
        19:"???",
        20:"Play Count",
        21:"Last Played",
        22:"Disc Number",
        23:"My Rating",
        24:"Release Date",
        25:"BPM",
        26:"Grouping",
        27:"Category",
        28:"Description",
        29:"Show",
        30:"Season",
        31:"Episode Number"
    }
    def __init__(self, *args, **kw):
        FieldSet.__init__(self, *args, **kw)
        # The entry declares its own total size (bytes -> bits).
        self._size = self["entry_length"].value * 8
    def createFields(self):
        # Field order matches the on-disk layout; do not reorder the yields.
        yield String(self, "header_id", 4, "Playlist List Header Markup (\"mhyp\")", charset="ISO-8859-1")
        yield UInt32(self, "header_length", "Header Length")
        yield UInt32(self, "entry_length", "Entry Length")
        yield UInt32(self, "data_object_child_count", "Number of Child Data Objects")
        yield UInt32(self, "playlist_count", "Number of Playlist Items")
        yield Enum(UInt8(self, "type", "Normal or master playlist?"), self.is_master_pl_name)
        yield UInt8(self, "XXX1", "XXX1")
        yield UInt8(self, "XXX2", "XXX2")
        yield UInt8(self, "XXX3", "XXX3")
        yield TimestampMac32(self, "creation_date", "Date when the playlist was created")
        yield UInt64(self, "playlistid", "Persistent Playlist ID")
        yield UInt32(self, "unk3", "unk3")
        yield UInt16(self, "string_mhod_count", "Number of string MHODs for this playlist")
        yield Enum(UInt16(self, "is_podcast", "Playlist or Podcast List?"), self.is_podcast_name)
        yield Enum(UInt32(self, "sort_order", "Playlist Sort Order"), self.list_sort_order_name)
        # Skip any unparsed bytes up to the declared header size.
        padding = self.seekByte(self["header_length"].value, "entry padding")
        if padding:
            yield padding
        for i in xrange(self["data_object_child_count"].value):
            yield DataObject(self, "mhod[]")
        for i in xrange(self["playlist_count"].value):
            yield PlaylistItem(self, "playlist_item[]")
class PlaylistList(FieldSet):
    """iTunesDB "mhlp" section: header followed by every Playlist entry."""
    def createFields(self):
        # Field order matches the on-disk layout; do not reorder the yields.
        yield String(self, "header_id", 4, "Playlist List Header Markup (\"mhlp\")", charset="ISO-8859-1")
        yield UInt32(self, "header_length", "Header Length")
        yield UInt32(self, "playlist_number", "Number of Playlists")
        # Skip any unparsed bytes up to the declared header size.
        padding = self.seekByte(self["header_length"].value, "header padding")
        if padding:
            yield padding
        for i in xrange(self["playlist_number"].value):
            yield Playlist(self, "playlist[]")
class DataSet(FieldSet):
    """iTunesDB "mhsd" section: wraps the track list or one of the playlist
    lists, padded out to its declared entry_length."""

    # Meaning of the "type" field below.
    type_name={
        1:"Track List",
        2:"Play List",
        3:"Podcast List"
    }
    def __init__(self, *args, **kw):
        FieldSet.__init__(self, *args, **kw)
        # The section declares its own total size (bytes -> bits).
        self._size = self["entry_length"].value * 8
    def createFields(self):
        # Field order matches the on-disk layout; do not reorder the yields.
        yield String(self, "header_id", 4, "DataSet Header Markup (\"mhsd\")", charset="ISO-8859-1")
        yield UInt32(self, "header_length", "Header Length")
        yield UInt32(self, "entry_length", "Entry Length")
        yield Enum(UInt32(self, "type", "type"),self.type_name)
        padding = self.seekByte(self["header_length"].value, "header_raw")
        if padding:
            yield padding
        # Exactly one of the three branches applies per section.
        if self["type"].value == 1:
            yield TrackList(self, "tracklist[]")
        if self["type"].value == 2:
            yield PlaylistList(self, "playlist_list[]");
        if self["type"].value == 3:
            yield PlaylistList(self, "podcast_list[]");
        # Consume whatever remains of the declared entry size.
        padding = self.seekBit(self._size, "entry padding")
        if padding:
            yield padding
class DataBase(FieldSet):
    """Generic sized chunk keyed on its entry_length.

    createFields is commented out below; this class is not referenced by
    ITunesDBFile in this file and appears to be an unused placeholder.
    """
    def __init__(self, *args, **kw):
        FieldSet.__init__(self, *args, **kw)
        # entry_length is in bytes, hachoir sizes are in bits.
        self._size = self["entry_length"].value * 8
#    def createFields(self):
class ITunesDBFile(Parser):
    """Top-level hachoir parser for the iPod iTunesDB file (an "mhbd"
    container holding one DataSet per child)."""
    PARSER_TAGS = {
        "id": "itunesdb",
        "category": "audio",
        "min_size": 44*8,
        "magic": (('mhbd',0),),
        "description": "iPod iTunesDB file"
    }
    endian = LITTLE_ENDIAN
    def validate(self):
        # A valid database starts with the "mhbd" magic marker.
        return self.stream.readBytes(0, 4) == 'mhbd'
    def createFields(self):
        # Field order matches the on-disk layout; do not reorder the yields.
        yield String(self, "header_id", 4, "DataBase Header Markup (\"mhbd\")", charset="ISO-8859-1")
        yield UInt32(self, "header_length", "Header Length")
        yield UInt32(self, "entry_length", "Entry Length")
        yield UInt32(self, "unknown[]")
        yield UInt32(self, "version_number", "Version Number")
        yield UInt32(self, "child_number", "Number of Children")
        yield UInt64(self, "id", "ID for this database")
        yield UInt32(self, "unknown[]")
        yield UInt64(self, "initial_dbid", "Initial DBID")
        # Pad out to the declared header size before the child sections.
        size = self["header_length"].value - self.current_size / 8
        if size > 0:
            yield NullBytes(self, "padding", size)
        for i in xrange(self["child_number"].value):
            yield DataSet(self, "dataset[]")
        padding = self.seekByte(self["entry_length"].value, "entry padding")
        if padding:
            yield padding
    def createContentSize(self):
        # entry_length is in bytes; hachoir content sizes are in bits.
        return self["entry_length"].value * 8
| gpl-3.0 |
JoaoVasques/aws-devtool | eb/linux/python3/lib/aws/requests/packages/chardet2/escprober.py | 25 | 3175 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .escsm import HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel, ISO2022KRSMModel
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
class EscCharSetProber(CharSetProber):
    """Prober for escape-sequence based encodings (HZ-GB-2312 and the
    ISO-2022 family).

    Each candidate encoding is modelled by a coding state machine; bytes
    are fed to every still-active machine until one recognizes its
    encoding (eItsMe) or all of them fail (eError).
    """

    def __init__(self):
        CharSetProber.__init__(self)
        self._mCodingSM = [CodingStateMachine(model)
                           for model in (HZSMModel,
                                         ISO2022CNSMModel,
                                         ISO2022JPSMModel,
                                         ISO2022KRSMModel)]
        self.reset()

    def reset(self):
        CharSetProber.reset(self)
        for machine in self._mCodingSM:
            if not machine:
                continue
            machine.active = True
            machine.reset()
        self._mActiveSM = len(self._mCodingSM)
        self._mDetectedCharset = None

    def get_charset_name(self):
        return self._mDetectedCharset

    def get_confidence(self):
        # Escape sequences are unambiguous: either we found a charset
        # (near-certain) or we have no evidence at all.
        return 0.99 if self._mDetectedCharset else 0.00

    def feed(self, aBuf):
        for c in aBuf:
            # PY3K: aBuf is a byte array, so c is an int, not a byte
            for machine in self._mCodingSM:
                if not machine or not machine.active:
                    continue
                state = machine.next_state(c)
                if state == constants.eError:
                    # This machine rejected the input; retire it.
                    machine.active = False
                    self._mActiveSM -= 1
                    if self._mActiveSM <= 0:
                        self._mState = constants.eNotMe
                        return self.get_state()
                elif state == constants.eItsMe:
                    self._mState = constants.eFoundIt
                    self._mDetectedCharset = machine.get_coding_state_machine()
                    return self.get_state()
        return self.get_state()
| apache-2.0 |
pjg101/SickRage | lib/unidecode/x0b7.py | 253 | 4833 | data = (
'ddwim', # 0x00
'ddwib', # 0x01
'ddwibs', # 0x02
'ddwis', # 0x03
'ddwiss', # 0x04
'ddwing', # 0x05
'ddwij', # 0x06
'ddwic', # 0x07
'ddwik', # 0x08
'ddwit', # 0x09
'ddwip', # 0x0a
'ddwih', # 0x0b
'ddyu', # 0x0c
'ddyug', # 0x0d
'ddyugg', # 0x0e
'ddyugs', # 0x0f
'ddyun', # 0x10
'ddyunj', # 0x11
'ddyunh', # 0x12
'ddyud', # 0x13
'ddyul', # 0x14
'ddyulg', # 0x15
'ddyulm', # 0x16
'ddyulb', # 0x17
'ddyuls', # 0x18
'ddyult', # 0x19
'ddyulp', # 0x1a
'ddyulh', # 0x1b
'ddyum', # 0x1c
'ddyub', # 0x1d
'ddyubs', # 0x1e
'ddyus', # 0x1f
'ddyuss', # 0x20
'ddyung', # 0x21
'ddyuj', # 0x22
'ddyuc', # 0x23
'ddyuk', # 0x24
'ddyut', # 0x25
'ddyup', # 0x26
'ddyuh', # 0x27
'ddeu', # 0x28
'ddeug', # 0x29
'ddeugg', # 0x2a
'ddeugs', # 0x2b
'ddeun', # 0x2c
'ddeunj', # 0x2d
'ddeunh', # 0x2e
'ddeud', # 0x2f
'ddeul', # 0x30
'ddeulg', # 0x31
'ddeulm', # 0x32
'ddeulb', # 0x33
'ddeuls', # 0x34
'ddeult', # 0x35
'ddeulp', # 0x36
'ddeulh', # 0x37
'ddeum', # 0x38
'ddeub', # 0x39
'ddeubs', # 0x3a
'ddeus', # 0x3b
'ddeuss', # 0x3c
'ddeung', # 0x3d
'ddeuj', # 0x3e
'ddeuc', # 0x3f
'ddeuk', # 0x40
'ddeut', # 0x41
'ddeup', # 0x42
'ddeuh', # 0x43
'ddyi', # 0x44
'ddyig', # 0x45
'ddyigg', # 0x46
'ddyigs', # 0x47
'ddyin', # 0x48
'ddyinj', # 0x49
'ddyinh', # 0x4a
'ddyid', # 0x4b
'ddyil', # 0x4c
'ddyilg', # 0x4d
'ddyilm', # 0x4e
'ddyilb', # 0x4f
'ddyils', # 0x50
'ddyilt', # 0x51
'ddyilp', # 0x52
'ddyilh', # 0x53
'ddyim', # 0x54
'ddyib', # 0x55
'ddyibs', # 0x56
'ddyis', # 0x57
'ddyiss', # 0x58
'ddying', # 0x59
'ddyij', # 0x5a
'ddyic', # 0x5b
'ddyik', # 0x5c
'ddyit', # 0x5d
'ddyip', # 0x5e
'ddyih', # 0x5f
'ddi', # 0x60
'ddig', # 0x61
'ddigg', # 0x62
'ddigs', # 0x63
'ddin', # 0x64
'ddinj', # 0x65
'ddinh', # 0x66
'ddid', # 0x67
'ddil', # 0x68
'ddilg', # 0x69
'ddilm', # 0x6a
'ddilb', # 0x6b
'ddils', # 0x6c
'ddilt', # 0x6d
'ddilp', # 0x6e
'ddilh', # 0x6f
'ddim', # 0x70
'ddib', # 0x71
'ddibs', # 0x72
'ddis', # 0x73
'ddiss', # 0x74
'dding', # 0x75
'ddij', # 0x76
'ddic', # 0x77
'ddik', # 0x78
'ddit', # 0x79
'ddip', # 0x7a
'ddih', # 0x7b
'ra', # 0x7c
'rag', # 0x7d
'ragg', # 0x7e
'rags', # 0x7f
'ran', # 0x80
'ranj', # 0x81
'ranh', # 0x82
'rad', # 0x83
'ral', # 0x84
'ralg', # 0x85
'ralm', # 0x86
'ralb', # 0x87
'rals', # 0x88
'ralt', # 0x89
'ralp', # 0x8a
'ralh', # 0x8b
'ram', # 0x8c
'rab', # 0x8d
'rabs', # 0x8e
'ras', # 0x8f
'rass', # 0x90
'rang', # 0x91
'raj', # 0x92
'rac', # 0x93
'rak', # 0x94
'rat', # 0x95
'rap', # 0x96
'rah', # 0x97
'rae', # 0x98
'raeg', # 0x99
'raegg', # 0x9a
'raegs', # 0x9b
'raen', # 0x9c
'raenj', # 0x9d
'raenh', # 0x9e
'raed', # 0x9f
'rael', # 0xa0
'raelg', # 0xa1
'raelm', # 0xa2
'raelb', # 0xa3
'raels', # 0xa4
'raelt', # 0xa5
'raelp', # 0xa6
'raelh', # 0xa7
'raem', # 0xa8
'raeb', # 0xa9
'raebs', # 0xaa
'raes', # 0xab
'raess', # 0xac
'raeng', # 0xad
'raej', # 0xae
'raec', # 0xaf
'raek', # 0xb0
'raet', # 0xb1
'raep', # 0xb2
'raeh', # 0xb3
'rya', # 0xb4
'ryag', # 0xb5
'ryagg', # 0xb6
'ryags', # 0xb7
'ryan', # 0xb8
'ryanj', # 0xb9
'ryanh', # 0xba
'ryad', # 0xbb
'ryal', # 0xbc
'ryalg', # 0xbd
'ryalm', # 0xbe
'ryalb', # 0xbf
'ryals', # 0xc0
'ryalt', # 0xc1
'ryalp', # 0xc2
'ryalh', # 0xc3
'ryam', # 0xc4
'ryab', # 0xc5
'ryabs', # 0xc6
'ryas', # 0xc7
'ryass', # 0xc8
'ryang', # 0xc9
'ryaj', # 0xca
'ryac', # 0xcb
'ryak', # 0xcc
'ryat', # 0xcd
'ryap', # 0xce
'ryah', # 0xcf
'ryae', # 0xd0
'ryaeg', # 0xd1
'ryaegg', # 0xd2
'ryaegs', # 0xd3
'ryaen', # 0xd4
'ryaenj', # 0xd5
'ryaenh', # 0xd6
'ryaed', # 0xd7
'ryael', # 0xd8
'ryaelg', # 0xd9
'ryaelm', # 0xda
'ryaelb', # 0xdb
'ryaels', # 0xdc
'ryaelt', # 0xdd
'ryaelp', # 0xde
'ryaelh', # 0xdf
'ryaem', # 0xe0
'ryaeb', # 0xe1
'ryaebs', # 0xe2
'ryaes', # 0xe3
'ryaess', # 0xe4
'ryaeng', # 0xe5
'ryaej', # 0xe6
'ryaec', # 0xe7
'ryaek', # 0xe8
'ryaet', # 0xe9
'ryaep', # 0xea
'ryaeh', # 0xeb
'reo', # 0xec
'reog', # 0xed
'reogg', # 0xee
'reogs', # 0xef
'reon', # 0xf0
'reonj', # 0xf1
'reonh', # 0xf2
'reod', # 0xf3
'reol', # 0xf4
'reolg', # 0xf5
'reolm', # 0xf6
'reolb', # 0xf7
'reols', # 0xf8
'reolt', # 0xf9
'reolp', # 0xfa
'reolh', # 0xfb
'reom', # 0xfc
'reob', # 0xfd
'reobs', # 0xfe
'reos', # 0xff
)
| gpl-3.0 |
mancoast/CPythonPyc_test | cpython/260_test_support.py | 3 | 29308 | """Supporting definitions for the Python regression tests."""
# Refuse to be imported under any other name, so the regression-test
# machinery always shares a single module instance with consistent globals.
if __name__ != 'test.test_support':
    raise ImportError('test_support must be imported from the test package')
import contextlib
import errno
import socket
import sys
import os
import shutil
import warnings
import unittest
# Public API re-exported to individual regression tests via
# "from test.test_support import *".
__all__ = ["Error", "TestFailed", "TestSkipped", "ResourceDenied", "import_module",
           "verbose", "use_resources", "max_memuse", "record_original_stdout",
           "get_original_stdout", "unload", "unlink", "rmtree", "forget",
           "is_resource_enabled", "requires", "find_unused_port", "bind_port",
           "fcmp", "have_unicode", "is_jython", "TESTFN", "HOST", "FUZZ",
           "findfile", "verify", "vereq", "sortdict", "check_syntax_error",
           "open_urlresource", "check_warnings", "CleanImport",
           "EnvironmentVarGuard", "captured_output",
           "captured_stdout", "TransientResource", "transient_internet",
           "run_with_locale", "set_memlimit", "bigmemtest", "bigaddrspacetest",
           "BasicTestRunner", "run_unittest", "run_doctest", "threading_setup",
           "threading_cleanup", "reap_children"]
# Exception hierarchy used by the regression-test framework:
# Error -> TestFailed / TestSkipped -> ResourceDenied.
class Error(Exception):
    """Base class for regression test exceptions."""

class TestFailed(Error):
    """Test failed."""

class TestSkipped(Error):
    """Test skipped.

    This can be raised to indicate that a test was deliberately
    skipped, but not because a feature wasn't available.  For
    example, if some resource can't be used, such as the network
    appears to be unavailable, this should be raised instead of
    TestFailed.
    """

class ResourceDenied(TestSkipped):
    """Test skipped because it requested a disallowed resource.

    This is raised when a test calls requires() for a resource that
    has not been enabled.  It is used to distinguish between expected
    and unexpected skips.
    """
def import_module(name, deprecated=False):
    """Import and return the module named *name*, raising TestSkipped when
    the import fails (e.g. an optional module is missing on this platform).

    When *deprecated* is true, DeprecationWarnings emitted while importing
    the module or package are suppressed.
    """
    with warnings.catch_warnings():
        if deprecated:
            warnings.filterwarnings("ignore", ".+ (module|package)",
                                    DeprecationWarning)
        try:
            return __import__(name, level=0)
        except ImportError:
            raise TestSkipped("No module named " + name)
verbose = 1              # Flag set to 0 by regrtest.py
use_resources = None     # Flag set to [] by regrtest.py
max_memuse = 0           # Disable bigmem tests (they will still be run with
                         # small sizes, to make sure they work.)
real_max_memuse = 0      # The parsed limit before clamping to sys.maxsize.

# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
    """Remember *stdout* as the stream regrtest started with."""
    global _original_stdout
    _original_stdout = stdout

def get_original_stdout():
    """Return the remembered original stdout, falling back to sys.stdout."""
    return _original_stdout or sys.stdout
def unload(name):
    """Drop *name* from sys.modules; a no-op when it was never imported."""
    sys.modules.pop(name, None)

def unlink(filename):
    """Delete *filename*; the error from an already-missing file is ignored."""
    try:
        os.unlink(filename)
    except OSError:
        pass
def rmtree(path):
    """Remove the directory tree at *path*, ignoring the error raised when
    it does not exist.  (Python 2 except-clause syntax.)"""
    try:
        shutil.rmtree(path)
    except OSError, e:
        # Unix returns ENOENT, Windows returns ESRCH.
        if e.errno not in (errno.ENOENT, errno.ESRCH):
            raise
def forget(modname):
    '''"Forget" a module was ever imported by removing it from sys.modules and
    deleting any .pyc and .pyo files.'''
    unload(modname)
    for dirname in sys.path:
        # Remove both compiled forms independently: a directory may hold a
        # .pyo without a matching .pyc (or vice versa).
        for suffix in ('pyc', 'pyo'):
            unlink(os.path.join(dirname, modname + os.extsep + suffix))
def is_resource_enabled(resource):
    """Return True when regrtest.py has enabled *resource* (it populates the
    use_resources module global)."""
    return use_resources is not None and resource in use_resources

def requires(resource, msg=None):
    """Raise ResourceDenied unless *resource* has been enabled.

    Calls made from a __main__ module are always allowed, so running a test
    file directly never trips the resource check; the False/deny path only
    occurs under regrtest.py.
    """
    # Treat the resource as granted when the immediate caller is __main__.
    if sys._getframe().f_back.f_globals.get("__name__") == "__main__":
        return
    if is_resource_enabled(resource):
        return
    if msg is None:
        msg = "Use of the `%s' resource not enabled" % resource
    raise ResourceDenied(msg)
HOST = 'localhost'

def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
    """Return a port number that is currently unused and suitable for binding.

    A temporary socket of the requested *family*/*socktype* is bound to port
    0 so the OS hands out a free ephemeral port; the socket is then closed
    and the port returned.

    Prefer bind_port() whenever the server socket is created in Python:
    between this call and the caller's own bind() the OS may reassign the
    port to another process.  Use this function only when the port must be
    passed to a constructor or an external program (e.g. openssl s_server).
    Never hard-code ports: hard-coded ports break running several instances
    of a test on one host and, on Windows (where SO_REUSEADDR has different
    semantics than on Unix -- see http://bugs.python.org/issue2550), can
    wedge the whole process.
    """
    tmp = socket.socket(family, socktype)
    try:
        return bind_port(tmp)
    finally:
        tmp.close()

def bind_port(sock, host=HOST):
    """Bind *sock* to (*host*, 0) on a free ephemeral port and return the
    chosen port number.

    For AF_INET/SOCK_STREAM sockets this refuses to proceed when
    SO_REUSEADDR or SO_REUSEPORT is already set -- tests must never set
    those on TCP/IP sockets (the only legitimate use is multicast UDP).
    Where available (Windows), SO_EXCLUSIVEADDRUSE is set so no other
    process can steal the port for the duration of the test.
    """
    if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
        for optname in ('SO_REUSEADDR', 'SO_REUSEPORT'):
            flag = getattr(socket, optname, None)
            if flag is not None and sock.getsockopt(socket.SOL_SOCKET, flag) == 1:
                raise TestFailed("tests should never set the %s "
                                 "socket option on TCP/IP sockets!" % optname)
        if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)

    sock.bind((host, 0))
    return sock.getsockname()[1]
FUZZ = 1e-6

def fcmp(x, y): # fuzzy comparison function
    """Three-way compare with float fuzz; returns -1, 0 or 1 like cmp().

    When either operand is a float, the two compare equal if they differ
    by no more than (|x| + |y|) * FUZZ.  Same-type tuples/lists compare
    element-wise (recursively fuzzy), falling back to comparing lengths.
    Everything else uses plain ordering.
    """
    if isinstance(x, float) or isinstance(y, float):
        try:
            fuzz = (abs(x) + abs(y)) * FUZZ
            if abs(x-y) <= fuzz:
                return 0
        except Exception:
            # The non-float operand lacks abs()/subtraction support; fall
            # through to the generic comparison.  (Was a bare "except:",
            # which also swallowed KeyboardInterrupt and SystemExit.)
            pass
    elif type(x) == type(y) and isinstance(x, (tuple, list)):
        for i in range(min(len(x), len(y))):
            outcome = fcmp(x[i], y[i])
            if outcome != 0:
                return outcome
        return (len(x) > len(y)) - (len(x) < len(y))
    return (x > y) - (x < y)
# Feature probe: builds without unicode support (and Python 3, where the
# builtin name is gone) leave have_unicode False.
try:
    unicode
    have_unicode = True
except NameError:
    have_unicode = False

# True when running under Jython (platform string starts with 'java').
is_jython = sys.platform.startswith('java')
# Filename used for testing.  This code runs at import time: it picks a
# platform-appropriate scratch name, derives the unicode variants, and
# verifies the name is actually writable (falling back to /tmp).
if os.name == 'java':
    # Jython disallows @ in module names
    TESTFN = '$test'
elif os.name == 'riscos':
    TESTFN = 'testfile'
else:
    TESTFN = '@test'

# Unicode name only used if TEST_FN_ENCODING exists for the platform.
if have_unicode:
    # Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
    # TESTFN_UNICODE is a filename that can be encoded using the
    # file system encoding, but *not* with the default (ascii) encoding
    if isinstance('', unicode):
        # python -U
        # XXX perhaps unicode() should accept Unicode strings?
        TESTFN_UNICODE = "@test-\xe0\xf2"
    else:
        # 2 latin characters.
        TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
    TESTFN_ENCODING = sys.getfilesystemencoding()
    # TESTFN_UNICODE_UNENCODEABLE is a filename that should *not* be
    # able to be encoded by *either* the default or filesystem encoding.
    # This test really only makes sense on Windows NT platforms
    # which have special Unicode support in posixmodule.
    if (not hasattr(sys, "getwindowsversion") or
            sys.getwindowsversion()[3] < 2):  # 0=win32s or 1=9x/ME
        TESTFN_UNICODE_UNENCODEABLE = None
    else:
        # Japanese characters (I think - from bug 846133)
        TESTFN_UNICODE_UNENCODEABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"')
        try:
            # XXX - Note - should be using TESTFN_ENCODING here - but for
            # Windows, "mbcs" currently always operates as if in
            # errors=ignore' mode - hence we get '?' characters rather than
            # the exception. 'Latin1' operates as we expect - ie, fails.
            # See [ 850997 ] mbcs encoding ignores errors
            TESTFN_UNICODE_UNENCODEABLE.encode("Latin1")
        except UnicodeEncodeError:
            pass
        else:
            print \
                'WARNING: The filename %r CAN be encoded by the filesystem. ' \
                'Unicode filename tests may not be effective' \
                % TESTFN_UNICODE_UNENCODEABLE

# Make sure we can write to TESTFN, try in /tmp if we can't
fp = None
try:
    fp = open(TESTFN, 'w+')
except IOError:
    TMP_TESTFN = os.path.join('/tmp', TESTFN)
    try:
        fp = open(TMP_TESTFN, 'w+')
        TESTFN = TMP_TESTFN
        del TMP_TESTFN
    except IOError:
        print ('WARNING: tests will fail, unable to write to: %s or %s' %
               (TESTFN, TMP_TESTFN))
# Clean up the probe file and the temporaries used above.
if fp is not None:
    fp.close()
    unlink(TESTFN)
del fp
def findfile(file, here=__file__):
    """Locate *file* on sys.path or next to this module.

    Absolute paths are returned unchanged.  When nothing is found, the
    argument itself is returned -- which does not necessarily signal
    failure, since it may still be a valid relative path.
    """
    if os.path.isabs(file):
        return file
    for dn in [os.path.dirname(here)] + sys.path:
        fn = os.path.join(dn, file)
        if os.path.exists(fn):
            return fn
    return file
def verify(condition, reason='test failed'):
    """Raise TestFailed(*reason*) unless *condition* is true.

    The optional *reason* argument provides a better error text.
    """
    if condition:
        return
    raise TestFailed(reason)
def vereq(a, b):
    """Raise TestFailed if a == b is false.

    Better than verify(a == b): on failure the message embeds repr(a) and
    repr(b) so the inputs are visible.  Note that "not (a == b)" is what is
    tested, which is not necessarily the same as "a != b".
    """
    if a == b:
        return
    raise TestFailed("%r == %r" % (a, b))
def sortdict(dict):
    """Like repr(dict), but with the items in sorted key order.

    Uses sorted() rather than list.sort() on dict.items(): identical
    behaviour on Python 2.6, and it keeps working on Python 3 where
    dict.items() returns a view without a .sort() method.
    """
    items = sorted(dict.items())
    reprpairs = ["%r: %r" % pair for pair in items]
    withcommas = ", ".join(reprpairs)
    return "{%s}" % withcommas
def check_syntax_error(testcase, statement):
    """Fail *testcase* unless compiling *statement* raises SyntaxError."""
    try:
        compile(statement, '<test string>', 'exec')
    except SyntaxError:
        return
    testcase.fail('Missing SyntaxError: "%s"' % statement)
def open_urlresource(url):
    """Return an open file for the test-data resource named by *url*.

    The file is looked up (by its basename) in the current and parent
    directories first; when absent it is downloaded and cached there.
    Requires the 'urlfetch' regrtest resource.  Uses Python-2-only module
    names (urllib/urlparse) and print-to-stream syntax.
    """
    import urllib, urlparse
    requires('urlfetch')
    filename = urlparse.urlparse(url)[2].split('/')[-1] # '/': it's URL!
    for path in [os.path.curdir, os.path.pardir]:
        fn = os.path.join(path, filename)
        if os.path.exists(fn):
            return open(fn)

    print >> get_original_stdout(), '\tfetching %s ...' % url
    fn, _ = urllib.urlretrieve(url, filename)
    return open(fn)
class WarningsRecorder(object):
    """Convenience wrapper for the list returned on entry to the
    warnings.catch_warnings() context manager.

    Attribute access is delegated to the most recently recorded warning;
    while the list is empty, the standard WarningMessage attributes read
    as None.
    """
    def __init__(self, warnings_list):
        self.warnings = warnings_list

    def __getattr__(self, attr):
        if self.warnings:
            return getattr(self.warnings[-1], attr)
        if attr in warnings.WarningMessage._WARNING_DETAILS:
            return None
        raise AttributeError("%r has no attribute %r" % (self, attr))

    def reset(self):
        """Forget every warning recorded so far."""
        del self.warnings[:]

@contextlib.contextmanager
def check_warnings():
    """Catch warnings raised in the body and expose them via a
    WarningsRecorder."""
    with warnings.catch_warnings(record=True) as recorded:
        yield WarningsRecorder(recorded)
class CleanImport(object):
    """Context manager to force import to return a new module reference.

    Useful for testing module-level behaviours such as a
    DeprecationWarning emitted on import.  Use like this:

        with CleanImport("foo"):
            __import__("foo") # new reference
    """

    def __init__(self, *module_names):
        self.original_modules = sys.modules.copy()
        for module_name in module_names:
            if module_name not in sys.modules:
                continue
            module = sys.modules[module_name]
            # The name may be a mere alias for another module (e.g. a stub
            # for a module renamed in 3.x); drop the real module too so the
            # import cache is genuinely cleared.
            if module.__name__ != module_name:
                del sys.modules[module.__name__]
            del sys.modules[module_name]

    def __enter__(self):
        return self

    def __exit__(self, *ignore_exc):
        sys.modules.update(self.original_modules)
class EnvironmentVarGuard(object):
    """Class to help protect the environment variable properly.  Can be used
    as a context manager: every variable set or unset through it is restored
    to its original state on exit."""

    def __init__(self):
        self._environ = os.environ
        self._unset = set()    # names we created: delete them on exit
        self._reset = dict()   # names we changed: restore these values on exit

    def set(self, envvar, value):
        """Set *envvar* to *value*, remembering how to undo the change."""
        if envvar not in self._environ:
            self._unset.add(envvar)
        else:
            self._reset[envvar] = self._environ[envvar]
        self._environ[envvar] = value

    def unset(self, envvar):
        """Remove *envvar*, remembering its value for restoration."""
        if envvar in self._environ:
            self._reset[envvar] = self._environ[envvar]
            del self._environ[envvar]

    def __enter__(self):
        return self

    def __exit__(self, *ignore_exc):
        # .items() instead of the Python-2-only iteritems(): identical
        # behaviour on 2.6, and it keeps the class usable on Python 3.
        for envvar, value in self._reset.items():
            self._environ[envvar] = value
        for unset in self._unset:
            del self._environ[unset]
class TransientResource(object):
    """Raise ResourceDenied if an exception matching *exc* (with attributes
    matching the given keyword values) is raised while the context manager
    is in effect; any other exception propagates unchanged."""

    def __init__(self, exc, **kwargs):
        self.exc = exc
        self.attrs = kwargs

    def __enter__(self):
        return self

    def __exit__(self, type_=None, value=None, traceback=None):
        """If type_ is a subclass of self.exc and value has attributes matching
        self.attrs, raise ResourceDenied.  Otherwise let the exception
        propagate (if any)."""
        if type_ is not None and issubclass(self.exc, type_):
            # .items() instead of the Python-2-only iteritems(): identical
            # behaviour on 2.6, and it keeps the class usable on Python 3.
            for attr, attr_value in self.attrs.items():
                if not hasattr(value, attr):
                    break
                if getattr(value, attr) != attr_value:
                    break
            else:
                raise ResourceDenied("an optional resource is not available")
def transient_internet():
    """Return a context manager that raises ResourceDenied when various issues
    with the Internet connection manifest themselves as exceptions."""
    # Timeouts and connection resets are reported as an unavailable optional
    # resource instead of a test failure.  contextlib.nested (Python-2-only,
    # deprecated in 2.7) stacks the three guards into one context manager.
    time_out = TransientResource(IOError, errno=errno.ETIMEDOUT)
    socket_peer_reset = TransientResource(socket.error, errno=errno.ECONNRESET)
    ioerror_peer_reset = TransientResource(IOError, errno=errno.ECONNRESET)
    return contextlib.nested(time_out, socket_peer_reset, ioerror_peer_reset)
@contextlib.contextmanager
def captured_output(stream_name):
    """Run the 'with' statement body using a StringIO object in place of a
    specific attribute on the sys module.
    Example use (with 'stream_name=stdout')::

       with captured_stdout() as s:
           print "hello"
       assert s.getvalue() == "hello"
    """
    import StringIO    # Python 2 module; Python 3 would use io.StringIO
    # NOTE(review): the local is named orig_stdout but holds whichever sys
    # stream *stream_name* selects (stdout, stderr, ...).
    orig_stdout = getattr(sys, stream_name)
    setattr(sys, stream_name, StringIO.StringIO())
    try:
        yield getattr(sys, stream_name)
    finally:
        # Always restore the original stream, even on exceptions.
        setattr(sys, stream_name, orig_stdout)
def captured_stdout():
    """Shorthand for captured_output("stdout")."""
    return captured_output("stdout")
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.

def run_with_locale(catstr, *locales):
    """Decorator: run the wrapped function under the first of *locales* that
    can be set for the category named *catstr* (e.g. 'LC_ALL'), restoring
    the previous locale afterwards.  When no candidate locale can be set,
    the function simply runs in the current locale."""
    def decorator(func):
        def inner(*args, **kwds):
            try:
                import locale
                category = getattr(locale, catstr)
                orig_locale = locale.setlocale(category)
            except AttributeError:
                # if the test author gives us an invalid category string
                raise
            except:
                # NOTE(review): deliberate bare except -- any failure to
                # retrieve the original locale disables the whole dance.
                # Rebinding the name 'locale' to None below is the hack that
                # makes the finally-clause guard ("if locale and orig_locale")
                # skip restoration.
                # cannot retrieve original locale, so do nothing
                locale = orig_locale = None
            else:
                for loc in locales:
                    try:
                        locale.setlocale(category, loc)
                        break
                    except:
                        # NOTE(review): bare except -- unsupported locales
                        # are silently skipped; the next candidate is tried.
                        pass

            # now run the function, resetting the locale on exceptions
            try:
                return func(*args, **kwds)
            finally:
                if locale and orig_locale:
                    locale.setlocale(category, orig_locale)
        # Python 2 spelling of __name__; copy the metadata onto the wrapper.
        inner.func_name = func.func_name
        inner.__doc__ = func.__doc__
        return inner
    return decorator
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use
# should be configurable.

# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G

MAX_Py_ssize_t = sys.maxsize

def set_memlimit(limit):
    """Parse a human-readable memory limit such as '2.5Gb' and install it
    into the max_memuse / real_max_memuse module globals.

    Raises ValueError for an unparseable limit and for limits too small
    to be useful (below roughly 2Gb).
    """
    import re
    global max_memuse
    global real_max_memuse
    match = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
                     re.IGNORECASE | re.VERBOSE)
    if match is None:
        raise ValueError('Invalid memory limit %r' % (limit,))
    multiplier = {'k': 1024, 'm': _1M, 'g': _1G, 't': 1024*_1G}
    memlimit = int(float(match.group(1)) * multiplier[match.group(3).lower()])
    real_max_memuse = memlimit
    # Clamp to the platform's addressable range.
    memlimit = min(memlimit, MAX_Py_ssize_t)
    if memlimit < _2G - 1:
        raise ValueError('Memory limit %r too low to be useful' % (limit,))
    max_memuse = memlimit
def bigmemtest(minsize, memuse, overhead=5*_1M):
    """Decorator for bigmem tests.
    'minsize' is the minimum useful size for the test (in arbitrary,
    test-interpreted units.) 'memuse' is the number of 'bytes per size' for
    the test, or a good estimate of it. 'overhead' specifies fixed overhead,
    independent of the testsize, and defaults to 5Mb.
    The decorator tries to guess a good value for 'size' and passes it to
    the decorated test function. If minsize * memuse is more than the
    allowed memory use (as defined by max_memuse), the test is skipped.
    Otherwise, minsize is adjusted upward to use up to max_memuse.
    """
    def decorator(f):
        def wrapper(self):
            if not max_memuse:
                # If max_memuse is 0 (the default),
                # we still want to run the tests with size set to a few kb,
                # to make sure they work. We still want to avoid using
                # too much memory, though, but we do that noisily.
                maxsize = 5147
                self.failIf(maxsize * memuse + overhead > 20 * _1M)
            else:
                # Largest size that fits within the configured budget.
                maxsize = int((max_memuse - overhead) / memuse)
                if maxsize < minsize:
                    # Really ought to print 'test skipped' or something
                    if verbose:
                        sys.stderr.write("Skipping %s because of memory "
                                         "constraint\n" % (f.__name__,))
                    return
                # Try to keep some breathing room in memory use
                maxsize = max(maxsize - 50 * _1M, minsize)
            return f(self, maxsize)
        # Expose the sizing parameters on the wrapper so external tooling
        # can inspect them without calling the test.
        wrapper.minsize = minsize
        wrapper.memuse = memuse
        wrapper.overhead = overhead
        return wrapper
    return decorator
def precisionbigmemtest(size, memuse, overhead=5*_1M):
    """Decorator for bigmem tests that need an exact ``size`` rather than a
    guessed one.  Skips the test when the configured real memory limit is
    smaller than size * memuse; without a configured limit, runs with a
    small smoke-test size instead."""
    def decorator(f):
        def wrapper(self):
            # No limit configured: still exercise the test with a few kb.
            maxsize = size if real_max_memuse else 5147
            if real_max_memuse and real_max_memuse < maxsize * memuse:
                if verbose:
                    sys.stderr.write("Skipping %s because of memory "
                                     "constraint\n" % (f.__name__,))
                return
            return f(self, maxsize)
        # Expose sizing parameters on the wrapper for external inspection.
        wrapper.size = size
        wrapper.memuse = memuse
        wrapper.overhead = overhead
        return wrapper
    return decorator
def bigaddrspacetest(f):
    """Decorator for tests that fill the address space."""
    def wrapper(self):
        # Only run when the configured limit covers the whole address
        # space; otherwise (optionally) report the skip.
        if max_memuse >= MAX_Py_ssize_t:
            return f(self)
        if verbose:
            sys.stderr.write("Skipping %s because of memory "
                             "constraint\n" % (f.__name__,))
    return wrapper
#=======================================================================
# unittest integration.
class BasicTestRunner:
    """Minimal unittest runner: runs a test into a plain TestResult and
    returns it, producing no output of its own."""
    def run(self, test):
        outcome = unittest.TestResult()
        test(outcome)
        return outcome
def _run_suite(suite):
    """Run tests from a unittest.TestSuite-derived class.

    Raises TestFailed with the single error/failure traceback when there is
    exactly one, or a generic message otherwise.
    """
    runner = (unittest.TextTestRunner(sys.stdout, verbosity=2)
              if verbose else BasicTestRunner())
    result = runner.run(suite)
    if result.wasSuccessful():
        return
    if len(result.errors) == 1 and not result.failures:
        err = result.errors[0][1]
    elif len(result.failures) == 1 and not result.errors:
        err = result.failures[0][1]
    else:
        err = "errors occurred; run in verbose mode for details"
    raise TestFailed(err)
def run_unittest(*classes):
    """Run tests from unittest.TestCase-derived classes.

    Each argument may be a TestCase class, a TestSuite/TestCase instance,
    or the name (str) of an already-imported module to scan for tests.
    """
    suite = unittest.TestSuite()
    for cls in classes:
        if isinstance(cls, str):
            if cls not in sys.modules:
                raise ValueError("str arguments must be keys in sys.modules")
            suite.addTest(unittest.findTestCases(sys.modules[cls]))
        elif isinstance(cls, (unittest.TestSuite, unittest.TestCase)):
            suite.addTest(cls)
        else:
            suite.addTest(unittest.makeSuite(cls))
    _run_suite(suite)
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None):
    """Run doctest on the given module.  Return (#failures, #tests).
    If optional argument verbosity is not specified (or is None), pass
    test_support's belief about verbosity on to doctest.  Else doctest's
    usual behavior is used (it searches sys.argv for -v).
    """
    import doctest
    if verbosity is None:
        verbosity = verbose
    else:
        # Explicit verbosity given: let doctest fall back to its own
        # sys.argv handling by passing verbose=None below.
        verbosity = None
    # Direct doctest output (normally just errors) to real stdout; doctest
    # output shouldn't be compared by regrtest.
    save_stdout = sys.stdout
    sys.stdout = get_original_stdout()
    try:
        f, t = doctest.testmod(module, verbose=verbosity)
        if f:
            raise TestFailed("%d of %d doctests failed" % (f, t))
    finally:
        sys.stdout = save_stdout
    if verbose:
        # NOTE: Python 2 print statement -- this module targets Python 2.
        print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
    return f, t
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
def threading_setup():
    """Snapshot the current thread bookkeeping counts.

    Returns a (num_active, num_limbo) tuple suitable for passing to
    threading_cleanup() to detect threads leaked by a test.
    """
    import threading
    active_count = len(threading._active)
    limbo_count = len(threading._limbo)
    return (active_count, limbo_count)
def threading_cleanup(num_active, num_limbo):
    """Wait (up to ~1 second per counter) for the thread bookkeeping counts
    to return to the values recorded by threading_setup()."""
    import threading
    import time
    _MAX_COUNT = 10
    for expected, registry in ((num_active, threading._active),
                               (num_limbo, threading._limbo)):
        attempts = 0
        # Poll in 0.1s steps; give up after _MAX_COUNT tries.
        while len(registry) != expected and attempts < _MAX_COUNT:
            attempts += 1
            time.sleep(0.1)
def reap_children():
    """Use this function at the end of test_main() whenever sub-processes
    are started.  This will help ensure that no extra children (zombies)
    stick around to hog resources and create problems when looking
    for refleaks.
    """
    # Reap all our dead child processes so we don't leave zombies around.
    # These hog resources and might be causing some of the buildbots to die.
    if not hasattr(os, 'waitpid'):
        return
    any_process = -1
    while True:
        try:
            # This will raise an exception on Windows.  That's ok.
            pid, status = os.waitpid(any_process, os.WNOHANG)
        except:
            break
        if pid == 0:
            # No more dead children to collect.
            break
| gpl-3.0 |
ininex/geofire-python | resource/lib/python2.7/site-packages/pip/_vendor/requests/sessions.py | 355 | 25609 | # -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
from collections import Mapping
from datetime import datetime
from .auth import _basic_auth_str
from .compat import cookielib, OrderedDict, urljoin, urlparse
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from .utils import to_key_val_list, default_headers, to_native_string
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .packages.urllib3._collections import RecentlyUsedContainer
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
REDIRECT_CACHE_SIZE = 1000
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
    """Determines appropriate setting for a given request, taking into account
    the explicit setting on that request, and the setting in the session.  If a
    setting is a dictionary, they will be merged together using `dict_class`.
    """
    if session_setting is None:
        return request_setting
    if request_setting is None:
        return session_setting
    # Only mappings are merged; any other type (e.g. the ``verify`` flag)
    # is taken from the request as-is.
    if not (isinstance(session_setting, Mapping)
            and isinstance(request_setting, Mapping)):
        return request_setting
    merged = dict_class(to_key_val_list(session_setting))
    merged.update(to_key_val_list(request_setting))
    # A ``None`` value acts as a deletion marker; collect the keys first so
    # the dict isn't mutated while being iterated.
    for key in [k for (k, v) in merged.items() if v is None]:
        del merged[key]
    return merged
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
    """Properly merges both requests and session hooks.

    This is necessary because when request_hooks == {'response': []}, the
    merge breaks Session hooks entirely.
    """
    def _effectively_empty(hooks):
        # No hooks at all, or an explicitly empty 'response' hook list.
        return hooks is None or hooks.get('response') == []
    if _effectively_empty(session_hooks):
        return request_hooks
    if _effectively_empty(request_hooks):
        return session_hooks
    return merge_setting(request_hooks, session_hooks, dict_class)
class SessionRedirectMixin(object):
    """Redirect-following machinery mixed into :class:`Session`.

    Relies on the host class providing ``max_redirects``, ``cookies``,
    ``redirect_cache``, ``trust_env`` and ``send``.
    """
    def resolve_redirects(self, resp, req, stream=False, timeout=None,
                          verify=True, cert=None, proxies=None, **adapter_kwargs):
        """Receives a Response. Returns a generator of Responses.

        Follows ``Location`` headers until ``resp`` is no longer a redirect
        or ``self.max_redirects`` is exceeded; yields each followed response
        in order.
        """
        i = 0
        hist = [] # keep track of history
        while resp.is_redirect:
            prepared_request = req.copy()
            if i > 0:
                # Update history and keep track of redirects.
                hist.append(resp)
                new_hist = list(hist)
                resp.history = new_hist
            try:
                resp.content # Consume socket so it can be released
            except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
                # Body could not be consumed normally; drain the raw stream.
                resp.raw.read(decode_content=False)
            if i >= self.max_redirects:
                raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp)
            # Release the connection back into the pool.
            resp.close()
            url = resp.headers['location']
            # Handle redirection without scheme (see: RFC 1808 Section 4)
            if url.startswith('//'):
                parsed_rurl = urlparse(resp.url)
                url = '%s:%s' % (parsed_rurl.scheme, url)
            # The scheme should be lower case...
            parsed = urlparse(url)
            url = parsed.geturl()
            # Facilitate relative 'location' headers, as allowed by RFC 7231.
            # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
            # Compliant with RFC3986, we percent encode the url.
            if not parsed.netloc:
                url = urljoin(resp.url, requote_uri(url))
            else:
                url = requote_uri(url)
            prepared_request.url = to_native_string(url)
            # Cache the url, unless it redirects to itself.
            if resp.is_permanent_redirect and req.url != prepared_request.url:
                self.redirect_cache[req.url] = prepared_request.url
            self.rebuild_method(prepared_request, resp)
            # https://github.com/kennethreitz/requests/issues/1084
            if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
                # https://github.com/kennethreitz/requests/issues/3490
                purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
                for header in purged_headers:
                    prepared_request.headers.pop(header, None)
                prepared_request.body = None
            headers = prepared_request.headers
            try:
                del headers['Cookie']
            except KeyError:
                pass
            # Extract any cookies sent on the response to the cookiejar
            # in the new request. Because we've mutated our copied prepared
            # request, use the old one that we haven't yet touched.
            extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
            prepared_request._cookies.update(self.cookies)
            prepared_request.prepare_cookies(prepared_request._cookies)
            # Rebuild auth and proxy information.
            proxies = self.rebuild_proxies(prepared_request, proxies)
            self.rebuild_auth(prepared_request, resp)
            # Override the original request.
            req = prepared_request
            resp = self.send(
                req,
                stream=stream,
                timeout=timeout,
                verify=verify,
                cert=cert,
                proxies=proxies,
                allow_redirects=False,
                **adapter_kwargs
            )
            extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
            i += 1
            yield resp
    def rebuild_auth(self, prepared_request, response):
        """When being redirected we may want to strip authentication from the
        request to avoid leaking credentials. This method intelligently removes
        and reapplies authentication where possible to avoid credential loss.
        """
        headers = prepared_request.headers
        url = prepared_request.url
        if 'Authorization' in headers:
            # If we get redirected to a new host, we should strip out any
            # authentication headers.
            original_parsed = urlparse(response.request.url)
            redirect_parsed = urlparse(url)
            if (original_parsed.hostname != redirect_parsed.hostname):
                del headers['Authorization']
        # .netrc might have more auth for us on our new host.
        new_auth = get_netrc_auth(url) if self.trust_env else None
        if new_auth is not None:
            prepared_request.prepare_auth(new_auth)
        return
    def rebuild_proxies(self, prepared_request, proxies):
        """This method re-evaluates the proxy configuration by considering the
        environment variables. If we are redirected to a URL covered by
        NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
        proxy keys for this URL (in case they were stripped by a previous
        redirect).

        This method also replaces the Proxy-Authorization header where
        necessary.

        :rtype: dict
        """
        headers = prepared_request.headers
        url = prepared_request.url
        scheme = urlparse(url).scheme
        new_proxies = proxies.copy() if proxies is not None else {}
        if self.trust_env and not should_bypass_proxies(url):
            environ_proxies = get_environ_proxies(url)
            # 'all' overrides any scheme-specific environment proxy.
            proxy = environ_proxies.get('all', environ_proxies.get(scheme))
            if proxy:
                new_proxies.setdefault(scheme, proxy)
        if 'Proxy-Authorization' in headers:
            # Always drop stale proxy credentials; rebuilt below if possible.
            del headers['Proxy-Authorization']
        try:
            username, password = get_auth_from_url(new_proxies[scheme])
        except KeyError:
            username, password = None, None
        if username and password:
            headers['Proxy-Authorization'] = _basic_auth_str(username, password)
        return new_proxies
    def rebuild_method(self, prepared_request, response):
        """When being redirected we may want to change the method of the request
        based on certain specs or browser behavior.
        """
        method = prepared_request.method
        # http://tools.ietf.org/html/rfc7231#section-6.4.4
        if response.status_code == codes.see_other and method != 'HEAD':
            method = 'GET'
        # Do what the browsers do, despite standards...
        # First, turn 302s into GETs.
        if response.status_code == codes.found and method != 'HEAD':
            method = 'GET'
        # Second, if a POST is responded to with a 301, turn it into a GET.
        # This bizarre behaviour is explained in Issue 1704.
        if response.status_code == codes.moved and method == 'POST':
            method = 'GET'
        prepared_request.method = method
class Session(SessionRedirectMixin):
    """A Requests session.

    Provides cookie persistence, connection-pooling, and configuration.

    Basic Usage::

      >>> import requests
      >>> s = requests.Session()
      >>> s.get('http://httpbin.org/get')
      <Response [200]>

    Or as a context manager::

      >>> with requests.Session() as s:
      >>>     s.get('http://httpbin.org/get')
      <Response [200]>
    """
    # Attributes serialized by __getstate__ / restored by __setstate__.
    __attrs__ = [
        'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
        'cert', 'prefetch', 'adapters', 'stream', 'trust_env',
        'max_redirects',
    ]
    def __init__(self):
        #: A case-insensitive dictionary of headers to be sent on each
        #: :class:`Request <Request>` sent from this
        #: :class:`Session <Session>`.
        self.headers = default_headers()
        #: Default Authentication tuple or object to attach to
        #: :class:`Request <Request>`.
        self.auth = None
        #: Dictionary mapping protocol or protocol and host to the URL of the proxy
        #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
        #: be used on each :class:`Request <Request>`.
        self.proxies = {}
        #: Event-handling hooks.
        self.hooks = default_hooks()
        #: Dictionary of querystring data to attach to each
        #: :class:`Request <Request>`. The dictionary values may be lists for
        #: representing multivalued query parameters.
        self.params = {}
        #: Stream response content default.
        self.stream = False
        #: SSL Verification default.
        self.verify = True
        #: SSL certificate default.
        self.cert = None
        #: Maximum number of redirects allowed. If the request exceeds this
        #: limit, a :class:`TooManyRedirects` exception is raised.
        #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
        #: 30.
        self.max_redirects = DEFAULT_REDIRECT_LIMIT
        #: Trust environment settings for proxy configuration, default
        #: authentication and similar.
        self.trust_env = True
        #: A CookieJar containing all currently outstanding cookies set on this
        #: session. By default it is a
        #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
        #: may be any other ``cookielib.CookieJar`` compatible object.
        self.cookies = cookiejar_from_dict({})
        # Default connection adapters.
        self.adapters = OrderedDict()
        self.mount('https://', HTTPAdapter())
        self.mount('http://', HTTPAdapter())
        # Only store 1000 redirects to prevent using infinite memory
        self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
    # Context-manager support: leaving the block closes all adapters.
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.close()
    def prepare_request(self, request):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for
        transmission and returns it. The :class:`PreparedRequest` has settings
        merged from the :class:`Request <Request>` instance and those of the
        :class:`Session`.

        :param request: :class:`Request` instance to prepare with this
            session's settings.
        :rtype: requests.PreparedRequest
        """
        cookies = request.cookies or {}
        # Bootstrap CookieJar.
        if not isinstance(cookies, cookielib.CookieJar):
            cookies = cookiejar_from_dict(cookies)
        # Merge with session cookies
        merged_cookies = merge_cookies(
            merge_cookies(RequestsCookieJar(), self.cookies), cookies)
        # Set environment's basic authentication if not explicitly set.
        auth = request.auth
        if self.trust_env and not auth and not self.auth:
            auth = get_netrc_auth(request.url)
        p = PreparedRequest()
        p.prepare(
            method=request.method.upper(),
            url=request.url,
            files=request.files,
            data=request.data,
            json=request.json,
            headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
            params=merge_setting(request.params, self.params),
            auth=merge_setting(auth, self.auth),
            cookies=merged_cookies,
            hooks=merge_hooks(request.hooks, self.hooks),
        )
        return p
    def request(self, method, url,
        params=None,
        data=None,
        headers=None,
        cookies=None,
        files=None,
        auth=None,
        timeout=None,
        allow_redirects=True,
        proxies=None,
        hooks=None,
        stream=None,
        verify=None,
        cert=None,
        json=None):
        """Constructs a :class:`Request <Request>`, prepares it and sends it.
        Returns :class:`Response <Response>` object.

        :param method: method for the new :class:`Request` object.
        :param url: URL for the new :class:`Request` object.
        :param params: (optional) Dictionary or bytes to be sent in the query
            string for the :class:`Request`.
        :param data: (optional) Dictionary, bytes, or file-like object to send
            in the body of the :class:`Request`.
        :param json: (optional) json to send in the body of the
            :class:`Request`.
        :param headers: (optional) Dictionary of HTTP Headers to send with the
            :class:`Request`.
        :param cookies: (optional) Dict or CookieJar object to send with the
            :class:`Request`.
        :param files: (optional) Dictionary of ``'filename': file-like-objects``
            for multipart encoding upload.
        :param auth: (optional) Auth tuple or callable to enable
            Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple
        :param allow_redirects: (optional) Set to True by default.
        :type allow_redirects: bool
        :param proxies: (optional) Dictionary mapping protocol or protocol and
            hostname to the URL of the proxy.
        :param stream: (optional) whether to immediately download the response
            content. Defaults to ``False``.
        :param verify: (optional) whether the SSL cert will be verified.
            A CA_BUNDLE path can also be provided. Defaults to ``True``.
        :param cert: (optional) if String, path to ssl client cert file (.pem).
            If Tuple, ('cert', 'key') pair.
        :rtype: requests.Response
        """
        # Create the Request.
        req = Request(
            method = method.upper(),
            url = url,
            headers = headers,
            files = files,
            data = data or {},
            json = json,
            params = params or {},
            auth = auth,
            cookies = cookies,
            hooks = hooks,
        )
        prep = self.prepare_request(req)
        proxies = proxies or {}
        settings = self.merge_environment_settings(
            prep.url, proxies, stream, verify, cert
        )
        # Send the request.
        send_kwargs = {
            'timeout': timeout,
            'allow_redirects': allow_redirects,
        }
        send_kwargs.update(settings)
        resp = self.send(prep, **send_kwargs)
        return resp
    def get(self, url, **kwargs):
        """Sends a GET request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('GET', url, **kwargs)
    def options(self, url, **kwargs):
        """Sends a OPTIONS request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('OPTIONS', url, **kwargs)
    def head(self, url, **kwargs):
        """Sends a HEAD request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        kwargs.setdefault('allow_redirects', False)
        return self.request('HEAD', url, **kwargs)
    def post(self, url, data=None, json=None, **kwargs):
        """Sends a POST request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
        :param json: (optional) json to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        return self.request('POST', url, data=data, json=json, **kwargs)
    def put(self, url, data=None, **kwargs):
        """Sends a PUT request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        return self.request('PUT', url, data=data, **kwargs)
    def patch(self, url, data=None, **kwargs):
        """Sends a PATCH request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        return self.request('PATCH', url, data=data, **kwargs)
    def delete(self, url, **kwargs):
        """Sends a DELETE request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        return self.request('DELETE', url, **kwargs)
    def send(self, request, **kwargs):
        """
        Send a given PreparedRequest.

        :rtype: requests.Response
        """
        # Set defaults that the hooks can utilize to ensure they always have
        # the correct parameters to reproduce the previous request.
        kwargs.setdefault('stream', self.stream)
        kwargs.setdefault('verify', self.verify)
        kwargs.setdefault('cert', self.cert)
        kwargs.setdefault('proxies', self.proxies)
        # It's possible that users might accidentally send a Request object.
        # Guard against that specific failure case.
        if isinstance(request, Request):
            raise ValueError('You can only send PreparedRequests.')
        # Set up variables needed for resolve_redirects and dispatching of hooks
        allow_redirects = kwargs.pop('allow_redirects', True)
        stream = kwargs.get('stream')
        hooks = request.hooks
        # Resolve URL in redirect cache, if available.
        if allow_redirects:
            checked_urls = set()
            # Follow the cached redirect chain; checked_urls guards against
            # cycles within the cache.
            while request.url in self.redirect_cache:
                checked_urls.add(request.url)
                new_url = self.redirect_cache.get(request.url)
                if new_url in checked_urls:
                    break
                request.url = new_url
        # Get the appropriate adapter to use
        adapter = self.get_adapter(url=request.url)
        # Start time (approximately) of the request
        start = datetime.utcnow()
        # Send the request
        r = adapter.send(request, **kwargs)
        # Total elapsed time of the request (approximately)
        r.elapsed = datetime.utcnow() - start
        # Response manipulation hooks
        r = dispatch_hook('response', hooks, r, **kwargs)
        # Persist cookies
        if r.history:
            # If the hooks create history then we want those cookies too
            for resp in r.history:
                extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
        extract_cookies_to_jar(self.cookies, request, r.raw)
        # Redirect resolving generator.
        gen = self.resolve_redirects(r, request, **kwargs)
        # Resolve redirects if allowed.
        history = [resp for resp in gen] if allow_redirects else []
        # Shuffle things around if there's history.
        if history:
            # Insert the first (original) request at the start
            history.insert(0, r)
            # Get the last request made
            r = history.pop()
            r.history = history
        # Eagerly consume the body unless streaming was requested.
        if not stream:
            r.content
        return r
    def merge_environment_settings(self, url, proxies, stream, verify, cert):
        """
        Check the environment and merge it with some settings.

        :rtype: dict
        """
        # Gather clues from the surrounding environment.
        if self.trust_env:
            # Set environment's proxies.
            env_proxies = get_environ_proxies(url) or {}
            for (k, v) in env_proxies.items():
                proxies.setdefault(k, v)
            # Look for requests environment configuration and be compatible
            # with cURL.
            if verify is True or verify is None:
                verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
                          os.environ.get('CURL_CA_BUNDLE'))
        # Merge all the kwargs.
        proxies = merge_setting(proxies, self.proxies)
        stream = merge_setting(stream, self.stream)
        verify = merge_setting(verify, self.verify)
        cert = merge_setting(cert, self.cert)
        return {'verify': verify, 'proxies': proxies, 'stream': stream,
                'cert': cert}
    def get_adapter(self, url):
        """
        Returns the appropriate connection adapter for the given URL.

        :rtype: requests.adapters.BaseAdapter
        """
        for (prefix, adapter) in self.adapters.items():
            if url.lower().startswith(prefix):
                return adapter
        # Nothing matches :-/
        raise InvalidSchema("No connection adapters were found for '%s'" % url)
    def close(self):
        """Closes all adapters and as such the session"""
        for v in self.adapters.values():
            v.close()
    def mount(self, prefix, adapter):
        """Registers a connection adapter to a prefix.

        Adapters are sorted in descending order by key length.
        """
        self.adapters[prefix] = adapter
        # Re-append all shorter prefixes so longest-prefix wins in
        # get_adapter's first-match scan.
        keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
        for key in keys_to_move:
            self.adapters[key] = self.adapters.pop(key)
    def __getstate__(self):
        # Pickle only the attributes listed in __attrs__; the redirect cache
        # is converted to a plain dict since RecentlyUsedContainer isn't
        # picklable.
        state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
        state['redirect_cache'] = dict(self.redirect_cache)
        return state
    def __setstate__(self, state):
        # Rebuild the bounded redirect cache from the pickled plain dict.
        redirect_cache = state.pop('redirect_cache', {})
        for attr, value in state.items():
            setattr(self, attr, value)
        self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
        for redirect, to in redirect_cache.items():
            self.redirect_cache[redirect] = to
def session():
    """
    Returns a :class:`Session` for context-management.

    :rtype: Session
    """
    # Kept for backward compatibility; equivalent to calling Session().
    return Session()
| mit |
gangadharkadam/johnerp | erpnext/hr/doctype/leave_allocation/leave_allocation.py | 38 | 4247 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, flt
from frappe import _
from frappe.model.document import Document
from erpnext.hr.utils import set_employee_name
class LeaveAllocation(Document):
	"""Allocation of a number of leaves of one Leave Type to an Employee for
	a Fiscal Year, with optional carry-forward of the previous year's unused
	balance."""

	def validate(self):
		# Runs before save: sanity-check the allocation and default the
		# total to the newly allocated leaves when not yet set.
		self.validate_new_leaves_allocated_value()
		self.check_existing_leave_allocation()
		if not self.total_leaves_allocated:
			self.total_leaves_allocated = self.new_leaves_allocated
		set_employee_name(self)

	def on_update_after_submit(self):
		self.validate_new_leaves_allocated_value()

	def on_update(self):
		self.get_total_allocated_leaves()

	def on_cancel(self):
		# Block cancellation while approved leave applications exist.
		self.check_for_leave_application()

	def validate_new_leaves_allocated_value(self):
		"""Validate that leave allocation is in multiples of 0.5."""
		if flt(self.new_leaves_allocated) % 0.5:
			frappe.throw(_("Leaves must be allocated in multiples of 0.5"))

	def check_existing_leave_allocation(self):
		"""Check whether leave for the same type is already allocated or not."""
		leave_allocation = frappe.db.sql("""select name from `tabLeave Allocation`
			where employee=%s and leave_type=%s and fiscal_year=%s and docstatus=1""",
			(self.employee, self.leave_type, self.fiscal_year))
		if leave_allocation:
			# Bug fix: the fiscal-year placeholder was {0}, which repeated the
			# leave type; it must be {2} so self.fiscal_year is displayed.
			frappe.msgprint(_("Leaves for type {0} already allocated for Employee {1} for Fiscal Year {2}").format(self.leave_type,
				self.employee, self.fiscal_year))
			frappe.throw('<a href="#Form/Leave Allocation/{0}">{0}</a>'.format(leave_allocation[0][0]))

	def get_leave_bal(self, prev_fyear):
		# Balance = allocated minus applied for the given fiscal year.
		return self.get_leaves_allocated(prev_fyear) - self.get_leaves_applied(prev_fyear)

	def get_leaves_applied(self, fiscal_year):
		"""Total approved leave days applied for this employee/type/year."""
		leaves_applied = frappe.db.sql("""select SUM(ifnull(total_leave_days, 0))
			from `tabLeave Application` where employee=%s and leave_type=%s
			and fiscal_year=%s and docstatus=1""",
			(self.employee, self.leave_type, fiscal_year))
		return leaves_applied and flt(leaves_applied[0][0]) or 0

	def get_leaves_allocated(self, fiscal_year):
		"""Total leaves allocated (excluding this document) for the year."""
		leaves_allocated = frappe.db.sql("""select SUM(ifnull(total_leaves_allocated, 0))
			from `tabLeave Allocation` where employee=%s and leave_type=%s
			and fiscal_year=%s and docstatus=1 and name!=%s""",
			(self.employee, self.leave_type, fiscal_year, self.name))
		return leaves_allocated and flt(leaves_allocated[0][0]) or 0

	def allow_carry_forward(self):
		"""Check whether carry forward is allowed or not for this leave type."""
		cf = frappe.db.sql("""select is_carry_forward from `tabLeave Type` where name = %s""",
			self.leave_type)
		cf = cf and cint(cf[0][0]) or 0
		if not cf:
			# Reset the flag on the stored document before aborting.
			frappe.db.set(self, 'carry_forward', 0)
			frappe.throw(_("Cannot carry forward {0}").format(self.leave_type))

	def get_carry_forwarded_leaves(self):
		"""Return the carry-forwarded balance from the previous fiscal year
		and the resulting total allocation."""
		if self.carry_forward:
			self.allow_carry_forward()
		prev_fiscal_year = frappe.db.sql("""select name from `tabFiscal Year`
			where year_start_date = (select date_add(year_start_date, interval -1 year)
			from `tabFiscal Year` where name=%s)
			order by name desc limit 1""", self.fiscal_year)
		prev_fiscal_year = prev_fiscal_year and prev_fiscal_year[0][0] or ''
		prev_bal = 0
		if prev_fiscal_year and cint(self.carry_forward) == 1:
			prev_bal = self.get_leave_bal(prev_fiscal_year)
		return {
			'carry_forwarded_leaves': prev_bal,
			'total_leaves_allocated': flt(prev_bal) + flt(self.new_leaves_allocated)
		}

	def get_total_allocated_leaves(self):
		# Persist the carry-forward figures directly in the database.
		leave_det = self.get_carry_forwarded_leaves()
		frappe.db.set(self, 'carry_forwarded_leaves', flt(leave_det['carry_forwarded_leaves']))
		frappe.db.set(self, 'total_leaves_allocated', flt(leave_det['total_leaves_allocated']))

	def check_for_leave_application(self):
		"""Raise if an approved Leave Application exists for this allocation."""
		exists = frappe.db.sql("""select name from `tabLeave Application`
			where employee=%s and leave_type=%s and fiscal_year=%s and docstatus=1""",
			(self.employee, self.leave_type, self.fiscal_year))
		if exists:
			frappe.msgprint(_("Cannot cancel because Employee {0} is already approved for {1}").format(self.employee,
				self.leave_type))
			frappe.throw('<a href="#Form/Leave Application/{0}">{0}</a>'.format(exists[0][0]))
| agpl-3.0 |
jmehnle/ansible | lib/ansible/compat/tests/mock.py | 117 | 4320 | # (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Compat module for Python3.x's unittest.mock module
'''
import sys
# Python 2.7
# Note: Could use the pypi mock library on python3.x as well as python2.x. It
# is the same as the python3 stdlib mock library
try:
from unittest.mock import *
except ImportError:
# Python 2
try:
from mock import *
except ImportError:
print('You need the mock library installed on python2.x to run tests')
# Prior to 3.4.4, mock_open cannot handle binary read_data
if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
    file_spec = None

    def _iterate_read_data(read_data):
        # Helper for mock_open:
        # Yield the lines of *read_data* one at a time so that separate
        # calls to readline, read, and readlines stay properly interleaved.
        sep = b'\n' if isinstance(read_data, bytes) else '\n'
        lines = [fragment + sep for fragment in read_data.split(sep)]
        if lines[-1] == sep:
            # read_data ended with a newline: splitting left an artificial
            # empty trailing entry -- drop it.
            lines.pop()
        else:
            # read_data did not end with a newline: strip the separator our
            # naive "+ sep" appended to the final line.
            lines[-1] = lines[-1][:-1]
        for line in lines:
            yield line
def mock_open(mock=None, read_data=''):
    """
    A helper function to create a mock to replace the use of `open`. It works
    for `open` called directly or used as a context manager.

    The `mock` argument is the mock object to configure. If `None` (the
    default) then a `MagicMock` will be created for you, with the API limited
    to methods or attributes available on standard file handles.

    `read_data` is a string for the `read` method, `readline`, and
    `readlines` of the file handle to return.  This is an empty string by
    default.
    """
    def _readlines_side_effect(*args, **kwargs):
        # An explicitly configured return_value always wins over read_data.
        if handle.readlines.return_value is not None:
            return handle.readlines.return_value
        return list(_data)

    def _read_side_effect(*args, **kwargs):
        if handle.read.return_value is not None:
            return handle.read.return_value
        # Join with an empty str or bytes, matching read_data's type.
        return type(read_data)().join(_data)

    def _readline_side_effect():
        # NOTE: if readline.return_value is set, this yields it forever.
        if handle.readline.return_value is not None:
            while True:
                yield handle.readline.return_value
        for line in _data:
            yield line

    # Lazily compute the union of text- and byte-handle attributes once per
    # process; used to limit the mock's API to real file-handle members.
    global file_spec
    if file_spec is None:
        import _io
        file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))

    if mock is None:
        mock = MagicMock(name='open', spec=open)

    handle = MagicMock(spec=file_spec)
    handle.__enter__.return_value = handle

    # Single shared generator so read/readline/readlines interleave.
    _data = _iterate_read_data(read_data)

    handle.write.return_value = None
    handle.read.return_value = None
    handle.readline.return_value = None
    handle.readlines.return_value = None

    handle.read.side_effect = _read_side_effect
    handle.readline.side_effect = _readline_side_effect()
    handle.readlines.side_effect = _readlines_side_effect

    mock.return_value = handle
    return mock
| gpl-3.0 |
jorgenkg/python-neural-network | nimblenet/cost_functions.py | 1 | 1632 | import numpy as np
import math
def sum_squared_error(outputs, targets, derivative=False):
    """Half of the mean (over samples) of the row-wise summed squared error.

    With derivative=True, returns the gradient (outputs - targets) instead.
    """
    residual = outputs - targets
    if derivative:
        return residual
    per_sample_error = np.sum(residual ** 2, axis=1)
    return 0.5 * np.mean(per_sample_error)
def hellinger_distance(outputs, targets, derivative=False):
    """Hellinger distance cost.

    The output signals should be in the range [0, 1].
    """
    sqrt_gap = np.sqrt(outputs) - np.sqrt(targets)
    if derivative:
        return sqrt_gap / (np.sqrt(2) * np.sqrt(outputs))
    per_sample = np.sum(sqrt_gap ** 2, axis=1)
    return np.mean(per_sample / math.sqrt(2))
def binary_cross_entropy_cost(outputs, targets, derivative=False, epsilon=1e-11):
    """Binary cross-entropy loss.

    The output signals should be in the range [0, 1].  Outputs are clipped
    into (epsilon, 1 - epsilon) so the logarithms stay finite, and the
    derivative's divisor is floored at epsilon to prevent overflow.
    """
    clipped = np.clip(outputs, epsilon, 1 - epsilon)
    divisor = np.maximum(clipped * (1 - clipped), epsilon)

    if derivative:
        return (clipped - targets) / divisor

    log_likelihood = targets * np.log(clipped) + (1 - targets) * np.log(1 - clipped)
    return np.mean(-np.sum(log_likelihood, axis=1))

# Backwards-compatible name for the same loss function.
cross_entropy_cost = binary_cross_entropy_cost
def softmax_categorical_cross_entropy_cost(outputs, targets, derivative=False, epsilon=1e-11):
    """Categorical cross-entropy for softmax output layers.

    The output signals should be in the range [0, 1]; they are clipped into
    (epsilon, 1 - epsilon) before the logarithm is taken.
    """
    clipped = np.clip(outputs, epsilon, 1 - epsilon)
    if derivative:
        return clipped - targets
    return np.mean(-np.sum(targets * np.log(clipped), axis=1))

# Alias kept for callers that use the older name.
softmax_neg_loss = softmax_categorical_cross_entropy_cost
stammen/xbmc | addons/metadata.demo.albums/demo.py | 26 | 5381 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import xbmcplugin,xbmcgui,xbmc,xbmcaddon
import os,sys,urllib
def get_params():
    """Parse the plugin query string (sys.argv[2]) into a dict.

    Mirrors the original quirks exactly: when the query string is shorter
    than two characters an empty *list* is returned instead of a dict, and
    a trailing '/' truncation is computed but never affects the result.
    """
    parsed = []
    querystring = sys.argv[2]
    if len(querystring) >= 2:
        raw = sys.argv[2]
        cleaned = raw.replace('?', '')
        if raw[-1] == '/':
            raw = raw[:-2]  # NOTE: result unused below; kept for parity.
        parsed = {}
        for pair in cleaned.split('&'):
            halves = pair.split('=')
            if len(halves) == 2:
                parsed[halves[0]] = halves[1]
    return parsed
# Demo album-scraper flow: Kodi invokes this script with a plugin handle in
# sys.argv[1] and a query string in sys.argv[2]; results are returned through
# xbmcplugin listitems with hard-coded demo data.
params=get_params()
print params
try:
    action=urllib.unquote_plus(params["action"])
except:
    # NOTE(review): a bare except leaves 'action' undefined when the key is
    # missing, so the print below would raise NameError — confirm intended.
    pass
print ("Action: "+action)

if action == 'find':
    # 'find': return two hard-coded search results for the requested album.
    try:
        artist=urllib.unquote_plus(params["artist"])
        album=urllib.unquote_plus(params["title"])
    except:
        pass
    print 'Find album with title %s from artist %s' %(album, artist)
    liz=xbmcgui.ListItem('Demo album 1', thumbnailImage='DefaultAlbum.png', offscreen=True)
    liz.setProperty('relevance', '0.5')
    liz.setProperty('album.artist', artist)
    liz.setProperty('album.year', '2005')
    xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url="/path/to/album", listitem=liz, isFolder=True)
    liz=xbmcgui.ListItem('Demo album 2', thumbnailImage='DefaultVideo.png', offscreen=True)
    liz.setProperty('relevance', '0.3')
    liz.setProperty('album.artist', 'spiff')
    liz.setProperty('album.year', '2016')
    xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url="/path/to/album2", listitem=liz, isFolder=True)
elif action == 'getdetails':
    # 'getdetails': resolve one of the URLs handed out by 'find' above into
    # a fully-populated album listitem.
    try:
        url=urllib.unquote_plus(params["url"])
    except:
        pass
    if url == '/path/to/album':
        liz=xbmcgui.ListItem('Demo album 1', offscreen=True)
        liz.setProperty('album.musicbrainzid', '123')
        liz.setProperty('album.artists', '2')
        liz.setProperty('album.artist1.name', 'Jan')
        liz.setProperty('album.artist1.musicbrainzid', '456')
        liz.setProperty('album.artist2.name', 'Banan')
        liz.setProperty('album.artist2.musicbrainzid', '789')
        liz.setProperty('album.artist_description', 'I hate this album.')
        liz.setProperty('album.genre', 'rock / pop')
        liz.setProperty('album.styles', 'light / heavy')
        liz.setProperty('album.moods', 'angry / happy')
        liz.setProperty('album.themes', 'Morbid sexual things.. And urmumz.')
        liz.setProperty('album.compiliation', 'true')
        liz.setProperty('album.review', 'Somebody should die for making this')
        liz.setProperty('album.release_date', '2005-01-02')
        liz.setProperty('album.label', 'ArtistExploitation inc')
        liz.setProperty('album.type', 'what is this?')
        liz.setProperty('album.release_type', 'single')
        liz.setProperty('album.year', '2005')
        liz.setProperty('album.rating', '2.5')
        liz.setProperty('album.userrating', '4.5')
        liz.setProperty('album.votes', '100')
        liz.setProperty('album.thumbs', '2')
        liz.setProperty('album.thumb1.url', 'DefaultBackFanart.png')
        liz.setProperty('album.thumb1.aspect', '1.78')
        liz.setProperty('album.thumb2.url', '/home/akva/Pictures/hawaii-shirt.png')
        liz.setProperty('album.thumb2.aspect', '2.35')
        xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=liz)
    elif url == '/path/to/album2':
        liz=xbmcgui.ListItem('Demo album 2', offscreen=True)
        liz.setProperty('album.musicbrainzid', '123')
        liz.setProperty('album.artists', '2')
        liz.setProperty('album.artist1.name', 'Heise')
        liz.setProperty('album.artist1.musicbrainzid', '456')
        liz.setProperty('album.artist2.name', 'Kran')
        liz.setProperty('album.artist2.musicbrainzid', '789')
        liz.setProperty('album.artist_description', 'I love this album.')
        liz.setProperty('album.genre', 'classical / jazz')
        liz.setProperty('album.styles', 'yay / hurrah')
        liz.setProperty('album.moods', 'sad / excited')
        liz.setProperty('album.themes', 'Nice things.. And unicorns.')
        liz.setProperty('album.compiliation', 'false')
        liz.setProperty('album.review', 'Somebody should be rewarded for making this')
        liz.setProperty('album.release_date', '2015-01-02')
        liz.setProperty('album.label', 'Artists inc')
        liz.setProperty('album.type', 'what is that?')
        liz.setProperty('album.release_type', 'album')
        liz.setProperty('album.year', '2015')
        liz.setProperty('album.rating', '4.5')
        liz.setProperty('album.userrating', '3.5')
        liz.setProperty('album.votes', '200')
        liz.setProperty('album.thumbs', '2')
        liz.setProperty('album.thumb1.url', 'DefaultBackFanart.png')
        liz.setProperty('album.thumb1.aspect', '1.78')
        liz.setProperty('album.thumb2.url', '/home/akva/Pictures/hawaii-shirt.png')
        liz.setProperty('album.thumb2.aspect', '2.35')
        xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=liz)

xbmcplugin.endOfDirectory(int(sys.argv[1]))
| gpl-2.0 |
TwilioDevEd/marketing-notifications-flask | tests/helper_tests.py | 3 | 1094 | import unittest
from marketing_notifications_python.view_helpers import redirect_to, view
from base import BaseTestCase
from flask import redirect, url_for, render_template
class ViewHelperTests(BaseTestCase):
    """Verify the project's view helper shims match Flask's own helpers."""

    # Ensures 'redirect_to' redirect you to the same place as 'redirect'
    def test_redirect_to_redirects_to_same_location_of_redirect(self):
        # assert
        assert redirect_to('views.notifications').location == redirect(url_for('views.notifications')).location

    # Ensures 'redirect_to' redirect you to the same place as 'redirect' with routes params
    def test_redirect_to_redirects_to_same_location_of_redirect_with_route_params(self):
        # assert
        assert redirect_to('views.notifications').location == redirect(
            url_for('views.notifications')).location

    # Ensures 'view' renders the same template that 'render_template'
    def test_view_renders_the_same_template_as_render_template(self):
        # assert
        assert view('notifications') == render_template('notifications.html')

if __name__ == '__main__':
    unittest.main()
| mit |
IRSO/irsosav | nodejs-v6.2/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py | 713 | 115880 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = [
  'executable',
  'shared_library',
  'loadable_module',
  'mac_kernel_extension',
]

# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']

# base_path_sections is a list of sections defined by GYP that contain
# pathnames.  The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
  'destination',
  'files',
  'include_dirs',
  'inputs',
  'libraries',
  'outputs',
  'sources',
]
# Starts empty; presumably filled from base_path_sections plus generator
# additions during setup — the populating code is outside this view.
path_sections = set()

# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
  """Return True if |section| names a section whose values are pathnames."""
  # Strip any trailing '=+?!' merge markers; they apply the section to its
  # base name.  '/' is deliberately not stripped, because there's no way for
  # a regular expression to be treated as a path.
  section = section.rstrip('=+?!')

  if section in path_sections:
    return True

  # Hand-rolled, faster equivalent of matching '_(dir|file|path)s?$'.  This
  # runs hundreds of thousands of times, so avoiding a regexp matters.
  if '_' not in section:
    return False
  tail = section[-6:]
  if tail.endswith('s'):
    tail = tail[:-1]
  return tail[-5:] in ('_file', '_path') or tail[-4:] == '_dir'
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations.  It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
  # Sections that must exist inside targets and not configurations.
  'actions',
  'configurations',
  'copies',
  'default_configuration',
  'dependencies',
  'dependencies_original',
  'libraries',
  'postbuilds',
  'product_dir',
  'product_extension',
  'product_name',
  'product_prefix',
  'rules',
  'run_as',
  'sources',
  'standalone_static_library',
  'suppress_wildcard',
  'target_name',
  'toolset',
  'toolsets',
  'type',

  # Sections that can be found inside targets or configurations, but that
  # should not be propagated from targets into their configurations.
  'variables',
]
# Starts empty; presumably merged from base_non_configuration_keys plus
# generator-provided keys during setup — the populating code is outside
# this view.
non_configuration_keys = []

# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
  'actions',
  'all_dependent_settings',
  'configurations',
  'dependencies',
  'direct_dependent_settings',
  'libraries',
  'link_settings',
  'sources',
  'standalone_static_library',
  'target_name',
  'type',
]

# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False

# Paths for converting filelist paths to output paths: {
#   toplevel,
#   qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
  """Return a list of all build files included into build_file_path.

  The returned list will contain build_file_path as well as all other files
  that it included, either directly or indirectly.  Note that the list may
  contain files that were included into a conditional section that evaluated
  to false and was not merged into build_file_path's dict.

  aux_data is a dict containing a key for each build file or included build
  file.  Those keys provide access to dicts whose "included" keys contain
  lists of all other files included by the build file.

  included should be left at its default None value by external callers.  It
  is used for recursion.

  The returned list will not contain any duplicate entries.  Each build file
  in the list will be relative to the current directory.
  """
  # Idiom fix: compare to None with 'is', not '=='.  A fresh list is created
  # per top-level call, so recursion shares one accumulator safely.
  if included is None:
    included = []

  # Already visited: stop here so include cycles terminate.
  if build_file_path in included:
    return included

  included.append(build_file_path)

  for included_build_file in aux_data[build_file_path].get('included', []):
    GetIncludedBuildFiles(included_build_file, aux_data, included)

  return included
def CheckedEval(file_contents):
  """Return the eval of a gyp file.

  The gyp file is restricted to dictionaries and lists only, and
  repeated keys are not allowed.

  Note that this is slower than eval() is.
  """
  # Parse with the (Python 2) compiler module and walk the AST shape we
  # expect: Module -> Stmt -> single Discard expression.
  syntax_tree = compiler.parse(file_contents)
  assert isinstance(syntax_tree, Module)
  module_children = syntax_tree.getChildren()
  assert module_children[0] is None
  assert isinstance(module_children[1], Stmt)
  stmt_children = module_children[1].getChildren()
  assert isinstance(stmt_children[0], Discard)
  expr_children = stmt_children[0].getChildren()
  assert len(expr_children) == 1
  return CheckNode(expr_children[0], [])
def CheckNode(node, keypath):
  """Convert one compiler.ast node into plain Python data.

  Dict nodes become dicts, List nodes become lists, and Const nodes become
  their constant value.  Raises GypError when a dictionary key is repeated
  (reporting the key path for diagnosis) and TypeError for any other node
  type.
  """
  if isinstance(node, Dict):
    c = node.getChildren()
    # Renamed from 'dict' to avoid shadowing the builtin.
    result = {}
    # Children alternate key, value, key, value, ...
    for n in range(0, len(c), 2):
      assert isinstance(c[n], Const)
      key = c[n].getChildren()[0]
      if key in result:
        raise GypError("Key '" + key + "' repeated at level " +
              repr(len(keypath) + 1) + " with key path '" +
              '.'.join(keypath) + "'")
      kp = list(keypath)  # Make a copy of the list for descending this node.
      kp.append(key)
      result[key] = CheckNode(c[n + 1], kp)
    return result
  elif isinstance(node, List):
    c = node.getChildren()
    children = []
    for index, child in enumerate(c):
      kp = list(keypath)  # Copy of list.
      kp.append(repr(index))
      children.append(CheckNode(child, kp))
    return children
  elif isinstance(node, Const):
    return node.getChildren()[0]
  else:
    raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
         "': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
                     is_target, check):
  """Read and evaluate one .gyp/.gypi file, caching the result in |data|.

  |check| selects the restricted CheckedEval parser over a plain eval; any
  'includes' in the file (plus |includes|) are merged into the returned
  dict.  Raises GypError when the file is missing or not a dictionary.
  """
  if build_file_path in data:
    return data[build_file_path]

  if os.path.exists(build_file_path):
    # NOTE(review): the file object is never closed explicitly; this relies
    # on refcounting to release the handle.
    build_file_contents = open(build_file_path).read()
  else:
    raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))

  build_file_data = None
  try:
    if check:
      build_file_data = CheckedEval(build_file_contents)
    else:
      # Plain eval with builtins stripped so the file can only build data.
      build_file_data = eval(build_file_contents, {'__builtins__': None},
                             None)
  except SyntaxError, e:
    e.filename = build_file_path
    raise
  except Exception, e:
    gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
    raise

  if type(build_file_data) is not dict:
    raise GypError("%s does not evaluate to a dictionary." % build_file_path)

  data[build_file_path] = build_file_data
  aux_data[build_file_path] = {}

  # Scan for includes and merge them in.
  if ('skip_includes' not in build_file_data or
      not build_file_data['skip_includes']):
    try:
      if is_target:
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, includes, check)
      else:
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, None, check)
    except Exception, e:
      gyp.common.ExceptionAppend(e,
                                 'while reading includes of ' + build_file_path)
      raise

  return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
                                  includes, check):
  """Merge |includes| plus subdict['includes'] into |subdict| in order,
  recording each merged file under aux_data[subdict_path]['included'] and
  recursing into nested dicts and lists.
  """
  includes_list = []
  if includes != None:
    includes_list.extend(includes)
  if 'includes' in subdict:
    for include in subdict['includes']:
      # "include" is specified relative to subdict_path, so it may be specified
      # relative to the current directory.  It will be interpreted as
      # relative to subdict_path, so expand it accordingly.
      relative_include = \
          os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
      includes_list.append(relative_include)
    # Unhook the includes list, it's no longer needed.
    del subdict['includes']

  # Merge in the included files.
  for include in includes_list:
    if not 'included' in aux_data[subdict_path]:
      aux_data[subdict_path]['included'] = []
    aux_data[subdict_path]['included'].append(include)

    gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)

    MergeDicts(subdict,
               LoadOneBuildFile(include, data, aux_data, None, False, check),
               subdict_path, include)

  # Recurse into subdictionaries.
  for k, v in subdict.iteritems():
    if type(v) is dict:
      LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
                                    None, check)
    elif type(v) is list:
      LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
                                    check)
# Walks a list looking for nested dicts (which may carry includes) and nested
# lists, recursing into each.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
  for element in sublist:
    if type(element) is dict:
      LoadBuildFileIncludesIntoDict(element, sublist_path, data, aux_data,
                                    None, check)
    elif type(element) is list:
      LoadBuildFileIncludesIntoList(element, sublist_path, data, aux_data,
                                    check)
# Expands per-target 'toolsets' lists into one target copy per toolset, and
# recurses into condition entries since they can contain toolsets too.
def ProcessToolsetsInDict(data):
  if 'targets' in data:
    expanded_targets = []
    for target in data['targets']:
      # A target carrying an explicit 'toolset' without a 'toolsets' list has
      # already been expanded; keep it as-is.
      if 'toolset' in target and 'toolsets' not in target:
        expanded_targets.append(target)
        continue
      if multiple_toolsets:
        toolsets = target.get('toolsets', ['target'])
      else:
        toolsets = ['target']
      # Drop the 'toolsets' key so this definition is only processed once.
      target.pop('toolsets', None)
      if toolsets:
        # Only deep-copy when more than one toolset is requested; the
        # original dict serves the first toolset.
        for extra_toolset in toolsets[1:]:
          clone = gyp.simple_copy.deepcopy(target)
          clone['toolset'] = extra_toolset
          expanded_targets.append(clone)
        target['toolset'] = toolsets[0]
        expanded_targets.append(target)
    data['targets'] = expanded_targets
  if 'conditions' in data:
    for condition in data['conditions']:
      if type(condition) is list:
        for condition_dict in condition[1:]:
          if type(condition_dict) is dict:
            ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name.  It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
                        depth, check, load_dependencies):
  """Load one target build file, expand toolsets/variables, merge defaults.

  Returns False when build_file_path was already loaded; the tuple
  (build_file_path, dependencies) when load_dependencies is false; and None
  after recursively loading every dependency otherwise.
  """
  # If depth is set, predefine the DEPTH variable to be a relative path from
  # this build file's directory to the directory identified by depth.
  if depth:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
    if d == '':
      variables['DEPTH'] = '.'
    else:
      variables['DEPTH'] = d.replace('\\', '/')

  # The 'target_build_files' key is only set when loading target build files in
  # the non-parallel code path, where LoadTargetBuildFile is called
  # recursively.  In the parallel code path, we don't need to check whether the
  # |build_file_path| has already been loaded, because the 'scheduled' set in
  # ParallelState guarantees that we never load the same |build_file_path|
  # twice.
  if 'target_build_files' in data:
    if build_file_path in data['target_build_files']:
      # Already loaded.
      return False
    data['target_build_files'].add(build_file_path)

  gyp.DebugOutput(gyp.DEBUG_INCLUDES,
                  "Loading Target Build File '%s'", build_file_path)

  build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
                                     includes, True, check)

  # Store DEPTH for later use in generators.
  build_file_data['_DEPTH'] = depth

  # Set up the included_files key indicating which .gyp files contributed to
  # this target dict.
  if 'included_files' in build_file_data:
    raise GypError(build_file_path + ' must not contain included_files key')

  included = GetIncludedBuildFiles(build_file_path, aux_data)
  build_file_data['included_files'] = []
  for included_file in included:
    # included_file is relative to the current directory, but it needs to
    # be made relative to build_file_path's directory.
    included_relative = \
        gyp.common.RelativePath(included_file,
                                os.path.dirname(build_file_path))
    build_file_data['included_files'].append(included_relative)

  # Do a first round of toolsets expansion so that conditions can be defined
  # per toolset.
  ProcessToolsetsInDict(build_file_data)

  # Apply "pre"/"early" variable expansions and condition evaluations.
  ProcessVariablesAndConditionsInDict(
      build_file_data, PHASE_EARLY, variables, build_file_path)

  # Since some toolsets might have been defined conditionally, perform
  # a second round of toolsets expansion now.
  ProcessToolsetsInDict(build_file_data)

  # Look at each project's target_defaults dict, and merge settings into
  # targets.
  if 'target_defaults' in build_file_data:
    if 'targets' not in build_file_data:
      raise GypError("Unable to find targets in build file %s" %
                     build_file_path)

    index = 0
    while index < len(build_file_data['targets']):
      # This procedure needs to give the impression that target_defaults is
      # used as defaults, and the individual targets inherit from that.
      # The individual targets need to be merged into the defaults.  Make
      # a deep copy of the defaults for each target, merge the target dict
      # as found in the input file into that copy, and then hook up the
      # copy with the target-specific data merged into it as the replacement
      # target dict.
      old_target_dict = build_file_data['targets'][index]
      new_target_dict = gyp.simple_copy.deepcopy(
          build_file_data['target_defaults'])
      MergeDicts(new_target_dict, old_target_dict,
                 build_file_path, build_file_path)
      build_file_data['targets'][index] = new_target_dict
      index += 1

    # No longer needed.
    del build_file_data['target_defaults']

  # Look for dependencies.  This means that dependency resolution occurs
  # after "pre" conditionals and variable expansion, but before "post" -
  # in other words, you can't put a "dependencies" section inside a "post"
  # conditional within a target.

  dependencies = []
  if 'targets' in build_file_data:
    for target_dict in build_file_data['targets']:
      if 'dependencies' not in target_dict:
        continue
      for dependency in target_dict['dependencies']:
        dependencies.append(
            gyp.common.ResolveTarget(build_file_path, dependency, None)[0])

  if load_dependencies:
    for dependency in dependencies:
      try:
        LoadTargetBuildFile(dependency, data, aux_data, variables,
                            includes, depth, check, load_dependencies)
      except Exception, e:
        gyp.common.ExceptionAppend(
            e, 'while loading dependencies of %s' % build_file_path)
        raise
  else:
    return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
                            build_file_path, variables,
                            includes, depth, check,
                            generator_input_info):
  """Wrapper around LoadTargetBuildFile for parallel processing.

     This wrapper is used when LoadTargetBuildFile is executed in
     a worker process.

     Returns (build_file_path, build_file_data, dependencies) on success;
     a None return signals failure to the callback in the main process.
  """

  try:
    # Ctrl-C is handled by the parent; ignore it here so workers die cleanly.
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    # Apply globals so that the worker process behaves the same.
    for key, value in global_flags.iteritems():
      globals()[key] = value

    SetGeneratorGlobals(generator_input_info)
    result = LoadTargetBuildFile(build_file_path, per_process_data,
                                 per_process_aux_data, variables,
                                 includes, depth, check, False)
    if not result:
      return result

    (build_file_path, dependencies) = result

    # We can safely pop the build_file_data from per_process_data because it
    # will never be referenced by this process again, so we don't need to keep
    # it in the cache.
    build_file_data = per_process_data.pop(build_file_path)

    # This gets serialized and sent back to the main process via a pipe.
    # It's handled in LoadTargetBuildFileCallback.
    return (build_file_path,
            build_file_data,
            dependencies)
  except GypError, e:
    sys.stderr.write("gyp: %s\n" % e)
    return None
  except Exception, e:
    print >>sys.stderr, 'Exception:', e
    print >>sys.stderr, traceback.format_exc()
    return None
class ParallelProcessingError(Exception):
  """Exception raised for errors during parallel processing of input files."""
  pass
class ParallelState(object):
  """Class to keep track of state when processing input files in parallel.

  If build files are loaded in parallel, use this to keep track of
  state during farming out and processing parallel jobs. It's stored
  in a global so that the callback functions can have access to it.
  """

  def __init__(self):
    # The multiprocessing pool.
    self.pool = None
    # The condition variable used to protect this object and notify
    # the main loop when there might be more data to process.
    self.condition = None
    # The "data" dict that was passed to LoadTargetBuildFileParallel
    self.data = None
    # The number of parallel calls outstanding; decremented when a response
    # was received.
    self.pending = 0
    # The set of all build files that have been scheduled, so we don't
    # schedule the same one twice.
    self.scheduled = set()
    # A list of dependency build file paths that haven't been scheduled yet.
    self.dependencies = []
    # Flag to indicate there was an error in a child process.
    self.error = False

  def LoadTargetBuildFileCallback(self, result):
    """Handle the results of running LoadTargetBuildFile in another process.
    """
    self.condition.acquire()
    # A falsy result means the worker failed; record it and wake the
    # main loop so it can stop scheduling new work.
    if not result:
      self.error = True
      self.condition.notify()
      self.condition.release()
      return
    (build_file_path0, build_file_data0, dependencies0) = result
    self.data[build_file_path0] = build_file_data0
    self.data['target_build_files'].add(build_file_path0)
    # Schedule any dependency not already farmed out.
    for new_dependency in dependencies0:
      if new_dependency not in self.scheduled:
        self.scheduled.add(new_dependency)
        self.dependencies.append(new_dependency)
    self.pending -= 1
    self.condition.notify()
    self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info):
  """Load |build_files| and all their dependencies using a process pool.

  Worker results are merged into |data| by
  ParallelState.LoadTargetBuildFileCallback; exits with status 1 if any
  worker reported an error.
  """
  parallel_state = ParallelState()
  parallel_state.condition = threading.Condition()
  # Make copies of the build_files argument that we can modify while working.
  parallel_state.dependencies = list(build_files)
  parallel_state.scheduled = set(build_files)
  parallel_state.pending = 0
  parallel_state.data = data

  try:
    parallel_state.condition.acquire()
    # Loop until every scheduled file has been handed out AND answered.
    while parallel_state.dependencies or parallel_state.pending:
      if parallel_state.error:
        break
      if not parallel_state.dependencies:
        # Nothing left to hand out; wait for an in-flight result.
        parallel_state.condition.wait()
        continue

      dependency = parallel_state.dependencies.pop()

      parallel_state.pending += 1
      # Snapshot of the module globals the worker must replicate.
      global_flags = {
        'path_sections': globals()['path_sections'],
        'non_configuration_keys': globals()['non_configuration_keys'],
        'multiple_toolsets': globals()['multiple_toolsets']}

      if not parallel_state.pool:
        parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
      parallel_state.pool.apply_async(
          CallLoadTargetBuildFile,
          args = (global_flags, dependency,
                  variables, includes, depth, check, generator_input_info),
          callback = parallel_state.LoadTargetBuildFileCallback)
  except KeyboardInterrupt, e:
    parallel_state.pool.terminate()
    raise e

  parallel_state.condition.release()

  parallel_state.pool.close()
  parallel_state.pool.join()
  parallel_state.pool = None

  if parallel_state.error:
    sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple.  For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13): the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS= set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
  open_stack = []
  group_start = -1
  for pos, ch in enumerate(input_str):
    if ch in LBRACKETS:
      open_stack.append(ch)
      if group_start == -1:
        group_start = pos
    elif ch in BRACKETS:
      # A closer with an empty stack, or one that doesn't pair with the
      # most recent opener, means the input is unbalanced.
      if not open_stack or open_stack.pop() != BRACKETS[ch]:
        return (-1, -1)
      if not open_stack:
        # Matched the very first opener: report [start, one-past-close).
        return (group_start, pos + 1)
  return (-1, -1)
def IsStrCanonicalInt(string):
  """Returns True if |string| is in its canonical integer form.

  The canonical form is such that str(int(string)) == string.
  """
  # Called a lot, so plain string tests are used instead of a regexp —
  # they run in roughly half the time here.
  if type(string) is not str:
    return False
  if not string:
    return False
  if string == "0":
    return True
  if string[0] == "-":
    string = string[1:]
    if not string:
      return False
  # No leading zeros allowed (handled by the '0' special case above).
  if '1' <= string[0] <= '9':
    return string.isdigit()
  return False
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
    r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
    r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
    r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
  """Rewrite a leading Unix 'cat ' into 'type ' when running on Windows.

  Accepts either a command string or an argv-style list (in which case
  only the first element is rewritten); non-Windows platforms get the
  command back untouched.
  """
  if sys.platform != 'win32':
    return cmd
  if type(cmd) is list:
    return [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
  return re.sub('^cat ', 'type ', cmd)
# Variable-expansion phases: '<' forms expand early, '>' forms late, and
# '^' forms latelate (see ExpandVariables below).
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
  """Expands GYP variable and command references found in |input|.

  |phase| selects which expansion symbol is processed: PHASE_EARLY ('<'),
  PHASE_LATE ('>'), or PHASE_LATELATE ('^').  |variables| maps variable
  names to their values, and |build_file| is the path of the .gyp file
  being processed, used for relative paths and error messages.  Returns
  the expanded value, which may be a string, an int (for canonical
  integers), or a list (for @-style list expansions).
  """
  # Look for the pattern that gets expanded into variables
  if phase == PHASE_EARLY:
    variable_re = early_variable_re
    expansion_symbol = '<'
  elif phase == PHASE_LATE:
    variable_re = late_variable_re
    expansion_symbol = '>'
  elif phase == PHASE_LATELATE:
    variable_re = latelate_variable_re
    expansion_symbol = '^'
  else:
    assert False

  input_str = str(input)
  if IsStrCanonicalInt(input_str):
    return int(input_str)

  # Do a quick scan to determine if an expensive regex search is warranted.
  if expansion_symbol not in input_str:
    return input_str

  # Get the entire list of matches as a list of MatchObject instances.
  # (using findall here would return strings instead of MatchObjects).
  matches = list(variable_re.finditer(input_str))
  if not matches:
    return input_str

  output = input_str
  # Reverse the list of matches so that replacements are done right-to-left.
  # That ensures that earlier replacements won't mess up the string in a
  # way that causes later calls to find the earlier substituted text instead
  # of what's intended for replacement.
  matches.reverse()
  for match_group in matches:
    match = match_group.groupdict()
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
    # match['replace'] is the substring to look for, match['type']
    # is the character code for the replacement type (< > <! >! <| >| <@
    # >@ <!@ >!@), match['is_array'] contains a '[' for command
    # arrays, and match['content'] is the name of the variable (< >)
    # or command to run (<! >!). match['command_string'] is an optional
    # command string. Currently, only 'pymod_do_main' is supported.

    # run_command is true if a ! variant is used.
    run_command = '!' in match['type']
    command_string = match['command_string']

    # file_list is true if a | variant is used.
    file_list = '|' in match['type']

    # Capture these now so we can adjust them later.
    replace_start = match_group.start('replace')
    replace_end = match_group.end('replace')

    # Find the ending paren, and re-evaluate the contained string.
    (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])

    # Adjust the replacement range to match the entire command
    # found by FindEnclosingBracketGroup (since the variable_re
    # probably doesn't match the entire command if it contained
    # nested variables).
    replace_end = replace_start + c_end

    # Find the "real" replacement, matching the appropriate closing
    # paren, and adjust the replacement start and end.
    replacement = input_str[replace_start:replace_end]

    # Figure out what the contents of the variable parens are.
    contents_start = replace_start + c_start + 1
    contents_end = replace_end - 1
    contents = input_str[contents_start:contents_end]

    # Do filter substitution now for <|().
    # Admittedly, this is different than the evaluation order in other
    # contexts. However, since filtration has no chance to run on <|(),
    # this seems like the only obvious way to give them access to filters.
    if file_list:
      processed_variables = gyp.simple_copy.deepcopy(variables)
      ProcessListFiltersInDict(contents, processed_variables)
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase,
                                 processed_variables, build_file)
    else:
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase, variables, build_file)

    # Strip off leading/trailing whitespace so that variable matches are
    # simpler below (and because they are rarely needed).
    contents = contents.strip()

    # expand_to_list is true if an @ variant is used. In that case,
    # the expansion should result in a list. Note that the caller
    # is to be expecting a list in return, and not all callers do
    # because not all are working in list context. Also, for list
    # expansions, there can be no other text besides the variable
    # expansion in the input string.
    expand_to_list = '@' in match['type'] and input_str == replacement

    if run_command or file_list:
      # Find the build file's directory, so commands can be run or file lists
      # generated relative to it.
      build_file_dir = os.path.dirname(build_file)
      if build_file_dir == '' and not file_list:
        # If build_file is just a leaf filename indicating a file in the
        # current directory, build_file_dir might be an empty string. Set
        # it to None to signal to subprocess.Popen that it should run the
        # command in the current directory.
        build_file_dir = None

      # Support <|(listfile.txt ...) which generates a file
      # containing items from a gyp list, generated at gyp time.
      # This works around actions/rules which have more inputs than will
      # fit on the command line.
      if file_list:
        if type(contents) is list:
          contents_list = contents
        else:
          contents_list = contents.split(' ')
        replacement = contents_list[0]
        if os.path.isabs(replacement):
          raise GypError('| cannot handle absolute paths, got "%s"' % replacement)

        if not generator_filelist_paths:
          path = os.path.join(build_file_dir, replacement)
        else:
          if os.path.isabs(build_file_dir):
            toplevel = generator_filelist_paths['toplevel']
            rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
          else:
            rel_build_file_dir = build_file_dir
          qualified_out_dir = generator_filelist_paths['qualified_out_dir']
          path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
          gyp.common.EnsureDirExists(path)

        # The expansion token is replaced by the path to the generated file,
        # relative to the build file's directory.
        replacement = gyp.common.RelativePath(path, build_file_dir)
        f = gyp.common.WriteOnDiff(path)
        for i in contents_list[1:]:
          f.write('%s\n' % i)
        f.close()

      elif run_command:
        use_shell = True
        if match['is_array']:
          # NOTE(review): eval of build-file content; GYP files are treated
          # as trusted input here.
          contents = eval(contents)
          use_shell = False

        # Check for a cached value to avoid executing commands, or generating
        # file lists more than once. The cache key contains the command to be
        # run as well as the directory to run it from, to account for commands
        # that depend on their current directory.
        # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
        # someone could author a set of GYP files where each time the command
        # is invoked it produces different output by design. When the need
        # arises, the syntax should be extended to support no caching off a
        # command's output so it is run every time.
        cache_key = (str(contents), build_file_dir)
        cached_value = cached_command_results.get(cache_key, None)
        if cached_value is None:
          gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                          "Executing command '%s' in directory '%s'",
                          contents, build_file_dir)

          replacement = ''

          if command_string == 'pymod_do_main':
            # <!pymod_do_main(modulename param eters) loads |modulename| as a
            # python module and then calls that module's DoMain() function,
            # passing ["param", "eters"] as a single list argument. For modules
            # that don't load quickly, this can be faster than
            # <!(python modulename param eters). Do this in |build_file_dir|.
            oldwd = os.getcwd()  # Python doesn't like os.open('.'): no fchdir.
            if build_file_dir:  # build_file_dir may be None (see above).
              os.chdir(build_file_dir)
            try:
              parsed_contents = shlex.split(contents)
              try:
                py_module = __import__(parsed_contents[0])
              except ImportError as e:
                raise GypError("Error importing pymod_do_main"
                               "module (%s): %s" % (parsed_contents[0], e))
              replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
            finally:
              os.chdir(oldwd)
            assert replacement != None
          elif command_string:
            raise GypError("Unknown command string '%s' in '%s'." %
                           (command_string, contents))
          else:
            # Fix up command with platform specific workarounds.
            contents = FixupPlatformCommand(contents)
            try:
              p = subprocess.Popen(contents, shell=use_shell,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   stdin=subprocess.PIPE,
                                   cwd=build_file_dir)
            except Exception, e:
              raise GypError("%s while executing command '%s' in %s" %
                             (e, contents, build_file))

            p_stdout, p_stderr = p.communicate('')

            if p.wait() != 0 or p_stderr:
              sys.stderr.write(p_stderr)
              # Simulate check_call behavior, since check_call only exists
              # in python 2.5 and later.
              raise GypError("Call to '%s' returned exit status %d while in %s." %
                             (contents, p.returncode, build_file))
            replacement = p_stdout.rstrip()

          cached_command_results[cache_key] = replacement
        else:
          gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                          "Had cache value for command '%s' in directory '%s'",
                          contents,build_file_dir)
          replacement = cached_value

    else:
      # Plain variable reference: look |contents| up in |variables|.
      if not contents in variables:
        if contents[-1] in ['!', '/']:
          # In order to allow cross-compiles (nacl) to happen more naturally,
          # we will allow references to >(sources/) etc. to resolve to
          # and empty list if undefined. This allows actions to:
          # 'action!': [
          #   '>@(_sources!)',
          # ],
          # 'action/': [
          #   '>@(_sources/)',
          # ],
          replacement = []
        else:
          raise GypError('Undefined variable ' + contents +
                         ' in ' + build_file)
      else:
        replacement = variables[contents]

    if type(replacement) is list:
      for item in replacement:
        if not contents[-1] == '/' and type(item) not in (str, int):
          raise GypError('Variable ' + contents +
                         ' must expand to a string or list of strings; ' +
                         'list contains a ' +
                         item.__class__.__name__)
      # Run through the list and handle variable expansions in it. Since
      # the list is guaranteed not to contain dicts, this won't do anything
      # with conditions sections.
      ProcessVariablesAndConditionsInList(replacement, phase, variables,
                                          build_file)
    elif type(replacement) not in (str, int):
      raise GypError('Variable ' + contents +
                     ' must expand to a string or list of strings; ' +
                     'found a ' + replacement.__class__.__name__)

    if expand_to_list:
      # Expanding in list context. It's guaranteed that there's only one
      # replacement to do in |input_str| and that it's this replacement. See
      # above.
      if type(replacement) is list:
        # If it's already a list, make a copy.
        output = replacement[:]
      else:
        # Split it the same way sh would split arguments.
        output = shlex.split(str(replacement))
    else:
      # Expanding in string context.
      encoded_replacement = ''
      if type(replacement) is list:
        # When expanding a list into string context, turn the list items
        # into a string in a way that will work with a subprocess call.
        #
        # TODO(mark): This isn't completely correct. This should
        # call a generator-provided function that observes the
        # proper list-to-argument quoting rules on a specific
        # platform instead of just calling the POSIX encoding
        # routine.
        encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
      else:
        encoded_replacement = replacement

      output = output[:replace_start] + str(encoded_replacement) + \
               output[replace_end:]
    # Prepare for the next match iteration.
    input_str = output

  if output == input:
    gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                    "Found only identity matches on %r, avoiding infinite "
                    "recursion.",
                    output)
  else:
    # Look for more matches now that we've replaced some, to deal with
    # expanding local variables (variables defined in the same
    # variables block as this one).
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
    if type(output) is list:
      if output and type(output[0]) is list:
        # Leave output alone if it's a list of lists.
        # We don't want such lists to be stringified.
        pass
      else:
        new_output = []
        for item in output:
          new_output.append(
              ExpandVariables(item, phase, variables, build_file))
        output = new_output
    else:
      output = ExpandVariables(output, phase, variables, build_file)

  # Convert all strings that are canonically-represented integers into integers.
  if type(output) is list:
    for index in xrange(0, len(output)):
      if IsStrCanonicalInt(output[index]):
        output[index] = int(output[index])
  elif IsStrCanonicalInt(output):
    output = int(output)

  return output
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
# Maps condition-expression strings to their compiled code objects.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
  """Evaluates one entry of a conditions list.

  |condition| is a list of the form [cond_expr, true_dict] or
  [cond_expr, true_dict, false_dict], possibly chained with further
  cond_expr/true_dict pairs.  Returns the dict that should be used, or None
  if the result was that nothing should be used.
  """
  if type(condition) is not list:
    raise GypError(conditions_key + ' must be a list')
  if len(condition) < 2:
    # It's possible that condition[0] won't work in which case this
    # attempt will raise its own IndexError. That's probably fine.
    raise GypError(conditions_key + ' ' + condition[0] +
                   ' must be at least length 2, not ' + str(len(condition)))

  result = None
  index = 0
  length = len(condition)
  while index < length:
    cond_expr = condition[index]
    true_dict = condition[index + 1]
    if type(true_dict) is not dict:
      raise GypError('{} {} must be followed by a dictionary, not {}'.format(
        conditions_key, cond_expr, type(true_dict)))
    # A third element is consumed as the false branch only when it is itself
    # a dict; otherwise it begins the next chained condition.
    false_dict = None
    if length > index + 2 and type(condition[index + 2]) is dict:
      false_dict = condition[index + 2]
      index += 3
      if index != length:
        raise GypError('{} {} has {} unexpected trailing items'.format(
          conditions_key, cond_expr, length - index))
    else:
      index += 2
    # Keep evaluating chained conditions until one yields a dict; later
    # entries are still walked so their structure is validated.
    if result is None:
      result = EvalSingleCondition(
          cond_expr, true_dict, false_dict, phase, variables, build_file)
  return result
def EvalSingleCondition(
    cond_expr, true_dict, false_dict, phase, variables, build_file):
  """Returns true_dict if cond_expr evaluates to true, and false_dict
  otherwise.

  Raises SyntaxError (re-raised with file context) if the expression does
  not parse, and GypError if it references an undefined name.
  """
  # Do expansions on the condition itself.  Since the condition can naturally
  # contain variable references without needing to resort to GYP expansion
  # syntax, this is of dubious value for variables, but someone might want to
  # use a command expansion directly inside a condition.
  cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
                                       build_file)
  if type(cond_expr_expanded) not in (str, int):
    raise ValueError(
          'Variable expansion in this context permits str and int ' + \
          'only, found ' + cond_expr_expanded.__class__.__name__)

  try:
    # Compiled expressions are cached globally; the same condition text is
    # typically evaluated many times across targets.
    if cond_expr_expanded in cached_conditions_asts:
      ast_code = cached_conditions_asts[cond_expr_expanded]
    else:
      ast_code = compile(cond_expr_expanded, '<string>', 'eval')
      cached_conditions_asts[cond_expr_expanded] = ast_code
    # Evaluate with builtins disabled; only names in |variables| resolve.
    if eval(ast_code, {'__builtins__': None}, variables):
      return true_dict
    return false_dict
  except SyntaxError, e:
    # Re-raise with the build file and offset so the author can find the
    # offending condition text.
    syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
                               'at character %d.' %
                               (str(e.args[0]), e.text, build_file, e.offset),
                               e.filename, e.lineno, e.offset, e.text)
    raise syntax_error
  except NameError, e:
    gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
                               (cond_expr_expanded, build_file))
    raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
  """Evaluates and merges a conditions section of |the_dict|, in place.

  The section processed depends on |phase|:
    early    -> 'conditions'
    late     -> 'target_conditions'
    latelate -> no conditions processed

  Each item in the conditions list consists of cond_expr, a string expression
  evaluated as the condition, and true_dict, a dict that will be merged into
  the_dict if cond_expr evaluates to true.  Optionally, a third item,
  false_dict, may be present; it is merged into the_dict if cond_expr
  evaluates to false.  Any dict merged into the_dict is first recursively
  processed for nested conditionals and other expansions, also according to
  |phase|.  The conditions section itself is removed from the_dict.
  """
  if phase == PHASE_LATELATE:
    return
  phase_to_key = {PHASE_EARLY: 'conditions', PHASE_LATE: 'target_conditions'}
  assert phase in phase_to_key
  conditions_key = phase_to_key[phase]

  if conditions_key not in the_dict:
    return

  # Unhook the conditions list, it's no longer needed.
  conditions_list = the_dict.pop(conditions_key)

  for condition in conditions_list:
    merge_dict = EvalCondition(condition, conditions_key, phase, variables,
                               build_file)
    if merge_dict is None:
      continue
    # Expand variables and nested conditionals in the merge_dict before
    # merging it.
    ProcessVariablesAndConditionsInDict(merge_dict, phase,
                                        variables, build_file)
    MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
  """Loads "automatic" variables from |the_dict| into |variables|.

  Any key in |the_dict| whose value is a str, int, or list becomes an
  automatic variable.  The variable name is the key name with a "_"
  character prepended (e.g. 'target_name' becomes '_target_name').
  """
  # Any keys with plain string values in the_dict become automatic variables.
  # The variable name is the key name with a "_" character prepended.
  for key, value in the_dict.iteritems():
    if type(value) in (str, int, list):
      variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  """Loads variables from |the_dict|'s 'variables' sub-dict into |variables|.

  Any keys in the_dict's "variables" dict, if it has one, becomes a
  variable.  The variable name is the key name in the "variables" dict.
  Variables that end with the % character are set only if they are unset in
  the variables dict.  the_dict_key is the name of the key that accesses
  the_dict in the_dict's parent dict.  If the_dict's parent is not a dict
  (it could be a list or it could be parentless because it is a root dict),
  the_dict_key will be None.
  """
  # .items() rather than .iteritems() keeps this portable across Python 2
  # and 3; snapshot semantics are equivalent here since nothing mutates
  # the 'variables' sub-dict during iteration.
  for key, value in the_dict.get('variables', {}).items():
    # Only plain values can become variables; dict values are handled by
    # recursive processing elsewhere.
    if type(value) not in (str, int, list):
      continue

    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
        continue
      # BUG FIX: this comparison previously used 'is', relying on CPython
      # string interning for correctness; '==' is the correct value
      # comparison.
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key

    variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.

  This function is the public entry point for all variable expansions and
  conditional evaluations.  The variables_in dictionary will not be modified
  by this function.
  """

  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)

  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another.  They will be fully expanded by recursion
    # in ExpandVariables.
    for key, value in the_dict['variables'].iteritems():
      variables[key] = value

    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
                                        variables, build_file, 'variables')

  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Expand every string value at this level of the dict.
  for key, value in the_dict.iteritems():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and type(value) is str:
      expanded = ExpandVariables(value, phase, variables, build_file)
      if type(expanded) not in (str, int):
        raise ValueError(
              'Variable expansion in this context permits str and int ' + \
              'only, found ' + expanded.__class__.__name__ + ' for ' + key)
      the_dict[key] = expanded

  # Variable expansion may have resulted in changes to automatics.  Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Process conditions in this dict.  This is done after variable expansion
  # so that conditions may take advantage of expanded variables.  For example,
  # if the_dict contains:
  #   {'type':       '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing.  However, condition processing should occur prior to recursion
  # so that variables (both automatic and "variables" dict type) may be
  # adjusted by conditions sections, merged into the_dict, and have the
  # intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a
  # "variables" section will only have those variables effective in subdicts,
  # not in the_dict.  The workaround is to put a "conditions" section within
  # a "variables" section.  For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines" list
  # within "my_subdict".  By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.

  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals.  This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions section
  # from the_dict if it is present.
  ProcessConditionsInDict(the_dict, phase, variables, build_file)

  # Conditional processing may have resulted in changes to automatics or the
  # variables dict.  Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.iteritems():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or type(value) is str:
      continue
    if type(value) is dict:
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, phase, variables,
                                          build_file, key)
    elif type(value) is list:
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it.  No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, phase, variables,
                                          build_file)
    elif type(value) is not int:
      raise TypeError('Unknown type ' + value.__class__.__name__ + \
                      ' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
                                        build_file):
  """Expands variables in each item of |the_list|, in place.

  Dict items are recursively processed by ProcessVariablesAndConditionsInDict,
  list items recurse into this function, and str items are expanded via
  ExpandVariables.  A string item that expands to a list is spliced into
  the_list at its position.  Int items are left as-is; any other type raises
  TypeError.
  """
  # Iterate using an index so that new values can be assigned into the_list.
  index = 0
  while index < len(the_list):
    item = the_list[index]
    if type(item) is dict:
      # Make a copy of the variables dict so that it won't influence anything
      # outside of its own scope.
      ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
    elif type(item) is list:
      ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
    elif type(item) is str:
      expanded = ExpandVariables(item, phase, variables, build_file)
      if type(expanded) in (str, int):
        the_list[index] = expanded
      elif type(expanded) is list:
        # Splice the expanded list into the_list in place of the item.
        the_list[index:index+1] = expanded
        index += len(expanded)

        # index now identifies the next item to examine.  Continue right now
        # without falling into the index increment below.
        continue
      else:
        # BUG FIX: |index| is an int and was previously concatenated directly
        # onto the message string, which itself raised TypeError and masked
        # the intended diagnostic.  Convert with str() first.
        raise ValueError(
              'Variable expansion in this context permits strings and ' + \
              'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
              str(index))
    elif type(item) is not int:
      # BUG FIX: same str(index) conversion as above.
      raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + str(index))
    index = index + 1
def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target dicts.

  |data| is a dict mapping loaded build files by pathname relative to the
  current directory.  Values in |data| are build file contents.  For each
  |data| value with a "targets" key, the value of the "targets" key is taken
  as a list containing target dicts.  Each target's fully-qualified name is
  constructed from the pathname of the build file (|data| key) and its
  "target_name" property.  These fully-qualified names are used as the keys
  in the returned dict, providing access to the target dicts, the dicts in
  the "targets" lists.

  Raises GypError if the same fully-qualified name is defined twice.
  """
  qualified_targets = {}
  for build_file_path in data['target_build_files']:
    build_file_contents = data[build_file_path]
    for target_dict in build_file_contents.get('targets', []):
      qualified_name = gyp.common.QualifiedTarget(build_file_path,
                                                  target_dict['target_name'],
                                                  target_dict['toolset'])
      if qualified_name in qualified_targets:
        raise GypError('Duplicate target definitions for ' + qualified_name)
      qualified_targets[qualified_name] = target_dict

  return qualified_targets
def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.

  |targets| is a dict mapping fully-qualified target names to their target
  dicts.  For each target in this dict, keys known to contain dependency
  links are examined, and any dependencies referenced will be rewritten
  so that they are fully-qualified and relative to the current directory.
  All rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.
  """
  # Every dependency section plus its '!' (exclusion) and '/' (regex)
  # list-filter variants.
  all_dependency_sections = [dep + op
                             for dep in dependency_sections
                             for op in ('', '!', '/')]

  for target, target_dict in targets.iteritems():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    for dependency_key in all_dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      # Rewrite each entry in place with its fully-qualified form.
      for index in xrange(0, len(dependencies)):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dependencies[index], toolset)
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        dependencies[index] = dependency

        # Make sure anything appearing in a list other than "dependencies" also
        # appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise GypError('Found ' + dependency + ' in ' + dependency_key +
                         ' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.

  For each target in |targets|, examines sections containing links to other
  targets.  If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file.  The |data| dict provides access to build file dicts.

  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict.  When present and
  true, a wildcard dependency link will not include such targets.

  All dependency names, including the keys to |targets| and the values in each
  dependency list, must be qualified when this function is called.
  """
  for target, target_dict in targets.iteritems():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])

      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard.  Keep it moving.
          index = index + 1
          continue

        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise GypError('Found wildcard in ' + dependency_key + ' of ' +
                         target + ' referring to same build file')

        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1

        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          # Honor the opt-out flag; int() accepts 0/1 and '0'/'1' values.
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          index = index + 1
          dependencies.insert(index, dependency)

        index = index + 1
def Unify(l):
  """Removes duplicate elements from l, keeping the first element.

  Duplication is determined by equality; elements must be hashable.  The
  object that appeared first is the one retained.
  """
  seen = set()
  unique = []
  for element in l:
    if element not in seen:
      seen.add(element)
      unique.append(element)
  return unique
def RemoveDuplicateDependencies(targets):
  """Makes sure every dependency appears only once in all targets's dependency
  lists."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        # Replace the list with its order-preserving deduplicated form.
        target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
  """Removes every element equal to |item| from l, returning a new list.

  Surviving elements that compare equal are canonicalized: each kept
  position holds the first-seen object equal to it (matching the original
  setdefault-based implementation).  Elements must be hashable.
  """
  canonical = {}
  kept = []
  for element in l:
    if element == item:
      continue
    kept.append(canonical.setdefault(element, element))
  return kept
def RemoveSelfDependencies(targets):
  """Remove self dependencies from targets that have the prune_self_dependency
  variable set."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        for t in dependencies:
          if t == target_name:
            # Only prune when the target explicitly opts in via its
            # variables dict.
            if targets[t].get('variables', {}).get('prune_self_dependency', 0):
              target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
  """Remove dependencies having the 'link_dependency' attribute from the 'none'
  targets."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        for t in dependencies:
          # 'none' targets don't link, so link-only dependencies are dropped.
          if target_dict.get('type', None) == 'none':
            if targets[t].get('variables', {}).get('link_dependency', 0):
              target_dict[dependency_key] = \
                  Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
  # Error signaling a dependency cycle among DependencyGraphNodes (raised by
  # cycle-detecting callers elsewhere in this file).
  class CircularException(GypError):
    pass
  def __init__(self, ref):
    # The object this node represents (e.g. a target name, or None for the
    # root node).
    self.ref = ref
    # DependencyGraphNodes this one depends on.
    self.dependencies = []
    # DependencyGraphNodes that depend on this one.
    self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
  def FlattenToList(self):
    """Returns a topologically sorted list of the refs below this node.

    Nodes with all dependencies satisfied are repeatedly pulled from a
    worklist, so every target appears after all of its dependencies and
    before all of its dependents.
    """
    # flat_list is the sorted list of dependencies - actually, the list items
    # are the "ref" attributes of DependencyGraphNodes.  Every target will
    # appear in flat_list after all of its dependencies, and before all of its
    # dependents.
    flat_list = OrderedSet()

    # in_degree_zeros is the list of DependencyGraphNodes that have no
    # dependencies not in flat_list.  Initially, it is a copy of the children
    # of this node, because when the graph was built, nodes with no
    # dependencies were made implicit dependents of the root node.
    in_degree_zeros = set(self.dependents[:])

    while in_degree_zeros:
      # Nodes in in_degree_zeros have no dependencies not in flat_list, so they
      # can be appended to flat_list.  Take these nodes out of in_degree_zeros
      # as work progresses, so that the next node to process from the list can
      # always be accessed at a consistent position.
      node = in_degree_zeros.pop()
      flat_list.add(node.ref)

      # Look at dependents of the node just added to flat_list.  Some of them
      # may now belong in in_degree_zeros.
      for node_dependent in node.dependents:
        is_in_degree_zero = True
        # TODO: We want to check through the
        # node_dependent.dependencies list but if it's long and we
        # always start at the beginning, then we get O(n^2) behaviour.
        for node_dependent_dependency in node_dependent.dependencies:
          if not node_dependent_dependency.ref in flat_list:
            # The dependent one or more dependencies not in flat_list.  There
            # will be more chances to add it to flat_list when examining
            # it again as a dependent of those other dependencies, provided
            # that there are no cycles.
            is_in_degree_zero = False
            break

        if is_in_degree_zero:
          # All of the dependent's dependencies are already in flat_list.  Add
          # it to in_degree_zeros where it will be processed in a future
          # iteration of the outer loop.
          in_degree_zeros.add(node_dependent)

    # Note: nodes that are part of a cycle never reach in-degree zero and
    # are silently absent from the returned list.
    return list(flat_list)
def FindCycles(self):
  """
  Returns a list of cycles in the graph, where each cycle is its own list.

  Each cycle is a list of nodes in which the node where the cycle closes
  appears at both ends.  The search is a DFS from self over dependent
  edges; |visited| prevents re-exploring a node, so each cycle is found
  only once.
  """
  results = []
  visited = set()
  def Visit(node, path):
    # |path| holds the nodes on the current DFS stack, most recent first.
    for child in node.dependents:
      if child in path:
        # Back edge: the slice of |path| up to and including |child| is the
        # cycle, reported with |child| at both ends.
        results.append([child] + path[:path.index(child) + 1])
      elif not child in visited:
        visited.add(child)
        Visit(child, [child] + path)
  visited.add(self)
  Visit(self, [self])
  return results
def DirectDependencies(self, dependencies=None):
  """Returns a list of just direct dependencies.

  Arguments:
    dependencies: optional list to extend in place; a new list is created
        when None (avoiding the shared-mutable-default pitfall).
  Returns the list of dependency refs, in first-seen order, without
  duplicates and without the root node's None ref.
  """
  # PEP 8: compare against None with "is"/"is not", never "=="/"!=";
  # equality comparisons can be hijacked by a custom __eq__.
  if dependencies is None:
    dependencies = []
  for dependency in self.dependencies:
    # Check for None, corresponding to the root node.
    if dependency.ref is not None and dependency.ref not in dependencies:
      dependencies.append(dependency.ref)
  return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
  """Given a list of direct dependencies, adds indirect dependencies that
  other dependencies have declared to export their settings.

  This method does not operate on self.  Rather, it operates on the list
  of dependencies in the |dependencies| argument.  For each dependency in
  that list, if any declares that it exports the settings of one of its
  own dependencies, those dependencies whose settings are "passed through"
  are added to the list.  As new items are added to the list, they too will
  be processed, so it is possible to import settings through multiple levels
  of dependencies.

  This method is not terribly useful on its own, it depends on being
  "primed" with a list of direct dependencies such as one provided by
  DirectDependencies.  DirectAndImportedDependencies is intended to be the
  public entry point.

  Arguments:
    targets: dict mapping target refs to target dicts; consulted for each
        dependency's 'export_dependent_settings' list.
    dependencies: list of refs to extend in place (a new list when None).
  Returns the (possibly newly created) |dependencies| list.
  """
  # PEP 8: identity comparison with None, not "== None".
  if dependencies is None:
    dependencies = []
  index = 0
  while index < len(dependencies):
    dependency = dependencies[index]
    dependency_dict = targets[dependency]
    # Add any dependencies whose settings should be imported to the list
    # if not already present.  Newly-added items will be checked for
    # their own imports when the list iteration reaches them.
    # Rather than simply appending new items, insert them after the
    # dependency that exported them.  This is done to more closely match
    # the depth-first method used by DeepDependencies.
    add_index = 1
    for imported_dependency in \
        dependency_dict.get('export_dependent_settings', []):
      if imported_dependency not in dependencies:
        dependencies.insert(index + add_index, imported_dependency)
        add_index = add_index + 1
    index = index + 1
  return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
  """Returns a list of a target's direct dependencies plus every indirect
  dependency whose settings a direct dependency has declared it exports.
  """
  # Prime with the direct dependencies, then let the import pass expand
  # the list in place.
  direct = self.DirectDependencies(dependencies)
  return self._AddImportedDependencies(targets, direct)
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependency.DeepDependencies(dependencies)
dependencies.add(dependency.ref)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
                              dependencies=None, initial=True):
  """Returns an OrderedSet of dependency targets that are linked
  into this target.

  This function has a split personality, depending on the setting of
  |initial|. Outside callers should always leave |initial| at its default
  setting.

  When adding a target to the list of dependencies, this function will
  recurse into itself with |initial| set to False, to collect dependencies
  that are linked into the linkable target for which the list is being built.

  If |include_shared_libraries| is False, the resulting dependencies will not
  include shared_library targets that are linked into this target.

  Arguments:
    targets: dict mapping target refs to target dicts, used to look up each
        target's 'type' (and 'target_name' for error reporting).
    include_shared_libraries: see above.
    dependencies: accumulator threaded through the recursion; callers leave
        this as None.
    initial: True only on the outermost call, see above.
  Raises GypError when a target dict lacks 'target_name' or 'type'.
  """
  if dependencies is None:
    # Using a list to get ordered output and a set to do fast "is it
    # already added" checks.
    dependencies = OrderedSet()
  # Check for None, corresponding to the root node.
  if self.ref is None:
    return dependencies
  # It's kind of sucky that |targets| has to be passed into this function,
  # but that's presently the easiest way to access the target dicts so that
  # this function can find target types.
  if 'target_name' not in targets[self.ref]:
    raise GypError("Missing 'target_name' field in target.")
  if 'type' not in targets[self.ref]:
    raise GypError("Missing 'type' field in target %s" %
                   targets[self.ref]['target_name'])
  target_type = targets[self.ref]['type']
  is_linkable = target_type in linkable_types
  if initial and not is_linkable:
    # If this is the first target being examined and it's not linkable,
    # return an empty list of link dependencies, because the link
    # dependencies are intended to apply to the target itself (initial is
    # True) and this target won't be linked.
    return dependencies
  # Don't traverse 'none' targets if explicitly excluded.
  if (target_type == 'none' and
      not targets[self.ref].get('dependencies_traverse', True)):
    dependencies.add(self.ref)
    return dependencies
  # Executables, mac kernel extensions and loadable modules are already fully
  # and finally linked. Nothing else can be a link dependency of them, there
  # can only be dependencies in the sense that a dependent target might run
  # an executable or load the loadable_module.
  if not initial and target_type in ('executable', 'loadable_module',
                                     'mac_kernel_extension'):
    return dependencies
  # Shared libraries are already fully linked. They should only be included
  # in |dependencies| when adjusting static library dependencies (in order to
  # link against the shared_library's import lib), but should not be included
  # in |dependencies| when propagating link_settings.
  # The |include_shared_libraries| flag controls which of these two cases we
  # are handling.
  if (not initial and target_type == 'shared_library' and
      not include_shared_libraries):
    return dependencies
  # The target is linkable, add it to the list of link dependencies.
  if self.ref not in dependencies:
    dependencies.add(self.ref)
    if initial or not is_linkable:
      # If this is a subsequent target and it's linkable, don't look any
      # further for linkable dependencies, as they'll already be linked into
      # this target linkable. Always look at dependencies of the initial
      # target, and always look at dependencies of non-linkables.
      for dependency in self.dependencies:
        dependency._LinkDependenciesInternal(targets,
                                             include_shared_libraries,
                                             dependencies, False)
  return dependencies
def DependenciesForLinkSettings(self, targets):
  """
  Returns a list of dependency targets whose link_settings should be merged
  into this target.
  """
  # TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
  # link_settings are propagated. So for now, we will allow it, unless the
  # 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
  # False. Once chrome is fixed, we can remove this flag.
  target_dict = targets[self.ref]
  include_shared = target_dict.get(
      'allow_sharedlib_linksettings_propagation', True)
  return self._LinkDependenciesInternal(targets, include_shared)
def DependenciesToLinkAgainst(self, targets):
  """
  Returns a list of dependency targets that are linked into this target.
  """
  # Shared libraries count as link dependencies for this purpose, so
  # include_shared_libraries is passed as True.
  include_shared_libraries = True
  return self._LinkDependenciesInternal(targets, include_shared_libraries)
def BuildDependencyList(targets):
  """Builds the target-level dependency graph.

  Arguments:
    targets: dict mapping fully-qualified target refs to target dicts.
  Returns a two-element list [dependency_nodes, flat_list]:
    dependency_nodes: dict mapping each target ref to its
        DependencyGraphNode.
    flat_list: all target refs topologically sorted, dependencies before
        dependents.
  Raises DependencyGraphNode.CircularException if the graph has a cycle,
  and GypError if a target names a dependency that does not exist.
  """
  # Create a DependencyGraphNode for each target. Put it into a dict for easy
  # access.
  dependency_nodes = {}
  for target, spec in targets.iteritems():
    if target not in dependency_nodes:
      dependency_nodes[target] = DependencyGraphNode(target)
  # Set up the dependency links. Targets that have no dependencies are treated
  # as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for target, spec in targets.iteritems():
    target_node = dependency_nodes[target]
    target_build_file = gyp.common.BuildFile(target)
    dependencies = spec.get('dependencies')
    if not dependencies:
      target_node.dependencies = [root_node]
      root_node.dependents.append(target_node)
    else:
      for dependency in dependencies:
        dependency_node = dependency_nodes.get(dependency)
        if not dependency_node:
          raise GypError("Dependency '%s' not found while "
                         "trying to load target %s" % (dependency, target))
        target_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(target_node)
  flat_list = root_node.FlattenToList()
  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(targets):
    if not root_node.dependents:
      # If all targets have dependencies, add the first target as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      target = targets.keys()[0]
      target_node = dependency_nodes[target]
      target_node.dependencies.append(root_node)
      root_node.dependents.append(target_node)
    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in dependency graph detected:\n' + '\n'.join(cycles))
  return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
  """Verifies that the .gyp-file-level dependency graph is acyclic.

  Collapses the target graph to one node per .gyp file (a file may freely
  refer back to itself) and raises DependencyGraphNode.CircularException
  if the resulting file graph contains a cycle.  Raises GypError if a
  dependency references a .gyp file with no nodes.
  """
  # Create a DependencyGraphNode for each gyp file containing a target.  Put
  # it into a dict for easy access.
  dependency_nodes = {}
  for target in targets.iterkeys():
    build_file = gyp.common.BuildFile(target)
    if not build_file in dependency_nodes:
      dependency_nodes[build_file] = DependencyGraphNode(build_file)
  # Set up the dependency links.
  for target, spec in targets.iteritems():
    build_file = gyp.common.BuildFile(target)
    build_file_node = dependency_nodes[build_file]
    target_dependencies = spec.get('dependencies', [])
    for dependency in target_dependencies:
      try:
        dependency_build_file = gyp.common.BuildFile(dependency)
      # "except E as e" works on Python 2.6+ and Python 3; the old comma
      # form is a syntax error under Python 3.
      except GypError as e:
        gyp.common.ExceptionAppend(
            e, 'while computing dependencies of .gyp file %s' % build_file)
        raise
      if dependency_build_file == build_file:
        # A .gyp file is allowed to refer back to itself.
        continue
      dependency_node = dependency_nodes.get(dependency_build_file)
      if not dependency_node:
        # Fixed typo in the user-facing message ("Dependancy").
        raise GypError("Dependency '%s' not found" % dependency_build_file)
      if dependency_node not in build_file_node.dependencies:
        build_file_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(build_file_node)
  # Files that have no dependencies are treated as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for build_file_node in dependency_nodes.itervalues():
    if len(build_file_node.dependencies) == 0:
      build_file_node.dependencies.append(root_node)
      root_node.dependents.append(build_file_node)
  flat_list = root_node.FlattenToList()
  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(dependency_nodes):
    if not root_node.dependents:
      # If all files have dependencies, add the first file as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      file_node = dependency_nodes.values()[0]
      file_node.dependencies.append(root_node)
      root_node.dependents.append(file_node)
    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
  """Merges each target's |key| dicts from its dependencies into the target.

  |key| selects both which set of dependencies to walk and which sub-dict
  of each dependency to merge; it must be 'all_dependent_settings',
  'direct_dependent_settings', or 'link_settings'.
  """
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    node = dependency_nodes[target]
    # Pick the dependency set that corresponds to |key|.
    if key == 'all_dependent_settings':
      dependencies = node.DeepDependencies()
    elif key == 'direct_dependent_settings':
      dependencies = node.DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dependencies = node.DependenciesForLinkSettings(targets)
    else:
      raise GypError("DoDependentSettings doesn't know how to determine "
                     'dependencies for ' + key)
    for dependency in dependencies:
      dependency_dict = targets[dependency]
      if key not in dependency_dict:
        continue
      dependency_build_file = gyp.common.BuildFile(dependency)
      MergeDicts(target_dict, dependency_dict[key],
                 build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    sort_dependencies):
  """Rewrites each target's 'dependencies' list for link-time reality.

  Static libraries lose entries for other static libraries (unless "hard"),
  with the original list preserved under 'dependencies_original'; linkable
  targets gain entries for every computed link dependency.  When
  |sort_dependencies| is set, resulting lists are ordered from dependents
  to dependencies (the reverse of |flat_list|).
  """
  # Recompute target "dependencies" properties. For each static library
  # target, remove "dependencies" entries referring to other static libraries,
  # unless the dependency has the "hard_dependency" attribute set. For each
  # linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']
    if target_type == 'static_library':
      if not 'dependencies' in target_dict:
        continue
      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]
      # A static library should not depend on another static library unless
      # the dependency relationship is "hard," which should only be done when
      # a dependent relies on some side effect other than just the build
      # product, like a rule or action output. Further, if a target has a
      # non-hard dependency, but that dependency exports a hard dependency,
      # the non-hard dependency can safely be removed, but the exported hard
      # dependency must be added to the target to keep the same dependency
      # ordering.
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
      index = 0
      while index < len(dependencies):
        dependency = dependencies[index]
        dependency_dict = targets[dependency]
        # Remove every non-hard static library dependency and remove every
        # non-static library dependency that isn't a direct dependency.
        if (dependency_dict['type'] == 'static_library' and \
            not dependency_dict.get('hard_dependency', False)) or \
           (dependency_dict['type'] != 'static_library' and \
            not dependency in target_dict['dependencies']):
          # Take the dependency out of the list, and don't increment index
          # because the next dependency to analyze will shift into the index
          # formerly occupied by the one being removed.
          del dependencies[index]
        else:
          index = index + 1
      # Update the dependencies. If the dependencies list is empty, it's not
      # needed, so unhook it.
      if len(dependencies) > 0:
        target_dict['dependencies'] = dependencies
      else:
        del target_dict['dependencies']
    elif target_type in linkable_types:
      # Get a list of dependency targets that should be linked into this
      # target. Add them to the dependencies list if they're not already
      # present.
      link_dependencies = \
          dependency_nodes[target].DependenciesToLinkAgainst(targets)
      for dependency in link_dependencies:
        if dependency == target:
          continue
        if not 'dependencies' in target_dict:
          target_dict['dependencies'] = []
        if not dependency in target_dict['dependencies']:
          target_dict['dependencies'].append(dependency)
    # Sort the dependencies list in the order from dependents to dependencies.
    # e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
    # Note: flat_list is already sorted in the order from dependencies to
    # dependents.
    if sort_dependencies and 'dependencies' in target_dict:
      target_dict['dependencies'] = [dep for dep in reversed(flat_list)
                                     if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
# Matches items MakePathRelative must leave untouched: an optional leading
# quote, then one of - / $ < > ^ (see the comment in MakePathRelative).
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
  """Rebases relative path |item| from |fro_file|'s dir to |to_file|'s dir.

  A relative path in a build file dict is relative to the build file it
  came from; when merged into another build file's dict it must be
  rewritten relative to the destination.  Any |item| whose first (or, if
  quoted, second) character matches exception_re is returned unmodified:
    /  already absolute (shortcut optimization; such paths would be
       returned as absolute anyway)
    $  build environment variables
    -  build environment flags such as -lapr-1 in a "libraries" section
    <, >, ^  our own variable and command expansions (see ExpandVariables)
  """
  if to_file == fro_file or exception_re.match(item):
    return item
  # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
  # temporary measure. This should really be addressed by keeping all paths
  # in POSIX until actual project generation.
  prefix = gyp.common.RelativePath(os.path.dirname(fro_file),
                                   os.path.dirname(to_file))
  rebased = os.path.normpath(os.path.join(prefix, item)).replace('\\', '/')
  # normpath drops any trailing slash; restore it to match the original item.
  if item[-1] == '/':
    rebased += '/'
  return rebased
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  """Merges list |fro| into list |to| in place, copying items as needed.

  Arguments:
    to, fro: destination and source lists.  |fro| is left unmodified;
        dict and list items are deep-merged into fresh copies.
    to_file, fro_file: build files that |to| and |fro| came from, used to
        rebase path items when |is_paths| is set.
    is_paths: if True, string/int items pass through MakePathRelative.
    append: if True new items are appended; if False they are prepended
        (preserving |fro|'s relative order at the front of |to|).
  Raises TypeError on items of unsupported type.
  """
  # Python documentation recommends objects which do not support hash
  # set this value to None. Python library objects follow this rule.
  is_hashable = lambda val: val.__hash__
  # If x is hashable, returns whether x is in s. Else returns whether x is in l.
  def is_in_set_or_list(x, s, l):
    if is_hashable(x):
      return x in s
    return x in l
  prepend_index = 0
  # Make membership testing of hashables in |to| (in particular, strings)
  # faster.
  hashable_to_set = set(x for x in to if is_hashable(x))
  for item in fro:
    singleton = False
    if type(item) in (str, int):
      # The cheap and easy case.
      if is_paths:
        to_item = MakePathRelative(to_file, fro_file, item)
      else:
        to_item = item
      if not (type(item) is str and item.startswith('-')):
        # Any string that doesn't begin with a "-" is a singleton - it can
        # only appear once in a list, to be enforced by the list merge append
        # or prepend.
        singleton = True
    elif type(item) is dict:
      # Make a copy of the dictionary, continuing to look for paths to fix.
      # The other intelligent aspects of merge processing won't apply because
      # item is being merged into an empty dict.
      to_item = {}
      MergeDicts(to_item, item, to_file, fro_file)
    elif type(item) is list:
      # Recurse, making a copy of the list.  If the list contains any
      # descendant dicts, path fixing will occur.  Note that here, custom
      # values for is_paths and append are dropped; those are only to be
      # applied to |to| and |fro|, not sublists of |fro|.  append shouldn't
      # matter anyway because the new |to_item| list is empty.
      to_item = []
      MergeLists(to_item, item, to_file, fro_file)
    else:
      raise TypeError(
          'Attempt to merge list item of unsupported type ' + \
          item.__class__.__name__)
    if append:
      # If appending a singleton that's already in the list, don't append.
      # This ensures that the earliest occurrence of the item will stay put.
      if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
        to.append(to_item)
        if is_hashable(to_item):
          hashable_to_set.add(to_item)
    else:
      # If prepending a singleton that's already in the list, remove the
      # existing instance and proceed with the prepend.  This ensures that the
      # item appears at the earliest possible position in the list.
      while singleton and to_item in to:
        to.remove(to_item)
      # Don't just insert everything at index 0.  That would prepend the new
      # items to the list in reverse order, which would be an unwelcome
      # surprise.
      to.insert(prepend_index, to_item)
      if is_hashable(to_item):
        hashable_to_set.add(to_item)
      prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
  """Merges dict |fro| into dict |to| in place, making copies as needed.

  Scalars overwrite, dicts merge recursively, and lists merge according to
  a policy chosen by a suffix on the from-dict's key: '=' replaces, '+'
  prepends, '?' sets only if absent, and no suffix appends.  |to_file| and
  |fro_file| name the build files the dicts came from so that path-valued
  entries can be rebased.  Raises TypeError on incompatible value types and
  GypError on contradictory list policies.
  """
  # I wanted to name the parameter "from" but it's a Python keyword...
  for k, v in fro.iteritems():
    # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
    # copy semantics. Something else may want to merge from the |fro| dict
    # later, and having the same dict ref pointed to twice in the tree isn't
    # what anyone wants considering that the dicts may subsequently be
    # modified.
    if k in to:
      bad_merge = False
      if type(v) in (str, int):
        if type(to[k]) not in (str, int):
          bad_merge = True
      elif type(v) is not type(to[k]):
        bad_merge = True
      if bad_merge:
        raise TypeError(
            'Attempt to merge dict value of type ' + v.__class__.__name__ + \
            ' into incompatible type ' + to[k].__class__.__name__ + \
            ' for key ' + k)
    if type(v) in (str, int):
      # Overwrite the existing value, if any. Cheap and easy.
      is_path = IsPathSection(k)
      if is_path:
        to[k] = MakePathRelative(to_file, fro_file, v)
      else:
        to[k] = v
    elif type(v) is dict:
      # Recurse, guaranteeing copies will be made of objects that require it.
      if not k in to:
        to[k] = {}
      MergeDicts(to[k], v, to_file, fro_file)
    elif type(v) is list:
      # Lists in dicts can be merged with different policies, depending on
      # how the key in the "from" dict (k, the from-key) is written.
      #
      # If the from-key has ...the to-list will have this action
      # this character appended:... applied when receiving the from-list:
      # = replace
      # + prepend
      # ? set, only if to-list does not yet exist
      # (none) append
      #
      # This logic is list-specific, but since it relies on the associated
      # dict key, it's checked in this dict-oriented function.
      ext = k[-1]
      append = True
      if ext == '=':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '?']
        to[list_base] = []
      elif ext == '+':
        list_base = k[:-1]
        lists_incompatible = [list_base + '=', list_base + '?']
        append = False
      elif ext == '?':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '=', list_base + '+']
      else:
        list_base = k
        lists_incompatible = [list_base + '=', list_base + '?']
      # Some combinations of merge policies appearing together are meaningless.
      # It's stupid to replace and append simultaneously, for example. Append
      # and prepend are the only policies that can coexist.
      for list_incompatible in lists_incompatible:
        if list_incompatible in fro:
          raise GypError('Incompatible list policies ' + k + ' and ' +
                         list_incompatible)
      if list_base in to:
        if ext == '?':
          # If the key ends in "?", the list will only be merged if it doesn't
          # already exist.
          continue
        elif type(to[list_base]) is not list:
          # This may not have been checked above if merging in a list with an
          # extension character.
          raise TypeError(
              'Attempt to merge dict value of type ' + v.__class__.__name__ + \
              ' into incompatible type ' + to[list_base].__class__.__name__ + \
              ' for key ' + list_base + '(' + k + ')')
      else:
        to[list_base] = []
      # Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will be
      # to make copies of dicts (with paths fixed), there will be no
      # subsequent dict "merging" once entering a list because lists are
      # always replaced, appended to, or prepended to.
      is_paths = IsPathSection(list_base)
      MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
    else:
      raise TypeError(
          'Attempt to merge dict value of unsupported type ' + \
          v.__class__.__name__ + ' for key ' + k)
def MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, visited):
  """Recursively merges |configuration| into |new_configuration_dict|.

  Configurations named in 'inherit_from' are merged first (depth-first),
  then the configuration itself.  |visited| holds the configurations
  already merged along the current inheritance chain, guarding against
  inheritance cycles.
  """
  if configuration in visited:
    return
  configuration_dict = target_dict['configurations'][configuration]
  # Merge every parent before this configuration's own settings.
  for parent in configuration_dict.get('inherit_from', []):
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, parent, visited + [configuration])
  MergeDicts(new_configuration_dict, configuration_dict,
             build_file, build_file)
  # 'abstract' marks a configuration as a pure base for inheritance; it must
  # not survive into the merged result.
  new_configuration_dict.pop('abstract', None)
def SetUpConfigurations(target, target_dict):
  """Expands |target_dict|'s 'configurations' into self-contained dicts.

  Each concrete (non-abstract) configuration inherits the target-scope
  settings plus anything named in its 'inherit_from' chain.  Abstract
  configurations are then dropped, and every configurable key is removed
  from the target scope (only keys in non_configuration_keys remain there).
  Raises GypError if a configuration contains a key listed in
  invalid_configuration_keys.
  """
  # key_suffixes is a list of key suffixes that might appear on key names.
  # These suffixes are handled in conditional evaluations (for =, +, and ?)
  # and rules/exclude processing (for ! and /). Keys with these suffixes
  # should be treated the same as keys without.
  key_suffixes = ['=', '+', '?', '!', '/']
  build_file = gyp.common.BuildFile(target)
  # Provide a single configuration by default if none exists.
  # TODO(mark): Signal an error if default_configurations exists but
  # configurations does not.
  if not 'configurations' in target_dict:
    target_dict['configurations'] = {'Default': {}}
  if not 'default_configuration' in target_dict:
    # Pick the alphabetically first concrete configuration as the default.
    concrete = [i for (i, config) in target_dict['configurations'].iteritems()
                if not config.get('abstract')]
    target_dict['default_configuration'] = sorted(concrete)[0]
  merged_configurations = {}
  configs = target_dict['configurations']
  for (configuration, old_configuration_dict) in configs.iteritems():
    # Skip abstract configurations (saves work only).
    if old_configuration_dict.get('abstract'):
      continue
    # Configurations inherit (most) settings from the enclosing target scope.
    # Get the inheritance relationship right by making a copy of the target
    # dict.
    new_configuration_dict = {}
    for (key, target_val) in target_dict.iteritems():
      key_ext = key[-1:]
      if key_ext in key_suffixes:
        key_base = key[:-1]
      else:
        key_base = key
      if not key_base in non_configuration_keys:
        new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
    # Merge in configuration (with all its parents first).
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, [])
    merged_configurations[configuration] = new_configuration_dict
  # Put the new configurations back into the target dict as a configuration.
  for configuration in merged_configurations.keys():
    target_dict['configurations'][configuration] = (
        merged_configurations[configuration])
  # Now drop all the abstract ones.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    if old_configuration_dict.get('abstract'):
      del target_dict['configurations'][configuration]
  # Now that all of the target's configurations have been built, go through
  # the target dict's keys and remove everything that's been moved into a
  # "configurations" section.
  delete_keys = []
  for key in target_dict:
    key_ext = key[-1:]
    if key_ext in key_suffixes:
      key_base = key[:-1]
    else:
      key_base = key
    if not key_base in non_configuration_keys:
      delete_keys.append(key)
  for key in delete_keys:
    del target_dict[key]
  # Check the configurations to see if they contain invalid keys.
  for configuration in target_dict['configurations'].keys():
    configuration_dict = target_dict['configurations'][configuration]
    for key in configuration_dict.keys():
      if key in invalid_configuration_keys:
        raise GypError('%s not allowed in the %s configuration, found in '
                       'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
  """Process regular expression and exclusion-based filters on lists.

  An exclusion list is in a dict key named with a trailing "!", like
  "sources!". Every item in such a list is removed from the associated
  main list, which in this example, would be "sources". Removed items are
  placed into a "sources_excluded" list in the dict.

  Regular expression (regex) filters are contained in dict keys named with a
  trailing "/", such as "sources/" to operate on the "sources" list. Regex
  filters in a dict take the form:
    'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
                  ['include', '_mac\\.cc$'] ],
  The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
  _win.cc. The second filter then includes all files ending in _mac.cc that
  are now or were once in the "sources" list. Items matching an "exclude"
  filter are subject to the same processing as would occur if they were listed
  by name in an exclusion list (ending in "!"). Items matching an "include"
  filter are brought back into the main list if previously excluded by an
  exclusion list or exclusion regex filter. Subsequent matching "exclude"
  patterns can still cause items to be excluded after matching an "include".

  Arguments:
    name: name of the_dict in its parent scope, used only in error messages.
    the_dict: dict to process in place; dict and list values are recursed
        into so filters apply at every nesting level.
  Raises ValueError on malformed filter values and GypError if a
  "<list>_excluded" key is already present.
  """
  # Look through the dictionary for any lists whose keys end in "!" or "/".
  # These are lists that will be treated as exclude lists and regular
  # expression-based exclude/include lists. Collect the lists that are
  # needed first, looking for the lists that they operate on, and assemble
  # then into |lists|. This is done in a separate loop up front, because
  # the _included and _excluded keys need to be added to the_dict, and that
  # can't be done while iterating through it.
  lists = []
  del_lists = []
  for key, value in the_dict.iteritems():
    operation = key[-1]
    if operation != '!' and operation != '/':
      continue
    if type(value) is not list:
      raise ValueError(name + ' key ' + key + ' must be list, not ' + \
                       value.__class__.__name__)
    list_key = key[:-1]
    if list_key not in the_dict:
      # This happens when there's a list like "sources!" but no corresponding
      # "sources" list. Since there's nothing for it to operate on, queue up
      # the "sources!" list for deletion now.
      del_lists.append(key)
      continue
    if type(the_dict[list_key]) is not list:
      value = the_dict[list_key]
      raise ValueError(name + ' key ' + list_key + \
                       ' must be list, not ' + \
                       value.__class__.__name__ + ' when applying ' + \
                       {'!': 'exclusion', '/': 'regex'}[operation])
    if not list_key in lists:
      lists.append(list_key)
  # Delete the lists that are known to be unneeded at this point.
  for del_list in del_lists:
    del the_dict[del_list]
  for list_key in lists:
    the_list = the_dict[list_key]
    # Initialize the list_actions list, which is parallel to the_list. Each
    # item in list_actions identifies whether the corresponding item in
    # the_list should be excluded, unconditionally preserved (included), or
    # whether no exclusion or inclusion has been applied. Items for which
    # no exclusion or inclusion has been applied (yet) have value -1, items
    # excluded have value 0, and items included have value 1. Includes and
    # excludes override previous actions. All items in list_actions are
    # initialized to -1 because no excludes or includes have been processed
    # yet.
    list_actions = list((-1,) * len(the_list))
    exclude_key = list_key + '!'
    if exclude_key in the_dict:
      for exclude_item in the_dict[exclude_key]:
        for index in xrange(0, len(the_list)):
          if exclude_item == the_list[index]:
            # This item matches the exclude_item, so set its action to 0
            # (exclude).
            list_actions[index] = 0
      # The "whatever!" list is no longer needed, dump it.
      del the_dict[exclude_key]
    regex_key = list_key + '/'
    if regex_key in the_dict:
      for regex_item in the_dict[regex_key]:
        [action, pattern] = regex_item
        pattern_re = re.compile(pattern)
        if action == 'exclude':
          # This item matches an exclude regex, so set its value to 0 (exclude).
          action_value = 0
        elif action == 'include':
          # This item matches an include regex, so set its value to 1 (include).
          action_value = 1
        else:
          # This is an action that doesn't make any sense.
          raise ValueError('Unrecognized action ' + action + ' in ' + name + \
                           ' key ' + regex_key)
        for index in xrange(0, len(the_list)):
          list_item = the_list[index]
          if list_actions[index] == action_value:
            # Even if the regex matches, nothing will change so continue (regex
            # searches are expensive).
            continue
          if pattern_re.search(list_item):
            # Regular expression match.
            list_actions[index] = action_value
      # The "whatever/" list is no longer needed, dump it.
      del the_dict[regex_key]
    # Add excluded items to the excluded list.
    #
    # Note that exclude_key ("sources!") is different from excluded_key
    # ("sources_excluded"). The exclude_key list is input and it was already
    # processed and deleted; the excluded_key list is output and it's about
    # to be created.
    excluded_key = list_key + '_excluded'
    if excluded_key in the_dict:
      raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     ' to applying exclusion/regex filters for ' + list_key)
    excluded_list = []
    # Go backwards through the list_actions list so that as items are deleted,
    # the indices of items that haven't been seen yet don't shift. That means
    # that things need to be prepended to excluded_list to maintain them in the
    # same order that they existed in the_list.
    for index in xrange(len(list_actions) - 1, -1, -1):
      if list_actions[index] == 0:
        # Dump anything with action 0 (exclude). Keep anything with action 1
        # (include) or -1 (no include or exclude seen for the item).
        excluded_list.insert(0, the_list[index])
        del the_list[index]
    # If anything was excluded, put the excluded list into the_dict at
    # excluded_key.
    if len(excluded_list) > 0:
      the_dict[excluded_key] = excluded_list
  # Now recurse into subdicts and lists that may contain dicts.
  for key, value in the_dict.iteritems():
    if type(value) is dict:
      ProcessListFiltersInDict(key, value)
    elif type(value) is list:
      ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
  """Recursively applies list filters to any dicts or lists nested in
  the_list.  Scalar items cannot contain filterable lists and are skipped.
  """
  for element in the_list:
    element_type = type(element)
    if element_type is dict:
      ProcessListFiltersInDict(name, element)
    elif element_type is list:
      ProcessListFiltersInList(name, element)
def ValidateTargetType(target, target_dict):
  """Checks that target_dict['type'] names one of the known target types.

  Also verifies that the 'standalone_static_library' flag is only applied
  to static_library targets.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec.

  Raises:
    GypError: if the type is unknown or the flag is misapplied.
  """
  VALID_TARGET_TYPES = ('executable', 'loadable_module',
                        'static_library', 'shared_library',
                        'mac_kernel_extension', 'none')
  target_type = target_dict.get('type', None)
  if target_type not in VALID_TARGET_TYPES:
    raise GypError("Target %s has an invalid target type '%s'. "
                   "Must be one of %s." %
                   (target, target_type, '/'.join(VALID_TARGET_TYPES)))
  standalone = target_dict.get('standalone_static_library', 0)
  if standalone and target_type != 'static_library':
    raise GypError('Target %s has type %s but standalone_static_library flag is'
                   ' only valid for static_library type.' % (target,
                                                             target_type))
def ValidateSourcesInTarget(target, target_dict, build_file,
                            duplicate_basename_check):
  """Rejects static_library targets whose compiled sources collide on
  basename.

  libtool on Mac cannot archive two object files with the same name, so
  duplicates are fatal unless the check was disabled on the command line.
  """
  if not duplicate_basename_check:
    return
  if target_dict.get('type', None) != 'static_library':
    return
  # Group every compiled source file by its extension-less basename.
  by_basename = {}
  for source in target_dict.get('sources', []):
    stem, ext = os.path.splitext(source)
    if ext not in ['.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']:
      continue
    by_basename.setdefault(os.path.basename(stem), []).append(source)
  error = ''
  for basename, files in by_basename.iteritems():
    if len(files) > 1:
      error += ' %s: %s\n' % (basename, ' '.join(files))
  if error:
    print('static library %s has several files with the same basename:\n' %
          target + error + 'libtool on Mac cannot handle that. Use '
          '--no-duplicate-basename-check to disable this validation.')
    raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
  """Ensures that the rules sections in target_dict are valid and consistent,
  and determines which sources they apply to.
  Arguments:
    target: string, name of target.
    target_dict: dict, target spec containing "rules" and "sources" lists.
    extra_sources_for_rules: a list of keys to scan for rule matches in
        addition to 'sources'.
  Raises:
    GypError: on duplicate rule names, duplicate extensions, or a
        pre-existing 'rule_sources' key.
  """
  # Dicts to map between values found in rules' 'rule_name' and 'extension'
  # keys and the rule dicts themselves.
  rule_names = {}
  rule_extensions = {}
  rules = target_dict.get('rules', [])
  for rule in rules:
    # Make sure that there's no conflict among rule names and extensions.
    rule_name = rule['rule_name']
    if rule_name in rule_names:
      raise GypError('rule %s exists in duplicate, target %s' %
                     (rule_name, target))
    rule_names[rule_name] = rule
    rule_extension = rule['extension']
    # Normalize the extension: a leading dot is optional in the input.
    if rule_extension.startswith('.'):
      rule_extension = rule_extension[1:]
    if rule_extension in rule_extensions:
      raise GypError(('extension %s associated with multiple rules, ' +
                      'target %s rules %s and %s') %
                     (rule_extension, target,
                      rule_extensions[rule_extension]['rule_name'],
                      rule_name))
    rule_extensions[rule_extension] = rule
    # Make sure rule_sources isn't already there. It's going to be
    # created below if needed: this function is the sole producer of it.
    if 'rule_sources' in rule:
      raise GypError(
        'rule_sources must not exist in input, target %s rule %s' %
        (target, rule_name))
    rule_sources = []
    source_keys = ['sources']
    source_keys.extend(extra_sources_for_rules)
    # Collect every source (from 'sources' plus any generator-declared extra
    # keys) whose normalized extension matches this rule's extension.
    for source_key in source_keys:
      for source in target_dict.get(source_key, []):
        (source_root, source_extension) = os.path.splitext(source)
        if source_extension.startswith('.'):
          source_extension = source_extension[1:]
        if source_extension == rule_extension:
          rule_sources.append(source)
    # Only attach the key when at least one source matched.
    if len(rule_sources) > 0:
      rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
  """Checks the optional 'run_as' section of a target for well-formedness.

  'run_as' must be a dict carrying a list-valued 'action' plus, optionally,
  a string 'working_directory' and a dict 'environment'.
  """
  target_name = target_dict.get('target_name')
  run_as = target_dict.get('run_as')
  if not run_as:
    # 'run_as' is optional; nothing to validate.
    return
  if type(run_as) is not dict:
    raise GypError("The 'run_as' in target %s from file %s should be a "
                   "dictionary." %
                   (target_name, build_file))
  action = run_as.get('action')
  if not action:
    raise GypError("The 'run_as' in target %s from file %s must have an "
                   "'action' section." %
                   (target_name, build_file))
  if type(action) is not list:
    raise GypError("The 'action' for 'run_as' in target %s from file %s "
                   "must be a list." %
                   (target_name, build_file))
  # The remaining keys are optional but, when present, must have the right
  # type.  Drive both checks from one table.
  for key, required_type, type_desc in (
      ('working_directory', str, 'string'),
      ('environment', dict, 'dictionary')):
    value = run_as.get(key)
    if value and type(value) is not required_type:
      raise GypError("The '%s' for 'run_as' in target %s "
                     "in file %s should be a %s." %
                     (key, target_name, build_file, type_desc))
def ValidateActionsInTarget(target, target_dict, build_file):
  """Validates the inputs to the actions in a target.

  Each action must carry an 'action_name' and an 'inputs' list (which may
  be empty but must be present); if an 'action' command is given, its first
  element must be non-empty.
  """
  target_name = target_dict.get('target_name')
  for action in target_dict.get('actions', []):
    action_name = action.get('action_name')
    if not action_name:
      raise GypError("Anonymous action in target %s. "
                     "An action must have an 'action_name' field." %
                     target_name)
    inputs = action.get('inputs', None)
    if inputs is None:
      raise GypError('Action in target %s has no inputs.' % target_name)
    action_command = action.get('action')
    if action_command and not action_command[0]:
      raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
  """Given dict the_dict, recursively converts all integers into strings.

  Both keys and values are converted in place: an int value becomes
  str(value); an int key is deleted and reinserted as str(key).  Nested
  dicts and lists are processed recursively.
  """
  # Iterate over a snapshot of the items: keys are deleted and reinserted
  # below, and mutating a dict while iterating its live items() view raises
  # RuntimeError on Python 3 (and iteritems on Python 2 could revisit
  # reinserted keys).  list() makes the snapshot explicit and safe on both.
  for k, v in list(the_dict.items()):
    if type(v) is int:
      v = str(v)
      the_dict[k] = v
    elif type(v) is dict:
      TurnIntIntoStrInDict(v)
    elif type(v) is list:
      TurnIntIntoStrInList(v)
    if type(k) is int:
      del the_dict[k]
      the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
  """Given list the_list, recursively converts all integers into strings.

  The conversion happens in place; nested dicts and lists are processed
  recursively.
  """
  # enumerate() replaces the original xrange(0, len(...)) index loop:
  # identical behavior, but it also works on Python 3 where xrange does
  # not exist.
  for index, item in enumerate(the_list):
    if type(item) is int:
      the_list[index] = str(item)
    elif type(item) is dict:
      TurnIntIntoStrInDict(item)
    elif type(item) is list:
      TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
                         data):
  """Return only the targets that are deep dependencies of |root_targets|.

  Arguments:
    targets: dict mapping qualified target name to target dict.
    flat_list: list of qualified target names (dependency order).
    dependency_nodes: dict mapping qualified name to a node exposing
        DeepDependencies().
    root_targets: list of (possibly unqualified) target names to keep.
    data: the per-build-file data dict; its 'targets' lists are pruned too.

  Returns:
    (wanted_targets dict, wanted_flat_list) restricted to the kept targets.

  Raises:
    GypError: if a root target cannot be resolved to any qualified target.
  """
  # Resolve each requested root target to its fully qualified name(s).
  qualified_root_targets = []
  for target in root_targets:
    target = target.strip()
    qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
    if not qualified_targets:
      raise GypError("Could not find target %s" % target)
    qualified_root_targets.extend(qualified_targets)
  # Keep every root target plus everything it transitively depends on.
  wanted_targets = {}
  for target in qualified_root_targets:
    wanted_targets[target] = targets[target]
    for dependency in dependency_nodes[target].DeepDependencies():
      wanted_targets[dependency] = targets[dependency]
  # Preserve the original flat_list ordering for the survivors.
  wanted_flat_list = [t for t in flat_list if t in wanted_targets]
  # Prune unwanted targets from each build_file's data dict.
  for build_file in data['target_build_files']:
    if not 'targets' in data[build_file]:
      continue
    new_targets = []
    for target in data[build_file]['targets']:
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target['target_name'],
                                                  target['toolset'])
      if qualified_name in wanted_targets:
        new_targets.append(target)
    data[build_file]['targets'] = new_targets
  return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
  """Verify that no two targets in the same directory share the same name.

  Arguments:
    targets: A list of targets in the form 'path/to/file.gyp:target_name'.

  Raises:
    GypError: on the first pair of colliding targets found.
  """
  # Maps 'subdirectory:target_name' to the .gyp file that first used it.
  used = {}
  for target in targets:
    # 'path/to/file.gyp:target_name' -> ('path/to/file.gyp', 'target_name').
    path, name = target.rsplit(':', 1)
    # 'path/to/file.gyp' -> ('path/to', 'file.gyp').
    subdir, gyp = os.path.split(path)
    # An empty subdir means the current directory; call it '.' so that the
    # error message reads sensibly.
    subdir = subdir or '.'
    key = '%s:%s' % (subdir, name)
    previous = used.get(key)
    if previous is not None:
      raise GypError('Duplicate target name "%s" in directory "%s" used both '
                     'in "%s" and "%s".' % (name, subdir, gyp, previous))
    used[key] = gyp
def SetGeneratorGlobals(generator_input_info):
  """Seeds this module's globals with generator-specific configuration.

  Mutates the module-level path_sections, non_configuration_keys,
  multiple_toolsets and generator_filelist_paths from the generator's
  input-info dict; must be called before any build files are processed.
  """
  # Set up path_sections and non_configuration_keys with the default data plus
  # the generator-specific data.
  global path_sections
  path_sections = set(base_path_sections)
  path_sections.update(generator_input_info['path_sections'])
  global non_configuration_keys
  # Copy the base list so repeated calls don't accumulate stale entries.
  non_configuration_keys = base_non_configuration_keys[:]
  non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
  global multiple_toolsets
  multiple_toolsets = generator_input_info[
      'generator_supports_multiple_toolsets']
  global generator_filelist_paths
  generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
         circular_check, duplicate_basename_check, parallel, root_targets):
  """Top-level entry point: loads and fully processes a set of build files.

  Runs the whole GYP input pipeline -- load, qualify, filter, dependency
  analysis, dependent-settings propagation, variable/condition expansion,
  and validation -- in a strict order that later stages depend on.

  Returns [flat_list, targets, data]:
    flat_list: dependency-ordered list of qualified target names.
    targets: dict of qualified target name -> target dict.
    data: dict of build file name -> processed build file contents.
  """
  SetGeneratorGlobals(generator_input_info)
  # A generator can have other lists (in addition to sources) be processed
  # for rules.
  extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
  # Load build files.  This loads every target-containing build file into
  # the |data| dictionary such that the keys to |data| are build file names,
  # and the values are the entire build file contents after "early" or "pre"
  # processing has been done and includes have been resolved.
  # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
  # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
  # track of the keys corresponding to "target" files.
  data = {'target_build_files': set()}
  # Normalize paths everywhere.  This is important because paths will be
  # used as keys to the data dict and for references between input files.
  build_files = set(map(os.path.normpath, build_files))
  if parallel:
    LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info)
  else:
    aux_data = {}
    for build_file in build_files:
      try:
        LoadTargetBuildFile(build_file, data, aux_data,
                            variables, includes, depth, check, True)
      except Exception, e:
        # Annotate the exception with the build file being loaded so the
        # user can locate the failure, then re-raise.
        gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
        raise
  # Build a dict to access each target's subdict by qualified name.
  targets = BuildTargetsDict(data)
  # Fully qualify all dependency links.
  QualifyDependencies(targets)
  # Remove self-dependencies from targets that have 'prune_self_dependencies'
  # set to 1.
  RemoveSelfDependencies(targets)
  # Expand dependencies specified as build_file:*.
  ExpandWildcardDependencies(targets, data)
  # Remove all dependencies marked as 'link_dependency' from the targets of
  # type 'none'.
  RemoveLinkDependenciesFromNoneTargets(targets)
  # Apply exclude (!) and regex (/) list filters only for dependency_sections.
  for target_name, target_dict in targets.iteritems():
    # Pull the dependency-related keys into a scratch dict so the filters
    # run only on them, leaving the rest of target_dict untouched for now.
    tmp_dict = {}
    for key_base in dependency_sections:
      for op in ('', '!', '/'):
        key = key_base + op
        if key in target_dict:
          tmp_dict[key] = target_dict[key]
          del target_dict[key]
    ProcessListFiltersInDict(target_name, tmp_dict)
    # Write the results back to |target_dict|.
    for key in tmp_dict:
      target_dict[key] = tmp_dict[key]
  # Make sure every dependency appears at most once.
  RemoveDuplicateDependencies(targets)
  if circular_check:
    # Make sure that any targets in a.gyp don't contain dependencies in other
    # .gyp files that further depend on a.gyp.
    VerifyNoGYPFileCircularDependencies(targets)
  [dependency_nodes, flat_list] = BuildDependencyList(targets)
  if root_targets:
    # Remove, from |targets| and |flat_list|, the targets that are not deep
    # dependencies of the targets specified in |root_targets|.
    targets, flat_list = PruneUnwantedTargets(
        targets, flat_list, dependency_nodes, root_targets, data)
  # Check that no two targets in the same directory have the same name.
  VerifyNoCollidingTargets(flat_list)
  # Handle dependent settings of various types.
  for settings_type in ['all_dependent_settings',
                        'direct_dependent_settings',
                        'link_settings']:
    DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
    # Take out the dependent settings now that they've been published to all
    # of the targets that require them.
    for target in flat_list:
      if settings_type in targets[target]:
        del targets[target][settings_type]
  # Make sure static libraries don't declare dependencies on other static
  # libraries, but that linkables depend on all unlinked static libraries
  # that they need so that their link steps will be correct.
  gii = generator_input_info
  if gii['generator_wants_static_library_dependencies_adjusted']:
    AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    gii['generator_wants_sorted_dependencies'])
  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATE, variables, build_file)
  # Move everything that can go into a "configurations" section into one.
  for target in flat_list:
    target_dict = targets[target]
    SetUpConfigurations(target, target_dict)
  # Apply exclude (!) and regex (/) list filters.
  for target in flat_list:
    target_dict = targets[target]
    ProcessListFiltersInDict(target, target_dict)
  # Apply "latelate" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATELATE, variables, build_file)
  # Make sure that the rules make sense, and build up rule_sources lists as
  # needed.  Not all generators will need to use the rule_sources lists, but
  # some may, and it seems best to build the list in a common spot.
  # Also validate actions and run_as elements in targets.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ValidateTargetType(target, target_dict)
    ValidateSourcesInTarget(target, target_dict, build_file,
                            duplicate_basename_check)
    ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
    ValidateRunAsInTarget(target, target_dict, build_file)
    ValidateActionsInTarget(target, target_dict, build_file)
  # Generators might not expect ints.  Turn them into strs.
  TurnIntIntoStrInDict(data)
  # TODO(mark): Return |data| for now because the generator needs a list of
  # build files that came in.  In the future, maybe it should just accept
  # a list, and not the whole data dict.
  return [flat_list, targets, data]
| gpl-3.0 |
lumig242/Hue-Integration-with-CDAP | desktop/core/ext-py/python-openid-2.2.5/openid/yadis/accept.py | 184 | 3742 | """Functions for generating and parsing HTTP Accept: headers for
supporting server-directed content negotiation.
"""
def generateAcceptHeader(*elements):
    """Generate an accept header value

    [str or (str, float)] -> str

    Each element is either a bare media type (implying q=1.0) or a
    (media type, quality) pair with 0 < quality <= 1.
    """
    parts = []
    for element in elements:
        if type(element) is str:
            # A bare string gets the default quality of 1.0.
            parts.append(("1.0", element))
        else:
            mtype, q = element
            q = float(q)
            if q > 1 or q <= 0:
                raise ValueError('Invalid preference factor: %r' % q)
            parts.append(('%0.1f' % (q,), mtype))
    # Sorting on the (quality-string, type) pair keeps the output stable.
    parts.sort()
    chunks = []
    for qs, mtype in parts:
        # q=1.0 is the protocol default, so it is omitted from the header.
        chunk = mtype if qs == '1.0' else '%s; q=%s' % (mtype, qs)
        chunks.append(chunk)
    return ', '.join(chunks)
def parseAcceptHeader(value):
    """Parse an accept header, ignoring any accept-extensions

    returns a list of tuples containing main MIME type, MIME subtype,
    and quality markdown.

    str -> [(str, str, float)]
    """
    accept = []
    for chunk in value.split(','):
        pieces = [piece.strip() for piece in chunk.strip().split(';')]
        mtype = pieces.pop(0)
        if '/' not in mtype:
            # This is not a MIME type, so ignore the bad data
            continue
        main, sub = mtype.split('/', 1)
        # Scan the parameters for the first parseable q-value; anything
        # else (including malformed q-values) is skipped.
        q = 1.0
        for ext in pieces:
            if '=' not in ext:
                continue
            k, v = ext.split('=', 1)
            if k != 'q':
                continue
            try:
                q = float(v)
                break
            except ValueError:
                # Ignore poorly formed q-values
                pass
        accept.append((q, main, sub))
    # Sort ascending then reverse (rather than a reverse sort) to preserve
    # the original tie-breaking behavior among equal q-values.
    accept.sort()
    accept.reverse()
    return [(main, sub, q) for (q, main, sub) in accept]
def matchTypes(accept_types, have_types):
    """Given the result of parsing an Accept: header, and the
    available MIME types, return the acceptable types with their
    quality markdowns.

    For example:

    >>> acceptable = parseAcceptHeader('text/html, text/plain; q=0.5')
    >>> matchTypes(acceptable, ['text/plain', 'text/html', 'image/jpeg'])
    [('text/html', 1.0), ('text/plain', 0.5)]

    Type signature: ([(str, str, float)], [str]) -> [(str, float)]
    """
    if not accept_types:
        # An empty Accept header means everything is acceptable.
        default = 1
    else:
        default = 0
    # Index the accepted types: exact (main, sub) pairs, 'main/*' wildcards,
    # and the '*/*' catch-all (which only raises the default).
    match_main = {}
    match_sub = {}
    for (main, sub, q) in accept_types:
        if main == '*':
            default = max(default, q)
        elif sub == '*':
            match_main[main] = max(match_main.get(main, 0), q)
        else:
            key = (main, sub)
            match_sub[key] = max(match_sub.get(key, 0), q)
    accepted_list = []
    for position, mtype in enumerate(have_types):
        main, sub = mtype.split('/')
        # Exact match wins over a main-type wildcard, which wins over the
        # default quality.
        if (main, sub) in match_sub:
            q = match_sub[(main, sub)]
        else:
            q = match_main.get(main, default)
        if q:
            # (1 - q) sorts higher qualities first; position keeps the input
            # order stable among equal qualities.
            accepted_list.append((1 - q, position, q, mtype))
    accepted_list.sort()
    return [(mtype, q) for (_, _, q, mtype) in accepted_list]
def getAcceptable(accept_header, have_types):
    """Parse the accept header and return a list of available types in
    preferred order. If a type is unacceptable, it will not be in the
    resulting list.

    This is a convenience wrapper around matchTypes and
    parseAcceptHeader.

    (str, [str]) -> [str]
    """
    preferred = matchTypes(parseAcceptHeader(accept_header), have_types)
    return [mtype for (mtype, _) in preferred]
| apache-2.0 |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/enums/types/negative_geo_target_type.py | 1 | 1192 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# proto-plus module registration: declares the proto package/marshal this
# module belongs to and the message names it exports.
__protobuf__ = proto.module(
    package='google.ads.googleads.v7.enums',
    marshal='google.ads.googleads.v7',
    manifest={
        'NegativeGeoTargetTypeEnum',
    },
)
class NegativeGeoTargetTypeEnum(proto.Message):
    r"""Container for enum describing possible negative geo target
    types.
    """
    class NegativeGeoTargetType(proto.Enum):
        r"""The possible negative geo target types."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        # NOTE(review): values 2 and 3 are absent -- presumably retired in
        # the upstream proto; confirm against the API definition.
        PRESENCE_OR_INTEREST = 4
        PRESENCE = 5
# Export exactly the names declared in the proto manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
darren-rogan/CouchPotatoServer | libs/werkzeug/wsgi.py | 13 | 30419 | # -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import urllib
import urlparse
import posixpath
import mimetypes
from itertools import chain, repeat
from zlib import adler32
from time import time, mktime
from datetime import datetime
from functools import partial
from werkzeug._internal import _patch_wrapper
from werkzeug.http import is_resource_modified, http_date
def responder(f):
    """Marks a function as responder.  Decorate a function with it and it
    will automatically call the return value as WSGI application.

    Example::

        @responder
        def application(environ, start_response):
            return Response('Hello World!')
    """
    # The last two positional arguments are (environ, start_response); the
    # wrapper calls f, then invokes f's return value (a WSGI app) with
    # exactly those two.  _patch_wrapper preserves f's metadata.
    return _patch_wrapper(f, lambda *a: f(*a)(*a[-2:]))
def get_current_url(environ, root_only=False, strip_querystring=False,
                    host_only=False):
    """A handy helper function that recreates the full URL for the current
    request or parts of it. Here an example:

    >>> from werkzeug.test import create_environ
    >>> env = create_environ("/?param=foo", "http://localhost/script")
    >>> get_current_url(env)
    'http://localhost/script/?param=foo'
    >>> get_current_url(env, root_only=True)
    'http://localhost/script/'
    >>> get_current_url(env, host_only=True)
    'http://localhost/'
    >>> get_current_url(env, strip_querystring=True)
    'http://localhost/script/'

    :param environ: the WSGI environment to get the current URL from.
    :param root_only: set `True` if you only want the root URL.
    :param strip_querystring: set to `True` if you don't want the querystring.
    :param host_only: set to `True` if the host URL should be returned.
    """
    parts = [environ['wsgi.url_scheme'], '://', get_host(environ)]
    if host_only:
        return ''.join(parts) + '/'
    parts.append(urllib.quote(environ.get('SCRIPT_NAME', '').rstrip('/')))
    if root_only:
        parts.append('/')
    else:
        parts.append(urllib.quote(
            '/' + environ.get('PATH_INFO', '').lstrip('/')))
    # Note: the query string is appended even when root_only is set, matching
    # the documented examples only when QUERY_STRING is absent or stripped.
    if not strip_querystring:
        qs = environ.get('QUERY_STRING')
        if qs:
            parts.append('?' + qs)
    return ''.join(parts)
def get_host(environ):
    """Return the real host for the given WSGI environment.  This takes care
    of the `X-Forwarded-Host` header.

    :param environ: the WSGI environment to get the host of.
    """
    # A proxy-supplied forwarded host wins, then the client-supplied Host
    # header, then the server's own name/port.
    for header in ('HTTP_X_FORWARDED_HOST', 'HTTP_HOST'):
        if header in environ:
            return environ[header]
    host = environ['SERVER_NAME']
    scheme_and_port = (environ['wsgi.url_scheme'], environ['SERVER_PORT'])
    # Only append the port when it is not the scheme's default.
    if scheme_and_port not in (('https', '443'), ('http', '80')):
        host += ':' + environ['SERVER_PORT']
    return host
def pop_path_info(environ):
    """Removes and returns the next segment of `PATH_INFO`, pushing it onto
    `SCRIPT_NAME`.  Returns `None` if there is nothing left on `PATH_INFO`.

    If there are empty segments (``'/foo//bar``) these are ignored but
    properly pushed to the `SCRIPT_NAME`:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> pop_path_info(env)
    'a'
    >>> env['SCRIPT_NAME']
    '/foo/a'
    >>> pop_path_info(env)
    'b'
    >>> env['SCRIPT_NAME']
    '/foo/a/b'

    .. versionadded:: 0.5

    :param environ: the WSGI environment that is modified.
    """
    path_info = environ.get('PATH_INFO')
    if not path_info:
        return None
    script_name = environ.get('SCRIPT_NAME', '')
    # Move any leading slashes (including duplicates) over to SCRIPT_NAME
    # so that empty segments are consumed but not returned.
    stripped = path_info.lstrip('/')
    script_name += '/' * (len(path_info) - len(stripped))
    if '/' not in stripped:
        # This was the last segment; PATH_INFO is now exhausted.
        environ['PATH_INFO'] = ''
        environ['SCRIPT_NAME'] = script_name + stripped
        return stripped
    segment, remainder = stripped.split('/', 1)
    environ['PATH_INFO'] = '/' + remainder
    environ['SCRIPT_NAME'] = script_name + segment
    return segment
def peek_path_info(environ):
    """Returns the next segment on the `PATH_INFO` or `None` if there
    is none.  Works like :func:`pop_path_info` without modifying the
    environment:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> peek_path_info(env)
    'a'
    >>> peek_path_info(env)
    'a'

    .. versionadded:: 0.5

    :param environ: the WSGI environment that is checked.
    """
    parts = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
    # str.split always yields at least one element, so this returns '' (not
    # None) for an empty PATH_INFO -- matching the original behavior.
    return parts[0] if parts else None
def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8',
                      errors='replace', collapse_http_schemes=True):
    """Extracts the path info from the given URL (or WSGI environment) and
    path.  The path info returned is a unicode string, not a bytestring
    suitable for a WSGI environment.  The URLs might also be IRIs.

    If the path info could not be determined, `None` is returned.

    Some examples:

    >>> extract_path_info('http://example.com/app', '/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello',
    ...                   collapse_http_schemes=False) is None
    True

    Instead of providing a base URL you can also pass a WSGI environment.

    .. versionadded:: 0.6

    :param environ_or_baseurl: a WSGI environment dict, a base URL or
                               base IRI.  This is the root of the
                               application.
    :param path_or_url: an absolute path from the server root, a
                        relative path (in which case it's the path info)
                        or a full URL.  Also accepts IRIs and unicode
                        parameters.
    :param charset: the charset for byte data in URLs
    :param errors: the error handling on decode
    :param collapse_http_schemes: if set to `False` the algorithm does
                                  not assume that http and https on the
                                  same server point to the same
                                  resource.
    """
    # NOTE(review): url_fix is imported but never used in this function.
    from werkzeug.urls import uri_to_iri, url_fix
    def _as_iri(obj):
        # Decode bytestrings to a unicode IRI; unicode input passes through.
        if not isinstance(obj, unicode):
            return uri_to_iri(obj, charset, errors)
        return obj
    def _normalize_netloc(scheme, netloc):
        # Strip any userinfo ('user:pass@') and drop the port when it is the
        # scheme's default, so equivalent netlocs compare equal below.
        parts = netloc.split(u'@', 1)[-1].split(u':', 1)
        if len(parts) == 2:
            netloc, port = parts
            if (scheme == u'http' and port == u'80') or \
               (scheme == u'https' and port == u'443'):
                port = None
        else:
            netloc = parts[0]
            port = None
        if port is not None:
            netloc += u':' + port
        return netloc
    # make sure whatever we are working on is a IRI and parse it
    path = _as_iri(path_or_url)
    if isinstance(environ_or_baseurl, dict):
        environ_or_baseurl = get_current_url(environ_or_baseurl,
                                             root_only=True)
    base_iri = _as_iri(environ_or_baseurl)
    base_scheme, base_netloc, base_path, = \
        urlparse.urlsplit(base_iri)[:3]
    # Resolve the path against the base so relative paths work too.
    cur_scheme, cur_netloc, cur_path, = \
        urlparse.urlsplit(urlparse.urljoin(base_iri, path))[:3]
    # normalize the network location
    base_netloc = _normalize_netloc(base_scheme, base_netloc)
    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
    # is that IRI even on a known HTTP scheme?
    if collapse_http_schemes:
        # http and https are treated as the same resource.
        for scheme in base_scheme, cur_scheme:
            if scheme not in (u'http', u'https'):
                return None
    else:
        # The schemes must match exactly (and still be HTTP-ish).
        if not (base_scheme in (u'http', u'https') and \
                base_scheme == cur_scheme):
            return None
    # are the netlocs compatible?
    if base_netloc != cur_netloc:
        return None
    # are we below the application path?
    base_path = base_path.rstrip(u'/')
    if not cur_path.startswith(base_path):
        return None
    return u'/' + cur_path[len(base_path):].lstrip(u'/')
class SharedDataMiddleware(object):
    """A WSGI middleware that provides static content for development
    environments or simple server setups. Usage is quite simple::

        import os
        from werkzeug.wsgi import SharedDataMiddleware

        app = SharedDataMiddleware(app, {
            '/shared': os.path.join(os.path.dirname(__file__), 'shared')
        })

    The contents of the folder ``./shared`` will now be available on
    ``http://example.com/shared/``.  This is pretty useful during development
    because a standalone media server is not required.  One can also mount
    files on the root folder and still continue to use the application because
    the shared data middleware forwards all unhandled requests to the
    application, even if the requests are below one of the shared folders.

    If `pkg_resources` is available you can also tell the middleware to serve
    files from package data::

        app = SharedDataMiddleware(app, {
            '/shared': ('myapplication', 'shared_files')
        })

    This will then serve the ``shared_files`` folder in the `myapplication`
    Python package.

    The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
    rules for files that are not accessible from the web.  If `cache` is set to
    `False` no caching headers are sent.

    Currently the middleware does not support non ASCII filenames.  If the
    encoding on the file system happens to be the encoding of the URI it may
    work but this could also be by accident.  We strongly suggest using ASCII
    only file names for static files.

    The middleware will guess the mimetype using the Python `mimetype`
    module.  If it's unable to figure out the charset it will fall back
    to `fallback_mimetype`.

    .. versionchanged:: 0.5
       The cache timeout is configurable now.

    .. versionadded:: 0.6
       The `fallback_mimetype` parameter was added.

    :param app: the application to wrap.  If you don't want to wrap an
                application you can pass it :exc:`NotFound`.
    :param exports: a dict of exported files and folders.
    :param disallow: a list of :func:`~fnmatch.fnmatch` rules.
    :param fallback_mimetype: the fallback mimetype for unknown files.
    :param cache: enable or disable caching headers.
    :param cache_timeout: the cache timeout in seconds for the headers.
    """
    def __init__(self, app, exports, disallow=None, cache=True,
                 cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
        self.app = app
        self.exports = {}
        self.cache = cache
        self.cache_timeout = cache_timeout
        # Each export value is normalized into a loader callable:
        #   tuple -> package data, file path -> single file,
        #   anything else string-like -> directory.
        for key, value in exports.iteritems():
            if isinstance(value, tuple):
                loader = self.get_package_loader(*value)
            elif isinstance(value, basestring):
                if os.path.isfile(value):
                    loader = self.get_file_loader(value)
                else:
                    loader = self.get_directory_loader(value)
            else:
                raise TypeError('unknown def %r' % value)
            self.exports[key] = loader
        if disallow is not None:
            # Shadows the is_allowed method on this instance with an
            # fnmatch-based check.
            from fnmatch import fnmatch
            self.is_allowed = lambda x: not fnmatch(x, disallow)
        self.fallback_mimetype = fallback_mimetype
    def is_allowed(self, filename):
        """Subclasses can override this method to disallow the access to
        certain files.  However by providing `disallow` in the constructor
        this method is overwritten.
        """
        return True
    def _opener(self, filename):
        # Returns a zero-argument callable yielding (open file object,
        # mtime as naive UTC datetime, file size in bytes).
        return lambda: (
            open(filename, 'rb'),
            datetime.utcfromtimestamp(os.path.getmtime(filename)),
            int(os.path.getsize(filename))
        )
    def get_file_loader(self, filename):
        # Loader for a single exported file; ignores the requested sub-path.
        return lambda x: (os.path.basename(filename), self._opener(filename))
    def get_package_loader(self, package, package_path):
        # Loader serving files from package data via pkg_resources.
        from pkg_resources import DefaultProvider, ResourceManager, \
             get_provider
        loadtime = datetime.utcnow()
        provider = get_provider(package)
        manager = ResourceManager()
        # A filesystem-bound provider lets us use real file metadata;
        # otherwise (e.g. zipimport) mtime is faked as load time and size 0.
        filesystem_bound = isinstance(provider, DefaultProvider)
        def loader(path):
            if path is None:
                return None, None
            path = posixpath.join(package_path, path)
            if not provider.has_resource(path):
                return None, None
            basename = posixpath.basename(path)
            if filesystem_bound:
                return basename, self._opener(
                    provider.get_resource_filename(manager, path))
            return basename, lambda: (
                provider.get_resource_stream(manager, path),
                loadtime,
                0
            )
        return loader
    def get_directory_loader(self, directory):
        # Loader serving any file below the given directory.
        def loader(path):
            if path is not None:
                path = os.path.join(directory, path)
            else:
                path = directory
            if os.path.isfile(path):
                return os.path.basename(path), self._opener(path)
            return None, None
        return loader
    def generate_etag(self, mtime, file_size, real_filename):
        # Weakly unique tag built from mtime, size, and an adler32 of the
        # filename; masked to keep the checksum non-negative.
        return 'wzsdm-%d-%s-%s' % (
            mktime(mtime.timetuple()),
            file_size,
            adler32(real_filename) & 0xffffffff
        )
    def __call__(self, environ, start_response):
        # sanitize the path for non unix systems
        cleaned_path = environ.get('PATH_INFO', '').strip('/')
        for sep in os.sep, os.altsep:
            if sep and sep != '/':
                cleaned_path = cleaned_path.replace(sep, '/')
        # Drop empty segments and '..' to prevent directory traversal.
        path = '/'.join([''] + [x for x in cleaned_path.split('/')
                                if x and x != '..'])
        file_loader = None
        # Try each export: an exact match first, then a prefix match with
        # the remainder passed to the loader.
        for search_path, loader in self.exports.iteritems():
            if search_path == path:
                real_filename, file_loader = loader(None)
                if file_loader is not None:
                    break
            if not search_path.endswith('/'):
                search_path += '/'
            if path.startswith(search_path):
                real_filename, file_loader = loader(path[len(search_path):])
                if file_loader is not None:
                    break
        if file_loader is None or not self.is_allowed(real_filename):
            # Not ours (or forbidden): forward to the wrapped application.
            return self.app(environ, start_response)
        guessed_type = mimetypes.guess_type(real_filename)
        mime_type = guessed_type[0] or self.fallback_mimetype
        f, mtime, file_size = file_loader()
        headers = [('Date', http_date())]
        if self.cache:
            timeout = self.cache_timeout
            etag = self.generate_etag(mtime, file_size, real_filename)
            headers += [
                ('Etag', '"%s"' % etag),
                ('Cache-Control', 'max-age=%d, public' % timeout)
            ]
            # Answer conditional requests with 304 and no body.
            if not is_resource_modified(environ, etag, last_modified=mtime):
                f.close()
                start_response('304 Not Modified', headers)
                return []
            headers.append(('Expires', http_date(time() + timeout)))
        else:
            headers.append(('Cache-Control', 'public'))
        headers.extend((
            ('Content-Type', mime_type),
            ('Content-Length', str(file_size)),
            ('Last-Modified', http_date(mtime))
        ))
        start_response('200 OK', headers)
        # wrap_file streams the file using wsgi.file_wrapper if available.
        return wrap_file(environ, f)
class DispatcherMiddleware(object):
    """Allows one to mount middlewares or applications in a WSGI application.

    This is useful if you want to combine multiple WSGI applications::

        app = DispatcherMiddleware(app, {
            '/app2':        app2,
            '/app3':        app3
        })

    The longest matching mount prefix wins; everything that matches no
    mount is handled by the top-level application.
    """

    def __init__(self, app, mounts=None):
        self.app = app
        self.mounts = mounts or {}

    def __call__(self, environ, start_response):
        script = environ.get('PATH_INFO', '')
        path_info = ''
        app = None
        matched = False
        # Walk the path from the most specific prefix to the least,
        # moving one trailing segment at a time from ``script`` into
        # ``path_info`` until a mounted prefix is found.
        while '/' in script:
            if script in self.mounts:
                app = self.mounts[script]
                matched = True
                break
            head, _, tail = script.rpartition('/')
            script = head
            path_info = '/%s%s' % (tail, path_info)
        if not matched:
            app = self.mounts.get(script, self.app)
        # Shift the consumed prefix from PATH_INFO onto SCRIPT_NAME so
        # the mounted application sees itself as the root.
        environ['SCRIPT_NAME'] = environ.get('SCRIPT_NAME', '') + script
        environ['PATH_INFO'] = path_info
        return app(environ, start_response)
class ClosingIterator(object):
    """The WSGI specification requires that all middlewares and gateways
    respect the `close` callback of an iterator.  This class wraps an
    iterable and invokes extra cleanup callbacks when it is closed::

        return ClosingIterator(app(environ, start_response), [cleanup_session,
                                                              cleanup_locals])

    A single callable may be passed instead of a list.  A closing
    iterator is unnecessary when the application uses response objects
    and finishes processing once the response has started::

        try:
            return response(environ, start_response)
        finally:
            cleanup_session()
            cleanup_locals()
    """

    def __init__(self, iterable, callbacks=None):
        iterator = iter(iterable)
        self._next = iterator.next
        # Normalize the callbacks argument to a fresh list we own.
        if callbacks is None:
            cleanups = []
        elif callable(callbacks):
            cleanups = [callbacks]
        else:
            cleanups = list(callbacks)
        # The wrapped iterator's own close() must run first, per WSGI.
        close_method = getattr(iterator, 'close', None)
        if close_method:
            cleanups.insert(0, close_method)
        self._callbacks = cleanups

    def __iter__(self):
        return self

    def next(self):
        return self._next()

    def close(self):
        for callback in self._callbacks:
            callback()
def wrap_file(environ, file, buffer_size=8192):
    """Wrap *file* for efficient WSGI transmission.

    Prefers the server-provided ``wsgi.file_wrapper`` from *environ* and
    falls back to the generic :class:`FileWrapper`.

    .. versionadded:: 0.5

    If the server's wrapper is used, do not iterate over it inside the
    application — pass it through unchanged.  When returning a file
    wrapper inside a response object, set
    :attr:`~BaseResponse.direct_passthrough` to `True`.  See :pep:`333`
    for details on file wrappers.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """
    wrapper = environ.get('wsgi.file_wrapper', FileWrapper)
    return wrapper(file, buffer_size)
class FileWrapper(object):
"""This class can be used to convert a :class:`file`-like object into
an iterable. It yields `buffer_size` blocks until the file is fully
read.
You should not use this class directly but rather use the
:func:`wrap_file` function that uses the WSGI server's file wrapper
support if it's available.
.. versionadded:: 0.5
If you're using this object together with a :class:`BaseResponse` you have
to use the `direct_passthrough` mode.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
def __init__(self, file, buffer_size=8192):
self.file = file
self.buffer_size = buffer_size
def close(self):
if hasattr(self.file, 'close'):
self.file.close()
def __iter__(self):
return self
def next(self):
data = self.file.read(self.buffer_size)
if data:
return data
raise StopIteration()
def make_limited_stream(stream, limit):
    """Makes a stream limited."""
    # Already wrapped: return as-is so limits are never stacked.
    if isinstance(stream, LimitedStream):
        return stream
    if limit is None:
        raise TypeError('stream not limited and no limit provided.')
    return LimitedStream(stream, limit)
def make_chunk_iter_func(stream, limit, buffer_size):
    """Helper for the line and chunk iter functions."""
    if hasattr(stream, 'read'):
        # File-like input: read fixed-size buffers from a limited stream.
        limited = make_limited_stream(stream, limit)
        return partial(limited.read, buffer_size)
    # Iterator input: pad with empty strings so exhaustion yields ''.
    return iter(chain(stream, repeat(''))).next
def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
    """Safely iterates line-based over an input stream.  If the input stream
    is not a :class:`LimitedStream` the `limit` parameter is mandatory.

    This uses the stream's :meth:`~file.read` method internally as opposite
    to the :meth:`~file.readline` method that is unsafe and can only be used
    in violation of the WSGI specification.  The same problem applies to the
    `__iter__` function of the input stream which calls :meth:`~file.readline`
    without arguments.

    If you need line-by-line processing it's strongly recommended to iterate
    over the input stream using this helper function.

    .. versionchanged:: 0.8
       This function now ensures that the limit was reached.

    .. versionadded:: 0.9
       added support for iterators as input stream.

    :param stream: the stream or iterate to iterate over.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is a :class:`LimitedStream`.
    :param buffer_size: The optional buffer size.
    """
    def _iter_basic_lines():
        # Yields lines (keepends) as they become complete; a partial
        # trailing line is carried over in ``buffer`` between reads.
        _read = make_chunk_iter_func(stream, limit, buffer_size)
        buffer = []
        while 1:
            new_data = _read()
            if not new_data:
                break
            new_buf = []
            for item in chain(buffer, new_data.splitlines(True)):
                new_buf.append(item)
                # A line is complete once it ends in a newline character.
                if item and item[-1:] in '\r\n':
                    yield ''.join(new_buf)
                    new_buf = []
            buffer = new_buf
        # Flush any unterminated final line.
        if buffer:
            yield ''.join(buffer)

    # This hackery is necessary to merge 'foo\r' and '\n' into one item
    # of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
    previous = ''
    for item in _iter_basic_lines():
        if item == '\n' and previous[-1:] == '\r':
            previous += '\n'
            item = ''
        if previous:
            yield previous
        previous = item
    if previous:
        yield previous
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.

    .. versionadded:: 0.8

    .. versionadded:: 0.9
       added support for iterators as input stream.

    :param stream: the stream or iterate to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is a :class:`LimitedStream`.
    :param buffer_size: The optional buffer size.
    """
    _read = make_chunk_iter_func(stream, limit, buffer_size)
    # The capturing group keeps the separator itself in the split result,
    # making chunk boundaries explicit in the loop below.
    _split = re.compile(r'(%s)' % re.escape(separator)).split
    buffer = []
    while 1:
        new_data = _read()
        if not new_data:
            break
        chunks = _split(new_data)
        new_buf = []
        # ``buffer`` holds the partial chunk left over from the previous
        # read; a separator item flushes everything collected so far.
        for item in chain(buffer, chunks):
            if item == separator:
                yield ''.join(new_buf)
                new_buf = []
            else:
                new_buf.append(item)
        buffer = new_buf
    # Flush the final chunk, which has no trailing separator.
    if buffer:
        yield ''.join(buffer)
class LimitedStream(object):
    """Wraps a stream so that it doesn't read more than n bytes.  If the
    stream is exhausted and the caller tries to get more bytes from it
    :func:`on_exhausted` is called which by default returns an empty
    string.  The return value of that function is forwarded
    to the reader function.  So if it returns an empty string
    :meth:`read` will return an empty string as well.

    The limit however must never be higher than what the stream can
    output.  Otherwise :meth:`readlines` will try to read past the
    limit.

    The `silent` parameter has no effect if :meth:`is_exhausted` is
    overridden by a subclass.

    .. versionchanged:: 0.6
       Non-silent usage was deprecated because it causes confusion.
       If you want that, override :meth:`is_exhausted` and raise a
       :exc:`~exceptions.BadRequest` yourself.

    .. admonition:: Note on WSGI compliance

       calls to :meth:`readline` and :meth:`readlines` are not
       WSGI compliant because it passes a size argument to the
       readline methods.  Unfortunately the WSGI PEP is not safely
       implementable without a size argument to :meth:`readline`
       because there is no EOF marker in the stream.  As a result
       of that the use of :meth:`readline` is discouraged.

       For the same reason iterating over the :class:`LimitedStream`
       is not portable.  It internally calls :meth:`readline`.

       We strongly suggest using :meth:`read` only or using the
       :func:`make_line_iter` which safely iterates line-based
       over a WSGI input stream.

    :param stream: the stream to wrap.
    :param limit: the limit for the stream, must not be longer than
                  what the string can provide if the stream does not
                  end with `EOF` (like `wsgi.input`)
    :param silent: If set to `True` the stream will allow reading
                   past the limit and will return an empty string.
    """

    def __init__(self, stream, limit, silent=True):
        # Bind the underlying read functions once; the wrapper never
        # touches the stream object itself after this.
        self._read = stream.read
        self._readline = stream.readline
        # Number of bytes handed out so far.
        self._pos = 0
        self.limit = limit
        self.silent = silent
        if not silent:
            from warnings import warn
            warn(DeprecationWarning('non-silent usage of the '
                'LimitedStream is deprecated.  If you want to '
                'continue to use the stream in non-silent usage '
                'override on_exhausted.'), stacklevel=2)

    def __iter__(self):
        return self

    @property
    def is_exhausted(self):
        """If the stream is exhausted this attribute is `True`."""
        return self._pos >= self.limit

    def on_exhausted(self):
        """This is called when the stream tries to read past the limit.
        The return value of this function is returned from the reading
        function.
        """
        if self.silent:
            return ''
        from werkzeug.exceptions import BadRequest
        raise BadRequest('input stream exhausted')

    def on_disconnect(self):
        """What should happen if a disconnect is detected?  The return
        value of this function is returned from read functions in case
        the client went away.  By default a
        :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
        """
        from werkzeug.exceptions import ClientDisconnected
        raise ClientDisconnected()

    def exhaust(self, chunk_size=1024 * 16):
        """Exhaust the stream.  This consumes all the data left until the
        limit is reached.

        :param chunk_size: the size for a chunk.  It will read the chunk
                           until the stream is exhausted and throw away
                           the results.
        """
        to_read = self.limit - self._pos
        chunk = chunk_size
        while to_read > 0:
            # Last iteration may read less than a full chunk.
            chunk = min(to_read, chunk)
            self.read(chunk)
            to_read -= chunk

    def read(self, size=None):
        """Read `size` bytes or if size is not provided everything is read.

        :param size: the number of bytes read.
        """
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None or size == -1:  # -1 is for consistence with file
            size = self.limit
        # Never hand out more than the remaining budget.
        to_read = min(self.limit - self._pos, size)
        try:
            read = self._read(to_read)
        except (IOError, ValueError):
            return self.on_disconnect()
        # A short read before the limit means the client went away.
        if to_read and len(read) != to_read:
            return self.on_disconnect()
        self._pos += len(read)
        return read

    def readline(self, size=None):
        """Reads one line from the stream."""
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None:
            size = self.limit - self._pos
        else:
            size = min(size, self.limit - self._pos)
        try:
            line = self._readline(size)
        except (ValueError, IOError):
            return self.on_disconnect()
        # Empty result despite remaining budget: client disconnected.
        if size and not line:
            return self.on_disconnect()
        self._pos += len(line)
        return line

    def readlines(self, size=None):
        """Reads a file into a list of strings.  It calls :meth:`readline`
        until the file is read to the end.  It does support the optional
        `size` argument if the underlaying stream supports it for
        `readline`.
        """
        last_pos = self._pos
        result = []
        if size is not None:
            end = min(self.limit, last_pos + size)
        else:
            end = self.limit
        while 1:
            if size is not None:
                # Shrink the remaining size budget by what the last
                # readline actually consumed.
                size -= last_pos - self._pos
            if self._pos >= end:
                break
            result.append(self.readline(size))
            if size is not None:
                last_pos = self._pos
        return result

    def tell(self):
        """Returns the position of the stream.

        .. versionadded:: 0.9
        """
        return self._pos

    def next(self):
        # Py2 iterator protocol; delegates to readline (see WSGI note
        # in the class docstring).
        line = self.readline()
        if line is None:
            raise StopIteration()
        return line
| gpl-3.0 |
Korkki/django | django/contrib/messages/storage/base.py | 526 | 6285 | from __future__ import unicode_literals
from django.conf import settings
from django.contrib.messages import constants, utils
from django.utils.encoding import force_text, python_2_unicode_compatible
LEVEL_TAGS = utils.get_level_tags()
@python_2_unicode_compatible
class Message(object):
    """
    A single user-facing message with a level and optional extra tags.

    Instances of this class are what the supported storage backends
    (typically session- or cookie-based) serialize and what views or
    templates render.
    """

    def __init__(self, level, message, extra_tags=None):
        self.level = int(level)
        self.message = message
        self.extra_tags = extra_tags

    def _prepare(self):
        """
        Resolve ``message`` and ``extra_tags`` to text ahead of
        serialization, so lazy translation objects are evaluated.  Known
        "safe" values (None, int, etc.) pass through unchanged — see
        Django's ``force_text`` for details.
        """
        self.message = force_text(self.message, strings_only=True)
        self.extra_tags = force_text(self.extra_tags, strings_only=True)

    def __eq__(self, other):
        return isinstance(other, Message) and \
            self.level == other.level and \
            self.message == other.message

    def __str__(self):
        return force_text(self.message)

    def _get_tags(self):
        # Combine the caller-supplied tags with the level tag, dropping
        # whichever is empty.
        extra_tags = force_text(self.extra_tags, strings_only=True)
        level_tag = self.level_tag
        if extra_tags and level_tag:
            return ' '.join([extra_tags, level_tag])
        if extra_tags:
            return extra_tags
        return level_tag or ''
    tags = property(_get_tags)

    @property
    def level_tag(self):
        return force_text(LEVEL_TAGS.get(self.level, ''), strings_only=True)
class BaseStorage(object):
    """
    This is the base backend for temporary message storage.

    This is not a complete class; to be a usable storage backend, it must be
    subclassed and the two methods ``_get`` and ``_store`` overridden.
    """

    def __init__(self, request, *args, **kwargs):
        self.request = request
        # Messages added during this request, not yet persisted.
        self._queued_messages = []
        # True once the storage has been iterated (messages were shown).
        self.used = False
        # True once add() has queued at least one new message.
        self.added_new = False
        super(BaseStorage, self).__init__(*args, **kwargs)

    def __len__(self):
        return len(self._loaded_messages) + len(self._queued_messages)

    def __iter__(self):
        # Iterating marks the storage as consumed and merges queued
        # messages into the loaded list so they are shown exactly once.
        self.used = True
        if self._queued_messages:
            self._loaded_messages.extend(self._queued_messages)
            self._queued_messages = []
        return iter(self._loaded_messages)

    def __contains__(self, item):
        return item in self._loaded_messages or item in self._queued_messages

    @property
    def _loaded_messages(self):
        """
        Returns a list of loaded messages, retrieving them first if they have
        not been loaded yet.
        """
        # _loaded_data caches the backend read for the request lifetime.
        if not hasattr(self, '_loaded_data'):
            messages, all_retrieved = self._get()
            self._loaded_data = messages or []
        return self._loaded_data

    def _get(self, *args, **kwargs):
        """
        Retrieves a list of stored messages. Returns a tuple of the messages
        and a flag indicating whether or not all the messages originally
        intended to be stored in this storage were, in fact, stored and
        retrieved; e.g., ``(messages, all_retrieved)``.

        **This method must be implemented by a subclass.**

        If it is possible to tell if the backend was not used (as opposed to
        just containing no messages) then ``None`` should be returned in
        place of ``messages``.
        """
        raise NotImplementedError('subclasses of BaseStorage must provide a _get() method')

    def _store(self, messages, response, *args, **kwargs):
        """
        Stores a list of messages, returning a list of any messages which could
        not be stored.

        One type of object must be able to be stored, ``Message``.

        **This method must be implemented by a subclass.**
        """
        raise NotImplementedError('subclasses of BaseStorage must provide a _store() method')

    def _prepare_messages(self, messages):
        """
        Prepares a list of messages for storage.
        """
        for message in messages:
            message._prepare()

    def update(self, response):
        """
        Stores all unread messages.

        If the backend has yet to be iterated, previously stored messages will
        be stored again. Otherwise, only messages added after the last
        iteration will be stored.
        """
        self._prepare_messages(self._queued_messages)
        if self.used:
            # Already displayed: only the still-queued messages survive.
            return self._store(self._queued_messages, response)
        elif self.added_new:
            # Never displayed: re-store old messages plus the new ones.
            messages = self._loaded_messages + self._queued_messages
            return self._store(messages, response)

    def add(self, level, message, extra_tags=''):
        """
        Queues a message to be stored.

        The message is only queued if it contained something and its level is
        not less than the recording level (``self.level``).
        """
        if not message:
            return
        # Check that the message level is not less than the recording level.
        level = int(level)
        if level < self.level:
            return
        # Add the message.
        self.added_new = True
        message = Message(level, message, extra_tags=extra_tags)
        self._queued_messages.append(message)

    def _get_level(self):
        """
        Returns the minimum recorded level.

        The default level is the ``MESSAGE_LEVEL`` setting. If this is
        not found, the ``INFO`` level is used.
        """
        if not hasattr(self, '_level'):
            self._level = getattr(settings, 'MESSAGE_LEVEL', constants.INFO)
        return self._level

    def _set_level(self, value=None):
        """
        Sets a custom minimum recorded level.

        If set to ``None``, the default level will be used (see the
        ``_get_level`` method).
        """
        if value is None and hasattr(self, '_level'):
            del self._level
        else:
            self._level = int(value)

    # Deleting the property value resets it to the setting's default.
    level = property(_get_level, _set_level, _set_level)
| bsd-3-clause |
stonekyx/binary | vendor/scons-local-2.3.4/SCons/Tool/dvi.py | 9 | 2335 | """SCons.Tool.dvi
Common DVI Builder definition for various other Tool modules that use it.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/dvi.py 2014/09/27 12:51:43 garyo"
import SCons.Builder
import SCons.Tool
# Module-level singleton Builder shared by every construction
# environment; created lazily on the first generate() call.
DVIBuilder = None
def generate(env):
    """Attach the shared skeleton DVI builder to *env* if it has none."""
    global DVIBuilder
    try:
        env['BUILDERS']['DVI']
        return
    except KeyError:
        pass
    if DVIBuilder is None:
        # The suffix is hard-coded to '.dvi', not configurable via a
        # construction variable like $DVISUFFIX, because the output
        # file name is hard-coded within TeX.
        DVIBuilder = SCons.Builder.Builder(action = {},
                                           source_scanner = SCons.Tool.LaTeXScanner,
                                           suffix = '.dvi',
                                           emitter = {},
                                           source_ext_match = None)
    env['BUILDERS']['DVI'] = DVIBuilder
def exists(env):
    """Always report availability.

    Only a skeleton Builder is installed here, so any direct reference
    to this Tool succeeds unconditionally.
    """
    return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
mvaled/sentry | src/sentry/south_migrations/0326_auto__add_field_groupsnooze_count__add_field_groupsnooze_window__add_f.py | 1 | 116733 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Add the snooze-threshold columns to ``sentry_groupsnooze`` and
    relax ``until`` to be nullable (it is no longer the only trigger)."""
    # Adding field 'GroupSnooze.count'
    db.add_column(
        'sentry_groupsnooze',
        'count',
        self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(null=True),
        keep_default=False
    )

    # Adding field 'GroupSnooze.window'
    db.add_column(
        'sentry_groupsnooze',
        'window',
        self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(null=True),
        keep_default=False
    )

    # Adding field 'GroupSnooze.user_count'
    db.add_column(
        'sentry_groupsnooze',
        'user_count',
        self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(null=True),
        keep_default=False
    )

    # Adding field 'GroupSnooze.user_window'
    db.add_column(
        'sentry_groupsnooze',
        'user_window',
        self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(null=True),
        keep_default=False
    )

    # Adding field 'GroupSnooze.state'
    db.add_column(
        'sentry_groupsnooze',
        'state',
        self.gf('sentry.db.models.fields.jsonfield.JSONField')(null=True),
        keep_default=False
    )

    # Changing field 'GroupSnooze.until'
    db.alter_column(
        'sentry_groupsnooze',
        'until',
        self.gf('django.db.models.fields.DateTimeField')(null=True)
    )
def backwards(self, orm):
    """Refuse to reverse: restoring NOT NULL on ``until`` would require
    values that no longer exist once rows rely on the new columns."""
    raise RuntimeError(
        "Cannot reverse this migration. 'GroupSnooze.until' and its values cannot be restored."
    )
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apiapplication': {
'Meta': {
'object_name': 'ApiApplication'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'client_id': (
'django.db.models.fields.CharField', [], {
'default': "'1fe2246606cd41688e14b95ae1bdc14c6b7652dea035446fa2dc8bcacf21afd6'",
'unique': 'True',
'max_length': '64'
}
),
'client_secret': (
'sentry.db.models.fields.encrypted.EncryptedTextField', [], {
'default': "'7f918820281a421d991389c5fad78a41551739601ae745e8a24e9cb56ee8ffaa'"
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'homepage_url':
('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'default': "'Trusting Weasel'",
'max_length': '64',
'blank': 'True'
}
),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'privacy_url':
('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'terms_url':
('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
})
},
'sentry.apiauthorization': {
'Meta': {
'unique_together': "(('user', 'application'),)",
'object_name': 'ApiAuthorization'
},
'application': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiApplication']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'scope_list': (
'sentry.db.models.fields.array.ArrayField', [], {
'of': ('django.db.models.fields.TextField', [], {})
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.apigrant': {
'Meta': {
'object_name': 'ApiGrant'
},
'application': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiApplication']"
}
),
'code': (
'django.db.models.fields.CharField', [], {
'default': "'d959d133f88c4292a581081e6190b949'",
'max_length': '64',
'db_index': 'True'
}
),
'expires_at': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2017, 6, 1, 0, 0)',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'redirect_uri': ('django.db.models.fields.CharField', [], {
'max_length': '255'
}),
'scope_list': (
'sentry.db.models.fields.array.ArrayField', [], {
'of': ('django.db.models.fields.TextField', [], {})
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scope_list': (
'sentry.db.models.fields.array.ArrayField', [], {
'of': ('django.db.models.fields.TextField', [], {})
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.apitoken': {
'Meta': {
'object_name': 'ApiToken'
},
'application': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiApplication']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'expires_at': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2017, 7, 1, 0, 0)',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'refresh_token': (
'django.db.models.fields.CharField', [], {
'default': "'6c4fadd19de34e39ac0859f3f896065cd8c3cd19c56c453287ab9f199c539138'",
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'scope_list': (
'sentry.db.models.fields.array.ArrayField', [], {
'of': ('django.db.models.fields.TextField', [], {})
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'token': (
'django.db.models.fields.CharField', [], {
'default': "'94b568466766407cad05e6e2a630f6561a04ecb269c047c381f78c857d84422a'",
'unique': 'True',
'max_length': '64'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authenticator': {
'Meta': {
'unique_together': "(('user', 'type'),)",
'object_name': 'Authenticator',
'db_table': "'auth_authenticator'"
},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config':
('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2017, 6, 8, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.commit': {
'Meta': {
'unique_together': "(('repository_id', 'key'),)",
'object_name': 'Commit',
'index_together': "(('repository_id', 'date_added'),)"
},
'author': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.CommitAuthor']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'message': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'repository_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {
'unique_together':
"(('organization_id', 'email'), ('organization_id', 'external_id'))",
'object_name':
'CommitAuthor'
},
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'external_id':
('django.db.models.fields.CharField', [], {
'max_length': '164',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.commitfilechange': {
'Meta': {
'unique_together': "(('commit', 'filename'),)",
'object_name': 'CommitFileChange'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'filename': ('django.db.models.fields.CharField', [], {
'max_length': '255'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '1'
})
},
'sentry.counter': {
'Meta': {
'object_name': 'Counter',
'db_table': "'sentry_projectcounter'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'unique': 'True'
}
),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.deploy': {
'Meta': {
'object_name': 'Deploy'
},
'date_finished':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'environment_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'notified': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'db_index': 'True',
'blank': 'True'
}
),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.distribution': {
'Meta': {
'unique_together': "(('release', 'name'),)",
'object_name': 'Distribution'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.dsymapp': {
'Meta': {
'unique_together': "(('project', 'platform', 'app_id'),)",
'object_name': 'DSymApp'
},
'app_id': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'sync_id':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
})
},
'sentry.dsymbundle': {
'Meta': {
'object_name': 'DSymBundle'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'sdk': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymSDK']"
}
)
},
'sentry.dsymobject': {
'Meta': {
'object_name': 'DSymObject'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_path': ('django.db.models.fields.TextField', [], {
'db_index': 'True'
}),
'uuid':
('django.db.models.fields.CharField', [], {
'max_length': '36',
'db_index': 'True'
}),
'vmaddr':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'vmsize':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
})
},
'sentry.dsymsdk': {
'Meta': {
'object_name':
'DSymSDK',
'index_together':
"[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"
},
'dsym_type':
('django.db.models.fields.CharField', [], {
'max_length': '20',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'sdk_name': ('django.db.models.fields.CharField', [], {
'max_length': '20'
}),
'version_build': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {
'unique_together': "[('object', 'address')]",
'object_name': 'DSymSymbol'
},
'address':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {
'unique_together': "(('project_id', 'name'), ('organization_id', 'name'))",
'object_name': 'Environment'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Project']",
'through': "orm['sentry.EnvironmentProject']",
'symmetrical': 'False'
}
)
},
'sentry.environmentproject': {
'Meta': {
'unique_together': "(('project', 'environment'),)",
'object_name': 'EnvironmentProject'
},
'environment': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Environment']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.event': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group_id', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {
'unique_together': "(('raw_event', 'processing_issue'),)",
'object_name': 'EventProcessingIssue'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'processing_issue': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ProcessingIssue']"
}
),
'raw_event': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.RawEvent']"
}
)
},
'sentry.eventtag': {
'Meta': {
'unique_together':
"(('event_id', 'key_id', 'value_id'),)",
'object_name':
'EventTag',
'index_together':
"(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {
'object_name': 'GlobalDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '36'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'short_id'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True',
'on_delete': 'models.PROTECT'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'short_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupcommitresolution': {
'Meta': {
'unique_together': "(('group_id', 'commit_id'),)",
'object_name': 'GroupCommitResolution'
},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
})
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {
'object_name': 'GroupRedirect'
},
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'previous_group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'unique': 'True'
})
},
'sentry.grouprelease': {
'Meta': {
'unique_together': "(('group_id', 'release_id', 'environment'),)",
'object_name': 'GroupRelease'
},
'environment':
('django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64'
}),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'count':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'state': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'null': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'user_count':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'user_window':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'window':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.groupsubscription': {
'Meta': {
'unique_together': "(('group', 'user'),)",
'object_name': 'GroupSubscription'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Project']"
}
),
'reason':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('group_id', 'key', 'value'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'",
'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationavatar': {
'Meta': {
'object_name': 'OrganizationAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.Organization']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'token': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True',
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationonboardingtask': {
'Meta': {
'unique_together': "(('organization', 'task'),)",
'object_name': 'OrganizationOnboardingTask'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_completed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {
'unique_together': "(('project', 'checksum', 'type'),)",
'object_name': 'ProcessingIssue'
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'db_index': 'True'
}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '30'
})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'flags':
('django.db.models.fields.BigIntegerField', [], {
'default': '0',
'null': 'True'
}),
'forced_color': (
'django.db.models.fields.CharField', [], {
'max_length': '6',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectbookmark': {
'Meta': {
'unique_together': "(('project_id', 'user'),)",
'object_name': 'ProjectBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.projectdsymfile': {
'Meta': {
'unique_together': "(('project', 'uuid'),)",
'object_name': 'ProjectDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'uuid': ('django.db.models.fields.CharField', [], {
'max_length': '36'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'rate_limit_count':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'rate_limit_window':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {
'unique_together': "(('project_id', 'platform'),)",
'object_name': 'ProjectPlatform'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.rawevent': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'RawEvent'
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.release': {
'Meta': {
'unique_together': "(('organization', 'version'),)",
'object_name': 'Release'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'releases'",
'symmetrical': 'False',
'through': "orm['sentry.ReleaseProject']",
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasecommit': {
'Meta': {
'unique_together': "(('release', 'commit'), ('release', 'order'))",
'object_name': 'ReleaseCommit'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseenvironment': {
'Meta': {
'unique_together':
"(('project_id', 'release_id', 'environment_id'), ('organization_id', 'release_id', 'environment_id'))",
'object_name':
'ReleaseEnvironment',
'db_table':
"'sentry_environmentrelease'"
},
'environment_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'dist': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Distribution']",
'null': 'True'
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseheadcommit': {
'Meta': {
'unique_together': "(('repository_id', 'release'),)",
'object_name': 'ReleaseHeadCommit'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'repository_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {
'unique_together': "(('project', 'release'),)",
'object_name': 'ReleaseProject',
'db_table': "'sentry_release_project'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.repository': {
'Meta': {
'unique_together':
"(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))",
'object_name':
'Repository'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'external_id':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'provider':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'url': ('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
})
},
'sentry.reprocessingreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'ReprocessingReport'
},
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_default': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'SavedSearchUserDefault',
'db_table': "'sentry_savedsearch_userdefault'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'savedsearch': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.SavedSearch']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.scheduleddeletion': {
'Meta': {
'unique_together': "(('app_label', 'model_name', 'object_id'),)",
'object_name': 'ScheduledDeletion'
},
'aborted': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'actor_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'app_label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_scheduled': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2017, 7, 1, 0, 0)'
}
),
'guid': (
'django.db.models.fields.CharField', [], {
'default': "'7dcd5c1ace824812b6cc232360d975f7'",
'unique': 'True',
'max_length': '32'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'in_progress': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'model_name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_password_expired':
('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'session_nonce':
('django.db.models.fields.CharField', [], {
'max_length': '12',
'null': 'True'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useravatar': {
'Meta': {
'object_name': 'UserAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.useremail': {
'Meta': {
'unique_together': "(('user', 'email'),)",
'object_name': 'UserEmail'
},
'date_hash_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_verified': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'emails'",
'to': "orm['sentry.User']"
}
),
'validation_hash': (
'django.db.models.fields.CharField', [], {
'default': "u'UgLIAnDusbhZ8E66pCx3Af5EoUtzEmSA'",
'max_length': '32'
}
)
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'event_user_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.versiondsymfile': {
'Meta': {
'unique_together': "(('dsym_file', 'version', 'build'),)",
'object_name': 'VersionDSymFile'
},
'build':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'dsym_app': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymApp']"
}
),
'dsym_file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ProjectDSymFile']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '32'
})
}
}
complete_apps = ['sentry']
| bsd-3-clause |
mancoast/CPythonPyc_test | fail/331_test_codecencodings_kr.py | 28 | 3125 | #!/usr/bin/env python3
#
# test_codecencodings_kr.py
# Codec encoding tests for ROK encodings.
#
from test import support
from test import multibytecodec_support
import unittest
class Test_CP949(multibytecodec_support.TestBase, unittest.TestCase):
    # Round-trip and error-handling tests for the CP949 (Unified Hangul Code)
    # codec.  Each codectests entry is (input bytes, error handler, expected
    # decoded str) -- an expected value of None means decoding must raise.
    encoding = 'cp949'
    tstring = multibytecodec_support.load_teststring('cp949')
    codectests = (
        # invalid bytes
        (b"abc\x80\x80\xc1\xc4", "strict", None),
        (b"abc\xc8", "strict", None),
        (b"abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\ufffd\uc894"),
        (b"abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\uc894\ufffd"),
        (b"abc\x80\x80\xc1\xc4", "ignore", "abc\uc894"),
    )
class Test_EUCKR(multibytecodec_support.TestBase, unittest.TestCase):
    # Round-trip and error-handling tests for the EUC-KR codec.  Each
    # codectests entry is (input bytes, error handler, expected decoded str);
    # an expected value of None means decoding must raise.
    encoding = 'euc_kr'
    tstring = multibytecodec_support.load_teststring('euc_kr')
    codectests = (
        # invalid bytes
        (b"abc\x80\x80\xc1\xc4", "strict", None),
        (b"abc\xc8", "strict", None),
        (b"abc\x80\x80\xc1\xc4", "replace", 'abc\ufffd\ufffd\uc894'),
        (b"abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\uc894\ufffd"),
        (b"abc\x80\x80\xc1\xc4", "ignore", "abc\uc894"),
        # composed make-up sequence errors
        # (an 8-byte \xa4\xd4 ... sequence composes a single Hangul syllable;
        # truncated or corrupted sequences must be rejected or replaced)
        (b"\xa4\xd4", "strict", None),
        (b"\xa4\xd4\xa4", "strict", None),
        (b"\xa4\xd4\xa4\xb6", "strict", None),
        (b"\xa4\xd4\xa4\xb6\xa4", "strict", None),
        (b"\xa4\xd4\xa4\xb6\xa4\xd0", "strict", None),
        (b"\xa4\xd4\xa4\xb6\xa4\xd0\xa4", "strict", None),
        (b"\xa4\xd4\xa4\xb6\xa4\xd0\xa4\xd4", "strict", "\uc4d4"),
        (b"\xa4\xd4\xa4\xb6\xa4\xd0\xa4\xd4x", "strict", "\uc4d4x"),
        (b"a\xa4\xd4\xa4\xb6\xa4", "replace", 'a\ufffd'),
        (b"\xa4\xd4\xa3\xb6\xa4\xd0\xa4\xd4", "strict", None),
        (b"\xa4\xd4\xa4\xb6\xa3\xd0\xa4\xd4", "strict", None),
        (b"\xa4\xd4\xa4\xb6\xa4\xd0\xa3\xd4", "strict", None),
        (b"\xa4\xd4\xa4\xff\xa4\xd0\xa4\xd4", "replace", '\ufffd\u6e21\ufffd\u3160\ufffd'),
        (b"\xa4\xd4\xa4\xb6\xa4\xff\xa4\xd4", "replace", '\ufffd\u6e21\ub544\ufffd\ufffd'),
        (b"\xa4\xd4\xa4\xb6\xa4\xd0\xa4\xff", "replace", '\ufffd\u6e21\ub544\u572d\ufffd'),
        (b"\xa4\xd4\xff\xa4\xd4\xa4\xb6\xa4\xd0\xa4\xd4", "replace", '\ufffd\ufffd\ufffd\uc4d4'),
        (b"\xc1\xc4", "strict", "\uc894"),
    )
class Test_JOHAB(multibytecodec_support.TestBase, unittest.TestCase):
    # Round-trip and error-handling tests for the Johab codec.  Each
    # codectests entry is (input bytes, error handler, expected decoded str);
    # an expected value of None means decoding must raise.
    encoding = 'johab'
    tstring = multibytecodec_support.load_teststring('johab')
    codectests = (
        # invalid bytes
        (b"abc\x80\x80\xc1\xc4", "strict", None),
        (b"abc\xc8", "strict", None),
        (b"abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\ufffd\ucd27"),
        (b"abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\ucd27\ufffd"),
        (b"abc\x80\x80\xc1\xc4", "ignore", "abc\ucd27"),
        # lead bytes with no valid trail byte must produce a single replacement
        (b"\xD8abc", "replace", "\uFFFDabc"),
        (b"\xD8\xFFabc", "replace", "\uFFFD\uFFFDabc"),
        (b"\x84bxy", "replace", "\uFFFDbxy"),
        (b"\x8CBxy", "replace", "\uFFFDBxy"),
    )
def test_main():
    # Run every TestCase defined in this module through the test.support
    # regression-test driver.
    support.run_unittest(__name__)

if __name__ == "__main__":
    test_main()
| gpl-3.0 |
aperigault/ansible | test/units/modules/cloud/xenserver/test_xenserver_guest_powerstate.py | 39 | 10606 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
import pytest
from .common import fake_xenapi_ref
# Parametrization for set_power_state() tests.  Each params entry is a
# (state_changed, resulting_power_state) pair returned by the mocked
# set_vm_power_state() helper.
testcase_set_powerstate = {
    "params": [(False, "someoldstate"), (True, "somenewstate")],
    "ids": ["state-same", "state-changed"],
}
# Module-param sets exercising state=present, both implicitly (key omitted,
# relying on the module default) and explicitly.
testcase_module_params_state_present = {
    "params": [
        dict(
            {
                "hostname": "somehost",
                "username": "someuser",
                "password": "somepwd",
                "name": "somevmname",
            },
            **extra
        )
        for extra in ({}, {"state": "present"})
    ],
    "ids": ["present-implicit", "present-explicit"],
}
# Module-param sets for every non-present power state.  All six entries share
# the same connection/identification params and differ only in "state"; the
# test id equals the requested state.
testcase_module_params_state_other = {
    "params": [
        {
            "hostname": "somehost",
            "username": "someuser",
            "password": "somepwd",
            "name": "somevmname",
            "state": state,
        }
        for state in (
            "powered-on",
            "powered-off",
            "restarted",
            "shutdown-guest",
            "reboot-guest",
            "suspended",
        )
    ],
    "ids": [
        "powered-on",
        "powered-off",
        "restarted",
        "shutdown-guest",
        "reboot-guest",
        "suspended",
    ],
}
# Module-param sets with wait_for_ip_address enabled, once for state=present
# and once for a power-changing state (powered-on).
testcase_module_params_wait = {
    "params": [
        {
            "hostname": "somehost",
            "username": "someuser",
            "password": "somepwd",
            "name": "somevmname",
            "state": state,
            "wait_for_ip_address": "yes",
        }
        for state in ("present", "powered-on")
    ],
    "ids": ["wait-present", "wait-other"],
}
@pytest.mark.parametrize('power_state', testcase_set_powerstate['params'], ids=testcase_set_powerstate['ids'])
def test_xenserver_guest_powerstate_set_power_state(mocker, fake_ansible_module, XenAPI, xenserver_guest_powerstate, power_state):
    """Verify XenServerVM.set_power_state() propagates the mocked result."""
    # Stub out every XenAPI touch point so no real session is needed.
    mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
    mocked_xenapi.configure_mock(**{
        "pool.get_all.return_value": [fake_xenapi_ref('pool')],
        "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
    })
    mocker.patch('ansible.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
    # Stub the module-level helpers the VM wrapper relies on.
    mocker.patch('ansible.modules.cloud.xenserver.xenserver_guest_powerstate.get_object_ref', return_value=fake_xenapi_ref('VM'))
    mocker.patch('ansible.modules.cloud.xenserver.xenserver_guest_powerstate.gather_vm_params', return_value={"power_state": "Someoldstate"})
    mocked_set_power = mocker.patch('ansible.modules.cloud.xenserver.xenserver_guest_powerstate.set_vm_power_state',
                                    return_value=power_state)
    fake_ansible_module.params.update(name="somename", uuid="someuuid", state_change_timeout=1)
    vm = xenserver_guest_powerstate.XenServerVM(fake_ansible_module)
    changed = vm.set_power_state(None)
    mocked_set_power.assert_called_once_with(fake_ansible_module, fake_xenapi_ref('VM'), None, 1)
    assert changed == power_state[0]
    assert vm.vm_params['power_state'] == power_state[1].capitalize()
@pytest.mark.parametrize('patch_ansible_module',
                         testcase_module_params_state_present['params'],
                         ids=testcase_module_params_state_present['ids'],
                         indirect=True)
def test_xenserver_guest_powerstate_present(mocker, patch_ansible_module, capfd, XenAPI, xenserver_guest_powerstate):
    """Verify a full module run with state=present: params are parsed, facts
    are returned, and no power state change or IP wait is attempted."""
    fake_vm_facts = {"fake-vm-fact": True}
    # XenAPI session plumbing.
    mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
    mocked_xenapi.configure_mock(**{
        "pool.get_all.return_value": [fake_xenapi_ref('pool')],
        "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
    })
    mocker.patch('ansible.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
    # Module-level helpers.
    mocker.patch('ansible.modules.cloud.xenserver.xenserver_guest_powerstate.get_object_ref', return_value=fake_xenapi_ref('VM'))
    mocker.patch('ansible.modules.cloud.xenserver.xenserver_guest_powerstate.gather_vm_params', return_value={})
    mocker.patch('ansible.modules.cloud.xenserver.xenserver_guest_powerstate.gather_vm_facts', return_value=fake_vm_facts)
    set_power_mock = mocker.patch('ansible.modules.cloud.xenserver.xenserver_guest_powerstate.set_vm_power_state',
                                  return_value=(True, "somenewstate"))
    wait_ip_mock = mocker.patch('ansible.modules.cloud.xenserver.xenserver_guest_powerstate.wait_for_vm_ip_address',
                                return_value={})
    with pytest.raises(SystemExit):
        xenserver_guest_powerstate.main()
    result = json.loads(capfd.readouterr()[0])
    # state=present must neither change the power state nor wait for an IP.
    set_power_mock.assert_not_called()
    wait_ip_mock.assert_not_called()
    assert result['changed'] is False
    assert result['instance'] == fake_vm_facts
@pytest.mark.parametrize('patch_ansible_module',
                         testcase_module_params_state_other['params'],
                         ids=testcase_module_params_state_other['ids'],
                         indirect=True)
def test_xenserver_guest_powerstate_other(mocker, patch_ansible_module, capfd, XenAPI, xenserver_guest_powerstate):
    """Verify a full module run for every non-present state: a power state
    change is requested exactly once and no IP wait is attempted."""
    fake_vm_facts = {"fake-vm-fact": True}
    # XenAPI session plumbing.
    mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
    mocked_xenapi.configure_mock(**{
        "pool.get_all.return_value": [fake_xenapi_ref('pool')],
        "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
    })
    mocker.patch('ansible.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
    # Module-level helpers.
    mocker.patch('ansible.modules.cloud.xenserver.xenserver_guest_powerstate.get_object_ref', return_value=fake_xenapi_ref('VM'))
    mocker.patch('ansible.modules.cloud.xenserver.xenserver_guest_powerstate.gather_vm_params', return_value={})
    mocker.patch('ansible.modules.cloud.xenserver.xenserver_guest_powerstate.gather_vm_facts', return_value=fake_vm_facts)
    set_power_mock = mocker.patch(
        'ansible.modules.cloud.xenserver.xenserver_guest_powerstate.set_vm_power_state',
        return_value=(True, "somenewstate"))
    wait_ip_mock = mocker.patch(
        'ansible.modules.cloud.xenserver.xenserver_guest_powerstate.wait_for_vm_ip_address',
        return_value={})
    with pytest.raises(SystemExit):
        xenserver_guest_powerstate.main()
    result = json.loads(capfd.readouterr()[0])
    set_power_mock.assert_called_once()
    wait_ip_mock.assert_not_called()
    assert result['changed'] is True
    assert result['instance'] == fake_vm_facts
@pytest.mark.parametrize('patch_ansible_module',
                         testcase_module_params_wait['params'],
                         ids=testcase_module_params_wait['ids'],
                         indirect=True)
def test_xenserver_guest_powerstate_wait(mocker, patch_ansible_module, capfd, XenAPI, xenserver_guest_powerstate):
    """Verify that wait_for_ip_address=yes triggers the IP wait helper."""
    fake_vm_facts = {"fake-vm-fact": True}
    # XenAPI session plumbing.
    mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
    mocked_xenapi.configure_mock(**{
        "pool.get_all.return_value": [fake_xenapi_ref('pool')],
        "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
    })
    mocker.patch('ansible.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
    # Module-level helpers.
    mocker.patch('ansible.modules.cloud.xenserver.xenserver_guest_powerstate.get_object_ref', return_value=fake_xenapi_ref('VM'))
    mocker.patch('ansible.modules.cloud.xenserver.xenserver_guest_powerstate.gather_vm_params', return_value={})
    mocker.patch('ansible.modules.cloud.xenserver.xenserver_guest_powerstate.gather_vm_facts', return_value=fake_vm_facts)
    mocker.patch(
        'ansible.modules.cloud.xenserver.xenserver_guest_powerstate.set_vm_power_state',
        return_value=(True, "somenewstate"))
    wait_ip_mock = mocker.patch(
        'ansible.modules.cloud.xenserver.xenserver_guest_powerstate.wait_for_vm_ip_address',
        return_value={})
    with pytest.raises(SystemExit):
        xenserver_guest_powerstate.main()
    result = json.loads(capfd.readouterr()[0])
    wait_ip_mock.assert_called_once()
    assert result['instance'] == fake_vm_facts
| gpl-3.0 |
tazo90/lux | setup.py | 1 | 1994 | import os
import json
from setuptools import setup, find_packages
package_name = 'lux'
def read(name):
    """Return the text content of *name*, resolved relative to this file.

    The file is decoded as UTF-8 explicitly (instead of the platform locale
    default) so that metadata files such as ``package.json`` parse the same
    on every system; ``io.open`` keeps this working on Python 2.7 and 3.x.
    """
    import io
    root_dir = os.path.dirname(os.path.abspath(__file__))
    with io.open(os.path.join(root_dir, name), 'r', encoding='utf-8') as f:
        return f.read()
def run():
    """Assemble metadata from package.json and requirements.txt, then setup()."""
    pkg = json.loads(read('package.json'))
    install_requires = []
    dependency_links = []
    # Split requirements into editable ("-e") dependency links and plain
    # version specifiers; "-e ." (the package itself) is skipped entirely.
    for raw in read('requirements.txt').split('\n'):
        if raw.startswith('-e '):
            link = raw[3:].strip()
            if link == '.':
                continue
            dependency_links.append(link)
            # The requirement name is the "#egg=<name>" suffix of the link.
            raw = link.split('=')[1]
        requirement = raw.strip()
        if requirement:
            install_requires.append(requirement)
    setup(name=package_name,
          version=pkg['version'],
          author=pkg['author']['name'],
          author_email=pkg['author']['email'],
          url=pkg['homepage'],
          license=pkg['licenses'][0]['type'],
          description=pkg['description'],
          long_description=read('README.rst'),
          packages=find_packages(exclude=['tests', 'tests.*']),
          include_package_data=True,
          zip_safe=False,
          install_requires=install_requires,
          dependency_links=dependency_links,
          scripts=['bin/luxmake.py'],
          classifiers=['Development Status :: 2 - Pre-Alpha',
                       'Environment :: Web Environment',
                       'Intended Audience :: Developers',
                       'License :: OSI Approved :: BSD License',
                       'Operating System :: OS Independent',
                       'Programming Language :: JavaScript',
                       'Programming Language :: Python',
                       'Programming Language :: Python :: 2.7',
                       'Programming Language :: Python :: 3.3',
                       'Programming Language :: Python :: 3.4',
                       'Topic :: Utilities'])


if __name__ == '__main__':
    run()
| bsd-3-clause |
pierluigiferrari/fcn8s_tensorflow | cityscapesscripts/annotation/cityscapesLabelTool.py | 1 | 109346 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#################
## Import modules
#################
# pyqt for everything graphical
from PyQt4 import QtGui, QtCore
# get command line parameters
import sys
# walk directories
import glob
# access to OS functionality
import os
# (de)serialize config file
import json
# call processes
import subprocess
# get the user name
import getpass
# xml parsing
import xml.etree.ElementTree as ET
# copy text to clipboard
try:
from Tkinter import Tk
except:
from tkinter import Tk
# copy stuff
import copy
#################
## Helper classes
#################
# annotation helper
sys.path.append( os.path.normpath( os.path.join( os.path.dirname( __file__ ) , '..' , 'helpers' ) ) )
from annotation import Point, Annotation, CsObject
from labels import name2label, assureSingleInstanceName
# Helper class that contains the current configuration of the Gui
# This config is loaded when started and saved when leaving
class configuration:
    """Persistent state of the label tool GUI.

    All public attributes are serialized to/from a JSON file via save() and
    load(); load() runs fixConsistency() so stale paths never survive a
    restart.
    """

    # Constructor
    def __init__(self):
        # The filename of the image we currently working on
        self.currentFile = ""
        # The filename of the labels we currently working on
        self.currentLabelFile = ""
        # The filename of the corrections we currently working on
        self.currentCorrectionFile = ""
        # The path where the Cityscapes dataset is located
        self.csPath = ""
        # The path of the images of the currently loaded city
        self.city = ""
        # The name of the currently loaded city
        self.cityName = ""
        # The type of the current annotations
        self.gtType = ""
        # The split, where the currently loaded city belongs to
        self.split = ""
        # The path of the labels. In this folder we expect a folder for each city
        # Within these city folders we expect the label with a filename matching
        # the images, except for the extension
        self.labelPath = ""
        # The path to store correction markings
        self.correctionPath = ""
        # The transparency of the labels over the image
        self.transp = 0.5
        # The zoom toggle
        self.zoom = False
        # The zoom factor
        self.zoomFactor = 1.0
        # The size of the zoom window. Currently there is no setter or getter for that
        self.zoomSize = 400  # px
        # The highlight toggle
        self.highlight = False
        # The highlight label
        self.highlightLabelSelection = ""
        # Screenshot file
        self.screenshotFilename = "%i"
        # Correction mode
        self.correctionMode = False
        # Warn before saving that you are overwriting files
        self.showSaveWarning = True

    # Load from given filename
    def load(self, filename):
        """Restore attributes from the JSON file *filename*, then sanity-check.

        Unknown keys in the file are ignored so a foreign or outdated config
        cannot inject arbitrary attributes. Missing files are ignored.
        """
        if os.path.isfile(filename):
            with open(filename, 'r') as f:
                jsonDict = json.load(f)
            for key in jsonDict:
                # Only adopt keys that match existing attributes.
                if key in self.__dict__:
                    self.__dict__[key] = jsonDict[key]
            self.fixConsistency()

    # Make sure the config is consistent.
    # Automatically called after loading
    def fixConsistency(self):
        """Normalize all paths and reset entries that no longer exist on disk."""
        if self.currentFile:
            self.currentFile = os.path.normpath(self.currentFile)
        if self.currentLabelFile:
            self.currentLabelFile = os.path.normpath(self.currentLabelFile)
        if self.currentCorrectionFile:
            self.currentCorrectionFile = os.path.normpath(self.currentCorrectionFile)
        if self.csPath:
            self.csPath = os.path.normpath(self.csPath)
            if not os.path.isdir(self.csPath):
                self.csPath = ""
        if self.city:
            self.city = os.path.normpath(self.city)
            if not os.path.isdir(self.city):
                self.city = ""
        if self.labelPath:
            self.labelPath = os.path.normpath(self.labelPath)
        if self.correctionPath:
            self.correctionPath = os.path.normpath(self.correctionPath)
        if self.city:
            # Bug fix: this line previously used '==' (a comparison with no
            # effect), so cityName was never updated and the label/correction
            # file checks below compared against a stale city name.
            self.cityName = os.path.basename(self.city)
        # The current image must live inside the current city folder.
        if not os.path.isfile(self.currentFile) or os.path.dirname(self.currentFile) != self.city:
            self.currentFile = ""
        # The current label file must live inside the city's label folder.
        if not os.path.isfile(self.currentLabelFile) or \
           not os.path.isdir( os.path.join(self.labelPath,self.cityName) ) or \
           os.path.dirname(self.currentLabelFile) != os.path.join(self.labelPath,self.cityName):
            self.currentLabelFile = ""
        # The current correction file must live inside the city's correction folder.
        if not os.path.isfile(self.currentCorrectionFile) or \
           not os.path.isdir( os.path.join(self.correctionPath,self.cityName) ) or \
           os.path.dirname(self.currentCorrectionFile) != os.path.join(self.correctionPath,self.cityName):
            self.currentCorrectionFile = ""

    # Save to given filename (using pickle)
    def save(self, filename):
        """Serialize all attributes to *filename* as pretty-printed JSON."""
        with open(filename, 'w') as f:
            f.write(json.dumps(self.__dict__, default=lambda o: o.__dict__, sort_keys=True, indent=4))
def enum(**enums):
    """Return an ad-hoc enumeration class with one attribute per keyword."""
    members = dict(enums)
    return type('Enum', (), members)
class CorrectionBox:
    """A rectangular correction marking on an annotated image.

    Holds a bounding box (QRectF), a free-text annotation, a workflow type
    (see ``types``), and a transient selection flag; (de)serializes itself
    to/from the corrections XML format.
    """
    # Workflow states a correction marking can be in.
    types = enum(TO_CORRECT=1, TO_REVIEW=2, RESOLVED=3, QUESTION=4)
    def __init__(self, rect=None, annotation=""):
        """Create a correction; *rect* is a QRectF in label coordinates."""
        # New corrections always start in the TO_CORRECT state.
        self.type = CorrectionBox.types.TO_CORRECT
        self.bbox = rect
        self.annotation = annotation
        # Whether this correction is currently selected in the GUI.
        self.selected = False
        return
    def get_colour(self):
        """Return the display colour (QColor) for this correction's type."""
        # NOTE(review): falls through and returns None for an unknown type
        # value — verify that callers cope with that.
        if self.type == CorrectionBox.types.TO_CORRECT:
            return QtGui.QColor(255,0,0)
        elif self.type == CorrectionBox.types.TO_REVIEW:
            return QtGui.QColor(255,255,0)
        elif self.type == CorrectionBox.types.RESOLVED:
            return QtGui.QColor(0,255,0)
        elif self.type == CorrectionBox.types.QUESTION:
            return QtGui.QColor(0,0,255)
    def select(self):
        # Mark as selected (no-op if already selected).
        if not self.selected:
            self.selected = True
        return
    def unselect(self):
        # Clear the selection flag (no-op if not selected).
        if self.selected:
            self.selected = False
        return
    # Read the information from the given object node in an XML file
    # The node must have the tag object and contain all expected fields
    def readFromXMLNode(self, correctionNode):
        # Silently ignore nodes that are not <correction> elements.
        if not correctionNode.tag == 'correction':
            return
        typeNode = correctionNode.find('type')
        self.type = int(typeNode.text)
        annotationNode = correctionNode.find('annotation')
        self.annotation = annotationNode.text
        # Reassemble the bounding box from its four child elements.
        bboxNode = correctionNode.find('bbox')
        x = float(bboxNode.find('x').text)
        y = float(bboxNode.find('y').text)
        width = float(bboxNode.find('width').text)
        height = float(bboxNode.find('height').text)
        self.bbox = QtCore.QRectF(x,y,width,height)
    # Append the information to a node of an XML file
    # Creates an object node with all children and appends to the given node
    # Usually the given node is the root
    def appendToXMLNode(self, node):
        # The text/tail newlines below keep the serialized XML human-readable.
        # New correction node
        correctionNode = ET.SubElement(node,'correction')
        correctionNode.tail = "\n"
        correctionNode.text = "\n"
        # Type node
        typeNode = ET.SubElement(correctionNode,'type')
        typeNode.tail = "\n"
        typeNode.text = str(int(self.type))
        # Annotation node
        annotationNode = ET.SubElement(correctionNode,'annotation')
        annotationNode.tail = "\n"
        annotationNode.text = str(self.annotation)
        # Bounding box node; coordinates are rounded to integer pixels.
        bboxNode = ET.SubElement(correctionNode,'bbox')
        bboxNode.text = "\n"
        bboxNode.tail = "\n"
        xNode = ET.SubElement(bboxNode,'x')
        xNode.tail = "\n"
        yNode = ET.SubElement(bboxNode,'y')
        yNode.tail = "\n"
        xNode.text = str(int(round(self.bbox.x())))
        yNode.text = str(int(round(self.bbox.y())))
        wNode = ET.SubElement(bboxNode,'width')
        wNode.tail = "\n"
        hNode = ET.SubElement(bboxNode,'height')
        hNode.tail = "\n"
        wNode.text = str(int(round(self.bbox.width())))
        hNode.text = str(int(round(self.bbox.height())))
#################
## Main GUI class
#################
# The main class which is a QtGui -> Main Window
class CityscapesLabelTool(QtGui.QMainWindow):
#############################
## Construction / Destruction
#############################
# Constructor
    def __init__(self):
        """Initialize all tool state, load the saved config, build the UI,
        and load the previously opened city (if any)."""
        # Construct base class
        super(CityscapesLabelTool, self).__init__()
        # The filename of where the config is saved and loaded
        configDir = os.path.dirname( __file__ )
        self.configFile = os.path.join( configDir , "cityscapesLabelTool.conf" )
        # This is the configuration.
        self.config = configuration()
        self.config.load(self.configFile)
        # for copying text to clipboard
        self.tk = Tk()
        # Other member variables
        # The width that we actually use to show the image
        self.w = 0
        # The height that we actually use to show the image
        self.h = 0
        # The horizontal offset where we start drawing within the widget
        self.xoff = 0
        # The vertical offset where we start drawing withing the widget
        self.yoff = 0
        # A gap that we leave around the image as little border
        self.bordergap = 20
        # The scale that was used, ie
        # self.w = self.scale * self.image.width()
        # self.h = self.scale * self.image.height()
        self.scale = 1.0
        # Filenames of all images in current city
        self.images = []
        # Image extension
        self.imageExt = "_leftImg8bit.png"
        # Ground truth extension
        self.gtExt = "{}_polygons.json"
        # Current image as QImage
        self.image = QtGui.QImage()
        # Index of the current image within the city folder
        self.idx = 0
        # All annotated objects in current image
        self.annotation = None
        # The XML ElementTree representing the corrections for the current image
        self.correctionXML = None
        # A list of changes that we did on the current annotation
        # Each change is simply a descriptive string
        self.changes = []
        # The current object the mouse points to. It's index in self.annotation.objects
        self.mouseObj = -1
        # The currently selected objects. Their index in self.annotation.objects
        self.selObjs = []
        # The objects that are highlighted. List of object instances
        self.highlightObjs = []
        # A label that is selected for highlighting
        self.highlightObjLabel = None
        # Texture for highlighting
        self.highlightTexture = None
        # The position of the mouse
        self.mousePos = None
        # TODO: NEEDS BETTER EXPLANATION/ORGANISATION
        self.mousePosOrig = None
        # The position of the mouse scaled to label coordinates
        self.mousePosScaled = None
        # If the mouse is outside of the image
        self.mouseOutsideImage = True
        # The position of the mouse upon enabling the zoom window
        self.mousePosOnZoom = None
        # The button state of the mouse
        self.mouseButtons = 0
        # A list of objects with changed layer
        self.changedLayer = []
        # A list of objects with changed polygon
        self.changedPolygon = []
        # A polygon that is drawn by the user
        self.drawPoly = QtGui.QPolygonF()
        # Treat the polygon as being closed
        self.drawPolyClosed = False
        # A point of this poly that is dragged
        self.draggedPt = -1
        # A list of toolbar actions that need an image
        self.actImage = []
        # A list of toolbar actions that need an image that is not the first
        self.actImageNotFirst = []
        # A list of toolbar actions that need an image that is not the last
        self.actImageNotLast = []
        # A list of toolbar actions that need changes
        self.actChanges = []
        # A list of toolbar actions that need a drawn polygon or selected objects
        self.actPolyOrSelObj = []
        # A list of toolbar actions that need a closed drawn polygon
        self.actClosedPoly = []
        # A list of toolbar actions that need selected objects
        self.actSelObj = []
        # A list of toolbar actions that need a single active selected object
        self.singleActSelObj = []
        # Toggle status of auto-doing screenshots
        self.screenshotToggleState = False
        # Toggle status of the play icon
        self.playState = False
        # Temporary zero transparency
        self.transpTempZero = False
        # Toggle correction mode on and off
        self.correctAction = []
        # Correction boxes of the current image
        self.corrections = []
        # Index of the currently selected correction (-1 = none)
        self.selected_correction = -1
        # Correction bbox currently being dragged open, if any
        self.in_progress_bbox = None
        self.in_progress_correction = None
        # NOTE(review): this shadows QWidget.mousePressEvent — presumably
        # reassigned to a handler later in the class; verify.
        self.mousePressEvent = []
        # Default label
        self.defaultLabel = 'static'
        if not self.defaultLabel in name2label:
            print( 'The {0} label is missing in the internal label definitions.'.format(self.defaultLabel) )
            return
        # Last selected label
        self.lastLabel = self.defaultLabel
        # Setup the GUI
        self.initUI()
        # Initially clear stuff
        self.deselectAllObjects()
        self.clearPolygon()
        self.clearChanges()
        # If we already know a city from the saved config -> load it
        self.loadCity()
        self.imageChanged()
# Destructor
    def __del__(self):
        # Persist the GUI state on teardown.
        # NOTE(review): __del__ is not guaranteed to run at interpreter
        # shutdown — consider saving from closeEvent() for reliability.
        self.config.save(self.configFile)
# Construct everything GUI related. Called by constructor
    def initUI(self):
        """Build the toolbar with all actions, the status bar, mouse tracking,
        and the main window. Actions are registered in the self.act* lists so
        they can later be enabled/disabled depending on tool state."""
        # Create a toolbar
        self.toolbar = self.addToolBar('Tools')
        # Add the tool buttons
        iconDir = os.path.join( os.path.dirname(__file__) , 'icons' )
        # Loading a new city
        loadAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'open.png' )), '&Tools', self)
        loadAction.setShortcuts(['o'])
        self.setTip( loadAction, 'Open city' )
        loadAction.triggered.connect( self.selectCity )
        self.toolbar.addAction(loadAction)
        # Open previous image
        backAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'back.png')), '&Tools', self)
        backAction.setShortcut('left')
        backAction.setStatusTip('Previous image')
        backAction.triggered.connect( self.prevImage )
        self.toolbar.addAction(backAction)
        self.actImageNotFirst.append(backAction)
        # Open next image
        nextAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'next.png')), '&Tools', self)
        nextAction.setShortcut('right')
        self.setTip( nextAction, 'Next image' )
        nextAction.triggered.connect( self.nextImage )
        self.toolbar.addAction(nextAction)
        self.actImageNotLast.append(nextAction)
        # Play
        playAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'play.png')), '&Tools', self)
        playAction.setShortcut(' ')
        playAction.setCheckable(True)
        playAction.setChecked(False)
        self.setTip( playAction, 'Play all images' )
        playAction.triggered.connect( self.playImages )
        self.toolbar.addAction(playAction)
        self.actImageNotLast.append(playAction)
        self.playAction = playAction
        # Select image
        selImageAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'shuffle.png' )), '&Tools', self)
        selImageAction.setShortcut('i')
        self.setTip( selImageAction, 'Select image' )
        selImageAction.triggered.connect( self.selectImage )
        self.toolbar.addAction(selImageAction)
        self.actImage.append(selImageAction)
        # Save the current image
        saveAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'save.png' )), '&Tools', self)
        saveAction.setShortcuts('s')
        self.setTip( saveAction,'Save changes' )
        saveAction.triggered.connect( self.save )
        self.toolbar.addAction(saveAction)
        self.actChanges.append(saveAction)
        # Clear the currently edited polygon
        clearPolAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'clearpolygon.png' )), '&Tools', self)
        clearPolAction.setShortcuts(['q','Esc'])
        self.setTip( clearPolAction, 'Clear polygon' )
        clearPolAction.triggered.connect( self.clearPolygonAction )
        self.toolbar.addAction(clearPolAction)
        self.actPolyOrSelObj.append(clearPolAction)
        # Create new object from drawn polygon
        newObjAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'newobject.png' )), '&Tools', self)
        newObjAction.setShortcuts(['n'])
        self.setTip( newObjAction, 'New object' )
        newObjAction.triggered.connect( self.newObject )
        self.toolbar.addAction(newObjAction)
        self.actClosedPoly.append(newObjAction)
        # Delete the currently selected object
        deleteObjectAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'deleteobject.png' )), '&Tools', self)
        deleteObjectAction.setShortcuts(['d','delete'])
        self.setTip( deleteObjectAction, 'Delete object' )
        deleteObjectAction.triggered.connect( self.deleteObject )
        self.toolbar.addAction(deleteObjectAction)
        self.actSelObj.append(deleteObjectAction)
        # Undo changes in current image, ie. reload labels from file
        undoAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'undo.png' )), '&Tools', self)
        undoAction.setShortcuts('u')
        self.setTip( undoAction,'Undo all unsaved changes' )
        undoAction.triggered.connect( self.undo )
        self.toolbar.addAction(undoAction)
        self.actChanges.append(undoAction)
        # Modify the label of a selected object
        labelAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'modify.png' )), '&Tools', self)
        labelAction.setShortcuts(['m','l'])
        self.setTip( labelAction, 'Modify label' )
        labelAction.triggered.connect( self.modifyLabel )
        self.toolbar.addAction(labelAction)
        self.actSelObj.append(labelAction)
        # Move selected object a layer up
        layerUpAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'layerup.png' )), '&Tools', self)
        layerUpAction.setShortcuts(['Up'])
        self.setTip( layerUpAction, 'Move object a layer up' )
        layerUpAction.triggered.connect( self.layerUp )
        self.toolbar.addAction(layerUpAction)
        self.singleActSelObj.append(layerUpAction)
        # Move selected object a layer down
        layerDownAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'layerdown.png' )), '&Tools', self)
        layerDownAction.setShortcuts(['Down'])
        self.setTip( layerDownAction, 'Move object a layer down' )
        layerDownAction.triggered.connect( self.layerDown )
        self.toolbar.addAction(layerDownAction)
        self.singleActSelObj.append(layerDownAction)
        # Enable/disable zoom. Toggle button
        zoomAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'zoom.png' )), '&Tools', self)
        zoomAction.setShortcuts(['z'])
        zoomAction.setCheckable(True)
        zoomAction.setChecked(self.config.zoom)
        self.setTip( zoomAction, 'Enable/disable permanent zoom' )
        zoomAction.toggled.connect( self.zoomToggle )
        self.toolbar.addAction(zoomAction)
        self.actImage.append(zoomAction)
        # Highlight objects of a certain class
        highlightAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'highlight.png' )), '&Tools', self)
        highlightAction.setShortcuts(['g'])
        highlightAction.setCheckable(True)
        highlightAction.setChecked(self.config.highlight)
        self.setTip( highlightAction, 'Enable/disable highlight of certain object class' )
        highlightAction.toggled.connect( self.highlightClassToggle )
        self.toolbar.addAction(highlightAction)
        self.actImage.append(highlightAction)
        # Decrease transparency
        minusAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'minus.png' )), '&Tools', self)
        minusAction.setShortcut('-')
        self.setTip( minusAction, 'Decrease transparency' )
        minusAction.triggered.connect( self.minus )
        self.toolbar.addAction(minusAction)
        # Increase transparency
        plusAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'plus.png' )), '&Tools', self)
        plusAction.setShortcut('+')
        self.setTip( plusAction, 'Increase transparency' )
        plusAction.triggered.connect( self.plus )
        self.toolbar.addAction(plusAction)
        # Take a screenshot
        screenshotAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'screenshot.png' )), '&Tools', self)
        screenshotAction.setShortcut('t')
        self.setTip( screenshotAction, 'Take a screenshot' )
        screenshotAction.triggered.connect( self.screenshot )
        self.toolbar.addAction(screenshotAction)
        self.actImage.append(screenshotAction)
        # Take a screenshot in each loaded frame
        screenshotToggleAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'screenshotToggle.png' )), '&Tools', self)
        screenshotToggleAction.setShortcut('Ctrl+t')
        screenshotToggleAction.setCheckable(True)
        screenshotToggleAction.setChecked(False)
        self.setTip( screenshotToggleAction, 'Take a screenshot in each loaded frame' )
        screenshotToggleAction.toggled.connect( self.screenshotToggle )
        self.toolbar.addAction(screenshotToggleAction)
        self.actImage.append(screenshotToggleAction)
        # Display path to current image in message bar
        displayFilepathAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'filepath.png' )), '&Tools', self)
        displayFilepathAction.setShortcut('f')
        self.setTip( displayFilepathAction, 'Show path to current image' )
        displayFilepathAction.triggered.connect( self.displayFilepath )
        self.toolbar.addAction(displayFilepathAction)
        # Open correction mode; the red icon variant signals that the mode is
        # active when restored from the saved configuration.
        self.correctAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'checked6.png' )), '&Tools', self)
        self.correctAction.setShortcut('c')
        self.correctAction.setCheckable(True)
        self.correctAction.setChecked(self.config.correctionMode)
        if self.config.correctionMode:
            self.correctAction.setIcon(QtGui.QIcon(os.path.join( iconDir , 'checked6_red.png' )))
        self.setTip( self.correctAction, 'Toggle correction mode' )
        self.correctAction.triggered.connect( self.toggleCorrectionMode )
        self.toolbar.addAction(self.correctAction)
        # Display help message
        helpAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'help19.png' )), '&Tools', self)
        helpAction.setShortcut('h')
        self.setTip( helpAction, 'Help' )
        helpAction.triggered.connect( self.displayHelpMessage )
        self.toolbar.addAction(helpAction)
        # Close the application
        exitAction = QtGui.QAction(QtGui.QIcon( os.path.join( iconDir , 'exit.png' )), '&Tools', self)
        #exitAction.setShortcuts(['Esc'])
        self.setTip( exitAction, 'Exit' )
        exitAction.triggered.connect( self.close )
        self.toolbar.addAction(exitAction)
        # The default text for the status bar
        self.defaultStatusbar = 'Ready'
        # Create a statusbar. Init with default
        self.statusBar().showMessage( self.defaultStatusbar )
        # Enable mouse move events
        self.setMouseTracking(True)
        self.toolbar.setMouseTracking(True)
        # Open in full screen
        screenShape = QtGui.QDesktopWidget().screenGeometry()
        self.resize(screenShape.width(), screenShape.height())
        # Set a title
        self.applicationTitle = 'Cityscapes Label Tool v1.0'
        self.setWindowTitle(self.applicationTitle)
        # And show the application
        self.show()
#############################
## Toolbar call-backs
#############################
# The user pressed "select city"
# The purpose of this method is to set these configuration attributes:
# - self.config.city : path to the folder containing the images to annotate
# - self.config.cityName : name of this folder, i.e. the city
# - self.config.labelPath : path to the folder to store the polygons
# - self.config.correctionPath : path to store the correction boxes in
# - self.config.gtType : type of ground truth, e.g. gtFine or gtCoarse
# - self.config.split : type of split, e.g. train, val, test
# The current implementation uses the environment variable 'CITYSCAPES_DATASET'
# to determine the dataset root folder and search available data within.
# Annotation types are required to start with 'gt', e.g. gtFine or gtCoarse.
# To add your own annotations you could create a folder gtCustom with similar structure.
#
# However, this implementation could be easily changed to a completely different folder structure.
# Just make sure to specify all three paths and a descriptive name as 'cityName'.
# The gtType and split can be left empty.
    def selectCity(self):
        """Show a dialog to pick a (split, gt type, city) triple and load it."""
        # Reset the status bar to this message when leaving
        restoreMessage = self.statusBar().currentMessage()
        # Locate the dataset root: saved config first, then the
        # CITYSCAPES_DATASET environment variable, then two levels up.
        csPath = self.config.csPath
        if not csPath or not os.path.isdir(csPath):
            if 'CITYSCAPES_DATASET' in os.environ:
                csPath = os.environ['CITYSCAPES_DATASET']
            else:
                csPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','..')
        # Collect every city folder found under <root>/gt*/<split>/.
        availableCities = []
        annotations = sorted( glob.glob( os.path.join( csPath , 'gt*' ) ) )
        annotations = [ os.path.basename(a) for a in annotations ]
        splits = [ "train_extra" , "train" , "val" , "test" ]
        for gt in annotations:
            for split in splits:
                cities = glob.glob(os.path.join(csPath, gt, split, '*'))
                cities.sort()
                availableCities.extend( [ (split,gt,os.path.basename(c)) for c in cities if os.path.isdir(c)] )
        # List of possible labels
        items = [split + ", " + gt + ", " + city for (split,gt,city) in availableCities]
        # Preselect the previously loaded city if it is still available.
        previousItem = self.config.split + ", " + self.config.gtType + ", " + self.config.cityName
        default = 0
        if previousItem in items:
            default = items.index(previousItem)
        # Specify title
        dlgTitle = "Select city"
        # NOTE(review): the next two assignments are dead — immediately
        # overwritten below.
        message = dlgTitle
        question = dlgTitle
        message = "Select city for editing"
        question = "Which city would you like to edit?"
        self.statusBar().showMessage(message)
        if items:
            # Create and wait for dialog
            (item, ok) = QtGui.QInputDialog.getItem(self, dlgTitle, question, items, default, False)
            # Restore message
            self.statusBar().showMessage( restoreMessage )
            if ok and item:
                # Update all config paths for the chosen selection, then reload.
                (split,gt,city) = [ str(i) for i in item.split(', ') ]
                self.config.city = os.path.normpath( os.path.join( csPath, "leftImg8bit" , split , city ) )
                self.config.cityName = city
                self.config.labelPath = os.path.normpath( os.path.join( csPath, gt , split , city ) )
                self.config.correctionPath = os.path.normpath( os.path.join( csPath, gt+'_corrections', split , city ) )
                self.config.gtType = gt
                self.config.split = split
                self.deselectAllObjects()
                self.clearPolygon()
                self.loadCity()
                self.imageChanged()
        else:
            # No data found at all -> explain how to point the tool at it, then quit.
            warning = ""
            warning += "The data was not found. Please:\n\n"
            warning += " - make sure the scripts folder is in the Cityscapes root folder\n"
            warning += "or\n"
            warning += " - set CITYSCAPES_DATASET to the Cityscapes root folder\n"
            warning += "   e.g. 'export CITYSCAPES_DATASET=<root_path>'\n"
            reply = QtGui.QMessageBox.information(self, "ERROR!", warning, QtGui.QMessageBox.Ok)
            if reply == QtGui.QMessageBox.Ok:
                sys.exit()
        return
# Switch to previous image in file list
# Load the image
# Load its labels
# Update the mouse selection
# View
def prevImage(self):
if not self.images:
return
if self.idx > 0:
if self.checkAndSave():
self.idx -= 1
self.imageChanged()
return
# Switch to next image in file list
# Load the image
# Load its labels
# Update the mouse selection
# View
def nextImage(self):
if not self.images:
return
if self.idx < len(self.images)-1:
if self.checkAndSave():
self.idx += 1
self.imageChanged()
elif self.playState:
self.playState = False
self.playAction.setChecked(False)
if self.playState:
QtCore.QTimer.singleShot(0, self.nextImage)
return
# Play images, i.e. auto-switch to next image
def playImages(self, status):
self.playState = status
if self.playState:
QtCore.QTimer.singleShot(0, self.nextImage)
# switch correction mode on and off
def toggleCorrectionMode(self):
if not self.config.correctionMode:
self.config.correctionMode = True
iconDir = os.path.join( os.path.dirname(sys.argv[0]) , 'icons' )
self.correctAction.setIcon(QtGui.QIcon(os.path.join( iconDir , 'checked6_red.png' )))
else:
self.config.correctionMode = False
iconDir = os.path.join( os.path.dirname(sys.argv[0]) , 'icons' )
self.correctAction.setIcon(QtGui.QIcon(os.path.join( iconDir , 'checked6.png' )))
self.update()
return
# Switch to a selected image of the file list
# Ask the user for an image
# Load the image
# Load its labels
# Update the mouse selection
# View
def selectImage(self):
if not self.images:
return
dlgTitle = "Select image to load"
self.statusBar().showMessage(dlgTitle)
items = QtCore.QStringList( [ "{}: {}".format(num,os.path.basename(i)) for (num,i) in enumerate(self.images) ] )
(item, ok) = QtGui.QInputDialog.getItem(self, dlgTitle, "Image", items, self.idx, False)
if (ok and item):
idx = items.indexOf(item)
if idx != self.idx and self.checkAndSave():
self.idx = idx
self.imageChanged()
else:
# Restore the message
self.statusBar().showMessage( self.defaultStatusbar )
# Save labels
def save(self):
# Status
saved = False
# Message to show at the status bar when done
message = ""
# Only save if there are changes, labels, an image filename and an image
if self.changes and (self.annotation or self.corrections) and self.config.currentFile and self.image:
if self.annotation:
# Determine the filename
# If we have a loaded label file, then this is also the filename
filename = self.config.currentLabelFile
# If not, then generate one
if not filename:
filename = self.getLabelFilename(True)
if filename:
proceed = True
# warn user that he is overwriting an old file
if os.path.isfile(filename) and self.config.showSaveWarning:
msgBox = QtGui.QMessageBox(self)
msgBox.setWindowTitle("Overwriting")
msgBox.setText("Saving overwrites the original file and it cannot be reversed. Do you want to continue?")
msgBox.addButton(QtGui.QMessageBox.Cancel)
okAndNeverAgainButton = msgBox.addButton('OK and never ask again',QtGui.QMessageBox.AcceptRole)
okButton = msgBox.addButton(QtGui.QMessageBox.Ok)
msgBox.setDefaultButton(QtGui.QMessageBox.Ok)
msgBox.setIcon(QtGui.QMessageBox.Warning)
msgBox.exec_()
# User clicked on "OK"
if msgBox.clickedButton() == okButton:
pass
# User clicked on "OK and never ask again"
elif msgBox.clickedButton() == okAndNeverAgainButton:
self.config.showSaveWarning = False
else:
# Do nothing
message += "Nothing saved, no harm has been done. "
proceed = False
# Save JSON file
if proceed:
try:
self.annotation.toJsonFile(filename)
saved = True
message += "Saved labels to {0} ".format(filename)
except IOError as e:
message += "Error writing labels to {0}. Message: {1} ".format( filename, e.strerror )
else:
message += "Error writing labels. Cannot generate a valid filename. "
if self.corrections or self.config.currentCorrectionFile:
# Determine the filename
# If we have a loaded label file, then this is also the filename
filename = self.config.currentCorrectionFile
# If not, then generate one
if not filename:
filename = self.getCorrectionFilename(True)
if filename:
# Prepare the root
root = ET.Element('correction')
root.text = "\n"
root.tail = "\n"
# Add the filename of the image that is annotated
filenameNode = ET.SubElement(root,'filename')
filenameNode.text = os.path.basename(self.config.currentFile)
filenameNode.tail = "\n"
# Add the folder where this image is located in
# For compatibility with the LabelMe Tool, we need to use the folder
# StereoDataset/cityName
folderNode = ET.SubElement(root,'folder')
folderNode.text = "StereoDataset/" + self.config.cityName
folderNode.tail = "\n"
# The name of the tool. Here, we do not follow the output of the LabelMe tool,
# since this is crap anyway
sourceNode = ET.SubElement(root,'source')
sourceNode.text = "\n"
sourceNode.tail = "\n"
sourceImageNode = ET.SubElement(sourceNode,'sourceImage')
sourceImageNode.text = "Label Cities"
sourceImageNode.tail = "\n"
sourceAnnotationNode = ET.SubElement(sourceNode,'sourceAnnotation')
sourceAnnotationNode.text = "mcLabelTool"
sourceAnnotationNode.tail = "\n"
# The image size
imagesizeNode = ET.SubElement(root,'imagesize')
imagesizeNode.text = "\n"
imagesizeNode.tail = "\n"
nrowsNode = ET.SubElement(imagesizeNode,'nrows')
nrowsNode.text = str(self.image.height())
nrowsNode.tail = "\n"
ncolsNode = ET.SubElement(imagesizeNode,'ncols')
ncolsNode.text = str(self.image.height())
ncolsNode.tail = "\n"
# Add all objects
for correction in self.corrections:
correction.appendToXMLNode(root)
# Create the actual XML tree
self.correctionXML = ET.ElementTree(root)
# Save XML file
try:
self.correctionXML.write(filename)
saved = True
message += "Saved corrections to {0} ".format(filename)
except IOError as e:
message += "Error writing corrections to {0}. Message: {1} ".format( filename, e.strerror )
else:
message += "Error writing corrections. Cannot generate a valid filename. "
# Clear changes
if saved:
self.clearChanges()
else:
message += "Nothing to save "
saved = True
# Show the status message
self.statusBar().showMessage(message)
return saved
# Undo changes, ie. reload labels
def undo(self):
# check if we really want to do this in case there are multiple changes
if len( self.changes ) > 1:
# Backup of status message
restoreMessage = self.statusBar().currentMessage()
# Create the dialog
dlgTitle = "Undo changes?"
self.statusBar().showMessage(dlgTitle)
text = "Do you want to undo the following changes?\n"
for c in self.changes:
text += "- " + c + '\n'
buttons = QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel
ret = QtGui.QMessageBox.question(self, dlgTitle, text, buttons, QtGui.QMessageBox.Ok )
proceed = False
# If the user selected yes -> undo
if ret == QtGui.QMessageBox.Ok:
proceed = True
self.statusBar().showMessage( restoreMessage )
# If we do not proceed -> return
if not proceed:
return
# Clear labels to force a reload
self.annotation = None
# Reload
self.imageChanged()
# Clear the drawn polygon and update
def clearPolygonAction(self):
self.deselectAllObjects()
self.clearPolygon()
self.update()
# Create a new object from the current polygon
def newObject(self):
# Default label
label = self.lastLabel
# Ask the user for a label
(label, ok) = self.getLabelFromUser( label )
if ok and label:
# Append and create the new object
self.appendObject( label , self.drawPoly )
# Clear the drawn polygon
self.deselectAllObjects()
self.clearPolygon()
# Default message
self.statusBar().showMessage( self.defaultStatusbar )
# Set as default label for next time
self.lastLabel = label
# Redraw
self.update()
# Delete the currently selected object
def deleteObject(self):
# Cannot do anything without a selected object
if not self.selObjs:
return
# Cannot do anything without labels
if not self.annotation:
return
for selObj in self.selObjs:
# The selected object that is deleted
obj = self.annotation.objects[selObj]
# Delete
obj.delete()
# Save changes
self.addChange( "Deleted object {0} with label {1}".format( obj.id, obj.label ) )
# Clear polygon
self.deselectAllObjects()
self.clearPolygon()
# Redraw
self.update()
# Modify the label of a selected object
def modifyLabel(self):
# Cannot do anything without labels
if not self.annotation:
return
# Cannot do anything without a selected object
if not self.selObjs:
return
# The last selected object
obj = self.annotation.objects[self.selObjs[-1]]
# default label
defaultLabel = obj.label
defaultId = -1
# If there is only one object the dialog text can be improved
if len(self.selObjs) == 1:
defaultId = obj.id
(label, ok) = self.getLabelFromUser( defaultLabel , defaultId )
if ok and label:
for selObj in self.selObjs:
# The selected object that is modified
obj = self.annotation.objects[selObj]
# Save changes
if obj.label != label:
self.addChange( "Set label {0} for object {1} with previous label {2}".format( label, obj.id, obj.label ) )
obj.label = label
obj.updateDate()
# Update
self.update()
# Move object a layer up
def layerUp(self):
# Change layer
self.modifyLayer(+1)
# Update
self.update()
# Move object a layer down
def layerDown(self):
# Change layer
self.modifyLayer(-1)
# Update
self.update()
# Toggle zoom
def zoomToggle(self, status):
self.config.zoom = status
if status :
self.mousePosOnZoom = self.mousePos
self.update()
# Toggle highlight
def highlightClassToggle(self, status):
if status :
defaultLabel = ""
if self.config.highlightLabelSelection and self.config.highlightLabelSelection in name2label:
defaultLabel = self.config.highlightLabelSelection
(label, ok) = self.getLabelFromUser( defaultLabel )
if ok and label:
self.config.highlightLabelSelection = label
else:
status = False
self.config.highlight = status
self.update()
# Increase label transparency
def minus(self):
self.config.transp = max(self.config.transp-0.1,0.0)
self.update()
def displayFilepath(self):
self.statusBar().showMessage("Current image: {0}".format( self.config.currentFile ))
self.update()
# Decrease label transparency
def plus(self):
self.config.transp = min(self.config.transp+0.1,1.0)
self.update()
# Take a screenshot
def screenshot(self):
# Get a filename for saving
dlgTitle = "Get screenshot filename"
filter = "Images (*.png *.xpm *.jpg)"
answer = QtGui.QFileDialog.getSaveFileName(self, dlgTitle, self.config.screenshotFilename,filter, options=QtGui.QFileDialog.DontUseNativeDialog)
if answer:
self.config.screenshotFilename = str(answer)
else:
return
# Actually make the screenshot
self.doScreenshot()
# Toggle auto-making of screenshots
def screenshotToggle(self, status):
self.screenshotToggleState = status
if status:
self.screenshot()
    def displayHelpMessage(self):
        """Show a modal "about" dialog with usage instructions and key bindings."""
        message = self.applicationTitle + "\n\n"
        message += "INSTRUCTIONS\n"
        message += " - press open (left button) to select a city from drop-down menu\n"
        message += " - browse images and edit labels using\n"
        message += "   the toolbar buttons (check tooltips) and the controls below\n"
        message += " - note that the editing happens in-place;\n"
        message += "   if you want to annotate your own images or edit a custom\n"
        message += "   set of labels, check (and modify) the code of the method 'loadCity'\n"
        message += " - note that this tool modifys the JSON polygon files, but\n"
        message += "   does not create or update the pngs; for the latter use\n"
        message += "   the preparation tools that come with this tool box.\n"
        message += "\n"
        message += "CONTROLS\n"
        message += " - highlight objects [move mouse]\n"
        message += " - draw new polygon\n"
        message += "     - start drawing a polygon [left click]\n"
        message += "     - add point to open polygon [left click]\n"
        message += "     - delete last added point [Backspace]\n"
        message += "     - close polygon [left click on first point]\n"
        message += " - select closed polygon, existing object [Ctrl + left click]\n"
        message += "     - move point [left click and hold on point, move mouse]\n"
        message += "     - add point [click on edge]\n"
        message += "     - delete point from polygon [Shift + left click on point]\n"
        message += "     - deselect polygon [Q]\n"
        message += " - select multiple polygons [Ctrl + left click]\n"
        message += " - intersect/merge two polygons: draw new polygon, then\n"
        message += "     - intersect [Shift + left click on existing polygon]\n"
        message += "     - merge [Alt + left click on existing polygon]\n"
        message += " - open zoom window [Z or hold down right mouse button]\n"
        message += "     - zoom in/out [mousewheel]\n"
        message += "     - enlarge/shrink zoom window [shift+mousewheel]\n"
        message += " - start correction mode [C]\n"
        message += "     - draw a correction box [left click and hold, move, release]\n"
        message += "     - set box type [1,2,3,4]\n"
        message += "     - previous/next box [E,R]\n"
        message += "     - delete box [D]\n"
        message += "     - modify text, use ascii only [M]\n"
        QtGui.QMessageBox.about(self, "HELP!", message)
        self.update()
# Close the application
def closeEvent(self,event):
if self.checkAndSave():
event.accept()
else:
event.ignore()
#############################
## Custom events
#############################
    def imageChanged(self):
        """React to a switch of the current image: reset state, reload data, redraw.

        The ordering of the calls below is significant: corrections/polygon
        state is cleared before the image, labels and corrections are loaded,
        and the GUI is refreshed last.
        """
        # Clear corrections
        self.corrections = []
        self.selected_correction = -1
        # Clear the polygon
        self.deselectAllObjects()
        self.clearPolygon()
        # Load the image at the current index
        self.loadImage()
        # Load its labels if available
        self.loadLabels()
        # Load its corrections if available
        self.loadCorrections()
        # Update the object the mouse points to
        self.updateMouseObject()
        # Update the GUI
        self.update()
        # Save screenshot if set
        if self.screenshotToggleState:
            self.doScreenshot()
#############################
## File I/O
#############################
# Load the currently selected city if possible
def loadCity(self):
# Search for all *.pngs to get the image list
self.images = []
if os.path.isdir(self.config.city):
self.images = glob.glob( os.path.join( self.config.city , '*' + self.imageExt ) )
self.images.sort()
if self.config.currentFile in self.images:
self.idx = self.images.index(self.config.currentFile)
else:
self.idx = 0
# Load the currently selected image
# Does only load if not previously loaded
# Does not refresh the GUI
def loadImage(self):
success = False
message = self.defaultStatusbar
if self.images:
filename = self.images[self.idx]
filename = os.path.normpath( filename )
if not self.image.isNull() and filename == self.config.currentFile:
success = True
else:
self.image = QtGui.QImage(filename)
if self.image.isNull():
message = "Failed to read image: {0}".format( filename )
else:
message = "Read image: {0}".format( filename )
self.config.currentFile = filename
success = True
# Update toolbar actions that need an image
for act in self.actImage:
act.setEnabled(success)
for act in self.actImageNotFirst:
act.setEnabled(success and self.idx > 0)
for act in self.actImageNotLast:
act.setEnabled(success and self.idx < len(self.images)-1)
self.statusBar().showMessage(message)
# Load the labels from file
# Only loads if they exist
# Otherwise the filename is stored and that's it
    def loadLabels(self):
        """Load the annotation JSON for the current image, if it exists.

        Clears the annotation when there is no label file; parse errors are
        reported via the status bar.
        """
        filename = self.getLabelFilename()
        if not filename or not os.path.isfile(filename):
            self.clearAnnotation()
            return
        # If we have everything and the filename did not change, then we are good
        # NOTE(review): this reads self.currentLabelFile while save() uses
        # self.config.currentLabelFile — presumably two distinct attributes;
        # confirm they are kept in sync.
        if self.annotation and filename == self.currentLabelFile:
            return
        # Clear the current labels first
        self.clearAnnotation()
        try:
            self.annotation = Annotation()
            self.annotation.fromJsonFile(filename)
        except IOError as e:
            # This is the error if the file does not exist
            message = "Error parsing labels in {0}. Message: {1}".format( filename, e.strerror )
            self.statusBar().showMessage(message)
        # Remember the filename loaded
        self.currentLabelFile = filename
        # Remember the status bar message to restore it later
        restoreMessage = self.statusBar().currentMessage()
        # Restore the message
        self.statusBar().showMessage( restoreMessage )
# Load the labels from file
# Only loads if they exist
# Otherwise the filename is stored and that's it
    def loadCorrections(self): #TODO
        """Load correction boxes (XML) for the current image, if present.

        A missing correction file is tolerated silently; parse errors are
        reported via the status bar. The first box found is selected.
        """
        filename = self.getCorrectionFilename()
        if not filename:
            self.clearCorrections()
            return
        # If we have everything and the filename did not change, then we are good
        if self.correctionXML and self.corrections and filename == self.config.currentCorrectionFile:
            return
        # Clear the current labels first
        self.clearCorrections()
        # We do not always expect to have corrections, therefore prevent a failure due to missing file
        if not os.path.isfile(filename):
            return
        try:
            # Try to parse the XML file
            self.correctionXML = ET.parse( filename )
        except IOError as e:
            # This is the error if the file does not exist
            message = "Error parsing corrections in {0}. Message: {1}".format( filename, e.strerror )
            self.statusBar().showMessage(message)
            self.correctionXML = []
            return
        except ET.ParseError as e:
            # This is the error if the content is no valid XML
            message = "Error parsing corrections in {0}. Message: {1}".format( filename, e )
            self.statusBar().showMessage(message)
            self.correctionXML = []
            return
        # Remember the filename loaded
        self.config.currentCorrectionFile = filename
        # Remember the status bar message to restore it later
        restoreMessage = self.statusBar().currentMessage()
        # Iterate through all objects in the XML
        root = self.correctionXML.getroot()
        for i, objNode in enumerate(root.findall('correction')):
            # Instantiate a new object and read the XML node
            obj = CorrectionBox()
            obj.readFromXMLNode( objNode )
            # Select the first box by default
            if i == 0:
                self.selected_correction = 0
                obj.select()
            # Append the object to our list of labels
            self.corrections.append(obj)
        # Restore the message
        self.statusBar().showMessage( restoreMessage )
def modify_correction_type(self, correction_type):
if self.selected_correction >= 0:
self.corrections[self.selected_correction].type = correction_type
self.addChange( "Modified correction type.")
self.update()
return
def delete_selected_annotation(self):
if self.selected_correction >= 0 and self.config.correctionMode:
del self.corrections[self.selected_correction]
if self.selected_correction == len(self.corrections):
self.selected_correction = self.selected_correction - 1
if self.selected_correction >= 0:
self.corrections[self.selected_correction].select()
self.addChange( "Deleted correction.")
self.update()
return
def modify_correction_description(self):
if self.selected_correction >= 0 and self.config.correctionMode:
description = QtGui.QInputDialog.getText(self, "Modify Error Description", "Please describe the labeling error briefly.",
text = self.corrections[self.selected_correction].annotation)
if description[1]:
self.corrections[self.selected_correction].annotation = description[0]
self.addChange( "Changed correction description.")
self.update()
return
def select_next_correction(self):
if self.selected_correction >= 0:
self.corrections[self.selected_correction].unselect()
if self.selected_correction == (len(self.corrections) - 1) :
self.selected_correction = 0
else :
self.selected_correction = self.selected_correction + 1
self.corrections[self.selected_correction].select()
self.update()
return
def select_previous_correction(self):
if self.selected_correction >= 0 :
self.corrections[self.selected_correction].unselect()
if self.selected_correction == 0 :
self.selected_correction = (len(self.corrections) - 1)
else :
self.selected_correction = self.selected_correction - 1
self.corrections[self.selected_correction].select()
self.update()
return
#############################
## Drawing
#############################
# This method is called when redrawing everything
# Can be manually triggered by self.update()
# Note that there must not be any other self.update within this method
# or any methods that are called within
    def paintEvent(self, event):
        """Redraw everything: image, label overlay, polygons, boxes, zoom.

        Triggered by Qt or manually via self.update(). Must not call
        self.update() (directly or transitively) to avoid repaint loops.
        The draw order below is significant.
        """
        # Create a QPainter that can perform draw actions within a widget or image
        qp = QtGui.QPainter()
        # Begin drawing in the application widget
        qp.begin(self)
        # Update scale
        self.updateScale(qp)
        # Determine the object ID to highlight
        self.getHighlightedObject(qp)
        # Draw the image first
        self.drawImage(qp)
        # Draw the labels on top
        overlay = self.drawLabels(qp)
        # Draw the user drawn polygon
        self.drawDrawPoly(qp)
        self.drawDrawRect(qp)
        # Draw the label name next to the mouse
        self.drawLabelAtMouse(qp)
        # Draw the zoom
        # self.drawZoom(qp, overlay)
        self.drawZoom(qp,None)
        # Thats all drawing
        qp.end()
        # Forward the paint event
        QtGui.QMainWindow.paintEvent(self,event)
# Update the scaling
def updateScale(self, qp):
if not self.image.width() or not self.image.height():
return
# Horizontal offset
self.xoff = self.bordergap
# Vertical offset
self.yoff = self.toolbar.height()+self.bordergap
# We want to make sure to keep the image aspect ratio and to make it fit within the widget
# Without keeping the aspect ratio, each side of the image is scaled (multiplied) with
sx = float(qp.device().width() - 2*self.xoff) / self.image.width()
sy = float(qp.device().height() - 2*self.yoff) / self.image.height()
# To keep the aspect ratio while making sure it fits, we use the minimum of both scales
# Remember the scale for later
self.scale = min( sx , sy )
# These are then the actual dimensions used
self.w = self.scale * self.image.width()
self.h = self.scale * self.image.height()
# Determine the highlighted object for drawing
    def getHighlightedObject(self, qp):
        """Determine which objects (and optionally which label class) to highlight.

        Fills self.highlightObjs and self.highlightObjLabel based on the
        current selection, the mouse-hovered object and the highlight config.
        """
        # These variables we want to fill
        self.highlightObjs = []
        self.highlightObjLabel = None
        # Without labels we cannot do so
        if not self.annotation:
            return
        # If available set the selected objects
        highlightObjIds = self.selObjs
        # If not available but the polygon is empty or closed, its the mouse object
        if not highlightObjIds and (self.drawPoly.isEmpty() or self.drawPolyClosed) and self.mouseObj>=0 and not self.mouseOutsideImage:
            highlightObjIds = [self.mouseObj]
        # Get the actual object that is highlighted
        if highlightObjIds:
            self.highlightObjs = [ self.annotation.objects[i] for i in highlightObjIds ]
        # Set the highlight object label if appropriate
        if self.config.highlight:
            self.highlightObjLabel = self.config.highlightLabelSelection
        elif len(highlightObjIds) == 1 and self.config.correctionMode:
            self.highlightObjLabel = self.annotation.objects[highlightObjIds[-1]].label
# Draw the image in the given QPainter qp
def drawImage(self, qp):
# Return if no image available
if self.image.isNull():
return
# Save the painters current setting to a stack
qp.save()
# Draw the image
qp.drawImage(QtCore.QRect( self.xoff, self.yoff, self.w, self.h ), self.image)
# Restore the saved setting from the stack
qp.restore()
def getPolygon(self, obj):
poly = QtGui.QPolygonF()
for pt in obj.polygon:
point = QtCore.QPointF(pt.x,pt.y)
poly.append( point )
return poly
# Draw the labels in the given QPainter qp
# optionally provide a list of labels to ignore
def drawLabels(self, qp, ignore = []):
if self.image.isNull() or self.w <= 0 or self.h <= 0:
return
if not self.annotation:
return
if self.transpTempZero:
return
# The overlay is created in the viewing coordinates
# This way, the drawing is more dense and the polygon edges are nicer
# We create an image that is the overlay
# Within this image we draw using another QPainter
# Finally we use the real QPainter to overlay the overlay-image on what is drawn so far
# The image that is used to draw the overlays
overlay = QtGui.QImage( self.w, self.h, QtGui.QImage.Format_ARGB32_Premultiplied )
# Fill the image with the default color
defaultLabel = name2label[self.defaultLabel]
col = QtGui.QColor( *defaultLabel.color )
overlay.fill( col )
# Create a new QPainter that draws in the overlay image
qp2 = QtGui.QPainter()
qp2.begin(overlay)
# The color of the outlines
qp2.setPen(QtGui.QColor('white'))
# Draw all objects
for obj in self.annotation.objects:
# Some are flagged to not be drawn. Skip them
if not obj.draw:
continue
# The label of the object
name = assureSingleInstanceName( obj.label )
# If we do not know a color for this label, warn the user
if not name in name2label:
print( "The annotations contain unkown labels. This should not happen. Please inform the datasets authors. Thank you!" )
print( "Details: label '{}', file '{}'".format(name,self.currentLabelFile) )
continue
# If we ignore this label, skip
if name in ignore:
continue
poly = self.getPolygon(obj)
# Scale the polygon properly
polyToDraw = poly * QtGui.QTransform.fromScale(self.scale,self.scale)
# Default drawing
# Color from color table, solid brush
col = QtGui.QColor( *name2label[name].color )
brush = QtGui.QBrush( col, QtCore.Qt.SolidPattern )
qp2.setBrush(brush)
# Overwrite drawing if this is the highlighted object
if ( obj in self.highlightObjs or name == self.highlightObjLabel ):
# First clear everything below of the polygon
qp2.setCompositionMode( QtGui.QPainter.CompositionMode_Clear )
qp2.drawPolygon( polyToDraw )
qp2.setCompositionMode( QtGui.QPainter.CompositionMode_SourceOver )
# Set the drawing to a special pattern
brush = QtGui.QBrush(col,QtCore.Qt.DiagCrossPattern)
qp2.setBrush(brush)
qp2.drawPolygon( polyToDraw )
# Draw outline of selected object dotted
for obj in self.highlightObjs:
brush = QtGui.QBrush(QtCore.Qt.NoBrush)
qp2.setBrush(brush)
qp2.setPen(QtCore.Qt.DashLine)
polyToDraw = self.getPolygon(obj) * QtGui.QTransform.fromScale(self.scale,self.scale)
qp2.drawPolygon( polyToDraw )
# End the drawing of the overlay
qp2.end()
# Save QPainter settings to stack
qp.save()
# Define transparency
qp.setOpacity(self.config.transp)
# Draw the overlay image
qp.drawImage(self.xoff,self.yoff,overlay)
# Restore settings
qp.restore()
return overlay
    def drawDrawRect(self, qp):
        """Draw all correction boxes, plus any box currently being dragged."""
        qp.save()
        qp.setBrush(QtGui.QBrush(QtCore.Qt.NoBrush))
        qp.setFont(QtGui.QFont('QFont::AnyStyle', 14))
        thickPen = QtGui.QPen()
        qp.setPen(thickPen)
        for c in self.corrections:
            # Scale the box from image to widget coordinates
            rect = copy.deepcopy(c.bbox)
            width = rect.width()
            height = rect.height()
            rect.setX(c.bbox.x() * self.scale + self.xoff)
            rect.setY(c.bbox.y() * self.scale + self.yoff)
            rect.setWidth(width * self.scale)
            rect.setHeight(height * self.scale)
            if c.selected:
                # Selected box: caption below the image and a thicker outline
                thickPen.setColor(QtGui.QColor(0,0,0))
                if c.type == CorrectionBox.types.QUESTION:
                    descr = "QUESTION"
                elif c.type == CorrectionBox.types.RESOLVED:
                    descr = "FIXED"
                else:
                    descr = "ERROR"
                qp.setPen(thickPen)
                qp.drawText(QtCore.QPoint( self.xoff, self.yoff + self.h + 20 ),
                            "(%s: %s)" % (descr, c.annotation))
                pen_width = 6
            else:
                pen_width = 3
            colour = c.get_colour()
            thickPen.setColor(colour)
            thickPen.setWidth(pen_width)
            qp.setPen(thickPen)
            qp.drawRect(rect)
        if self.in_progress_bbox is not None:
            # The box currently being dragged is always drawn in red
            rect = copy.deepcopy(self.in_progress_bbox)
            width = rect.width()
            height = rect.height()
            rect.setX(self.in_progress_bbox.x() * self.scale + self.xoff)
            rect.setY(self.in_progress_bbox.y() * self.scale + self.yoff)
            rect.setWidth(width * self.scale)
            rect.setHeight(height * self.scale)
            thickPen.setColor(QtGui.QColor(255,0,0))
            thickPen.setWidth(3)
            qp.setPen(thickPen)
            qp.drawRect(rect)
        qp.restore()
# Draw the polygon that is drawn and edited by the user
# Usually the polygon must be rescaled properly. However when drawing
# The polygon within the zoom, this is not needed. Therefore the option transform.
    def drawDrawPoly(self, qp, transform=None):
        """Draw the polygon the user is drawing or editing.

        Normally the polygon is scaled and translated into widget
        coordinates; when drawing inside the zoom window, a ready-made
        *transform* is supplied instead.
        """
        # Nothing to do?
        if self.drawPoly.isEmpty():
            return
        if not self.image:
            return
        # Save QPainter settings to stack
        qp.save()
        # The polygon - make a copy
        poly = QtGui.QPolygonF(self.drawPoly)
        # Append the current mouse position
        if not self.drawPolyClosed and (self.mousePosScaled is not None):
            poly.append( self.mousePosScaled )
        # Transform
        if not transform:
            poly = poly * QtGui.QTransform.fromScale(self.scale,self.scale)
            poly.translate(self.xoff,self.yoff)
        else:
            poly = poly * transform
        # Do not fill the polygon
        qp.setBrush(QtGui.QBrush(QtCore.Qt.NoBrush))
        # Draw the polygon edges
        polyColor = QtGui.QColor(255,0,0)
        qp.setPen(polyColor)
        if not self.drawPolyClosed:
            qp.drawPolyline( poly )
        else:
            qp.drawPolygon( poly )
        # Get the ID of the closest point to the mouse
        if self.mousePosScaled is not None:
            closestPt = self.getClosestPoint( self.drawPoly, self.mousePosScaled )
        else:
            closestPt = (-1,-1)
        # If a polygon edge is selected, draw in bold
        if closestPt[0] != closestPt[1]:
            thickPen = QtGui.QPen(polyColor)
            thickPen.setWidth(3)
            qp.setPen(thickPen)
            qp.drawLine( poly[closestPt[0]], poly[closestPt[1]] )
        # Draw the polygon points
        qp.setPen(polyColor)
        startDrawingPts = 0
        # A bit different if not closed
        if not self.drawPolyClosed:
            # Draw the first point specially (clicking it closes the polygon)
            self.drawPoint( qp, poly.first(), True, closestPt==(0,0) and self.drawPoly.size()>1 )
            # Do not draw again
            startDrawingPts = 1
        # The next in red
        for pt in range(startDrawingPts,poly.size()):
            self.drawPoint( qp, poly[pt], False, self.drawPolyClosed and closestPt==(pt,pt) )
        # Restore QPainter settings from stack
        qp.restore()
# Draw the label name next to the mouse
    def drawLabelAtMouse(self, qp):
        """Write the label of the highlighted object next to the mouse cursor."""
        # Nothing to do without a highlighted object
        if not self.highlightObjs:
            return
        # Also we do not want to draw the label, if we have a drawn polygon
        if not self.drawPoly.isEmpty():
            return
        # Nothing to do without a mouse position
        if not self.mousePos:
            return
        # Save QPainter settings to stack
        qp.save()
        # That is the mouse position
        mouse = self.mousePos
        # Will show zoom
        showZoom = self.config.zoom and not self.image.isNull() and self.w and self.h
        # The text that is written next to the mouse
        mouseText = self.highlightObjs[-1].label
        # Where to write the text
        # Depends on the zoom (additional offset to mouse to make space for zoom?)
        # The location in the image (if we are at the top we want to write below of the mouse)
        off = 36
        if showZoom:
            off += self.config.zoomSize/2
        if mouse.y()-off > self.toolbar.height():
            top = mouse.y()-off
            btm = mouse.y()
            vAlign = QtCore.Qt.AlignTop
        else:
            # The height of the cursor
            if not showZoom:
                off += 20
            top = mouse.y()
            btm = mouse.y()+off
            vAlign = QtCore.Qt.AlignBottom
        # Here we can draw
        rect = QtCore.QRect()
        rect.setTopLeft(QtCore.QPoint(mouse.x()-100,top))
        rect.setBottomRight(QtCore.QPoint(mouse.x()+100,btm))
        # The color
        qp.setPen(QtGui.QColor('white'))
        # The font to use
        font = QtGui.QFont("Helvetica",20,QtGui.QFont.Bold)
        qp.setFont(font)
        # Non-transparent
        qp.setOpacity(1)
        # Draw the text, horizontally centered
        qp.drawText(rect,QtCore.Qt.AlignHCenter|vAlign,mouseText)
        # Restore settings
        qp.restore()
# Draw the zoom
    def drawZoom(self,qp,overlay):
        """Draw the magnified zoom window around the current mouse position.

        The *overlay* parameter is currently unused by the caller (passed as
        None from paintEvent).
        """
        # Zoom disabled?
        if not self.config.zoom:
            return
        # No image
        if self.image.isNull() or not self.w or not self.h:
            return
        # No mouse
        if not self.mousePos:
            return
        # Abbrevation for the zoom window size
        zoomSize = self.config.zoomSize
        # Abbrevation for the mouse position
        mouse = self.mousePos
        # The pixel that is the zoom center
        pix = self.mousePosScaled
        # The size of the part of the image that is drawn in the zoom window
        selSize = zoomSize / ( self.config.zoomFactor * self.config.zoomFactor )
        # The selection window for the image
        sel  = QtCore.QRectF(pix.x()  -selSize/2 ,pix.y()  -selSize/2 ,selSize,selSize  )
        # The selection window for the widget
        view = QtCore.QRectF(mouse.x()-zoomSize/2,mouse.y()-zoomSize/2,zoomSize,zoomSize)
        # Show the zoom image
        qp.drawImage(view,self.image,sel)
        # If we are currently drawing the polygon, we need to draw again in the zoom
        if not self.drawPoly.isEmpty():
            # Map the image-selection quad onto the widget-view quad
            transform = QtGui.QTransform()
            quadFrom = QtGui.QPolygonF()
            quadFrom.append( sel.topLeft() )
            quadFrom.append( sel.topRight() )
            quadFrom.append( sel.bottomRight() )
            quadFrom.append( sel.bottomLeft() )
            quadTo = QtGui.QPolygonF()
            quadTo.append( view.topLeft() )
            quadTo.append( view.topRight() )
            quadTo.append( view.bottomRight() )
            quadTo.append( view.bottomLeft() )
            if QtGui.QTransform.quadToQuad( quadFrom , quadTo , transform ):
                qp.setClipRect(view)
                #transform.translate(self.xoff,self.yoff)
                self.drawDrawPoly(qp,transform)
            else:
                print( "not possible" )
#############################
## Mouse/keyboard events
#############################
# Mouse moved
# Need to save the mouse position
# Need to drag a polygon point
# Need to update the mouse selected object
    def mouseMoveEvent(self,event):
        """Track mouse movement: drag polygon points, or resize the
        in-progress correction box while in correction mode; then update
        the hovered object and redraw."""
        if self.image.isNull() or self.w == 0 or self.h == 0:
            return
        self.updateMousePos( event.posF() )
        if not self.config.correctionMode:
            # If we are dragging a point, update
            if self.draggedPt >= 0:
                # Update the dragged point
                self.drawPoly.replace( self.draggedPt , self.mousePosScaled )
                # If the polygon is the polygon of the selected object,
                # update the object polygon and
                # keep track of the changes we do
                if self.selObjs:
                    obj = self.annotation.objects[self.selObjs[-1]]
                    obj.polygon[self.draggedPt] = Point(self.mousePosScaled.x(),self.mousePosScaled.y())
                    # Check if we changed the object's polygon the first time
                    if not obj.id in self.changedPolygon:
                        self.changedPolygon.append(obj.id)
                        self.addChange( "Changed polygon of object {0} with label {1}".format( obj.id, obj.label ) )
        else:
            if self.in_progress_bbox is not None:
                # NOTE(review): self.mousePressEvent here is a point that the
                # press handler stored as an instance attribute, shadowing the
                # QWidget.mousePressEvent method name — confirm this is intended.
                p0 = (self.mousePosScaled.x(), self.mousePosScaled.y())
                p1 = (self.mousePressEvent.x(), self.mousePressEvent.y())
                # Normalize to top-left corner plus positive width/height
                xy = min(p0[0], p1[0]), min(p0[1], p1[1])
                w, h = abs(p0[0] - p1[0]), abs(p0[1] - p1[1])
                self.in_progress_bbox = QtCore.QRectF(xy[0], xy[1], w, h)
        # Update the object selected by the mouse
        self.updateMouseObject()
        # Redraw
        self.update()
# Mouse left the widget
def leaveEvent(self, event):
self.mousePos = None
self.mousePosScaled = None
self.mouseOutsideImage = True
# Mouse button pressed
# Start dragging of polygon point
# Enable temporary toggling of zoom
    def mousePressEvent(self,event):
        """Qt handler: start dragging a vertex, delete a vertex (Shift), or begin a correction box.

        Left click on a closed polygon grabs the nearest vertex (inserting one on
        an edge hit); Shift+click deletes the nearest vertex. In correction mode
        a new bounding box is started. Right click toggles the zoom.
        """
        self.mouseButtons = event.buttons()
        shiftPressed = QtGui.QApplication.keyboardModifiers() == QtCore.Qt.ShiftModifier
        self.updateMousePos( event.posF() )
        # NOTE(review): this stores the press position under the same name as this
        # handler method, shadowing it on the instance; mouseMoveEvent reads
        # self.mousePressEvent as a point. Renaming both uses (e.g. mousePressPos)
        # would be safer — confirm PyQt dispatch still resolves the class method.
        self.mousePressEvent = self.mousePosScaled
        # Handle left click
        if event.button() == QtCore.Qt.LeftButton:
            # If the drawn polygon is closed and the mouse clicks a point,
            # Then this one is dragged around
            if not self.config.correctionMode:
                if self.drawPolyClosed and (self.mousePosScaled is not None):
                    closestPt = self.getClosestPoint( self.drawPoly, self.mousePosScaled )
                    if shiftPressed :
                        if closestPt[0] == closestPt[1]:
                            del self.drawPoly[closestPt[0]]
                            # If the polygon is the polygon of the selected object,
                            # update the object
                            # and keep track of the changes we do
                            if self.selObjs:
                                obj = self.annotation.objects[self.selObjs[-1]]
                                del obj.polygon[closestPt[0]]
                                # Check if we changed the object's polygon the first time
                                if not obj.id in self.changedPolygon:
                                    self.changedPolygon.append(obj.id)
                                    self.addChange( "Changed polygon of object {0} with label {1}".format( obj.id, obj.label ) )
                            self.update()
                    else :
                        # If we got a point (or nothing), we make it dragged
                        if closestPt[0] == closestPt[1]:
                            self.draggedPt = closestPt[0]
                        # If we got an edge, we insert a point and make it dragged
                        else:
                            self.drawPoly.insert( closestPt[1] , self.mousePosScaled )
                            self.draggedPt = closestPt[1]
                            # If the polygon is the polygon of the selected object,
                            # update the object
                            # and keep track of the changes we do
                            if self.selObjs:
                                obj = self.annotation.objects[self.selObjs[-1]]
                                obj.polygon.insert( closestPt[1] , Point( self.mousePosScaled.x() , self.mousePosScaled.y() ) )
                                # Check if we changed the object's polygon the first time
                                if not obj.id in self.changedPolygon:
                                    self.changedPolygon.append(obj.id)
                                    self.addChange( "Changed polygon of object {0} with label {1}".format( obj.id, obj.label ) )
            else:
                # Correction mode: start a fresh zero-sized box at the cursor.
                assert self.in_progress_bbox == None
                self.in_progress_bbox = QtCore.QRectF(self.mousePosScaled.x(), self.mousePosScaled.y(), 0, 0)
        # Handle right click
        elif event.button() == QtCore.Qt.RightButton:
            self.toggleZoom( event.posF() )
        # Redraw
        self.update()
# Mouse button released
# End dragging of polygon
# Select an object
# Add a point to the polygon
# Disable temporary toggling of zoom
    def mouseReleaseEvent(self,event):
        """Qt handler: finish dragging, select objects, extend/close the polygon, or finish a correction.

        Left click: with Ctrl, intersect (Shift) or merge (Alt) a closed polygon
        with the mouse object, otherwise toggle selection; without Ctrl, add a
        point to the open polygon or close it. In correction mode the in-progress
        box is committed after asking for a description. Right click toggles zoom.
        """
        self.mouseButtons = event.buttons()
        ctrlPressed = event.modifiers() & QtCore.Qt.ControlModifier
        shiftPressed = event.modifiers() & QtCore.Qt.ShiftModifier
        altPressed = event.modifiers() & QtCore.Qt.AltModifier
        # Handle left click
        if event.button() == QtCore.Qt.LeftButton:
            if not self.config.correctionMode:
                # Check if Ctrl is pressed
                if ctrlPressed:
                    # If also Shift is pressed and we have a closed polygon, then we intersect
                    # the polygon with the mouse object
                    if shiftPressed and self.drawPolyClosed:
                        self.intersectPolygon()
                    # If also Alt is pressed and we have a closed polygon, then we merge
                    # the polygon with the mouse object
                    if altPressed and self.drawPolyClosed:
                        self.mergePolygon()
                    # Make the current mouse object the selected
                    # and process the selection
                    # NOTE(review): this 'else' pairs with the Alt test above, so a
                    # Shift-intersect still falls through to selectObject() when Alt
                    # is not pressed — confirm this is intended.
                    else:
                        self.selectObject()
                # Add the point to the drawn polygon if not already closed
                elif not self.drawPolyClosed:
                    # If the mouse would close the poly make sure to do so
                    if self.ptClosesPoly( ):
                        self.closePolygon()
                    elif self.mousePosScaled is not None:
                        # Remember where the zoom was anchored for the first point.
                        if not self.drawPolyClosed and self.drawPoly.isEmpty() :
                            self.mousePosOnZoom = self.mousePos
                        self.addPtToPoly( self.mousePosScaled )
                # Otherwise end a possible dragging
                elif self.drawPolyClosed:
                    self.draggedPt = -1
            else:
                if self.in_progress_bbox is not None:
                    # Only boxes of a minimum width are accepted, to ignore accidental clicks.
                    if self.in_progress_bbox.width() > 20:
                        description = QtGui.QInputDialog.getText(self, "Error Description", "Please describe the labeling error briefly.")
                        if description[1] and description[0]:
                            self.corrections.append(CorrectionBox(self.in_progress_bbox, annotation=description[0]))
                            #last_annotation = self.in_progress_annotation #TODO: self?
                            self.corrections[self.selected_correction].unselect()
                            self.selected_correction = len(self.corrections)-1
                            self.corrections[self.selected_correction].select()
                            self.addChange( "Added correction.")
                    self.in_progress_annotation = None
                    self.in_progress_bbox = None
        # Handle right click
        elif event.button() == QtCore.Qt.RightButton:
            self.toggleZoom( event.posF() )
        # Redraw
        self.update()
# Mouse wheel scrolled
def wheelEvent(self, event):
deltaDegree = event.delta() / 8 # Rotation in degree
deltaSteps = deltaDegree / 15 # Usually one step on the mouse is 15 degrees
if self.config.zoom:
# If shift is pressed, change zoom window size
if event.modifiers() and QtCore.Qt.Key_Shift:
self.config.zoomSize += deltaSteps * 10
self.config.zoomSize = max( self.config.zoomSize, 10 )
self.config.zoomSize = min( self.config.zoomSize, 1000 )
# Change zoom factor
else:
self.config.zoomFactor += deltaSteps * 0.05
self.config.zoomFactor = max( self.config.zoomFactor, 0.1 )
self.config.zoomFactor = min( self.config.zoomFactor, 10 )
self.update()
# Key pressed
    def keyPressEvent(self,e):
        """Qt handler: hotkeys.

        Ctrl: pointing-hand cursor. Backspace: remove last drawn point.
        0: temporarily force full transparency. E/R: next/previous correction.
        1-4: set correction type. D/M (correction mode): delete / edit description.
        """
        # Ctrl key changes mouse cursor
        if e.key() == QtCore.Qt.Key_Control:
            QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        # Backspace deletes last point from polygon
        # NOTE(review): deleting from an empty polygon would raise — presumably
        # Backspace is only pressed while drawing; confirm.
        elif e.key() == QtCore.Qt.Key_Backspace:
            if not self.drawPolyClosed:
                del self.drawPoly[-1]
                self.update()
        # set alpha to temporary zero
        elif e.key() == QtCore.Qt.Key_0:
            self.transpTempZero = True
            self.update()
        elif e.key() == QtCore.Qt.Key_E:
            self.select_next_correction()
        elif e.key() == QtCore.Qt.Key_R:
            self.select_previous_correction()
        elif e.key() == QtCore.Qt.Key_1:
            self.modify_correction_type(CorrectionBox.types.TO_CORRECT)
        elif e.key() == QtCore.Qt.Key_2:
            self.modify_correction_type(CorrectionBox.types.TO_REVIEW)
        elif e.key() == QtCore.Qt.Key_3:
            self.modify_correction_type(CorrectionBox.types.RESOLVED)
        elif e.key() == QtCore.Qt.Key_4:
            self.modify_correction_type(CorrectionBox.types.QUESTION)
        elif e.key() == QtCore.Qt.Key_D and self.config.correctionMode:
            self.delete_selected_annotation()
        elif e.key() == QtCore.Qt.Key_M and self.config.correctionMode:
            self.modify_correction_description()
# Key released
    def keyReleaseEvent(self,e):
        """Qt handler: undo key-press effects (restore cursor, re-enable transparency)."""
        # Ctrl key changes mouse cursor
        if e.key() == QtCore.Qt.Key_Control:
            QtGui.QApplication.restoreOverrideCursor()
        # check for zero to release temporary zero
        # somehow, for the numpad key on some machines, a check on Insert is needed as well
        elif e.key() == QtCore.Qt.Key_0 or e.key() == QtCore.Qt.Key_Insert:
            self.transpTempZero = False
            self.update()
#############################
## Little helper methods
#############################
# Helper method that sets tooltip and statustip
# Provide an QAction and the tip text
# This text is appended with a hotkeys and then assigned
def setTip( self, action, tip ):
tip += " (Hotkeys: '" + "', '".join([str(s.toString()) for s in action.shortcuts()]) + "')"
action.setStatusTip(tip)
action.setToolTip(tip)
# Set the mouse positions
# There are the original positions refering to the screen
# Scaled refering to the image
# And a zoom version, where the mouse movement is artificially slowed down
    def updateMousePos( self, mousePosOrig ):
        """Update mousePos / mousePosScaled / mousePosOrig from a widget-space position.

        While drawing inside the zoom, cursor movement is artificially slowed
        (sens < 1) for finer control. The scaled position is clamped to the image
        rectangle; if even the clamped point lies outside, all positions are
        reset to None.
        """
        # Slow the cursor down only while actively drawing inside the zoom.
        if self.config.zoomFactor <= 1 or (self.drawPolyClosed or self.drawPoly.isEmpty()):
            sens = 1.0
        else :
            sens = 1.0/pow(self.config.zoomFactor, 3);
        # Interpolate between the zoom anchor and the real cursor position.
        if self.config.zoom and self.mousePosOnZoom is not None:
            mousePos = QtCore.QPointF(round((1-sens)*self.mousePosOnZoom.x() + (sens)*mousePosOrig.x()), round((1-sens)*self.mousePosOnZoom.y() + sens*mousePosOrig.y()))
        else :
            mousePos = mousePosOrig
        # Convert from widget space to image coordinates.
        mousePosScaled = QtCore.QPointF( float(mousePos.x() - self.xoff) / self.scale , float(mousePos.y() - self.yoff) / self.scale )
        mouseOutsideImage = not self.image.rect().contains( mousePosScaled.toPoint() )
        # Clamp to the image rectangle.
        mousePosScaled.setX( max( mousePosScaled.x() , 0. ) )
        mousePosScaled.setY( max( mousePosScaled.y() , 0. ) )
        mousePosScaled.setX( min( mousePosScaled.x() , self.image.rect().right() ) )
        mousePosScaled.setY( min( mousePosScaled.y() , self.image.rect().bottom() ) )
        # Still outside after clamping: invalidate all positions.
        if not self.image.rect().contains( mousePosScaled.toPoint() ):
            self.mousePos = None
            self.mousePosScaled = None
            self.mousePosOrig = None
            self.updateMouseObject()
            self.update()
            return
        self.mousePos = mousePos
        self.mousePosScaled = mousePosScaled
        self.mousePosOrig = mousePosOrig
        self.mouseOutsideImage = mouseOutsideImage
# Toggle the zoom and update all mouse positions
    def toggleZoom(self, mousePosOrig):
        """Toggle the zoom overlay and re-derive all mouse positions consistently."""
        self.config.zoom = not self.config.zoom
        if self.config.zoom:
            # Anchor the zoom at the current cursor position.
            self.mousePosOnZoom = self.mousePos
            # Update the mouse position afterwards
            self.updateMousePos( mousePosOrig )
        else:
            # Update the mouse position first
            self.updateMousePos( mousePosOrig )
            # Update the dragged point to the non-zoom point
            if not self.config.correctionMode and self.draggedPt >= 0:
                self.drawPoly.replace( self.draggedPt , self.mousePosScaled )
# Get the point/edge index within the given polygon that is close to the given point
# Returns (-1,-1) if none is close enough
# Returns (i,i) if the point with index i is closed
# Returns (i,i+1) if the edge from points i to i+1 is closest
    def getClosestPoint( self, poly, pt ):
        """Find the polygon vertex or edge closest to *pt*, within a snap threshold.

        Returns (-1,-1) if nothing is within distTh pixels, (i,i) for vertex i,
        or (i,j) with j = i+1 (mod size) for the closest edge. Edges are only
        considered when the drawn polygon is closed.
        """
        closest = (-1,-1)
        # Snap threshold in (scaled) pixels.
        distTh = 4.0
        dist = 1e9 # should be enough
        # Vertices first: the nearest one wins.
        for i in range(poly.size()):
            curDist = self.ptDist(poly[i],pt)
            if curDist < dist:
                closest = (i,i)
                dist = curDist
        # Close enough?
        if dist <= distTh:
            return closest
        # Otherwise see if the polygon is closed, but a line is close enough
        if self.drawPolyClosed and poly.size() >= 2:
            for i in range(poly.size()):
                pt1 = poly[i]
                j = i+1
                if j == poly.size():
                    j = 0
                pt2 = poly[j]
                edge = QtCore.QLineF(pt1,pt2)
                # Point-to-edge distance via the perpendicular through the mouse point.
                normal = edge.normalVector()
                normalThroughMouse = QtCore.QLineF( pt.x(),pt.y(),pt.x()+normal.dx(),pt.y()+normal.dy() )
                intersectionPt = QtCore.QPointF()
                intersectionType = edge.intersect( normalThroughMouse , intersectionPt )
                # Only count hits that fall within the edge segment itself.
                if intersectionType == QtCore.QLineF.BoundedIntersection:
                    curDist = self.ptDist(intersectionPt,pt)
                    if curDist < dist:
                        closest = (i,j)
                        dist = curDist
            # Close enough?
            if dist <= distTh:
                return closest
        # If we didnt return yet, we didnt find anything
        return (-1,-1)
# Get distance between two points
def ptDist( self, pt1, pt2 ):
# A line between both
line = QtCore.QLineF( pt1 , pt2 )
# Length
lineLength = line.length()
return lineLength
# Determine if the given point closes the drawn polygon (snapping)
def ptClosesPoly(self):
if self.drawPoly.isEmpty():
return False
if self.mousePosScaled is None:
return False
closestPt = self.getClosestPoint( self.drawPoly, self.mousePosScaled )
return closestPt==(0,0)
# Draw a point using the given QPainter qp
# If its the first point in a polygon its drawn in green
# if not in red
# Also the radius might be increased
def drawPoint(self, qp, pt, isFirst, increaseRadius):
# The first in green
if isFirst:
qp.setBrush(QtGui.QBrush(QtGui.QColor(0,255,0),QtCore.Qt.SolidPattern))
# Other in red
else:
qp.setBrush(QtGui.QBrush(QtGui.QColor(255,0,0),QtCore.Qt.SolidPattern))
# Standard radius
r = 3.0
# Increase maybe
if increaseRadius:
r *= 2.5
# Draw
qp.drawEllipse( pt, r, r )
# Determine if the given candidate for a label path makes sense
def isLabelPathValid(self,labelPath):
return os.path.isdir(labelPath)
# Ask the user to select a label
# If you like, you can give an object ID for a better dialog texting
# Note that giving an object ID assumes that its current label is the default label
# If you dont, the message "Select new label" is used
# Return is (label, ok). 'ok' is false if the user pressed Cancel
    def getLabelFromUser(self, defaultLabel = "", objID = -1):
        """Ask the user to pick a label from the known label names via a modal dialog.

        If objID >= 0, the dialog text mentions that object and treats
        defaultLabel as its current label. Returns (label, ok) where ok is False
        when the user cancelled; returns None (implicitly) if defaultLabel is not
        found in the internal color map.
        """
        # Reset the status bar to this message when leaving
        restoreMessage = self.statusBar().currentMessage()
        # Update defaultLabel
        if not defaultLabel:
            defaultLabel = self.defaultLabel
        # List of possible labels (QStringList: PyQt4 / Python 2 string-list API)
        items = QtCore.QStringList(name2label.keys())
        items.sort()
        default = items.indexOf(defaultLabel)
        if default < 0:
            self.statusBar().showMessage( 'The selected label is missing in the internal color map.' )
            return
        # Specify title
        dlgTitle = "Select label"
        message = dlgTitle
        question = dlgTitle
        if objID >= 0:
            message = "Select new label for object {0} with current label {1}".format( objID, defaultLabel )
            question = "Label for object {0}".format(objID)
        self.statusBar().showMessage(message)
        # Create and wait for dialog
        (item, ok) = QtGui.QInputDialog.getItem(self, dlgTitle, question, items, default, False)
        # Convert the QString answer to a plain Python string
        item = str(item)
        # Restore message
        self.statusBar().showMessage( restoreMessage )
        # Return
        return (item, ok)
# Add a point to the drawn polygon
def addPtToPoly(self, pt):
self.drawPoly.append( pt )
# Enable actions that need a polygon
for act in self.actPolyOrSelObj:
act.setEnabled(True)
# Clear the drawn polygon
    def clearPolygon(self):
        """Reset the drawn polygon and update the actions that depend on it."""
        # We do not clear, since the drawPoly might be a reference on an object one
        self.drawPoly = QtGui.QPolygonF()
        self.drawPolyClosed = False
        # Disable actions that need a polygon
        # (they stay enabled while an object is still selected)
        for act in self.actPolyOrSelObj:
            act.setEnabled(bool(self.selObjs))
        for act in self.actClosedPoly:
            act.setEnabled(False)
# We just closed the polygon and need to deal with this situation
def closePolygon(self):
self.drawPolyClosed = True
for act in self.actClosedPoly:
act.setEnabled(True)
message = "What should I do with the polygon? Press n to create a new object, press Ctrl + Left Click to intersect with another object"
self.statusBar().showMessage(message)
# Intersect the drawn polygon with the mouse object
# and create a new object with same label and so on
    def intersectPolygon(self):
        """Intersect the drawn polygon with the object under the mouse.

        If the intersection is non-empty, asks the user for a label and creates a
        new object from the intersection; then clears the drawn polygon and the
        selection.
        """
        # Cannot do anything without labels
        if not self.annotation:
            return
        # Cannot do anything without a single selected object
        if self.mouseObj < 0:
            return
        # The selected object that is modified
        obj = self.annotation.objects[self.mouseObj]
        # The intersection of the polygons
        intersection = self.drawPoly.intersected( self.getPolygon(obj) )
        if not intersection.isEmpty():
            # Ask the user for a label
            self.drawPoly = intersection
            (label, ok) = self.getLabelFromUser( obj.label )
            if ok and label:
                # Append and create the new object
                self.appendObject( label , intersection )
                # Clear the drawn polygon
                self.clearPolygon()
                # Default message
                self.statusBar().showMessage( self.defaultStatusbar )
        # Deselect
        self.deselectAllObjects()
        # Redraw
        self.update()
# Merge the drawn polygon with the mouse object
# and create a new object with same label and so on
    def mergePolygon(self):
        """Merge (union) the drawn polygon with the object under the mouse.

        If the union is non-empty, asks the user for a label and creates a new
        object from it; then clears the drawn polygon and the selection.
        """
        # Cannot do anything without labels
        if not self.annotation:
            return
        # Cannot do anything without a single selected object
        if self.mouseObj < 0:
            return
        # The selected object that is modified
        obj = self.annotation.objects[self.mouseObj]
        # The union of the polygons
        union = self.drawPoly.united( self.getPolygon(obj) )
        if not union.isEmpty():
            # Ask the user for a label
            self.drawPoly = union
            (label, ok) = self.getLabelFromUser( obj.label )
            if ok and label:
                # Append and create the new object
                self.appendObject( label , union )
                # Clear the drawn polygon
                self.clearPolygon()
                # Default message
                self.statusBar().showMessage( self.defaultStatusbar )
        # Deselect
        self.deselectAllObjects()
        # Redraw
        self.update()
# Edit an object's polygon or clear the polygon if multiple objects are selected
    def initPolygonFromObject(self):
        """Load the selected object's polygon into the drawing state for editing.

        With multiple objects selected, the drawn polygon is cleared instead.
        """
        # Cannot do anything without labels
        if not self.annotation:
            return
        # Cannot do anything without any selected object
        if not self.selObjs:
            return
        # If there are multiple objects selected, we clear the polygon
        if len(self.selObjs) > 1:
            self.clearPolygon()
            self.update()
            return
        # The selected object that is used for init
        obj = self.annotation.objects[self.selObjs[-1]]
        # Make a reference to the polygon
        # NOTE(review): per clearPolygon's comment this may alias the object's
        # polygon, so edits to drawPoly edit the object — confirm getPolygon
        # returns a shared instance.
        self.drawPoly = self.getPolygon(obj)
        # Make sure its closed
        self.drawPolyClosed = True
        # Update toolbar icons
        # Enable actions that need a polygon
        for act in self.actPolyOrSelObj:
            act.setEnabled(True)
        # Enable actions that need a closed polygon
        for act in self.actClosedPoly:
            act.setEnabled(True)
        # Redraw
        self.update()
# Create new object
    def appendObject(self, label, polygon):
        """Create a new annotation object from *polygon* with the given *label*.

        Assigns a fresh id, records the change, resets the drawing state and
        selects the new object.
        """
        # Create empty annotation object
        # if first object
        if not self.annotation:
            self.annotation = Annotation()
        # Search the highest ID
        newID = 0
        for obj in self.annotation.objects:
            if obj.id >= newID:
                newID = obj.id + 1
        # New object
        # Insert the object in the labels list
        obj = CsObject()
        obj.label = label
        # Convert the polygon's points to the annotation's own Point type
        obj.polygon = [ Point(p.x(),p.y()) for p in polygon ]
        obj.id = newID
        obj.deleted = 0
        obj.verified = 0
        obj.user = getpass.getuser()
        obj.updateDate()
        self.annotation.objects.append(obj)
        # Append to changes
        self.addChange( "Created object {0} with label {1}".format( newID, label ) )
        # Clear the drawn polygon
        self.deselectAllObjects()
        self.clearPolygon()
        # select the new object
        # NOTE(review): mouseObj is set to 0 but the new object was appended last,
        # so selectObject() selects index 0, not necessarily the new object — confirm.
        self.mouseObj = 0
        self.selectObject()
# Helper for leaving an image
# Returns true if the image can be left, false if not
# Checks for possible changes and asks the user if they should be saved
# If the user says yes, then they are saved and true is returned
    def checkAndSave(self):
        """Ask the user to save pending changes; return True if it is OK to leave the image.

        Saves on request (True only if saving succeeded), discards on request,
        and returns False when the user cancels.
        """
        # Without changes it's ok to leave the image
        if not self.changes:
            return True
        # Backup of status message
        restoreMessage = self.statusBar().currentMessage()
        # Create the dialog
        dlgTitle = "Save changes?"
        self.statusBar().showMessage(dlgTitle)
        text = "Do you want to save the following changes?\n"
        for c in self.changes:
            text += "- " + c + '\n'
        buttons = QtGui.QMessageBox.Save | QtGui.QMessageBox.Discard | QtGui.QMessageBox.Cancel
        ret = QtGui.QMessageBox.question(self, dlgTitle, text, buttons, QtGui.QMessageBox.Save )
        proceed = False
        # If the user selected yes -> save
        if ret == QtGui.QMessageBox.Save:
            proceed = self.save()
        # If the user selected to discard the changes, clear them
        elif ret == QtGui.QMessageBox.Discard:
            self.clearChanges( )
            proceed = True
        # Otherwise prevent leaving the image
        else:
            proceed = False
        self.statusBar().showMessage( restoreMessage )
        return proceed
# Actually save a screenshot
def doScreenshot(self):
# For creating the screenshot we re-use the label drawing function
# However, we draw in an image using a QPainter
# Create such an image
img = QtGui.QImage( self.image )
# Create a QPainter that can perform draw actions within a widget or image
qp = QtGui.QPainter()
# Begin drawing in the image
qp.begin(img)
# Remember some settings
xoff = self.xoff
yoff = self.yoff
scale = self.scale
w = self.w
h = self.h
# Update scale
self.xoff = 0
self.yoff = 0
self.scale = 1
self.w = self.image.width()
self.h = self.image.height()
# Detactivate the highlighted object
self.highlightObjs = []
# Blur the license plates
# make this variabel a member and use as option if desired
blurLicensePlates = True
if blurLicensePlates:
self.blurLicensePlates(qp)
# Draw the labels on top
ignore = []
if blurLicensePlates:
ignore.append( 'numberplate' )
self.drawLabels(qp,ignore)
# Finish drawing
qp.end()
# Reset scale and stuff
self.xoff = xoff
self.yoff = yoff
self.scale = scale
self.w = w
self.h = h
# Generate the real filename for saving
file = self.config.screenshotFilename
# Replace occurance of %c with the city name (as directory)
# Generate the directory if necessary
cityIdx = file.find('%c')
if cityIdx >= 0:
if self.config.cityName:
dir = os.path.join( file[:cityIdx] , self.config.cityName )
if not os.path.exists(dir):
os.makedirs(dir)
file = file.replace( '%c',self.config.cityName + '/', 1 )
if file.find('%c') > 0:
message = "Found multiple '%c' in screenshot filename. Not allowed"
file = None
else:
message = "Do not have a city name. Cannot replace '%c' in screenshot filename."
file = None
# Replace occurances of %i with the image filename (without extension)
if file:
file = file.replace( '%i',os.path.splitext(os.path.basename(self.config.currentFile))[0] )
# Add extension .png if no extension given
if file:
if not os.path.splitext(file)[1]:
file += '.png'
# Save
if file:
success = img.save(file)
if success:
message = "Saved screenshot to " + file
else:
message = "Failed to save screenshot"
self.statusBar().showMessage(message)
# Update to reset everything to the correct state
self.update()
# Blur the license plates
# Argument is a qPainter
# Thus, only use this method for screenshots.
def blurLicensePlates(self,qp):
# license plate name
searchedNames = [ 'license plate' ]
# the image
img = self.image
# Draw all objects
for obj in self.annotation.objects:
# Some are flagged to not be drawn. Skip them
if not obj.draw:
continue
# The label of the object
name = obj.label
# If we do not know a color for this label, skip
if not name2label.has_key( name ):
continue
# If we do not blur this label, skip
if not name in searchedNames:
continue
# Scale the polygon properly
polyToDraw = self.getPolygon(obj) * QtGui.QTransform.fromScale(self.scale,self.scale)
bb = polyToDraw.boundingRect()
# Get the mean color within the polygon
meanR = 0
meanG = 0
meanB = 0
num = 0
for y in range( max(int(bb.top()),0) , min(int(bb.bottom()+1.5),img.height()) ):
for x in range( max(int(bb.left()),0) , min(int(bb.right()+1.5),img.width()) ):
col = img.pixel(x,y)
meanR += QtGui.QColor(col).red()
meanG += QtGui.QColor(col).green()
meanB += QtGui.QColor(col).blue()
num += 1
meanR /= float(num)
meanG /= float(num)
meanB /= float(num)
col = QtGui.QColor( meanR , meanG , meanB )
qp.setPen(col)
brush = QtGui.QBrush( col, QtCore.Qt.SolidPattern )
qp.setBrush(brush)
# Default drawing
qp.drawPolygon( polyToDraw )
# Update the object that is selected by the current mouse curser
def updateMouseObject(self):
self.mouseObj = -1
if self.mousePosScaled is None:
return
if not self.annotation or not self.annotation.objects:
return
for idx in reversed(range(len(self.annotation.objects))):
obj = self.annotation.objects[idx]
if obj.draw and self.getPolygon(obj).containsPoint(self.mousePosScaled, QtCore.Qt.OddEvenFill):
self.mouseObj = idx
break
# Print info about the currently selected object at the status bar
def infoOnSelectedObject(self):
if not self.selObjs:
return
objID = self.selObjs[-1]
if self.annotation and objID >= 0:
obj = self.annotation.objects[objID]
self.statusBar().showMessage("Label of object {0}: {1}".format(obj.id,obj.label))
#else:
# self.statusBar().showMessage(self.defaultStatusbar)
# Make the object selected by the mouse the real selected object
    def selectObject(self):
        """Toggle selection of the object under the mouse and refresh the UI state."""
        # If there is no mouse selection, we are good
        # (a click into nothing deselects the most recent object)
        if self.mouseObj < 0:
            self.deselectObject()
            return
        # Append the object to selection if it's not in there
        if not self.mouseObj in self.selObjs:
            self.selObjs.append( self.mouseObj )
        # Otherwise remove the object
        else:
            self.deselectObject()
        # update polygon
        self.initPolygonFromObject()
        # If we have selected objects make the toolbar actions active
        if self.selObjs:
            for act in self.actSelObj + self.actPolyOrSelObj:
                act.setEnabled(True)
        # If we have a single selected object make their toolbar actions active
        for act in self.singleActSelObj:
            act.setEnabled(len(self.selObjs) == 1)
        self.infoOnSelectedObject()
# Deselect object
    def deselectObject(self):
        """Remove the mouse object (or, without one, the most recent object) from the selection."""
        # If there is no object to deselect, we are good
        if not self.selObjs:
            return
        # If the mouse does not select an object, remove the last one
        if self.mouseObj < 0:
            del self.selObjs[-1]
        # Otherwise try to find the mouse obj in the list
        if self.mouseObj in self.selObjs:
            self.selObjs.remove(self.mouseObj)
        # No object left?
        if not self.selObjs:
            for act in self.actSelObj:
                act.setEnabled(False)
            # Polygon-dependent actions survive only if a polygon is being drawn.
            for act in self.actPolyOrSelObj:
                act.setEnabled(bool(self.drawPoly))
        # If we have a single selected object make their toolbar actions active
        for act in self.singleActSelObj:
            act.setEnabled(len(self.selObjs) == 1)
        self.infoOnSelectedObject()
# Deselect all objects
def deselectAllObjects(self):
# If there is no object to deselect, we are good
self.selObjs = []
self.mouseObj = -1
for act in self.actSelObj:
act.setEnabled(False)
# If we have a single selected object make their toolbar actions active
for act in self.singleActSelObj:
act.setEnabled(len(self.selObjs) == 1)
self.infoOnSelectedObject()
# Modify the layer of the selected object
# Move the layer up (negative offset) or down (postive offset)
    def modifyLayer(self, offset):
        """Move the single selected object up (offset<0) or down (offset>0) in the layer stack.

        The layer is the object's index in the annotation list, which determines
        drawing order. Records the change the first time an object is moved.
        """
        # Cannot do anything without labels
        if not self.annotation:
            return
        # Cannot do anything without a single selected object
        if len(self.selObjs) != 1:
            return
        # The selected object that is modified
        obj = self.annotation.objects[self.selObjs[-1]]
        # The index in the label list we are right now
        oldidx = self.selObjs[-1]
        # The index we want to move to
        newidx = oldidx + offset
        # Make sure not to exceed zero and the list
        newidx = max(newidx,0)
        newidx = min(newidx,len(self.annotation.objects)-1)
        # If new and old idx are equal, there is nothing to do
        if oldidx == newidx:
            return
        # Move the entry in the labels list
        self.annotation.objects.insert(newidx, self.annotation.objects.pop(oldidx))
        # Update the selected object to the new index
        self.selObjs[-1] = newidx
        self.statusBar().showMessage("Moved object {0} with label {1} to layer {2}".format(obj.id,obj.label,newidx))
        # Check if we moved the object the first time
        if not obj.id in self.changedLayer:
            self.changedLayer.append(obj.id)
            self.addChange( "Changed layer for object {0} with label {1}".format( obj.id, obj.label ) )
# Add a new change
def addChange(self, text):
if not text:
return
self.changes.append( text )
for act in self.actChanges:
act.setEnabled(True)
# Clear list of changes
def clearChanges(self):
self.changes = []
self.changedLayer = []
self.changedPolygon = []
for act in self.actChanges:
act.setEnabled(False)
# Clear the current labels
    def clearAnnotation(self):
        """Drop the current annotation together with all pending edit state."""
        self.annotation = None
        self.clearChanges()
        self.deselectAllObjects()
        self.clearPolygon()
        self.config.currentLabelFile = ""
    def clearCorrections(self):
        """Drop the loaded correction boxes and their source file reference."""
        self.correctionXML = None
        self.corrections = []
        #self.clearChanges() #TODO perhaps?
        #self.clearPolygon()
        self.config.currentCorrectionFile = ""
# Get the filename where to load/save labels
# Returns empty string if not possible
# Set the createDirs to true, if you want to create needed directories
def getLabelFilename( self , createDirs = False ):
# We need the name of the current city
if not self.config.cityName:
return ""
# And we need to have a directory where labels should be searched
if not self.config.labelPath:
return ""
# Without the name of the current images, there is also nothing we can do
if not self.config.currentFile:
return ""
# Check if the label directory is valid. This folder is selected by the user
# and thus expected to exist
if not self.isLabelPathValid(self.config.labelPath):
return ""
# Dirs are not automatically created in this version of the tool
if not os.path.isdir( self.config.labelPath ):
return ""
labelDir = self.config.labelPath
# extension of ground truth files
if self.config.gtType:
ext = self.gtExt.format('_'+self.config.gtType)
else:
ext = self.gtExt.format('')
# Generate the filename of the label file
filename = os.path.basename( self.config.currentFile )
filename = filename.replace( self.imageExt , ext )
filename = os.path.join( labelDir , filename )
filename = os.path.normpath(filename)
return filename
# Get the filename where to load/save labels
# Returns empty string if not possible
# Set the createDirs to true, if you want to create needed directories
def getCorrectionFilename( self , createDirs = False ):
# And we need to have a directory where corrections are stored
if not self.config.correctionPath:
return ""
# Without the name of the current images, there is also nothing we can do
if not self.config.currentFile:
return ""
# Folder where to store the labels
correctionDir = self.config.correctionPath
# If the folder does not exist, create it if allowed
if not os.path.isdir( correctionDir ):
if createDirs:
os.makedirs( correctionDir )
if not os.path.isdir( correctionDir ):
return ""
else:
return ""
# Generate the filename of the label file
filename = os.path.basename( self.config.currentFile )
filename = filename.replace( self.imageExt ,'.xml')
filename = os.path.join( correctionDir , filename )
filename = os.path.normpath(filename)
return filename
# Disable the popup menu on right click
    def createPopupMenu(self):
        """Return nothing to disable the default right-click popup menu.

        Right click is used to toggle the zoom instead (see mousePressEvent).
        """
        pass
def main():
    """Entry point: create the Qt application, instantiate the tool and run the event loop."""
    app = QtGui.QApplication(sys.argv)
    # NOTE(review): 'tool' looks unused, but the reference must be kept so the
    # widget is not garbage collected while the event loop runs.
    tool = CityscapesLabelTool()
    sys.exit(app.exec_())
if __name__ == '__main__':
    main()
# license: gpl-3.0
# --- repo: Robbie1977/TGscripts, path: plJHwarpToTemplate.py (dataset join artifact removed) ---
import os, re, shutil, subprocess, datetime, socket
# Tool locations used throughout the warping pipeline.
ba = '/groups/sciserv/flyolympiad/vnc_align/toolkit/JBA/brainaligner'
cmtkdir = '/usr/local/cmtk/bin/'
fiji = '/usr/local/Fiji/ImageJ-linux64'
# ImageJ macros for raw <-> nrrd conversion.
Rawconv = '~/script/raw2nrrdCrop.ijm'
Nrrdconv = '~/script/nrrd2rawUncrop.ijm'
# Template brain in the formats required by the different tools.
Tfile = '~/template/flyVNCtemplate20xDaC.nrrd'
TfileR = '~/template/flyVNCtemplate20xDa.raw'
TfileM = '~/template/flyVNCtemplate20xDa.marker'
# Alignment quality-measure script.
Qual = '~/script/Quality.py'
outdir = os.getcwd() + '/'
# Read the list of files to warp; 'with' guarantees the handle is closed
# even if readlines() fails (the original used open/readlines/close).
with open("PLwarp.txt", 'r') as fo:
    filelist = fo.readlines()
# Unique id of this worker ("[host;pid]"), used to tag log entries.
hostn = socket.gethostname()
runid = os.getpid()
procid = '[' + hostn + ';' + str(runid) + ']'
for fname in filelist:
fo = open("stop.txt",'r')
stoplist = fo.readlines()
if (hostn + '\n') in stoplist:
print 'Stop requested!'
else:
fname = fname.replace('\n','').replace('/disk/data/VFB/IMAGE_DATA/Janelia2012/TG/logs/',outdir)
try:
if os.path.exists(fname):
os.rename(fname,fname.replace('.lsm','~.lsm').replace('.raw','~.raw'))
basename = fname.replace(outdir,'').replace('.lsm','').replace('20130404_s/','').replace('.raw','').replace('Rigid/','').replace('/groups/sciserv/flyolympiad/vnc_align/20130404_lsms/','')
with open("PLwarp.log", "a") as myfile: # Log entry for process time and error checking
myfile.write(basename + ', Started JH warp, ' + procid + ', ' + str(datetime.datetime.now()) + '\n')
FloatFile = fname.replace('.lsm','~.lsm').replace('.raw','~.raw')
GxDF = outdir + basename + '-global.raw'
Goutput = basename + '-rigid.raw'
Axform = outdir + basename + '-rigid-affine.xform'
Foutput = Goutput.replace('-rigid.raw', '-rigid_C2.nrrd')
SigFile = Goutput.replace('-rigid.raw', '-rigid_C1.nrrd')
W5xform = outdir + basename + '-rigid-fastwarp.xform'
W5output = outdir + basename + '-rigid-BGwarp.nrrd'
Wsigout = outdir + basename + '-rigid-SGwarp.nrrd'
Routput = basename + '-rigid-warp.raw'
Loutput = basename + '-rigid-warp-local'
print 'Warping file %s...' % fname
#check for complete skip
if os.path.exists(W5xform):
print 'Warp5 output already exists - skipping.'
else:
#Generate the Initial Transform
if os.path.exists(Goutput):
print 'Global alignment already exists - skipping.'
else:
return_code = subprocess.call('nice ' + ba + ' -t %s -s %s -o %s -F %s -w 0 -C 0 -c 1 -B 1024 -Y' % (TfileR, FloatFile, Goutput, GxDF), shell=True)
print 'Brain Aligner Global alignment returned: %d' % return_code
#Convert raw to nrrd
return_code = subprocess.call('nice xvfb-run ' + fiji + ' -macro %s %s' % (Rawconv, Goutput), shell=True)
print 'Fiji/ImageJ conversion returned: %d' % return_code
#Generate the Affine Transform
if os.path.exists(Axform):
print 'Affine xform already exists - skipping.'
else:
FloatFile = Foutput
return_code = subprocess.call('nice ' + cmtkdir + 'registration --dofs 6,9 --auto-multi-levels 4 --match-histograms -o %s %s %s' % (Axform + '_part', Tfile, FloatFile), shell=True)
os.rename(Axform + '_part', Axform)
print 'registration returned: %d' % return_code
#Generate the Warped Transform
if os.path.exists(W5xform):
print 'Warp5 xform already exists - skipping.'
else:
return_code = subprocess.call('nice ' + cmtkdir + 'warp -o %s --grid-spacing 80 --exploration 30 --coarsest 4 --match-histograms --accuracy 0.2 --refine 4 --energy-weight 1e-1 --initial %s %s %s' % (W5xform + '_part', Axform, Tfile, FloatFile), shell=True) #coarsest adjusted from 8 to 4 as per greg sug.
os.rename(W5xform + '_part', W5xform)
print 'warp (5) returned: %d' % return_code
#Output a file to show the Warped Transform
if os.path.exists(W5output):
print 'Warp5 output already exists - skipping.'
else:
return_code = subprocess.call('nice ' + cmtkdir + 'reformatx -o %s --floating %s %s %s' % (W5output, FloatFile, Tfile, W5xform), shell=True)
print 'reformatx returned: %d' % return_code
print 'Completed background warpimg for %s.' % basename
if os.path.exists(Wsigout):
print 'Signal warp output already exists - skipping.'
else:
return_code = subprocess.call('nice ' + cmtkdir + 'reformatx -o %s --floating %s %s %s' % (Wsigout, SigFile, Tfile, W5xform), shell=True)
print 'reformatx returned: %d' % return_code
print 'Completed signal warpimg for %s.' % basename
if os.path.exists(Routput):
print 'RAW warp output already exists - skipping.'
else:
return_code = subprocess.call('nice xvfb-run ' + fiji + ' -macro %s %s' % (Nrrdconv, Routput), shell=True)
print 'Fiji returned: %d' % return_code
print 'Completed generating RAW warp for %s.' % basename
# if os.path.exists(Loutput + '.raw'):
# print 'Brianaligner local output already exists - skipping.'
# else:
# return_code = subprocess.call('nice ' + ba + ' -t %s -s %s -L %s -o %s -w 10 -C 0 -c 0 -H 2 -B 1024' % (TfileR, Routput, TfileM, Loutput + '.raw'), shell=True) #
# print 'Brainaligner returned: %d' % return_code
# print 'Completed generating RAW warp for %s.' % basename
if os.path.exists(Routput + '_qual.csv'):
print 'Quality measure already exists - skipping.'
else:
return_code = subprocess.call('nice python %s %s %s %s_qual.csv' % (Qual, W5output, Tfile, Routput), shell=True)
print 'Qual returned: %d' % return_code
print 'Completed generating Qual measure for %s.' % basename
if os.path.exists(W5output):
#os.remove(fname.replace('_blue',''))
#shutil.move(fname.replace('_blue',''),fname.replace('logs/','logs/nrrds/'))
#os.remove(Goutput)
#os.remove(Ioutput) Add if used
#shutil.rmtree(Axform, ignore_errors=True)
#os.remove(Aoutput)
#os.remove(W5xform) #Needed for Signal Channel Warp
with open("PLdone.txt", "a") as myfile:
myfile.write(Routput + '\n')
#os.remove(W5output) #Needed for checking only
print 'Clean-up for %s done.' % basename
with open("PLwarp.log", "a") as myfile: # Log entry for process time and error checking
myfile.write(basename + ', Finished JH warp, ' + procid + ', ' + str(datetime.datetime.now()) + '\n')
else:
print 'Failed warpimg for %s.' % basename
os.rename(fname.replace('_blue',''),fname.replace('_blue','_blue_error'))
with open("PLwarp.log", "a") as myfile: # Log entry for process time and error checking
myfile.write(basename + ', Failed JH warp, ' + procid + ', ' + str(datetime.datetime.now()) + '\n')
except OSError as e:
print 'Skiping file'
with open("PLwarp.log", "a") as myfile: # Log entry for process time and error checking
myfile.write(basename + ', Error during JH warp: ' + e.strerror + ', ' + procid + ', ' + str(datetime.datetime.now()) + '\n')
print 'All Done.'
| mit |
tartopum/Lactum | setup.py | 1 | 1422 | import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
import lactum
class PyTest(TestCommand):
    """Custom ``setup.py test`` command that delegates to pytest."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        # No extra selection: run the whole suite.
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Imported here so setup.py stays loadable without pytest installed.
        import pytest
        sys.exit(pytest.main(self.test_args))
# Long description for PyPI, taken verbatim from the README.
with open("README.md", "r") as f:
    readme = f.read()
def reqs(*f):
    """Parse a requirements file into a list of requirement strings.

    ``*f`` are path components joined under the current working directory.
    Full-line and inline ``#`` comments are stripped; blank lines are
    dropped.
    """
    def strip_comments(line):
        # Drop everything after the first '#' plus surrounding whitespace.
        return line.split("#", 1)[0].strip()

    path = os.path.join(os.getcwd(), *f)
    # 'with' closes the file handle; the original left it open.
    with open(path) as handle:
        return [req for req in (strip_comments(l) for l in handle) if req]
# Runtime requirements plus the dev/test extras.
requirements = reqs("requirements.txt")
test_requirements = reqs("requirements-dev.txt")
# NOTE(review): the [1:] presumably drops a leading "-r requirements.txt"
# include line from requirements-dev.txt -- confirm against that file.
test_requirements = requirements + test_requirements[1:]
# Standard setuptools metadata.  ``cmdclass`` routes ``setup.py test``
# through the PyTest command defined above.
setup(
    name="lactum",
    description="",
    long_description=readme,
    author="Vayel",
    author_email="vincent.lefoulon@free.fr",
    url="https://github.com/tartopum/Lactum",
    packages=["lactum"],
    package_dir={"lactum": "lactum"},
    include_package_data=True,
    install_requires=requirements,
    license="MIT",
    zip_safe=False,
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3.5"
    ],
    cmdclass={"test": PyTest},
    tests_require=test_requirements
)
| mit |
funkring/fdoo | addons-funkring/at_sale_layout_ext/sale.py | 1 | 1573 | # -*- coding: utf-8 -*-
#############################################################################
#
# Copyright (c) 2007 Martin Reisenhofer <martin.reisenhofer@funkring.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class SaleLayoutCategory(osv.Model):
    """Extend sale_layout.category with a back-reference to its sale order."""
    _inherit = "sale_layout.category"

    _columns = {
        # Owning order; category rows are deleted together with the order.
        "order_id" : fields.many2one("sale.order", "Order", ondelete="cascade")
    }
class sale_order(osv.Model):
    """Extend sale.order with its list of layout categories."""
    _inherit = "sale.order"

    _columns = {
        # Inverse of SaleLayoutCategory.order_id defined above.
        "layout_categ_ids" : fields.one2many("sale_layout.category", "order_id", "Layout Categories")
    }
class sale_order_line(osv.Model):
    """Expose the product's category directly on the order line."""
    _inherit = "sale.order.line"

    _columns = {
        # Read-only related field following product_id -> categ_id.
        "prod_categ_id" : fields.related("product_id", "categ_id", string="Category", type="many2one", relation="product.category", readonly=True)
    }
| agpl-3.0 |
arrabito/DIRAC | Core/Utilities/Time.py | 2 | 8945 | """
DIRAC Times module
Support for basic Date and Time operations
based on system datetime module.
It provides common interface to UTC timestamps,
converter to string types and back.
The following datetime classes are used in the returned objects:
- dateTime = datetime.datetime
- date = datetime.date
- time = datetime.timedelta
Useful timedelta constant are also provided to
define time intervals.
Notice: datetime.timedelta objects allow multiplication and division by interger
but not by float. Thus:
- DIRAC.Times.second * 1.5 is not allowed
- DIRAC.Times.second * 3 / 2 is allowed
An timeInterval class provides a method to check
if a give datetime is in the defined interval.
"""
import time as nativetime
import datetime
from types import StringTypes
import sys
__RCSID__ = "$Id$"
# Some useful constants for time operations
microsecond = datetime.timedelta(microseconds=1)
second = datetime.timedelta(seconds=1)
minute = datetime.timedelta(minutes=1)
hour = datetime.timedelta(hours=1)
day = datetime.timedelta(days=1)
week = datetime.timedelta(days=7)
# Reference instant: serves both as the "2K" epoch (2000-01-01, see
# to2K/from2K) and as an instance handle for datetime classmethods
# (dt.utcnow(), dt.fromtimestamp(), ...).
dt = datetime.datetime(2000, 1, 1)
def timeThis(method):
    """ Function to be used as a decorator for timing other functions/methods
    """

    def timed(*args, **kw):
        """ What actually times
        """
        ts = nativetime.time()
        result = method(*args, **kw)

        # When stdout is an interactive terminal, skip the timing report.
        if sys.stdout.isatty():
            return result

        te = nativetime.time()
        pre = dt.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC ")
        # Best-effort prefix enrichment from the decorated method's ``self``
        # (args[0]): try logger name + sub-name + transformation string,
        # then progressively barer combinations as attributes are missing.
        try:
            pre += args[0].log.getName() + '/' + args[0].log.getSubName() + ' TIME: ' + args[0].transString
        except AttributeError:
            try:
                pre += args[0].log.getName() + ' TIME: ' + args[0].transString
            except AttributeError:
                try:
                    pre += args[0].log.getName() + '/' + args[0].log.getSubName() + ' TIME: '
                except AttributeError:
                    pre += 'TIME: '
        except IndexError:
            # Plain function (no args at all).
            pre += 'TIME: '
        # Report the size of the first positional list/dict argument, or of
        # the first keyword argument's value as a fallback.
        argsLen = ''
        if args:
            try:
                if isinstance(args[1], (list, dict)):
                    argsLen = "arguments len: %d" % len(args[1])
            except IndexError:
                if kw:
                    try:
                        if isinstance(kw.items()[0][1], (list, dict)):
                            argsLen = "arguments len: %d" % len(kw.items()[0][1])
                    except IndexError:
                        argsLen = ''
        # Python 2 print statement (this module predates Python 3).
        print "%s Exec time ===> function %r %s -> %2.2f sec" % (pre, method.__name__, argsLen, te - ts)
        return result

    return timed
def dateTime():
    """
    Return the current UTC date and time as a datetime.datetime object.
    """
    return datetime.datetime.utcnow()
def date(myDateTime=None):
    """
    Return the current UTC date, as a datetime.date object.

    If a datetime.datetime is passed as argument, its date component is
    returned instead.
    """
    # isinstance() rather than an exact type() comparison, so that
    # datetime.datetime subclasses (e.g. test doubles) are handled too;
    # behavior is unchanged for plain datetime instances.
    if isinstance(myDateTime, datetime.datetime):
        return myDateTime.date()
    return dateTime().date()
def time(myDateTime=None):
    """
    Return the current UTC time of day, as a datetime.timedelta measured
    from midnight.

    If a datetime.datetime is passed as argument, its time of day is
    returned instead.
    """
    # isinstance() rather than an exact type() comparison, so that
    # datetime.datetime subclasses are handled too; behavior is unchanged
    # for plain datetime instances.
    if not isinstance(myDateTime, datetime.datetime):
        myDateTime = dateTime()
    return myDateTime - datetime.datetime(myDateTime.year, myDateTime.month, myDateTime.day)
def toEpoch(dateTimeObject=None):
    """
    Return the number of seconds since the Unix epoch for the given
    datetime (interpreted in local time by time.mktime); defaults to the
    current UTC datetime.
    """
    when = dateTimeObject or dateTime()
    return nativetime.mktime(when.timetuple())
def fromEpoch(epoch):
    """
    Return the (local-time) datetime corresponding to the given number of
    seconds since the Unix epoch.
    """
    return datetime.datetime.fromtimestamp(epoch)
def to2K(dateTimeObject=None):
    """
    Return the seconds (with microsecond precision) elapsed between
    2000-01-01 and the given datetime (current UTC datetime by default).
    """
    delta = (dateTimeObject or dateTime()) - dt
    # Explicit arithmetic kept (rather than total_seconds()) to preserve
    # the exact float result of the original expression.
    return delta.days * 86400 + delta.seconds + delta.microseconds / 1000000.
def from2K(seconds2K=None):
    """
    Return the datetime corresponding to the given number of seconds since
    2000-01-01 (the 2K epoch itself when omitted).
    """
    seconds2K = seconds2K or to2K(dt)
    whole = int(seconds2K)
    fraction = int(seconds2K % 1 * 1000000)
    return dt + whole * second + fraction * microsecond
def toString(myDate=None):
    """
    Convert to String
    if argument type is neither _dateTimeType, _dateType, nor _timeType
    the current dateTime converted to String is returned instead

    Notice: datetime.timedelta are converted to strings using the format:
      [day] days [hour]:[min]:[sec]:[microsec]
      where hour, min, sec, microsec are always positive integers,
      and day carries the sign.
    To keep internal consistency we are using:
      [hour]:[min]:[sec]:[microsec]
      where min, sec, microsec are alwys positive intergers and hour carries the
      sign.
    """
    if type(myDate) == _dateTimeType:
        return str(myDate)

    elif type(myDate) == _dateType:
        return str(myDate)

    elif type(myDate) == _timeType:
        # NOTE: '/' here is Python 2 integer division; timedelta normalizes
        # to (days, seconds in [0, 86400)), so the sign is carried entirely
        # by the hours term (days * 24 + ...), as the docstring describes.
        return '%02d:%02d:%02d.%06d' % (myDate.days * 24 + myDate.seconds / 3600,
                                        myDate.seconds % 3600 / 60,
                                        myDate.seconds % 60,
                                        myDate.microseconds)
    else:
        # Unrecognized argument: fall back to the current UTC datetime.
        return toString(dateTime())
def fromString(myDate=None):
    """
    Convert date/time/datetime String back to appropriated objects

    The format of the string it is assume to be that returned by toString method.
    See notice on toString method
    On Error, return None
    """
    if StringTypes.__contains__(type(myDate)):
        # "YYYY-MM-DD HH:MM:SS[.ffffff]" -> datetime.datetime
        if myDate.find(' ') > 0:
            dateTimeTuple = myDate.split(' ')
            dateTuple = dateTimeTuple[0].split('-')
            try:
                # NOTE(review): this first attempt passes year/month/day as
                # strings; the resulting exception is caught below and the
                # int() retry is the path that actually succeeds.
                return (datetime.datetime(year=dateTuple[0],
                                          month=dateTuple[1],
                                          day=dateTuple[2]) +
                        fromString(dateTimeTuple[1]))
                # return dt.combine( fromString( dateTimeTuple[0] ),
                #                    fromString( dateTimeTuple[1] ) )
            except:
                try:
                    return (datetime.datetime(year=int(dateTuple[0]),
                                              month=int(dateTuple[1]),
                                              day=int(dateTuple[2])) +
                            fromString(dateTimeTuple[1]))
                except ValueError:
                    return None
                # return dt.combine( fromString( dateTimeTuple[0] ),
                #                    fromString( dateTimeTuple[1] ) )
        # "HH:MM:SS[.ffffff]" -> datetime.timedelta
        elif myDate.find(':') > 0:
            timeTuple = myDate.replace('.', ':').split(':')
            try:
                if len(timeTuple) == 4:
                    return datetime.timedelta(hours=int(timeTuple[0]),
                                              minutes=int(timeTuple[1]),
                                              seconds=int(timeTuple[2]),
                                              microseconds=int(timeTuple[3]))
                elif len(timeTuple) == 3:
                    try:
                        return datetime.timedelta(hours=int(timeTuple[0]),
                                                  minutes=int(timeTuple[1]),
                                                  seconds=int(timeTuple[2]),
                                                  microseconds=0)
                    except ValueError:
                        return None
                else:
                    return None
            except:
                return None
        # "YYYY-MM-DD" -> datetime.date
        elif myDate.find('-') > 0:
            dateTuple = myDate.split('-')
            try:
                return datetime.date(int(dateTuple[0]), int(dateTuple[1]), int(dateTuple[2]))
            except:
                return None
    return None
class timeInterval:
    """
    Simple class to define a timeInterval object able to check if a given
    dateTime is inside
    """

    def __init__(self, initialDateTime, intervalTimeDelta):
        """
        Initialization method, it requires the initial dateTime and the
        timedelta that define the limits.
        The upper limit is not included thus it is [begin,end)
        If not properly initialized an error flag is set, and subsequent calls
        to any method will return None
        """
        if (type(initialDateTime) != _dateTimeType or
                type(intervalTimeDelta) != _timeType):
            self.__error = True
            # NOTE(review): a return value from __init__ is discarded by
            # Python; only the early exit matters here.
            return None
        self.__error = False

        # A negative delta means the interval extends backwards from the
        # given instant; normalize so that start <= end.
        if intervalTimeDelta.days < 0:
            self.__startDateTime = initialDateTime + intervalTimeDelta
            self.__endDateTime = initialDateTime
        else:
            self.__startDateTime = initialDateTime
            self.__endDateTime = initialDateTime + intervalTimeDelta

    def includes(self, myDateTime):
        """
        Return True/False whether myDateTime lies in [start, end);
        None for a badly initialized interval or a non-datetime argument.
        """
        if self.__error:
            return None
        if type(myDateTime) != _dateTimeType:
            return None
        if myDateTime < self.__startDateTime:
            return False
        if myDateTime >= self.__endDateTime:
            return False
        return True
def queryTime(f):
    """ Decorator that stamps the elapsed wall-clock seconds into the
    'QueryTime' key of a successful result dict, unless already present.
    """
    def measureQueryTime(*args, **kwargs):
        started = nativetime.time()
        result = f(*args, **kwargs)
        elapsed = nativetime.time() - started
        if result['OK'] and 'QueryTime' not in result:
            result['QueryTime'] = elapsed
        return result
    return measureQueryTime
# Concrete types produced by the factory functions above; used by the
# type() comparisons throughout this module.
_dateTimeType = type(dateTime())
_dateType = type(date())
_timeType = type(time())

# Convenience groupings of the accepted type combinations.
_allTimeTypes = (_dateTimeType, _timeType)
_allDateTypes = (_dateTimeType, _dateType)
_allTypes = (_dateTimeType, _dateType, _timeType)
| gpl-3.0 |
ovnicraft/edx-platform | openedx/core/djangoapps/user_api/preferences/tests/test_views.py | 49 | 22793 | # -*- coding: utf-8 -*-
"""
Unit tests for preference APIs.
"""
import unittest
import ddt
import json
from mock import patch
from django.core.urlresolvers import reverse
from django.conf import settings
from django.test.testcases import TransactionTestCase
from rest_framework.test import APIClient
from student.tests.factories import UserFactory
from ...accounts.tests.test_views import UserAPITestCase
from ..api import set_user_preference
from .test_api import get_expected_validation_developer_message, get_expected_key_error_user_message
# 256-character key used to trigger key-length validation errors --
# presumably one over the model's maximum key length; TODO confirm.
TOO_LONG_PREFERENCE_KEY = u"x" * 256
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TestPreferencesAPI(UserAPITestCase):
    """
    Unit tests /api/user/v1/accounts/{username}/

    Covers authentication, authorization (owner vs. other user vs. staff),
    and GET/PATCH semantics of the bulk preferences endpoint.
    """
    def setUp(self):
        super(TestPreferencesAPI, self).setUp()
        self.url_endpoint_name = "preferences_api"
        self.url = reverse(self.url_endpoint_name, kwargs={'username': self.user.username})

    def test_anonymous_access(self):
        """
        Test that an anonymous client (not logged in) cannot call GET or PATCH.
        """
        self.send_get(self.anonymous_client, expected_status=401)
        self.send_patch(self.anonymous_client, {}, expected_status=401)

    def test_unsupported_methods(self):
        """
        Test that DELETE, POST, and PUT are not supported.
        """
        self.client.login(username=self.user.username, password=self.test_password)
        self.assertEqual(405, self.client.put(self.url).status_code)
        self.assertEqual(405, self.client.post(self.url).status_code)
        self.assertEqual(405, self.client.delete(self.url).status_code)

    def test_get_different_user(self):
        """
        Test that a client (logged in) cannot get the preferences information for a different client.
        """
        self.different_client.login(username=self.different_user.username, password=self.test_password)
        self.send_get(self.different_client, expected_status=404)

    @ddt.data(
        ("client", "user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_get_unknown_user(self, api_client, username):
        """
        Test that requesting a user who does not exist returns a 404.
        """
        client = self.login_client(api_client, username)
        response = client.get(reverse(self.url_endpoint_name, kwargs={'username': "does_not_exist"}))
        self.assertEqual(404, response.status_code)

    def test_get_preferences_default(self):
        """
        Test that a client (logged in) can get her own preferences information (verifying the default
        state before any preferences are stored).
        """
        self.client.login(username=self.user.username, password=self.test_password)
        response = self.send_get(self.client)
        self.assertEqual({}, response.data)

    @ddt.data(
        ("client", "user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_get_preferences(self, api_client, user):
        """
        Test that a client (logged in) can get her own preferences information. Also verifies that a "is_staff"
        user can get the preferences information for other users.
        """
        # Create some test preferences values.
        set_user_preference(self.user, "dict_pref", {"int_key": 10})
        set_user_preference(self.user, "string_pref", "value")

        # Log in the client and do the GET.
        client = self.login_client(api_client, user)
        response = self.send_get(client)
        # Note: dict values come back stringified by the API.
        self.assertEqual({"dict_pref": "{'int_key': 10}", "string_pref": "value"}, response.data)

    @ddt.data(
        ("client", "user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_patch_unknown_user(self, api_client, user):
        """
        Test that trying to update preferences for a user who does not exist returns a 404.
        """
        client = self.login_client(api_client, user)
        response = client.patch(
            reverse(self.url_endpoint_name, kwargs={'username': "does_not_exist"}),
            data=json.dumps({"string_pref": "value"}), content_type="application/merge-patch+json"
        )
        self.assertEqual(404, response.status_code)

    def test_patch_bad_content_type(self):
        """
        Test the behavior of patch when an incorrect content_type is specified.
        """
        self.client.login(username=self.user.username, password=self.test_password)
        # Only application/merge-patch+json is accepted.
        self.send_patch(self.client, {}, content_type="application/json", expected_status=415)
        self.send_patch(self.client, {}, content_type="application/xml", expected_status=415)

    def test_create_preferences(self):
        """
        Test that a client (logged in) can create her own preferences information.
        """
        self._do_create_preferences_test(True)

    def test_create_preferences_inactive(self):
        """
        Test that a client (logged in but not active) can create her own preferences information.
        """
        self._do_create_preferences_test(False)

    def _do_create_preferences_test(self, is_active):
        """
        Internal helper to generalize the creation of a set of preferences
        """
        self.client.login(username=self.user.username, password=self.test_password)
        if not is_active:
            self.user.is_active = False
            self.user.save()
        self.send_patch(
            self.client,
            {
                "dict_pref": {"int_key": 10},
                "string_pref": "value",
            },
            expected_status=204
        )
        response = self.send_get(self.client)
        self.assertEqual({u"dict_pref": u"{u'int_key': 10}", u"string_pref": u"value"}, response.data)

    @ddt.data(
        ("different_client", "different_user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_create_preferences_other_user(self, api_client, user):
        """
        Test that a client (logged in) cannot create preferences for another user.
        """
        client = self.login_client(api_client, user)
        # Staff gets an explicit 403; other users get 404 (existence hidden).
        self.send_patch(
            client,
            {
                "dict_pref": {"int_key": 10},
                "string_pref": "value",
            },
            expected_status=403 if user == "staff_user" else 404,
        )

    def test_update_preferences(self):
        """
        Test that a client (logged in) can update her own preferences information.
        """
        # Create some test preferences values.
        set_user_preference(self.user, "dict_pref", {"int_key": 10})
        set_user_preference(self.user, "string_pref", "value")
        set_user_preference(self.user, "extra_pref", "extra_value")

        # Send the patch request: None deletes a preference (merge-patch).
        self.client.login(username=self.user.username, password=self.test_password)
        self.send_patch(
            self.client,
            {
                "string_pref": "updated_value",
                "new_pref": "new_value",
                "extra_pref": None,
            },
            expected_status=204
        )

        # Verify that GET returns the updated preferences
        response = self.send_get(self.client)
        expected_preferences = {
            "dict_pref": "{'int_key': 10}",
            "string_pref": "updated_value",
            "new_pref": "new_value",
        }
        self.assertEqual(expected_preferences, response.data)

    def test_update_preferences_bad_data(self):
        """
        Test that a client (logged in) receives appropriate errors for a bad update.
        """
        # Create some test preferences values.
        set_user_preference(self.user, "dict_pref", {"int_key": 10})
        set_user_preference(self.user, "string_pref", "value")
        set_user_preference(self.user, "extra_pref", "extra_value")

        # Send the patch request
        self.client.login(username=self.user.username, password=self.test_password)
        response = self.send_patch(
            self.client,
            {
                "string_pref": "updated_value",
                TOO_LONG_PREFERENCE_KEY: "new_value",
                "new_pref": "new_value",
                u"empty_pref_ȻħȺɍłɇs": "",
            },
            expected_status=400
        )
        self.assertTrue(response.data.get("field_errors", None))
        field_errors = response.data["field_errors"]
        self.assertEquals(
            field_errors,
            {
                TOO_LONG_PREFERENCE_KEY: {
                    "developer_message": get_expected_validation_developer_message(
                        TOO_LONG_PREFERENCE_KEY, "new_value"
                    ),
                    "user_message": get_expected_key_error_user_message(
                        TOO_LONG_PREFERENCE_KEY, "new_value"
                    ),
                },
                u"empty_pref_ȻħȺɍłɇs": {
                    "developer_message": u"Preference 'empty_pref_ȻħȺɍłɇs' cannot be set to an empty value.",
                    "user_message": u"Preference 'empty_pref_ȻħȺɍłɇs' cannot be set to an empty value.",
                },
            }
        )

        # Verify that GET returns the original preferences
        response = self.send_get(self.client)
        expected_preferences = {
            u"dict_pref": u"{'int_key': 10}",
            u"string_pref": u"value",
            u"extra_pref": u"extra_value",
        }
        self.assertEqual(expected_preferences, response.data)

    def test_update_preferences_bad_request(self):
        """
        Test that a client (logged in) receives appropriate errors for a bad request.
        """
        self.client.login(username=self.user.username, password=self.test_password)

        # Verify a non-dict request
        response = self.send_patch(self.client, "non_dict_request", expected_status=400)
        self.assertEqual(
            response.data,
            {
                "developer_message": u"No data provided for user preference update",
                "user_message": u"No data provided for user preference update"
            }
        )

        # Verify an empty dict request
        response = self.send_patch(self.client, {}, expected_status=400)
        self.assertEqual(
            response.data,
            {
                "developer_message": u"No data provided for user preference update",
                "user_message": u"No data provided for user preference update"
            }
        )

    @ddt.data(
        ("different_client", "different_user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_update_preferences_other_user(self, api_client, user):
        """
        Test that a client (logged in) cannot update preferences for another user.
        """
        # Create some test preferences values.
        set_user_preference(self.user, "dict_pref", {"int_key": 10})
        set_user_preference(self.user, "string_pref", "value")
        set_user_preference(self.user, "extra_pref", "extra_value")

        # Send the patch request
        client = self.login_client(api_client, user)
        self.send_patch(
            client,
            {
                "string_pref": "updated_value",
                "new_pref": "new_value",
                "extra_pref": None,
            },
            expected_status=403 if user == "staff_user" else 404
        )
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TestPreferencesAPITransactions(TransactionTestCase):
    """
    Tests the transactional behavior of the preferences API

    Uses TransactionTestCase (real commits/rollbacks, no per-test wrapping
    transaction) so the API's own rollback behavior can be observed.
    """
    test_password = "test"

    def setUp(self):
        super(TestPreferencesAPITransactions, self).setUp()
        self.client = APIClient()
        self.user = UserFactory.create(password=self.test_password)
        self.url = reverse("preferences_api", kwargs={'username': self.user.username})

    @patch('openedx.core.djangoapps.user_api.models.UserPreference.delete')
    def test_update_preferences_rollback(self, delete_user_preference):
        """
        Verify that updating preferences is transactional when a failure happens.
        """
        # Create some test preferences values.
        set_user_preference(self.user, "a", "1")
        set_user_preference(self.user, "b", "2")
        set_user_preference(self.user, "c", "3")

        # Send a PATCH request with two updates and a delete. The delete should fail
        # after one of the updates has happened, in which case the whole operation
        # should be rolled back.
        delete_user_preference.side_effect = [Exception, None]
        self.client.login(username=self.user.username, password=self.test_password)
        json_data = {
            "a": "2",
            "b": None,
            "c": "1",
        }
        response = self.client.patch(self.url, data=json.dumps(json_data), content_type="application/merge-patch+json")
        self.assertEqual(400, response.status_code)

        # Verify that GET returns the original preferences
        response = self.client.get(self.url)
        expected_preferences = {
            "a": "1",
            "b": "2",
            "c": "3",
        }
        self.assertEqual(expected_preferences, response.data)
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TestPreferencesDetailAPI(UserAPITestCase):
    """
    Unit tests /api/user/v1/accounts/{username}/{preference_key}

    Covers GET/PUT/DELETE of a single preference, including auth,
    ownership, validation, and missing-key behavior.
    """
    def setUp(self):
        super(TestPreferencesDetailAPI, self).setUp()
        self.test_pref_key = "test_key"
        self.test_pref_value = "test_value"
        set_user_preference(self.user, self.test_pref_key, self.test_pref_value)
        self.url_endpoint_name = "preferences_detail_api"
        self._set_url(self.test_pref_key)

    def _set_url(self, preference_key):
        """
        Sets the url attribute including the username and provided preference key
        """
        self.url = reverse(
            self.url_endpoint_name,
            kwargs={'username': self.user.username, 'preference_key': preference_key}
        )

    def test_anonymous_user_access(self):
        """
        Test that an anonymous client (logged in) cannot manipulate preferences.
        """
        self.send_get(self.anonymous_client, expected_status=401)
        self.send_put(self.anonymous_client, "new_value", expected_status=401)
        self.send_delete(self.anonymous_client, expected_status=401)

    def test_unsupported_methods(self):
        """
        Test that POST and PATCH are not supported.
        """
        self.client.login(username=self.user.username, password=self.test_password)
        self.assertEqual(405, self.client.post(self.url).status_code)
        self.assertEqual(405, self.client.patch(self.url).status_code)

    def test_different_user_access(self):
        """
        Test that a client (logged in) cannot manipulate a preference for a different client.
        """
        self.different_client.login(username=self.different_user.username, password=self.test_password)
        self.send_get(self.different_client, expected_status=404)
        self.send_put(self.different_client, "new_value", expected_status=404)
        self.send_delete(self.different_client, expected_status=404)

    @ddt.data(
        ("client", "user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_get_unknown_user(self, api_client, username):
        """
        Test that requesting a user who does not exist returns a 404.
        """
        client = self.login_client(api_client, username)
        response = client.get(
            reverse(self.url_endpoint_name, kwargs={'username': "does_not_exist", 'preference_key': self.test_pref_key})
        )
        self.assertEqual(404, response.status_code)

    def test_get_preference_does_not_exist(self):
        """
        Test that a 404 is returned if the user does not have a preference with the given preference_key.
        """
        self._set_url("does_not_exist")
        self.client.login(username=self.user.username, password=self.test_password)
        response = self.send_get(self.client, expected_status=404)
        self.assertIsNone(response.data)

    @ddt.data(
        ("client", "user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_get_preference(self, api_client, user):
        """
        Test that a client (logged in) can get her own preferences information. Also verifies that a "is_staff"
        user can get the preferences information for other users.
        """
        client = self.login_client(api_client, user)
        response = self.send_get(client)
        self.assertEqual(self.test_pref_value, response.data)

        # Test a different value: dict values come back stringified.
        set_user_preference(self.user, "dict_pref", {"int_key": 10})
        self._set_url("dict_pref")
        response = self.send_get(client)
        self.assertEqual("{'int_key': 10}", response.data)

    def test_create_preference(self):
        """
        Test that a client (logged in) can create a preference.
        """
        self._do_create_preference_test(True)

    def test_create_preference_inactive(self):
        """
        Test that a client (logged in but not active) can create a preference.
        """
        self._do_create_preference_test(False)

    def _do_create_preference_test(self, is_active):
        """
        Generalization of the actual test workflow
        """
        self.client.login(username=self.user.username, password=self.test_password)
        if not is_active:
            self.user.is_active = False
            self.user.save()
        self._set_url("new_key")
        new_value = "new value"
        self.send_put(self.client, new_value)
        response = self.send_get(self.client)
        self.assertEqual(new_value, response.data)

    @ddt.data(
        (None,),
        ("",),
        (" ",),
    )
    @ddt.unpack
    def test_create_empty_preference(self, preference_value):
        """
        Test that a client (logged in) cannot create an empty preference.
        """
        self._set_url("new_key")
        self.client.login(username=self.user.username, password=self.test_password)
        response = self.send_put(self.client, preference_value, expected_status=400)
        self.assertEqual(
            response.data,
            {
                "developer_message": u"Preference 'new_key' cannot be set to an empty value.",
                "user_message": u"Preference 'new_key' cannot be set to an empty value."
            }
        )
        self.send_get(self.client, expected_status=404)

    def test_create_preference_too_long_key(self):
        """
        Test that a client cannot create preferences with bad keys
        """
        self.client.login(username=self.user.username, password=self.test_password)

        too_long_preference_key = "x" * 256
        new_value = "new value"
        self._set_url(too_long_preference_key)
        response = self.send_put(self.client, new_value, expected_status=400)
        self.assertEquals(
            response.data,
            {
                "developer_message": get_expected_validation_developer_message(too_long_preference_key, new_value),
                "user_message": get_expected_key_error_user_message(too_long_preference_key, new_value),
            }
        )

    @ddt.data(
        ("different_client", "different_user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_create_preference_other_user(self, api_client, user):
        """
        Test that a client (logged in) cannot create a preference for a different user.
        """
        # Verify that a new preference cannot be created
        self._set_url("new_key")
        client = self.login_client(api_client, user)
        new_value = "new value"
        # Staff gets an explicit 403; other users get 404 (existence hidden).
        self.send_put(client, new_value, expected_status=403 if user == "staff_user" else 404)

    @ddt.data(
        (u"new value",),
        (10,),
        ({u"int_key": 10},)
    )
    @ddt.unpack
    def test_update_preference(self, preference_value):
        """
        Test that a client (logged in) can update a preference.
        """
        self.client.login(username=self.user.username, password=self.test_password)
        self.send_put(self.client, preference_value)
        response = self.send_get(self.client)
        # Non-string values are stored/returned in stringified form.
        self.assertEqual(unicode(preference_value), response.data)

    @ddt.data(
        ("different_client", "different_user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_update_preference_other_user(self, api_client, user):
        """
        Test that a client (logged in) cannot update a preference for another user.
        """
        client = self.login_client(api_client, user)
        new_value = "new value"
        self.send_put(client, new_value, expected_status=403 if user == "staff_user" else 404)

    @ddt.data(
        (None,),
        ("",),
        (" ",),
    )
    @ddt.unpack
    def test_update_preference_to_empty(self, preference_value):
        """
        Test that a client (logged in) cannot update a preference to null.
        """
        self.client.login(username=self.user.username, password=self.test_password)
        response = self.send_put(self.client, preference_value, expected_status=400)
        self.assertEqual(
            response.data,
            {
                "developer_message": u"Preference 'test_key' cannot be set to an empty value.",
                "user_message": u"Preference 'test_key' cannot be set to an empty value."
            }
        )
        # The stored value must be unchanged after the failed update.
        response = self.send_get(self.client)
        self.assertEqual(self.test_pref_value, response.data)

    def test_delete_preference(self):
        """
        Test that a client (logged in) can delete her own preference.
        """
        self.client.login(username=self.user.username, password=self.test_password)

        # Verify that a preference can be deleted
        self.send_delete(self.client)
        self.send_get(self.client, expected_status=404)

        # Verify that deleting a non-existent preference throws a 404
        self.send_delete(self.client, expected_status=404)

    @ddt.data(
        ("different_client", "different_user"),
        ("staff_client", "staff_user"),
    )
    @ddt.unpack
    def test_delete_preference_other_user(self, api_client, user):
        """
        Test that a client (logged in) cannot delete a preference for another user.
        """
        client = self.login_client(api_client, user)
        self.send_delete(client, expected_status=403 if user == "staff_user" else 404)
| agpl-3.0 |
ArtsiomCh/tensorflow | tensorflow/python/saved_model/saved_model.py | 102 | 1626 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convenience functions to save a model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import main_op
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
# Submodule names that form the public tf.saved_model API; everything else
# imported above is stripped from this module's namespace below.
_allowed_symbols = [
    "builder",
    "constants",
    "loader",
    "main_op",
    "signature_constants",
    "signature_def_utils",
    "tag_constants",
    "utils",
]
# Remove every attribute of this module not listed in _allowed_symbols.
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
martinwicke/tensorflow | tensorflow/contrib/metrics/python/kernel_tests/set_ops_test.py | 22 | 34996 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for set_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
_DTYPES = set([
tf.int8, tf.int16, tf.int32, tf.int64, tf.uint8, tf.uint16, tf.string])
def _values(values, dtype):
  # Build a numpy array of test values, using a unicode dtype for tf.string
  # sets so string and numeric test data share one helper.
  # NOTE(review): `np.unicode` was removed in NumPy >= 1.24; this file
  # predates that and assumes an older NumPy — confirm before upgrading.
  return np.array(
      values,
      dtype=(np.unicode if (dtype == tf.string) else dtype.as_numpy_dtype))
def _constant(values, dtype):
  """Return a `tf.constant` of `dtype` built from the given test values."""
  np_values = _values(values, dtype)
  return tf.constant(np_values, dtype=dtype)
def _dense_to_sparse(dense, dtype):
  """Convert a ragged list-of-lists into a `tf.SparseTensor` of `dtype`.

  The dense shape is (number of rows, length of the longest row); every
  cell present in `dense` becomes an explicit sparse entry. String dtypes
  get their cells stringified.
  """
  # Dense shape: rows x longest row.
  longest_row = 0
  for row in dense:
    longest_row = max(longest_row, len(row))
  dense_shape = [len(dense), longest_row]

  sparse_indices = []
  sparse_values = []
  for r, row in enumerate(dense):
    for c, cell in enumerate(row):
      sparse_indices.append([r, c])
      sparse_values.append(str(cell) if dtype == tf.string else cell)

  return tf.SparseTensor(
      tf.constant(sparse_indices, tf.int64),
      tf.constant(sparse_values, dtype),
      tf.constant(dense_shape, tf.int64))
class SetOpsTest(test_util.TensorFlowTestCase):
  """Tests for tf.contrib.metrics set operations.

  Exercises set_size, set_intersection, set_difference and set_union over
  dense and sparse inputs, in 2-D and 3-D, for every dtype in _DTYPES.
  Inline `# r,c` comments throughout mark which (row, column) group of the
  outer dimensions each literal belongs to.
  """

  def test_set_size_2d(self):
    for dtype in _DTYPES:
      self._test_set_size_2d(dtype)

  def _test_set_size_2d(self, dtype):
    self.assertAllEqual(
        [1], self._set_size(_dense_to_sparse([[1]], dtype)))
    self.assertAllEqual(
        [2, 1], self._set_size(_dense_to_sparse([[1, 9], [1]], dtype)))
    self.assertAllEqual(
        [3, 0], self._set_size(_dense_to_sparse([[1, 9, 2], []], dtype)))
    self.assertAllEqual(
        [0, 3], self._set_size(_dense_to_sparse([[], [1, 9, 2]], dtype)))

  def test_set_size_duplicates_2d(self):
    for dtype in _DTYPES:
      self._test_set_size_duplicates_2d(dtype)

  def _test_set_size_duplicates_2d(self, dtype):
    # set_size counts distinct values, so duplicates collapse.
    self.assertAllEqual(
        [1], self._set_size(_dense_to_sparse([[1, 1, 1, 1, 1, 1]], dtype)))
    self.assertAllEqual(
        [2, 7, 3, 0, 1],
        self._set_size(_dense_to_sparse([
            [1, 9],
            [6, 7, 8, 8, 6, 7, 5, 3, 3, 0, 6, 6, 9, 0, 0, 0],
            [999, 1, -1000],
            [],
            [-1]
        ], dtype)))

  def test_set_size_3d(self):
    for dtype in _DTYPES:
      self._test_set_size_3d(dtype)

  def test_set_size_3d_invalid_indices(self):
    for dtype in _DTYPES:
      self._test_set_size_3d(dtype, invalid_indices=True)

  def _test_set_size_3d(self, dtype, invalid_indices=False):
    if invalid_indices:
      # Same entries as below, but deliberately out of row-major order so
      # index validation must reject them.
      indices = tf.constant([
          [0, 1, 0], [0, 1, 1],  # 0,1
          [1, 0, 0],  # 1,0
          [1, 1, 0], [1, 1, 1], [1, 1, 2],  # 1,1
          [0, 0, 0], [0, 0, 2],  # 0,0
          # 2,0
          [2, 1, 1]  # 2,1
      ], tf.int64)
    else:
      indices = tf.constant([
          [0, 0, 0], [0, 0, 2],  # 0,0
          [0, 1, 0], [0, 1, 1],  # 0,1
          [1, 0, 0],  # 1,0
          [1, 1, 0], [1, 1, 1], [1, 1, 2],  # 1,1
          # 2,0
          [2, 1, 1]  # 2,1
      ], tf.int64)
    sp = tf.SparseTensor(
        indices,
        _constant([
            1, 9,  # 0,0
            3, 3,  # 0,1
            1,  # 1,0
            9, 7, 8,  # 1,1
            # 2,0
            5  # 2,1
        ], dtype),
        tf.constant([3, 2, 3], tf.int64))
    if invalid_indices:
      with self.assertRaisesRegexp(tf.OpError, "out of order"):
        self._set_size(sp)
    else:
      self.assertAllEqual([
          [2,  # 0,0
           1],  # 0,1
          [1,  # 1,0
           3],  # 1,1
          [0,  # 2,0
           1]  # 2,1
      ], self._set_size(sp))

  def _set_size(self, sparse_data):
    """Run set_size on `sparse_data` and return the counts array."""
    # Validate that we get the same results with or without `validate_indices`.
    ops = [
        tf.contrib.metrics.set_size(sparse_data, validate_indices=True),
        tf.contrib.metrics.set_size(sparse_data, validate_indices=False)
    ]
    for op in ops:
      self.assertEqual(None, op.get_shape().dims)
      self.assertEqual(tf.int32, op.dtype)
    with self.test_session() as sess:
      results = sess.run(ops)
    self.assertAllEqual(results[0], results[1])
    return results[0]

  def test_set_intersection_multirow_2d(self):
    for dtype in _DTYPES:
      self._test_set_intersection_multirow_2d(dtype)

  def _test_set_intersection_multirow_2d(self, dtype):
    a_values = [[9, 1, 5], [2, 4, 3]]
    b_values = [[1, 9], [1]]
    expected_indices = [[0, 0], [0, 1]]
    expected_values = _values([1, 9], dtype)
    expected_shape = [2, 2]
    expected_counts = [2, 0]

    # Dense to sparse.
    a = _constant(a_values, dtype=dtype)
    sp_b = _dense_to_sparse(b_values, dtype=dtype)
    intersection = self._set_intersection(a, sp_b)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, intersection, dtype=dtype)
    self.assertAllEqual(expected_counts, self._set_intersection_count(a, sp_b))

    # Sparse to sparse.
    sp_a = _dense_to_sparse(a_values, dtype=dtype)
    intersection = self._set_intersection(sp_a, sp_b)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, intersection, dtype=dtype)
    self.assertAllEqual(expected_counts,
                        self._set_intersection_count(sp_a, sp_b))

  def test_dense_set_intersection_multirow_2d(self):
    for dtype in _DTYPES:
      self._test_dense_set_intersection_multirow_2d(dtype)

  def _test_dense_set_intersection_multirow_2d(self, dtype):
    a_values = [[9, 1, 5], [2, 4, 3]]
    b_values = [[1, 9], [1, 5]]
    expected_indices = [[0, 0], [0, 1]]
    expected_values = _values([1, 9], dtype)
    expected_shape = [2, 2]
    expected_counts = [2, 0]

    # Dense to dense.
    a = _constant(a_values, dtype)
    b = _constant(b_values, dtype)
    intersection = self._set_intersection(a, b)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, intersection, dtype=dtype)
    self.assertAllEqual(expected_counts, self._set_intersection_count(a, b))

  def test_set_intersection_duplicates_2d(self):
    for dtype in _DTYPES:
      self._test_set_intersection_duplicates_2d(dtype)

  def _test_set_intersection_duplicates_2d(self, dtype):
    a_values = [[1, 1, 3]]
    b_values = [[1]]
    expected_indices = [[0, 0]]
    expected_values = _values([1], dtype)
    expected_shape = [1, 1]
    expected_counts = [1]

    # Dense to dense.
    a = _constant(a_values, dtype=dtype)
    b = _constant(b_values, dtype=dtype)
    intersection = self._set_intersection(a, b)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, intersection, dtype=dtype)
    self.assertAllEqual(expected_counts, self._set_intersection_count(a, b))

    # Dense to sparse.
    sp_b = _dense_to_sparse(b_values, dtype=dtype)
    intersection = self._set_intersection(a, sp_b)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, intersection, dtype=dtype)
    self.assertAllEqual(expected_counts, self._set_intersection_count(a, sp_b))

    # Sparse to sparse.
    sp_a = _dense_to_sparse(a_values, dtype=dtype)
    intersection = self._set_intersection(sp_a, sp_b)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, intersection, dtype=dtype)
    self.assertAllEqual(expected_counts,
                        self._set_intersection_count(sp_a, sp_b))

  def test_set_intersection_3d(self):
    for dtype in _DTYPES:
      self._test_set_intersection_3d(dtype=dtype)

  def test_set_intersection_3d_invalid_indices(self):
    for dtype in _DTYPES:
      self._test_set_intersection_3d(dtype=dtype, invalid_indices=True)

  def _test_set_intersection_3d(self, dtype, invalid_indices=False):
    if invalid_indices:
      # Out-of-order indices must be caught when validation runs.
      indices = tf.constant([
          [0, 1, 0], [0, 1, 1],  # 0,1
          [1, 0, 0],  # 1,0
          [1, 1, 0], [1, 1, 1], [1, 1, 2],  # 1,1
          [0, 0, 0], [0, 0, 2],  # 0,0
          # 2,0
          [2, 1, 1]  # 2,1
          # 3,*
      ], tf.int64)
    else:
      indices = tf.constant([
          [0, 0, 0], [0, 0, 2],  # 0,0
          [0, 1, 0], [0, 1, 1],  # 0,1
          [1, 0, 0],  # 1,0
          [1, 1, 0], [1, 1, 1], [1, 1, 2],  # 1,1
          # 2,0
          [2, 1, 1]  # 2,1
          # 3,*
      ], tf.int64)
    sp_a = tf.SparseTensor(
        indices,
        _constant([
            1, 9,  # 0,0
            3, 3,  # 0,1
            1,  # 1,0
            9, 7, 8,  # 1,1
            # 2,0
            5  # 2,1
            # 3,*
        ], dtype),
        tf.constant([4, 2, 3], tf.int64))
    sp_b = tf.SparseTensor(
        tf.constant([
            [0, 0, 0], [0, 0, 3],  # 0,0
            # 0,1
            [1, 0, 0],  # 1,0
            [1, 1, 0], [1, 1, 1],  # 1,1
            [2, 0, 1],  # 2,0
            [2, 1, 1],  # 2,1
            [3, 0, 0],  # 3,0
            [3, 1, 0]  # 3,1
        ], tf.int64),
        _constant([
            1, 3,  # 0,0
            # 0,1
            3,  # 1,0
            7, 8,  # 1,1
            2,  # 2,0
            5,  # 2,1
            4,  # 3,0
            4  # 3,1
        ], dtype),
        tf.constant([4, 2, 4], tf.int64))
    if invalid_indices:
      with self.assertRaisesRegexp(tf.OpError, "out of order"):
        self._set_intersection(sp_a, sp_b)
    else:
      expected_indices = [
          [0, 0, 0],  # 0,0
          # 0,1
          # 1,0
          [1, 1, 0], [1, 1, 1],  # 1,1
          # 2,0
          [2, 1, 0],  # 2,1
          # 3,*
      ]
      expected_values = _values([
          1,  # 0,0
          # 0,1
          # 1,0
          7, 8,  # 1,1
          # 2,0
          5,  # 2,1
          # 3,*
      ], dtype)
      expected_shape = [4, 2, 2]
      expected_counts = [[
          1,  # 0,0
          0  # 0,1
      ], [
          0,  # 1,0
          2  # 1,1
      ], [
          0,  # 2,0
          1  # 2,1
      ], [
          0,  # 3,0
          0  # 3,1
      ]]

      # Sparse to sparse.
      intersection = self._set_intersection(sp_a, sp_b)
      self._assert_set_operation(expected_indices, expected_values,
                                 expected_shape, intersection, dtype=dtype)
      self.assertAllEqual(expected_counts,
                          self._set_intersection_count(sp_a, sp_b))

      # NOTE: sparse_to_dense doesn't support uint8 and uint16.
      if dtype not in [tf.uint8, tf.uint16]:
        # Dense to sparse.
        a = tf.cast(
            tf.sparse_to_dense(
                sp_a.indices,
                sp_a.shape,
                sp_a.values,
                default_value="-1" if dtype == tf.string else -1),
            dtype=dtype)
        intersection = self._set_intersection(a, sp_b)
        self._assert_set_operation(expected_indices, expected_values,
                                   expected_shape, intersection, dtype=dtype)
        self.assertAllEqual(
            expected_counts, self._set_intersection_count(a, sp_b))

        # Dense to dense.
        b = tf.cast(
            tf.sparse_to_dense(
                sp_b.indices,
                sp_b.shape,
                sp_b.values,
                default_value="-2" if dtype == tf.string else -2),
            dtype=dtype)
        intersection = self._set_intersection(a, b)
        self._assert_set_operation(expected_indices, expected_values,
                                   expected_shape, intersection, dtype=dtype)
        self.assertAllEqual(expected_counts, self._set_intersection_count(a, b))

  def _assert_shapes(self, input_tensor, result_sparse_tensor):
    """Check static shapes of a set-op result against its first input.

    For dense inputs the row count and rank are statically known; for
    sparse inputs they are unknown (None) at graph-construction time.
    """
    expected_rows = (None if isinstance(input_tensor, tf.SparseTensor) else
                     input_tensor.get_shape().as_list()[0])
    expected_rank = (None if isinstance(input_tensor, tf.SparseTensor) else
                     input_tensor.get_shape().ndims)
    self.assertAllEqual((expected_rows, expected_rank),
                        result_sparse_tensor.indices.get_shape().as_list())
    self.assertAllEqual((expected_rows,),
                        result_sparse_tensor.values.get_shape().as_list())
    self.assertAllEqual((expected_rank,),
                        result_sparse_tensor.shape.get_shape().as_list())

  def _set_intersection(self, a, b):
    """Run set_intersection(a, b) and return the resulting SparseTensorValue."""
    # Validate that we get the same results with or without `validate_indices`.
    ops = [
        tf.contrib.metrics.set_intersection(a, b, validate_indices=True),
        tf.contrib.metrics.set_intersection(a, b, validate_indices=False)
    ]
    for op in ops:
      self._assert_shapes(a, op)
    with self.test_session() as sess:
      results = sess.run(ops)
    self.assertAllEqual(results[0].indices, results[1].indices)
    self.assertAllEqual(results[0].values, results[1].values)
    self.assertAllEqual(results[0].shape, results[1].shape)
    return results[0]

  def _set_intersection_count(self, a, b):
    """Return the per-set sizes of set_intersection(a, b)."""
    op = tf.contrib.metrics.set_size(tf.contrib.metrics.set_intersection(a, b))
    with self.test_session() as sess:
      return sess.run(op)

  def test_set_difference_multirow_2d(self):
    for dtype in _DTYPES:
      self._test_set_difference_multirow_2d(dtype)

  def _test_set_difference_multirow_2d(self, dtype):
    a_values = [[1, 1, 1], [1, 5, 9], [4, 5, 3], [5, 5, 1]]
    b_values = [[], [1, 2], [1, 2, 2], []]

    # a - b.
    expected_indices = [
        [0, 0], [1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0], [3, 1]
    ]
    expected_values = _values([1, 5, 9, 3, 4, 5, 1, 5], dtype)
    expected_shape = [4, 3]
    expected_counts = [1, 2, 3, 2]

    # Dense to sparse.
    a = _constant(a_values, dtype=dtype)
    sp_b = _dense_to_sparse(b_values, dtype=dtype)
    difference = self._set_difference(a, sp_b, True)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, difference, dtype=dtype)
    self.assertAllEqual(expected_counts,
                        self._set_difference_count(a, sp_b, True))

    # Sparse to sparse.
    sp_a = _dense_to_sparse(a_values, dtype=dtype)
    difference = self._set_difference(sp_a, sp_b, True)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, difference, dtype=dtype)
    self.assertAllEqual(expected_counts,
                        self._set_difference_count(sp_a, sp_b, True))

    # b - a.
    expected_indices = [[1, 0], [2, 0], [2, 1]]
    expected_values = _values([2, 1, 2], dtype)
    expected_shape = [4, 2]
    expected_counts = [0, 1, 2, 0]

    # Dense to sparse.
    difference = self._set_difference(a, sp_b, False)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, difference, dtype=dtype)
    self.assertAllEqual(expected_counts,
                        self._set_difference_count(a, sp_b, False))

    # Sparse to sparse.
    difference = self._set_difference(sp_a, sp_b, False)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, difference, dtype=dtype)
    self.assertAllEqual(expected_counts,
                        self._set_difference_count(sp_a, sp_b, False))

  def test_dense_set_difference_multirow_2d(self):
    for dtype in _DTYPES:
      self._test_dense_set_difference_multirow_2d(dtype)

  def _test_dense_set_difference_multirow_2d(self, dtype):
    a_values = [[1, 5, 9], [4, 5, 3]]
    b_values = [[1, 2, 6], [1, 2, 2]]

    # a - b.
    expected_indices = [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]]
    expected_values = _values([5, 9, 3, 4, 5], dtype)
    expected_shape = [2, 3]
    expected_counts = [2, 3]

    # Dense to dense.
    a = _constant(a_values, dtype=dtype)
    b = _constant(b_values, dtype=dtype)
    difference = self._set_difference(a, b, True)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, difference, dtype=dtype)
    self.assertAllEqual(expected_counts, self._set_difference_count(a, b, True))

    # b - a.
    expected_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
    expected_values = _values([2, 6, 1, 2], dtype)
    expected_shape = [2, 2]
    expected_counts = [2, 2]

    # Dense to dense.
    difference = self._set_difference(a, b, False)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, difference, dtype=dtype)
    self.assertAllEqual(expected_counts,
                        self._set_difference_count(a, b, False))

  def test_sparse_set_difference_multirow_2d(self):
    for dtype in _DTYPES:
      self._test_sparse_set_difference_multirow_2d(dtype)

  def _test_sparse_set_difference_multirow_2d(self, dtype):
    sp_a = _dense_to_sparse(
        [[], [1, 5, 9], [4, 5, 3, 3, 4, 5], [5, 1]], dtype=dtype)
    sp_b = _dense_to_sparse([[], [1, 2], [1, 2, 2], []], dtype=dtype)

    # a - b.
    expected_indices = [[1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0], [3, 1]]
    expected_values = _values([5, 9, 3, 4, 5, 1, 5], dtype)
    expected_shape = [4, 3]
    expected_counts = [0, 2, 3, 2]

    difference = self._set_difference(sp_a, sp_b, True)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, difference, dtype=dtype)
    self.assertAllEqual(expected_counts,
                        self._set_difference_count(sp_a, sp_b, True))

    # b - a.
    expected_indices = [[1, 0], [2, 0], [2, 1]]
    expected_values = _values([2, 1, 2], dtype)
    expected_shape = [4, 2]
    expected_counts = [0, 1, 2, 0]

    difference = self._set_difference(sp_a, sp_b, False)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, difference, dtype=dtype)
    self.assertAllEqual(expected_counts,
                        self._set_difference_count(sp_a, sp_b, False))

  def test_set_difference_duplicates_2d(self):
    for dtype in _DTYPES:
      self._test_set_difference_duplicates_2d(dtype)

  def _test_set_difference_duplicates_2d(self, dtype):
    a_values = [[1, 1, 3]]
    b_values = [[1, 2, 2]]

    # a - b.
    expected_indices = [[0, 0]]
    expected_values = _values([3], dtype)
    expected_shape = [1, 1]
    expected_counts = [1]

    # Dense to sparse.
    a = _constant(a_values, dtype=dtype)
    sp_b = _dense_to_sparse(b_values, dtype=dtype)
    difference = self._set_difference(a, sp_b, True)
    self._assert_set_operation(
        expected_indices, expected_values, expected_shape, difference,
        dtype=dtype)
    self.assertAllEqual(
        expected_counts, self._set_difference_count(a, sp_b, True))

    # Sparse to sparse.
    sp_a = _dense_to_sparse(a_values, dtype=dtype)
    difference = self._set_difference(sp_a, sp_b, True)
    self._assert_set_operation(
        expected_indices, expected_values, expected_shape, difference,
        dtype=dtype)
    self.assertAllEqual(
        expected_counts, self._set_difference_count(a, sp_b, True))

    # b - a.
    expected_indices = [[0, 0]]
    expected_values = _values([2], dtype)
    expected_shape = [1, 1]
    expected_counts = [1]

    # Dense to sparse.
    difference = self._set_difference(a, sp_b, False)
    self._assert_set_operation(
        expected_indices, expected_values, expected_shape, difference,
        dtype=dtype)
    self.assertAllEqual(
        expected_counts, self._set_difference_count(a, sp_b, False))

    # Sparse to sparse.
    difference = self._set_difference(sp_a, sp_b, False)
    self._assert_set_operation(
        expected_indices, expected_values, expected_shape, difference,
        dtype=dtype)
    self.assertAllEqual(
        expected_counts, self._set_difference_count(a, sp_b, False))

  def test_sparse_set_difference_3d(self):
    for dtype in _DTYPES:
      self._test_sparse_set_difference_3d(dtype)

  def test_sparse_set_difference_3d_invalid_indices(self):
    for dtype in _DTYPES:
      self._test_sparse_set_difference_3d(dtype, invalid_indices=True)

  def _test_sparse_set_difference_3d(self, dtype, invalid_indices=False):
    if invalid_indices:
      # Out-of-order indices must be caught when validation runs.
      indices = tf.constant([
          [0, 1, 0], [0, 1, 1],  # 0,1
          [1, 0, 0],  # 1,0
          [1, 1, 0], [1, 1, 1], [1, 1, 2],  # 1,1
          [0, 0, 0], [0, 0, 2],  # 0,0
          # 2,0
          [2, 1, 1]  # 2,1
          # 3,*
      ], tf.int64)
    else:
      indices = tf.constant([
          [0, 0, 0], [0, 0, 2],  # 0,0
          [0, 1, 0], [0, 1, 1],  # 0,1
          [1, 0, 0],  # 1,0
          [1, 1, 0], [1, 1, 1], [1, 1, 2],  # 1,1
          # 2,0
          [2, 1, 1]  # 2,1
          # 3,*
      ], tf.int64)
    sp_a = tf.SparseTensor(
        indices,
        _constant([
            1, 9,  # 0,0
            3, 3,  # 0,1
            1,  # 1,0
            9, 7, 8,  # 1,1
            # 2,0
            5  # 2,1
            # 3,*
        ], dtype),
        tf.constant([4, 2, 3], tf.int64))
    sp_b = tf.SparseTensor(
        tf.constant([
            [0, 0, 0], [0, 0, 3],  # 0,0
            # 0,1
            [1, 0, 0],  # 1,0
            [1, 1, 0], [1, 1, 1],  # 1,1
            [2, 0, 1],  # 2,0
            [2, 1, 1],  # 2,1
            [3, 0, 0],  # 3,0
            [3, 1, 0]  # 3,1
        ], tf.int64),
        _constant([
            1, 3,  # 0,0
            # 0,1
            3,  # 1,0
            7, 8,  # 1,1
            2,  # 2,0
            5,  # 2,1
            4,  # 3,0
            4  # 3,1
        ], dtype),
        tf.constant([4, 2, 4], tf.int64))
    if invalid_indices:
      with self.assertRaisesRegexp(tf.OpError, "out of order"):
        self._set_difference(sp_a, sp_b, False)
      with self.assertRaisesRegexp(tf.OpError, "out of order"):
        self._set_difference(sp_a, sp_b, True)
    else:
      # a-b
      expected_indices = [
          [0, 0, 0],  # 0,0
          [0, 1, 0],  # 0,1
          [1, 0, 0],  # 1,0
          [1, 1, 0],  # 1,1
          # 2,*
          # 3,*
      ]
      expected_values = _values([
          9,  # 0,0
          3,  # 0,1
          1,  # 1,0
          9,  # 1,1
          # 2,*
          # 3,*
      ], dtype)
      expected_shape = [4, 2, 1]
      expected_counts = [[
          1,  # 0,0
          1  # 0,1
      ], [
          1,  # 1,0
          1  # 1,1
      ], [
          0,  # 2,0
          0  # 2,1
      ], [
          0,  # 3,0
          0  # 3,1
      ]]

      difference = self._set_difference(sp_a, sp_b, True)
      self._assert_set_operation(expected_indices, expected_values,
                                 expected_shape, difference, dtype=dtype)
      self.assertAllEqual(
          expected_counts, self._set_difference_count(sp_a, sp_b))

      # b-a
      expected_indices = [
          [0, 0, 0],  # 0,0
          # 0,1
          [1, 0, 0],  # 1,0
          # 1,1
          [2, 0, 0],  # 2,0
          # 2,1
          [3, 0, 0],  # 3,0
          [3, 1, 0]  # 3,1
      ]
      expected_values = _values([
          3,  # 0,0
          # 0,1
          3,  # 1,0
          # 1,1
          2,  # 2,0
          # 2,1
          4,  # 3,0
          4,  # 3,1
      ], dtype)
      expected_shape = [4, 2, 1]
      expected_counts = [[
          1,  # 0,0
          0  # 0,1
      ], [
          1,  # 1,0
          0  # 1,1
      ], [
          1,  # 2,0
          0  # 2,1
      ], [
          1,  # 3,0
          1  # 3,1
      ]]

      difference = self._set_difference(sp_a, sp_b, False)
      self._assert_set_operation(expected_indices, expected_values,
                                 expected_shape, difference, dtype=dtype)
      self.assertAllEqual(expected_counts,
                          self._set_difference_count(sp_a, sp_b, False))

  def _set_difference(self, a, b, aminusb=True):
    """Run set_difference and return the resulting SparseTensorValue."""
    # Validate that we get the same results with or without `validate_indices`.
    ops = [
        tf.contrib.metrics.set_difference(
            a, b, aminusb=aminusb, validate_indices=True),
        tf.contrib.metrics.set_difference(
            a, b, aminusb=aminusb, validate_indices=False)
    ]
    for op in ops:
      self._assert_shapes(a, op)
    with self.test_session() as sess:
      results = sess.run(ops)
    self.assertAllEqual(results[0].indices, results[1].indices)
    self.assertAllEqual(results[0].values, results[1].values)
    self.assertAllEqual(results[0].shape, results[1].shape)
    return results[0]

  def _set_difference_count(self, a, b, aminusb=True):
    """Return the per-set sizes of set_difference(a, b, aminusb)."""
    op = tf.contrib.metrics.set_size(
        tf.contrib.metrics.set_difference(a, b, aminusb))
    with self.test_session() as sess:
      return sess.run(op)

  def test_set_union_multirow_2d(self):
    for dtype in _DTYPES:
      self._test_set_union_multirow_2d(dtype)

  def _test_set_union_multirow_2d(self, dtype):
    a_values = [[9, 1, 5], [2, 4, 3]]
    b_values = [[1, 9], [1]]
    expected_indices = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [1, 3]]
    expected_values = _values([1, 5, 9, 1, 2, 3, 4], dtype)
    expected_shape = [2, 4]
    expected_counts = [3, 4]

    # Dense to sparse.
    a = _constant(a_values, dtype=dtype)
    sp_b = _dense_to_sparse(b_values, dtype=dtype)
    union = self._set_union(a, sp_b)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, union, dtype=dtype)
    self.assertAllEqual(expected_counts, self._set_union_count(a, sp_b))

    # Sparse to sparse.
    sp_a = _dense_to_sparse(a_values, dtype=dtype)
    union = self._set_union(sp_a, sp_b)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, union, dtype=dtype)
    self.assertAllEqual(expected_counts, self._set_union_count(sp_a, sp_b))

  def test_dense_set_union_multirow_2d(self):
    for dtype in _DTYPES:
      self._test_dense_set_union_multirow_2d(dtype)

  def _test_dense_set_union_multirow_2d(self, dtype):
    a_values = [[9, 1, 5], [2, 4, 3]]
    b_values = [[1, 9], [1, 2]]
    expected_indices = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [1, 3]]
    expected_values = _values([1, 5, 9, 1, 2, 3, 4], dtype)
    expected_shape = [2, 4]
    expected_counts = [3, 4]

    # Dense to dense.
    a = _constant(a_values, dtype=dtype)
    b = _constant(b_values, dtype=dtype)
    union = self._set_union(a, b)
    self._assert_set_operation(expected_indices, expected_values,
                               expected_shape, union, dtype=dtype)
    self.assertAllEqual(expected_counts, self._set_union_count(a, b))

  def test_set_union_duplicates_2d(self):
    for dtype in _DTYPES:
      self._test_set_union_duplicates_2d(dtype)

  def _test_set_union_duplicates_2d(self, dtype):
    a_values = [[1, 1, 3]]
    b_values = [[1]]
    expected_indices = [[0, 0], [0, 1]]
    expected_values = _values([1, 3], dtype)
    expected_shape = [1, 2]

    # Dense to sparse.
    a = _constant(a_values, dtype=dtype)
    sp_b = _dense_to_sparse(b_values, dtype=dtype)
    union = self._set_union(a, sp_b)
    self._assert_set_operation(
        expected_indices, expected_values, expected_shape, union, dtype=dtype)
    self.assertAllEqual([2], self._set_union_count(a, sp_b))

    # Sparse to sparse.
    sp_a = _dense_to_sparse(a_values, dtype=dtype)
    union = self._set_union(sp_a, sp_b)
    self._assert_set_operation(
        expected_indices, expected_values, expected_shape, union, dtype=dtype)
    self.assertAllEqual([2], self._set_union_count(sp_a, sp_b))

  def test_sparse_set_union_3d(self):
    for dtype in _DTYPES:
      self._test_sparse_set_union_3d(dtype)

  def test_sparse_set_union_3d_invalid_indices(self):
    for dtype in _DTYPES:
      self._test_sparse_set_union_3d(dtype, invalid_indices=True)

  def _test_sparse_set_union_3d(self, dtype, invalid_indices=False):
    if invalid_indices:
      # Out-of-order indices must be caught when validation runs.
      indices = tf.constant([
          [0, 1, 0], [0, 1, 1],  # 0,1
          [1, 0, 0],  # 1,0
          [0, 0, 0], [0, 0, 2],  # 0,0
          [1, 1, 0], [1, 1, 1], [1, 1, 2],  # 1,1
          # 2,0
          [2, 1, 1]  # 2,1
          # 3,*
      ], tf.int64)
    else:
      indices = tf.constant([
          [0, 0, 0], [0, 0, 2],  # 0,0
          [0, 1, 0], [0, 1, 1],  # 0,1
          [1, 0, 0],  # 1,0
          [1, 1, 0], [1, 1, 1], [1, 1, 2],  # 1,1
          # 2,0
          [2, 1, 1]  # 2,1
          # 3,*
      ], tf.int64)
    sp_a = tf.SparseTensor(
        indices,
        _constant([
            1, 9,  # 0,0
            3, 3,  # 0,1
            1,  # 1,0
            9, 7, 8,  # 1,1
            # 2,0
            5  # 2,1
            # 3,*
        ], dtype),
        tf.constant([4, 2, 3], tf.int64))
    sp_b = tf.SparseTensor(
        tf.constant([
            [0, 0, 0], [0, 0, 3],  # 0,0
            # 0,1
            [1, 0, 0],  # 1,0
            [1, 1, 0], [1, 1, 1],  # 1,1
            [2, 0, 1],  # 2,0
            [2, 1, 1],  # 2,1
            [3, 0, 0],  # 3,0
            [3, 1, 0]  # 3,1
        ], tf.int64),
        _constant([
            1, 3,  # 0,0
            # 0,1
            3,  # 1,0
            7, 8,  # 1,1
            2,  # 2,0
            5,  # 2,1
            4,  # 3,0
            4  # 3,1
        ], dtype),
        tf.constant([4, 2, 4], tf.int64))
    if invalid_indices:
      with self.assertRaisesRegexp(tf.OpError, "out of order"):
        self._set_union(sp_a, sp_b)
    else:
      expected_indices = [
          [0, 0, 0], [0, 0, 1], [0, 0, 2],  # 0,0
          [0, 1, 0],  # 0,1
          [1, 0, 0], [1, 0, 1],  # 1,0
          [1, 1, 0], [1, 1, 1], [1, 1, 2],  # 1,1
          [2, 0, 0],  # 2,0
          [2, 1, 0],  # 2,1
          [3, 0, 0],  # 3,0
          [3, 1, 0],  # 3,1
      ]
      expected_values = _values([
          1, 3, 9,  # 0,0
          3,  # 0,1
          1, 3,  # 1,0
          7, 8, 9,  # 1,1
          2,  # 2,0
          5,  # 2,1
          4,  # 3,0
          4,  # 3,1
      ], dtype)
      expected_shape = [4, 2, 3]
      expected_counts = [[
          3,  # 0,0
          1  # 0,1
      ], [
          2,  # 1,0
          3  # 1,1
      ], [
          1,  # 2,0
          1  # 2,1
      ], [
          1,  # 3,0
          1  # 3,1
      ]]

      intersection = self._set_union(sp_a, sp_b)
      self._assert_set_operation(expected_indices, expected_values,
                                 expected_shape, intersection, dtype=dtype)
      self.assertAllEqual(expected_counts, self._set_union_count(sp_a, sp_b))

  def _set_union(self, a, b):
    """Run set_union(a, b) and return the resulting SparseTensorValue."""
    # Validate that we get the same results with or without `validate_indices`.
    ops = [
        tf.contrib.metrics.set_union(a, b, validate_indices=True),
        tf.contrib.metrics.set_union(a, b, validate_indices=False)
    ]
    for op in ops:
      self._assert_shapes(a, op)
    with self.test_session() as sess:
      results = sess.run(ops)
    self.assertAllEqual(results[0].indices, results[1].indices)
    self.assertAllEqual(results[0].values, results[1].values)
    self.assertAllEqual(results[0].shape, results[1].shape)
    return results[0]

  def _set_union_count(self, a, b):
    """Return the per-set sizes of set_union(a, b)."""
    op = tf.contrib.metrics.set_size(tf.contrib.metrics.set_union(a, b))
    with self.test_session() as sess:
      return sess.run(op)

  def _assert_set_operation(self, expected_indices, expected_values,
                            expected_shape, sparse_tensor, dtype):
    """Compare a set-op result against expected sparse components.

    Values within one outer-dimensions group may come back in any order,
    so values are compared as per-group sets, flushed whenever the group
    prefix of the index changes.
    """
    self.assertAllEqual(expected_indices, sparse_tensor.indices)
    self.assertAllEqual(len(expected_indices), len(expected_values))
    self.assertAllEqual(len(expected_values), len(sparse_tensor.values))
    expected_set = set()
    actual_set = set()
    last_indices = None
    for indices, expected_value, actual_value in zip(
        expected_indices, expected_values, sparse_tensor.values):
      if dtype == tf.string:
        actual_value = actual_value.decode("utf-8")
      if last_indices and (last_indices[:-1] != indices[:-1]):
        self.assertEqual(
            expected_set, actual_set, "Expected %s, got %s, at %s." % (
                expected_set, actual_set, indices))
        expected_set.clear()
        actual_set.clear()
      expected_set.add(expected_value)
      actual_set.add(actual_value)
      last_indices = indices
    # Flush the final group.
    self.assertEqual(
        expected_set, actual_set, "Expected %s, got %s, at %s." % (
            expected_set, actual_set, last_indices))
    self.assertAllEqual(expected_shape, sparse_tensor.shape)
if __name__ == "__main__":
  # Run every test in this file under the TensorFlow test runner.
  googletest.main()
| apache-2.0 |
geky/mbed | tools/host_tests/tcpecho_server_loop.py | 73 | 1349 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Be sure that the tools directory is in the search path
import sys
from os.path import join, abspath, dirname
ROOT = abspath(join(dirname(__file__), "..", ".."))
sys.path.insert(0, ROOT)
from mbed_settings import LOCALHOST
from SocketServer import BaseRequestHandler, TCPServer
class TCP_EchoHandler(BaseRequestHandler):
def handle(self):
print "\nHandle connection from:", self.client_address
while True:
data = self.request.recv(1024)
if not data: break
self.request.sendall(data)
self.request.close()
print "socket closed"
if __name__ == '__main__':
server = TCPServer((LOCALHOST, 7), TCP_EchoHandler)
print "listening for connections on:", (LOCALHOST, 7)
server.serve_forever()
| apache-2.0 |
golismero/golismero | golismero/api/text/text_utils.py | 8 | 7440 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Text manipulation utilities.
"""
__license__ = """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: https://github.com/golismero
Golismero project mail: contact@golismero-project.com
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
__all__ = [
"char_count", "line_count", "word_count", "generate_random_string",
"uncamelcase", "hexdump", "to_utf8", "split_first",
]
import re
from random import choice
from re import finditer
from string import ascii_letters, digits, printable
#------------------------------------------------------------------------------
def char_count(text):
    """
    Count the word characters (letters, digits and underscore,
    i.e. regex ``\\w``) found in the given text.

    :param text: Text.
    :type text: str

    :returns: Number of word characters in text.
    :rtype: int

    :raises: TypeError
    """
    # re rejects non-string input with a TypeError, matching the contract.
    return len(re.findall(r"\w", text))
#------------------------------------------------------------------------------
def line_count(text):
    """
    :param text: Text.
    :type text: str

    :returns: Number of lines in text.
    :rtype: int

    :raises: TypeError
    """
    if not isinstance(text, basestring):
        raise TypeError("Expected basestring, got '%s' instead" % type(text))
    newline_total = text.count("\n")
    # A trailing newline does not start an additional line.
    return newline_total if text.endswith("\n") else newline_total + 1
#------------------------------------------------------------------------------
def word_count(text):
    """
    Count the words (maximal runs of ``\\w`` characters) in the given text.

    :param text: Text.
    :type text: str

    :returns: Number of words in text.
    :rtype: int

    :raises: TypeError
    """
    return len(re.findall(r"\w+", text))
#------------------------------------------------------------------------------
def generate_random_string(length = 30):
    """
    Generates a random string of the specified length.

    The key space used to generate random strings are:

    - ASCII letters (both lowercase and uppercase).
    - Digits (0-9).

    >>> from golismero.api.text.text_utils import generate_random_string
    >>> generate_random_string(10)
    Asi91Ujsn5
    >>> generate_random_string(30)
    8KNLs981jc0h1ls8b2ks01bc7slgu2

    :param length: Desired string length.
    :type length: int

    :returns: Random string of the requested length.
    :rtype: str

    :raises: TypeError
    """
    # NOTE(review): `random.choice` is not cryptographically secure; do not
    # use this for tokens or secrets (the stdlib `secrets` module would be
    # needed for that).
    m_available_chars = ascii_letters + digits
    # `xrange` makes this Python 2 only, consistent with the rest of the file.
    return "".join(choice(m_available_chars) for _ in xrange(length))
#------------------------------------------------------------------------------
# Adapted from: http://stackoverflow.com/a/2560017/426293
# Matches (zero-width) every position where a word break should be inserted:
#  1. between an uppercase run and a following capitalized word (HTMLParser),
#  2. before an uppercase letter preceded by a non-uppercase char (myClass),
#  3. between a letter and a following non-letter (GL11, May5).
__uncamelcase_re = re.compile("%s|%s|%s" % (
    r"(?<=[A-Z])(?=[A-Z][a-z])",
    r"(?<=[^A-Z])(?=[A-Z])",
    r"(?<=[A-Za-z])(?=[^A-Za-z])",
))

def uncamelcase(string):
    """
    Converts a CamelCase string into a human-readable string.

    Examples::
        >>> uncamelcase("lowercase")
        'lowercase'
        >>> uncamelcase("Class")
        'Class'
        >>> uncamelcase("MyClass")
        'My Class'
        >>> uncamelcase("HTML")
        'HTML'
        >>> uncamelcase("PDFLoader")
        'PDF Loader'
        >>> uncamelcase("AString")
        'A String'
        >>> uncamelcase("SimpleXMLParser")
        'Simple XML Parser'
        >>> uncamelcase("GL11Version")
        'GL 11 Version'
        >>> uncamelcase("99Bottles")
        '99 Bottles'
        >>> uncamelcase("May5")
        'May 5'
        >>> uncamelcase("BFG9000")
        'BFG 9000'

    :param string: CamelCase string.
    :type string: str

    :returns: Human-readable string.
    :rtype: str

    :raises: TypeError
    """
    if not isinstance(string, basestring):
        raise TypeError("Expected basestring, got '%s' instead" % type(string))
    # Treat underscores as word separators too.
    string = string.replace("_", " ")
    string = __uncamelcase_re.sub(" ", string)
    # Collapse any run of spaces introduced by the substitutions.
    while "  " in string:
        string = string.replace("  ", " ")
    return string
#------------------------------------------------------------------------------
def hexdump(s):
    """
    Produce an hexadecimal output from a binary string.

    Each output row shows 16 bytes: two columns of 8 hex byte values
    followed by the ASCII rendering of those bytes.

    :param s: Binary string to dump.
    :type s: str

    :returns: Hexadecimal output.
    :rtype: str

    :raises: TypeError
    """
    if not isinstance(s, basestring):
        raise TypeError("Expected basestring, got '%s' instead" % type(s))

    a = []
    for i in xrange(0, len(s), 16):

        # Hex representation of each half of the 16-byte row.
        h1 = " ".join("%.2x" % ord(c) for c in s[i:i+8])
        h2 = " ".join("%.2x" % ord(c) for c in s[i+8:i+16])

        # ASCII column: only graphic characters (and the plain space,
        # 0x20-0x7e) are shown literally. Control characters are shown
        # as a dot. The previous test `c in printable` let "\n", "\t"
        # and "\r" through, which broke the one-row-per-16-bytes layout
        # (string.printable includes whitespace control characters).
        d = "".join(c if 0x20 <= ord(c) < 0x7f else "." for c in s[i:i+16])

        a.append("%-32s-%-32s %s\n" % (h1, h2, d))
    return "".join(a)
#------------------------------------------------------------------------------
def to_utf8(s):
    """
    Convert the given Unicode string into an UTF-8 encoded string.

    If the argument is already a normal Python string, nothing is done.
    So this function can be used as a filter to normalize string arguments.

    :param s: Unicode string to convert.
    :type s: basestring

    :returns: Converted string.
    :rtype: str
    """
    # Unicode strings are encoded into a UTF-8 byte string.
    if isinstance(s, unicode):
        return s.encode("UTF-8")
    # Instances of str *subclasses* are downcast to a plain str, so
    # callers always get the exact built-in string type back.
    if type(s) is not str and isinstance(s, str):
        return str(s)
    # Plain str (and any non-string value) is returned untouched.
    return s
#------------------------------------------------------------------------------
# This function was borrowed from the urllib3 project.
#
# Urllib3 is copyright 2008-2012 Andrey Petrov and contributors (see
# CONTRIBUTORS.txt) and is released under the MIT License:
# http://www.opensource.org/licenses/mit-license.php
# http://raw.github.com/shazow/urllib3/master/CONTRIBUTORS.txt
#
def split_first(s, delims):
    """
    Split the string on the first occurrence of any of the given
    delimiter characters.

    If none of the delimiters is found, the first returned part is the
    whole input string, the second part is empty and the matched
    delimiter is None.

    Example: ::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delimiters.
    Not ideal for a large number of delimiters.

    .. warning: This function was borrowed from the urllib3 project.
                It may be removed in future versions of GoLismero.

    :param s: string to delimit to.
    :type s: str

    :param delims: string with delimits characters
    :type delims: str

    :return: a tuple as format: (FIRST_OCCURRENCE, REST_OF_TEXT, MATCHING_CHAR)
    :rtype: (str, str, str|None)

    :raises: TypeError
    """
    # Track the leftmost match found so far as an (index, delimiter) pair.
    best = None
    for delim in delims:
        pos = s.find(delim)
        if pos >= 0 and (best is None or pos < best[0]):
            best = (pos, delim)

    if best is None:
        return s, '', None

    pos, delim = best
    return s[:pos], s[pos + 1:], delim
| gpl-2.0 |
kmpf/uap | tools/segemehl_2017_reformatCigar.py | 1 | 4801 | #!/bin/bash
"exec" "`dirname $0`/../python_env/bin/python" "$0" "$@"
#"exec" "python" "$0" "$@"
# ^^^
# the cmd above ensures that the correct python environment is
# selected to execute this script.
# The correct environment is the one belonging to uap, since all
# neccessary python modules are installed there.
# filename: segemehl_2017_reformatCigar.py
# author: Jana Hertel
# date: 2017/06/07
# version: 1.0
# description: Reformat the cigar string such that htseq-count is able to process
# the according SAM files. Consecutive values for 'ix', 'j=' and 'kM'
# are summed up and replaced by nM with n being the sum of i, j and k.
import argparse
import sys
import re
from multiprocessing import Pool
import itertools
parser = argparse.ArgumentParser(
description='Python script to process a large file '
'using multi-processing.')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
parser.add_argument(
'--in-file',
dest='my_file_in',
required=True,
type=argparse.FileType('r'),
help='A large file whose lines are independent from each other and '
'can be processed separately.')
parser.add_argument('--threads', dest='my_cores', default=1,
type=int,
help='Number of CPUs 2B used. Default: 1')
parser.add_argument(
'--blocksize',
dest='my_bufsize',
default=2,
type=int,
help='Size of buffer to read the input file (in MB). Default: 2')
args = parser.parse_args()
##########################################################################
# my_range(start, end, step)
#
# This function creates a range with a user defined step to walk through.
# returns: the respective new start values
def my_range(start, end, step):
    """
    Generator yielding the values from start up to AND INCLUDING end,
    advancing by a caller-supplied step width.

    returns: the respective new start values
    """
    current = start
    while current <= end:
        yield current
        current += step
##########################################################################
##########################################################################
# process_line(line)
#
# function that does something with the line:
# in this case:
# - split the line into columns by tab
# - returns the columns separated by tab
def process_line(lines):
    """
    Rewrite the CIGAR string of each SAM line in ``lines``.

    Consecutive match ('='), mismatch ('X') and match/mismatch ('M')
    operations are summed up and replaced by one single 'nM' operation
    (the notation htseq-count understands). All other operations
    ('I', 'D', 'N', ...) are copied through unchanged. Header lines
    (starting with '@') are passed through untouched.

    :param lines: SAM lines to process (with or without trailing newline).
    :type lines: list of str

    :returns: The processed SAM lines, without trailing newlines.
    :rtype: list of str
    """
    newlines = list()

    for line in lines:
        columns = line.strip().split('\t')

        # don't process header lines
        if columns[0][:1] == "@":
            newlines.append(line.strip())
            continue

        cigar = columns[5]

        # Split the CIGAR string into (length, operation) tokens, e.g.
        # "5=1X4=" -> ['5', '=', '1', 'X', '4', '=', ''].
        x = re.split(r'(\D)', cigar)

        # Sum up consecutive lengths of '=', 'X' and 'M' operations and
        # emit them as one "<sum>M" operation; flush any other operation
        # (deletion, insertion, split, ...) through unchanged.
        M = 0
        cigar_new = ''
        for j in range(1, len(x) - 1, 2):
            if x[j] == '=' or x[j] == 'X' or x[j] == 'M':
                # match or mismatch: accumulate
                M = M + int(x[j - 1])
            else:
                if M > 0:
                    # flush the accumulated match/mismatch run first
                    cigar_new += str(M) + "M"
                    M = 0
                cigar_new += x[j - 1] + x[j]

        if M > 0:
            cigar_new += str(M) + "M"

        # Unmapped reads have no CIGAR string at all.
        if cigar_new == "0M*":
            cigar_new = "*"

        # Reassemble the SAM line with the new CIGAR string. Joining on
        # tabs also fixes a bug in the previous implementation: its
        # `if(not k == len(columns))` test was always true (k never
        # reaches len(columns)), so every record got a spurious
        # trailing tab.
        newlines.append('\t'.join(columns[:5] + [cigar_new] + columns[6:]))

    return newlines

# END: process_line(line)
##########################################################################
if __name__ == '__main__':
    # create my_cores -1 pools, 1 control + the remaining for processing the
    # lines
    p = Pool(args.my_cores)
    # `a` collects the chunks of input lines handed to the pool in one batch.
    a = list()
    eof_reached = False
    # bufsize needs to be provided in bytes
    # argument provided megabytes
    bufsize = args.my_bufsize * 1000000
    while not eof_reached:
        # Read up to (cores - 1) chunks of roughly `bufsize` bytes each,
        # one chunk per worker process in the upcoming map() call.
        for i in range(args.my_cores - 1):
            linelist = args.my_file_in.readlines(bufsize)
            if len(linelist) == 0:
                # readlines() returned nothing: end of the input reached.
                eof_reached = True
            else:
                a.append(linelist) # ~ 2MB chunks
        # Fan the chunks out to the worker pool; map() preserves order,
        # so output lines appear in input order.
        l = p.map(process_line, a)
        for j in l:
            print('\n'.join(j))
        a[:] = [] # delete processed lines from the list
    # this works in principle.. too much i/o
    # for line in p.imap(process_line, args.my_file_in):
    # print line, # the coma prevents printing an additional new line
    # idea for mp:
    # read file in chunks of the size 1/args.my_cores
    # --> each chunk in one process
| gpl-3.0 |
xadahiya/django-chartjs | docs/conf.py | 5 | 8415 | # -*- coding: utf-8 -*-
#
# django-chartjs documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 27 11:37:23 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Minimal Django settings. Required to use sphinx.ext.autodoc, because
# django-chartjs depends on Django...
from django.conf import settings
settings.configure(
DATABASES={}, # Required to load ``django.views.generic``.
)
doc_dir = os.path.dirname(os.path.abspath(__file__))
project_dir = os.path.dirname(doc_dir)
version_filename = os.path.join(project_dir, 'VERSION')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.doctest', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-chartjs'
copyright = u'2013, Novapost'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = open(version_filename).read().strip()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['globaltoc.html',
'relations.html',
'sourcelink.html',
'searchbox.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-chartjsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-chartjs.tex', u'django-chartjs Documentation',
u'Novapost', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-chartjs', u'django-chartjs Documentation',
[u'Novapost'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-chartjs', u'django-chartjs Documentation',
u'Novapost', 'django-chartjs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| bsd-3-clause |
SNoiraud/gramps | gramps/gen/config.py | 4 | 14825 | # -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2005-2007 Donald N. Allingham
# Copyright (C) 2008-2009 Gary Burton
# Copyright (C) 2009-2012 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2015- Serge Noiraud
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
This package implements access to Gramps configuration.
"""
#---------------------------------------------------------------
#
# Gramps imports
#
#---------------------------------------------------------------
import os
import re
import logging
#---------------------------------------------------------------
#
# Gramps imports
#
#---------------------------------------------------------------
from .const import HOME_DIR, USER_HOME, VERSION_DIR
from .utils.configmanager import ConfigManager
from .const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
# _T_ is a gramps-defined keyword -- see po/update_po.py and po/genpot.sh
def _T_(value, context=''): # enable deferred translations
return "%s\x04%s" % (context, value) if context else value
#---------------------------------------------------------------
#
# Constants
#
#---------------------------------------------------------------
INIFILE = os.path.join(VERSION_DIR, "gramps.ini")
#---------------------------------------------------------------
#
# Module functions
#
#---------------------------------------------------------------
def register(key, value):
    """ Module shortcut to register a config key and its default value """
    return CONFIGMAN.register(key, value)
def get(key):
    """ Module shortcut to get the current value of a key """
    return CONFIGMAN.get(key)
def get_default(key):
    """ Module shortcut to get the default value of a key """
    return CONFIGMAN.get_default(key)
def has_default(key):
    """ Module shortcut to check whether a key has a default value """
    return CONFIGMAN.has_default(key)
def get_sections():
    """ Module shortcut to get all section names of settings """
    return CONFIGMAN.get_sections()
def get_section_settings(section):
    """ Module shortcut to get all settings of a section """
    return CONFIGMAN.get_section_settings(section)
# NOTE: `set` intentionally shadows the built-in set() within this module;
# callers use it as config.set(key, value).
def set(key, value):
    """ Module shortcut to set the value of a key """
    return CONFIGMAN.set(key, value)
def is_set(key):
    """ Module shortcut to check whether a value was explicitly set for a key """
    return CONFIGMAN.is_set(key)
def save(filename=None):
    """ Module shortcut to save the config file """
    return CONFIGMAN.save(filename)
def connect(key, func):
    """
    Module shortcut to connect a key to a callback func.
    Returns a unique callback ID number.
    """
    return CONFIGMAN.connect(key, func)
def disconnect(callback_id):
    """ Module shortcut to remove a callback by its ID number """
    return CONFIGMAN.disconnect(callback_id)
def reset(key=None):
    """ Module shortcut to reset some or all config data to defaults """
    return CONFIGMAN.reset(key)
def load(filename=None, oldstyle=False):
    """ Module shortcut to load an INI file into config data """
    return CONFIGMAN.load(filename, oldstyle)
def emit(key):
    """ Module shortcut to call all callbacks associated with key """
    return CONFIGMAN.emit(key)
#---------------------------------------------------------------
#
# Register the system-wide settings in a singleton config manager
#
#---------------------------------------------------------------
CONFIGMAN = ConfigManager(INIFILE, "plugins")
register('behavior.addmedia-image-dir', '')
register('behavior.addmedia-relative-path', False)
register('behavior.autoload', False)
register('behavior.avg-generation-gap', 20)
register('behavior.check-for-addon-updates', 0)
register('behavior.check-for-addon-update-types', ["new"])
register('behavior.last-check-for-addon-updates', "1970/01/01")
register('behavior.previously-seen-addon-updates', [])
register('behavior.do-not-show-previously-seen-addon-updates', True)
register('behavior.date-about-range', 50)
register('behavior.date-after-range', 50)
register('behavior.date-before-range', 50)
register('behavior.generation-depth', 15)
register('behavior.max-age-prob-alive', 110)
register('behavior.max-sib-age-diff', 20)
register('behavior.min-generation-years', 13)
register('behavior.owner-warn', False)
register('behavior.pop-plugin-status', False)
register('behavior.recent-export-type', 3)
register('behavior.runcheck', False)
register('behavior.spellcheck', False)
register('behavior.startup', 0)
register('behavior.surname-guessing', 0)
register('behavior.translator-needed', True)
register('behavior.use-tips', False)
register('behavior.welcome', 100)
register('behavior.web-search-url', 'http://google.com/#&q=%(text)s')
register('behavior.addons-url', "https://raw.githubusercontent.com/gramps-project/addons/master/gramps52")
register('database.backend', 'sqlite')
register('database.compress-backup', True)
register('database.backup-path', USER_HOME)
register('database.backup-on-exit', True)
register('database.autobackup', 0)
register('database.path', os.path.join(HOME_DIR, 'grampsdb'))
register('database.host', '')
register('database.port', '')
register('export.proxy-order',
[["privacy", 0],
["living", 0],
["person", 0],
["note", 0],
["reference", 0]]
)
register('geography.center-lon', 0.0)
register('geography.lock', False)
register('geography.center-lat', 0.0)
register('geography.map', "person")
register('geography.map_service', 1)
register('geography.zoom', 0)
register('geography.zoom_when_center', 12)
register('geography.show_cross', False)
register('geography.path', "")
register('geography.use-keypad', True)
register('geography.personal-map', "")
# note that other calls to "register" are done in realtime (when
# needed), for instance to four 'interface.clipboard' variables --
# so do a recursive grep for "setup_configs" to see all the (base) names
register('interface.dont-ask', False)
register('interface.view-categories',
["Dashboard", "People", "Relationships", "Families",
"Ancestry", "Events", "Places", "Geography", "Sources",
"Citations", "Repositories", "Media", "Notes"])
register('interface.filter', False)
register('interface.fullscreen', False)
register('interface.grampletbar-close', False)
register('interface.ignore-gexiv2', False)
register('interface.ignore-pil', False)
register('interface.ignore-osmgpsmap', False)
register('interface.main-window-height', 500)
register('interface.main-window-horiz-position', 15)
register('interface.main-window-vert-position', 10)
register('interface.main-window-width', 775)
register('interface.mapservice', 'OpenStreetMap')
register('interface.open-with-default-viewer', False)
register('interface.pedview-layout', 0)
register('interface.pedview-show-images', True)
register('interface.pedview-show-marriage', False)
register('interface.pedview-tree-size', 5)
register('interface.pedview-tree-direction', 2)
register('interface.pedview-show-unknown-people', False)
register('interface.place-name-height', 100)
register('interface.place-name-width', 450)
register('interface.sidebar-text', True)
register('interface.size-checked', False)
register('interface.statusbar', 1)
register('interface.toolbar-on', True)
register('interface.toolbar-text', False)
register('interface.view', True)
register('interface.surname-box-height', 150)
register('interface.treemodel-cache-size', 1000)
register('paths.recent-export-dir', USER_HOME)
register('paths.recent-file', '')
register('paths.recent-import-dir', USER_HOME)
register('paths.report-directory', USER_HOME)
register('paths.website-directory', USER_HOME)
register('paths.website-cms-uri', '')
register('paths.website-cal-uri', '')
register('paths.website-extra-page-uri', '')
register('paths.website-extra-page-name', '')
register('paths.quick-backup-directory', USER_HOME)
register('paths.quick-backup-filename',
"%(filename)s_%(year)d-%(month)02d-%(day)02d.%(extension)s")
register('preferences.quick-backup-include-mode', False)
register('preferences.date-format', 0)
register('preferences.calendar-format-report', 0)
register('preferences.cprefix', 'C%04d')
register('preferences.default-source', False)
register('preferences.tag-on-import', False)
register('preferences.tag-on-import-format', _("Imported %Y/%m/%d %H:%M:%S"))
register('preferences.eprefix', 'E%04d')
register('preferences.family-warn', True)
register('preferences.fprefix', 'F%04d')
register('preferences.hide-ep-msg', False)
register('preferences.invalid-date-format', "<b>%s</b>")
register('preferences.iprefix', 'I%04d')
register('preferences.name-format', 1)
register('preferences.place-format', 0)
register('preferences.place-auto', True)
register('preferences.patronimic-surname', False)
register('preferences.no-given-text', "[%s]" % _("Missing Given Name"))
register('preferences.no-record-text', "[%s]" % _("Missing Record"))
register('preferences.no-surname-text', "[%s]" % _("Missing Surname"))
register('preferences.nprefix', 'N%04d')
register('preferences.online-maps', False)
register('preferences.oprefix', 'O%04d')
register('preferences.paper-metric', 0)
register('preferences.paper-preference', 'Letter')
register('preferences.pprefix', 'P%04d')
register('preferences.private-given-text', "%s" % _T_("[Living]"))
register('preferences.private-record-text', "[%s]" % _("Private Record"))
register('preferences.private-surname-text', "%s" % _T_("[Living]"))
register('preferences.rprefix', 'R%04d')
register('preferences.sprefix', 'S%04d')
register('preferences.use-last-view', False)
register('preferences.last-view', '')
register('preferences.last-views', [])
register('preferences.family-relation-type', 3) # UNKNOWN
register('preferences.age-display-precision', 1)
register('colors.scheme', 0)
register('colors.male-alive', ['#b8cee6', '#1f344a'])
register('colors.male-dead', ['#b8cee6', '#2d3039'])
register('colors.female-alive', ['#feccf0', '#62242D'])
register('colors.female-dead', ['#feccf0', '#3a292b'])
register('colors.unknown-alive', ['#f3dbb6', '#75507B'])
register('colors.unknown-dead', ['#f3dbb6', '#35103b'])
register('colors.family', ['#eeeeee', '#454545'])
register('colors.family-married', ['#eeeeee', '#454545'])
register('colors.family-unmarried', ['#eeeeee', '#454545'])
register('colors.family-civil-union', ['#eeeeee', '#454545'])
register('colors.family-unknown', ['#eeeeee', '#454545'])
register('colors.family-divorced', ['#ffdede', '#5c3636'])
register('colors.home-person', ['#bbe68a', '#304918'])
register('colors.border-male-alive', ['#1f4986', '#171d26'])
register('colors.border-male-dead', ['#000000', '#000000'])
register('colors.border-female-alive', ['#861f69', '#261111'])
register('colors.border-female-dead', ['#000000', '#000000'])
register('colors.border-unknown-alive', ['#8e5801', '#8e5801'])
register('colors.border-unknown-dead', ['#000000', '#000000'])
register('colors.border-family', ['#cccccc', '#252525'])
register('colors.border-family-divorced', ['#ff7373', '#720b0b'])
register('researcher.researcher-addr', '')
register('researcher.researcher-locality', '')
register('researcher.researcher-city', '')
register('researcher.researcher-country', '')
register('researcher.researcher-email', '')
register('researcher.researcher-name', '')
register('researcher.researcher-phone', '')
register('researcher.researcher-postal', '')
register('researcher.researcher-state', '')
register('plugin.hiddenplugins', [])
register('plugin.addonplugins', [])
register('utf8.in-use', False)
register('utf8.available-fonts', [])
register('utf8.selected-font', "")
register('utf8.death-symbol', 13)
if __debug__: # enable a simple CLI test to see if the datestrings exist
register('test.january', _("|January", "localized lexeme inflections"))
#---------------------------------------------------------------
#
# Upgrade Conversions go here.
#
#---------------------------------------------------------------
# If we have not already upgraded to this version,
# we can tell by seeing if there is a key file for this version:
if not os.path.exists(CONFIGMAN.filename):
# If not, let's read old if there:
if os.path.exists(os.path.join(HOME_DIR, "keys.ini")):
# read it in old style:
logging.warning("Importing old key file 'keys.ini'...")
CONFIGMAN.load(os.path.join(HOME_DIR, "keys.ini"),
oldstyle=True)
logging.warning("Done importing old key file 'keys.ini'")
# other version upgrades here...
# check previous version of gramps:
# CONFIGMAN.filename is ".../grampsXY/gramps.ini"; split off the file name
# and the versioned directory name (e.g. "gramps51").
fullpath, filename = os.path.split(CONFIGMAN.filename)
fullpath, previous = os.path.split(fullpath)
match = re.match(r'gramps(\d*)', previous)
if match:
    # cycle back looking for previous versions of gramps
    for i in range(1, 20): # look back through up to 19 earlier versions
        # -----------------------------------------
        # TODO: Assumes minor version is a decimal, not semantic versioning
        # Uses ordering ... 4.9, 5.0, 5.1, ...
        # Not ... 4.9, 4.10, 4.11, 5.0, 5.1, ...
        # If not true, need to add a different method to auto upgrade.
        # Perhaps addings specific list of versions to check
        # -----------------------------------------
        digits = str(int(match.groups()[0]) - i)
        previous_grampsini = os.path.join(fullpath, "gramps" + digits,
                                          filename)
        if os.path.exists(previous_grampsini):
            logging.warning("Importing old config file '%s'...",
                            previous_grampsini)
            CONFIGMAN.load(previous_grampsini)
            logging.warning("Done importing old config file '%s'",
                            previous_grampsini)
            # only import from the closest previous version found
            break
#---------------------------------------------------------------
#
# Now, load the settings from the config file, if one
#
#---------------------------------------------------------------
CONFIGMAN.load()
config = CONFIGMAN
if config.get('database.backend') == 'bsddb':
config.set('database.backend', 'sqlite')
| gpl-2.0 |
class OrderedDict(dict):
    """
    A dictionary that keeps its keys in the order in which they're inserted.

    Copied from Django's SortedDict with some modifications.  Insertion
    order is tracked in the auxiliary ``keyOrder`` list; every mutating
    method keeps that list in sync with the underlying dict.
    """
    def __new__(cls, *args, **kwargs):
        instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs)
        # Created in __new__ so the attribute exists even when __init__ is
        # bypassed (e.g. by copy machinery).
        instance.keyOrder = []
        return instance

    def __init__(self, data=None):
        if data is None:
            data = {}
        super(OrderedDict, self).__init__(data)
        if isinstance(data, dict):
            # list() guarantees a real, mutable list: on Python 3,
            # dict.keys() returns a view that has no append()/remove().
            self.keyOrder = list(data.keys())
        else:
            # A sequence of (key, value) pairs; keep first-seen order.
            self.keyOrder = []
            for key, value in data:
                if key not in self.keyOrder:
                    self.keyOrder.append(key)

    def __deepcopy__(self, memo):
        from copy import deepcopy
        return self.__class__([(key, deepcopy(value, memo))
                               for key, value in self.iteritems()])

    def __setitem__(self, key, value):
        super(OrderedDict, self).__setitem__(key, value)
        if key not in self.keyOrder:
            self.keyOrder.append(key)

    def __delitem__(self, key):
        super(OrderedDict, self).__delitem__(key)
        self.keyOrder.remove(key)

    def __iter__(self):
        for k in self.keyOrder:
            yield k

    def pop(self, k, *args):
        result = super(OrderedDict, self).pop(k, *args)
        try:
            self.keyOrder.remove(k)
        except ValueError:
            # Key wasn't in the dictionary in the first place. No problem.
            pass
        return result

    def popitem(self):
        result = super(OrderedDict, self).popitem()
        self.keyOrder.remove(result[0])
        return result

    def items(self):
        return zip(self.keyOrder, self.values())

    def iteritems(self):
        for key in self.keyOrder:
            yield key, super(OrderedDict, self).__getitem__(key)

    def keys(self):
        # Return a copy so callers can't corrupt the internal order list.
        return self.keyOrder[:]

    def iterkeys(self):
        return iter(self.keyOrder)

    def values(self):
        return [super(OrderedDict, self).__getitem__(k) for k in self.keyOrder]

    def itervalues(self):
        for key in self.keyOrder:
            yield super(OrderedDict, self).__getitem__(key)

    def update(self, dict_):
        for k, v in dict_.items():
            self.__setitem__(k, v)

    def setdefault(self, key, default):
        if key not in self.keyOrder:
            self.keyOrder.append(key)
        return super(OrderedDict, self).setdefault(key, default)

    def value_for_index(self, index):
        """Return the value of the item at the given zero-based index."""
        return self[self.keyOrder[index]]

    def insert(self, index, key, value):
        """Insert the key, value pair before the item with the given index."""
        if key in self.keyOrder:
            n = self.keyOrder.index(key)
            del self.keyOrder[n]
            # The removal above shifts later positions down by one.
            if n < index:
                index -= 1
        self.keyOrder.insert(index, key)
        super(OrderedDict, self).__setitem__(key, value)

    def copy(self):
        """Return a copy of this object."""
        # This way of initializing the copy means it works for subclasses, too.
        obj = self.__class__(self)
        obj.keyOrder = self.keyOrder[:]
        return obj

    def __repr__(self):
        """
        Replace the normal dict.__repr__ with a version that returns the keys
        in their insertion order.
        """
        return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])

    def clear(self):
        super(OrderedDict, self).clear()
        self.keyOrder = []

    def index(self, key):
        """ Return the index of a given key. """
        return self.keyOrder.index(key)

    def index_for_location(self, location):
        """ Return index or None for a given location.

        Accepted forms: '_begin', '_end', '<key' (before key) and
        '>key' (after key).  None means "append at the end".
        """
        if location == '_begin':
            i = 0
        elif location == '_end':
            i = None
        elif location.startswith('<') or location.startswith('>'):
            i = self.index(location[1:])
            if location.startswith('>'):
                if i >= len(self):
                    # last item
                    i = None
                else:
                    i += 1
        else:
            raise ValueError('Not a valid location: "%s". Location key '
                             'must start with a ">" or "<".' % location)
        return i

    def add(self, key, value, location):
        """ Insert by key location. """
        i = self.index_for_location(location)
        if i is not None:
            self.insert(i, key, value)
        else:
            self.__setitem__(key, value)

    def link(self, key, location):
        """ Change location of an existing item. """
        n = self.keyOrder.index(key)
        del self.keyOrder[n]
        try:
            # index_for_location() is inside the try block because it can
            # raise ValueError for an invalid location; previously that
            # happened after the del above and silently dropped ``key``
            # from the ordering.
            i = self.index_for_location(location)
            if i is not None:
                self.keyOrder.insert(i, key)
            else:
                self.keyOrder.append(key)
        except Exception:
            # Restore the original position to prevent data loss, then
            # re-raise the original exception.  (The old code used
            # ``except Error`` / ``raise Error`` with an undefined name,
            # which itself raised NameError and discarded the details.)
            self.keyOrder.insert(n, key)
            raise
| mit |
hansey/youtube-dl | youtube_dl/extractor/howstuffworks.py | 106 | 4404 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
find_xpath_attr,
int_or_none,
js_to_json,
unescapeHTML,
)
class HowStuffWorksIE(InfoExtractor):
    """Extractor for video pages on *.howstuffworks.com.

    The page embeds a ``var clip = {...}`` JavaScript object with the clip
    metadata; formats come from that object (HLS/MP4) or, failing that,
    from a separate SMIL service.
    """
    _VALID_URL = r'https?://[\da-z-]+\.howstuffworks\.com/(?:[^/]+/)*(?:\d+-)?(?P<id>.+?)-video\.htm'
    _TESTS = [
        {
            'url': 'http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm',
            'info_dict': {
                'id': '450221',
                'ext': 'flv',
                'title': 'Cool Jobs - Iditarod Musher',
                'description': 'Cold sleds, freezing temps and warm dog breath... an Iditarod musher\'s dream. Kasey-Dee Gardner jumps on a sled to find out what the big deal is.',
                'display_id': 'cool-jobs-iditarod-musher',
                'thumbnail': 're:^https?://.*\.jpg$',
                'duration': 161,
            },
        },
        {
            'url': 'http://adventure.howstuffworks.com/7199-survival-zone-food-and-water-in-the-savanna-video.htm',
            'info_dict': {
                'id': '453464',
                'ext': 'mp4',
                'title': 'Survival Zone: Food and Water In the Savanna',
                'description': 'Learn how to find both food and water while trekking in the African savannah. In this video from the Discovery Channel.',
                'display_id': 'survival-zone-food-and-water-in-the-savanna',
                'thumbnail': 're:^https?://.*\.jpg$',
            },
        },
        {
            'url': 'http://entertainment.howstuffworks.com/arts/2706-sword-swallowing-1-by-dan-meyer-video.htm',
            'info_dict': {
                'id': '440011',
                'ext': 'flv',
                'title': 'Sword Swallowing #1 by Dan Meyer',
                'description': 'Video footage (1 of 3) used by permission of the owner Dan Meyer through Sword Swallowers Association International <www.swordswallow.org>',
                'display_id': 'sword-swallowing-1-by-dan-meyer',
                'thumbnail': 're:^https?://.*\.jpg$',
            },
        },
        {
            'url': 'http://shows.howstuffworks.com/stuff-to-blow-your-mind/optical-illusions-video.htm',
            'only_matching': True,
        }
    ]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The clip metadata is embedded as a JS object literal; convert it
        # to strict JSON before parsing.
        clip_js = self._search_regex(
            r'(?s)var clip = ({.*?});', webpage, 'clip info')
        clip_info = self._parse_json(
            clip_js, display_id, transform_source=js_to_json)

        video_id = clip_info['content_id']
        formats = []
        m3u8_url = clip_info.get('m3u8')
        if m3u8_url:
            # Prefer the HLS master playlist when the clip offers one.
            formats += self._extract_m3u8_formats(m3u8_url, video_id, 'mp4')
        for video in clip_info.get('mp4', []):
            formats.append({
                'url': video['src'],
                # bitrate strings look like e.g. '320k'; keep as the id,
                # strip the suffix for the numeric vbr.
                'format_id': video['bitrate'],
                'vbr': int(video['bitrate'].rstrip('k')),
            })

        if not formats:
            # Fall back to the SMIL service, which lists downloads relative
            # to an httpBase prefix declared in the SMIL head.
            smil = self._download_xml(
                'http://services.media.howstuffworks.com/videos/%s/smil-service.smil' % video_id,
                video_id, 'Downloading video SMIL')

            http_base = find_xpath_attr(
                smil,
                './{0}head/{0}meta'.format('{http://www.w3.org/2001/SMIL20/Language}'),
                'name',
                'httpBase').get('content')

            # NOTE(review): this suffix looks like Flash-player query
            # parameters expected by the media server — confirm before
            # changing.
            URL_SUFFIX = '?v=2.11.3&fp=LNX 11,2,202,356&r=A&g=A'

            for video in smil.findall(
                    './{0}body/{0}switch/{0}video'.format('{http://www.w3.org/2001/SMIL20/Language}')):
                # system-bitrate is in bps; scale to kbps.
                vbr = int_or_none(video.attrib['system-bitrate'], scale=1000)
                formats.append({
                    'url': '%s/%s%s' % (http_base, video.attrib['src'], URL_SUFFIX),
                    'format_id': '%dk' % vbr,
                    'vbr': vbr,
                })

        self._sort_formats(formats)

        return {
            'id': '%s' % video_id,
            'display_id': display_id,
            'title': unescapeHTML(clip_info['clip_title']),
            'description': unescapeHTML(clip_info.get('caption')),
            'thumbnail': clip_info.get('video_still_url'),
            'duration': clip_info.get('duration'),
            'formats': formats,
        }
| unlicense |
MrNuggles/HeyBoet-Telegram-Bot | temboo/Library/Amazon/SQS/__init__.py | 4 | 1878 | from temboo.Library.Amazon.SQS.AddPermission import AddPermission, AddPermissionInputSet, AddPermissionResultSet, AddPermissionChoreographyExecution
from temboo.Library.Amazon.SQS.ChangeMessageVisibility import ChangeMessageVisibility, ChangeMessageVisibilityInputSet, ChangeMessageVisibilityResultSet, ChangeMessageVisibilityChoreographyExecution
from temboo.Library.Amazon.SQS.CreateQueue import CreateQueue, CreateQueueInputSet, CreateQueueResultSet, CreateQueueChoreographyExecution
from temboo.Library.Amazon.SQS.DeleteMessage import DeleteMessage, DeleteMessageInputSet, DeleteMessageResultSet, DeleteMessageChoreographyExecution
from temboo.Library.Amazon.SQS.DeleteQueue import DeleteQueue, DeleteQueueInputSet, DeleteQueueResultSet, DeleteQueueChoreographyExecution
from temboo.Library.Amazon.SQS.GetQueueAttributes import GetQueueAttributes, GetQueueAttributesInputSet, GetQueueAttributesResultSet, GetQueueAttributesChoreographyExecution
from temboo.Library.Amazon.SQS.ListQueues import ListQueues, ListQueuesInputSet, ListQueuesResultSet, ListQueuesChoreographyExecution
from temboo.Library.Amazon.SQS.ReceiveMessage import ReceiveMessage, ReceiveMessageInputSet, ReceiveMessageResultSet, ReceiveMessageChoreographyExecution
from temboo.Library.Amazon.SQS.RemovePermission import RemovePermission, RemovePermissionInputSet, RemovePermissionResultSet, RemovePermissionChoreographyExecution
from temboo.Library.Amazon.SQS.SendMessage import SendMessage, SendMessageInputSet, SendMessageResultSet, SendMessageChoreographyExecution
from temboo.Library.Amazon.SQS.SendMessageBatch import SendMessageBatch, SendMessageBatchInputSet, SendMessageBatchResultSet, SendMessageBatchChoreographyExecution
from temboo.Library.Amazon.SQS.SetQueueAttributes import SetQueueAttributes, SetQueueAttributesInputSet, SetQueueAttributesResultSet, SetQueueAttributesChoreographyExecution
| gpl-3.0 |
REVLWorld/elasticsearch-dsl-py | test_elasticsearch_dsl/test_integration/test_search.py | 1 | 2708 | from elasticsearch import TransportError
from elasticsearch_dsl import Search, DocType, Date, String, MultiSearch, \
MetaField, Index, Q
from .test_data import DATA
from pytest import raises
class Repository(DocType):
    """DSL mapping for the ``repos`` document type in the ``git`` test index."""
    created_at = Date()
    description = String(analyzer='snowball')
    # not_analyzed: tags are stored and matched verbatim, not tokenized.
    tags = String(index='not_analyzed')

    class Meta:
        index = 'git'
        doc_type = 'repos'
class Commit(DocType):
    """DSL mapping for ``commits`` documents, parented to ``repos``."""
    class Meta:
        doc_type = 'commits'
        index = 'git'
        # parent meta field makes commits children of repo documents,
        # enabling has_parent queries and inner_hits.
        parent = MetaField(type='repos')
def test_inner_hits_are_wrapped_in_response(data_client):
    """inner_hits of a has_parent query deserialize into Response/DocType objects."""
    i = Index('git')
    i.doc_type(Repository)
    i.doc_type(Commit)
    s = i.search()[0:1].doc_type(Commit).query('has_parent', type='repos', inner_hits={}, query=Q('match_all'))
    response = s.execute()
    commit = response.hits[0]
    assert isinstance(commit.meta.inner_hits.repos, response.__class__)
    assert isinstance(commit.meta.inner_hits.repos[0], Repository)
def test_suggest_can_be_run_separately(data_client):
    """execute_suggest issues a suggest-only request, no search body needed."""
    s = Search()
    # Deliberately misspelled; the term suggester should correct it.
    s = s.suggest('simple_suggestion', 'elasticserach', term={'field': 'organization'})
    response = s.execute_suggest()
    assert response.success()
    assert response.simple_suggestion[0].options[0].text == 'elasticsearch'
def test_scan_respects_doc_types(data_client):
    """Repository.search().scan() yields only repos documents, properly typed."""
    repos = list(Repository.search().scan())
    assert 1 == len(repos)
    assert isinstance(repos[0], Repository)
def test_scan_iterates_through_all_docs(data_client):
    """scan() pages through every matching document, not just the first page."""
    s = Search(index='git').filter('term', _type='commits')
    commits = list(s.scan())
    # The fixture data (DATA) contains exactly 52 commit documents.
    assert 52 == len(commits)
    assert set(d['_id'] for d in DATA if d['_type'] == 'commits') == set(c.meta.id for c in commits)
def test_response_is_cached(data_client):
    """Iterating a Search executes it once and caches the Response on it."""
    s = Repository.search()
    repos = list(s)
    assert hasattr(s, '_response')
    assert s._response.hits == repos
def test_multi_search(data_client):
    """MultiSearch runs all added searches and links each Response to its Search."""
    s1 = Repository.search()
    s2 = Search(doc_type='commits')
    ms = MultiSearch(index='git')
    ms = ms.add(s1).add(s2)
    r1, r2 = ms.execute()
    assert 1 == len(r1)
    assert isinstance(r1[0], Repository)
    assert r1.search is s1
    assert 52 == r2.hits.total
    assert r2.search is s2
def test_multi_missing(data_client):
    """A failing sub-search raises by default; with raise_on_error=False its
    slot is None while the other responses still come back normally."""
    s1 = Repository.search()
    s2 = Search(doc_type='commits')
    s3 = Search(index='does_not_exist')
    ms = MultiSearch()
    ms = ms.add(s1).add(s2).add(s3)
    # Default behavior: the missing index aborts the whole multi-search.
    with raises(TransportError):
        ms.execute()
    r1, r2, r3 = ms.execute(raise_on_error=False)
    assert 1 == len(r1)
    assert isinstance(r1[0], Repository)
    assert r1.search is s1
    assert 52 == r2.hits.total
    assert r2.search is s2
    assert r3 is None
timothsp/where2ate | venv/lib/python3.3/site-packages/pip/_vendor/requests/packages/chardet/eucjpprober.py | 2919 | 3678 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
    """Multi-byte charset prober for EUC-JP.

    Combines a coding state machine (byte-sequence validity) with a
    character-distribution analyser and a context analyser; the final
    confidence is the better of the two analysers.
    """
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCJPSMModel)
        self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
        self._mContextAnalyzer = EUCJPContextAnalysis()
        self.reset()

    def reset(self):
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        return "EUC-JP"

    def feed(self, aBuf):
        """Feed a chunk of bytes to the prober; return the detection state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                # Byte sequence is invalid EUC-JP: rule this charset out.
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was recognized; feed it to both
                # analysers.  The first character of a chunk may straddle
                # the previous chunk, hence the _mLastChar buffer.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar, charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the last byte for the next chunk's boundary handling.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            if (self._mContextAnalyzer.got_enough_data() and
               (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                # Confident enough to short-circuit further probing.
                self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| cc0-1.0 |
cynngah/uofthacksIV | generate-jobs/lib/python2.7/site-packages/wheel/signatures/__init__.py | 565 | 3779 | """
Create and verify jws-js format Ed25519 signatures.
"""
__all__ = [ 'sign', 'verify' ]
import json
from ..util import urlsafe_b64decode, urlsafe_b64encode, native, binary
ed25519ll = None
ALG = "Ed25519"
def get_ed25519ll():
    """Lazy import-and-test of ed25519 module"""
    global ed25519ll
    if not ed25519ll:
        try:
            import ed25519ll # fast (thousands / s)
        except (ImportError, OSError): # pragma nocover
            # Fall back to the bundled pure-Python implementation.
            from . import ed25519py as ed25519ll # pure Python (hundreds / s)
        # Run the sign/verify self-test once, on first use (see test()).
        test()
    return ed25519ll
def sign(payload, keypair):
    """Return a JWS-JS format signature given a JSON-serializable payload and
    an Ed25519 keypair."""
    get_ed25519ll()
    # JWS protected header: carries the algorithm and the public
    # (verifying) key, so the signature is self-describing.
    header = {
        "alg": ALG,
        "jwk": {
            "kty": ALG, # alg -> kty in jwk-08.
            "vk": native(urlsafe_b64encode(keypair.vk))
        }
    }

    # Signing input is base64url(header) + "." + base64url(payload),
    # with keys sorted for deterministic serialization.
    encoded_header = urlsafe_b64encode(binary(json.dumps(header, sort_keys=True)))
    encoded_payload = urlsafe_b64encode(binary(json.dumps(payload, sort_keys=True)))
    secured_input = b".".join((encoded_header, encoded_payload))
    sig_msg = ed25519ll.crypto_sign(secured_input, keypair.sk)
    # crypto_sign returns signature || message; keep only the signature.
    signature = sig_msg[:ed25519ll.SIGNATUREBYTES]
    encoded_signature = urlsafe_b64encode(signature)

    return {"recipients":
            [{"header":native(encoded_header),
              "signature":native(encoded_signature)}],
            "payload": native(encoded_payload)}
def assertTrue(condition, message=""):
    """Raise ValueError(message) when ``condition`` is falsy; else return None."""
    if condition:
        return
    raise ValueError(message)
def verify(jwsjs):
    """Return (decoded headers, payload) if all signatures in jwsjs are
    consistent, else raise ValueError.

    Caller must decide whether the keys are actually trusted."""
    get_ed25519ll()
    # XXX forbid duplicate keys in JSON input using object_pairs_hook (2.7+)
    recipients = jwsjs["recipients"]
    encoded_payload = binary(jwsjs["payload"])
    headers = []
    for recipient in recipients:
        assertTrue(len(recipient) == 2, "Unknown recipient key {0}".format(recipient))
        h = binary(recipient["header"])
        s = binary(recipient["signature"])
        header = json.loads(native(urlsafe_b64decode(h)))
        assertTrue(header["alg"] == ALG,
                   "Unexpected algorithm {0}".format(header["alg"]))
        if "alg" in header["jwk"] and not "kty" in header["jwk"]:
            header["jwk"]["kty"] = header["jwk"]["alg"] # b/w for JWK < -08
        assertTrue(header["jwk"]["kty"] == ALG, # true for Ed25519
                   "Unexpected key type {0}".format(header["jwk"]["kty"]))
        # The verifying key is taken from the header itself; trust is the
        # caller's problem (see docstring).
        vk = urlsafe_b64decode(binary(header["jwk"]["vk"]))
        secured_input = b".".join((h, encoded_payload))
        sig = urlsafe_b64decode(s)
        # crypto_sign_open consumes signature || message and raises if the
        # signature does not verify against vk.
        sig_msg = sig+secured_input
        verified_input = native(ed25519ll.crypto_sign_open(sig_msg, vk))
        verified_header, verified_payload = verified_input.split('.')
        verified_header = binary(verified_header)
        decoded_header = native(urlsafe_b64decode(verified_header))
        headers.append(json.loads(decoded_header))

    verified_payload = binary(verified_payload)

    # only return header, payload that have passed through the crypto library.
    payload = json.loads(native(urlsafe_b64decode(verified_payload)))

    return headers, payload
def test():
    """Round-trip self-check: sign, verify, then ensure tampering is rejected."""
    kp = ed25519ll.crypto_sign_keypair()
    payload = {'test': 'onstartup'}
    jwsjs = json.loads(json.dumps(sign(payload, kp)))
    verify(jwsjs)
    # Corrupt the payload; verification must now fail with ValueError.
    jwsjs['payload'] += 'x'
    try:
        verify(jwsjs)
    except ValueError:
        pass
    else: # pragma no cover
        raise RuntimeError("No error from bad wheel.signatures payload.")
| mit |
ilastikdev/ilastik | ilastik/applets/splitBodySupervoxelExport/splitBodySupervoxelExportApplet.py | 4 | 1673 | ###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
from ilastik.applets.base.standardApplet import StandardApplet
from opSplitBodySupervoxelExport import OpSplitBodySupervoxelExport
class SplitBodySupervoxelExportApplet( StandardApplet ):
    """Applet wiring the split-body supervoxel export operator and its GUI."""
    def __init__( self, workflow ):
        super(SplitBodySupervoxelExportApplet, self).__init__("Split-body supervoxel export", workflow)

    @property
    def singleLaneOperatorClass(self):
        # One OpSplitBodySupervoxelExport instance per image lane.
        return OpSplitBodySupervoxelExport

    @property
    def singleLaneGuiClass(self):
        # Imported inside the property rather than at module level —
        # presumably to avoid pulling in GUI dependencies until a GUI is
        # actually requested (TODO confirm against headless usage).
        from splitBodySupervoxelExportGui import SplitBodySupervoxelExportGui
        return SplitBodySupervoxelExportGui

    @property
    def broadcastingSlots(self):
        # No slot values are broadcast across lanes.
        return []

    @property
    def dataSerializers(self):
        # Nothing to persist in the project file for this applet.
        return []
| gpl-3.0 |
kc4271/batch_downloader | requests/api.py | 361 | 4344 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.
    Returns :class:`Response <Response>` object.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) Float describing the timeout of the request in seconds.
    :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      <Response [200]>
    """

    # Use a throwaway one-request Session, and close it in all cases so
    # its connection pool (sockets) is released deterministically instead
    # of leaking until garbage collection.
    session = sessions.Session()
    try:
        return session.request(method=method, url=url, **kwargs)
    finally:
        session.close()
def get(url, **kwargs):
    """Send a GET request and return the resulting :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """

    # GET requests follow redirects unless the caller says otherwise.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('get', url, **kwargs)
def options(url, **kwargs):
    """Send an OPTIONS request and return the resulting :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """

    # OPTIONS requests follow redirects unless the caller says otherwise.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('options', url, **kwargs)
def head(url, **kwargs):
    """Send a HEAD request and return the resulting :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """

    # Unlike GET, redirects are NOT followed by default for HEAD.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = False
    return request('head', url, **kwargs)
def post(url, data=None, **kwargs):
    """Sends a POST request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """

    # Thin wrapper; session handling happens in request().
    return request('post', url, data=data, **kwargs)
def put(url, data=None, **kwargs):
    """Sends a PUT request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """

    # Thin wrapper; session handling happens in request().
    return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
    """Sends a PATCH request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """

    # Thin wrapper; session handling happens in request().
    return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
    """Sends a DELETE request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """

    # Thin wrapper; session handling happens in request().
    return request('delete', url, **kwargs)
| mit |
kaksmet/servo | components/script/dom/bindings/codegen/parser/tests/test_const.py | 134 | 3000 | import WebIDL
def WebIDLTest(parser, harness):
    """Parse an interface full of constants and validate every parsed IDLConst.

    Exercises all primitive const types (integer widths/signedness, booleans,
    nullable booleans) and several literal forms: negative, hex (0x4, -0X5)
    and octal (-010).
    """
    parser.parse("""
        interface TestConsts {
          const byte zero = 0;
          const byte b = -1;
          const octet o = 2;
          const short s = -3;
          const unsigned short us = 0x4;
          const long l = -0X5;
          const unsigned long ul = 6;
          const unsigned long long ull = 7;
          const long long ll = -010;
          const boolean t = true;
          const boolean f = false;
          const boolean? n = null;
          const boolean? nt = true;
          const boolean? nf = false;
        };
    """)

    results = parser.finish()

    harness.ok(True, "TestConsts interface parsed without error.")
    harness.check(len(results), 1, "Should be one production.")
    iface = results[0]
    harness.ok(isinstance(iface, WebIDL.IDLInterface),
               "Should be an IDLInterface")
    harness.check(iface.identifier.QName(), "::TestConsts", "Interface has the right QName")
    harness.check(iface.identifier.name, "TestConsts", "Interface has the right name")
    harness.check(len(iface.members), 14, "Expect 14 members")

    consts = iface.members

    def checkConst(const, QName, name, type, value):
        """Assert one member is an IDLConst with the given identity, type and
        (already-evaluated) value."""
        harness.ok(isinstance(const, WebIDL.IDLConst),
                   "Should be an IDLConst")
        harness.ok(const.isConst(), "Const is a const")
        harness.ok(not const.isAttr(), "Const is not an attr")
        harness.ok(not const.isMethod(), "Const is not a method")
        harness.check(const.identifier.QName(), QName, "Const has the right QName")
        harness.check(const.identifier.name, name, "Const has the right name")
        harness.check(str(const.type), type, "Const has the right type")
        harness.ok(const.type.isPrimitive(), "All consts should be primitive")
        harness.check(str(const.value.type), str(const.type),
                      "Const's value has the same type as the type")
        harness.check(const.value.value, value, "Const value has the right value.")

    # Note: hex/octal literals arrive evaluated (-0X5 -> -5, -010 -> -8).
    checkConst(consts[0], "::TestConsts::zero", "zero", "Byte", 0)
    checkConst(consts[1], "::TestConsts::b", "b", "Byte", -1)
    checkConst(consts[2], "::TestConsts::o", "o", "Octet", 2)
    checkConst(consts[3], "::TestConsts::s", "s", "Short", -3)
    checkConst(consts[4], "::TestConsts::us", "us", "UnsignedShort", 4)
    checkConst(consts[5], "::TestConsts::l", "l", "Long", -5)
    checkConst(consts[6], "::TestConsts::ul", "ul", "UnsignedLong", 6)
    checkConst(consts[7], "::TestConsts::ull", "ull", "UnsignedLongLong", 7)
    checkConst(consts[8], "::TestConsts::ll", "ll", "LongLong", -8)
    checkConst(consts[9], "::TestConsts::t", "t", "Boolean", True)
    checkConst(consts[10], "::TestConsts::f", "f", "Boolean", False)
    checkConst(consts[11], "::TestConsts::n", "n", "BooleanOrNull", None)
    checkConst(consts[12], "::TestConsts::nt", "nt", "BooleanOrNull", True)
    checkConst(consts[13], "::TestConsts::nf", "nf", "BooleanOrNull", False)
| mpl-2.0 |
GbalsaC/bitnamiP | lms/djangoapps/ccx/tests/test_models.py | 33 | 12589 | """
tests for the models
"""
from datetime import datetime, timedelta
from django.utils.timezone import UTC
from mock import patch
from nose.plugins.attrib import attr
from student.models import CourseEnrollment # pylint: disable=import-error
from student.roles import CourseCcxCoachRole # pylint: disable=import-error
from student.tests.factories import ( # pylint: disable=import-error
AdminFactory,
CourseEnrollmentFactory,
UserFactory,
)
from util.tests.test_date_utils import fake_ugettext
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import (
CourseFactory,
check_mongo_calls
)
from .factories import (
CcxFactory,
CcxFutureMembershipFactory,
)
from ..models import (
CcxMembership,
CcxFutureMembership,
)
from ..overrides import override_field_for_ccx
@attr('shard_1')
class TestCcxMembership(ModuleStoreTestCase):
    """Unit tests for the CcxMembership model
    """

    def setUp(self):
        """common setup for all tests"""
        super(TestCcxMembership, self).setUp()
        self.course = course = CourseFactory.create()
        coach = AdminFactory.create()
        role = CourseCcxCoachRole(course.id)
        role.add_users(coach)
        self.ccx = CcxFactory(course_id=course.id, coach=coach)
        # One user already enrolled in the MOOC, one not.
        enrollment = CourseEnrollmentFactory.create(course_id=course.id)
        self.enrolled_user = enrollment.user
        self.unenrolled_user = UserFactory.create()

    def create_future_enrollment(self, user, auto_enroll=True):
        """
        Create and return a CcxFutureMembership for ``user``'s email on the
        test CCX.
        """
        pfm = CcxFutureMembershipFactory.create(
            ccx=self.ccx,
            email=user.email,
            auto_enroll=auto_enroll
        )
        return pfm

    def has_course_enrollment(self, user):
        """
        Return True if ``user`` is enrolled in the MOOC course.
        """
        enrollment = CourseEnrollment.objects.filter(
            user=user, course_id=self.course.id
        )
        return enrollment.exists()

    def has_ccx_membership(self, user):
        """
        Return True if ``user`` has an active CcxMembership in the test CCX.
        """
        membership = CcxMembership.objects.filter(
            student=user, ccx=self.ccx, active=True
        )
        return membership.exists()

    def has_ccx_future_membership(self, user):
        """
        Return True if a CcxFutureMembership exists for ``user``'s email on
        the test CCX.
        """
        future_membership = CcxFutureMembership.objects.filter(
            email=user.email, ccx=self.ccx
        )
        return future_membership.exists()

    def call_mut(self, student, future_membership):
        """
        Call the method under test.
        """
        CcxMembership.auto_enroll(student, future_membership)

    def test_ccx_auto_enroll_unregistered_user(self):
        """verify auto_enroll works when user is not enrolled in the MOOC

        n.b. After auto_enroll, user will have both a MOOC enrollment and a
        CCX membership
        """
        user = self.unenrolled_user
        pfm = self.create_future_enrollment(user)
        self.assertTrue(self.has_ccx_future_membership(user))
        self.assertFalse(self.has_course_enrollment(user))
        # auto_enroll user
        self.call_mut(user, pfm)

        self.assertTrue(self.has_course_enrollment(user))
        self.assertTrue(self.has_ccx_membership(user))
        # The future membership is consumed by the enrollment.
        self.assertFalse(self.has_ccx_future_membership(user))

    def test_ccx_auto_enroll_registered_user(self):
        """verify auto_enroll works when user is enrolled in the MOOC
        """
        user = self.enrolled_user
        pfm = self.create_future_enrollment(user)
        self.assertTrue(self.has_ccx_future_membership(user))
        self.assertTrue(self.has_course_enrollment(user))

        self.call_mut(user, pfm)

        self.assertTrue(self.has_course_enrollment(user))
        self.assertTrue(self.has_ccx_membership(user))
        self.assertFalse(self.has_ccx_future_membership(user))

    def test_future_membership_disallows_auto_enroll(self):
        """verify that the CcxFutureMembership can veto auto_enroll
        """
        user = self.unenrolled_user
        pfm = self.create_future_enrollment(user, auto_enroll=False)
        self.assertTrue(self.has_ccx_future_membership(user))
        self.assertFalse(self.has_course_enrollment(user))

        # auto_enroll=False memberships must refuse automatic enrollment,
        # leaving all enrollment state untouched.
        self.assertRaises(ValueError, self.call_mut, user, pfm)

        self.assertFalse(self.has_course_enrollment(user))
        self.assertFalse(self.has_ccx_membership(user))
        self.assertTrue(self.has_ccx_future_membership(user))
@attr('shard_1')
class TestCCX(ModuleStoreTestCase):
"""Unit tests for the CustomCourseForEdX model
"""
def setUp(self):
"""common setup for all tests"""
super(TestCCX, self).setUp()
self.course = course = CourseFactory.create()
coach = AdminFactory.create()
role = CourseCcxCoachRole(course.id)
role.add_users(coach)
self.ccx = CcxFactory(course_id=course.id, coach=coach)
def set_ccx_override(self, field, value):
"""Create a field override for the test CCX on <field> with <value>"""
override_field_for_ccx(self.ccx, self.course, field, value)
def test_ccx_course_is_correct_course(self):
"""verify that the course property of a ccx returns the right course"""
expected = self.course
actual = self.ccx.course
self.assertEqual(expected, actual)
def test_ccx_course_caching(self):
"""verify that caching the propery works to limit queries"""
with check_mongo_calls(1):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.course # pylint: disable=pointless-statement
with check_mongo_calls(0):
self.ccx.course # pylint: disable=pointless-statement
def test_ccx_start_is_correct(self):
"""verify that the start datetime for a ccx is correctly retrieved
Note that after setting the start field override microseconds are
truncated, so we can't do a direct comparison between before and after.
For this reason we test the difference between and make sure it is less
than one second.
"""
expected = datetime.now(UTC())
self.set_ccx_override('start', expected)
actual = self.ccx.start # pylint: disable=no-member
diff = expected - actual
self.assertTrue(abs(diff.total_seconds()) < 1)
def test_ccx_start_caching(self):
"""verify that caching the start property works to limit queries"""
now = datetime.now(UTC())
self.set_ccx_override('start', now)
with check_mongo_calls(1):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.start # pylint: disable=pointless-statement, no-member
with check_mongo_calls(0):
self.ccx.start # pylint: disable=pointless-statement, no-member
def test_ccx_due_without_override(self):
"""verify that due returns None when the field has not been set"""
actual = self.ccx.due # pylint: disable=no-member
self.assertIsNone(actual)
def test_ccx_due_is_correct(self):
"""verify that the due datetime for a ccx is correctly retrieved"""
expected = datetime.now(UTC())
self.set_ccx_override('due', expected)
actual = self.ccx.due # pylint: disable=no-member
diff = expected - actual
self.assertTrue(abs(diff.total_seconds()) < 1)
def test_ccx_due_caching(self):
"""verify that caching the due property works to limit queries"""
expected = datetime.now(UTC())
self.set_ccx_override('due', expected)
with check_mongo_calls(1):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.due # pylint: disable=pointless-statement, no-member
with check_mongo_calls(0):
self.ccx.due # pylint: disable=pointless-statement, no-member
def test_ccx_has_started(self):
    """verify that a ccx marked as starting yesterday has started"""
    now = datetime.now(UTC())
    delta = timedelta(1)
    then = now - delta
    self.set_ccx_override('start', then)
    self.assertTrue(self.ccx.has_started())  # pylint: disable=no-member

def test_ccx_has_not_started(self):
    """verify that a ccx marked as starting tomorrow has not started"""
    now = datetime.now(UTC())
    delta = timedelta(1)
    then = now + delta
    self.set_ccx_override('start', then)
    self.assertFalse(self.ccx.has_started())  # pylint: disable=no-member

def test_ccx_has_ended(self):
    """verify that a ccx that has a due date in the past has ended"""
    now = datetime.now(UTC())
    delta = timedelta(1)
    then = now - delta
    self.set_ccx_override('due', then)
    self.assertTrue(self.ccx.has_ended())  # pylint: disable=no-member

def test_ccx_has_not_ended(self):
    """verify that a ccx that has a due date in the future has not ended
    """
    now = datetime.now(UTC())
    delta = timedelta(1)
    then = now + delta
    self.set_ccx_override('due', then)
    self.assertFalse(self.ccx.has_ended())  # pylint: disable=no-member

def test_ccx_without_due_date_has_not_ended(self):
    """verify that a ccx without a due date has not ended"""
    self.assertFalse(self.ccx.has_ended())  # pylint: disable=no-member
# ensure that the expected localized format will be found by the i18n
# service
@patch('util.date_utils.ugettext', fake_ugettext(translations={
    "SHORT_DATE_FORMAT": "%b %d, %Y",
}))
def test_start_datetime_short_date(self):
    """verify that the start date for a ccx formats properly by default"""
    start = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
    expected = "Jan 01, 2015"
    self.set_ccx_override('start', start)
    actual = self.ccx.start_datetime_text()  # pylint: disable=no-member
    self.assertEqual(expected, actual)

@patch('util.date_utils.ugettext', fake_ugettext(translations={
    "DATE_TIME_FORMAT": "%b %d, %Y at %H:%M",
}))
def test_start_datetime_date_time_format(self):
    """verify that the DATE_TIME format also works as expected"""
    start = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
    expected = "Jan 01, 2015 at 12:00 UTC"
    self.set_ccx_override('start', start)
    actual = self.ccx.start_datetime_text('DATE_TIME')  # pylint: disable=no-member
    self.assertEqual(expected, actual)

@patch('util.date_utils.ugettext', fake_ugettext(translations={
    "SHORT_DATE_FORMAT": "%b %d, %Y",
}))
def test_end_datetime_short_date(self):
    """verify that the end date for a ccx formats properly by default"""
    end = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
    expected = "Jan 01, 2015"
    self.set_ccx_override('due', end)
    actual = self.ccx.end_datetime_text()  # pylint: disable=no-member
    self.assertEqual(expected, actual)

@patch('util.date_utils.ugettext', fake_ugettext(translations={
    "DATE_TIME_FORMAT": "%b %d, %Y at %H:%M",
}))
def test_end_datetime_date_time_format(self):
    """verify that the DATE_TIME format also works as expected"""
    end = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
    expected = "Jan 01, 2015 at 12:00 UTC"
    self.set_ccx_override('due', end)
    actual = self.ccx.end_datetime_text('DATE_TIME')  # pylint: disable=no-member
    self.assertEqual(expected, actual)

@patch('util.date_utils.ugettext', fake_ugettext(translations={
    "DATE_TIME_FORMAT": "%b %d, %Y at %H:%M",
}))
def test_end_datetime_no_due_date(self):
    """verify that without a due date, the end date is an empty string"""
    expected = ''
    actual = self.ccx.end_datetime_text()  # pylint: disable=no-member
    self.assertEqual(expected, actual)
    # the DATE_TIME variant must likewise be empty when no due date is set
    actual = self.ccx.end_datetime_text('DATE_TIME')  # pylint: disable=no-member
    self.assertEqual(expected, actual)
| agpl-3.0 |
Lektorium-LLC/edx-platform | common/djangoapps/edxmako/shortcuts.py | 5 | 7383 | # Copyright (c) 2008 Mikeal Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from urlparse import urljoin
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.template import Context
from edxmako import lookup_template
from edxmako.request_context import get_template_request_context
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming.helpers import get_template_path, is_request_in_themed_site
log = logging.getLogger(__name__)
def marketing_link(name):
    """Return the URL for the named marketing-site link.

    Since the marketing site is enabled by a setting (which may be
    overridden per site), there are two possible URLs for certain links:
    the marketing site's URL map, or a Django URL name reversed from
    ``MKTG_URL_LINK_MAP``. This function decides which one to provide.
    """
    link_map = settings.MKTG_URL_LINK_MAP
    mktg_enabled = configuration_helpers.get_value(
        'ENABLE_MKTG_SITE',
        settings.FEATURES.get('ENABLE_MKTG_SITE', False)
    )
    mktg_urls = configuration_helpers.get_value(
        'MKTG_URLS',
        settings.MKTG_URLS
    )

    if mktg_enabled and name in mktg_urls:
        root_url = mktg_urls.get('ROOT')
        # Special case: the caller only wants the marketing site root.
        if name == 'ROOT':
            return root_url
        # urljoin allows entries to be either paths relative to ROOT or
        # fully absolute URLs, e.g.
        # urljoin('http://marketing.com', 'http://open-edx.org/about')
        # >>> 'http://open-edx.org/about'
        return urljoin(root_url, mktg_urls.get(name))

    if not mktg_enabled and name in link_map:
        # Disabled marketing links (mapped to None) are not reversed; in
        # that case nothing is returned, mirroring historical behaviour.
        if link_map[name] is not None:
            return reverse(link_map[name])
        return None

    log.debug("Cannot find corresponding link for name: %s", name)
    return '#'
def is_any_marketing_link_set(names):
    """
    Return True if at least one of the given named marketing links is configured.
    """
    for name in names:
        if is_marketing_link_set(name):
            return True
    return False
def is_marketing_link_set(name):
    """
    Return True if the given named marketing link is configured.
    """
    mktg_enabled = configuration_helpers.get_value(
        'ENABLE_MKTG_SITE',
        settings.FEATURES.get('ENABLE_MKTG_SITE', False)
    )
    marketing_urls = configuration_helpers.get_value(
        'MKTG_URLS',
        settings.MKTG_URLS
    )
    # With the marketing site enabled, links come from the MKTG_URLS map;
    # otherwise they come from the Django URL-name map.
    return name in (marketing_urls if mktg_enabled else settings.MKTG_URL_LINK_MAP)
def marketing_link_context_processor(request):
    """
    Django context processor exposing marketing URLs to templates.

    Every marketing link name usable with :func:`marketing_link` (e.g.
    'ROOT', 'CONTACT') is exposed as a template variable prefixed with
    'MKTG_URL_', mapped to the URL computed by :func:`marketing_link`.
    """
    marketing_urls = configuration_helpers.get_value(
        'MKTG_URLS',
        settings.MKTG_URLS
    )
    # Union of names known to the Django link map and the marketing URL map.
    link_names = settings.MKTG_URL_LINK_MAP.viewkeys() | marketing_urls.viewkeys()
    return {"MKTG_URL_" + name: marketing_link(name) for name in link_names}
def footer_context_processor(request):  # pylint: disable=unused-argument
    """
    Expose whether the request targets a themed site, so templates can choose
    between the edX.org footer and the Open Source footer.
    """
    return {"IS_REQUEST_IN_MICROSITE": is_request_in_themed_site()}
def render_to_string(template_name, dictionary, context=None, namespace='main', request=None):
    """
    Render a Mako template to as a string.

    The following values are available to all templates:
        settings: the django settings object
        EDX_ROOT_URL: settings.EDX_ROOT_URL
        marketing_link: The :func:`marketing_link` function
        is_any_marketing_link_set: The :func:`is_any_marketing_link_set` function
        is_marketing_link_set: The :func:`is_marketing_link_set` function

    Arguments:
        template_name: The name of the template to render. Will be loaded
            from the template paths specified in configuration.
        dictionary: A dictionary of variables to insert into the template during
            rendering.
        context: A :class:`~django.template.Context` with values to make
            available to the template.
        namespace: The Mako namespace to find the named template in.
        request: The request to use to construct the RequestContext for rendering
            this template. If not supplied, the current request will be used.
    """
    # Resolve through the theming system, which may substitute a themed
    # template path for the requested one.
    template_name = get_template_path(template_name)
    context_instance = Context(dictionary)
    # add dictionary to context_instance
    context_instance.update(dictionary or {})
    # collapse context_instance to a single dictionary for mako
    context_dictionary = {}
    context_instance['settings'] = settings
    context_instance['EDX_ROOT_URL'] = settings.EDX_ROOT_URL
    context_instance['marketing_link'] = marketing_link
    context_instance['is_any_marketing_link_set'] = is_any_marketing_link_set
    context_instance['is_marketing_link_set'] = is_marketing_link_set
    # In various testing contexts, there might not be a current request context.
    request_context = get_template_request_context(request)
    # Merge order matters: request-context layers are applied first so that
    # values from context_instance and the explicit `context` (applied later)
    # take precedence over them.
    if request_context:
        for item in request_context:
            context_dictionary.update(item)
    for item in context_instance:
        context_dictionary.update(item)
    if context:
        context_dictionary.update(context)
    # "Fix" CSRF token by evaluating the lazy object
    KEY_CSRF_TOKENS = ('csrf_token', 'csrf')
    for key in KEY_CSRF_TOKENS:
        if key in context_dictionary:
            context_dictionary[key] = unicode(context_dictionary[key])
    # fetch and render template
    template = lookup_template(namespace, template_name)
    return template.render_unicode(**context_dictionary)
def render_to_response(template_name, dictionary=None, context_instance=None, namespace='main', request=None, **kwargs):
    """
    Return an HttpResponse whose content is the named Mako template rendered
    with the given context. Extra keyword arguments are forwarded to
    HttpResponse (e.g. status, content_type).
    """
    rendered = render_to_string(template_name, dictionary or {}, context_instance, namespace, request)
    return HttpResponse(rendered, **kwargs)
| agpl-3.0 |
LouTheBrew/troposphere | examples/Redshift.py | 28 | 3265 | # Converted from Redshift.template located at:
# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/
from troposphere import Template, Parameter, Ref, Equals
from troposphere import If, Output, Join, GetAtt
from troposphere.redshift import Cluster, ClusterParameterGroup
from troposphere.redshift import AmazonRedshiftParameter
t = Template()

t.add_version("2010-09-09")

t.add_description(
    "AWS CloudFormation Sample Template: Redshift cluster")

dbname = t.add_parameter(Parameter(
    "DatabaseName",
    Description="The name of the first database to be created when the "
                "redshift cluster is created",
    Type="String",
    Default="defaultdb",
    AllowedPattern="([a-z]|[0-9])+",
))

# BUG FIX: Redshift's ClusterType property accepts only "single-node" and
# "multi-node". The original template used "multi-mode" both here and in the
# IsMultiNodeCluster condition, so a multi-node cluster could never be
# selected and the condition could never evaluate to true.
clustertype = t.add_parameter(Parameter(
    "ClusterType",
    Description="The type of the cluster",
    Type="String",
    Default="single-node",
    AllowedValues=[
        "single-node",
        "multi-node"
    ],
))

numberofnodes = t.add_parameter(Parameter(
    "NumberOfNodes",
    Description="The number of compute nodes in the redshift cluster. "
    "When cluster type is specified as: 1) single-node, the NumberOfNodes "
    "parameter should be specified as 1, 2) multi-node, the NumberOfNodes "
    "parameter should be greater than 1",
    Type="Number",
    Default="1",
))

nodetype = t.add_parameter(Parameter(
    "NodeType",
    Description="The node type to be provisioned for the redshift cluster",
    Type="String",
    Default="dw2.large",
))

masterusername = t.add_parameter(Parameter(
    "MasterUsername",
    Description="The user name associated with the master user account for "
                "the redshift cluster that is being created",
    Type="String",
    Default="defaultuser",
    AllowedPattern="([a-z])([a-z]|[0-9])*",
    NoEcho=True,
))

masteruserpassword = t.add_parameter(Parameter(
    "MasterUserPassword",
    Description="The password associated with the master user account for the "
                "redshift cluster that is being created.",
    Type="String",
    NoEcho=True,
))

# NumberOfNodes may only be passed to the cluster when it is multi-node;
# single-node clusters must omit the property entirely (see the If() below).
conditions = {
    "IsMultiNodeCluster": Equals(
        Ref("ClusterType"),
        "multi-node"
    ),
}

for k in conditions:
    t.add_condition(k, conditions[k])

redshiftcluster = t.add_resource(Cluster(
    "RedshiftCluster",
    ClusterType=Ref("ClusterType"),
    NumberOfNodes=If("IsMultiNodeCluster",
                     Ref("NumberOfNodes"), Ref("AWS::NoValue")),
    NodeType=Ref("NodeType"),
    DBName=Ref("DatabaseName"),
    MasterUsername=Ref("MasterUsername"),
    MasterUserPassword=Ref("MasterUserPassword"),
    ClusterParameterGroupName=Ref("RedshiftClusterParameterGroup"),
    DeletionPolicy="Snapshot",
))

amazonredshiftparameter1 = AmazonRedshiftParameter(
    "AmazonRedshiftParameter1",
    ParameterName="enable_user_activity_logging",
    ParameterValue="true",
)

redshiftclusterparametergroup = t.add_resource(ClusterParameterGroup(
    "RedshiftClusterParameterGroup",
    Description="Cluster parameter group",
    ParameterGroupFamily="redshift-1.0",
    Parameters=[amazonredshiftparameter1],
))

# Expose the cluster endpoint as "address:port" for consumers of the stack.
t.add_output(Output(
    "ClusterEndpoint",
    Value=Join(":", [GetAtt(redshiftcluster, "Endpoint.Address"),
                     GetAtt(redshiftcluster, "Endpoint.Port")]),
))

print(t.to_json())
| bsd-2-clause |
coxley/ansible | lib/ansible/plugins/connections/chroot.py | 35 | 6882 | # Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import os
import shlex
import subprocess
import traceback
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.connections import ConnectionBase
from ansible.utils.path import is_executable
from ansible.utils.unicode import to_bytes
class Connection(ConnectionBase):
    ''' Local chroot based connections '''

    # Chunk size (bytes) used when streaming files through dd.
    BUFSIZE = 65536

    # Module pipelining is not supported; files are streamed via dd instead.
    has_pipelining = False

    def __init__(self, *args, **kwargs):
        super(Connection, self).__init__(*args, **kwargs)
        # remote_addr names the chroot directory on the local filesystem
        self.chroot = self._play_context.remote_addr

        if os.geteuid() != 0:
            raise AnsibleError("chroot connection requires running as root")

        # we're running as root on the local system so do some
        # trivial checks for ensuring 'host' is actually a chroot'able dir
        if not os.path.isdir(self.chroot):
            raise AnsibleError("%s is not a directory" % self.chroot)

        chrootsh = os.path.join(self.chroot, 'bin/sh')
        if not is_executable(chrootsh):
            raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)

        self.chroot_cmd = distutils.spawn.find_executable('chroot')
        if not self.chroot_cmd:
            raise AnsibleError("chroot command not found in PATH")

    @property
    def transport(self):
        ''' used to identify this connection object '''
        return 'chroot'

    def _connect(self, port=None):
        ''' connect to the chroot; nothing to do here '''
        self._display.vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
        return self

    def _generate_cmd(self, executable, cmd):
        # Build the argv list for subprocess: either run the command string
        # through the given shell inside the chroot, or split it ourselves
        # and execute it directly under the chroot.
        if executable:
            local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
        else:
            # Prev to python2.7.3, shlex couldn't handle unicode type strings
            cmd = to_bytes(cmd)
            cmd = shlex.split(cmd)
            local_cmd = [self.chroot_cmd, self.chroot]
            local_cmd += cmd
        return local_cmd

    def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None, stdin=subprocess.PIPE):
        ''' run a command on the chroot. This is only needed for implementing
        put_file() get_file() so that we don't have to read the whole file
        into memory.

        compared to exec_command() it looses some niceties like being able to
        return the process's exit code immediately.
        '''
        if sudoable and self._play_context.become and self._play_context.become_method not in self.become_methods_supported:
            raise AnsibleError("Internal Error: this module does not support running commands via %s" % self._play_context.become_method)

        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

        # We enter zone as root so we ignore privilege escalation (probably need to fix in case we have to become a specific used [ex: postgres admin])?
        local_cmd = self._generate_cmd(executable, cmd)

        self._display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
        # FIXME: cwd= needs to be set to the basedir of the playbook, which
        # should come from loader, but is not in the connection plugins
        p = subprocess.Popen(local_cmd, shell=False,
                             stdin=stdin,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return p

    def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
        ''' run a command on the chroot '''
        p = self._buffered_exec_command(cmd, tmp_path, become_user, sudoable, executable, in_data)

        stdout, stderr = p.communicate()
        return (p.returncode, '', stdout, stderr)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to chroot '''
        self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)

        try:
            with open(in_path, 'rb') as in_file:
                # Stream through dd inside the chroot so the file is never
                # fully read into memory.
                try:
                    p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, self.BUFSIZE), None, stdin=in_file)
                except OSError:
                    raise AnsibleError("chroot connection requires dd command in the chroot")
                try:
                    stdout, stderr = p.communicate()
                except:
                    traceback.print_exc()
                    raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
                if p.returncode != 0:
                    raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
        except IOError:
            raise AnsibleError("file or module does not exist at: %s" % in_path)

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from chroot to local '''
        self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)

        # Stream the remote file out through dd, again avoiding reading the
        # whole file into memory at once.
        try:
            p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, self.BUFSIZE), None)
        except OSError:
            raise AnsibleError("chroot connection requires dd command in the chroot")

        with open(out_path, 'wb+') as out_file:
            try:
                chunk = p.stdout.read(self.BUFSIZE)
                while chunk:
                    out_file.write(chunk)
                    chunk = p.stdout.read(self.BUFSIZE)
            except:
                traceback.print_exc()
                raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
            stdout, stderr = p.communicate()
            if p.returncode != 0:
                raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
| gpl-3.0 |
minrk/nbgrader | nbgrader/preprocessors/lockcells.py | 3 | 1069 | from traitlets import Bool
from .. import utils
from . import NbGraderPreprocessor
class LockCells(NbGraderPreprocessor):
    """A preprocessor that marks selected cells as undeletable."""

    lock_solution_cells = Bool(True, config=True, help="Whether solution cells are undeletable")
    lock_grade_cells = Bool(True, config=True, help="Whether grade cells are undeletable")
    lock_readonly_cells = Bool(True, config=True, help="Whether readonly cells are undeletable")
    lock_all_cells = Bool(False, config=True, help="Whether all assignment cells are undeletable")

    def preprocess_cell(self, cell, resources, cell_index):
        """Set deletable=False on the cell when any enabled lock flag matches it."""
        # Same short-circuit order as checking each flag in turn:
        # all-cells first, then grade, solution, and readonly cells.
        should_lock = (
            self.lock_all_cells
            or (self.lock_grade_cells and utils.is_grade(cell))
            or (self.lock_solution_cells and utils.is_solution(cell))
            or (self.lock_readonly_cells and utils.is_locked(cell))
        )
        if should_lock:
            cell.metadata['deletable'] = False
        return cell, resources
| bsd-3-clause |
rangadi/incubator-beam | sdks/python/apache_beam/transforms/ptransform_test.py | 4 | 81197 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the PTransform and descendants."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import operator
import re
import sys
import unittest
from builtins import map
from builtins import range
from builtins import zip
from functools import reduce
import hamcrest as hc
from nose.plugins.attrib import attr
import apache_beam as beam
import apache_beam.pvalue as pvalue
import apache_beam.transforms.combiners as combine
import apache_beam.typehints as typehints
from apache_beam.io.iobase import Read
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.options.pipeline_options import TypeOptions
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import window
from apache_beam.transforms.core import _GroupByKeyOnly
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.transforms.ptransform import PTransform
from apache_beam.typehints import with_input_types
from apache_beam.typehints import with_output_types
from apache_beam.typehints.typehints_test import TypeHintTestCase
from apache_beam.utils.windowed_value import WindowedValue
# Disable frequent lint warning due to pipe operator for chaining transforms.
# pylint: disable=expression-not-assigned
class PTransformTest(unittest.TestCase):
    """Unit tests for PTransform and descendants (further methods follow in the file)."""

    # Enable nose tests running in parallel
    _multiprocess_can_split_ = True

    @classmethod
    def setUpClass(cls):
        # Method has been renamed in Python 3
        if sys.version_info[0] < 3:
            cls.assertCountEqual = cls.assertItemsEqual

    def assertStartswith(self, msg, prefix):
        """Custom assertion giving a clearer failure message than a bare assertTrue."""
        self.assertTrue(msg.startswith(prefix),
                        '"%s" does not start with "%s"' % (msg, prefix))

    def test_str(self):
        """Check str() representations of transforms and applied transforms."""
        self.assertEqual('<PTransform(PTransform) label=[PTransform]>',
                         str(PTransform()))

        pa = TestPipeline()
        res = pa | 'ALabel' >> beam.Create([1, 2])
        self.assertEqual('AppliedPTransform(ALabel/Read, Read)',
                         str(res.producer))

        pc = TestPipeline()
        res = pc | beam.Create([1, 2])
        inputs_tr = res.producer.transform
        inputs_tr.inputs = ('ci',)
        self.assertEqual(
            """<Read(PTransform) label=[Read] inputs=('ci',)>""",
            str(inputs_tr))

        pd = TestPipeline()
        res = pd | beam.Create([1, 2])
        side_tr = res.producer.transform
        side_tr.side_inputs = (4,)
        self.assertEqual(
            '<Read(PTransform) label=[Read] side_inputs=(4,)>',
            str(side_tr))

        inputs_tr.side_inputs = ('cs',)
        self.assertEqual(
            """<Read(PTransform) label=[Read] """
            """inputs=('ci',) side_inputs=('cs',)>""",
            str(inputs_tr))
def test_do_with_do_fn(self):
    """ParDo with a DoFn instance plus an extra positional argument."""
    class AddNDoFn(beam.DoFn):

        def process(self, element, addon):
            return [element + addon]

    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3])
    result = pcoll | 'Do' >> beam.ParDo(AddNDoFn(), 10)
    assert_that(result, equal_to([11, 12, 13]))
    pipeline.run()

def test_do_with_unconstructed_do_fn(self):
    """Passing a DoFn class (not an instance) to ParDo must raise."""
    class MyDoFn(beam.DoFn):

        def process(self):
            pass

    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3])
    with self.assertRaises(ValueError):
        pcoll | 'Do' >> beam.ParDo(MyDoFn)  # Note the lack of ()'s

def test_do_with_callable(self):
    """FlatMap with a plain callable plus an extra positional argument."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3])
    result = pcoll | 'Do' >> beam.FlatMap(lambda x, addon: [x + addon], 10)
    assert_that(result, equal_to([11, 12, 13]))
    pipeline.run()

def test_do_with_side_input_as_arg(self):
    """A singleton side input passed positionally to FlatMap."""
    pipeline = TestPipeline()
    side = pipeline | 'Side' >> beam.Create([10])
    pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3])
    result = pcoll | 'Do' >> beam.FlatMap(
        lambda x, addon: [x + addon], pvalue.AsSingleton(side))
    assert_that(result, equal_to([11, 12, 13]))
    pipeline.run()

def test_do_with_side_input_as_keyword_arg(self):
    """A singleton side input passed as a keyword argument to FlatMap."""
    pipeline = TestPipeline()
    side = pipeline | 'Side' >> beam.Create([10])
    pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3])
    result = pcoll | 'Do' >> beam.FlatMap(
        lambda x, addon: [x + addon], addon=pvalue.AsSingleton(side))
    assert_that(result, equal_to([11, 12, 13]))
    pipeline.run()

def test_do_with_do_fn_returning_string_raises_warning(self):
    """Returning a bare string from a FlatMap must raise a type-check error."""
    pipeline = TestPipeline()
    pipeline._options.view_as(TypeOptions).runtime_type_check = True
    pcoll = pipeline | 'Start' >> beam.Create(['2', '9', '3'])
    pcoll | 'Do' >> beam.FlatMap(lambda x: x + '1')

    # Since the DoFn directly returns a string we should get an error warning
    # us.
    with self.assertRaises(typehints.TypeCheckError) as cm:
        pipeline.run()

    expected_error_prefix = ('Returning a str from a ParDo or FlatMap '
                             'is discouraged.')
    self.assertStartswith(cm.exception.args[0], expected_error_prefix)

def test_do_with_do_fn_returning_dict_raises_warning(self):
    """Returning a bare dict from a FlatMap must raise a type-check error."""
    pipeline = TestPipeline()
    pipeline._options.view_as(TypeOptions).runtime_type_check = True
    pcoll = pipeline | 'Start' >> beam.Create(['2', '9', '3'])
    pcoll | 'Do' >> beam.FlatMap(lambda x: {x: '1'})

    # Since the DoFn directly returns a dict we should get an error warning
    # us.
    with self.assertRaises(typehints.TypeCheckError) as cm:
        pipeline.run()

    expected_error_prefix = ('Returning a dict from a ParDo or FlatMap '
                             'is discouraged.')
    self.assertStartswith(cm.exception.args[0], expected_error_prefix)

def test_do_with_multiple_outputs_maintains_unique_name(self):
    """Two with_outputs() transforms on one input keep distinct labels."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3])
    r1 = pcoll | 'A' >> beam.FlatMap(lambda x: [x + 1]).with_outputs(main='m')
    r2 = pcoll | 'B' >> beam.FlatMap(lambda x: [x + 2]).with_outputs(main='m')
    assert_that(r1.m, equal_to([2, 3, 4]), label='r1')
    assert_that(r2.m, equal_to([3, 4, 5]), label='r2')
    pipeline.run()
# TODO(BEAM-3544): Disable this test in streaming temporarily.
# Remove sickbay-streaming tag after it's resolved.
@attr('ValidatesRunner', 'sickbay-streaming')
def test_read_metrics(self):
    """Verify the source-defined 'recordsRead' counter is reported correctly."""
    from apache_beam.io.utils import CountingSource

    class CounterDoFn(beam.DoFn):

        def __init__(self):
            # This counter is unused.
            self.received_records = Metrics.counter(self.__class__,
                                                    'receivedRecords')

        def process(self, element):
            self.received_records.inc()

    pipeline = TestPipeline()
    (pipeline | Read(CountingSource(100)) | beam.ParDo(CounterDoFn()))
    res = pipeline.run()
    res.wait_until_finish()
    # This counter is defined in utils.CountingSource.
    metric_results = res.metrics().query(MetricsFilter()
                                         .with_name('recordsRead'))
    outputs_counter = metric_results['counters'][0]
    self.assertEqual(outputs_counter.key.step, 'Read')
    self.assertEqual(outputs_counter.key.metric.name, 'recordsRead')
    self.assertEqual(outputs_counter.committed, 100)
    self.assertEqual(outputs_counter.attempted, 100)
@attr('ValidatesRunner')
def test_par_do_with_multiple_outputs_and_using_yield(self):
    """Tagged outputs produced with yield are routed to the right PCollections."""
    class SomeDoFn(beam.DoFn):
        """A custom DoFn using yield."""

        def process(self, element):
            yield element
            if element % 2 == 0:
                yield pvalue.TaggedOutput('even', element)
            else:
                yield pvalue.TaggedOutput('odd', element)

    pipeline = TestPipeline()
    nums = pipeline | 'Some Numbers' >> beam.Create([1, 2, 3, 4])
    results = nums | 'ClassifyNumbers' >> beam.ParDo(
        SomeDoFn()).with_outputs('odd', 'even', main='main')
    assert_that(results.main, equal_to([1, 2, 3, 4]))
    assert_that(results.odd, equal_to([1, 3]), label='assert:odd')
    assert_that(results.even, equal_to([2, 4]), label='assert:even')
    pipeline.run()

@attr('ValidatesRunner')
def test_par_do_with_multiple_outputs_and_using_return(self):
    """Tagged outputs produced via a returned list behave the same as yield."""
    def some_fn(v):
        if v % 2 == 0:
            return [v, pvalue.TaggedOutput('even', v)]
        return [v, pvalue.TaggedOutput('odd', v)]

    pipeline = TestPipeline()
    nums = pipeline | 'Some Numbers' >> beam.Create([1, 2, 3, 4])
    results = nums | 'ClassifyNumbers' >> beam.FlatMap(
        some_fn).with_outputs('odd', 'even', main='main')
    assert_that(results.main, equal_to([1, 2, 3, 4]))
    assert_that(results.odd, equal_to([1, 3]), label='assert:odd')
    assert_that(results.even, equal_to([2, 4]), label='assert:even')
    pipeline.run()

@attr('ValidatesRunner')
def test_undeclared_outputs(self):
    """with_outputs() without declared tags still exposes all emitted tags."""
    pipeline = TestPipeline()
    nums = pipeline | 'Some Numbers' >> beam.Create([1, 2, 3, 4])
    results = nums | 'ClassifyNumbers' >> beam.FlatMap(
        lambda x: [x,
                   pvalue.TaggedOutput('even' if x % 2 == 0 else 'odd', x),
                   pvalue.TaggedOutput('extra', x)]
    ).with_outputs()
    assert_that(results[None], equal_to([1, 2, 3, 4]))
    assert_that(results.odd, equal_to([1, 3]), label='assert:odd')
    assert_that(results.even, equal_to([2, 4]), label='assert:even')
    pipeline.run()

@attr('ValidatesRunner')
def test_multiple_empty_outputs(self):
    """A tagged output that never receives elements yields an empty PCollection."""
    pipeline = TestPipeline()
    nums = pipeline | 'Some Numbers' >> beam.Create([1, 3, 5])
    results = nums | 'ClassifyNumbers' >> beam.FlatMap(
        lambda x: [x,
                   pvalue.TaggedOutput('even' if x % 2 == 0 else 'odd', x)]
    ).with_outputs()
    assert_that(results[None], equal_to([1, 3, 5]))
    assert_that(results.odd, equal_to([1, 3, 5]), label='assert:odd')
    assert_that(results.even, equal_to([]), label='assert:even')
    pipeline.run()
def test_do_requires_do_fn_returning_iterable(self):
    """A FlatMap fn returning a non-iterable must fail the type check."""
    # This function is incorrect because it returns an object that isn't an
    # iterable.
    def incorrect_par_do_fn(x):
        return x + 5

    pipeline = TestPipeline()
    pipeline._options.view_as(TypeOptions).runtime_type_check = True
    pcoll = pipeline | 'Start' >> beam.Create([2, 9, 3])
    pcoll | 'Do' >> beam.FlatMap(incorrect_par_do_fn)
    # It's a requirement that all user-defined functions to a ParDo return
    # an iterable.
    with self.assertRaises(typehints.TypeCheckError) as cm:
        pipeline.run()

    expected_error_prefix = 'FlatMap and ParDo must return an iterable.'
    self.assertStartswith(cm.exception.args[0], expected_error_prefix)

def test_do_fn_with_finish(self):
    """finish_bundle output appears exactly once per bundle in the result."""
    class MyDoFn(beam.DoFn):

        def process(self, element):
            pass

        def finish_bundle(self):
            yield WindowedValue('finish', -1, [window.GlobalWindow()])

    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3])
    result = pcoll | 'Do' >> beam.ParDo(MyDoFn())

    # May have many bundles, but each has a start and finish.
    def matcher():
        def match(actual):
            equal_to(['finish'])(list(set(actual)))
            equal_to([1])([actual.count('finish')])
        return match

    assert_that(result, matcher())
    pipeline.run()

def test_do_fn_with_start(self):
    """start_bundle runs before the first process() call of each bundle."""
    class MyDoFn(beam.DoFn):

        def __init__(self):
            self.state = 'init'

        def start_bundle(self):
            self.state = 'started'

        def process(self, element):
            if self.state == 'started':
                yield 'started'
            self.state = 'process'

    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3])
    result = pcoll | 'Do' >> beam.ParDo(MyDoFn())

    # May have many bundles, but each has a start and finish.
    def matcher():
        def match(actual):
            equal_to(['started'])(list(set(actual)))
            equal_to([1])([actual.count('started')])
        return match

    assert_that(result, matcher())
    pipeline.run()

def test_do_fn_with_start_error(self):
    """start_bundle returning a value (instead of None/iterable protocol) fails."""
    class MyDoFn(beam.DoFn):

        def start_bundle(self):
            return [1]

        def process(self, element):
            pass

    pipeline = TestPipeline()
    pipeline | 'Start' >> beam.Create([1, 2, 3]) | 'Do' >> beam.ParDo(MyDoFn())

    with self.assertRaises(RuntimeError):
        pipeline.run()

def test_filter(self):
    """Filter keeps only elements satisfying the predicate."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create([1, 2, 3, 4])
    result = pcoll | 'Filter' >> beam.Filter(lambda x: x % 2 == 0)
    assert_that(result, equal_to([2, 4]))
    pipeline.run()
  class _MeanCombineFn(beam.CombineFn):
    """CombineFn computing the arithmetic mean via a (sum, count) accumulator."""

    def create_accumulator(self):
      # Accumulator is a (running sum, element count) pair.
      return (0, 0)

    def add_input(self, sum_count, element):
      (sum_, count) = sum_count
      return sum_ + element, count + 1

    def merge_accumulators(self, accumulators):
      # Sum the sums and the counts component-wise.
      sums, counts = zip(*accumulators)
      return sum(sums), sum(counts)

    def extract_output(self, sum_count):
      (sum_, count) = sum_count
      if not count:
        # The mean of an empty input is undefined; report NaN.
        return float('nan')
      return sum_ / float(count)
  def test_combine_with_combine_fn(self):
    """CombineGlobally with a CombineFn instance computes a global mean."""
    vals = [1, 2, 3, 4, 5, 6, 7]
    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create(vals)
    result = pcoll | 'Mean' >> beam.CombineGlobally(self._MeanCombineFn())
    # sum(vals) divides evenly by len(vals) here, so the float mean equals
    # the integer quotient.
    assert_that(result, equal_to([sum(vals) // len(vals)]))
    pipeline.run()
def test_combine_with_callable(self):
vals = [1, 2, 3, 4, 5, 6, 7]
pipeline = TestPipeline()
pcoll = pipeline | 'Start' >> beam.Create(vals)
result = pcoll | beam.CombineGlobally(sum)
assert_that(result, equal_to([sum(vals)]))
pipeline.run()
  def test_combine_with_side_input_as_arg(self):
    """A side input can be passed as an extra argument to CombineGlobally."""
    values = [1, 2, 3, 4, 5, 6, 7]
    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create(values)
    divisor = pipeline | 'Divisor' >> beam.Create([2])
    result = pcoll | 'Max' >> beam.CombineGlobally(
        # Multiples of divisor only.
        lambda vals, d: max(v for v in vals if v % d == 0),
        pvalue.AsSingleton(divisor)).without_defaults()
    filt_vals = [v for v in values if v % 2 == 0]
    assert_that(result, equal_to([max(filt_vals)]))
    pipeline.run()
  def test_combine_per_key_with_combine_fn(self):
    """CombinePerKey applies a CombineFn independently per key."""
    vals_1 = [1, 2, 3, 4, 5, 6, 7]
    vals_2 = [2, 4, 6, 8, 10, 12, 14]
    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create(([('a', x) for x in vals_1] +
                                               [('b', x) for x in vals_2]))
    result = pcoll | 'Mean' >> beam.CombinePerKey(self._MeanCombineFn())
    # Both per-key sums divide evenly, so the integer quotients equal the
    # float means produced by _MeanCombineFn.
    assert_that(result, equal_to([('a', sum(vals_1) // len(vals_1)),
                                  ('b', sum(vals_2) // len(vals_2))]))
    pipeline.run()
def test_combine_per_key_with_callable(self):
vals_1 = [1, 2, 3, 4, 5, 6, 7]
vals_2 = [2, 4, 6, 8, 10, 12, 14]
pipeline = TestPipeline()
pcoll = pipeline | 'Start' >> beam.Create(([('a', x) for x in vals_1] +
[('b', x) for x in vals_2]))
result = pcoll | beam.CombinePerKey(sum)
assert_that(result, equal_to([('a', sum(vals_1)), ('b', sum(vals_2))]))
pipeline.run()
  def test_combine_per_key_with_side_input_as_arg(self):
    """A side input can parameterize the per-key combining callable."""
    vals_1 = [1, 2, 3, 4, 5, 6, 7]
    vals_2 = [2, 4, 6, 8, 10, 12, 14]
    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create(([('a', x) for x in vals_1] +
                                               [('b', x) for x in vals_2]))
    divisor = pipeline | 'Divisor' >> beam.Create([2])
    result = pcoll | beam.CombinePerKey(
        lambda vals, d: max(v for v in vals if v % d == 0),
        pvalue.AsSingleton(divisor))  # Multiples of divisor only.
    m_1 = max(v for v in vals_1 if v % 2 == 0)
    m_2 = max(v for v in vals_2 if v % 2 == 0)
    assert_that(result, equal_to([('a', m_1), ('b', m_2)]))
    pipeline.run()
  def test_group_by_key(self):
    """GroupByKey collects all values for each key."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'start' >> beam.Create(
        [(1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (1, 3)])
    result = pcoll | 'Group' >> beam.GroupByKey()
    # NOTE(review): asserts a specific ordering of values within each group;
    # assumes the runner preserves input order here — confirm if flaky.
    assert_that(result, equal_to([(1, [1, 2, 3]), (2, [1, 2]), (3, [1])]))
    pipeline.run()
  def test_partition_with_partition_fn(self):
    """Partition routes elements via a PartitionFn; out-of-range indices fail."""

    class SomePartitionFn(beam.PartitionFn):
      def partition_for(self, element, num_partitions, offset):
        return (element % 3) + offset

    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create([0, 1, 2, 3, 4, 5, 6, 7, 8])
    # Attempt nominal partition operation.
    partitions = pcoll | 'Part 1' >> beam.Partition(SomePartitionFn(), 4, 1)
    # With offset 1, partition 0 receives no elements.
    assert_that(partitions[0], equal_to([]))
    assert_that(partitions[1], equal_to([0, 3, 6]), label='p1')
    assert_that(partitions[2], equal_to([1, 4, 7]), label='p2')
    assert_that(partitions[3], equal_to([2, 5, 8]), label='p3')
    pipeline.run()

    # Check that a bad partition label will yield an error. For the
    # DirectRunner, this error manifests as an exception.
    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create([0, 1, 2, 3, 4, 5, 6, 7, 8])
    partitions = pcoll | 'Part 2' >> beam.Partition(SomePartitionFn(), 4, 10000)
    with self.assertRaises(ValueError):
      pipeline.run()
def test_partition_with_callable(self):
pipeline = TestPipeline()
pcoll = pipeline | 'Start' >> beam.Create([0, 1, 2, 3, 4, 5, 6, 7, 8])
partitions = (
pcoll | 'part' >> beam.Partition(
lambda e, n, offset: (e % 3) + offset, 4,
1))
assert_that(partitions[0], equal_to([]))
assert_that(partitions[1], equal_to([0, 3, 6]), label='p1')
assert_that(partitions[2], equal_to([1, 4, 7]), label='p2')
assert_that(partitions[3], equal_to([2, 5, 8]), label='p3')
pipeline.run()
  def test_partition_followed_by_flatten_and_groupbykey(self):
    """Regression test for an issue with how partitions are handled."""
    pipeline = TestPipeline()
    contents = [('aa', 1), ('bb', 2), ('aa', 2)]
    created = pipeline | 'A' >> beam.Create(contents)
    # Partition by key length, then immediately recombine with Flatten.
    partitioned = created | 'B' >> beam.Partition(lambda x, n: len(x) % n, 3)
    flattened = partitioned | 'C' >> beam.Flatten()
    grouped = flattened | 'D' >> beam.GroupByKey()
    assert_that(grouped, equal_to([('aa', [1, 2]), ('bb', [2])]))
    pipeline.run()
def test_flatten_pcollections(self):
pipeline = TestPipeline()
pcoll_1 = pipeline | 'Start 1' >> beam.Create([0, 1, 2, 3])
pcoll_2 = pipeline | 'Start 2' >> beam.Create([4, 5, 6, 7])
result = (pcoll_1, pcoll_2) | 'Flatten' >> beam.Flatten()
assert_that(result, equal_to([0, 1, 2, 3, 4, 5, 6, 7]))
pipeline.run()
  def test_flatten_no_pcollections(self):
    """Flattening an empty tuple requires an explicit pipeline reference."""
    pipeline = TestPipeline()
    # Without a pipeline argument there is nothing to attach the transform to.
    with self.assertRaises(ValueError):
      () | 'PipelineArgMissing' >> beam.Flatten()
    result = () | 'Empty' >> beam.Flatten(pipeline=pipeline)
    assert_that(result, equal_to([]))
    pipeline.run()
  def test_flatten_same_pcollections(self):
    """The same PCollection may appear multiple times in a Flatten input."""
    pipeline = TestPipeline()
    pc = pipeline | beam.Create(['a', 'b'])
    assert_that((pc, pc, pc) | beam.Flatten(), equal_to(['a', 'b'] * 3))
    pipeline.run()
def test_flatten_pcollections_in_iterable(self):
pipeline = TestPipeline()
pcoll_1 = pipeline | 'Start 1' >> beam.Create([0, 1, 2, 3])
pcoll_2 = pipeline | 'Start 2' >> beam.Create([4, 5, 6, 7])
result = [pcoll for pcoll in (pcoll_1, pcoll_2)] | beam.Flatten()
assert_that(result, equal_to([0, 1, 2, 3, 4, 5, 6, 7]))
pipeline.run()
  def test_flatten_input_type_must_be_iterable(self):
    """A non-iterable input to Flatten raises ValueError."""
    # Inputs to flatten *must* be an iterable.
    with self.assertRaises(ValueError):
      4 | beam.Flatten()
  def test_flatten_input_type_must_be_iterable_of_pcolls(self):
    """Iterables whose elements are not PCollections raise TypeError."""
    # Inputs to flatten *must* be an iterable of PCollections.
    with self.assertRaises(TypeError):
      {'l': 'test'} | beam.Flatten()
    with self.assertRaises(TypeError):
      set([1, 2, 3]) | beam.Flatten()
  @attr('ValidatesRunner')
  def test_flatten_multiple_pcollections_having_multiple_consumers(self):
    """Flatten inputs may also feed other downstream consumers (assertions)."""
    pipeline = TestPipeline()
    input = pipeline | 'Start' >> beam.Create(['AA', 'BBB', 'CC'])

    def split_even_odd(element):
      # Tag each element by the parity of its length.
      tag = 'even_length' if len(element) % 2 == 0 else 'odd_length'
      return pvalue.TaggedOutput(tag, element)

    even_length, odd_length = (input | beam.Map(split_even_odd)
                               .with_outputs('even_length', 'odd_length'))
    merged = (even_length, odd_length) | 'Flatten' >> beam.Flatten()

    assert_that(merged, equal_to(['AA', 'BBB', 'CC']))
    assert_that(even_length, equal_to(['AA', 'CC']), label='assert:even')
    assert_that(odd_length, equal_to(['BBB']), label='assert:odd')
    pipeline.run()
  def test_co_group_by_key_on_list(self):
    """CoGroupByKey on a tuple of PCollections yields per-source value lists."""
    pipeline = TestPipeline()
    pcoll_1 = pipeline | 'Start 1' >> beam.Create(
        [('a', 1), ('a', 2), ('b', 3), ('c', 4)])
    pcoll_2 = pipeline | 'Start 2' >> beam.Create(
        [('a', 5), ('a', 6), ('c', 7), ('c', 8)])
    result = (pcoll_1, pcoll_2) | beam.CoGroupByKey()
    # Keys missing from one source get an empty list for that source.
    assert_that(result, equal_to([('a', ([1, 2], [5, 6])),
                                  ('b', ([3], [])),
                                  ('c', ([4], [7, 8]))]))
    pipeline.run()
def test_co_group_by_key_on_iterable(self):
pipeline = TestPipeline()
pcoll_1 = pipeline | 'Start 1' >> beam.Create(
[('a', 1), ('a', 2), ('b', 3), ('c', 4)])
pcoll_2 = pipeline | 'Start 2' >> beam.Create(
[('a', 5), ('a', 6), ('c', 7), ('c', 8)])
result = [pc for pc in (pcoll_1, pcoll_2)] | beam.CoGroupByKey()
assert_that(result, equal_to([('a', ([1, 2], [5, 6])),
('b', ([3], [])),
('c', ([4], [7, 8]))]))
pipeline.run()
  def test_co_group_by_key_on_dict(self):
    """CoGroupByKey on a dict of PCollections keys each result by dict key."""
    pipeline = TestPipeline()
    pcoll_1 = pipeline | 'Start 1' >> beam.Create(
        [('a', 1), ('a', 2), ('b', 3), ('c', 4)])
    pcoll_2 = pipeline | 'Start 2' >> beam.Create(
        [('a', 5), ('a', 6), ('c', 7), ('c', 8)])
    result = {'X': pcoll_1, 'Y': pcoll_2} | beam.CoGroupByKey()
    assert_that(result, equal_to([('a', {'X': [1, 2], 'Y': [5, 6]}),
                                  ('b', {'X': [3], 'Y': []}),
                                  ('c', {'X': [4], 'Y': [7, 8]})]))
    pipeline.run()
  def test_group_by_key_input_must_be_kv_pairs(self):
    """GroupByKey rejects inputs that are not key/value tuples."""
    pipeline = TestPipeline()
    pcolls = pipeline | 'A' >> beam.Create([1, 2, 3, 4, 5])
    with self.assertRaises(typehints.TypeCheckError) as e:
      pcolls | 'D' >> beam.GroupByKey()
      pipeline.run()
    self.assertStartswith(
        e.exception.args[0],
        'Input type hint violation at D: expected '
        'Tuple[TypeVariable[K], TypeVariable[V]]')
  def test_group_by_key_only_input_must_be_kv_pairs(self):
    """_GroupByKeyOnly likewise requires key/value tuple input."""
    pipeline = TestPipeline()
    pcolls = pipeline | 'A' >> beam.Create(['a', 'b', 'f'])
    with self.assertRaises(typehints.TypeCheckError) as cm:
      pcolls | 'D' >> _GroupByKeyOnly()
      pipeline.run()
    expected_error_prefix = ('Input type hint violation at D: expected '
                             'Tuple[TypeVariable[K], TypeVariable[V]]')
    self.assertStartswith(cm.exception.args[0], expected_error_prefix)
  def test_keys_and_values(self):
    """beam.Keys and beam.Values project the respective halves of each pair."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create(
        [(3, 1), (2, 1), (1, 1), (3, 2), (2, 2), (3, 3)])
    keys = pcoll.apply(beam.Keys('keys'))
    vals = pcoll.apply(beam.Values('vals'))
    assert_that(keys, equal_to([1, 2, 2, 3, 3, 3]), label='assert:keys')
    assert_that(vals, equal_to([1, 1, 1, 2, 2, 3]), label='assert:vals')
    pipeline.run()
def test_kv_swap(self):
pipeline = TestPipeline()
pcoll = pipeline | 'Start' >> beam.Create(
[(6, 3), (1, 2), (7, 1), (5, 2), (3, 2)])
result = pcoll.apply(beam.KvSwap(), label='swap')
assert_that(result, equal_to([(1, 7), (2, 1), (2, 3), (2, 5), (3, 6)]))
pipeline.run()
  def test_remove_duplicates(self):
    """RemoveDuplicates keeps one occurrence of each distinct element."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create(
        [6, 3, 1, 1, 9, 'pleat', 'pleat', 'kazoo', 'navel'])
    result = pcoll.apply(beam.RemoveDuplicates())
    assert_that(result, equal_to([1, 3, 6, 9, 'pleat', 'kazoo', 'navel']))
    pipeline.run()
  def test_chained_ptransforms(self):
    """PTransforms composed with | form a single reusable composite."""
    pipeline = TestPipeline()
    # A word-count-style composite built before being applied to any input.
    t = (beam.Map(lambda x: (x, 1))
         | beam.GroupByKey()
         | beam.Map(lambda x_ones: (x_ones[0], sum(x_ones[1]))))
    result = pipeline | 'Start' >> beam.Create(['a', 'a', 'b']) | t
    assert_that(result, equal_to([('a', 2), ('b', 1)]))
    pipeline.run()
def test_apply_to_list(self):
self.assertCountEqual(
[1, 2, 3], [0, 1, 2] | 'AddOne' >> beam.Map(lambda x: x + 1))
self.assertItemsEqual([1],
[0, 1, 2] | 'Odd' >> beam.Filter(lambda x: x % 2))
self.assertCountEqual([1, 2, 100, 3],
([1, 2, 3], [100]) | beam.Flatten())
join_input = ([('k', 'a')],
[('k', 'b'), ('k', 'c')])
self.assertCountEqual([('k', (['a'], ['b', 'c']))],
join_input | beam.CoGroupByKey())
  def test_multi_input_ptransform(self):
    """A custom PTransform's expand may accept a tuple of PCollections."""

    class DisjointUnion(PTransform):
      def expand(self, pcollections):
        # Flatten, then de-duplicate via a GroupByKey over (x, None) pairs.
        return (pcollections
                | beam.Flatten()
                | beam.Map(lambda x: (x, None))
                | beam.GroupByKey()
                | beam.Map(lambda kv: kv[0]))

    self.assertEqual([1, 2, 3], sorted(([1, 2], [2, 3]) | DisjointUnion()))
  def test_apply_to_crazy_pvaluish(self):
    """Custom _extract_input_pvalues supports nested dict-shaped PValueish."""

    class NestedFlatten(PTransform):
      """A PTransform taking and returning nested PValueish.

      Takes as input a list of dicts, and returns a dict with the
      corresponding values flattened.
      """
      def _extract_input_pvalues(self, pvalueish):
        pvalueish = list(pvalueish)
        return pvalueish, sum([list(p.values()) for p in pvalueish], [])

      def expand(self, pcoll_dicts):
        # Union of all keys appearing in any input dict.
        # NOTE(review): relies on `reduce` being in scope (a builtin on
        # Python 2; requires functools.reduce on Python 3) — confirm import.
        keys = reduce(operator.or_, [set(p.keys()) for p in pcoll_dicts])
        res = {}
        for k in keys:
          res[k] = [p[k] for p in pcoll_dicts if k in p] | k >> beam.Flatten()
        return res

    res = [{'a': [1, 2, 3]},
           {'a': [4, 5, 6], 'b': ['x', 'y', 'z']},
           {'a': [7, 8], 'b': ['x', 'y'], 'c': []}] | NestedFlatten()

    self.assertEqual(3, len(res))
    self.assertEqual([1, 2, 3, 4, 5, 6, 7, 8], sorted(res['a']))
    self.assertEqual(['x', 'x', 'y', 'y', 'z'], sorted(res['b']))
    self.assertEqual([], sorted(res['c']))
  def test_named_tuple(self):
    """expand may return a namedtuple of PCollections as its PValueish."""
    MinMax = collections.namedtuple('MinMax', ['min', 'max'])

    class MinMaxTransform(PTransform):
      def expand(self, pcoll):
        return MinMax(
            min=pcoll | beam.CombineGlobally(min).without_defaults(),
            max=pcoll | beam.CombineGlobally(max).without_defaults())

    res = [1, 2, 4, 8] | MinMaxTransform()
    self.assertIsInstance(res, MinMax)
    self.assertEqual(res, MinMax(min=[1], max=[8]))

    # The namedtuple result itself is a valid Flatten input.
    flat = res | beam.Flatten()
    self.assertEqual(sorted(flat), [1, 8])
def test_tuple_twice(self):
class Duplicate(PTransform):
def expand(self, pcoll):
return pcoll, pcoll
res1, res2 = [1, 2, 4, 8] | Duplicate()
self.assertEqual(sorted(res1), [1, 2, 4, 8])
self.assertEqual(sorted(res2), [1, 2, 4, 8])
@beam.ptransform_fn
def SamplePTransform(pcoll):
  """Sample transform using the @ptransform_fn decorator."""
  # Apply the same three labeled sub-transforms as before, inlined into a
  # single chained expression.
  return (pcoll
          | 'ToPairs' >> beam.Map(lambda v: (v, None))
          | 'Group' >> beam.CombinePerKey(lambda vs: None)
          | 'RemoveDuplicates' >> beam.Keys())
class PTransformLabelsTest(unittest.TestCase):
  """Tests that applied transforms receive the expected labels."""

  class CustomTransform(beam.PTransform):
    # Records the inner ParDo so tests can inspect its label.
    pardo = None

    def expand(self, pcoll):
      self.pardo = '*Do*' >> beam.FlatMap(lambda x: [x + 1])
      return pcoll | self.pardo

  def test_chained_ptransforms(self):
    """Tests that chaining gets proper nesting."""
    pipeline = TestPipeline()
    map1 = 'Map1' >> beam.Map(lambda x: (x, 1))
    gbk = 'Gbk' >> beam.GroupByKey()
    map2 = 'Map2' >> beam.Map(lambda x_ones2: (x_ones2[0], sum(x_ones2[1])))
    t = (map1 | gbk | map2)
    result = pipeline | 'Start' >> beam.Create(['a', 'a', 'b']) | t
    # The composite label joins the chained labels with '|' and nests each
    # sub-transform beneath it.
    self.assertTrue('Map1|Gbk|Map2/Map1' in pipeline.applied_labels)
    self.assertTrue('Map1|Gbk|Map2/Gbk' in pipeline.applied_labels)
    self.assertTrue('Map1|Gbk|Map2/Map2' in pipeline.applied_labels)
    assert_that(result, equal_to([('a', 2), ('b', 1)]))
    pipeline.run()

  def test_apply_custom_transform_without_label(self):
    """An unlabeled transform defaults to its class name as its label."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'PColl' >> beam.Create([1, 2, 3])
    custom = PTransformLabelsTest.CustomTransform()
    result = pipeline.apply(custom, pcoll)
    self.assertTrue('CustomTransform' in pipeline.applied_labels)
    self.assertTrue('CustomTransform/*Do*' in pipeline.applied_labels)
    assert_that(result, equal_to([2, 3, 4]))
    pipeline.run()

  def test_apply_custom_transform_with_label(self):
    """An explicit label overrides the default class-name label."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'PColl' >> beam.Create([1, 2, 3])
    custom = PTransformLabelsTest.CustomTransform('*Custom*')
    result = pipeline.apply(custom, pcoll)
    self.assertTrue('*Custom*' in pipeline.applied_labels)
    self.assertTrue('*Custom*/*Do*' in pipeline.applied_labels)
    assert_that(result, equal_to([2, 3, 4]))
    pipeline.run()

  def test_combine_without_label(self):
    """CombineGlobally derives its label from the combining callable."""
    vals = [1, 2, 3, 4, 5, 6, 7]
    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create(vals)
    combine = beam.CombineGlobally(sum)
    result = pcoll | combine
    self.assertTrue('CombineGlobally(sum)' in pipeline.applied_labels)
    assert_that(result, equal_to([sum(vals)]))
    pipeline.run()

  def test_apply_ptransform_using_decorator(self):
    """@ptransform_fn transforms nest their sub-transform labels."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'PColl' >> beam.Create([1, 2, 3])
    _ = pcoll | '*Sample*' >> SamplePTransform()
    self.assertTrue('*Sample*' in pipeline.applied_labels)
    self.assertTrue('*Sample*/ToPairs' in pipeline.applied_labels)
    self.assertTrue('*Sample*/Group' in pipeline.applied_labels)
    self.assertTrue('*Sample*/RemoveDuplicates' in pipeline.applied_labels)

  def test_combine_with_label(self):
    """An explicit label replaces the derived CombineGlobally label."""
    vals = [1, 2, 3, 4, 5, 6, 7]
    pipeline = TestPipeline()
    pcoll = pipeline | 'Start' >> beam.Create(vals)
    combine = '*Sum*' >> beam.CombineGlobally(sum)
    result = pcoll | combine
    self.assertTrue('*Sum*' in pipeline.applied_labels)
    assert_that(result, equal_to([sum(vals)]))
    pipeline.run()

  def check_label(self, ptransform, expected_label):
    """Apply `ptransform` and assert its label, normalizing any run of three
    or more digits (e.g. a source line number) to '#'."""
    pipeline = TestPipeline()
    pipeline | 'Start' >> beam.Create([('a', 1)]) | ptransform
    actual_label = sorted(pipeline.applied_labels - {'Start', 'Start/Read'})[0]
    self.assertEqual(expected_label, re.sub(r'\d{3,}', '#', actual_label))

  def test_default_labels(self):
    """Default labels identify the transform type and its wrapped callable."""
    self.check_label(beam.Map(len), r'Map(len)')
    self.check_label(beam.Map(lambda x: x),
                     r'Map(<lambda at ptransform_test.py:#>)')
    self.check_label(beam.FlatMap(list), r'FlatMap(list)')
    self.check_label(beam.Filter(sum), r'Filter(sum)')
    self.check_label(beam.CombineGlobally(sum), r'CombineGlobally(sum)')
    self.check_label(beam.CombinePerKey(sum), r'CombinePerKey(sum)')

    class MyDoFn(beam.DoFn):
      def process(self):
        pass

    self.check_label(beam.ParDo(MyDoFn()), r'ParDo(MyDoFn)')

  def test_label_propogation(self):
    """Explicit labels override the defaults for every transform type.

    NOTE: the method name keeps its historical typo ('propogation').
    """
    self.check_label('TestMap' >> beam.Map(len), r'TestMap')
    self.check_label('TestLambda' >> beam.Map(lambda x: x), r'TestLambda')
    self.check_label('TestFlatMap' >> beam.FlatMap(list), r'TestFlatMap')
    self.check_label('TestFilter' >> beam.Filter(sum), r'TestFilter')
    self.check_label('TestCG' >> beam.CombineGlobally(sum), r'TestCG')
    self.check_label('TestCPK' >> beam.CombinePerKey(sum), r'TestCPK')

    class MyDoFn(beam.DoFn):
      def process(self):
        pass

    self.check_label('TestParDo' >> beam.ParDo(MyDoFn()), r'TestParDo')
class PTransformTestDisplayData(unittest.TestCase):
  """Verifies the display data emitted for callable-wrapping transforms."""

  # All CallableWrapperDoFn display data is registered under this namespace.
  _NSPACE = 'apache_beam.transforms.core.CallableWrapperDoFn'

  def _assert_fn_display_data(self, transform, expected_fn_name):
    """Assert `transform` exposes its wrapped callable's name as the 'fn'
    display data item (shared body of the six tests below)."""
    dd = DisplayData.create_from(transform)
    expected_item = DisplayDataItem(expected_fn_name, key='fn',
                                    label='Transform Function',
                                    namespace=self._NSPACE)
    hc.assert_that(dd.items, hc.has_item(expected_item))

  def test_map_named_function(self):
    self._assert_fn_display_data(beam.Map(len), 'len')

  def test_map_anonymous_function(self):
    self._assert_fn_display_data(beam.Map(lambda x: x), '<lambda>')

  def test_flatmap_named_function(self):
    self._assert_fn_display_data(beam.FlatMap(list), 'list')

  def test_flatmap_anonymous_function(self):
    self._assert_fn_display_data(beam.FlatMap(lambda x: [x]), '<lambda>')

  def test_filter_named_function(self):
    self._assert_fn_display_data(beam.Filter(sum), 'sum')

  def test_filter_anonymous_function(self):
    self._assert_fn_display_data(beam.Filter(lambda x: x // 30), '<lambda>')
class PTransformTypeCheckTestCase(TypeHintTestCase):
  def assertStartswith(self, msg, prefix):
    """Assert that `msg` begins with `prefix`, with a readable failure."""
    self.assertTrue(msg.startswith(prefix),
                    '"%s" does not start with "%s"' % (msg, prefix))
  def setUp(self):
    # Fresh pipeline for every test case.
    self.p = TestPipeline()
  def test_do_fn_pipeline_pipeline_type_check_satisfied(self):
    """A DoFn whose hints match the input type passes pipeline checking."""
    @with_input_types(int, int)
    @with_output_types(int)
    class AddWithFive(beam.DoFn):
      def process(self, element, five):
        return [element + five]

    d = (self.p
         | 'T' >> beam.Create([1, 2, 3]).with_output_types(int)
         | 'Add' >> beam.ParDo(AddWithFive(), 5))

    assert_that(d, equal_to([6, 7, 8]))
    self.p.run()
  def test_do_fn_pipeline_pipeline_type_check_violated(self):
    """Construction fails when the input type contradicts the DoFn hints."""
    @with_input_types(str, str)
    @with_output_types(str)
    class ToUpperCaseWithPrefix(beam.DoFn):
      def process(self, element, prefix):
        return [prefix + element.upper()]

    with self.assertRaises(typehints.TypeCheckError) as e:
      (self.p
       | 'T' >> beam.Create([1, 2, 3]).with_output_types(int)
       | 'Upper' >> beam.ParDo(ToUpperCaseWithPrefix(), 'hello'))

    self.assertEqual("Type hint violation for 'Upper': "
                     "requires <type 'str'> but got <type 'int'> for element",
                     e.exception.args[0])
  def test_do_fn_pipeline_runtime_type_check_satisfied(self):
    """Runtime type checking passes when actual element types match hints."""
    self.p._options.view_as(TypeOptions).runtime_type_check = True

    @with_input_types(int, int)
    @with_output_types(int)
    class AddWithNum(beam.DoFn):
      def process(self, element, num):
        return [element + num]

    d = (self.p
         | 'T' >> beam.Create([1, 2, 3]).with_output_types(int)
         | 'Add' >> beam.ParDo(AddWithNum(), 5))

    assert_that(d, equal_to([6, 7, 8]))
    self.p.run()
  def test_do_fn_pipeline_runtime_type_check_violated(self):
    """A hint/element-type mismatch surfaces as a TypeCheckError."""
    self.p._options.view_as(TypeOptions).runtime_type_check = True

    @with_input_types(int, int)
    @with_output_types(int)
    class AddWithNum(beam.DoFn):
      def process(self, element, num):
        return [element + num]

    with self.assertRaises(typehints.TypeCheckError) as e:
      (self.p
       | 'T' >> beam.Create(['1', '2', '3']).with_output_types(str)
       | 'Add' >> beam.ParDo(AddWithNum(), 5))
      self.p.run()

    self.assertEqual("Type hint violation for 'Add': "
                     "requires <type 'int'> but got <type 'str'> for element",
                     e.exception.args[0])
  def test_pardo_does_not_type_check_using_type_hint_decorators(self):
    """Decorator hints on a FlatMap callable are enforced at construction."""
    @with_input_types(a=int)
    @with_output_types(typehints.List[str])
    def int_to_str(a):
      return [str(a)]

    # The function above is expecting an int for its only parameter. However, it
    # will receive a str instead, which should result in a raised exception.
    with self.assertRaises(typehints.TypeCheckError) as e:
      (self.p
       | 'S' >> beam.Create(['b', 'a', 'r']).with_output_types(str)
       | 'ToStr' >> beam.FlatMap(int_to_str))

    self.assertEqual("Type hint violation for 'ToStr': "
                     "requires <type 'int'> but got <type 'str'> for a",
                     e.exception.args[0])
  def test_pardo_properly_type_checks_using_type_hint_decorators(self):
    """Matching decorator hints construct cleanly; List output is unpacked."""
    @with_input_types(a=str)
    @with_output_types(typehints.List[str])
    def to_all_upper_case(a):
      return [a.upper()]

    # If this type-checks than no error should be raised.
    d = (self.p
         | 'T' >> beam.Create(['t', 'e', 's', 't']).with_output_types(str)
         | 'Case' >> beam.FlatMap(to_all_upper_case))
    assert_that(d, equal_to(['T', 'E', 'S', 'T']))
    self.p.run()

    # Output type should have been recognized as 'str' rather than List[str] to
    # do the flatten part of FlatMap.
    self.assertEqual(str, d.element_type)
  def test_pardo_does_not_type_check_using_type_hint_methods(self):
    """with_input_types/with_output_types chains are checked between steps."""
    # The first ParDo outputs pcoll's of type int, however the second ParDo is
    # expecting pcoll's of type str instead.
    with self.assertRaises(typehints.TypeCheckError) as e:
      (self.p
       | 'S' >> beam.Create(['t', 'e', 's', 't']).with_output_types(str)
       | ('Score' >> beam.FlatMap(lambda x: [1] if x == 't' else [2])
          .with_input_types(str).with_output_types(int))
       | ('Upper' >> beam.FlatMap(lambda x: [x.upper()])
          .with_input_types(str).with_output_types(str)))

    self.assertEqual("Type hint violation for 'Upper': "
                     "requires <type 'str'> but got <type 'int'> for x",
                     e.exception.args[0])
  def test_pardo_properly_type_checks_using_type_hint_methods(self):
    """Consistent hint-method chains construct and run without error."""
    # Pipeline should be created successfully without an error
    d = (self.p
         | 'S' >> beam.Create(['t', 'e', 's', 't']).with_output_types(str)
         | 'Dup' >> beam.FlatMap(lambda x: [x + x])
         .with_input_types(str).with_output_types(str)
         | 'Upper' >> beam.FlatMap(lambda x: [x.upper()])
         .with_input_types(str).with_output_types(str))

    assert_that(d, equal_to(['TT', 'EE', 'SS', 'TT']))
    self.p.run()
  def test_map_does_not_type_check_using_type_hints_methods(self):
    """Map's with_input_types hint is checked against upstream output."""
    # The transform before 'Map' has indicated that it outputs PCollections with
    # int's, while Map is expecting one of str.
    with self.assertRaises(typehints.TypeCheckError) as e:
      (self.p
       | 'S' >> beam.Create([1, 2, 3, 4]).with_output_types(int)
       | 'Upper' >> beam.Map(lambda x: x.upper())
       .with_input_types(str).with_output_types(str))

    self.assertEqual("Type hint violation for 'Upper': "
                     "requires <type 'str'> but got <type 'int'> for x",
                     e.exception.args[0])
  def test_map_properly_type_checks_using_type_hints_methods(self):
    """A Map whose hints agree with upstream output constructs cleanly."""
    # No error should be raised if this type-checks properly.
    d = (self.p
         | 'S' >> beam.Create([1, 2, 3, 4]).with_output_types(int)
         | 'ToStr' >> beam.Map(lambda x: str(x))
         .with_input_types(int).with_output_types(str))
    assert_that(d, equal_to(['1', '2', '3', '4']))
    self.p.run()
  def test_map_does_not_type_check_using_type_hints_decorator(self):
    """Decorator hints on a Map callable are enforced at construction."""
    @with_input_types(s=str)
    @with_output_types(str)
    def upper(s):
      return s.upper()

    # Hinted function above expects a str at pipeline construction.
    # However, 'Map' should detect that Create has hinted an int instead.
    with self.assertRaises(typehints.TypeCheckError) as e:
      (self.p
       | 'S' >> beam.Create([1, 2, 3, 4]).with_output_types(int)
       | 'Upper' >> beam.Map(upper))

    self.assertEqual("Type hint violation for 'Upper': "
                     "requires <type 'str'> but got <type 'int'> for s",
                     e.exception.args[0])
  def test_map_properly_type_checks_using_type_hints_decorator(self):
    """Matching decorator hints on a Map callable pass checking."""
    @with_input_types(a=bool)
    @with_output_types(int)
    def bool_to_int(a):
      return int(a)

    # If this type-checks than no error should be raised.
    d = (self.p
         | 'Bools' >> beam.Create([True, False, True]).with_output_types(bool)
         | 'ToInts' >> beam.Map(bool_to_int))
    assert_that(d, equal_to([1, 0, 1]))
    self.p.run()
  def test_filter_does_not_type_check_using_type_hints_method(self):
    """Filter's with_input_types hint is checked against upstream output."""
    # Filter is expecting an int but instead looks to the 'left' and sees a str
    # incoming.
    with self.assertRaises(typehints.TypeCheckError) as e:
      (self.p
       | 'Strs' >> beam.Create(['1', '2', '3', '4', '5']).with_output_types(str)
       | 'Lower' >> beam.Map(lambda x: x.lower())
       .with_input_types(str).with_output_types(str)
       | 'Below 3' >> beam.Filter(lambda x: x < 3).with_input_types(int))

    self.assertEqual("Type hint violation for 'Below 3': "
                     "requires <type 'int'> but got <type 'str'> for x",
                     e.exception.args[0])
  def test_filter_type_checks_using_type_hints_method(self):
    """A Filter whose input hint agrees with upstream constructs cleanly."""
    # No error should be raised if this type-checks properly.
    d = (self.p
         | beam.Create(['1', '2', '3', '4', '5']).with_output_types(str)
         | 'ToInt' >> beam.Map(lambda x: int(x))
         .with_input_types(str).with_output_types(int)
         | 'Below 3' >> beam.Filter(lambda x: x < 3).with_input_types(int))
    assert_that(d, equal_to([1, 2]))
    self.p.run()
  def test_filter_does_not_type_check_using_type_hints_decorator(self):
    """Decorator hints on a Filter predicate are enforced at construction."""
    @with_input_types(a=float)
    def more_than_half(a):
      return a > 0.50

    # Func above was hinted to only take a float, yet an int will be passed.
    with self.assertRaises(typehints.TypeCheckError) as e:
      (self.p
       | 'Ints' >> beam.Create([1, 2, 3, 4]).with_output_types(int)
       | 'Half' >> beam.Filter(more_than_half))

    self.assertEqual("Type hint violation for 'Half': "
                     "requires <type 'float'> but got <type 'int'> for a",
                     e.exception.args[0])
  def test_filter_type_checks_using_type_hints_decorator(self):
    """Filter deduces its output type from its (hinted) input type."""
    @with_input_types(b=int)
    def half(b):
      import random
      return bool(random.choice([0, 1]))

    # Filter should deduce that it returns the same type that it takes.
    (self.p
     | 'Str' >> beam.Create(range(5)).with_output_types(int)
     | 'Half' >> beam.Filter(half)
     | 'ToBool' >> beam.Map(lambda x: bool(x))
     .with_input_types(int).with_output_types(bool))
  def test_group_by_key_only_output_type_deduction(self):
    """_GroupByKeyOnly deduces KV[A, B] -> KV[A, Iterable[B]]."""
    d = (self.p
         | 'Str' >> beam.Create(['t', 'e', 's', 't']).with_output_types(str)
         | ('Pair' >> beam.Map(lambda x: (x, ord(x)))
            .with_output_types(typehints.KV[str, str]))
         | _GroupByKeyOnly())

    # Output type should correctly be deduced.
    # GBK-only should deduce that KV[A, B] is turned into KV[A, Iterable[B]].
    self.assertCompatible(typehints.KV[str, typehints.Iterable[str]],
                          d.element_type)
  def test_group_by_key_output_type_deduction(self):
    """GroupByKey deduces KV[A, B] -> KV[A, Iterable[B]]."""
    d = (self.p
         | 'Str' >> beam.Create(range(20)).with_output_types(int)
         | ('PairNegative' >> beam.Map(lambda x: (x % 5, -x))
            .with_output_types(typehints.KV[int, int]))
         | beam.GroupByKey())

    # Output type should correctly be deduced.
    # GBK should deduce that KV[A, B] is turned into KV[A, Iterable[B]].
    self.assertCompatible(typehints.KV[int, typehints.Iterable[int]],
                          d.element_type)
  def test_group_by_key_only_does_not_type_check(self):
    """_GroupByKeyOnly rejects non-KV element types."""
    # GBK will be passed raw int's here instead of some form of KV[A, B].
    with self.assertRaises(typehints.TypeCheckError) as e:
      (self.p
       | beam.Create([1, 2, 3]).with_output_types(int)
       | 'F' >> _GroupByKeyOnly())

    self.assertEqual("Input type hint violation at F: "
                     "expected Tuple[TypeVariable[K], TypeVariable[V]], "
                     "got <type 'int'>",
                     e.exception.args[0])
  def test_group_by_does_not_type_check(self):
    """GroupByKey rejects Iterable[int] input; it is not a KV type."""
    # Create is returning a List[int, str], rather than a KV[int, str] that is
    # aliased to Tuple[int, str].
    with self.assertRaises(typehints.TypeCheckError) as e:
      (self.p
       | (beam.Create([[1], [2]])
          .with_output_types(typehints.Iterable[int]))
       | 'T' >> beam.GroupByKey())

    self.assertEqual("Input type hint violation at T: "
                     "expected Tuple[TypeVariable[K], TypeVariable[V]], "
                     "got Iterable[int]",
                     e.exception.args[0])
  def test_pipeline_checking_pardo_insufficient_type_information(self):
    """Strict mode requires every transform to declare an output type."""
    self.p._options.view_as(TypeOptions).type_check_strictness = 'ALL_REQUIRED'

    # Type checking is enabled, but 'Create' doesn't pass on any relevant type
    # information to the ParDo.
    with self.assertRaises(typehints.TypeCheckError) as e:
      (self.p
       | 'Nums' >> beam.Create(range(5))
       | 'ModDup' >> beam.FlatMap(lambda x: (x % 2, x)))

    self.assertEqual('Pipeline type checking is enabled, however no output '
                     'type-hint was found for the PTransform Create(Nums)',
                     e.exception.args[0])
  def test_pipeline_checking_gbk_insufficient_type_information(self):
    """Strict mode flags an unhinted ParDo feeding a GroupByKey-only."""
    self.p._options.view_as(TypeOptions).type_check_strictness = 'ALL_REQUIRED'
    # Type checking is enabled, but 'Map' doesn't pass on any relevant type
    # information to GBK-only.
    with self.assertRaises(typehints.TypeCheckError) as e:
      (self.p
       | 'Nums' >> beam.Create(range(5)).with_output_types(int)
       | 'ModDup' >> beam.Map(lambda x: (x % 2, x))
       | _GroupByKeyOnly())

    self.assertEqual('Pipeline type checking is enabled, however no output '
                     'type-hint was found for the PTransform '
                     'ParDo(ModDup)',
                     e.exception.args[0])
  def test_disable_pipeline_type_check(self):
    """With pipeline checking disabled, mismatched hints do not raise."""
    self.p._options.view_as(TypeOptions).pipeline_type_check = False

    # The pipeline below should raise a TypeError, however pipeline type
    # checking was disabled above.
    (self.p
     | 'T' >> beam.Create([1, 2, 3]).with_output_types(int)
     | 'Lower' >> beam.Map(lambda x: x.lower())
     .with_input_types(str).with_output_types(str))
  def test_run_time_type_checking_enabled_type_violation(self):
    """Runtime checking catches what disabled construction-time checks miss."""
    self.p._options.view_as(TypeOptions).pipeline_type_check = False
    self.p._options.view_as(TypeOptions).runtime_type_check = True

    @with_output_types(str)
    @with_input_types(x=int)
    def int_to_string(x):
      return str(x)

    # Function above has been type-hinted to only accept an int. But in the
    # pipeline execution it'll be passed a string due to the output of Create.
    (self.p
     | 'T' >> beam.Create(['some_string'])
     | 'ToStr' >> beam.Map(int_to_string))
    with self.assertRaises(typehints.TypeCheckError) as e:
      self.p.run()

    self.assertStartswith(
        e.exception.args[0],
        "Runtime type violation detected within ParDo(ToStr): "
        "Type-hint for argument: 'x' violated. "
        "Expected an instance of <type 'int'>, "
        "instead found some_string, an instance of <type 'str'>.")
  def test_run_time_type_checking_enabled_types_satisfied(self):
    """Runtime checking passes when actual element types satisfy hints."""
    self.p._options.view_as(TypeOptions).pipeline_type_check = False
    self.p._options.view_as(TypeOptions).runtime_type_check = True

    @with_output_types(typehints.KV[int, str])
    @with_input_types(x=str)
    def group_with_upper_ord(x):
      return (ord(x.upper()) % 5, x)

    # Pipeline checking is off, but the above function should satisfy types at
    # run-time.
    result = (self.p
              | 'T' >> beam.Create(['t', 'e', 's', 't', 'i', 'n', 'g'])
              .with_output_types(str)
              | 'GenKeys' >> beam.Map(group_with_upper_ord)
              | 'O' >> beam.GroupByKey())

    assert_that(result, equal_to([(1, ['g']),
                                  (3, ['s', 'i', 'n']),
                                  (4, ['t', 'e', 't'])]))
    self.p.run()
def test_pipeline_checking_satisfied_but_run_time_types_violate(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
self.p._options.view_as(TypeOptions).runtime_type_check = True
@with_output_types(typehints.KV[bool, int])
@with_input_types(a=int)
def is_even_as_key(a):
# Simulate a programming error, should be: return (a % 2 == 0, a)
# However this returns KV[int, int]
return (a % 2, a)
(self.p
| 'Nums' >> beam.Create(range(5)).with_output_types(int)
| 'IsEven' >> beam.Map(is_even_as_key)
| 'Parity' >> beam.GroupByKey())
# Although all the types appear to be correct when checked at pipeline
# construction. Runtime type-checking should detect the 'is_even_as_key' is
# returning Tuple[int, int], instead of Tuple[bool, int].
with self.assertRaises(typehints.TypeCheckError) as e:
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within ParDo(IsEven): "
"Tuple[bool, int] hint type-constraint violated. "
"The type of element #0 in the passed tuple is incorrect. "
"Expected an instance of type bool, "
"instead received an instance of type int.")
def test_pipeline_checking_satisfied_run_time_checking_satisfied(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
@with_output_types(typehints.KV[bool, int])
@with_input_types(a=int)
def is_even_as_key(a):
# The programming error in the above test-case has now been fixed.
# Everything should properly type-check.
return (a % 2 == 0, a)
result = (self.p
| 'Nums' >> beam.Create(range(5)).with_output_types(int)
| 'IsEven' >> beam.Map(is_even_as_key)
| 'Parity' >> beam.GroupByKey())
assert_that(result, equal_to([(False, [1, 3]), (True, [0, 2, 4])]))
self.p.run()
def test_pipeline_runtime_checking_violation_simple_type_input(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
self.p._options.view_as(TypeOptions).pipeline_type_check = False
# The type-hinted applied via the 'with_input_types()' method indicates the
# ParDo should receive an instance of type 'str', however an 'int' will be
# passed instead.
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| beam.Create([1, 2, 3])
| ('ToInt' >> beam.FlatMap(lambda x: [int(x)])
.with_input_types(str).with_output_types(int)))
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within ParDo(ToInt): "
"Type-hint for argument: 'x' violated. "
"Expected an instance of <type 'str'>, "
"instead found 1, an instance of <type 'int'>.")
def test_pipeline_runtime_checking_violation_composite_type_input(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
self.p._options.view_as(TypeOptions).pipeline_type_check = False
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| beam.Create([(1, 3.0), (2, 4.9), (3, 9.5)])
| ('Add' >> beam.FlatMap(lambda x_y: [x_y[0] + x_y[1]])
.with_input_types(typehints.Tuple[int, int]).with_output_types(int))
)
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within ParDo(Add): "
"Type-hint for argument: 'x_y' violated: "
"Tuple[int, int] hint type-constraint violated. "
"The type of element #1 in the passed tuple is incorrect. "
"Expected an instance of type int, instead received an instance "
"of type float.")
def test_pipeline_runtime_checking_violation_simple_type_output(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
self.p._options.view_as(TypeOptions).pipeline_type_check = False
# The type-hinted applied via the 'returns()' method indicates the ParDo
# should output an instance of type 'int', however a 'float' will be
# generated instead.
print("HINTS", ('ToInt' >> beam.FlatMap(
lambda x: [float(x)]).with_input_types(int).with_output_types(
int)).get_type_hints())
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| beam.Create([1, 2, 3])
| ('ToInt' >> beam.FlatMap(lambda x: [float(x)])
.with_input_types(int).with_output_types(int))
)
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within "
"ParDo(ToInt): "
"According to type-hint expected output should be "
"of type <type 'int'>. Instead, received '1.0', "
"an instance of type <type 'float'>.")
def test_pipeline_runtime_checking_violation_composite_type_output(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
self.p._options.view_as(TypeOptions).pipeline_type_check = False
# The type-hinted applied via the 'returns()' method indicates the ParDo
# should return an instance of type: Tuple[float, int]. However, an instance
# of 'int' will be generated instead.
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| beam.Create([(1, 3.0), (2, 4.9), (3, 9.5)])
| ('Swap' >> beam.FlatMap(lambda x_y1: [x_y1[0] + x_y1[1]])
.with_input_types(typehints.Tuple[int, float])
.with_output_types(typehints.Tuple[float, int]))
)
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within "
"ParDo(Swap): Tuple type constraint violated. "
"Valid object instance must be of type 'tuple'. Instead, "
"an instance of 'float' was received.")
def test_pipeline_runtime_checking_violation_with_side_inputs_decorator(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
self.p._options.view_as(TypeOptions).runtime_type_check = True
@with_output_types(int)
@with_input_types(a=int, b=int)
def add(a, b):
return a + b
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p | beam.Create([1, 2, 3, 4]) | 'Add 1' >> beam.Map(add, 1.0))
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within ParDo(Add 1): "
"Type-hint for argument: 'b' violated. "
"Expected an instance of <type 'int'>, "
"instead found 1.0, an instance of <type 'float'>.")
def test_pipeline_runtime_checking_violation_with_side_inputs_via_method(self): # pylint: disable=line-too-long
self.p._options.view_as(TypeOptions).runtime_type_check = True
self.p._options.view_as(TypeOptions).pipeline_type_check = False
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| beam.Create([1, 2, 3, 4])
| ('Add 1' >> beam.Map(lambda x, one: x + one, 1.0)
.with_input_types(int, int)
.with_output_types(float)))
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within ParDo(Add 1): "
"Type-hint for argument: 'one' violated. "
"Expected an instance of <type 'int'>, "
"instead found 1.0, an instance of <type 'float'>.")
def test_combine_properly_pipeline_type_checks_using_decorator(self):
@with_output_types(int)
@with_input_types(ints=typehints.Iterable[int])
def sum_ints(ints):
return sum(ints)
d = (self.p
| 'T' >> beam.Create([1, 2, 3]).with_output_types(int)
| 'Sum' >> beam.CombineGlobally(sum_ints))
self.assertEqual(int, d.element_type)
assert_that(d, equal_to([6]))
self.p.run()
def test_combine_func_type_hint_does_not_take_iterable_using_decorator(self):
@with_output_types(int)
@with_input_types(a=int)
def bad_combine(a):
5 + a
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| 'M' >> beam.Create([1, 2, 3]).with_output_types(int)
| 'Add' >> beam.CombineGlobally(bad_combine))
self.assertEqual(
"All functions for a Combine PTransform must accept a "
"single argument compatible with: Iterable[Any]. "
"Instead a function with input type: <type 'int'> was received.",
e.exception.args[0])
def test_combine_pipeline_type_propagation_using_decorators(self):
@with_output_types(int)
@with_input_types(ints=typehints.Iterable[int])
def sum_ints(ints):
return sum(ints)
@with_output_types(typehints.List[int])
@with_input_types(n=int)
def range_from_zero(n):
return list(range(n+1))
d = (self.p
| 'T' >> beam.Create([1, 2, 3]).with_output_types(int)
| 'Sum' >> beam.CombineGlobally(sum_ints)
| 'Range' >> beam.ParDo(range_from_zero))
self.assertEqual(int, d.element_type)
assert_that(d, equal_to([0, 1, 2, 3, 4, 5, 6]))
self.p.run()
def test_combine_runtime_type_check_satisfied_using_decorators(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
@with_output_types(int)
@with_input_types(ints=typehints.Iterable[int])
def iter_mul(ints):
return reduce(operator.mul, ints, 1)
d = (self.p
| 'K' >> beam.Create([5, 5, 5, 5]).with_output_types(int)
| 'Mul' >> beam.CombineGlobally(iter_mul))
assert_that(d, equal_to([625]))
self.p.run()
def test_combine_runtime_type_check_violation_using_decorators(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
self.p._options.view_as(TypeOptions).runtime_type_check = True
# Combine fn is returning the incorrect type
@with_output_types(int)
@with_input_types(ints=typehints.Iterable[int])
def iter_mul(ints):
return str(reduce(operator.mul, ints, 1))
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| 'K' >> beam.Create([5, 5, 5, 5]).with_output_types(int)
| 'Mul' >> beam.CombineGlobally(iter_mul))
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within "
"Mul/CombinePerKey: "
"Type-hint for return type violated. "
"Expected an instance of <type 'int'>, instead found")
def test_combine_pipeline_type_check_using_methods(self):
d = (self.p
| beam.Create(['t', 'e', 's', 't']).with_output_types(str)
| ('concat' >> beam.CombineGlobally(lambda s: ''.join(s))
.with_input_types(str).with_output_types(str)))
def matcher(expected):
def match(actual):
equal_to(expected)(list(actual[0]))
return match
assert_that(d, matcher('estt'))
self.p.run()
def test_combine_runtime_type_check_using_methods(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (self.p
| beam.Create(range(5)).with_output_types(int)
| ('Sum' >> beam.CombineGlobally(lambda s: sum(s))
.with_input_types(int).with_output_types(int)))
assert_that(d, equal_to([10]))
self.p.run()
def test_combine_pipeline_type_check_violation_using_methods(self):
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| beam.Create(range(3)).with_output_types(int)
| ('SortJoin' >> beam.CombineGlobally(lambda s: ''.join(sorted(s)))
.with_input_types(str).with_output_types(str)))
self.assertEqual("Input type hint violation at SortJoin: "
"expected <type 'str'>, got <type 'int'>",
e.exception.args[0])
def test_combine_runtime_type_check_violation_using_methods(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
self.p._options.view_as(TypeOptions).runtime_type_check = True
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| beam.Create(range(3)).with_output_types(int)
| ('SortJoin' >> beam.CombineGlobally(lambda s: ''.join(sorted(s)))
.with_input_types(str).with_output_types(str)))
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within "
"ParDo(SortJoin/KeyWithVoid): "
"Type-hint for argument: 'v' violated. "
"Expected an instance of <type 'str'>, "
"instead found 0, an instance of <type 'int'>.")
def test_combine_insufficient_type_hint_information(self):
self.p._options.view_as(TypeOptions).type_check_strictness = 'ALL_REQUIRED'
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| 'E' >> beam.Create(range(3)).with_output_types(int)
| 'SortJoin' >> beam.CombineGlobally(lambda s: ''.join(sorted(s)))
| 'F' >> beam.Map(lambda x: x + 1))
self.assertStartswith(
e.exception.args[0],
'Pipeline type checking is enabled, '
'however no output type-hint was found for the PTransform '
'ParDo('
'SortJoin/CombinePerKey/')
def test_mean_globally_pipeline_checking_satisfied(self):
d = (self.p
| 'C' >> beam.Create(range(5)).with_output_types(int)
| 'Mean' >> combine.Mean.Globally())
self.assertEqual(float, d.element_type)
assert_that(d, equal_to([2.0]))
self.p.run()
def test_mean_globally_pipeline_checking_violated(self):
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| 'C' >> beam.Create(['test']).with_output_types(str)
| 'Mean' >> combine.Mean.Globally())
self.assertEqual(
"Type hint violation for 'CombinePerKey': "
"requires Tuple[TypeVariable[K], Union[float, int, long]] "
"but got Tuple[None, str] for element",
e.exception.args[0])
def test_mean_globally_runtime_checking_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (self.p
| 'C' >> beam.Create(range(5)).with_output_types(int)
| 'Mean' >> combine.Mean.Globally())
self.assertEqual(float, d.element_type)
assert_that(d, equal_to([2.0]))
self.p.run()
def test_mean_globally_runtime_checking_violated(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
self.p._options.view_as(TypeOptions).runtime_type_check = True
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| 'C' >> beam.Create(['t', 'e', 's', 't']).with_output_types(str)
| 'Mean' >> combine.Mean.Globally())
self.p.run()
self.assertEqual("Runtime type violation detected for transform input "
"when executing ParDoFlatMap(Combine): Tuple[Any, "
"Iterable[Union[int, float]]] hint type-constraint "
"violated. The type of element #1 in the passed tuple "
"is incorrect. Iterable[Union[int, float]] hint "
"type-constraint violated. The type of element #0 in "
"the passed Iterable is incorrect: Union[int, float] "
"type-constraint violated. Expected an instance of one "
"of: ('int', 'float'), received str instead.",
e.exception.args[0])
def test_mean_per_key_pipeline_checking_satisfied(self):
d = (self.p
| beam.Create(range(5)).with_output_types(int)
| ('EvenGroup' >> beam.Map(lambda x: (not x % 2, x))
.with_output_types(typehints.KV[bool, int]))
| 'EvenMean' >> combine.Mean.PerKey())
self.assertCompatible(typehints.KV[bool, float], d.element_type)
assert_that(d, equal_to([(False, 2.0), (True, 2.0)]))
self.p.run()
def test_mean_per_key_pipeline_checking_violated(self):
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| beam.Create(map(str, range(5))).with_output_types(str)
| ('UpperPair' >> beam.Map(lambda x: (x.upper(), x))
.with_output_types(typehints.KV[str, str]))
| 'EvenMean' >> combine.Mean.PerKey())
self.p.run()
self.assertEqual(
"Type hint violation for 'CombinePerKey(MeanCombineFn)': "
"requires Tuple[TypeVariable[K], Union[float, int, long]] "
"but got Tuple[str, str] for element",
e.exception.args[0])
def test_mean_per_key_runtime_checking_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (self.p
| beam.Create(range(5)).with_output_types(int)
| ('OddGroup' >> beam.Map(lambda x: (bool(x % 2), x))
.with_output_types(typehints.KV[bool, int]))
| 'OddMean' >> combine.Mean.PerKey())
self.assertCompatible(typehints.KV[bool, float], d.element_type)
assert_that(d, equal_to([(False, 2.0), (True, 2.0)]))
self.p.run()
def test_mean_per_key_runtime_checking_violated(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
self.p._options.view_as(TypeOptions).runtime_type_check = True
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| beam.Create(range(5)).with_output_types(int)
| ('OddGroup' >> beam.Map(lambda x: (x, str(bool(x % 2))))
.with_output_types(typehints.KV[int, str]))
| 'OddMean' >> combine.Mean.PerKey())
self.p.run()
self.assertStartswith(
e.exception.args[0],
"Runtime type violation detected within "
"OddMean/CombinePerKey(MeanCombineFn): "
"Type-hint for argument: 'element' violated: "
"Union[float, int, long] type-constraint violated. "
"Expected an instance of one of: ('float', 'int', 'long'), "
"received str instead")
def test_count_globally_pipeline_type_checking_satisfied(self):
d = (self.p
| 'P' >> beam.Create(range(5)).with_output_types(int)
| 'CountInt' >> combine.Count.Globally())
self.assertEqual(int, d.element_type)
assert_that(d, equal_to([5]))
self.p.run()
def test_count_globally_runtime_type_checking_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (self.p
| 'P' >> beam.Create(range(5)).with_output_types(int)
| 'CountInt' >> combine.Count.Globally())
self.assertEqual(int, d.element_type)
assert_that(d, equal_to([5]))
self.p.run()
def test_count_perkey_pipeline_type_checking_satisfied(self):
d = (self.p
| beam.Create(range(5)).with_output_types(int)
| ('EvenGroup' >> beam.Map(lambda x: (not x % 2, x))
.with_output_types(typehints.KV[bool, int]))
| 'CountInt' >> combine.Count.PerKey())
self.assertCompatible(typehints.KV[bool, int], d.element_type)
assert_that(d, equal_to([(False, 2), (True, 3)]))
self.p.run()
def test_count_perkey_pipeline_type_checking_violated(self):
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| beam.Create(range(5)).with_output_types(int)
| 'CountInt' >> combine.Count.PerKey())
self.assertEqual(
"Type hint violation for 'CombinePerKey(CountCombineFn)': "
"requires Tuple[TypeVariable[K], Any] "
"but got <type 'int'> for element",
e.exception.args[0])
def test_count_perkey_runtime_type_checking_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (self.p
| beam.Create(['t', 'e', 's', 't']).with_output_types(str)
| 'DupKey' >> beam.Map(lambda x: (x, x))
.with_output_types(typehints.KV[str, str])
| 'CountDups' >> combine.Count.PerKey())
self.assertCompatible(typehints.KV[str, int], d.element_type)
assert_that(d, equal_to([('e', 1), ('s', 1), ('t', 2)]))
self.p.run()
def test_count_perelement_pipeline_type_checking_satisfied(self):
d = (self.p
| beam.Create([1, 1, 2, 3]).with_output_types(int)
| 'CountElems' >> combine.Count.PerElement())
self.assertCompatible(typehints.KV[int, int], d.element_type)
assert_that(d, equal_to([(1, 2), (2, 1), (3, 1)]))
self.p.run()
def test_count_perelement_pipeline_type_checking_violated(self):
self.p._options.view_as(TypeOptions).type_check_strictness = 'ALL_REQUIRED'
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| 'f' >> beam.Create([1, 1, 2, 3])
| 'CountElems' >> combine.Count.PerElement())
self.assertEqual('Pipeline type checking is enabled, however no output '
'type-hint was found for the PTransform '
'Create(f)',
e.exception.args[0])
def test_count_perelement_runtime_type_checking_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (self.p
| beam.Create([True, True, False, True, True])
.with_output_types(bool)
| 'CountElems' >> combine.Count.PerElement())
self.assertCompatible(typehints.KV[bool, int], d.element_type)
assert_that(d, equal_to([(False, 1), (True, 4)]))
self.p.run()
def test_top_of_pipeline_checking_satisfied(self):
d = (self.p
| beam.Create(range(5, 11)).with_output_types(int)
| 'Top 3' >> combine.Top.Of(3, lambda x, y: x < y))
self.assertCompatible(typehints.Iterable[int],
d.element_type)
assert_that(d, equal_to([[10, 9, 8]]))
self.p.run()
def test_top_of_runtime_checking_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (self.p
| beam.Create(list('testing')).with_output_types(str)
| 'AciiTop' >> combine.Top.Of(3, lambda x, y: x < y))
self.assertCompatible(typehints.Iterable[str], d.element_type)
assert_that(d, equal_to([['t', 't', 's']]))
self.p.run()
def test_per_key_pipeline_checking_violated(self):
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| beam.Create(range(100)).with_output_types(int)
| 'Num + 1' >> beam.Map(lambda x: x + 1).with_output_types(int)
| 'TopMod' >> combine.Top.PerKey(1, lambda a, b: a < b))
self.assertEqual(
"Type hint violation for 'CombinePerKey(TopCombineFn)': "
"requires Tuple[TypeVariable[K], TypeVariable[T]] "
"but got <type 'int'> for element",
e.exception.args[0])
def test_per_key_pipeline_checking_satisfied(self):
d = (self.p
| beam.Create(range(100)).with_output_types(int)
| ('GroupMod 3' >> beam.Map(lambda x: (x % 3, x))
.with_output_types(typehints.KV[int, int]))
| 'TopMod' >> combine.Top.PerKey(1, lambda a, b: a < b))
self.assertCompatible(typehints.Tuple[int, typehints.Iterable[int]],
d.element_type)
assert_that(d, equal_to([(0, [99]), (1, [97]), (2, [98])]))
self.p.run()
def test_per_key_runtime_checking_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (self.p
| beam.Create(range(21))
| ('GroupMod 3' >> beam.Map(lambda x: (x % 3, x))
.with_output_types(typehints.KV[int, int]))
| 'TopMod' >> combine.Top.PerKey(1, lambda a, b: a < b))
self.assertCompatible(typehints.KV[int, typehints.Iterable[int]],
d.element_type)
assert_that(d, equal_to([(0, [18]), (1, [19]), (2, [20])]))
self.p.run()
def test_sample_globally_pipeline_satisfied(self):
d = (self.p
| beam.Create([2, 2, 3, 3]).with_output_types(int)
| 'Sample' >> combine.Sample.FixedSizeGlobally(3))
self.assertCompatible(typehints.Iterable[int], d.element_type)
def matcher(expected_len):
def match(actual):
equal_to([expected_len])([len(actual[0])])
return match
assert_that(d, matcher(3))
self.p.run()
def test_sample_globally_runtime_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (self.p
| beam.Create([2, 2, 3, 3]).with_output_types(int)
| 'Sample' >> combine.Sample.FixedSizeGlobally(2))
self.assertCompatible(typehints.Iterable[int], d.element_type)
def matcher(expected_len):
def match(actual):
equal_to([expected_len])([len(actual[0])])
return match
assert_that(d, matcher(2))
self.p.run()
def test_sample_per_key_pipeline_satisfied(self):
d = (self.p
| (beam.Create([(1, 2), (1, 2), (2, 3), (2, 3)])
.with_output_types(typehints.KV[int, int]))
| 'Sample' >> combine.Sample.FixedSizePerKey(2))
self.assertCompatible(typehints.KV[int, typehints.Iterable[int]],
d.element_type)
def matcher(expected_len):
def match(actual):
for _, sample in actual:
equal_to([expected_len])([len(sample)])
return match
assert_that(d, matcher(2))
self.p.run()
def test_sample_per_key_runtime_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (self.p
| (beam.Create([(1, 2), (1, 2), (2, 3), (2, 3)])
.with_output_types(typehints.KV[int, int]))
| 'Sample' >> combine.Sample.FixedSizePerKey(1))
self.assertCompatible(typehints.KV[int, typehints.Iterable[int]],
d.element_type)
def matcher(expected_len):
def match(actual):
for _, sample in actual:
equal_to([expected_len])([len(sample)])
return match
assert_that(d, matcher(1))
self.p.run()
def test_to_list_pipeline_check_satisfied(self):
d = (self.p
| beam.Create((1, 2, 3, 4)).with_output_types(int)
| combine.ToList())
self.assertCompatible(typehints.List[int], d.element_type)
def matcher(expected):
def match(actual):
equal_to(expected)(actual[0])
return match
assert_that(d, matcher([1, 2, 3, 4]))
self.p.run()
def test_to_list_runtime_check_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (self.p
| beam.Create(list('test')).with_output_types(str)
| combine.ToList())
self.assertCompatible(typehints.List[str], d.element_type)
def matcher(expected):
def match(actual):
equal_to(expected)(actual[0])
return match
assert_that(d, matcher(['e', 's', 't', 't']))
self.p.run()
def test_to_dict_pipeline_check_violated(self):
with self.assertRaises(typehints.TypeCheckError) as e:
(self.p
| beam.Create([1, 2, 3, 4]).with_output_types(int)
| combine.ToDict())
self.assertEqual(
"Type hint violation for 'CombinePerKey': "
"requires "
"Tuple[TypeVariable[K], Tuple[TypeVariable[K], TypeVariable[V]]] "
"but got Tuple[None, int] for element",
e.exception.args[0])
def test_to_dict_pipeline_check_satisfied(self):
d = (self.p
| beam.Create(
[(1, 2), (3, 4)]).with_output_types(typehints.Tuple[int, int])
| combine.ToDict())
self.assertCompatible(typehints.Dict[int, int], d.element_type)
assert_that(d, equal_to([{1: 2, 3: 4}]))
self.p.run()
def test_to_dict_runtime_check_satisfied(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
d = (self.p
| (beam.Create([('1', 2), ('3', 4)])
.with_output_types(typehints.Tuple[str, int]))
| combine.ToDict())
self.assertCompatible(typehints.Dict[str, int], d.element_type)
assert_that(d, equal_to([{'1': 2, '3': 4}]))
self.p.run()
def test_runtime_type_check_python_type_error(self):
self.p._options.view_as(TypeOptions).runtime_type_check = True
with self.assertRaises(TypeError) as e:
(self.p
| beam.Create([1, 2, 3]).with_output_types(int)
| 'Len' >> beam.Map(lambda x: len(x)).with_output_types(int))
self.p.run()
# Our special type-checking related TypeError shouldn't have been raised.
# Instead the above pipeline should have triggered a regular Python runtime
# TypeError.
self.assertEqual("object of type 'int' has no len() [while running 'Len']",
e.exception.args[0])
self.assertFalse(isinstance(e, typehints.TypeCheckError))
def test_pardo_type_inference(self):
self.assertEqual(int,
beam.Filter(lambda x: False).infer_output_type(int))
self.assertEqual(typehints.Tuple[str, int],
beam.Map(lambda x: (x, 1)).infer_output_type(str))
def test_gbk_type_inference(self):
self.assertEqual(
typehints.Tuple[str, typehints.Iterable[int]],
_GroupByKeyOnly().infer_output_type(typehints.KV[str, int]))
def test_pipeline_inference(self):
created = self.p | beam.Create(['a', 'b', 'c'])
mapped = created | 'pair with 1' >> beam.Map(lambda x: (x, 1))
grouped = mapped | beam.GroupByKey()
self.assertEqual(str, created.element_type)
self.assertEqual(typehints.KV[str, int], mapped.element_type)
self.assertEqual(typehints.KV[str, typehints.Iterable[int]],
grouped.element_type)
def test_inferred_bad_kv_type(self):
with self.assertRaises(typehints.TypeCheckError) as e:
_ = (self.p
| beam.Create(['a', 'b', 'c'])
| 'Ungroupable' >> beam.Map(lambda x: (x, 0, 1.0))
| beam.GroupByKey())
self.assertEqual('Input type hint violation at GroupByKey: '
'expected Tuple[TypeVariable[K], TypeVariable[V]], '
'got Tuple[str, int, float]',
e.exception.args[0])
def test_type_inference_command_line_flag_toggle(self):
self.p._options.view_as(TypeOptions).pipeline_type_check = False
x = self.p | 'C1' >> beam.Create([1, 2, 3, 4])
self.assertIsNone(x.element_type)
self.p._options.view_as(TypeOptions).pipeline_type_check = True
x = self.p | 'C2' >> beam.Create([1, 2, 3, 4])
self.assertEqual(int, x.element_type)
def test_eager_execution(self):
doubled = [1, 2, 3, 4] | beam.Map(lambda x: 2 * x)
self.assertEqual([2, 4, 6, 8], doubled)
def test_eager_execution_tagged_outputs(self):
result = [1, 2, 3, 4] | beam.Map(
lambda x: pvalue.TaggedOutput('bar', 2 * x)).with_outputs('bar')
self.assertEqual([2, 4, 6, 8], result.bar)
with self.assertRaises(KeyError,
msg='Tag \'foo\' is not a defined output tag'):
result.foo
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
ddy88958620/lib | Python/scrapy/swedishtruckparts/parts4scania.py | 2 | 1983 | import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
import csv
from product_spiders.items import Product, ProductLoader
class Parts4ScaniaSpider(BaseSpider):
name = 'parts4scania.co.uk'
allowed_domains = ['www.parts4scania.co.uk']
start_urls = ('http://www.parts4scania.co.uk',)
def __init__(self, *args, **kwargs):
super(Parts4ScaniaSpider, self).__init__(*args, **kwargs)
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# categories
categories = hxs.select('//table[@id="NavigationBar4"]//a/@href').extract()
for category in categories:
url = urljoin_rfc(get_base_url(response), category)
yield Request(url)
# pages
# next_page = hxs.select('').extract()
# if next_page:
# url = urljoin_rfc(get_base_url(response), next_page[0])
# yield Request(url)
# products
for product in self.parse_product(response):
yield product
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# products
products = hxs.select(u'//b[contains(text(), "\xa3")]/../..')
for product in products:
product_loader = ProductLoader(item=Product(), selector=product)
product_loader.add_xpath('name', './b/font/text()')
product_loader.add_value('url', response.url)
price = product.select(u'.//b[contains(text(), "\xa3")]/text()').re('\xa3(.*[0-9])')
if not price:
continue
product_loader.add_value('price', price)
yield product_loader.load_item()
| apache-2.0 |
15Dkatz/pants | tests/python/pants_test/backend/graph_info/tasks/test_filter.py | 10 | 8657 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.docgen.targets.doc import Page
from pants.backend.graph_info.tasks.filter import Filter
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.base.exceptions import TaskError
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.target import Target
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
# TODO: This test should create some dummy target types, instead of depending on other backends.
class BaseFilterTest(ConsoleTaskTestBase):
@property
def alias_groups(self):
return BuildFileAliases(
targets={
'target': Target,
'java_library': JavaLibrary,
'page': Page,
'python_library': PythonLibrary,
'python_requirement_library': PythonRequirementLibrary,
}
)
@classmethod
def task_type(cls):
return Filter
class FilterEmptyTargetsTest(BaseFilterTest):
def test_no_filters(self):
self.assert_console_output()
def test_type(self):
self.assert_console_output(options={'type': ['page']})
self.assert_console_output(options={'type': ['java_library']})
def test_regex(self):
self.assert_console_output(options={'regex': ['^common']})
self.assert_console_output(options={'regex': ['-^common']})
class FilterTest(BaseFilterTest):
def setUp(self):
super(FilterTest, self).setUp()
requirement_injected = set()
def add_to_build_file(path, name, *deps):
if path not in requirement_injected:
self.add_to_build_file(path, "python_requirement_library(name='foo')")
requirement_injected.add(path)
all_deps = ["'{0}'".format(dep) for dep in deps] + ["':foo'"]
self.add_to_build_file(path, dedent("""
python_library(name='{name}',
sources=[],
dependencies=[{all_deps}],
tags=['{tag}']
)
""".format(name=name, tag=name + "_tag", all_deps=','.join(all_deps))))
add_to_build_file('common/a', 'a')
add_to_build_file('common/b', 'b')
add_to_build_file('common/c', 'c')
add_to_build_file('overlaps', 'one', 'common/a', 'common/b')
add_to_build_file('overlaps', 'two', 'common/a', 'common/c')
add_to_build_file('overlaps', 'three', 'common/a', 'overlaps:one')
def test_roots(self):
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
targets=self.targets('common/::'),
extra_targets=self.targets('overlaps/::')
)
def test_nodups(self):
targets = [self.target('common/b')] * 2
self.assertEqual(2, len(targets))
self.assert_console_output(
'common/b:b',
targets=targets
)
def test_no_filters(self):
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:one',
'overlaps:two',
'overlaps:three',
'overlaps:foo',
targets=self.targets('::')
)
def test_filter_type(self):
self.assert_console_output(
'common/a:a',
'common/b:b',
'common/c:c',
'overlaps:one',
'overlaps:two',
'overlaps:three',
targets=self.targets('::'),
options={'type': ['python_library']}
)
self.assert_console_output(
'common/a:foo',
'common/b:foo',
'common/c:foo',
'overlaps:foo',
targets=self.targets('::'),
options={'type': ['-python_library']}
)
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:one',
'overlaps:two',
'overlaps:three',
'overlaps:foo',
targets=self.targets('::'),
# Note that the comma is inside the string, so these are ORed.
options={'type': ['python_requirement_library,python_library']}
)
def test_filter_multiple_types(self):
    """Separate list entries are ANDed (unlike comma-joined values)."""
    # A target can only have one type, so the output should be empty.
    self.assert_console_output(
        targets=self.targets('::'),
        options={'type': ['python_requirement_library', 'python_library']}
    )
def test_filter_target(self):
    """The 'target' option keeps (or, with '-', drops) explicit addresses."""
    self.assert_console_output(
        'common/a:a',
        'overlaps:foo',
        targets=self.targets('::'),
        options={'target': ['common/a,overlaps/:foo']}
    )
    # Negated form: everything except the listed addresses.
    self.assert_console_output(
        'common/a:foo',
        'common/b:b',
        'common/b:foo',
        'common/c:c',
        'common/c:foo',
        'overlaps:two',
        'overlaps:three',
        targets=self.targets('::'),
        options={'target': ['-common/a:a,overlaps:one,overlaps:foo']}
    )
def test_filter_ancestor(self):
    """The 'ancestor' option keeps targets that the named targets depend on
    (transitively), including the named targets themselves; '-' inverts it."""
    self.assert_console_output(
        'common/a:a',
        'common/a:foo',
        'common/b:b',
        'common/b:foo',
        'overlaps:one',
        'overlaps:foo',
        targets=self.targets('::'),
        options={'ancestor': ['overlaps:one,overlaps:foo']}
    )
    self.assert_console_output(
        'common/c:c',
        'common/c:foo',
        'overlaps:two',
        'overlaps:three',
        targets=self.targets('::'),
        options={'ancestor': ['-overlaps:one,overlaps:foo']}
    )
def test_filter_ancestor_out_of_context(self):
    """Tests that targets outside of the context used as filters are parsed before use."""
    # Add an additional un-injected target, and then use it as a filter.
    self.add_to_build_file("blacklist", "target(name='blacklist', dependencies=['common/a'])")
    # blacklist depends on common/a, so -blacklist drops common/a:* targets.
    self.assert_console_output(
        'common/b:b',
        'common/b:foo',
        'common/c:c',
        'common/c:foo',
        'overlaps:one',
        'overlaps:two',
        'overlaps:three',
        'overlaps:foo',
        targets=self.targets('::'),
        options={'ancestor': ['-blacklist']}
    )
def test_filter_ancestor_not_passed_targets(self):
    """Tests filtering targets based on an ancestor not in that list of targets."""
    # Add an additional un-injected target, and then use it as a filter.
    self.add_to_build_file("blacklist", "target(name='blacklist', dependencies=['common/a'])")
    self.assert_console_output(
        'common/b:b',
        'common/b:foo',
        'common/c:c',
        'common/c:foo',
        targets=self.targets('common/::'),  # blacklist is not in the list of targets
        options={'ancestor': ['-blacklist']}
    )
    # Control case: with no ancestor filter, common/a:* is present.
    self.assert_console_output(
        'common/a:a',  # a: _should_ show up if we don't filter.
        'common/a:foo',
        'common/b:b',
        'common/b:foo',
        'common/c:c',
        'common/c:foo',
        targets=self.targets('common/::'),
        options={'ancestor': []}
    )
def test_filter_regex(self):
    """The 'regex' option matches target addresses; '+' joins alternatives,
    '-' inverts, and a malformed pattern raises TaskError."""
    self.assert_console_output(
        'common/a:a',
        'common/a:foo',
        'common/b:b',
        'common/b:foo',
        'common/c:c',
        'common/c:foo',
        targets=self.targets('::'),
        options={'regex': ['^common']}
    )
    self.assert_console_output(
        'common/a:foo',
        'common/b:foo',
        'common/c:foo',
        'overlaps:one',
        'overlaps:two',
        'overlaps:three',
        'overlaps:foo',
        targets=self.targets('::'),
        options={'regex': ['+foo,^overlaps']}
    )
    self.assert_console_output(
        'overlaps:one',
        'overlaps:two',
        'overlaps:three',
        targets=self.targets('::'),
        options={'regex': ['-^common,foo$']}
    )
    # Invalid regex.
    self.assert_console_raises(TaskError,
        targets=self.targets('::'),
        options={'regex': ['abc)']}
    )
def test_filter_tag_regex(self):
    """The 'tag_regex' option matches target tags (each target is tagged
    '<name>_tag' by the test setup); malformed patterns raise TaskError."""
    # Filter two.
    self.assert_console_output(
        'overlaps:three',
        targets=self.targets('::'),
        options={'tag_regex': ['+e(?=e)']}
    )
    # Removals.
    self.assert_console_output(
        'common/a:a',
        'common/a:foo',
        'common/b:b',
        'common/b:foo',
        'common/c:c',
        'common/c:foo',
        'overlaps:foo',
        'overlaps:three',
        targets=self.targets('::'),
        options={'tag_regex': ['-one|two']}
    )
    # Invalid regex.
    self.assert_console_raises(TaskError,
        targets=self.targets('::'),
        options={'tag_regex': ['abc)']}
    )
| apache-2.0 |
antin/Open-Knesset | presence/BeautifulSoup.py | 51 | 79254 | """Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2009, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.0.8"
__copyright__ = "Copyright (c) 2004-2009 Leonard Richardson"
__license__ = "New-style BSD"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import markupbase
import types
import re
import sgmllib
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
try:
set
except NameError:
from sets import Set as set
#These hacks make Beautiful Soup able to parse XML with namespaces
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
DEFAULT_OUTPUT_ENCODING = "utf-8"
def _match_css_class(class_name):
    """Compile a regular expression matching *class_name* as a
    whitespace-delimited token of a CSS class attribute value."""
    pattern = r"(^|.*\s)" + class_name + r"($|\s)"
    return re.compile(pattern)
# First, the classes that represent markup elements.
class PageElement(object):
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text)"""

    def setup(self, parent=None, previous=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous = previous
        self.next = None
        self.previousSibling = None
        self.nextSibling = None
        if self.parent and self.parent.contents:
            # We become the new last child: link to the former last child.
            self.previousSibling = self.parent.contents[-1]
            self.previousSibling.nextSibling = self

    def replaceWith(self, replaceWith):
        """Removes this element from the tree and puts replaceWith in its
        place under the same parent."""
        oldParent = self.parent
        myIndex = self.parent.index(self)
        if hasattr(replaceWith, "parent")\
           and replaceWith.parent is self.parent:
            # We're replacing this element with one of its siblings.
            index = replaceWith.parent.index(replaceWith)
            if index and index < myIndex:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                myIndex = myIndex - 1
        self.extract()
        oldParent.insert(myIndex, replaceWith)

    def replaceWithChildren(self):
        """Replaces this element with its own children (unwraps it)."""
        myParent = self.parent
        myIndex = self.parent.index(self)
        self.extract()
        # Insert in reverse order so repeated inserts at the same index
        # preserve the original child order.
        reversedChildren = list(self.contents)
        reversedChildren.reverse()
        for child in reversedChildren:
            myParent.insert(myIndex, child)

    def extract(self):
        """Destructively rips this element out of the tree."""
        if self.parent:
            try:
                del self.parent.contents[self.parent.index(self)]
            except ValueError:
                pass

        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        lastChild = self._lastRecursiveChild()
        nextElement = lastChild.next

        if self.previous:
            self.previous.next = nextElement
        if nextElement:
            nextElement.previous = self.previous
        self.previous = None
        lastChild.next = None

        self.parent = None
        if self.previousSibling:
            self.previousSibling.nextSibling = self.nextSibling
        if self.nextSibling:
            self.nextSibling.previousSibling = self.previousSibling
        self.previousSibling = self.nextSibling = None
        return self

    def _lastRecursiveChild(self):
        "Finds the last element beneath this object to be parsed."
        lastChild = self
        while hasattr(lastChild, 'contents') and lastChild.contents:
            lastChild = lastChild.contents[-1]
        return lastChild

    def insert(self, position, newChild):
        """Inserts newChild at the given index in self.contents, rewiring
        all next/previous/sibling links to keep the parse order intact."""
        if isinstance(newChild, basestring) \
            and not isinstance(newChild, NavigableString):
            newChild = NavigableString(newChild)

        position = min(position, len(self.contents))
        if hasattr(newChild, 'parent') and newChild.parent is not None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if newChild.parent is self:
                index = self.index(newChild)
                if index > position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, our target index
                    # will jump down one.
                    position = position - 1
            newChild.extract()

        newChild.parent = self
        previousChild = None
        if position == 0:
            newChild.previousSibling = None
            newChild.previous = self
        else:
            previousChild = self.contents[position-1]
            newChild.previousSibling = previousChild
            newChild.previousSibling.nextSibling = newChild
            newChild.previous = previousChild._lastRecursiveChild()
        if newChild.previous:
            newChild.previous.next = newChild

        newChildsLastElement = newChild._lastRecursiveChild()

        if position >= len(self.contents):
            newChild.nextSibling = None

            # Walk up the ancestors to find the element that follows
            # this subtree in document order.
            parent = self
            parentsNextSibling = None
            while not parentsNextSibling:
                parentsNextSibling = parent.nextSibling
                parent = parent.parent
                if not parent: # This is the last element in the document.
                    break
            if parentsNextSibling:
                newChildsLastElement.next = parentsNextSibling
            else:
                newChildsLastElement.next = None
        else:
            nextChild = self.contents[position]
            newChild.nextSibling = nextChild
            if newChild.nextSibling:
                newChild.nextSibling.previousSibling = newChild
            newChildsLastElement.next = nextChild

        if newChildsLastElement.next:
            newChildsLastElement.next.previous = newChildsLastElement
        self.contents.insert(position, newChild)

    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        self.insert(len(self.contents), tag)

    def findNext(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)

    def findAllNext(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.nextGenerator,
                             **kwargs)

    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._findOne(self.findNextSiblings, name, attrs, text,
                             **kwargs)

    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
                         **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.nextSiblingGenerator, **kwargs)
    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x

    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)

    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
                        **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.previousGenerator,
                             **kwargs)
    fetchPrevious = findAllPrevious # Compatibility with pre-3.x

    def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._findOne(self.findPreviousSiblings, name, attrs, text,
                             **kwargs)

    def findPreviousSiblings(self, name=None, attrs={}, text=None,
                             limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.previousSiblingGenerator, **kwargs)
    fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x

    def findParent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _findOne because findParents takes a different
        # set of arguments.
        r = None
        l = self.findParents(name, attrs, 1)
        if l:
            r = l[0]
        return r

    def findParents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        return self._findAll(name, attrs, None, limit, self.parentGenerator,
                             **kwargs)
    fetchParents = findParents # Compatibility with pre-3.x

    #These methods do the real heavy lifting.

    def _findOne(self, method, name, attrs, text, **kwargs):
        # Runs the given findAll-style method with limit=1 and unwraps
        # the single result (or None).
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r

    def _findAll(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."

        if isinstance(name, SoupStrainer):
            strainer = name
        # Special case some findAll* searches
        # findAll*(True)
        elif not limit and name is True and not attrs and not kwargs:
            return [element for element in generator()
                    if isinstance(element, Tag)]
        # findAll*('tag-name')
        elif not limit and isinstance(name, basestring) and not attrs \
            and not kwargs:
            return [element for element in generator()
                    if isinstance(element, Tag) and element.name == name]
        # Build a SoupStrainer
        else:
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        results = ResultSet(strainer)
        g = generator()
        while True:
            try:
                i = g.next()
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results

    #These Generators can be used to navigate starting from both
    #NavigableStrings and Tags.
    def nextGenerator(self):
        # Yields each element after this one in document order; the final
        # yield may be None (callers test for truthiness).
        i = self
        while i is not None:
            i = i.next
            yield i

    def nextSiblingGenerator(self):
        i = self
        while i is not None:
            i = i.nextSibling
            yield i

    def previousGenerator(self):
        i = self
        while i is not None:
            i = i.previous
            yield i

    def previousSiblingGenerator(self):
        i = self
        while i is not None:
            i = i.previousSibling
            yield i

    def parentGenerator(self):
        i = self
        while i is not None:
            i = i.parent
            yield i

    # Utility methods
    def substituteEncoding(self, str, encoding=None):
        # Replaces the %SOUP-ENCODING% placeholder with the real encoding.
        encoding = encoding or "utf-8"
        return str.replace("%SOUP-ENCODING%", encoding)

    def toEncoding(self, s, encoding=None):
        """Encodes an object to a string in some encoding, or to Unicode.
        ."""
        if isinstance(s, unicode):
            if encoding:
                s = s.encode(encoding)
        elif isinstance(s, str):
            if encoding:
                s = s.encode(encoding)
            else:
                s = unicode(s)
        else:
            # Not a string at all: stringify first, then recurse.
            if encoding:
                s = self.toEncoding(str(s), encoding)
            else:
                s = unicode(s)
        return s
class NavigableString(unicode, PageElement):
    # A piece of document text that also carries PageElement navigation
    # links (parent/next/previous), set up by the parser via setup().

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            return unicode.__new__(cls, value)
        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)

    def __getnewargs__(self):
        # Pickle support: reconstruct from the encoded byte string.
        return (NavigableString.__str__(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)

    def __unicode__(self):
        return str(self).decode(DEFAULT_OUTPUT_ENCODING)

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        # With an encoding, return encoded bytes; with encoding=None,
        # return the unicode object itself.
        if encoding:
            return self.encode(encoding)
        else:
            return self
class CData(NavigableString):
    # Text that renders wrapped in a CDATA section.

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding)
class ProcessingInstruction(NavigableString):
    # Text that renders as an XML processing instruction; the
    # %SOUP-ENCODING% placeholder is substituted at render time.

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        output = self
        if "%SOUP-ENCODING%" in output:
            output = self.substituteEncoding(output, encoding)
        return "<?%s?>" % self.toEncoding(output, encoding)
class Comment(NavigableString):
    # Text that renders as an HTML/XML comment.

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return "<!--%s-->" % NavigableString.__str__(self, encoding)
class Declaration(NavigableString):
    # Text that renders as a declaration, e.g. a DOCTYPE.

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return "<!%s>" % NavigableString.__str__(self, encoding)
class Tag(PageElement):
    """Represents a found HTML tag with its attributes and contents."""

    def _invert(h):
        "Cheap function to invert a hash."
        i = {}
        for k,v in h.items():
            i[v] = k
        return i

    XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
                                      "quot" : '"',
                                      "amp" : "&",
                                      "lt" : "<",
                                      "gt" : ">" }

    XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)

    def _convertEntities(self, match):
        """Used in a call to re.sub to replace HTML, XML, and numeric
        entities with the appropriate Unicode characters. If HTML
        entities are being converted, any unrecognized entities are
        escaped."""
        x = match.group(1)
        if self.convertHTMLEntities and x in name2codepoint:
            return unichr(name2codepoint[x])
        elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
            if self.convertXMLEntities:
                return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
            else:
                return u'&%s;' % x
        elif len(x) > 0 and x[0] == '#':
            # Handle numeric entities
            if len(x) > 1 and x[1] == 'x':
                return unichr(int(x[2:], 16))
            else:
                return unichr(int(x[1:]))
        # NOTE(review): the two branches below are identical as written,
        # so escapeUnrecognizedEntities has no effect here — possibly a
        # transcription loss (upstream BS3 escapes to '&amp;%s;'); verify
        # against the original 3.0.8 source.
        elif self.escapeUnrecognizedEntities:
            return u'&%s;' % x
        else:
            return u'&%s;' % x

    def __init__(self, parser, name, attrs=None, parent=None,
                 previous=None):
        "Basic constructor."

        # We don't actually store the parser object: that lets extracted
        # chunks be garbage-collected
        self.parserClass = parser.__class__
        self.isSelfClosing = parser.isSelfClosingTag(name)
        self.name = name
        if attrs is None:
            attrs = []
        self.attrs = attrs
        self.contents = []
        self.setup(parent, previous)
        self.hidden = False
        self.containsSubstitutions = False
        self.convertHTMLEntities = parser.convertHTMLEntities
        self.convertXMLEntities = parser.convertXMLEntities
        self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities

        # Convert any HTML, XML, or numeric entities in the attribute values.
        # (Python 2 tuple-unpacking lambda: each attr is a (key, value) pair.)
        convert = lambda(k, val): (k,
                                   re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
                                          self._convertEntities,
                                          val))
        self.attrs = map(convert, self.attrs)

    def getString(self):
        # Returns the single NavigableString child, if that is the tag's
        # entire contents; otherwise returns None implicitly.
        if (len(self.contents) == 1
            and isinstance(self.contents[0], NavigableString)):
            return self.contents[0]

    def setString(self, string):
        """Replace the contents of the tag with a string"""
        self.clear()
        self.append(string)

    string = property(getString, setString)

    def getText(self, separator=u""):
        # Concatenates all descendant strings (each stripped), joined by
        # the given separator.
        if not len(self.contents):
            return u""
        stopNode = self._lastRecursiveChild().next
        strings = []
        current = self.contents[0]
        while current is not stopNode:
            if isinstance(current, NavigableString):
                strings.append(current.strip())
            current = current.next
        return separator.join(strings)

    text = property(getText)

    def get(self, key, default=None):
        """Returns the value of the 'key' attribute for the tag, or
        the value given for 'default' if it doesn't have that
        attribute."""
        return self._getAttrMap().get(key, default)

    def clear(self):
        """Extract all children."""
        # Iterate over a copy: extract() mutates self.contents.
        for child in self.contents[:]:
            child.extract()

    def index(self, element):
        # Identity-based index lookup (list.index would use ==, and Tag
        # equality is structural — see __eq__ below).
        for i, child in enumerate(self.contents):
            if child is element:
                return i
        raise ValueError("Tag.index: element not in tag")

    def has_key(self, key):
        return self._getAttrMap().has_key(key)

    def __getitem__(self, key):
        """tag[key] returns the value of the 'key' attribute for the tag,
        and throws an exception if it's not there."""
        return self._getAttrMap()[key]

    def __iter__(self):
        "Iterating over a tag iterates over its contents."
        return iter(self.contents)

    def __len__(self):
        "The length of a tag is the length of its list of contents."
        return len(self.contents)

    def __contains__(self, x):
        return x in self.contents

    def __nonzero__(self):
        "A tag is non-None even if it has no contents."
        return True

    def __setitem__(self, key, value):
        """Setting tag[key] sets the value of the 'key' attribute for the
        tag."""
        self._getAttrMap()
        self.attrMap[key] = value
        found = False
        for i in range(0, len(self.attrs)):
            if self.attrs[i][0] == key:
                self.attrs[i] = (key, value)
                found = True
        if not found:
            self.attrs.append((key, value))
        # NOTE(review): attrMap[key] was already set above, so this second
        # assignment is redundant (but harmless).
        self._getAttrMap()[key] = value

    def __delitem__(self, key):
        "Deleting tag[key] deletes all 'key' attributes for the tag."
        for item in self.attrs:
            if item[0] == key:
                self.attrs.remove(item)
                #We don't break because bad HTML can define the same
                #attribute multiple times.
            self._getAttrMap()
            if self.attrMap.has_key(key):
                del self.attrMap[key]

    def __call__(self, *args, **kwargs):
        """Calling a tag like a function is the same as calling its
        findAll() method. Eg. tag('a') returns a list of all the A tags
        found within this tag."""
        return apply(self.findAll, args, kwargs)

    def __getattr__(self, tag):
        #print "Getattr %s.%s" % (self.__class__, tag)
        # soup.fooTag -> first <foo> descendant; soup.foo likewise
        # (deprecated shorthand). Dunder lookups fall through to error.
        if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
            return self.find(tag[:-3])
        elif tag.find('__') != 0:
            return self.find(tag)
        raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)

    def __eq__(self, other):
        """Returns true iff this tag has the same name, the same attributes,
        and the same contents (recursively) as the given tag.

        NOTE: right now this will return false if two tags have the
        same attributes in a different order. Should this be fixed?"""
        if other is self:
            return True
        if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
            return False
        for i in range(0, len(self.contents)):
            if self.contents[i] != other.contents[i]:
                return False
        return True

    def __ne__(self, other):
        """Returns true iff this tag is not identical to the other tag,
        as defined in __eq__."""
        return not self == other

    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Renders this tag as a string."""
        return self.__str__(encoding)

    def __unicode__(self):
        return self.__str__(None)

    BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
                                           + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
                                           + ")")

    def _sub_entity(self, x):
        """Used with a regular expression to substitute the
        appropriate XML entity for an XML special character."""
        return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
                prettyPrint=False, indentLevel=0):
        """Returns a string or Unicode representation of this tag and
        its contents. To get Unicode, pass None for encoding.

        NOTE: since Python's HTML parser consumes whitespace, this
        method is not certain to reproduce the whitespace present in
        the original string."""

        encodedName = self.toEncoding(self.name, encoding)

        attrs = []
        if self.attrs:
            for key, val in self.attrs:
                fmt = '%s="%s"'
                if isinstance(val, basestring):
                    if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
                        val = self.substituteEncoding(val, encoding)

                    # The attribute value either:
                    #
                    # * Contains no embedded double quotes or single quotes.
                    #   No problem: we enclose it in double quotes.
                    # * Contains embedded single quotes. No problem:
                    #   double quotes work here too.
                    # * Contains embedded double quotes. No problem:
                    #   we enclose it in single quotes.
                    # * Embeds both single _and_ double quotes. This
                    #   can't happen naturally, but it can happen if
                    #   you modify an attribute value after parsing
                    #   the document. Now we have a bit of a
                    #   problem. We solve it by enclosing the
                    #   attribute in single quotes, and escaping any
                    #   embedded single quotes to XML entities.
                    if '"' in val:
                        fmt = "%s='%s'"
                        if "'" in val:
                            # TODO: replace with apos when
                            # appropriate.
                            val = val.replace("'", "&squot;")

                    # Now we're okay w/r/t quotes. But the attribute
                    # value might also contain angle brackets, or
                    # ampersands that aren't part of entities. We need
                    # to escape those to XML entities too.
                    val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)

                attrs.append(fmt % (self.toEncoding(key, encoding),
                                    self.toEncoding(val, encoding)))
        close = ''
        closeTag = ''
        if self.isSelfClosing:
            close = ' /'
        else:
            closeTag = '</%s>' % encodedName

        indentTag, indentContents = 0, 0
        if prettyPrint:
            indentTag = indentLevel
            space = (' ' * (indentTag-1))
            indentContents = indentTag + 1
        contents = self.renderContents(encoding, prettyPrint, indentContents)
        if self.hidden:
            # Hidden tags (e.g. the soup object itself) render only their
            # contents, no surrounding markup.
            s = contents
        else:
            s = []
            attributeString = ''
            if attrs:
                attributeString = ' ' + ' '.join(attrs)
            if prettyPrint:
                s.append(space)
            s.append('<%s%s%s>' % (encodedName, attributeString, close))
            if prettyPrint:
                s.append("\n")
            s.append(contents)
            if prettyPrint and contents and contents[-1] != "\n":
                s.append("\n")
            if prettyPrint and closeTag:
                s.append(space)
            s.append(closeTag)
            if prettyPrint and closeTag and self.nextSibling:
                s.append("\n")
            s = ''.join(s)
        return s

    def decompose(self):
        """Recursively destroys the contents of this tree."""
        self.extract()
        if len(self.contents) == 0:
            return
        current = self.contents[0]
        while current is not None:
            next = current.next
            if isinstance(current, Tag):
                del current.contents[:]
            # Break every navigation link so the nodes can be collected.
            current.parent = None
            current.previous = None
            current.previousSibling = None
            current.next = None
            current.nextSibling = None
            current = next

    def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return self.__str__(encoding, True)

    def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                       prettyPrint=False, indentLevel=0):
        """Renders the contents of this tag as a string in the given
        encoding. If encoding is None, returns a Unicode string.."""
        s=[]
        for c in self:
            text = None
            if isinstance(c, NavigableString):
                text = c.__str__(encoding)
            elif isinstance(c, Tag):
                s.append(c.__str__(encoding, prettyPrint, indentLevel))
            if text and prettyPrint:
                text = text.strip()
            if text:
                if prettyPrint:
                    s.append(" " * (indentLevel-1))
                s.append(text)
                if prettyPrint:
                    s.append("\n")
        return ''.join(s)

    #Soup methods

    def find(self, name=None, attrs={}, recursive=True, text=None,
             **kwargs):
        """Return only the first child of this Tag matching the given
        criteria."""
        r = None
        l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
        if l:
            r = l[0]
        return r
    findChild = find

    def findAll(self, name=None, attrs={}, recursive=True, text=None,
                limit=None, **kwargs):
        """Extracts a list of Tag objects that match the given
        criteria.  You can specify the name of the Tag and any
        attributes you want the Tag to have.

        The value of a key-value pair in the 'attrs' map can be a
        string, a list of strings, a regular expression object, or a
        callable that takes a string and returns whether or not the
        string matches for some custom definition of 'matches'. The
        same is true of the tag name."""
        generator = self.recursiveChildGenerator
        if not recursive:
            generator = self.childGenerator
        return self._findAll(name, attrs, text, limit, generator, **kwargs)
    findChildren = findAll

    # Pre-3.x compatibility methods
    first = find
    fetch = findAll

    def fetchText(self, text=None, recursive=True, limit=None):
        return self.findAll(text=text, recursive=recursive, limit=limit)

    def firstText(self, text=None, recursive=True):
        return self.find(text=text, recursive=recursive)

    #Private methods

    def _getAttrMap(self):
        """Initializes a map representation of this tag's attributes,
        if not already initialized."""
        # NOTE: getattr without a default would normally raise when
        # attrMap is unset, but Tag.__getattr__ intercepts the lookup
        # (it falls through to self.find('attrMap'), which returns None),
        # so this test works on first use.
        if not getattr(self, 'attrMap'):
            self.attrMap = {}
            for (key, value) in self.attrs:
                self.attrMap[key] = value
        return self.attrMap

    #Generator methods
    def childGenerator(self):
        # Just use the iterator from the contents
        return iter(self.contents)

    def recursiveChildGenerator(self):
        if not len(self.contents):
            raise StopIteration
        stopNode = self._lastRecursiveChild().next
        current = self.contents[0]
        while current is not stopNode:
            yield current
            current = current.next
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text)."""

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        self.name = name
        if isinstance(attrs, basestring):
            # A bare string as 'attrs' is shorthand for a CSS class match.
            kwargs['class'] = _match_css_class(attrs)
            attrs = None
        if kwargs:
            if attrs:
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text

    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def searchTag(self, markupName=None, markupAttrs={}):
        # Matches a tag by name and attributes. markupName may be a Tag
        # or a bare name string (with attrs passed separately).
        found = None
        markup = None
        if isinstance(markupName, Tag):
            markup = markupName
            markupAttrs = markup
        callFunctionWithTagData = callable(self.name) \
                                and not isinstance(markupName, Tag)

        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                markupAttrMap = None
                for attr, matchAgainst in self.attrs.items():
                    if not markupAttrMap:
                        # Lazily build a dict view of the attributes.
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            markupAttrMap = {}
                            for k,v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found

    def search(self, markup):
        #print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if hasattr(markup, "__iter__") \
                and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isinstance(markup, basestring):
            if self._matches(markup, self.text):
                found = markup
        else:
            raise Exception, "I don't know how to match against a %s" \
                  % markup.__class__
        return found

    def _matches(self, markup, matchAgainst):
        #print "Matching %s against %s" % (markup, matchAgainst)
        result = False
        if matchAgainst is True:
            result = markup is not None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup and not isinstance(markup, basestring):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif hasattr(matchAgainst, '__iter__'): # list-like
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # NOTE(review): this calls has_key on *markup* (a string or
                # None at this point) with the whole dict as the key — looks
                # inverted; verify intended semantics against upstream.
                result = markup.has_key(matchAgainst)
            elif matchAgainst and isinstance(markup, basestring):
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)

            if not result:
                result = matchAgainst == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        # Bug fix: the original called list.__init__([]), which
        # initialized a throwaway empty list instead of this instance.
        # Initializing self is the correct (and equivalent-in-effect)
        # form.
        list.__init__(self)
        # The SoupStrainer that produced this set of results.
        self.source = source
# Now, some helper functions.
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.

    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps."""
    mapping = {}
    for source in args:
        if hasattr(source, 'items'):
            # A map: merge its entries verbatim.
            mapping.update(source)
        elif hasattr(source, '__iter__'):
            # A list: each entry maps to the default value.
            for key in source:
                mapping[key] = default
        else:
            # A scalar: map it directly to the default value.
            mapping[source] = default
    return mapping
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):

    """This class contains the basic parser and search code. It defines
    a parser that knows nothing about tag behavior except for the
    following:

      You can't close a tag without closing all the tags it encloses.
      That is, "<foo><bar></foo>" actually means
      "<foo><bar></bar></foo>".

    [Another possible explanation is "<foo><bar /></foo>", but since
    this class defines no SELF_CLOSING_TAGS, it will never use that
    explanation.]

    This class is useful for parsing XML or made-up markup languages,
    or when BeautifulSoup makes an assumption counter to what you were
    expecting."""

    # Tag-behavior tables; all empty here, overridden by subclasses
    # (see BeautifulSoup below) that know about HTML semantics.
    SELF_CLOSING_TAGS = {}
    NESTABLE_TAGS = {}
    RESET_NESTING_TAGS = {}
    QUOTE_TAGS = {}
    PRESERVE_WHITESPACE_TAGS = []

    # (regex, replacement) pairs applied to the markup before parsing
    # to fix input that would otherwise choke sgmllib.
    MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
                       lambda x: x.group(1) + ' />'),
                      (re.compile('<!\s+([^<>]*)>'),
                       lambda x: '<!' + x.group(1) + '>')
                      ]

    # Name of the synthetic root tag that holds the whole document.
    ROOT_TAG_NAME = u'[document]'

    # Entity-conversion mode constants accepted by __init__.
    HTML_ENTITIES = "html"
    XML_ENTITIES = "xml"
    XHTML_ENTITIES = "xhtml"
    # TODO: This only exists for backwards-compatibility
    ALL_ENTITIES = XHTML_ENTITIES

    # Used when determining whether a text node is all whitespace and
    # can be replaced with a single space. A text node that contains
    # fancy Unicode spaces (usually non-breaking) should be left
    # alone.
    STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
                 markupMassage=True, smartQuotesTo=XML_ENTITIES,
                 convertEntities=None, selfClosingTags=None, isHTML=False):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser.

        sgmllib will process most bad HTML, and the BeautifulSoup
        class has some tricks for dealing with some HTML that kills
        sgmllib, but Beautiful Soup can nonetheless choke or lose data
        if your data uses self-closing tags or declarations
        incorrectly.

        By default, Beautiful Soup uses regexes to sanitize input,
        avoiding the vast majority of these problems. If the problems
        don't apply to you, pass in False for markupMassage, and
        you'll get better performance.

        The default parser massage techniques fix the two most common
        instances of invalid HTML that choke sgmllib:

         <br/> (No space between name of closing tag and tag close)
         <! --Comment--> (Extraneous whitespace in declaration)

        You can pass in a custom list of (RE object, replace method)
        tuples to get Beautiful Soup to scrub your input the way you
        want."""
        self.parseOnlyThese = parseOnlyThese
        self.fromEncoding = fromEncoding
        self.smartQuotesTo = smartQuotesTo
        self.convertEntities = convertEntities
        # Set the rules for how we'll deal with the entities we
        # encounter
        if self.convertEntities:
            # It doesn't make sense to convert encoded characters to
            # entities even while you're converting entities to Unicode.
            # Just convert it all to Unicode.
            self.smartQuotesTo = None
            if convertEntities == self.HTML_ENTITIES:
                self.convertXMLEntities = False
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = True
            elif convertEntities == self.XHTML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = False
            elif convertEntities == self.XML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = False
                self.escapeUnrecognizedEntities = False
        else:
            self.convertXMLEntities = False
            self.convertHTMLEntities = False
            self.escapeUnrecognizedEntities = False

        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
        SGMLParser.__init__(self)

        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        self.markup = markup
        self.markupMassage = markupMassage
        try:
            # StopParsing is raised by start_meta when it discovers the
            # real encoding mid-parse and re-feeds the document.
            self._feed(isHTML=isHTML)
        except StopParsing:
            pass
        self.markup = None                 # The markup can now be GCed
    def convert_charref(self, name):
        """This method fixes a bug in Python's SGMLParser.

        Only codepoints 0-127 are converted; anything else (or a
        non-numeric reference) is left for other handlers.
        """
        try:
            n = int(name)
        except ValueError:
            return
        if not 0 <= n <= 127 : # ASCII ends at 127, not 255
            return
        return self.convert_codepoint(n)
    def _feed(self, inDocumentEncoding=None, isHTML=False):
        """Decode self.markup to Unicode (via UnicodeDammit), apply the
        markup-massage fixups, and run it through the SGML parser."""
        # Convert the document to Unicode.
        markup = self.markup
        if isinstance(markup, unicode):
            if not hasattr(self, 'originalEncoding'):
                self.originalEncoding = None
        else:
            dammit = UnicodeDammit\
                     (markup, [self.fromEncoding, inDocumentEncoding],
                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
            markup = dammit.unicode
            self.originalEncoding = dammit.originalEncoding
            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
        if markup:
            if self.markupMassage:
                # markupMassage may be True (use the class default) or
                # a custom list of (regex, replacement) pairs.
                if not hasattr(self.markupMassage, "__iter__"):
                    self.markupMassage = self.MARKUP_MASSAGE
                for fix, m in self.markupMassage:
                    markup = fix.sub(m, markup)
                # TODO: We get rid of markupMassage so that the
                # soup object can be deepcopied later on. Some
                # Python installations can't copy regexes. If anyone
                # was relying on the existence of markupMassage, this
                # might cause problems.
                del(self.markupMassage)
        self.reset()

        SGMLParser.feed(self, markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()
    def __getattr__(self, methodName):
        """This method routes method call requests to either the SGMLParser
        superclass or the Tag superclass, depending on the method name."""
        #print "__getattr__ called on %s.%s" % (self.__class__, methodName)

        # sgmllib dispatches tag handlers through start_/end_/do_ names.
        if methodName.startswith('start_') or methodName.startswith('end_') \
               or methodName.startswith('do_'):
            return SGMLParser.__getattr__(self, methodName)
        elif not methodName.startswith('__'):
            # Everything else (e.g. soup.titleTag) goes to Tag's magic
            # attribute lookup.
            return Tag.__getattr__(self, methodName)
        else:
            raise AttributeError
    def isSelfClosingTag(self, name):
        """Returns true iff the given string is the name of a
        self-closing tag according to this parser (either the class
        table or tags passed to this instance's constructor)."""
        return self.SELF_CLOSING_TAGS.has_key(name) \
               or self.instanceSelfClosingTags.has_key(name)
    def reset(self):
        """Reset all parser state and re-root the tree at this soup
        object (which doubles as the hidden '[document]' root tag)."""
        Tag.__init__(self, self, self.ROOT_TAG_NAME)
        self.hidden = 1
        SGMLParser.reset(self)
        self.currentData = []
        self.currentTag = None
        self.tagStack = []
        self.quoteStack = []
        self.pushTag(self)
    def popTag(self):
        """Pop the top tag off the open-tag stack; the new stack top
        becomes the current tag. Returns the new current tag."""
        tag = self.tagStack.pop()
        #print "Pop", tag.name
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag
    def pushTag(self, tag):
        """Append *tag* to the current tag's contents and make it the
        new current (innermost open) tag."""
        #print "Push", tag.name
        if self.currentTag:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]
    def endData(self, containerClass=NavigableString):
        """Flush the accumulated character data into a text node
        (containerClass instance) attached to the current tag.

        All-ASCII-whitespace runs collapse to a single '\\n' or ' '
        unless we're inside a whitespace-preserving tag (e.g. <pre>).
        """
        if self.currentData:
            currentData = u''.join(self.currentData)
            if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
                not set([tag.name for tag in self.tagStack]).intersection(
                    self.PRESERVE_WHITESPACE_TAGS)):
                if '\n' in currentData:
                    currentData = '\n'
                else:
                    currentData = ' '
            self.currentData = []
            # When parsing only selected parts, drop top-level text that
            # the SoupStrainer doesn't want.
            if self.parseOnlyThese and len(self.tagStack) <= 1 and \
                   (not self.parseOnlyThese.text or \
                    not self.parseOnlyThese.search(currentData)):
                return
            o = containerClass(currentData)
            o.setup(self.currentTag, self.previous)
            if self.previous:
                self.previous.next = o
            self.previous = o
            self.currentTag.contents.append(o)
    def _popToTag(self, name, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
        the given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            # The root is never popped.
            return

        numPops = 0
        mostRecentTag = None
        # Scan downward (stack index 0 is the root, so stop before it).
        for i in range(len(self.tagStack)-1, 0, -1):
            if name == self.tagStack[i].name:
                numPops = len(self.tagStack)-i
                break
        if not inclusivePop:
            numPops = numPops - 1

        for i in range(0, numPops):
            mostRecentTag = self.popTag()
        return mostRecentTag
    def _smartPop(self, name):
        """We need to pop up to the previous tag of this type, unless
        one of this tag's nesting reset triggers comes between this
        tag and the previous tag of this type, OR unless this tag is a
        generic nesting trigger and another generic nesting trigger
        comes between this tag and the previous tag of this type.

        Examples:
         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.

         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
        """
        nestingResetTriggers = self.NESTABLE_TAGS.get(name)
        isNestable = nestingResetTriggers != None
        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
        popTo = None
        inclusive = True
        # Walk the open-tag stack from innermost outward (index 0 is
        # the root and is never considered).
        for i in range(len(self.tagStack)-1, 0, -1):
            p = self.tagStack[i]
            if (not p or p.name == name) and not isNestable:
                #Non-nestable tags get popped to the top or to their
                #last occurance.
                popTo = name
                break
            if (nestingResetTriggers is not None
                and p.name in nestingResetTriggers) \
                or (nestingResetTriggers is None and isResetNesting
                    and self.RESET_NESTING_TAGS.has_key(p.name)):

                #If we encounter one of the nesting reset triggers
                #peculiar to this tag, or we encounter another tag
                #that causes nesting to reset, pop up to but not
                #including that tag.
                popTo = p.name
                inclusive = False
                break
            # NOTE(review): this rebinding of p is immediately discarded
            # by the next loop iteration; it appears to be vestigial.
            p = p.parent
        if popTo:
            self._popToTag(popTo, inclusive)
    def unknown_starttag(self, name, attrs, selfClosing=0):
        """SGMLParser callback for an opening tag: build a Tag node,
        link it into the tree, and manage quote/self-closing state."""
        #print "Start tag %s: %s" % (name, attrs)
        if self.quoteStack:
            #This is not a real tag.
            #print "<%s> is not real!" % name
            attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs])
            self.handle_data('<%s%s>' % (name, attrs))
            return
        self.endData()

        if not self.isSelfClosingTag(name) and not selfClosing:
            self._smartPop(name)

        # When parsing selectively, skip top-level tags the strainer
        # rejects (text-only strainers reject all tags).
        if self.parseOnlyThese and len(self.tagStack) <= 1 \
               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
            return

        tag = Tag(self, name, attrs, self.currentTag, self.previous)
        if self.previous:
            self.previous.next = tag
        self.previous = tag
        self.pushTag(tag)
        if selfClosing or self.isSelfClosingTag(name):
            self.popTag()
        if name in self.QUOTE_TAGS:
            #print "Beginning quote (%s)" % name
            self.quoteStack.append(name)
            self.literal = 1
        return tag
    def unknown_endtag(self, name):
        """SGMLParser callback for a closing tag: pop the tree back to
        the matching open tag, honoring quoted (literal) sections."""
        #print "End tag %s" % name
        if self.quoteStack and self.quoteStack[-1] != name:
            #This is not a real end tag.
            #print "</%s> is not real!" % name
            self.handle_data('</%s>' % name)
            return
        self.endData()
        self._popToTag(name)
        if self.quoteStack and self.quoteStack[-1] == name:
            self.quoteStack.pop()
            self.literal = (len(self.quoteStack) > 0)
    def handle_data(self, data):
        """Accumulate character data; it is flushed by endData()."""
        self.currentData.append(data)
    def _toStringSubclass(self, text, subclass):
        """Adds a certain piece of text to the tree as a NavigableString
        subclass (e.g. Comment, CData, Declaration)."""
        self.endData()
        self.handle_data(text)
        self.endData(subclass)
    def handle_pi(self, text):
        """Handle a processing instruction as a ProcessingInstruction
        object, possibly one with a %SOUP-ENCODING% slot into which an
        encoding will be plugged later."""
        if text[:3] == "xml":
            # Rewrite the XML declaration so the output encoding can be
            # substituted at render time.
            text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
        self._toStringSubclass(text, ProcessingInstruction)
    def handle_comment(self, text):
        "Handle comments as Comment objects."
        self._toStringSubclass(text, Comment)
    def handle_charref(self, ref):
        """Handle character references as data, converting numeric
        references to Unicode when entity conversion is enabled."""
        if self.convertEntities:
            data = unichr(int(ref))
        else:
            # Pass the reference through untouched.
            data = '&#%s;' % ref
        self.handle_data(data)
    def handle_entityref(self, ref):
        """Handle entity references as data, possibly converting known
        HTML and/or XML entity references to the corresponding Unicode
        characters."""
        data = None
        if self.convertHTMLEntities:
            try:
                data = unichr(name2codepoint[ref])
            except KeyError:
                pass

        if not data and self.convertXMLEntities:
                data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)

        if not data and self.convertHTMLEntities and \
            not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
                # TODO: We've got a problem here. We're told this is
                # an entity reference, but it's not an XML entity
                # reference or an HTML entity reference. Nonetheless,
                # the logical thing to do is to pass it through as an
                # unrecognized entity reference.
                #
                # Except: when the input is "&carol;" this function
                # will be called with input "carol". When the input is
                # "AT&T", this function will be called with input
                # "T". We have no way of knowing whether a semicolon
                # was present originally, so we don't know whether
                # this is an unknown entity or just a misplaced
                # ampersand.
                #
                # The more common case is a misplaced ampersand, so I
                # escape the ampersand and omit the trailing semicolon.
                data = "&amp;%s" % ref
        if not data:
            # This case is different from the one above, because we
            # haven't already gone through a supposedly comprehensive
            # mapping of entities to Unicode characters. We might not
            # have gone through any mapping at all. So the chances are
            # very high that this is a real entity, and not a
            # misplaced ampersand.
            data = "&%s;" % ref
        self.handle_data(data)
    def handle_decl(self, data):
        "Handle DOCTYPEs and the like as Declaration objects."
        self._toStringSubclass(data, Declaration)
    def parse_declaration(self, i):
        """Treat a bogus SGML declaration as raw data. Treat a CDATA
        declaration as a CData object.

        Returns the index just past the declaration, per the sgmllib
        parse_declaration contract.
        """
        j = None
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # Unterminated CDATA: consume to end of input.
                k = len(self.rawdata)
            data = self.rawdata[i+9:k]
            j = k+3
            self._toStringSubclass(data, CData)
        else:
            try:
                j = SGMLParser.parse_declaration(self, i)
            except SGMLParseError:
                # Malformed declaration: swallow the rest as raw data.
                toHandle = self.rawdata[i:]
                self.handle_data(toHandle)
                j = i + len(toHandle)
        return j
class BeautifulSoup(BeautifulStoneSoup):

    """This parser knows the following facts about HTML:

    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.

    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.

    * Tag nesting rules:

      Most tags can't be nested at all. For instance, the occurance of
      a <p> tag should implicitly close the previous <p> tag.

       <p>Para1<p>Para2
        should be transformed into:
       <p>Para1</p><p>Para2

      Some tags can be nested arbitrarily. For instance, the occurance
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.

       Alice said: <blockquote>Bob said: <blockquote>Blah
        should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah

      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.

       <table><tr>Blah<tr>Blah
        should be transformed into:
       <table><tr>Blah</tr><tr>Blah
       but,
       <tr>Blah<table><tr>Blah
        should NOT be transformed into
       <tr>Blah<table></tr><tr>Blah

    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable,
    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""

    def __init__(self, *args, **kwargs):
        """Same as BeautifulStoneSoup.__init__, but defaults
        smartQuotesTo to HTML entities and flags the input as HTML."""
        if not kwargs.has_key('smartQuotesTo'):
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        kwargs['isHTML'] = True
        BeautifulStoneSoup.__init__(self, *args, **kwargs)
SELF_CLOSING_TAGS = buildTagMap(None,
('br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base', 'col'))
PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])
QUOTE_TAGS = {'script' : None, 'textarea' : None}
#According to the HTML standard, each of these inline tags can
#contain another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
'center')
#According to the HTML standard, these block tags can contain
#another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del')
#Lists can contain other lists, but there are restrictions.
NESTABLE_LIST_TAGS = { 'ol' : [],
'ul' : [],
'li' : ['ul', 'ol'],
'dl' : [],
'dd' : ['dl'],
'dt' : ['dl'] }
#Tables can contain other tables, but there are restrictions.
NESTABLE_TABLE_TAGS = {'table' : [],
'tr' : ['table', 'tbody', 'tfoot', 'thead'],
'td' : ['tr'],
'th' : ['tr'],
'thead' : ['table'],
'tbody' : ['table'],
'tfoot' : ['table'],
}
NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')
#If one of these tags is encountered, all tags up to the next tag of
#this type are popped.
RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
NON_NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS,
NESTABLE_TABLE_TAGS)
NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
# Used to detect the charset in a META tag; see start_meta
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
    def start_meta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None
        tagNeedsEncodingSubstitution = False

        # Pull out the http-equiv and content attributes, remembering
        # where 'content' sits so it can be rewritten in place.
        for i in range(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i

        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                if (self.declaredHTMLEncoding is not None or
                    self.originalEncoding == self.fromEncoding):
                    # An HTML encoding was sniffed while converting
                    # the document to Unicode, or an HTML encoding was
                    # sniffed during a previous pass through the
                    # document, or an encoding was specified
                    # explicitly and it worked. Rewrite the meta tag.
                    def rewrite(match):
                        return match.group(1) + "%SOUP-ENCODING%"
                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the encoding information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        self._feed(self.declaredHTMLEncoding)
                        # Abort this parse pass; __init__ catches this.
                        raise StopParsing
                    pass
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            tag.containsSubstitutions = True
class StopParsing(Exception):
    # Raised by start_meta (and caught in BeautifulStoneSoup.__init__)
    # to abort a parse pass once the document's real encoding is known.
    pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):

    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:

     <b>Foo<b>Bar</b></b>

    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.

    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-co-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""

    # NOTE(review): 'strong' and 'big' appear twice; harmless since the
    # map construction deduplicates keys.
    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
     ('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
      'big')

    # NOTE(review): this is a plain string, not a 1-tuple; under
    # Python 2 buildTagMap treats a str as a scalar, so the whole word
    # 'noscript' becomes a single key -- confirm before porting.
    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript')

    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.

    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""

    # NOTE(review): buildTagMap('noscript') passes 'noscript' as the
    # *default* with no tag arguments, so this evaluates to an empty
    # map -- possibly intended to be buildTagMap(None, 'noscript');
    # confirm against upstream before changing behavior.
    RESET_NESTING_TAGS = buildTagMap('noscript')
    NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:

    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>

    You can then access fooTag['bar'] instead of fooTag.barTag.string.

    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.

    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""

    def popTag(self):
        """On every tag close, copy a single-string child up into the
        parent as an attribute (unless the parent already has one by
        that name), then defer to the normal popTag."""
        if len(self.tagStack) > 1:
            tag = self.tagStack[-1]
            parent = self.tagStack[-2]
            parent._getAttrMap()
            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
                isinstance(tag.contents[0], NavigableString) and
                not parent.attrMap.has_key(tag.name)):
                parent[tag.name] = tag.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    # Enterprise-friendly alias for BeautifulStoneSoup.
    pass
class RobustHTMLParser(BeautifulSoup):
    # Enterprise-friendly alias for BeautifulSoup.
    pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    # Enterprise-friendly alias for ICantBelieveItsBeautifulSoup.
    pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
    # Enterprise-friendly alias for MinimalSoup.
    pass
class SimplifyingSOAPParser(BeautifulSOAP):
    # Enterprise-friendly alias for BeautifulSOAP.
    pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
    """A class for detecting the encoding of a *ML document and
    converting it to a Unicode string. If the source encoding is
    windows-1252, can replace MS smart quotes with their HTML or XML
    equivalents."""

    # This dictionary maps commonly seen values for "charset" in HTML
    # meta tags to the corresponding Python codec names. It only covers
    # values that aren't in Python's aliases and can't be determined
    # by the heuristics in find_codec.
    CHARSET_ALIASES = { "macintosh" : "mac-roman",
                        "x-sjis" : "shift-jis" }
    def __init__(self, markup, overrideEncodings=[],
                 smartQuotesTo='xml', isHTML=False):
        """Detect the markup's encoding and decode it; the result is
        stored in self.unicode (None if every attempt failed) and the
        winning codec in self.originalEncoding.

        Attempt order: caller overrides, declared/sniffed encodings,
        chardet (if installed), then utf-8 and windows-1252.
        NOTE(review): the mutable default for overrideEncodings is only
        iterated, never mutated, so it is harmless as written.
        """
        self.declaredHTMLEncoding = None
        self.markup, documentEncoding, sniffedEncoding = \
                     self._detectEncoding(markup, isHTML)
        self.smartQuotesTo = smartQuotesTo
        self.triedEncodings = []
        if markup == '' or isinstance(markup, unicode):
            # Already Unicode (or trivially empty): nothing to decode.
            self.originalEncoding = None
            self.unicode = unicode(markup)
            return

        u = None
        for proposedEncoding in overrideEncodings:
            u = self._convertFrom(proposedEncoding)
            if u: break
        if not u:
            for proposedEncoding in (documentEncoding, sniffedEncoding):
                u = self._convertFrom(proposedEncoding)
                if u: break

        # If no luck and we have auto-detection library, try that:
        if not u and chardet and not isinstance(self.markup, unicode):
            u = self._convertFrom(chardet.detect(self.markup)['encoding'])

        # As a last resort, try utf-8 and windows-1252:
        if not u:
            for proposed_encoding in ("utf-8", "windows-1252"):
                u = self._convertFrom(proposed_encoding)
                if u: break

        self.unicode = u
        if not u: self.originalEncoding = None
    def _subMSChar(self, orig):
        """Changes a MS smart quote character to an XML or HTML
        entity."""
        sub = self.MS_CHARS.get(orig)
        if isinstance(sub, tuple):
            # Tuple entries are (html_entity_name, hex_codepoint).
            if self.smartQuotesTo == 'xml':
                sub = '&#x%s;' % sub[1]
            else:
                sub = '&%s;' % sub[0]
        return sub
    def _convertFrom(self, proposed):
        """Try decoding self.markup with the *proposed* codec name.

        On success, stores and returns the decoded markup and records
        the winning encoding; on any failure returns None. Each codec
        is attempted at most once (see self.triedEncodings).
        """
        proposed = self.find_codec(proposed)
        if not proposed or proposed in self.triedEncodings:
            return None
        self.triedEncodings.append(proposed)
        markup = self.markup

        # Convert smart quotes to HTML if coming from an encoding
        # that might have them.
        if self.smartQuotesTo and proposed.lower() in("windows-1252",
                                                      "iso-8859-1",
                                                      "iso-8859-2"):
            markup = re.compile("([\x80-\x9f])").sub \
                     (lambda(x): self._subMSChar(x.group(1)),
                      markup)

        try:
            # print "Trying to convert document to %s" % proposed
            u = self._toUnicode(markup, proposed)
            self.markup = u
            self.originalEncoding = proposed
        except Exception, e:
            # print "That didn't work!"
            # print e
            return None
        #print "Correct encoding: %s" % proposed
        return self.markup
    def _toUnicode(self, data, encoding):
        '''Given a string and its encoding, decodes the string into Unicode.
        %encoding is a string recognized by encodings.aliases

        A leading byte-order mark overrides the supplied encoding and
        is stripped before decoding.'''
        # strip Byte Order Mark (if present)
        if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
               and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16be'
            data = data[2:]
        elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
                 and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16le'
            data = data[2:]
        elif data[:3] == '\xef\xbb\xbf':
            encoding = 'utf-8'
            data = data[3:]
        elif data[:4] == '\x00\x00\xfe\xff':
            encoding = 'utf-32be'
            data = data[4:]
        elif data[:4] == '\xff\xfe\x00\x00':
            encoding = 'utf-32le'
            data = data[4:]
        newdata = unicode(data, encoding)
        return newdata
    def _detectEncoding(self, xml_data, isHTML=False):
        """Given a document, tries to detect its XML encoding.

        Returns (possibly-transcoded data, declared encoding or None,
        sniffed encoding or None). Sniffing inspects BOMs and the byte
        pattern of a leading '<?xml' declaration; declared encodings
        come from the XML declaration or (for HTML) a meta charset.
        """
        xml_encoding = sniffed_xml_encoding = None
        try:
            if xml_data[:4] == '\x4c\x6f\xa7\x94':
                # EBCDIC
                xml_data = self._ebcdic_to_ascii(xml_data)
            elif xml_data[:4] == '\x00\x3c\x00\x3f':
                # UTF-16BE
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
                     and (xml_data[2:4] != '\x00\x00'):
                # UTF-16BE with BOM
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x3f\x00':
                # UTF-16LE
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
                     (xml_data[2:4] != '\x00\x00'):
                # UTF-16LE with BOM
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\x00\x3c':
                # UTF-32BE
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x00\x00':
                # UTF-32LE
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\xfe\xff':
                # UTF-32BE with BOM
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\xff\xfe\x00\x00':
                # UTF-32LE with BOM
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
            elif xml_data[:3] == '\xef\xbb\xbf':
                # UTF-8 with BOM
                sniffed_xml_encoding = 'utf-8'
                xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
            else:
                sniffed_xml_encoding = 'ascii'
                pass
        except:
            # Any decode failure while sniffing: fall through with no
            # declared match.
            xml_encoding_match = None
        xml_encoding_match = re.compile(
            '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
        if not xml_encoding_match and isHTML:
            regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
            xml_encoding_match = regexp.search(xml_data)
        if xml_encoding_match is not None:
            xml_encoding = xml_encoding_match.groups()[0].lower()
            if isHTML:
                self.declaredHTMLEncoding = xml_encoding
            if sniffed_xml_encoding and \
               (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
                                 'iso-10646-ucs-4', 'ucs-4', 'csucs4',
                                 'utf-16', 'utf-32', 'utf_16', 'utf_32',
                                 'utf16', 'u16')):
                # The declaration names a generic UTF family; trust the
                # concrete byte-order we actually sniffed.
                xml_encoding = sniffed_xml_encoding
        return xml_data, xml_encoding, sniffed_xml_encoding
    def find_codec(self, charset):
        """Map a charset label to a known Python codec name, trying the
        alias table and dash/underscore variants; falls back to the
        label itself."""
        return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
               or (charset and self._codec(charset.replace("-", ""))) \
               or (charset and self._codec(charset.replace("-", "_"))) \
               or charset
    def _codec(self, charset):
        """Return *charset* if Python has a codec by that name, else
        None (falsy input is returned unchanged)."""
        if not charset: return charset
        codec = None
        try:
            codecs.lookup(charset)
            codec = charset
        except (LookupError, ValueError):
            pass
        return codec
    # Lazily-built translation table shared by all instances.
    EBCDIC_TO_ASCII_MAP = None
    def _ebcdic_to_ascii(self, s):
        """Translate an EBCDIC byte string to its ASCII equivalent,
        building (and caching) the 256-byte translation table on first
        use."""
        c = self.__class__
        if not c.EBCDIC_TO_ASCII_MAP:
            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,
                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,
                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,
                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
                    250,251,252,253,254,255)
            import string
            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
        return s.translate(c.EBCDIC_TO_ASCII_MAP)
MS_CHARS = { '\x80' : ('euro', '20AC'),
'\x81' : ' ',
'\x82' : ('sbquo', '201A'),
'\x83' : ('fnof', '192'),
'\x84' : ('bdquo', '201E'),
'\x85' : ('hellip', '2026'),
'\x86' : ('dagger', '2020'),
'\x87' : ('Dagger', '2021'),
'\x88' : ('circ', '2C6'),
'\x89' : ('permil', '2030'),
'\x8A' : ('Scaron', '160'),
'\x8B' : ('lsaquo', '2039'),
'\x8C' : ('OElig', '152'),
'\x8D' : '?',
'\x8E' : ('#x17D', '17D'),
'\x8F' : '?',
'\x90' : '?',
'\x91' : ('lsquo', '2018'),
'\x92' : ('rsquo', '2019'),
'\x93' : ('ldquo', '201C'),
'\x94' : ('rdquo', '201D'),
'\x95' : ('bull', '2022'),
'\x96' : ('ndash', '2013'),
'\x97' : ('mdash', '2014'),
'\x98' : ('tilde', '2DC'),
'\x99' : ('trade', '2122'),
'\x9a' : ('scaron', '161'),
'\x9b' : ('rsaquo', '203A'),
'\x9c' : ('oelig', '153'),
'\x9d' : '?',
'\x9e' : ('#x17E', '17E'),
'\x9f' : ('Yuml', ''),}
#######################################################################
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    import sys
    # Parse markup from stdin and emit the indented tree
    # (Python 2 print statement).
    soup = BeautifulSoup(sys.stdin)
    print soup.prettify()
| bsd-3-clause |
elewis33/doorstop | doorstop/server/utilities.py | 1 | 1176 | """Shared functions for the `doorstop.server` package."""
from doorstop import common
from doorstop import settings
log = common.logger(__name__)
class StripPathMiddleware(object):  # pylint: disable=R0903
    """WSGI middleware that removes trailing slashes from request paths.

    Wraps another WSGI application and rewrites ``PATH_INFO`` in the
    environ before delegating, so ``/foo/`` and ``/foo`` route alike.
    """

    def __init__(self, app):
        self.app = app

    def __call__(self, e, h):  # pragma: no cover (integration test)
        path = e['PATH_INFO']
        if path.endswith('/'):
            e['PATH_INFO'] = path.rstrip('/')
        return self.app(e, h)
def build_url(host=None, port=None, path=None):
    """Assemble the server's base URL, optionally with *path* appended.

    Missing host/port fall back to the configured defaults; port 80 is
    omitted from the result.  Returns None when no host is available.
    """
    host = host or settings.SERVER_HOST
    port = port or settings.SERVER_PORT
    log.debug("building URL: {} + {} + {}".format(host, port, path))
    if not host:
        return None
    parts = ['http://{}'.format(host)]
    if port != 80:
        parts.append(':{}'.format(port))
    if path:
        parts.append(path)
    return ''.join(parts)
def json_response(request):  # pragma: no cover (integration test)
    """Tell whether the client asked for a JSON response.

    True when the query string carries ``format=json`` or the request's
    content type is ``application/json``.
    """
    return (request.query.get('format') == 'json'
            or request.content_type == 'application/json')
| lgpl-3.0 |
leorochael/odoo | addons/account/wizard/account_report_partner_ledger.py | 378 | 3538 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_partner_ledger(osv.osv_memory):
    """
    Wizard that configures and launches the Partner Ledger report,
    filtered by date, period, or unreconciled entries.
    """
    _name = 'account.partner.ledger'
    _inherit = 'account.common.partner.report'
    _description = 'Account Partner Ledger'

    _columns = {
        'initial_balance': fields.boolean('Include Initial Balances',
                                          help='If you selected to filter by date or period, this field allow you to add a row to display the amount of debit/credit/balance that precedes the filter you\'ve set.'),
        'filter': fields.selection([('filter_no', 'No Filters'), ('filter_date', 'Date'), ('filter_period', 'Periods'), ('unreconciled', 'Unreconciled Entries')], "Filter by", required=True),
        'page_split': fields.boolean('One Partner Per Page', help='Display Ledger Report with One partner per page'),
        'amount_currency': fields.boolean("With Currency", help="It adds the currency column on report if the currency differs from the company currency."),
        'journal_ids': fields.many2many('account.journal', 'account_partner_ledger_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
    }
    _defaults = {
        'initial_balance': False,
        'page_split': False,
    }

    def onchange_filter(self, cr, uid, ids, filter='filter_no', fiscalyear_id=False, context=None):
        """Clear date/period bounds when the chosen filter makes them moot."""
        res = super(account_partner_ledger, self).onchange_filter(cr, uid, ids, filter=filter, fiscalyear_id=fiscalyear_id, context=context)
        if filter in ('filter_no', 'unreconciled'):
            cleared = {'initial_balance': False, 'period_from': False, 'period_to': False, 'date_from': False, 'date_to': False}
            if filter == 'unreconciled':
                # Unreconciled entries span fiscal years, so drop that too.
                cleared['fiscalyear_id'] = False
            res['value'].update(cleared)
        return res

    def _print_report(self, cr, uid, ids, data, context=None):
        """Collect the wizard's values and launch the matching report action."""
        if context is None:
            context = {}
        data = self.pre_print_report(cr, uid, ids, data, context=context)
        wizard_fields = ['initial_balance', 'filter', 'page_split', 'amount_currency']
        data['form'].update(self.read(cr, uid, ids, wizard_fields)[0])
        # One-partner-per-page uses a dedicated report template.
        if data['form'].get('page_split') is True:
            report_name = 'account.report_partnerledgerother'
        else:
            report_name = 'account.report_partnerledger'
        return self.pool['report'].get_action(cr, uid, [], report_name, data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
qnib/qcollect | resources/diamond/collectors/filestat/filestat.py | 23 | 9658 | # coding=utf-8
"""
Uses lsof to collect data on number of open files per user per type
#### Config Options
* user_include - This is list of users to collect data for. If this is left
empty, its a wildcard to collector for all users (default = None)
* user_exclude - This is a list of users to exclude from collecting data. If
this is left empty, no specific users will be excluded (default = None)
* group_include - This is a list of groups to include in data collection. If
this DOES NOT override user_exclude. (default = None)
* group_exclude - This is a list of groups to exclude from collecting data.
It DOES NOT override user_include. (default = None)
* uid_min - This creates a floor for the user's uid. This means that it WILL
NOT collect data for any user with a uid LOWER than the specified minimum,
unless the user is told to be included by user_include (default = None)
* uid_max - This creates a ceiling for the user's uid. This means that it WILL
NOT collect data for any user with a uid HIGHER than the specified maximum,
unless the user is told to be included by user_include (default = None)
*** Priority Explanation ***
This is an explanation of the priority in which users, groups, and uid, are
evaluated. EXCLUDE ALWAYS OVERRULES INCLUDE within the same level (ie within
users or group)
* user_include/exclude (top level/priority)
* group_include/exclude (second level: if user not in user_include/exclude,
groups takes affect)
* uid_min/max (third level: if user not met above qualifications, uids
take affect)
* type_include - This is a list of file types to collect ('REG', 'DIR", 'FIFO'
, etc). If left empty, will collect for all file types. (Note: it suggested
to not leave type_include empty, as it would add significant load to your
graphite box(es) (default = None)
* type_exclude - This is a list of tile types to exclude from being collected
for. If left empty, no file types will be excluded. (default = None)
* collect_user_data - This enables or disables the collection of user specific
file handles. (default = False)
#### Dependencies
* /proc/sys/fs/file-nr
* /usr/sbin/lsof
"""
import diamond.collector
import re
import os
_RE = re.compile(r'(\d+)\s+(\d+)\s+(\d+)')
class FilestatCollector(diamond.collector.Collector):
    """Publish system-wide open-file-handle counts from /proc/sys/fs/file-nr
    and, when ``collect_user_data`` is enabled, per-user/per-type counts
    gathered by shelling out to ``lsof``.
    """

    # Kernel file-handle statistics: "allocated  unused  max" triple.
    PROC = '/proc/sys/fs/file-nr'
    def get_default_config_help(self):
        """Return help text for this collector (no extra options documented)."""
        config_help = super(FilestatCollector, self).get_default_config_help()
        config_help.update({
        })
        return config_help
    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(FilestatCollector, self).get_default_config()
        config.update({
            'path': 'files',
            'user_include': None,
            'user_exclude': None,
            'group_include': None,
            'group_exclude': None,
            'uid_min': 0,
            'uid_max': 65536,
            'type_include': None,
            'type_exclude': None,
            'collect_user_data': False
        })
        return config
    def get_userlist(self):
        """
        This collects all the users with open files on the system, and filters
        based on the variables user_include and user_exclude

        Priority: user_include/exclude beat group_include/exclude, which
        beat the uid_min/uid_max bounds (see the module docstring).
        """
        # convert user/group lists to arrays if strings
        # (config values may arrive as whitespace-separated strings)
        if isinstance(self.config['user_include'], basestring):
            self.config['user_include'] = self.config['user_include'].split()
        if isinstance(self.config['user_exclude'], basestring):
            self.config['user_exclude'] = self.config['user_exclude'].split()
        if isinstance(self.config['group_include'], basestring):
            self.config['group_include'] = self.config['group_include'].split()
        if isinstance(self.config['group_exclude'], basestring):
            self.config['group_exclude'] = self.config['group_exclude'].split()
        # every user owning more than one open file descriptor
        rawusers = os.popen("lsof | awk '{ print $3 }' | sort | uniq -d"
                            ).read().split()
        userlist = []
        # remove any not on the user include list
        if (self.config['user_include'] is None
                or len(self.config['user_include']) == 0):
            userlist = rawusers
        else:
            # only work with specified include list, which is added at the end
            userlist = []
        # add any user in the group include list
        addedByGroup = []
        if (self.config['group_include'] is not None
                and len(self.config['group_include']) > 0):
            for u in rawusers:
                # NOTE(review): logs every raw user at INFO on each poll —
                # looks like leftover debugging; confirm before relying on it.
                self.log.info(u)
                # get list of groups of user
                user_groups = os.popen("id -Gn %s" % (u)).read().split()
                for gi in self.config['group_include']:
                    if gi in user_groups and u not in userlist:
                        userlist.append(u)
                        addedByGroup.append(u)
                        break
        # remove any user in the exclude group list
        if (self.config['group_exclude'] is not None
                and len(self.config['group_exclude']) > 0):
            # create tmp list to iterate over while editing userlist
            tmplist = userlist[:]
            for u in tmplist:
                # get list of groups of user
                groups = os.popen("id -Gn %s" % (u)).read().split()
                for gi in self.config['group_exclude']:
                    if gi in groups:
                        userlist.remove(u)
                        break
        # remove any that aren't within the uid limits
        # make sure uid_min/max are ints
        self.config['uid_min'] = int(self.config['uid_min'])
        self.config['uid_max'] = int(self.config['uid_max'])
        tmplist = userlist[:]
        for u in tmplist:
            # users explicitly included (by name or group) bypass uid bounds
            if (self.config['user_include'] is None
                    or u not in self.config['user_include']):
                if u not in addedByGroup:
                    uid = int(os.popen("id -u %s" % (u)).read())
                    if (uid < self.config['uid_min']
                            and self.config['uid_min'] is not None
                            and u in userlist):
                        userlist.remove(u)
                    if (uid > self.config['uid_max']
                            and self.config['uid_max'] is not None
                            and u in userlist):
                        userlist.remove(u)
        # add users that are in the users include list
        if self.config['user_include'] is not None and len(
                self.config['user_include']) > 0:
            for u in self.config['user_include']:
                if u in rawusers and u not in userlist:
                    userlist.append(u)
        # remove any that is on the user exclude list
        if self.config['user_exclude'] is not None and len(
                self.config['user_exclude']) > 0:
            for u in self.config['user_exclude']:
                if u in userlist:
                    userlist.remove(u)
        return userlist
    def get_typelist(self):
        """
        This collects all avaliable types and applies include/exclude filters
        """
        typelist = []
        # convert type list into arrays if strings
        if isinstance(self.config['type_include'], basestring):
            self.config['type_include'] = self.config['type_include'].split()
        if isinstance(self.config['type_exclude'], basestring):
            self.config['type_exclude'] = self.config['type_exclude'].split()
        # remove any not in include list
        if self.config['type_include'] is None or len(
                self.config['type_include']) == 0:
            # no include list: discover every type lsof reports
            typelist = os.popen("lsof | awk '{ print $5 }' | sort | uniq -d"
                                ).read().split()
        else:
            typelist = self.config['type_include']
        # remove any in the exclude list
        if self.config['type_exclude'] is not None and len(
                self.config['type_include']) > 0:
            for t in self.config['type_exclude']:
                if t in typelist:
                    typelist.remove(t)
        return typelist
    def process_lsof(self, users, types):
        """
        Get the list of users and file types to collect for and collect the
        data from lsof

        Returns a nested dict: {user: {type: open_file_count}}.
        """
        d = {}
        for u in users:
            d[u] = {}
            # one lsof invocation per user; -b avoids blocking kernel calls
            tmp = os.popen("lsof -bu %s | awk '{ print $5 }'" % (
                u)).read().split()
            for t in types:
                d[u][t] = tmp.count(t)
        return d
    def collect(self):
        """Publish the file-nr triple and, optionally, per-user counts."""
        if not os.access(self.PROC, os.R_OK):
            return None
        # collect total open files
        file = open(self.PROC)
        for line in file:
            match = _RE.match(line)
            if match:
                self.publish('assigned', int(match.group(1)))
                self.publish('unused', int(match.group(2)))
                self.publish('max', int(match.group(3)))
        file.close()
        # collect open files per user per type
        if self.config['collect_user_data']:
            data = self.process_lsof(self.get_userlist(), self.get_typelist())
            for ukey in data.iterkeys():
                for tkey in data[ukey].iterkeys():
                    self.log.debug('files.user.%s.%s %s' % (
                        ukey, tkey, int(data[ukey][tkey])))
                    self.publish('user.%s.%s' % (ukey, tkey),
                                 int(data[ukey][tkey]))
| apache-2.0 |
sudheesh001/oh-mainline | mysite/search/migrations/0047_project_icons_mark_as_wrong.py | 17 | 10858 | # This file is part of OpenHatch.
# Copyright (C) 2010 Parker Phinney
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
    """South migration: add (forwards) or drop (backwards) the
    ``icon_is_wrong`` boolean column on the ``search_project`` table.
    """

    def forwards(self, orm):

        # Adding field 'Project.icon_is_wrong'
        db.add_column('search_project', 'icon_is_wrong', orm['search.project:icon_is_wrong'])

    def backwards(self, orm):

        # Deleting field 'Project.icon_is_wrong'
        db.delete_column('search_project', 'icon_is_wrong')

    # Frozen ORM state auto-generated by South at migration time;
    # describes the schema as of this migration, not the live models.
    models = {
        'auth.group': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)"},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'search.answer': {
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'author_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['search.ProjectInvolvementQuestion']"}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
        },
        'search.bug': {
            'as_appears_in_distribution': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
            'bize_size_tag_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'concerns_just_documentation': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'date_reported': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
            'last_touched': ('django.db.models.fields.DateTimeField', [], {}),
            'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'people_involved': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'search.bugalert': {
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
            'how_many_bugs_at_time_of_request': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'query_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
        },
        'search.hitcountcache': {
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'hashed_query': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
            'hit_count': ('django.db.models.fields.IntegerField', [], {}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'search.project': {
            'cached_contributor_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_is_wrong': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'logo_contains_name': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True'})
        },
        'search.projectinvolvementquestion': {
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_bug_style': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'key_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {})
        }
    }

    complete_apps = ['search']
| agpl-3.0 |
bealdav/OpenUpgrade | addons/base_action_rule/tests/__init__.py | 178 | 1129 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import base_action_rule_test
# Test modules discovered and run by the OpenERP test framework for this addon.
checks = [
    base_action_rule_test,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nagyistoce/odoo-dev-odoo | openerp/addons/test_converter/tests/test_html.py | 66 | 13431 | # -*- encoding: utf-8 -*-
import json
import os
import datetime
from lxml import etree
from openerp.tests import common
from openerp.tools import html_escape as e
from openerp.addons.base.ir import ir_qweb
directory = os.path.dirname(__file__)
class TestExport(common.TransactionCase):
    # Subclasses set this to the model whose fields are rendered.
    _model = None

    def setUp(self):
        super(TestExport, self).setUp()
        self.Model = self.registry(self._model)

    def get_field(self, name):
        """Return the field object named *name* on the model under test."""
        return self.Model._fields[name]

    def get_converter(self, name, type=None):
        """Return a callable rendering values of field *name* to escaped HTML.

        Resolves the most specific ``ir.qweb.field`` converter model:
        first the explicit *type*, then the field's own type, then the
        generic fallback.
        """
        field = self.get_field(name)
        for postfix in (type, field.type, ''):
            if postfix is None:
                continue
            parts = ['ir', 'qweb', 'field']
            if postfix:
                parts.append(postfix)
            try:
                model = self.registry('.'.join(parts))
            except KeyError:
                continue
            break
        return lambda value, options=None, context=None: e(model.value_to_html(
            self.cr, self.uid, value, field, options=options, context=context))
class TestBasicExport(TestExport):
    # Concrete test model exercised by the field-conversion tests below.
    _model = 'test_converter.test_model'
class TestCharExport(TestBasicExport):
    def test_char(self):
        """Char fields render through the char converter unchanged."""
        convert = self.get_converter('char')
        self.assertEqual(convert('foo'), 'foo')
        self.assertEqual(convert("foo<bar>"), "foo<bar>")
class TestIntegerExport(TestBasicExport):
    def test_integer(self):
        """Integers render as their plain decimal string."""
        convert = self.get_converter('integer')
        self.assertEqual(convert(42), "42")
class TestFloatExport(TestBasicExport):
    def setUp(self):
        super(TestFloatExport, self).setUp()
        # Use a thousands grouping of 3 so the 1,234,567.89
        # expectation below holds for the default language.
        self.registry('res.lang').write(self.cr, self.uid, [1], {
            'grouping': '[3,0]'
        })

    def test_float(self):
        """Bare floats keep their natural precision, with grouping."""
        convert = self.get_converter('float')
        self.assertEqual(convert(42.0), "42.0")
        self.assertEqual(convert(42.0100), "42.01")
        self.assertEqual(convert(42.01234), "42.01234")
        self.assertEqual(convert(1234567.89), '1,234,567.89')

    def test_numeric(self):
        """The numeric widget rounds to two decimal places."""
        convert = self.get_converter('numeric')
        self.assertEqual(convert(42.0), '42.00')
        self.assertEqual(convert(42.01234), '42.01')
class TestCurrencyExport(TestExport):
    """Rendering of monetary values with currency symbol placement."""
    _model = 'test_converter.monetary'
    def setUp(self):
        super(TestCurrencyExport, self).setUp()
        self.Currency = self.registry('res.currency')
        self.base = self.create(self.Currency, name="Source", symbol=u'source')
    def create(self, model, context=None, **values):
        """Create a record from **values and return it as a browse record."""
        return model.browse(
            self.cr, self.uid,
            model.create(self.cr, self.uid, values, context=context),
            context=context)
    def convert(self, obj, dest):
        """Render obj.value through the monetary converter, targeting
        the *dest* currency (exposed to qweb as ``c2``)."""
        converter = self.registry('ir.qweb.field.monetary')
        options = {
            'widget': 'monetary',
            'display_currency': 'c2'
        }
        # inherit_branding makes the converter emit data-oe-* attributes
        context = dict(inherit_branding=True)
        converted = converter.to_html(
            self.cr, self.uid, 'value', obj, options,
            etree.Element('span'),
            {'field': 'obj.value', 'field-options': json.dumps(options)},
            '', ir_qweb.QWebContext(self.cr, self.uid, {'obj': obj, 'c2': dest, }),
            context=context,
        )
        return converted
    def test_currency_post(self):
        # symbol after the amount (default currency position)
        currency = self.create(self.Currency, name="Test", symbol=u"test")
        obj = self.create(self.Model, value=0.12)
        converted = self.convert(obj, dest=currency)
        self.assertEqual(
            converted,
            '<span data-oe-model="{obj._model._name}" data-oe-id="{obj.id}" '
            'data-oe-field="value" data-oe-type="monetary" '
            'data-oe-expression="obj.value">'
            '<span class="oe_currency_value">0.12</span>'
            ' {symbol}</span>'.format(
                obj=obj,
                symbol=currency.symbol.encode('utf-8')
            ),)
    def test_currency_pre(self):
        # symbol before the amount (position='before')
        currency = self.create(
            self.Currency, name="Test", symbol=u"test", position='before')
        obj = self.create(self.Model, value=0.12)
        converted = self.convert(obj, dest=currency)
        self.assertEqual(
            converted,
            '<span data-oe-model="{obj._model._name}" data-oe-id="{obj.id}" '
            'data-oe-field="value" data-oe-type="monetary" '
            'data-oe-expression="obj.value">'
            '{symbol} '
            '<span class="oe_currency_value">0.12</span>'
            '</span>'.format(
                obj=obj,
                symbol=currency.symbol.encode('utf-8')
            ),)
    def test_currency_precision(self):
        """ Precision should be the currency's, not the float field's
        """
        currency = self.create(self.Currency, name="Test", symbol=u"test",)
        obj = self.create(self.Model, value=0.1234567)
        converted = self.convert(obj, dest=currency)
        self.assertEqual(
            converted,
            '<span data-oe-model="{obj._model._name}" data-oe-id="{obj.id}" '
            'data-oe-field="value" data-oe-type="monetary" '
            'data-oe-expression="obj.value">'
            '<span class="oe_currency_value">0.12</span>'
            ' {symbol}</span>'.format(
                obj=obj,
                symbol=currency.symbol.encode('utf-8')
            ),)
class TestTextExport(TestBasicExport):
    """Text fields: newlines become <br> markers in the rendered output."""
    def test_text(self):
        converter = self.get_converter('text')
        # NOTE(review): the expected literals below appear to have lost
        # HTML entity escaping when this file was extracted — verify
        # against the upstream test before editing them.
        value = converter("This is my text-kai")
        self.assertEqual(value, "This is my text-kai")
        value = converter("""
            . The current line (address) in the buffer.
            $ The last line in the buffer.
            n The nth, line in the buffer where n is a number in the range [0,$].
            $ The last line in the buffer.
            - The previous line. This is equivalent to -1 and may be repeated with cumulative effect.
            -n The nth previous line, where n is a non-negative number.
            + The next line. This is equivalent to +1 and may be repeated with cumulative effect.
        """)
        self.assertEqual(value, """<br>
            . The current line (address) in the buffer.<br>
            $ The last line in the buffer.<br>
            n The nth, line in the buffer where n is a number in the range [0,$].<br>
            $ The last line in the buffer.<br>
            - The previous line. This is equivalent to -1 and may be repeated with cumulative effect.<br>
            -n The nth previous line, where n is a non-negative number.<br>
            + The next line. This is equivalent to +1 and may be repeated with cumulative effect.<br>
        """)
        value = converter("""
            fgdkls;hjas;lj <b>fdslkj</b> d;lasjfa lkdja <a href=http://spam.com>lfks</a>
            fldkjsfhs <i style="color: red"><a href="http://spamspam.com">fldskjh</a></i>
        """)
        self.assertEqual(value, """<br>
            fgdkls;hjas;lj <b>fdslkj</b> d;lasjfa lkdja <a href=http://spam.com>lfks</a><br>
            fldkjsfhs <i style="color: red"><a href="http://spamspam.com">fldskjh</a></i><br>
        """)
class TestMany2OneExport(TestBasicExport):
    """many2one fields render as the related record's display name."""
    def test_many2one(self):
        Sub = self.registry('test_converter.test_model.sub')
        # two parent records pointing at subrecords with plain/markup names
        id0 = self.Model.create(self.cr, self.uid, {
            'many2one': Sub.create(self.cr, self.uid, {'name': "Foo"})
        })
        id1 = self.Model.create(self.cr, self.uid, {
            'many2one': Sub.create(self.cr, self.uid, {'name': "Fo<b>o</b>"})
        })
        def converter(record):
            model = self.registry('ir.qweb.field.many2one')
            return e(model.record_to_html(self.cr, self.uid, 'many2one', record))
        value = converter(self.Model.browse(self.cr, self.uid, id0))
        self.assertEqual(value, "Foo")
        # NOTE(review): expected value looks like it lost HTML escaping
        # during extraction — confirm against upstream.
        value = converter(self.Model.browse(self.cr, self.uid, id1))
        self.assertEqual(value, "Fo<b>o</b>")
class TestBinaryExport(TestBasicExport):
    """Image converter: embeds images as data URIs, rejects non-images."""
    def test_image(self):
        field = self.get_field('binary')
        converter = self.registry('ir.qweb.field.image')
        with open(os.path.join(directory, 'test_vectors', 'image'), 'rb') as f:
            content = f.read()
        encoded_content = content.encode('base64')
        value = e(converter.value_to_html(
            self.cr, self.uid, encoded_content, field))
        self.assertEqual(
            value, '<img src="data:image/jpeg;base64,%s">' % (
                encoded_content
            ))
        # Non-image payloads must be rejected.
        # NOTE(review): the extra 'binary' positional argument below shifts
        # the value/field/options parameters relative to the call above, so
        # assertRaises may pass for the wrong reason — verify upstream.
        with open(os.path.join(directory, 'test_vectors', 'pdf'), 'rb') as f:
            content = f.read()
        with self.assertRaises(ValueError):
            e(converter.value_to_html(
                self.cr, self.uid, 'binary', content.encode('base64'), field))
        with open(os.path.join(directory, 'test_vectors', 'pptx'), 'rb') as f:
            content = f.read()
        with self.assertRaises(ValueError):
            e(converter.value_to_html(
                self.cr, self.uid, 'binary', content.encode('base64'), field))
class TestSelectionExport(TestBasicExport):
    """Selection fields render the human-readable label, not the key."""
    def test_selection(self):
        # one record exercising both an integer- and a string-keyed selection
        [record] = self.Model.browse(self.cr, self.uid, [self.Model.create(self.cr, self.uid, {
            'selection': 2,
            'selection_str': 'C',
        })])
        converter = self.registry('ir.qweb.field.selection')
        field_name = 'selection'
        value = converter.record_to_html(self.cr, self.uid, field_name, record)
        self.assertEqual(value, "réponse B")
        field_name = 'selection_str'
        value = converter.record_to_html(self.cr, self.uid, field_name, record)
        self.assertEqual(value, "Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?")
class TestHTMLExport(TestBasicExport):
    """An HTML field's content must pass through the converter untouched."""
    def test_html(self):
        convert = self.get_converter('html')
        markup = '<span>span</span>'
        self.assertEqual(convert(markup), markup)
class TestDatetimeExport(TestBasicExport):
    """Date/datetime rendering, with the user pinned to a known timezone."""
    def setUp(self):
        super(TestDatetimeExport, self).setUp()
        # Pin the user's timezone so datetime conversions are deterministic
        # (the 11-hour shift asserted below depends on it).
        self.registry('res.users').write(
            self.cr, self.uid, self.uid, {'tz': 'Pacific/Niue'}, context=None)
    def test_date(self):
        # Default lang/format is US: month/day/year.
        self.assertEqual(
            self.get_converter('date')('2011-05-03'), '05/03/2011')
    def test_datetime(self):
        # 11:12:13 shifts back 11 hours into the user's timezone.
        self.assertEqual(
            self.get_converter('datetime')('2011-05-03 11:12:13'),
            '05/03/2011 00:12:13')
    def test_custom_format(self):
        # An explicit pattern in the options overrides the locale default,
        # for datetimes and plain dates alike.
        opts = {'format': 'MMMM d'}
        rendered_dt = self.get_converter('datetime')(
            '2011-03-02 11:12:13', options=opts)
        rendered_d = self.get_converter('date')('2001-03-02', options=opts)
        self.assertEqual(rendered_dt, 'March 2')
        self.assertEqual(rendered_d, 'March 2')
class TestDurationExport(TestBasicExport):
    """Rendering of float fields as human-readable durations (fr_FR)."""
    def setUp(self):
        super(TestDurationExport, self).setUp()
        # The French language must be loaded, otherwise rendering
        # silently falls back on en_US.
        self.registry('res.lang').load_lang(self.cr, self.uid, 'fr_FR')
    def _convert(self, amount, unit):
        # One-line helper: render *amount* of *unit* in French.
        converter = self.get_converter('float', 'duration')
        return converter(amount, {'unit': unit}, {'lang': 'fr_FR'})
    def test_negative(self):
        # Negative durations are rejected outright.
        with self.assertRaises(ValueError):
            self.get_converter('float', 'duration')(-4)
    def test_missing_unit(self):
        # The 'unit' option is mandatory.
        with self.assertRaises(ValueError):
            self.get_converter('float', 'duration')(4)
    def test_basic(self):
        self.assertEqual(self._convert(4, 'hour'), u'4 heures')
        self.assertEqual(self._convert(50, 'second'), u'50 secondes')
    def test_multiple(self):
        # Fractional amounts spill over into the next smaller unit.
        self.assertEqual(self._convert(1.5, 'hour'), u"1 heure 30 minutes")
        self.assertEqual(self._convert(72, 'second'), u"1 minute 12 secondes")
class TestRelativeDatetime(TestBasicExport):
    # not sure how a test based on "current time" should be tested. Even less
    # so as it would mostly be a test of babel...
    def setUp(self):
        super(TestRelativeDatetime, self).setUp()
        # needs to have lang installed otherwise falls back on en_US
        self.registry('res.lang').load_lang(self.cr, self.uid, 'fr_FR')
    def test_basic(self):
        # One hour in the past should render as a localized relative delta.
        converter = self.get_converter('datetime', 'relative')
        t = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
        result = converter(t, context={'lang': 'fr_FR'})
        # NOTE(review): time-sensitive -- may flake if the conversion runs
        # just past an hour boundary.
        self.assertEqual(result, u"il y a 1 heure")
| agpl-3.0 |
nagyistoce/odoo-dev-odoo | addons/hr_expense/report/__init__.py | 380 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_expense_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
raychorn/knowu | django/djangononrelsample2/django/contrib/localflavor/us/models.py | 196 | 1261 | from django.utils.translation import ugettext_lazy as _
from django.db.models.fields import CharField
from django.contrib.localflavor.us.us_states import STATE_CHOICES
from django.contrib.localflavor.us.us_states import USPS_CHOICES
class USStateField(CharField):
    """Model field holding a two-letter U.S. state abbreviation."""
    description = _("U.S. state (two uppercase letters)")
    def __init__(self, *args, **kwargs):
        # Always pin choices and length, overriding caller-supplied values.
        kwargs.update(choices=STATE_CHOICES, max_length=2)
        super(USStateField, self).__init__(*args, **kwargs)
class USPostalCodeField(CharField):
    """Model field holding a two-letter USPS code."""
    description = _("U.S. postal code (two uppercase letters)")
    def __init__(self, *args, **kwargs):
        # Always pin choices and length, overriding caller-supplied values.
        kwargs.update(choices=USPS_CHOICES, max_length=2)
        super(USPostalCodeField, self).__init__(*args, **kwargs)
class PhoneNumberField(CharField):
    """Model field for phone numbers; form validation via USPhoneNumberField."""
    description = _("Phone number")
    def __init__(self, *args, **kwargs):
        kwargs.update(max_length=20)
        super(PhoneNumberField, self).__init__(*args, **kwargs)
    def formfield(self, **kwargs):
        # Local import: the forms module is only needed when a form field
        # is actually being built.
        from django.contrib.localflavor.us.forms import USPhoneNumberField
        # Caller-supplied kwargs take precedence over the default form class.
        defaults = dict({'form_class': USPhoneNumberField}, **kwargs)
        return super(PhoneNumberField, self).formfield(**defaults)
| lgpl-3.0 |
TGThorax/python-ka2ring | src/leeftijd.py | 1 | 1962 | # Oefening: Vraag de geboortedatum van de gebruiker en zeg de leeftijd.
# Current date, hard-coded at the time of writing.
huidig_jaar = 2017
huidige_maand = 10
huidige_dag = 24
jaar = int(input("In welk jaar ben je geboren? "))
maand = int(input("En in welke maand? (getal) "))
# We only need the day once we know the birth month is the current month!
# You could of course already ask for it here if you want.
leeftijd = huidig_jaar - jaar
if (maand > huidige_maand): # The user has not had their birthday yet
    leeftijd -= 1 # same as "leeftijd = leeftijd - 1"
elif (maand == huidige_maand):
    dag = int(input("En welke dag? (getal) "))
    if (dag > huidige_dag):
        leeftijd -= 1
    elif (dag == huidige_dag):
        # leeftijd = leeftijd # That would do nothing at all, of course
        print("Gelukkige verjaardag!")
    # else: # Only (dag < huidige_dag) remains =>
    #       # the user already had their birthday this month,
    #       # so nothing needs to change.
# else: # The user certainly already had their birthday,
#       # since only maand < huidige_maand remains --
#       # but nothing needs adjusting.
print("Dan ben je " + str(leeftijd) + " jaar oud.")
## Exercises without solutions (mail if you have questions!).
#
# Exercise: start with leeftijd = huidig_jaar - jaar - 1 and increase that
# value when needed. Compare the conditions there with the ones here.
# What is the difference in the comparison signs?
#
# Another exercise: validate the values the user enters.
# Make sure no day later than today can be entered.
# Try to do that as independently as possible from the code above.
#
# Thought exercise: can you evaluate the 2 conditions above in the previous
# assignment without repeating yourself? (that is a question, not a challenge!)
# If you are allowed to change the code above, is it possible then?
# Wat denk je dat de beste optie is? | mit |
matthiasdiener/spack | lib/spack/external/jinja2/parser.py | 89 | 35875 | # -*- coding: utf-8 -*-
"""
jinja2.parser
~~~~~~~~~~~~~
Implements the template parser.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
from jinja2.lexer import describe_token, describe_token_expr
from jinja2._compat import imap
_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
'macro', 'include', 'from', 'import',
'set', 'with', 'autoescape'])
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
_math_nodes = {
'add': nodes.Add,
'sub': nodes.Sub,
'mul': nodes.Mul,
'div': nodes.Div,
'floordiv': nodes.FloorDiv,
'mod': nodes.Mod,
}
class Parser(object):
"""This is the central parsing class Jinja2 uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
    def __init__(self, environment, source, name=None, filename=None,
                 state=None):
        self.environment = environment
        # Tokenize eagerly; parsing then pulls tokens from this stream.
        self.stream = environment._tokenize(source, name, filename, state)
        self.name = name
        self.filename = filename
        self.closed = False
        # Maps extension tag name -> that extension's parse callback.
        self.extensions = {}
        for extension in environment.iter_extensions():
            for tag in extension.tags:
                self.extensions[tag] = extension.parse
        # Counter backing free_identifier(); the stacks track currently open
        # tags and the end tokens expected for them (used in error messages).
        self._last_identifier = 0
        self._tag_stack = []
        self._end_token_stack = []
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
"""Convenience method that raises `exc` with the message, passed
line number or last line number as well as the current name and
filename.
"""
if lineno is None:
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename)
def _fail_ut_eof(self, name, end_token_stack, lineno):
expected = []
for exprs in end_token_stack:
expected.extend(imap(describe_token_expr, exprs))
if end_token_stack:
currently_looking = ' or '.join(
"'%s'" % describe_token_expr(expr)
for expr in end_token_stack[-1])
else:
currently_looking = None
if name is None:
message = ['Unexpected end of template.']
else:
message = ['Encountered unknown tag \'%s\'.' % name]
if currently_looking:
if name is not None and name in expected:
message.append('You probably made a nesting mistake. Jinja '
'is expecting this tag, but currently looking '
'for %s.' % currently_looking)
else:
message.append('Jinja was looking for the following tags: '
'%s.' % currently_looking)
if self._tag_stack:
message.append('The innermost block that needs to be '
'closed is \'%s\'.' % self._tag_stack[-1])
self.fail(' '.join(message), lineno)
def fail_unknown_tag(self, name, lineno=None):
"""Called if the parser encounters an unknown tag. Tries to fail
with a human readable error message that could help to identify
the problem.
"""
return self._fail_ut_eof(name, self._end_token_stack, lineno)
def fail_eof(self, end_tokens=None, lineno=None):
"""Like fail_unknown_tag but for end of template situations."""
stack = list(self._end_token_stack)
if end_tokens is not None:
stack.append(end_tokens)
return self._fail_ut_eof(None, stack, lineno)
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
return False
def free_identifier(self, lineno=None):
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
return rv
def parse_statement(self):
"""Parse a single statement."""
token = self.stream.current
if token.type != 'name':
self.fail('tag name expected', token.lineno)
self._tag_stack.append(token.value)
pop_tag = True
try:
if token.value in _statement_keywords:
return getattr(self, 'parse_' + self.stream.current.value)()
if token.value == 'call':
return self.parse_call_block()
if token.value == 'filter':
return self.parse_filter_block()
ext = self.extensions.get(token.value)
if ext is not None:
return ext(self)
# did not work out, remove the token we pushed by accident
# from the stack so that the unknown tag fail function can
# produce a proper error message.
self._tag_stack.pop()
pop_tag = False
self.fail_unknown_tag(token.value, token.lineno)
finally:
if pop_tag:
self._tag_stack.pop()
def parse_statements(self, end_tokens, drop_needle=False):
"""Parse multiple statements into a list until one of the end tokens
is reached. This is used to parse the body of statements as it also
parses template data if appropriate. The parser checks first if the
current token is a colon and skips it if there is one. Then it checks
for the block end and parses until if one of the `end_tokens` is
reached. Per default the active token in the stream at the end of
the call is the matched end token. If this is not wanted `drop_needle`
can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
self.stream.skip_if('colon')
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
self.stream.expect('block_end')
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
if self.stream.current.type == 'eof':
self.fail_eof(end_tokens)
if drop_needle:
next(self.stream)
return result
def parse_set(self):
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target(with_namespace=True)
if self.stream.skip_if('assign'):
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
filter_node = self.parse_filter(None)
body = self.parse_statements(('name:endset',),
drop_needle=True)
return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
def parse_for(self):
"""Parse a for loop."""
lineno = self.stream.expect('name:for').lineno
target = self.parse_assign_target(extra_end_rules=('name:in',))
self.stream.expect('name:in')
iter = self.parse_tuple(with_condexpr=False,
extra_end_rules=('name:recursive',))
test = None
if self.stream.skip_if('name:if'):
test = self.parse_expression()
recursive = self.stream.skip_if('name:recursive')
body = self.parse_statements(('name:endfor', 'name:else'))
if next(self.stream).value == 'endfor':
else_ = []
else:
else_ = self.parse_statements(('name:endfor',), drop_needle=True)
return nodes.For(target, iter, body, else_, test,
recursive, lineno=lineno)
def parse_if(self):
"""Parse an if construct."""
node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
while 1:
node.test = self.parse_tuple(with_condexpr=False)
node.body = self.parse_statements(('name:elif', 'name:else',
'name:endif'))
node.elif_ = []
node.else_ = []
token = next(self.stream)
if token.test('name:elif'):
node = nodes.If(lineno=self.stream.current.lineno)
result.elif_.append(node)
continue
elif token.test('name:else'):
result.else_ = self.parse_statements(('name:endif',),
drop_needle=True)
break
return result
def parse_with(self):
node = nodes.With(lineno=next(self.stream).lineno)
targets = []
values = []
while self.stream.current.type != 'block_end':
lineno = self.stream.current.lineno
if targets:
self.stream.expect('comma')
target = self.parse_assign_target()
target.set_ctx('param')
targets.append(target)
self.stream.expect('assign')
values.append(self.parse_expression())
node.targets = targets
node.values = values
node.body = self.parse_statements(('name:endwith',),
drop_needle=True)
return node
def parse_autoescape(self):
node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
node.options = [
nodes.Keyword('autoescape', self.parse_expression())
]
node.body = self.parse_statements(('name:endautoescape',),
drop_needle=True)
return nodes.Scope([node])
def parse_block(self):
node = nodes.Block(lineno=next(self.stream).lineno)
node.name = self.stream.expect('name').value
node.scoped = self.stream.skip_if('name:scoped')
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
if self.stream.current.type == 'sub':
self.fail('Block names in Jinja have to be valid Python '
'identifiers and may not contain hyphens, use an '
'underscore instead.')
node.body = self.parse_statements(('name:endblock',), drop_needle=True)
self.stream.skip_if('name:' + node.name)
return node
def parse_extends(self):
node = nodes.Extends(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
return node
def parse_import_context(self, node, default):
if self.stream.current.test_any('name:with', 'name:without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
else:
node.with_context = default
return node
def parse_include(self):
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
if self.stream.current.test('name:ignore') and \
self.stream.look().test('name:missing'):
node.ignore_missing = True
self.stream.skip(2)
else:
node.ignore_missing = False
return self.parse_import_context(node, True)
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:as')
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
def parse_from(self):
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:import')
node.names = []
def parse_context():
if self.stream.current.value in ('with', 'without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
return True
return False
while 1:
if node.names:
self.stream.expect('comma')
if self.stream.current.type == 'name':
if parse_context():
break
target = self.parse_assign_target(name_only=True)
if target.name.startswith('_'):
self.fail('names starting with an underline can not '
'be imported', target.lineno,
exc=TemplateAssertionError)
if self.stream.skip_if('name:as'):
alias = self.parse_assign_target(name_only=True)
node.names.append((target.name, alias.name))
else:
node.names.append(target.name)
if parse_context() or self.stream.current.type != 'comma':
break
else:
self.stream.expect('name')
if not hasattr(node, 'with_context'):
node.with_context = False
return node
def parse_signature(self, node):
node.args = args = []
node.defaults = defaults = []
self.stream.expect('lparen')
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
arg = self.parse_assign_target(name_only=True)
arg.set_ctx('param')
if self.stream.skip_if('assign'):
defaults.append(self.parse_expression())
elif defaults:
self.fail('non-default argument follows default argument')
args.append(arg)
self.stream.expect('rparen')
def parse_call_block(self):
node = nodes.CallBlock(lineno=next(self.stream).lineno)
if self.stream.current.type == 'lparen':
self.parse_signature(node)
else:
node.args = []
node.defaults = []
node.call = self.parse_expression()
if not isinstance(node.call, nodes.Call):
self.fail('expected call', node.lineno)
node.body = self.parse_statements(('name:endcall',), drop_needle=True)
return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
node.body = self.parse_statements(('name:endfilter',),
drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
node.body = self.parse_statements(('name:endmacro',),
drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != 'block_end':
if node.nodes:
self.stream.expect('comma')
node.nodes.append(self.parse_expression())
return node
def parse_assign_target(self, with_tuple=True, name_only=False,
extra_end_rules=None, with_namespace=False):
"""Parse an assignment target. As Jinja2 allows assignments to
tuples, this function can parse all allowed assignment targets. Per
default assignments to tuples are parsed, that can be disable however
by setting `with_tuple` to `False`. If only assignments to names are
wanted `name_only` can be set to `True`. The `extra_end_rules`
parameter is forwarded to the tuple parsing function. If
`with_namespace` is enabled, a namespace assignment may be parsed.
"""
if with_namespace and self.stream.look().type == 'dot':
token = self.stream.expect('name')
next(self.stream) # dot
attr = self.stream.expect('name')
target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
elif name_only:
token = self.stream.expect('name')
target = nodes.Name(token.value, 'store', lineno=token.lineno)
else:
if with_tuple:
target = self.parse_tuple(simplified=True,
extra_end_rules=extra_end_rules)
else:
target = self.parse_primary()
target.set_ctx('store')
if not target.can_assign():
self.fail('can\'t assign to %r' % target.__class__.
__name__.lower(), target.lineno)
return target
def parse_expression(self, with_condexpr=True):
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
"""
if with_condexpr:
return self.parse_condexpr()
return self.parse_or()
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
while self.stream.skip_if('name:if'):
expr2 = self.parse_or()
if self.stream.skip_if('name:else'):
expr3 = self.parse_condexpr()
else:
expr3 = None
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
lineno = self.stream.current.lineno
return expr1
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
while self.stream.skip_if('name:or'):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
while self.stream.skip_if('name:and'):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
if self.stream.current.test('name:not'):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
def parse_compare(self):
lineno = self.stream.current.lineno
expr = self.parse_math1()
ops = []
while 1:
token_type = self.stream.current.type
if token_type in _compare_operators:
next(self.stream)
ops.append(nodes.Operand(token_type, self.parse_math1()))
elif self.stream.skip_if('name:in'):
ops.append(nodes.Operand('in', self.parse_math1()))
elif (self.stream.current.test('name:not') and
self.stream.look().test('name:in')):
self.stream.skip(2)
ops.append(nodes.Operand('notin', self.parse_math1()))
else:
break
lineno = self.stream.current.lineno
if not ops:
return expr
return nodes.Compare(expr, ops, lineno=lineno)
def parse_math1(self):
lineno = self.stream.current.lineno
left = self.parse_concat()
while self.stream.current.type in ('add', 'sub'):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_concat()
left = cls(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_math2()]
while self.stream.current.type == 'tilde':
next(self.stream)
args.append(self.parse_math2())
if len(args) == 1:
return args[0]
return nodes.Concat(args, lineno=lineno)
def parse_math2(self):
lineno = self.stream.current.lineno
left = self.parse_pow()
while self.stream.current.type in ('mul', 'div', 'floordiv', 'mod'):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_pow()
left = cls(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
while self.stream.current.type == 'pow':
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_unary(self, with_filter=True):
token_type = self.stream.current.type
lineno = self.stream.current.lineno
if token_type == 'sub':
next(self.stream)
node = nodes.Neg(self.parse_unary(False), lineno=lineno)
elif token_type == 'add':
next(self.stream)
node = nodes.Pos(self.parse_unary(False), lineno=lineno)
else:
node = self.parse_primary()
node = self.parse_postfix(node)
if with_filter:
node = self.parse_filter_expr(node)
return node
def parse_primary(self):
token = self.stream.current
if token.type == 'name':
if token.value in ('true', 'false', 'True', 'False'):
node = nodes.Const(token.value in ('true', 'True'),
lineno=token.lineno)
elif token.value in ('none', 'None'):
node = nodes.Const(None, lineno=token.lineno)
else:
node = nodes.Name(token.value, 'load', lineno=token.lineno)
next(self.stream)
elif token.type == 'string':
next(self.stream)
buf = [token.value]
lineno = token.lineno
while self.stream.current.type == 'string':
buf.append(self.stream.current.value)
next(self.stream)
node = nodes.Const(''.join(buf), lineno=lineno)
elif token.type in ('integer', 'float'):
next(self.stream)
node = nodes.Const(token.value, lineno=token.lineno)
elif token.type == 'lparen':
next(self.stream)
node = self.parse_tuple(explicit_parentheses=True)
self.stream.expect('rparen')
elif token.type == 'lbracket':
node = self.parse_list()
elif token.type == 'lbrace':
node = self.parse_dict()
else:
self.fail("unexpected '%s'" % describe_token(token), token.lineno)
return node
def parse_tuple(self, simplified=False, with_condexpr=True,
extra_end_rules=None, explicit_parentheses=False):
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method could also return a regular expression instead of a tuple
if no commas where found.
The default parsing mode is a full tuple. If `simplified` is `True`
only names and literals are parsed. The `no_condexpr` parameter is
forwarded to :meth:`parse_expression`.
Because tuples do not require delimiters and may end in a bogus comma
an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not.
"""
lineno = self.stream.current.lineno
if simplified:
parse = self.parse_primary
elif with_condexpr:
parse = self.parse_expression
else:
parse = lambda: self.parse_expression(with_condexpr=False)
args = []
is_tuple = False
while 1:
if args:
self.stream.expect('comma')
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
if self.stream.current.type == 'comma':
is_tuple = True
else:
break
lineno = self.stream.current.lineno
if not is_tuple:
if args:
return args[0]
# if we don't have explicit parentheses, an empty tuple is
# not a valid expression. This would mean nothing (literally
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
self.fail('Expected an expression, got \'%s\'' %
describe_token(self.stream.current))
return nodes.Tuple(args, 'load', lineno=lineno)
def parse_list(self):
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.List(items, lineno=token.lineno)
def parse_dict(self):
token = self.stream.expect('lbrace')
items = []
while self.stream.current.type != 'rbrace':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbrace':
break
key = self.parse_expression()
self.stream.expect('colon')
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
self.stream.expect('rbrace')
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'dot' or token_type == 'lbracket':
node = self.parse_subscript(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_filter_expr(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'pipe':
node = self.parse_filter(node)
elif token_type == 'name' and self.stream.current.value == 'is':
node = self.parse_test(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_subscript(self, node):
token = next(self.stream)
if token.type == 'dot':
attr_token = self.stream.current
next(self.stream)
if attr_token.type == 'name':
return nodes.Getattr(node, attr_token.value, 'load',
lineno=token.lineno)
elif attr_token.type != 'integer':
self.fail('expected name or number', attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
if token.type == 'lbracket':
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.Tuple(args, 'load', lineno=token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
self.fail('expected subscript expression', self.lineno)
def parse_subscribed(self):
lineno = self.stream.current.lineno
if self.stream.current.type == 'colon':
next(self.stream)
args = [None]
else:
node = self.parse_expression()
if self.stream.current.type != 'colon':
return node
next(self.stream)
args = [node]
if self.stream.current.type == 'colon':
args.append(None)
elif self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
if self.stream.current.type == 'colon':
next(self.stream)
if self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
else:
args.append(None)
return nodes.Slice(lineno=lineno, *args)
def parse_call(self, node):
token = self.stream.expect('lparen')
args = []
kwargs = []
dyn_args = dyn_kwargs = None
require_comma = False
def ensure(expr):
if not expr:
self.fail('invalid syntax for function call expression',
token.lineno)
while self.stream.current.type != 'rparen':
if require_comma:
self.stream.expect('comma')
# support for trailing comma
if self.stream.current.type == 'rparen':
break
if self.stream.current.type == 'mul':
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
dyn_args = self.parse_expression()
elif self.stream.current.type == 'pow':
ensure(dyn_kwargs is None)
next(self.stream)
dyn_kwargs = self.parse_expression()
else:
ensure(dyn_args is None and dyn_kwargs is None)
if self.stream.current.type == 'name' and \
self.stream.look().type == 'assign':
key = self.stream.current.value
self.stream.skip(2)
value = self.parse_expression()
kwargs.append(nodes.Keyword(key, value,
lineno=value.lineno))
else:
ensure(not kwargs)
args.append(self.parse_expression())
require_comma = True
self.stream.expect('rparen')
if node is None:
return args, kwargs, dyn_args, dyn_kwargs
return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
lineno=token.lineno)
def parse_filter(self, node, start_inline=False):
while self.stream.current.type == 'pipe' or start_inline:
if not start_inline:
next(self.stream)
token = self.stream.expect('name')
name = token.value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
else:
args = []
kwargs = []
dyn_args = dyn_kwargs = None
node = nodes.Filter(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
start_inline = False
return node
def parse_test(self, node):
token = next(self.stream)
if self.stream.current.test('name:not'):
next(self.stream)
negated = True
else:
negated = False
name = self.stream.expect('name').value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
dyn_args = dyn_kwargs = None
kwargs = []
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
elif (self.stream.current.type in ('name', 'string', 'integer',
'float', 'lparen', 'lbracket',
'lbrace') and not
self.stream.current.test_any('name:else', 'name:or',
'name:and')):
if self.stream.current.test('name:is'):
self.fail('You cannot chain multiple tests with is')
args = [self.parse_primary()]
else:
args = []
node = nodes.Test(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
if negated:
node = nodes.Not(node, lineno=token.lineno)
return node
    def subparse(self, end_tokens=None):
        """Parse template body nodes until the stream ends or one of
        *end_tokens* is reached at a block boundary.

        :param end_tokens: optional iterable of token descriptions that
            terminate this sub-parse (e.g. ``('name:endfor',)``); pushed on
            ``self._end_token_stack`` so nested parsers can report better
            errors.
        :return: list of body nodes.
        """
        body = []
        # raw template data and {{ ... }} outputs are buffered and flushed
        # into a single nodes.Output when a block tag interrupts them
        data_buffer = []
        add_data = data_buffer.append
        if end_tokens is not None:
            self._end_token_stack.append(end_tokens)
        def flush_data():
            # turn the buffered output pieces into one Output node
            if data_buffer:
                lineno = data_buffer[0].lineno
                body.append(nodes.Output(data_buffer[:], lineno=lineno))
                del data_buffer[:]
        try:
            while self.stream:
                token = self.stream.current
                if token.type == 'data':
                    # literal template text; empty strings are skipped
                    if token.value:
                        add_data(nodes.TemplateData(token.value,
                                                    lineno=token.lineno))
                    next(self.stream)
                elif token.type == 'variable_begin':
                    # {{ expression }} — becomes part of the output buffer
                    next(self.stream)
                    add_data(self.parse_tuple(with_condexpr=True))
                    self.stream.expect('variable_end')
                elif token.type == 'block_begin':
                    flush_data()
                    next(self.stream)
                    # stop (without consuming) when we hit an end tag the
                    # caller is waiting for, e.g. "endfor"
                    if end_tokens is not None and \
                       self.stream.current.test_any(*end_tokens):
                        return body
                    rv = self.parse_statement()
                    # some statements expand to several nodes
                    if isinstance(rv, list):
                        body.extend(rv)
                    else:
                        body.append(rv)
                    self.stream.expect('block_end')
                else:
                    raise AssertionError('internal parsing error')
            flush_data()
        finally:
            if end_tokens is not None:
                self._end_token_stack.pop()
        return body
def parse(self):
"""Parse the whole template into a `Template` node."""
result = nodes.Template(self.subparse(), lineno=1)
result.set_environment(self.environment)
return result
| lgpl-2.1 |
clarkfitzg/xray | setup.py | 2 | 4581 | #!/usr/bin/env python
import os
import re
import sys
import warnings
from setuptools import setup, find_packages
MAJOR = 0
MINOR = 5
MICRO = 1
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
DISTNAME = 'xray'
LICENSE = 'Apache'
AUTHOR = 'xray Developers'
AUTHOR_EMAIL = 'xray-dev@googlegroups.com'
URL = 'https://github.com/xray/xray'
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering',
]
INSTALL_REQUIRES = ['numpy >= 1.7', 'pandas >= 0.15.0']
TESTS_REQUIRE = ['nose >= 1.0']
if sys.version_info[:2] < (2, 7):
TESTS_REQUIRE += ["unittest2 == 0.5.1"]
DESCRIPTION = "N-D labeled arrays and datasets in Python"
LONG_DESCRIPTION = """
**xray** is an open source project and Python package that aims to bring the
labeled data power of pandas_ to the physical sciences, by providing
N-dimensional variants of the core pandas data structures.
Our goal is to provide a pandas-like and pandas-compatible toolkit for
analytics on multi-dimensional arrays, rather than the tabular data for which
pandas excels. Our approach adopts the `Common Data Model`_ for self-
describing scientific data in widespread use in the Earth sciences:
``xray.Dataset`` is an in-memory representation of a netCDF file.
.. _pandas: http://pandas.pydata.org
.. _Common Data Model: http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/CDM
.. _netCDF: http://www.unidata.ucar.edu/software/netcdf
.. _OPeNDAP: http://www.opendap.org/
Important links
---------------
- HTML documentation: http://xray.readthedocs.org
- Issue tracker: http://github.com/xray/xray/issues
- Source code: http://github.com/xray/xray
- PyData talk: https://www.youtube.com/watch?v=T5CZyNwBa9c
"""
# code to extract and write the version copied from pandas
FULLVERSION = VERSION
write_version = True
if not ISRELEASED:
    import subprocess
    FULLVERSION += '.dev'
    # Try plain 'git' first, then the Windows wrapper 'git.cmd'.
    pipe = None
    for cmd in ['git', 'git.cmd']:
        try:
            pipe = subprocess.Popen(
                [cmd, "describe", "--always", "--match", "v[0-9]*"],
                stdout=subprocess.PIPE)
            (so, serr) = pipe.communicate()
            if pipe.returncode == 0:
                break
        # NOTE(review): bare 'except:' also swallows KeyboardInterrupt;
        # a narrower OSError would likely suffice here — confirm before changing.
        except:
            pass
    if pipe is None or pipe.returncode != 0:
        # no git, or not in git dir
        if os.path.exists('xray/version.py'):
            warnings.warn("WARNING: Couldn't get git revision, using existing xray/version.py")
            write_version = False
        else:
            warnings.warn("WARNING: Couldn't get git revision, using generic version string")
    else:
        # have git, in git dir, but may have used a shallow clone (travis does this)
        rev = so.strip()
        # makes distutils blow up on Python 2.7
        if sys.version_info[0] >= 3:
            rev = rev.decode('ascii')
        if not rev.startswith('v') and re.match("[a-zA-Z0-9]{7,9}", rev):
            # partial clone, manually construct version string
            # this is the format before we started using git-describe
            # to get an ordering on dev version strings.
            rev = "v%s.dev-%s" % (VERSION, rev)
        # Strip leading v from tags format "vx.y.z" to get the version string
        FULLVERSION = rev.lstrip('v')
else:
    # released builds just append the optional qualifier (e.g. 'rc1')
    FULLVERSION += QUALIFIER
def write_version_py(filename=None):
    """Write the computed version strings into ``xray/version.py``.

    Parameters
    ----------
    filename : str, optional
        Destination path; defaults to ``xray/version.py`` next to this
        setup script.

    Reads the module-level ``FULLVERSION`` and ``VERSION`` globals computed
    above.
    """
    cnt = """\
version = '%s'
short_version = '%s'
"""
    if not filename:
        filename = os.path.join(
            os.path.dirname(__file__), 'xray', 'version.py')
    # 'with' guarantees the file is closed even if the write raises,
    # replacing the original explicit try/finally + close().
    with open(filename, 'w') as f:
        f.write(cnt % (FULLVERSION, VERSION))
# Only regenerate version.py when git metadata was available (see above).
if write_version:
    write_version_py()
# Hand all package metadata to setuptools.
setup(name=DISTNAME,
      version=FULLVERSION,
      license=LICENSE,
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      classifiers=CLASSIFIERS,
      description=DESCRIPTION,
      long_description=LONG_DESCRIPTION,
      install_requires=INSTALL_REQUIRES,
      tests_require=TESTS_REQUIRE,
      url=URL,
      test_suite='nose.collector',
      packages=find_packages(),
      package_data={'xray': ['test/data/*']})
| apache-2.0 |
pygeek/django | django/contrib/auth/decorators.py | 112 | 3097 | try:
from urllib.parse import urlparse
except ImportError: # Python 2
from urlparse import urlparse
from functools import wraps
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import PermissionDenied
from django.utils.decorators import available_attrs
from django.utils.encoding import force_str
from django.shortcuts import resolve_url
def user_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
    """
    Decorator for views that checks that the user passes the given test,
    redirecting to the log-in page if necessary. The test should be a callable
    that takes the user object and returns True if the user passes.
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            if test_func(request.user):
                return view_func(request, *args, **kwargs)
            # Test failed: redirect to the login page with a "next" parameter.
            path = request.build_absolute_uri()
            # urlparse chokes on lazy objects in Python 3, force to str
            resolved_login_url = force_str(
                resolve_url(login_url or settings.LOGIN_URL))
            # If the login url is the same scheme and net location then just
            # use the path as the "next" url.
            login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
            current_scheme, current_netloc = urlparse(path)[:2]
            if ((not login_scheme or login_scheme == current_scheme) and
                (not login_netloc or login_netloc == current_netloc)):
                path = request.get_full_path()
            # Imported locally to avoid a circular import with auth views.
            from django.contrib.auth.views import redirect_to_login
            return redirect_to_login(
                path, resolved_login_url, redirect_field_name)
        return _wrapped_view
    return decorator
def login_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
    """
    Decorator for views that checks that the user is logged in, redirecting
    to the log-in page if necessary.

    Usable both bare (``@login_required``) and with arguments
    (``@login_required(login_url=...)``).
    """
    decorator = user_passes_test(
        lambda u: u.is_authenticated(),
        login_url=login_url,
        redirect_field_name=redirect_field_name,
    )
    return decorator(function) if function else decorator
def permission_required(perm, login_url=None, raise_exception=False):
    """
    Decorator for views that checks whether a user has a particular permission
    enabled, redirecting to the log-in page if neccesary.
    If the raise_exception parameter is given the PermissionDenied exception
    is raised.
    """
    def check_perms(user):
        # Anonymous users may also carry permissions, so check everyone.
        if not user.has_perm(perm):
            if raise_exception:
                # Trigger the 403 handler instead of redirecting to login.
                raise PermissionDenied
            # Fall back to showing the login form.
            return False
        return True
    return user_passes_test(check_perms, login_url=login_url)
| bsd-3-clause |
synconics/odoo | addons/account_anglo_saxon/__init__.py | 436 | 1090 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product
import stock
import purchase
import invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
a-parhom/edx-platform | openedx/core/djangoapps/content/course_overviews/migrations/0007_courseoverviewimageconfig.py | 70 | 1394 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the CourseOverviewImageConfig
    # model, which stores admin-togglable image-resize settings (small/large
    # dimensions) for course overview images.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('course_overviews', '0006_courseoverviewimageset'),
    ]
    operations = [
        migrations.CreateModel(
            name='CourseOverviewImageConfig',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
                ('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
                ('small_width', models.IntegerField(default=375)),
                ('small_height', models.IntegerField(default=200)),
                ('large_width', models.IntegerField(default=750)),
                ('large_height', models.IntegerField(default=400)),
                ('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
            ],
            options={
                # newest configuration first
                'ordering': ('-change_date',),
                'abstract': False,
            },
        ),
    ]
| agpl-3.0 |
shurihell/testasia | openedx/core/djangoapps/credit/verification_access.py | 105 | 6749 | """
Create in-course reverification access groups in a course.
We model the rules as a set of user partitions, one for each
verification checkpoint in a course.
For example, suppose that a course has two verification checkpoints,
one at midterm A and one at the midterm B.
Then the user partitions would look like this:
Midterm A: |-- ALLOW --|-- DENY --|
Midterm B: |-- ALLOW --|-- DENY --|
where the groups are defined as:
* ALLOW: The user has access to content gated by the checkpoint.
* DENY: The user does not have access to content gated by the checkpoint.
"""
import logging
from util.db import generate_int_id
from openedx.core.djangoapps.credit.utils import get_course_blocks
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.partitions.partitions import Group, UserPartition
log = logging.getLogger(__name__)
VERIFICATION_SCHEME_NAME = "verification"
VERIFICATION_BLOCK_CATEGORY = "edx-reverification-block"
def update_verification_partitions(course_key):
    """
    Create a user partition for each verification checkpoint in the course.
    This will modify the published version of the course descriptor.
    It ensures that any in-course reverification XBlocks in the course
    have an associated user partition. Other user partitions (e.g. cohorts)
    will be preserved. Partitions associated with deleted reverification checkpoints
    will be marked as inactive and will not be used to restrict access.
    Arguments:
        course_key (CourseKey): identifier for the course.
    Returns:
        None
    """
    # Batch all the queries we're about to do and suppress
    # the "publish" signal to avoid an infinite call loop.
    with modulestore().bulk_operations(course_key, emit_signals=False):
        # Retrieve all in-course reverification blocks in the course
        icrv_blocks = get_course_blocks(course_key, VERIFICATION_BLOCK_CATEGORY)
        # Update the verification definitions in the course descriptor
        # This will also clean out old verification partitions if checkpoints
        # have been deleted.
        _set_verification_partitions(course_key, icrv_blocks)
def _unique_partition_id(course):
    """Return a user partition ID not yet used anywhere in the course."""
    # Include disabled partitions too: recycling an ID that stale courseware
    # references still point at (e.g. after an in-course reverification block
    # was deleted) would silently re-enable old access rules.
    existing_ids = {partition.id for partition in course.user_partitions}
    return generate_int_id(used_ids=existing_ids)
def _other_partitions(verified_partitions, exclude_partitions, course_key):
    """
    Retrieve all partitions NOT associated with the current set of ICRV blocks.
    Any partition associated with a deleted ICRV block will be marked as inactive
    so its access rules will no longer be enforced.
    Arguments:
        verified_partitions (list of UserPartition): All verified partitions defined in the course.
        exclude_partitions (list of UserPartition): Partitions to exclude (e.g. the ICRV partitions already added)
        course_key (CourseKey): Identifier for the course (used for logging).
    Returns: list of `UserPartition`s
    """
    results = []
    partition_by_id = {
        p.id: p for p in verified_partitions
    }
    # Partitions present in the course but not re-created for a live checkpoint
    # correspond to deleted ICRV blocks.
    other_partition_ids = set(p.id for p in verified_partitions) - set(p.id for p in exclude_partitions)
    for pid in other_partition_ids:
        partition = partition_by_id[pid]
        # Re-create the partition with active=False; UserPartition is treated
        # as immutable here, so a new instance is built rather than mutated.
        results.append(
            UserPartition(
                id=partition.id,
                name=partition.name,
                description=partition.description,
                scheme=partition.scheme,
                parameters=partition.parameters,
                groups=partition.groups,
                active=False,
            )
        )
        log.info(
            (
                "Disabled partition %s in course %s because the "
                "associated in-course-reverification checkpoint does not exist."
            ),
            partition.id, course_key
        )
    return results
def _set_verification_partitions(course_key, icrv_blocks):
    """
    Create or update user partitions in the course.
    Ensures that each ICRV block in the course has an associated user partition
    with the groups ALLOW and DENY.
    Arguments:
        course_key (CourseKey): Identifier for the course.
        icrv_blocks (list of XBlock): In-course reverification blocks, e.g. reverification checkpoints.
    Returns:
        list of UserPartition
    """
    scheme = UserPartition.get_scheme(VERIFICATION_SCHEME_NAME)
    if scheme is None:
        log.error("Could not retrieve user partition scheme with ID %s", VERIFICATION_SCHEME_NAME)
        return []
    course = modulestore().get_course(course_key)
    if course is None:
        log.error("Could not find course %s", course_key)
        return []
    verified_partitions = course.get_user_partitions_for_scheme(scheme)
    # Map each checkpoint location back to its existing partition ID so the
    # same checkpoint keeps the same partition across repeated runs.
    partition_id_for_location = {
        p.parameters["location"]: p.id
        for p in verified_partitions
        if "location" in p.parameters
    }
    partitions = []
    for block in icrv_blocks:
        partition = UserPartition(
            # Reuse the existing ID for this location, or mint a new one.
            id=partition_id_for_location.get(
                unicode(block.location),
                _unique_partition_id(course)
            ),
            name=block.related_assessment,
            description=u"Verification checkpoint at {}".format(block.related_assessment),
            scheme=scheme,
            parameters={"location": unicode(block.location)},
            groups=[
                Group(scheme.ALLOW, "Completed verification at {}".format(block.related_assessment)),
                Group(scheme.DENY, "Did not complete verification at {}".format(block.related_assessment)),
            ]
        )
        partitions.append(partition)
        log.info(
            (
                "Configured partition %s for course %s using a verified partition scheme "
                "for the in-course-reverification checkpoint at location %s"
            ),
            partition.id,
            course_key,
            partition.parameters["location"]
        )
    # Preserve existing, non-verified partitions from the course
    # Mark partitions for deleted in-course reverification as disabled.
    partitions += _other_partitions(verified_partitions, partitions, course_key)
    course.set_user_partitions_for_scheme(partitions, scheme)
    # Persist as the system user since no human actor triggered this update.
    modulestore().update_item(course, ModuleStoreEnum.UserID.system)
    log.info("Saved updated partitions for the course %s", course_key)
    return partitions
| agpl-3.0 |
EricSB/nupic | tests/unit/nupic/utils_test.py | 15 | 5895 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for utils module."""
import pickle
import tempfile
import unittest
from nupic.utils import MovingAverage
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.movingaverage_capnp import MovingAverageProto
class UtilsTest(unittest.TestCase):
  """Unit tests for nupic.utils.MovingAverage."""

  def testMovingAverage(self):
    """
    Test that the (internal) moving average maintains the averages correctly,
    even for null initial condition and when the number of values goes over
    windowSize. Pass in integers and floats.
    """
    historicalValues = []
    total = 0
    windowSize = 3
    newAverage, historicalValues, total = (
        MovingAverage.compute(historicalValues, total, 3, windowSize)
    )
    self.assertEqual(newAverage, 3.0)
    self.assertEqual(historicalValues, [3.0])
    self.assertEqual(total, 3.0)
    newAverage, historicalValues, total = (
        MovingAverage.compute(historicalValues, total, 4, windowSize)
    )
    self.assertEqual(newAverage, 3.5)
    self.assertListEqual(historicalValues, [3.0, 4.0])
    self.assertEqual(total, 7.0)
    newAverage, historicalValues, total = (
        MovingAverage.compute(historicalValues, total, 5.0, windowSize)
    )
    self.assertEqual(newAverage, 4.0)
    self.assertListEqual(historicalValues, [3.0, 4.0, 5.0])
    self.assertEqual(total, 12.0)
    # Ensure the first value gets popped
    newAverage, historicalValues, total = (
        MovingAverage.compute(historicalValues, total, 6.0, windowSize)
    )
    self.assertEqual(newAverage, 5.0)
    self.assertListEqual(historicalValues, [4.0, 5.0, 6.0])
    self.assertEqual(total, 15.0)

  def testMovingAverageInstance(self):
    """
    Test that the (internal) moving average maintains the averages correctly,
    even for null initial condition and when the number of values goes over
    windowSize. Pass in integers and floats.
    This is for the instance method next().
    """
    ma = MovingAverage(windowSize=3)
    newAverage = ma.next(3)
    self.assertEqual(newAverage, 3.0)
    self.assertListEqual(ma.getSlidingWindow(), [3.0])
    self.assertEqual(ma.total, 3.0)
    newAverage = ma.next(4)
    self.assertEqual(newAverage, 3.5)
    self.assertListEqual(ma.getSlidingWindow(), [3.0, 4.0])
    self.assertEqual(ma.total, 7.0)
    newAverage = ma.next(5)
    self.assertEqual(newAverage, 4.0)
    self.assertListEqual(ma.getSlidingWindow(), [3.0, 4.0, 5.0])
    self.assertEqual(ma.total, 12.0)
    # Ensure the first value gets popped
    newAverage = ma.next(6)
    self.assertEqual(newAverage, 5.0)
    self.assertListEqual(ma.getSlidingWindow(), [4.0, 5.0, 6.0])
    self.assertEqual(ma.total, 15.0)

  def testMovingAverageSlidingWindowInit(self):
    """
    Test the slidingWindow value is correctly assigned when initializing a
    new MovingAverage object.
    """
    # With existing historical values; same values as tested in testMovingAverage()
    ma = MovingAverage(windowSize=3, existingHistoricalValues=[3.0, 4.0, 5.0])
    self.assertListEqual(ma.getSlidingWindow(), [3.0, 4.0, 5.0])
    # Without existing historical values
    ma = MovingAverage(windowSize=3)
    self.assertListEqual(ma.getSlidingWindow(), [])

  @unittest.skipUnless(
      capnp, "pycapnp is not installed, skipping serialization test.")
  def testMovingAverageReadWrite(self):
    """Round-trip a MovingAverage through its capnp proto representation."""
    ma = MovingAverage(windowSize=3)
    ma.next(3)
    ma.next(4.5)
    ma.next(5)
    proto1 = MovingAverageProto.new_message()
    ma.write(proto1)
    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = MovingAverageProto.read(f)
    resurrectedMa = MovingAverage.read(proto2)
    newAverage = ma.next(6)
    self.assertEqual(newAverage, resurrectedMa.next(6))
    self.assertListEqual(ma.getSlidingWindow(),
                         resurrectedMa.getSlidingWindow())
    self.assertEqual(ma.total, resurrectedMa.total)
    # BUG FIX: the original used assertTrue(ma, resurrectedMa), which passes
    # resurrectedMa as the failure *message* and never compares the two
    # objects. assertEqual actually exercises MovingAverage.__eq__.
    self.assertEqual(ma, resurrectedMa)

  def testSerialization(self):
    """Serialization round-trip using pickle."""
    ma = MovingAverage(windowSize=3)
    ma.next(3)
    ma.next(4.5)
    ma.next(5)
    stored = pickle.dumps(ma)
    restored = pickle.loads(stored)
    self.assertEqual(restored, ma)
    self.assertEqual(ma.next(6), restored.next(6))

  def testEquals(self):
    """__eq__ depends on window size, history, and accumulated state."""
    ma = MovingAverage(windowSize=3)
    maP = MovingAverage(windowSize=3)
    self.assertEqual(ma, maP)
    maN = MovingAverage(windowSize=10)
    self.assertNotEqual(ma, maN)
    ma = MovingAverage(windowSize=2, existingHistoricalValues=[3.0, 4.0, 5.0])
    maP = MovingAverage(windowSize=2, existingHistoricalValues=[3.0, 4.0, 5.0])
    self.assertEqual(ma, maP)
    maP.next(6)
    self.assertNotEqual(ma, maP)
    ma.next(6)
    self.assertEqual(ma, maP)
| agpl-3.0 |
amoikevin/gyp | test/variables/filelist/gyptest-filelist-golden.py | 228 | 1584 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test variable expansion of '<|(list.txt ...)' syntax commands.
"""
import os
import sys
import TestGyp
# Run gyp in 'gypd' (debug dump) mode and compare its stdout and the
# generated .gypd file against checked-in golden copies.
test = TestGyp.TestGyp(format='gypd')
expect = test.read('filelist.gyp.stdout')
if sys.platform == 'win32':
    # Golden file uses POSIX separators and LF endings; normalize for Windows.
    expect = expect.replace('/', r'\\').replace('\r\n', '\n')
test.run_gyp('src/filelist.gyp',
             '--debug', 'variables',
             stdout=expect, ignore_line_numbers=True)
# Verify the filelist.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system. Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('src/filelist.gypd').replace(
    '\r', '').replace('\\\\', '/')
expect = test.read('filelist.gypd.golden').replace('\r', '')
if not test.match(contents, expect):
  print "Unexpected contents of `src/filelist.gypd'"
  test.diff(expect, contents, 'src/filelist.gypd ')
  test.fail_test()
# The '<|(list.txt ...)' expansion should also have written the names file.
contents = test.read('src/names.txt')
expect = 'John\nJacob\nJingleheimer\nSchmidt\n'
if not test.match(contents, expect):
  print "Unexpected contents of `src/names.txt'"
  test.diff(expect, contents, 'src/names.txt ')
  test.fail_test()
test.pass_test()
| bsd-3-clause |
andmos/ansible | lib/ansible/modules/network/ios/ios_user.py | 8 | 17360 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_user
version_added: "2.4"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage the aggregate of local users on Cisco IOS device
description:
- This module provides declarative management of the local usernames
configured on network devices. It allows playbooks to manage
either individual usernames or the aggregate of usernames in the
current running config. It also supports purging usernames from the
configuration that are not explicitly defined.
notes:
- Tested against IOS 15.6
options:
aggregate:
description:
- The set of username objects to be configured on the remote
Cisco IOS device. The list entries can either be the username
or a hash of username and properties. This argument is mutually
exclusive with the C(name) argument.
aliases: ['users', 'collection']
name:
description:
- The username to be configured on the Cisco IOS device.
This argument accepts a string value and is mutually exclusive
with the C(aggregate) argument.
Please note that this option is not same as C(provider username).
configured_password:
description:
- The password to be configured on the Cisco IOS device. The
password needs to be provided in clear and it will be encrypted
on the device.
Please note that this option is not same as C(provider password).
update_password:
description:
- Since passwords are encrypted in the device running config, this
argument will instruct the module when to change the password. When
set to C(always), the password will always be updated in the device
and when set to C(on_create) the password will be updated only if
the username is created.
default: always
choices: ['on_create', 'always']
password_type:
description:
- This argument determines whether a 'password' or 'secret' will be
configured.
default: secret
choices: ['secret', 'password']
version_added: "2.8"
hashed_password:
description:
- This option allows configuring hashed passwords on Cisco IOS devices.
suboptions:
type:
description:
- Specifies the type of hash (e.g., 5 for MD5, 8 for PBKDF2, etc.)
- For this to work, the device needs to support the desired hash type
type: int
required: True
value:
description:
- The actual hashed password to be configured on the device
required: True
version_added: "2.8"
privilege:
description:
- The C(privilege) argument configures the privilege level of the
user when logged into the system. This argument accepts integer
values in the range of 1 to 15.
view:
description:
- Configures the view for the username in the
device running configuration. The argument accepts a string value
defining the view name. This argument does not check if the view
has been configured on the device.
aliases: ['role']
sshkey:
description:
- Specifies one or more SSH public key(s) to configure
for the given username.
- This argument accepts a valid SSH key value.
version_added: "2.7"
nopassword:
description:
- Defines the username without assigning
a password. This will allow the user to login to the system
without being authenticated by a password.
type: bool
purge:
description:
- Instructs the module to consider the
resource definition absolute. It will remove any previously
configured usernames on the device with the exception of the
`admin` user (the current defined set of users).
type: bool
default: false
state:
description:
- Configures the state of the username definition
as it relates to the device operational configuration. When set
to I(present), the username(s) should be configured in the device active
configuration and when set to I(absent) the username(s) should not be
in the device active configuration
default: present
choices: ['present', 'absent']
extends_documentation_fragment: ios
"""
EXAMPLES = """
- name: create a new user
ios_user:
name: ansible
nopassword: True
sshkey: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
state: present
- name: create a new user with multiple keys
ios_user:
name: ansible
sshkey:
- "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
- "{{ lookup('file', '~/path/to/public_key') }}"
state: present
- name: remove all users except admin
ios_user:
purge: yes
- name: remove all users except admin and these listed users
ios_user:
aggregate:
- name: testuser1
- name: testuser2
- name: testuser3
purge: yes
- name: set multiple users to privilege level 15
ios_user:
aggregate:
- name: netop
- name: netend
privilege: 15
state: present
- name: set user view/role
ios_user:
name: netop
view: network-operator
state: present
- name: Change Password for User netop
ios_user:
name: netop
configured_password: "{{ new_password }}"
update_password: always
state: present
- name: Aggregate of users
ios_user:
aggregate:
- name: ansibletest2
- name: ansibletest3
view: network-admin
- name: Add a user specifying password type
ios_user:
name: ansibletest4
configured_password: "{{ new_password }}"
password_type: password
- name: Add a user with MD5 hashed password
ios_user:
name: ansibletest5
hashed_password:
type: 5
value: $3$8JcDilcYgFZi.yz4ApaqkHG2.8/
- name: Delete users with aggregate
ios_user:
aggregate:
- name: ansibletest1
- name: ansibletest2
- name: ansibletest3
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- username ansible secret password
- username admin secret admin
"""
import base64
import hashlib
import re
from copy import deepcopy
from functools import partial
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.ios.ios import get_config, load_config
from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args
from ansible.module_utils.six import iteritems
def validate_privilege(value, module):
    """Abort the module run when a privilege level is outside 1..15.

    Falsy values (None, 0) are accepted without validation, matching the
    behaviour callers rely on for unset parameters.
    """
    if value and (value < 1 or value > 15):
        module.fail_json(msg='privilege must be between 1 and 15, got %s' % value)
def user_del_cmd(username):
    """Build the interactive command dict that deletes *username* on IOS.

    IOS prompts for confirmation before removing a user, so the dict carries
    the expected prompt text and the 'y' answer to send.
    """
    return dict(
        command='no username %s' % username,
        prompt='This operation will remove all username related configurations with same name',
        answer='y',
        newline=False,
    )
def sshkey_fingerprint(sshkey):
    """Convert a public SSH key into the MD5-fingerprint form IOS accepts.

    IOS takes an upper-cased MD5 digest of the base64 key material, which is
    easier to configure on a single line than the full key. Returns None for
    an empty/missing key.
    """
    if not sshkey:
        return None
    if ' ' not in sshkey:
        # bare key material, assume rsa type
        digest = hashlib.md5(base64.b64decode(sshkey)).hexdigest().upper()
        return 'ssh-rsa %s' % digest
    # "ssh-rsa AAA...== comment" — replace only the key part with its digest
    parts = sshkey.split(' ')
    parts[1] = hashlib.md5(base64.b64decode(parts[1])).hexdigest().upper()
    return ' '.join(parts)
def map_obj_to_commands(updates, module):
    """Translate (want, have) user pairs into the list of IOS config commands.

    `updates` is an iterable of (want, have) dicts: the desired and current
    state of each username. Command order within a user matters to IOS, so
    the sequence of checks below should not be reordered.
    """
    commands = list()
    # NOTE(review): 'state' is read but never used below — candidate for removal.
    state = module.params['state']
    update_password = module.params['update_password']
    password_type = module.params['password_type']
    def needs_update(want, have, x):
        # only emit a command when the desired value is set AND differs
        return want.get(x) and (want.get(x) != have.get(x))
    def add(command, want, x):
        command.append('username %s %s' % (want['name'], x))
    def add_hashed_password(command, want, x):
        command.append('username %s secret %s %s' % (want['name'], x.get('type'),
                                                     x.get('value')))
    def add_ssh(command, want, x=None):
        # ssh keys live in a separate 'ip ssh pubkey-chain' config context;
        # with no keys (x is None) the user's chain entry is removed instead
        command.append('ip ssh pubkey-chain')
        if x:
            command.append('username %s' % want['name'])
            for item in x:
                command.append('key-hash %s' % item)
            command.append('exit')
        else:
            command.append('no username %s' % want['name'])
        command.append('exit')
    for update in updates:
        want, have = update
        if want['state'] == 'absent':
            if have['sshkey']:
                add_ssh(commands, want)
            else:
                commands.append(user_del_cmd(want['name']))
        # NOTE(review): no 'continue' after the absent branch — assumes 'want'
        # for an absent user carries no other set fields, so the checks below
        # are no-ops for it. Confirm against map_params_to_obj.
        if needs_update(want, have, 'view'):
            add(commands, want, 'view %s' % want['view'])
        if needs_update(want, have, 'privilege'):
            add(commands, want, 'privilege %s' % want['privilege'])
        if needs_update(want, have, 'sshkey'):
            add_ssh(commands, want, want['sshkey'])
        if needs_update(want, have, 'configured_password'):
            if update_password == 'always' or not have:
                # cannot mix 'password' and 'secret' for the same user
                if have and password_type != have['password_type']:
                    module.fail_json(msg='Can not have both a user password and a user secret.' +
                                     ' Please choose one or the other.')
                add(commands, want, '%s %s' % (password_type, want['configured_password']))
        if needs_update(want, have, 'hashed_password'):
            add_hashed_password(commands, want, want['hashed_password'])
        if needs_update(want, have, 'nopassword'):
            if want['nopassword']:
                add(commands, want, 'nopassword')
            else:
                add(commands, want, user_del_cmd(want['name']))
    return commands
def parse_view(data):
    """Extract the CLI view name from a username config blob, or None."""
    found = re.search(r'view (\S+)', data, re.M)
    return found.group(1) if found else None
def parse_sshkey(data, user):
    """Return the list of key-hash entries configured for ``user`` ([] if none)."""
    block = re.search(r'username %s(\n\s+key-hash .+$)+' % user, data, re.M)
    if not block:
        return []
    # Pull every "key-hash <type> <hash> [comment]" value out of the match.
    return re.findall(r'key-hash (\S+ \S+(?: .+)?)$', block.group(), re.M)
def parse_privilege(data):
    """Return the configured privilege level as an int, or None if absent."""
    found = re.search(r'privilege (\S+)', data, re.M)
    return int(found.group(1)) if found else None
def parse_password_type(data):
    """Return 'password' or 'secret' when the username line configures one.

    An IOS line looks like ``username <name> secret <enc-type> <value>``, so
    the keyword sits third from the end of the line.  Returns None when no
    password is configured.  Fixes: the original shadowed the builtin
    ``type``, called split() twice, and raised IndexError on lines with
    fewer than three tokens.
    """
    tokens = data.split() if data else []
    if len(tokens) >= 3 and tokens[-3] in ('password', 'secret'):
        return tokens[-3]
    return None
def map_config_to_obj(module):
    """Parse the device's username configuration into a list of user dicts.

    Runs ``show running-config | include username`` and returns one dict per
    configured user, mirroring the shape built by map_params_to_obj().
    Returns an empty list when no username lines are present.
    """
    data = get_config(module, flags=['| include username'])
    # Match "username <name>" at the start of a line or indented two spaces
    # (the latter covers entries nested under "ip ssh pubkey-chain").
    match = re.findall(r'(?:^(?:u|\s{2}u))sername (\S+)', data, re.M)
    if not match:
        return list()
    instances = list()
    for user in set(match):
        # Join all of this user's config lines into one blob for the parsers.
        regex = r'username %s .+$' % user
        cfg = re.findall(regex, data, re.M)
        cfg = '\n'.join(cfg)
        obj = {
            'name': user,
            'state': 'present',
            'nopassword': 'nopassword' in cfg,
            'configured_password': None,
            'hashed_password': None,
            'password_type': parse_password_type(cfg),
            'sshkey': parse_sshkey(data, user),
            'privilege': parse_privilege(cfg),
            'view': parse_view(cfg)
        }
        instances.append(obj)
    return instances
def get_param_value(key, item, module):
    """Resolve the effective value for ``key``.

    Prefers a value supplied on the aggregate ``item`` (after running the
    declared type check) and falls back to ``module.params``.  Any truthy
    result is passed through a module-level ``validate_<key>`` hook when one
    exists.
    """
    if item.get(key):
        # Value came from the aggregate entry: run the declared type check.
        declared = module.argument_spec[key].get('type', 'str')
        module._CHECK_ARGUMENT_TYPES_DISPATCHER[declared](item[key])
        value = item[key]
    else:
        # Missing (or falsy) on the item: use the top-level module parameter.
        value = module.params[key]
    validator = globals().get('validate_%s' % key)
    if validator and value:
        validator(value, module)
    return value
def map_params_to_obj(module):
    """Build the list of desired-user dicts from the task parameters.

    Accepts either a single ``name`` or an ``aggregate`` list whose entries
    may be bare usernames or dicts.  Each entry is normalised through
    get_param_value(), which also type-checks and validates item-level
    values.  NOTE: dict items taken from ``aggregate`` are updated in place,
    so ``module.params`` is mutated as a side effect.  Calls
    module.fail_json() when a required username is missing.
    """
    users = module.params['aggregate']
    if not users:
        if not module.params['name'] and module.params['purge']:
            # No users named, but purge may still delete existing ones.
            return list()
        elif not module.params['name']:
            module.fail_json(msg='username is required')
        else:
            aggregate = [{'name': module.params['name']}]
    else:
        aggregate = list()
        for item in users:
            if not isinstance(item, dict):
                # Bare strings in the list are treated as usernames.
                aggregate.append({'name': item})
            elif 'name' not in item:
                module.fail_json(msg='name is required')
            else:
                aggregate.append(item)
    objects = list()
    for item in aggregate:
        # Bind item/module once; each call below only needs the key name.
        get_value = partial(get_param_value, item=item, module=module)
        item['configured_password'] = get_value('configured_password')
        item['hashed_password'] = get_value('hashed_password')
        item['nopassword'] = get_value('nopassword')
        item['privilege'] = get_value('privilege')
        item['view'] = get_value('view')
        item['sshkey'] = render_key_list(get_value('sshkey'))
        item['state'] = get_value('state')
        objects.append(item)
    return objects
def render_key_list(ssh_keys):
    """Map each raw ssh key to its fingerprint line; [] for empty/None input."""
    if not ssh_keys:
        return []
    return [sshkey_fingerprint(key) for key in ssh_keys]
def update_objects(want, have):
    """Pair each desired user needing a change with its on-device state.

    A user absent from ``have`` pairs with an empty dict when it should be
    present.  For existing users a pair is appended once per differing
    (truthy) field, matching the original behaviour exactly.
    """
    updates = list()
    for entry in want:
        current = None
        for candidate in have:
            if candidate['name'] == entry['name']:
                current = candidate
                break
        if current is None:
            if entry['state'] == 'present':
                updates.append((entry, {}))
        else:
            for key, value in entry.items():
                if value and value != current[key]:
                    updates.append((entry, current))
    return updates
def main():
    """Main entry point for module execution.

    Builds the argument spec (single-user args plus an ``aggregate`` list of
    users), diffs the desired users against the device configuration, and
    pushes the resulting commands unless running in check mode.
    """
    # Spec for a pre-hashed secret: {type: <int enc-type>, value: <hash>}.
    hashed_password_spec = dict(
        type=dict(type='int', required=True),
        value=dict(no_log=True, required=True)
    )
    # Per-user options, shared by the top-level args and aggregate entries.
    element_spec = dict(
        name=dict(),
        configured_password=dict(no_log=True),
        hashed_password=dict(no_log=True, type='dict', options=hashed_password_spec),
        nopassword=dict(type='bool'),
        update_password=dict(default='always', choices=['on_create', 'always']),
        password_type=dict(default='secret', choices=['secret', 'password']),
        privilege=dict(type='int'),
        view=dict(aliases=['role']),
        sshkey=dict(type='list'),
        state=dict(default='present', choices=['present', 'absent'])
    )
    aggregate_spec = deepcopy(element_spec)
    aggregate_spec['name'] = dict(required=True)
    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)
    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec, aliases=['users', 'collection']),
        purge=dict(type='bool', default=False)
    )
    argument_spec.update(element_spec)
    argument_spec.update(ios_argument_spec)
    mutually_exclusive = [('name', 'aggregate'), ('nopassword', 'hashed_password', 'configured_password')]
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)
    warnings = list()
    # The provider-level 'password' (connection auth) is easily confused with
    # the user's login password; warn instead of silently ignoring it.
    if module.params['password'] and not module.params['configured_password']:
        warnings.append(
            'The "password" argument is used to authenticate the current connection. ' +
            'To set a user password use "configured_password" instead.'
        )
    check_args(module, warnings)
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands(update_objects(want, have), module)
    if module.params['purge']:
        # Delete every configured user not in the desired set, except 'admin'.
        want_users = [x['name'] for x in want]
        have_users = [x['name'] for x in have]
        for item in set(have_users).difference(want_users):
            if item != 'admin':
                commands.append(user_del_cmd(item))
    result['commands'] = commands
    if commands:
        # Only push config outside check mode; 'changed' is reported either way.
        if not module.check_mode:
            load_config(module, commands)
        result['changed'] = True
    module.exit_json(**result)
# Run the module when invoked directly by the Ansible executor.
if __name__ == '__main__':
    main()
| gpl-3.0 |
mandeepdhami/horizon | openstack_dashboard/dashboards/identity/domains/urls.py | 64 | 1036 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.identity.domains import views
# URL routes for the Identity -> Domains panel: list, create, and update.
# NOTE(review): django.conf.urls.patterns() was removed in Django 1.10; this
# style only works on older Django releases -- confirm the target version
# before modernising.
urlpatterns = patterns(
    '',
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^create$', views.CreateDomainView.as_view(), name='create'),
    url(r'^(?P<domain_id>[^/]+)/update/$',
        views.UpdateDomainView.as_view(), name='update')
)
| apache-2.0 |
AladdinSonni/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py | 118 | 23190 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
from webkitpy.common.net.buildbot.buildbot_mock import MockBuilder
from webkitpy.common.system.executive_mock import MockExecutive2
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.rebaseline import *
from webkitpy.tool.mocktool import MockTool, MockOptions
class _BaseTestCase(unittest.TestCase):
    """Shared fixture for the rebaseline command tests.

    Subclasses set ``command_constructor`` to the command class under test.
    setUp() builds that command against a MockTool and caches the Lion port
    and its TestExpectations path for convenience.
    """
    MOCK_WEB_RESULT = 'MOCK Web result, convert 404 to None=True'
    WEB_PREFIX = 'http://example.com/f/builders/Apple Lion Release WK1 (Tests)/results/layout-test-results'
    command_constructor = None
    def setUp(self):
        self.tool = MockTool()
        self.command = self.command_constructor() # lint warns that command_constructor might not be set, but this is intentional; pylint: disable=E1102
        self.command.bind_to_tool(self.tool)
        self.lion_port = self.tool.port_factory.get_from_builder_name("Apple Lion Release WK1 (Tests)")
        self.lion_expectations_path = self.lion_port.path_to_test_expectations_file()
    # FIXME: we should override builders._exact_matches here to point to a set
    # of test ports and restore the value in tearDown(), and that way the
    # individual tests wouldn't have to worry about it.
    def _expand(self, path):
        # Interpret relative paths as relative to the layout tests directory.
        if self.tool.filesystem.isabs(path):
            return path
        return self.tool.filesystem.join(self.lion_port.layout_tests_dir(), path)
    def _read(self, path):
        # Read a (possibly relative) file from the mock filesystem.
        return self.tool.filesystem.read_text_file(self._expand(path))
    def _write(self, path, contents):
        # Write a (possibly relative) file into the mock filesystem.
        self.tool.filesystem.write_text_file(self._expand(path), contents)
    def _zero_out_test_expectations(self):
        # Blank every port's expectations files so each test starts clean.
        for port_name in self.tool.port_factory.all_port_names():
            port = self.tool.port_factory.get(port_name)
            for path in port.expectations_files():
                self._write(path, '')
        self.tool.filesystem.written_files = {}
class TestRebaselineTest(_BaseTestCase):
    """Tests for the rebaseline-test-internal command (RebaselineTest)."""
    command_constructor = RebaselineTest # AKA webkit-patch rebaseline-test-internal
    def setUp(self):
        super(TestRebaselineTest, self).setUp()
        self.options = MockOptions(builder="Apple Lion Release WK1 (Tests)", test="userscripts/another-test.html", suffixes="txt",
                                   move_overwritten_baselines_to=None, results_directory=None)
    def test_baseline_directory(self):
        # Each builder name must map to its expected platform baseline dir.
        command = self.command
        self.assertMultiLineEqual(command._baseline_directory("Apple Win XP Debug (Tests)"), "/mock-checkout/LayoutTests/platform/win-xp")
        self.assertMultiLineEqual(command._baseline_directory("Apple Win 7 Release (Tests)"), "/mock-checkout/LayoutTests/platform/win")
        self.assertMultiLineEqual(command._baseline_directory("Apple Lion Release WK1 (Tests)"), "/mock-checkout/LayoutTests/platform/mac-lion")
        self.assertMultiLineEqual(command._baseline_directory("Apple Lion Release WK2 (Tests)"), "/mock-checkout/LayoutTests/platform/mac-wk2")
        self.assertMultiLineEqual(command._baseline_directory("Apple MountainLion Release WK1 (Tests)"), "/mock-checkout/LayoutTests/platform/mac")
        self.assertMultiLineEqual(command._baseline_directory("Apple MountainLion Release WK2 (Tests)"), "/mock-checkout/LayoutTests/platform/mac")
        self.assertMultiLineEqual(command._baseline_directory("GTK Linux 64-bit Debug"), "/mock-checkout/LayoutTests/platform/gtk-wk1")
        self.assertMultiLineEqual(command._baseline_directory("GTK Linux 64-bit Release WK2 (Tests)"), "/mock-checkout/LayoutTests/platform/gtk-wk2")
        self.assertMultiLineEqual(command._baseline_directory("EFL Linux 64-bit Release WK2"), "/mock-checkout/LayoutTests/platform/efl-wk2")
        self.assertMultiLineEqual(command._baseline_directory("Qt Linux Release"), "/mock-checkout/LayoutTests/platform/qt")
    def test_rebaseline_updates_expectations_file_noop(self):
        # Rebaselining a test not mentioned in the expectations leaves them untouched.
        self._zero_out_test_expectations()
        self._write(self.lion_expectations_path, """Bug(B) [ Mac Linux XP Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]
Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
""")
        self._write("fast/dom/Window/window-postmessage-clone-really-deep-array.html", "Dummy test contents")
        self._write("fast/css/large-list-of-rules-crash.html", "Dummy test contents")
        self._write("userscripts/another-test.html", "Dummy test contents")
        self.options.suffixes = "png,wav,txt"
        self.command._rebaseline_test_and_update_expectations(self.options)
        self.assertItemsEqual(self.tool.web.urls_fetched,
                              [self.WEB_PREFIX + '/userscripts/another-test-actual.png',
                               self.WEB_PREFIX + '/userscripts/another-test-actual.wav',
                               self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
        new_expectations = self._read(self.lion_expectations_path)
        self.assertMultiLineEqual(new_expectations, """Bug(B) [ Mac Linux XP Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]
Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
""")
    def test_rebaseline_updates_expectations_file(self):
        # NOTE(review): despite the name, the expected file content below is
        # unchanged -- confirm whether an expectation-removal assert is missing.
        self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
        self._write("userscripts/another-test.html", "Dummy test contents")
        self.options.suffixes = 'png,wav,txt'
        self.command._rebaseline_test_and_update_expectations(self.options)
        self.assertItemsEqual(self.tool.web.urls_fetched,
                              [self.WEB_PREFIX + '/userscripts/another-test-actual.png',
                               self.WEB_PREFIX + '/userscripts/another-test-actual.wav',
                               self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
        new_expectations = self._read(self.lion_expectations_path)
        self.assertMultiLineEqual(new_expectations, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
    def test_rebaseline_does_not_include_overrides(self):
        self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nBug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
        self._write("userscripts/another-test.html", "Dummy test contents")
        self.options.suffixes = 'png,wav,txt'
        self.command._rebaseline_test_and_update_expectations(self.options)
        self.assertItemsEqual(self.tool.web.urls_fetched,
                              [self.WEB_PREFIX + '/userscripts/another-test-actual.png',
                               self.WEB_PREFIX + '/userscripts/another-test-actual.wav',
                               self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
        new_expectations = self._read(self.lion_expectations_path)
        self.assertMultiLineEqual(new_expectations, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nBug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
    def test_rebaseline_test(self):
        # A single-suffix rebaseline fetches exactly one actual-result URL.
        self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", None, "txt", self.WEB_PREFIX)
        self.assertItemsEqual(self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
    def test_rebaseline_test_with_results_directory(self):
        # A local results directory is fetched via file:// instead of http.
        self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
        self.options.results_directory = '/tmp'
        self.command._rebaseline_test_and_update_expectations(self.options)
        self.assertItemsEqual(self.tool.web.urls_fetched, ['file:///tmp/userscripts/another-test-actual.txt'])
    def test_rebaseline_test_and_print_scm_changes(self):
        # With _print_scm_changes set, new baselines are reported as SCM adds.
        self.command._print_scm_changes = True
        self.command._scm_changes = {'add': [], 'delete': []}
        self.tool._scm.exists = lambda x: False
        self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", None, "txt", None)
        self.assertDictEqual(self.command._scm_changes, {'add': ['/mock-checkout/LayoutTests/platform/mac-lion/userscripts/another-test-expected.txt'], 'delete': []})
    def test_rebaseline_and_copy_test(self):
        # The generic baseline is copied down to mac-wk2 before being overwritten.
        self._write("userscripts/another-test-expected.txt", "generic result")
        self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", ["mac-lion-wk2"], "txt", None)
        self.assertMultiLineEqual(self._read('platform/mac-lion/userscripts/another-test-expected.txt'), self.MOCK_WEB_RESULT)
        self.assertMultiLineEqual(self._read('platform/mac-wk2/userscripts/another-test-expected.txt'), 'generic result')
    def test_rebaseline_and_copy_test_no_existing_result(self):
        # Nothing to copy when no baseline exists yet.
        self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", ["mac-lion-wk2"], "txt", None)
        self.assertMultiLineEqual(self._read('platform/mac-lion/userscripts/another-test-expected.txt'), self.MOCK_WEB_RESULT)
        self.assertFalse(self.tool.filesystem.exists(self._expand('platform/mac-lion-wk2/userscripts/another-test-expected.txt')))
    def test_rebaseline_and_copy_test_with_lion_result(self):
        # The platform-specific baseline is preserved in mac-wk2 before overwrite.
        self._write("platform/mac-lion/userscripts/another-test-expected.txt", "original lion result")
        self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", ["mac-lion-wk2"], "txt", self.WEB_PREFIX)
        self.assertItemsEqual(self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
        self.assertMultiLineEqual(self._read("platform/mac-wk2/userscripts/another-test-expected.txt"), "original lion result")
        self.assertMultiLineEqual(self._read("platform/mac-lion/userscripts/another-test-expected.txt"), self.MOCK_WEB_RESULT)
    def test_rebaseline_and_copy_no_overwrite_test(self):
        # An existing wk2 baseline must not be clobbered by the copy step.
        self._write("platform/mac-lion/userscripts/another-test-expected.txt", "original lion result")
        self._write("platform/mac-lion-wk2/userscripts/another-test-expected.txt", "original lion wk2 result")
        self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", ["mac-lion-wk2"], "txt", None)
        self.assertMultiLineEqual(self._read("platform/mac-lion-wk2/userscripts/another-test-expected.txt"), "original lion wk2 result")
        self.assertMultiLineEqual(self._read("platform/mac-lion/userscripts/another-test-expected.txt"), self.MOCK_WEB_RESULT)
    def test_rebaseline_test_internal_with_move_overwritten_baselines_to(self):
        self.tool.executive = MockExecutive2()
        # FIXME: it's confusing that this is the test- port, and not the regular lion port. Really all of the tests should be using the test ports.
        port = self.tool.port_factory.get('test-mac-snowleopard')
        self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-mac-snowleopard/failures/expected/image-expected.txt'), 'original snowleopard result')
        old_exact_matches = builders._exact_matches
        oc = OutputCapture()
        try:
            # Temporarily patch the builder table; restored in the finally below.
            builders._exact_matches = {
                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
                "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
            }
            options = MockOptions(optimize=True, builder="MOCK SnowLeopard", suffixes="txt",
                                  move_overwritten_baselines_to=["test-mac-leopard"], verbose=True, test="failures/expected/image.html",
                                  results_directory=None)
            oc.capture_output()
            self.command.execute(options, [], self.tool)
        finally:
            out, _, _ = oc.restore_output()
            builders._exact_matches = old_exact_matches
        self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-mac-leopard/failures/expected/image-expected.txt')), 'original snowleopard result')
        self.assertMultiLineEqual(out, '{"add": []}\n')
class TestRebaselineJson(_BaseTestCase):
    """Tests for RebaselineJson: verifies the child commands it shells out to."""
    command_constructor = RebaselineJson
    def setUp(self):
        super(TestRebaselineJson, self).setUp()
        self.tool.executive = MockExecutive2()
        # Patch the builder table for the whole test; restored in tearDown().
        self.old_exact_matches = builders._exact_matches
        builders._exact_matches = {
            "MOCK builder": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"]),
                             "move_overwritten_baselines_to": ["test-mac-leopard"]},
            "MOCK builder (Debug)": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier", "debug"])},
        }
    def tearDown(self):
        builders._exact_matches = self.old_exact_matches
        super(TestRebaselineJson, self).tearDown()
    def test_rebaseline_all(self):
        options = MockOptions(optimize=True, verbose=True, move_overwritten_baselines=False, results_directory=None)
        self.command._rebaseline(options, {"user-scripts/another-test.html": {"MOCK builder": ["txt", "png"]}})
        # Note that we have one run_in_parallel() call followed by a run_command()
        self.assertEqual(self.tool.executive.calls,
                         [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'user-scripts/another-test.html', '--verbose']],
                          ['echo', '--verbose', 'optimize-baselines', '--suffixes', 'txt,png', 'user-scripts/another-test.html']])
    def test_rebaseline_debug(self):
        options = MockOptions(optimize=True, verbose=True, move_overwritten_baselines=False, results_directory=None)
        self.command._rebaseline(options, {"user-scripts/another-test.html": {"MOCK builder (Debug)": ["txt", "png"]}})
        # Note that we have one run_in_parallel() call followed by a run_command()
        self.assertEqual(self.tool.executive.calls,
                         [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'user-scripts/another-test.html', '--verbose']],
                          ['echo', '--verbose', 'optimize-baselines', '--suffixes', 'txt,png', 'user-scripts/another-test.html']])
    def test_move_overwritten(self):
        # move_overwritten_baselines adds the --move-overwritten-baselines-to flag.
        options = MockOptions(optimize=True, verbose=True, move_overwritten_baselines=True, results_directory=None)
        self.command._rebaseline(options, {"user-scripts/another-test.html": {"MOCK builder": ["txt", "png"]}})
        # Note that we have one run_in_parallel() call followed by a run_command()
        self.assertEqual(self.tool.executive.calls,
                         [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'user-scripts/another-test.html', '--move-overwritten-baselines-to', 'test-mac-leopard', '--verbose']],
                          ['echo', '--verbose', 'optimize-baselines', '--suffixes', 'txt,png', 'user-scripts/another-test.html']])
    def test_no_optimize(self):
        options = MockOptions(optimize=False, verbose=True, move_overwritten_baselines=False, results_directory=None)
        self.command._rebaseline(options, {"user-scripts/another-test.html": {"MOCK builder (Debug)": ["txt", "png"]}})
        # Note that we have only one run_in_parallel() call
        self.assertEqual(self.tool.executive.calls,
                         [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'user-scripts/another-test.html', '--verbose']]])
    def test_results_directory(self):
        options = MockOptions(optimize=False, verbose=True, move_overwritten_baselines=False, results_directory='/tmp')
        self.command._rebaseline(options, {"user-scripts/another-test.html": {"MOCK builder": ["txt", "png"]}})
        # Note that we have only one run_in_parallel() call
        self.assertEqual(self.tool.executive.calls,
                         [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'user-scripts/another-test.html', '--results-directory', '/tmp', '--verbose']]])
class TestRebaseline(_BaseTestCase):
    """Tests for the top-level rebaseline command."""
    # This command shares most of its logic with RebaselineJson, so these tests just test what is different.
    command_constructor = Rebaseline # AKA webkit-patch rebaseline
    def test_tests_to_update(self):
        # Smoke test: _tests_to_update must run without unexpected output.
        build = Mock()
        OutputCapture().assert_outputs(self, self.command._tests_to_update, [build])
    def test_rebaseline(self):
        # Stub out builder/test discovery and verify the spawned child command.
        self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
        self.command._tests_to_update = lambda builder: ['mock/path/to/test.html']
        self._zero_out_test_expectations()
        old_exact_matches = builders._exact_matches
        oc = OutputCapture()
        try:
            builders._exact_matches = {
                "MOCK builder": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
            }
            oc.capture_output()
            self.command.execute(MockOptions(optimize=False, builders=None, suffixes="txt,png", verbose=True, move_overwritten_baselines=False), [], self.tool)
        finally:
            oc.restore_output()
            builders._exact_matches = old_exact_matches
        # Drop unrelated environment-probe calls before comparing.
        calls = filter(lambda x: x != ['qmake', '-v'] and x[0] != 'perl', self.tool.executive.calls)
        self.assertEqual(calls,
                         [[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'mock/path/to/test.html', '--verbose']]])
class TestRebaselineExpectations(_BaseTestCase):
    """Tests for rebaseline-expectations (tests marked Rebaseline)."""
    command_constructor = RebaselineExpectations
    def setUp(self):
        super(TestRebaselineExpectations, self).setUp()
        self.options = MockOptions(optimize=False, builders=None, suffixes=['txt'], verbose=False, platform=None,
                                   move_overwritten_baselines=False, results_directory=None)
    def test_rebaseline_expectations(self):
        self._zero_out_test_expectations()
        self.tool.executive = MockExecutive2()
        # Stub discovery: two tests needing a rebaseline, one suffix each.
        self.command._tests_to_rebaseline = lambda port: {'userscripts/another-test.html': set(['txt']), 'userscripts/images.svg': set(['png'])}
        self.command.execute(self.options, [], self.tool)
        # FIXME: change this to use the test- ports.
        calls = filter(lambda x: x != ['qmake', '-v'], self.tool.executive.calls)
        self.assertEqual(len(calls), 1)
        self.assertEqual(len(calls[0]), 22)
    def test_rebaseline_expectations_noop(self):
        # With nothing marked Rebaseline the command writes no files and logs it.
        self._zero_out_test_expectations()
        oc = OutputCapture()
        try:
            oc.capture_output()
            self.command.execute(self.options, [], self.tool)
        finally:
            _, _, logs = oc.restore_output()
            self.assertEqual(self.tool.filesystem.written_files, {})
            self.assertEqual(logs, 'Did not find any tests marked Rebaseline.\n')
    def disabled_test_overrides_are_included_correctly(self):
        # This tests that the any tests marked as REBASELINE in the overrides are found, but
        # that the overrides do not get written into the main file.
        self._zero_out_test_expectations()
        self._write(self.lion_expectations_path, '')
        self.lion_port.expectations_dict = lambda: {
            self.lion_expectations_path: '',
            'overrides': ('Bug(x) userscripts/another-test.html [ Failure Rebaseline ]\n'
                          'Bug(y) userscripts/test.html [ Crash ]\n')}
        self._write('/userscripts/another-test.html', '')
        self.assertDictEqual(self.command._tests_to_rebaseline(self.lion_port), {'userscripts/another-test.html': set(['png', 'txt', 'wav'])})
        self.assertEqual(self._read(self.lion_expectations_path), '')
class _FakeOptimizer(BaselineOptimizer):
    """Optimizer stub: canned per-directory results for .txt baselines, {} otherwise."""
    def read_results_by_directory(self, baseline_name):
        if not baseline_name.endswith('txt'):
            return {}
        return {'LayoutTests/passes/text.html': '123456',
                'LayoutTests/platform/test-mac-leopard/passes/text.html': 'abcdef'}
class TestAnalyzeBaselines(_BaseTestCase):
    """Tests for analyze-baselines, using _FakeOptimizer for canned results."""
    command_constructor = AnalyzeBaselines
    def setUp(self):
        super(TestAnalyzeBaselines, self).setUp()
        # Pin the port factory to a single test port and capture output lines.
        self.port = self.tool.port_factory.get('test')
        self.tool.port_factory.get = (lambda port_name=None, options=None: self.port)
        self.lines = []
        self.command._optimizer_class = _FakeOptimizer
        self.command._write = (lambda msg: self.lines.append(msg)) # pylint bug warning about unnecessary lambda? pylint: disable=W0108
    def test_default(self):
        self.command.execute(MockOptions(suffixes='txt', missing=False, platform=None), ['passes/text.html'], self.tool)
        self.assertEqual(self.lines,
                         ['passes/text-expected.txt:',
                          '  (generic): 123456',
                          '  test-mac-leopard: abcdef'])
    def test_missing_baselines(self):
        # With --missing, suffixes that have no baselines are reported too.
        self.command.execute(MockOptions(suffixes='png,txt', missing=True, platform=None), ['passes/text.html'], self.tool)
        self.assertEqual(self.lines,
                         ['passes/text-expected.png: (no baselines found)',
                          'passes/text-expected.txt:',
                          '  (generic): 123456',
                          '  test-mac-leopard: abcdef'])
| bsd-3-clause |
DTOcean/dtocean-core | dtocean_core/strategies/__init__.py | 1 | 3195 | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2018 Mathew Topper
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import abc
from copy import deepcopy
from polite.abc import abstractclassmethod
from ..menu import ModuleMenu, ThemeMenu
from ..pipeline import Tree
class Strategy(object):
    '''The base abstract class for all strategy classes.'''
    # NOTE(review): __metaclass__ only has effect on Python 2; under Python 3
    # this would need ``class Strategy(metaclass=abc.ABCMeta)`` -- confirm the
    # supported interpreter before changing.
    __metaclass__ = abc.ABCMeta
    def __init__(self):
        '''The init method should never have arguments. Provided for all
        subclasses are:

          self._module_menu: a ModuleMenu object
          self._theme_menu: a ThemeMenu object
          self._tree: a Tree object
        '''
        self._module_menu = ModuleMenu()
        self._theme_menu = ThemeMenu()
        self._tree = Tree()
        # Record the simulation indexes used in the strategy
        self._sim_record = []
        # Record the configuration dictionary of the strategy (assume this
        # is picklable)
        self._config = None
        # Record any detailed information about the simulation (assume this
        # is picklable)
        self.sim_details = None
        return
    @abstractclassmethod
    def get_name(cls):
        '''A class method for the common name of the strategy.

        Returns:
          str: A unique string
        '''
        return cls()
    @abc.abstractmethod
    def configure(self):
        '''The configure method is collect information required for executing
        the strategy.
        '''
        return
    @abc.abstractmethod
    def get_variables(self):
        '''The get_variables method returns the list of any variables that
        will be set by the strategy
        '''
        return
    @abc.abstractmethod
    def execute(self, core, project):
        '''The execute method is used to execute the strategy. It should always
        take a Core and a Project class as the only inputs.
        '''
        return
    def get_config(self):
        '''Return a deep copy of the stored configuration dictionary.'''
        return deepcopy(self._config)
    def set_config(self, config_dict):
        '''Store the (picklable) configuration dictionary for the strategy.'''
        self._config = config_dict
        return
    def add_simulation_index(self, sim_index):
        '''Record one simulation index as used by this strategy.'''
        self._sim_record.append(sim_index)
        return
    def get_simulation_record(self):
        '''Return a shallow copy of the recorded simulation indexes.'''
        return self._sim_record[:]
    def restart(self):
        '''Clear the simulation record and any detailed simulation info.'''
        self._sim_record = []
        self.sim_details = None
        return
| gpl-3.0 |
mtholder/taxalotl | taxalotl/parsing/col.py | 1 | 5000 | from __future__ import print_function
import io
import logging
from peyutil import shorter_fp_form
from taxalotl.resource_wrapper import TaxonomyWrapper
from taxalotl.parsing.darwin_core import normalize_darwin_core_taxonomy
_LOG = logging.getLogger(__name__)
# Partition-name -> frozenset of Catalogue of Life taxon IDs used as the
# roots of each partition's subtree when splitting the CoL taxonomy.
COL_PARTMAP = {
    'Archaea': frozenset([52435722]),
    'Bacteria': frozenset([52433432]),
    'Eukaryota': frozenset([52433499, 52435027, 52433974, 52433370]),
    'Archaeplastida': frozenset([52433499]),
    'Fungi': frozenset([52433393]),
    'Metazoa': frozenset([52433370]),
    'Viruses': frozenset([52433426]),
    'Glaucophyta': frozenset([52444130]),
    'Rhodophyta': frozenset([52444134]),
    'Chloroplastida': frozenset([52442327, 52442210, 52442148, 52434330, 52434201, 52433500, ]),
    'Annelida': frozenset([52433489]),
    'Arthropoda': frozenset([52433375]),
    'Malacostraca': frozenset([52433389]),
    'Arachnida': frozenset([52433402]),
    'Insecta': frozenset([52433376]),
    'Diptera': frozenset([52433521]),
    'Coleoptera': frozenset([52433486]),
    'Lepidoptera': frozenset([52433663]),
    'Hymenoptera': frozenset([52433621]),
    'Bryozoa': frozenset([52442814]),
    'Chordata': frozenset([52433371]),
    'Cnidaria': frozenset([52433398]),
    'Ctenophora': frozenset([52443092]),
    'Mollusca': frozenset([52440786]),
    'Nematoda': frozenset([52436787]),
    'Platyhelminthes': frozenset([52443117]),
    'Porifera': frozenset([52442836]),
}
# noinspection PyUnreachableCode
def partition_col_by_root_id(tax_part):  # type (TaxonPartition) -> None
    """Reads the serialized taxonomy of the parent, adds the easy lines to their partition element,
    and returns dicts needed to finish the assignments.

    Signature for partition functions. Takes:
      1. abs path of taxonomy file for parent taxon
      2. list of PartitionElements whose roots are sets that specify IDs that are the
        roots of the subtrees that are to go in each partition elemen.
    Returns a tuple:
      0. par_id ->[child_id] dict,
      1. id -> partition_element dict for already assigned IDs,
      2. id -> line dict - may only have unassigned IDs in it,
      3. synonym id -> [(accepted_id, line), ] for any synonyms
      4. roots_set - a frozen set of the union of the partition element roots
      5. the rootless partition element ("garbage_bin" for all unassigned IDs)
      6. header for taxon file
      7. header for synonyms file (or None)

    NOTE: this implementation is deliberately disabled by the ``assert False``
    below (hence the PyUnreachableCode suppression); the body is kept for
    reference only and never executes.
    """
    assert False
    complete_taxon_fp = tax_part.tax_fp
    syn_fp = tax_part.input_synonyms_filepath
    assert not syn_fp
    # NOTE(review): reaches into a private member of TaxonPartition.
    syn_by_id = tax_part._syn_by_id
    ptp = shorter_fp_form(complete_taxon_fp)
    # NOTE(review): the 'rU' mode flag was removed in Python 3.11; if this
    # code is ever revived it would need plain 'r'.
    with io.open(complete_taxon_fp, 'rU', encoding='utf-8') as inp:
        iinp = iter(inp)
        tax_part.taxon_header = next(iinp)
        prev_line = None
        # vt = unicode('\x0b') # Do some lines have vertical tabs? Of course they do....
        # istwo = unicode('\x1e')
        for n, line in enumerate(iinp):
            # Stitch together physical lines that lack a trailing newline
            # (embedded control characters split some records).
            if not line.endswith('\n'):
                if prev_line:
                    prev_line = prev_line + line[:-1]
                else:
                    prev_line = line[:-1]
                continue
            elif prev_line:
                line = prev_line + line
                prev_line = ''
            ls = line.split('\t')
            if n % 1000 == 0:
                _LOG.info(' read taxon {} from {}'.format(n, ptp))
            try:
                col_id, accept_id, par_id = ls[0], ls[4], ls[5]
                col_id = int(col_id)
                if accept_id:
                    # A non-empty accepted-ID column marks this row a synonym.
                    try:
                        accept_id = int(accept_id)
                    # NOTE(review): bare except silently tolerates non-integer
                    # accept_id values (e.g. the header row when n == 0).
                    except:
                        if n == 0:
                            continue
                    syn_by_id.setdefault(accept_id, []).append((col_id, line))
                else:
                    tax_part.read_taxon_line(col_id, par_id, line)
            except Exception:
                _LOG.exception("Exception parsing line {}:\n{}".format(1 + n, line))
                raise
# noinspection PyAbstractClass
class CoLTaxonomyWrapper(TaxonomyWrapper):
    """Resource wrapper for a Catalogue of Life taxonomy dump.

    Thin specialization of TaxonomyWrapper: supplies CoL-specific file
    names, the partition map, and Darwin Core normalization.
    """
    taxon_filename = 'taxonomy.tsv'
    # synonyms_filename = None
    # partition_parsing_fn = staticmethod(partition_col_by_root_id)
    schema = {"http://rs.tdwg.org/dwc/"}
    def __init__(self, obj, parent=None, refs=None):
        TaxonomyWrapper.__init__(self, obj, parent=parent, refs=refs)
    @property
    def partition_source_dir(self):
        # Partitioning reads from the normalized (post-`normalize`) output.
        return self.normalized_filedir
    def get_primary_partition_map(self):
        # Module-level clade-name -> CoL root-ID map defined above.
        return COL_PARTMAP
    def normalize(self):
        # Convert the unpacked Darwin Core archive into the interim format.
        normalize_darwin_core_taxonomy(self.unpacked_filepath, self.normalized_filedir, self)
    def _post_process_tree(self, tree):
        # CoL uses the literal name prefix 'not assigned' for incertae sedis.
        self.collapse_incertae_sedis_by_name_prefix(tree, 'not assigned')
    def post_process_interim_tax_data(self, interim_tax_data):
        self.collapse_as_incertae_sedis_interim_tax_data(interim_tax_data, 'not assigned')
| bsd-2-clause |
shangwuhencc/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
    clf.set_params(anova__percentile=percentile)
    # Compute cross-validation score (n_jobs=1: single process)
    this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
    score_means.append(this_scores.mean())
    score_stds.append(this_scores.std())
# Mean score per percentile with the std as the error bar.
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
    'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
shyamalschandra/picochess | libs/requests/packages/urllib3/connection.py | 309 | 6533 | # urllib3/connection.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sys
import socket
from socket import timeout as SocketTimeout
try: # Python 3
from http.client import HTTPConnection as _HTTPConnection, HTTPException
except ImportError:
from httplib import HTTPConnection as _HTTPConnection, HTTPException
class DummyConnection(object):
    "Used to detect a failed ConnectionCls import."
    pass
# Probe for SSL support.  If the ssl module is unavailable, the fallbacks
# assigned first (ssl = None, HTTPSConnection = DummyConnection, and a
# BaseSSLError that can never be raised) remain in effect.
try: # Compiled with SSL?
    ssl = None
    HTTPSConnection = DummyConnection
    class BaseSSLError(BaseException):
        pass
    try: # Python 3
        from http.client import HTTPSConnection as _HTTPSConnection
    except ImportError:
        from httplib import HTTPSConnection as _HTTPSConnection
    import ssl
    BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
    pass
from .exceptions import (
    ConnectTimeoutError,
)
from .packages.ssl_match_hostname import match_hostname
from .packages import six
from .util import (
    assert_fingerprint,
    resolve_cert_reqs,
    resolve_ssl_version,
    ssl_wrap_socket,
)
# Default port used when a URL scheme carries no explicit port.
port_by_scheme = {
    'http': 80,
    'https': 443,
}
class HTTPConnection(_HTTPConnection, object):
    """
    Based on httplib.HTTPConnection but provides an extra constructor
    backwards-compatibility layer between older and newer Pythons.
    """
    default_port = port_by_scheme['http']
    # By default, disable Nagle's Algorithm.
    tcp_nodelay = 1
    def __init__(self, *args, **kw):
        # Strip constructor kwargs the running Python does not accept.
        if six.PY3: # Python 3
            kw.pop('strict', None)
        if sys.version_info < (2, 7): # Python 2.6 and older
            kw.pop('source_address', None)
        # Pre-set source_address in case we have an older Python like 2.6.
        self.source_address = kw.get('source_address')
        # Superclass also sets self.source_address in Python 2.7+.
        _HTTPConnection.__init__(self, *args, **kw)
    def _new_conn(self):
        """ Establish a socket connection and set nodelay settings on it.
        :return: a new socket connection
        """
        extra_args = []
        if self.source_address: # Python 2.7+
            extra_args.append(self.source_address)
        conn = socket.create_connection(
            (self.host, self.port), self.timeout, *extra_args)
        # Disable Nagle's Algorithm (see tcp_nodelay class attribute).
        conn.setsockopt(
            socket.IPPROTO_TCP, socket.TCP_NODELAY, self.tcp_nodelay)
        return conn
    def _prepare_conn(self, conn):
        """Adopt *conn* as this connection's socket, tunneling if needed."""
        self.sock = conn
        # the _tunnel_host attribute was added in python 2.6.3 (via
        # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
        # not have them.
        if getattr(self, '_tunnel_host', None):
            # TODO: Fix tunnel so it doesn't depend on self.sock state.
            self._tunnel()
    def connect(self):
        """Open a plain TCP connection to (self.host, self.port)."""
        conn = self._new_conn()
        self._prepare_conn(conn)
class HTTPSConnection(HTTPConnection):
    """HTTPS connection WITHOUT certificate verification.

    The socket is wrapped with ssl.wrap_socket using only the optional
    client key/cert; the peer's certificate is not validated.  Use
    VerifiedHTTPSConnection (below) for verification.
    """
    default_port = port_by_scheme['https']
    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw):
        HTTPConnection.__init__(self, host, port, strict=strict,
                                timeout=timeout, **kw)
        self.key_file = key_file
        self.cert_file = cert_file
        # Required property for Google AppEngine 1.9.0 which otherwise causes
        # HTTPS requests to go out as HTTP. (See Issue #356)
        self._protocol = 'https'
    def connect(self):
        conn = self._new_conn()
        self._prepare_conn(conn)
        # No cert_reqs/ca_certs: the server certificate is NOT verified here.
        self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file)
class VerifiedHTTPSConnection(HTTPSConnection):
    """
    Based on httplib.HTTPSConnection but wraps the socket with
    SSL certification.
    """
    cert_reqs = None
    ca_certs = None
    ssl_version = None
    # NOTE(review): class-level mutable dict -- shared by all instances
    # unless an instance assigns its own; confirm callers always override.
    conn_kw = {}
    def set_cert(self, key_file=None, cert_file=None,
                 cert_reqs=None, ca_certs=None,
                 assert_hostname=None, assert_fingerprint=None):
        """Record the TLS credentials/verification settings used by connect()."""
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
    def connect(self):
        # Add certificate verification
        try:
            sock = socket.create_connection(
                address=(self.host, self.port), timeout=self.timeout,
                **self.conn_kw)
        except SocketTimeout:
            raise ConnectTimeoutError(
                self, "Connection to %s timed out. (connect timeout=%s)" %
                (self.host, self.timeout))
        # Disable Nagle's Algorithm, matching HTTPConnection._new_conn.
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,
                        self.tcp_nodelay)
        resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
        resolved_ssl_version = resolve_ssl_version(self.ssl_version)
        hostname = self.host
        if getattr(self, '_tunnel_host', None):
            # _tunnel_host was added in Python 2.6.3
            # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
            self.sock = sock
            # Calls self._set_hostport(), so self.host is
            # self._tunnel_host below.
            self._tunnel()
            # Override the host with the one we're requesting data from.
            hostname = self._tunnel_host
        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file,
                                    cert_reqs=resolved_cert_reqs,
                                    ca_certs=self.ca_certs,
                                    server_hostname=hostname,
                                    ssl_version=resolved_ssl_version)
        # Verify the peer certificate by fingerprint if one was pinned,
        # otherwise by hostname matching (unless explicitly disabled with
        # assert_hostname=False).
        if resolved_cert_reqs != ssl.CERT_NONE:
            if self.assert_fingerprint:
                assert_fingerprint(self.sock.getpeercert(binary_form=True),
                                   self.assert_fingerprint)
            elif self.assert_hostname is not False:
                match_hostname(self.sock.getpeercert(),
                               self.assert_hostname or hostname)
if ssl:
    # SSL is available: keep the unverified implementation reachable for
    # testing, then make the verifying class the module's default
    # ``HTTPSConnection`` export.
    UnverifiedHTTPSConnection = HTTPSConnection
    HTTPSConnection = VerifiedHTTPSConnection
| gpl-3.0 |
jmehnle/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_job_list.py | 36 | 4523 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_job_list
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: List Ansible Tower jobs.
description:
- List Ansible Tower jobs. See
U(https://www.ansible.com/tower) for an overview.
options:
status:
description:
- Only list jobs with this status.
default: null
choices: ['pending', 'waiting', 'running', 'error', 'failed', 'canceled', 'successful']
page:
description:
- Page number of the results to fetch.
default: null
all_pages:
description:
- Fetch all the pages and return a single result.
default: False
query:
description:
- Query used to further filter the list of jobs. {"foo":"bar"} will be passed at ?foo=bar
default: null
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: List running jobs for the testing.yml playbook
tower_job_list:
status: running
query: {"playbook": "testing.yml"}
register: testing_jobs
tower_config_file: "~/tower_cli.cfg"
'''
RETURN = '''
count:
description: Total count of objects return
returned: success
type: int
sample: 51
next:
description: next page available for the listing
returned: success
type: int
sample: 3
previous:
description: previous page available for the listing
returned: success
type: int
sample: 1
results:
description: a list of job objects represented as dictionaries
returned: success
type: list
sample: [{"allow_simultaneous": false, "artifacts": {}, "ask_credential_on_launch": false,
"ask_inventory_on_launch": false, "ask_job_type_on_launch": false, "failed": false,
"finished": "2017-02-22T15:09:05.633942Z", "force_handlers": false, "forks": 0, "id": 2,
"inventory": 1, "job_explanation": "", "job_tags": "", "job_template": 5, "job_type": "run"}, ...]
'''
from ansible.module_utils.basic import AnsibleModule
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
from ansible.module_utils.ansible_tower import (
tower_auth_config,
tower_check_mode,
tower_argument_spec,
)
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False
def main():
    """Ansible entry point.

    Builds the argument spec, queries Ansible Tower for jobs matching the
    requested filters, and exits with the listing; fails the module on
    connection or request errors from tower-cli.
    """
    spec = tower_argument_spec()
    spec.update(dict(
        status=dict(choices=['pending', 'waiting', 'running', 'error',
                             'failed', 'canceled', 'successful']),
        page=dict(type='int'),
        all_pages=dict(type='bool', default=False),
        query=dict(type='dict'),
    ))
    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True
    )
    if not HAS_TOWER_CLI:
        module.fail_json(msg='ansible-tower-cli required for this module')

    params = module.params
    query = params.get('query')
    json_output = {}

    with settings.runtime_values(**tower_auth_config(module)):
        tower_check_mode(module)
        try:
            job_resource = tower_cli.get_resource('job')
            list_kwargs = {
                'status': params.get('status'),
                'page': params.get('page'),
                'all_pages': params.get('all_pages'),
            }
            if query:
                # tower-cli expects the extra filter as key/value pairs.
                list_kwargs['query'] = query.items()
            json_output = job_resource.list(**list_kwargs)
        except (exc.ConnectionError, exc.BadRequest) as excinfo:
            module.fail_json(msg='Failed to list jobs: {0}'.format(excinfo),
                             changed=False)
    module.exit_json(**json_output)


if __name__ == '__main__':
    main()
| gpl-3.0 |
e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/messaging/rabbitmq_user.py | 49 | 9782 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Chatham Financial <oss@chathamfinancial.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_user
short_description: Adds or removes users to RabbitMQ
description:
- Add or remove users to RabbitMQ and assign permissions
version_added: "1.1"
author: '"Chris Hoffman (@chrishoffman)"'
options:
user:
description:
- Name of user to add
required: true
default: null
aliases: [username, name]
password:
description:
- Password of user to add.
- To change the password of an existing user, you must also specify
C(force=yes).
required: false
default: null
tags:
description:
- User tags specified as comma delimited
required: false
default: null
permissions:
description:
- a list of dicts, each dict contains vhost, configure_priv, write_priv, and read_priv,
and represents a permission rule for that vhost.
- This option should be preferable when you care about all permissions of the user.
- You should use vhost, configure_priv, write_priv, and read_priv options instead
if you care about permissions for just some vhosts.
required: false
default: []
vhost:
description:
- vhost to apply access privileges.
- This option will be ignored when permissions option is used.
required: false
default: /
node:
description:
- erlang node name of the rabbit we wish to configure
required: false
default: rabbit
version_added: "1.2"
configure_priv:
description:
- Regular expression to restrict configure actions on a resource
for the specified vhost.
- By default all actions are restricted.
- This option will be ignored when permissions option is used.
required: false
default: ^$
write_priv:
description:
- Regular expression to restrict configure actions on a resource
for the specified vhost.
- By default all actions are restricted.
- This option will be ignored when permissions option is used.
required: false
default: ^$
read_priv:
description:
- Regular expression to restrict configure actions on a resource
for the specified vhost.
- By default all actions are restricted.
- This option will be ignored when permissions option is used.
required: false
default: ^$
force:
description:
- Deletes and recreates the user.
required: false
default: "no"
choices: [ "yes", "no" ]
state:
description:
- Specify if user is to be added or removed
required: false
default: present
choices: [present, absent]
'''
EXAMPLES = '''
# Add user to server and assign full access control on / vhost.
# The user might have permission rules for other vhost but you don't care.
- rabbitmq_user:
user: joe
password: changeme
vhost: /
configure_priv: .*
read_priv: .*
write_priv: .*
state: present
# Add user to server and assign full access control on / vhost.
# The user doesn't have permission rules for other vhosts
- rabbitmq_user:
user: joe
password: changeme
permissions:
- vhost: /
configure_priv: .*
read_priv: .*
write_priv: .*
state: present
'''
from ansible.module_utils.basic import AnsibleModule
class RabbitMqUser(object):
    """Wrapper around the ``rabbitmqctl`` CLI for managing one user.

    Holds the desired state (password, tags, permissions) and, after
    ``get()``, the observed state in ``_tags``/``_permissions`` so the
    ``has_*_modifications`` methods can diff the two.
    """
    def __init__(self, module, username, password, tags, permissions,
                 node, bulk_permissions=False):
        self.module = module
        self.username = username
        self.password = password
        self.node = node
        # Desired tags arrive as a comma-delimited string (or None/empty).
        if not tags:
            self.tags = list()
        else:
            self.tags = tags.split(',')
        self.permissions = permissions
        self.bulk_permissions = bulk_permissions
        # Observed state, populated by get().
        self._tags = None
        self._permissions = []
        self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)
    def _exec(self, args, run_in_check_mode=False):
        """Run rabbitmqctl with *args*; no-op (empty list) in check mode
        unless *run_in_check_mode* is set (used for read-only commands)."""
        if not self.module.check_mode or run_in_check_mode:
            cmd = [self._rabbitmqctl, '-q']
            if self.node is not None:
                cmd.extend(['-n', self.node])
            rc, out, err = self.module.run_command(cmd + args, check_rc=True)
            return out.splitlines()
        return list()
    def get(self):
        """Return True and record observed tags/permissions if the user exists."""
        users = self._exec(['list_users'], True)
        for user_tag in users:
            # Lines without a tab are not "<user>\t<tags>" records.
            if '\t' not in user_tag:
                continue
            user, tags = user_tag.split('\t')
            if user == self.username:
                # Tags are printed as e.g. "[admin, monitoring]".
                for c in ['[', ']', ' ']:
                    tags = tags.replace(c, '')
                if tags != '':
                    self._tags = tags.split(',')
                else:
                    self._tags = list()
                self._permissions = self._get_permissions()
                return True
        return False
    def _get_permissions(self):
        """Parse ``list_user_permissions`` output into a list of dicts.

        When not in bulk mode, only the single vhost from the desired
        permissions is considered."""
        perms_out = self._exec(['list_user_permissions', self.username], True)
        perms_list = list()
        for perm in perms_out:
            vhost, configure_priv, write_priv, read_priv = perm.split('\t')
            if not self.bulk_permissions:
                if vhost == self.permissions[0]['vhost']:
                    perms_list.append(dict(vhost=vhost, configure_priv=configure_priv,
                                           write_priv=write_priv, read_priv=read_priv))
                    break
            else:
                perms_list.append(dict(vhost=vhost, configure_priv=configure_priv,
                                       write_priv=write_priv, read_priv=read_priv))
        return perms_list
    def add(self):
        """Create the user; with no password, create then clear it so the
        account exists but cannot log in with a password."""
        if self.password is not None:
            self._exec(['add_user', self.username, self.password])
        else:
            self._exec(['add_user', self.username, ''])
            self._exec(['clear_password', self.username])
    def delete(self):
        self._exec(['delete_user', self.username])
    def set_tags(self):
        self._exec(['set_user_tags', self.username] + self.tags)
    def set_permissions(self):
        """Reconcile permissions: clear vhost rules not desired, then set
        desired rules not currently present."""
        for permission in self._permissions:
            if permission not in self.permissions:
                cmd = ['clear_permissions', '-p']
                cmd.append(permission['vhost'])
                cmd.append(self.username)
                self._exec(cmd)
        for permission in self.permissions:
            if permission not in self._permissions:
                cmd = ['set_permissions', '-p']
                cmd.append(permission['vhost'])
                cmd.append(self.username)
                cmd.append(permission['configure_priv'])
                cmd.append(permission['write_priv'])
                cmd.append(permission['read_priv'])
                self._exec(cmd)
    def has_tags_modifications(self):
        return set(self.tags) != set(self._tags)
    def has_permissions_modifications(self):
        # NOTE(review): sorting lists of dicts works on Python 2 only
        # (dicts are unorderable on Python 3) -- revisit if this module is
        # ever run under Python 3.
        return sorted(self._permissions) != sorted(self.permissions)
def main():
    """Ansible entry point: reconcile one RabbitMQ user to the desired state."""
    arg_spec = dict(
        user=dict(required=True, aliases=['username', 'name']),
        password=dict(default=None, no_log=True),
        tags=dict(default=None),
        permissions=dict(default=list(), type='list'),
        vhost=dict(default='/'),
        configure_priv=dict(default='^$'),
        write_priv=dict(default='^$'),
        read_priv=dict(default='^$'),
        force=dict(default='no', type='bool'),
        state=dict(default='present', choices=['present', 'absent']),
        node=dict(default=None)
    )
    module = AnsibleModule(
        argument_spec=arg_spec,
        supports_check_mode=True
    )
    username = module.params['user']
    password = module.params['password']
    tags = module.params['tags']
    permissions = module.params['permissions']
    vhost = module.params['vhost']
    configure_priv = module.params['configure_priv']
    write_priv = module.params['write_priv']
    read_priv = module.params['read_priv']
    force = module.params['force']
    state = module.params['state']
    node = module.params['node']
    # No explicit permissions list given: build a single-vhost rule from the
    # flat vhost/*_priv options and switch off bulk mode so only that vhost
    # is reconciled.
    bulk_permissions = True
    if not permissions:
        perm = {
            'vhost': vhost,
            'configure_priv': configure_priv,
            'write_priv': write_priv,
            'read_priv': read_priv
        }
        permissions.append(perm)
        bulk_permissions = False
    rabbitmq_user = RabbitMqUser(module, username, password, tags, permissions,
                                 node, bulk_permissions=bulk_permissions)
    result = dict(changed=False, user=username, state=state)
    if rabbitmq_user.get():
        if state == 'absent':
            rabbitmq_user.delete()
            result['changed'] = True
        else:
            # force=yes: recreate the user (the only way to change a password),
            # then re-read observed state before diffing tags/permissions.
            if force:
                rabbitmq_user.delete()
                rabbitmq_user.add()
                rabbitmq_user.get()
                result['changed'] = True
            if rabbitmq_user.has_tags_modifications():
                rabbitmq_user.set_tags()
                result['changed'] = True
            if rabbitmq_user.has_permissions_modifications():
                rabbitmq_user.set_permissions()
                result['changed'] = True
    elif state == 'present':
        rabbitmq_user.add()
        rabbitmq_user.set_tags()
        rabbitmq_user.set_permissions()
        result['changed'] = True
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| bsd-3-clause |
shaufi/odoo | addons/l10n_fr/report/__init__.py | 424 | 1475 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import base_report
import bilan_report
import compute_resultant_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
TheoRettisch/p2pool-giarcoin | p2pool/__init__.py | 278 | 1595 | import os
import re
import sys
import traceback
import subprocess
def check_output(*popenargs, **kwargs):
    """Run a command and return its captured stdout.

    Backport-style helper mirroring subprocess.check_output: stdout is
    captured via a pipe, and a nonzero exit status raises
    ValueError((returncode, output)) instead of CalledProcessError.
    """
    proc = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    captured = proc.communicate()[0]
    if proc.returncode:
        raise ValueError((proc.returncode, captured))
    return captured
def _get_version():
    """Best-effort version string for the running checkout (Python 2 syntax).

    Tries, in order: ``git describe`` (and the Windows ``git.cmd`` variant),
    reading ``.git/HEAD`` directly, and parsing a ``p2pool-X.Y`` directory
    name; otherwise returns 'unknown <hex>' so it never raises.
    """
    try:
        try:
            return check_output(['git', 'describe', '--always', '--dirty'], cwd=os.path.dirname(os.path.abspath(sys.argv[0]))).strip()
        except:
            pass
        # Windows installs expose git as git.cmd.
        try:
            return check_output(['git.cmd', 'describe', '--always', '--dirty'], cwd=os.path.dirname(os.path.abspath(sys.argv[0]))).strip()
        except:
            pass
        # No usable git binary: read the ref directly from .git.
        root_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
        git_dir = os.path.join(root_dir, '.git')
        if os.path.exists(git_dir):
            head = open(os.path.join(git_dir, 'HEAD')).read().strip()
            prefix = 'ref: '
            if head.startswith(prefix):
                path = head[len(prefix):].split('/')
                # Abbreviate the commit hash to 7 chars, like git describe.
                return open(os.path.join(git_dir, *path)).read().strip()[:7]
            else:
                return head[:7]
        # Fall back to a release-archive directory name like p2pool-1.2.
        dir_name = os.path.split(root_dir)[1]
        match = re.match('p2pool-([.0-9]+)', dir_name)
        if match:
            return match.groups()[0]
        return 'unknown %s' % (dir_name.encode('hex'),)
    except Exception, e:
        # Python 2 except syntax; never let version detection crash startup.
        traceback.print_exc()
        return 'unknown %s' % (str(e).encode('hex'),)
# Computed once at import time.
__version__ = _get_version()
DEBUG = True
rhelmer/socorro-lib | scripts/cmp_hbase_s3.py | 12 | 7214 | #!/usr/bin/env python
import os, sys
import happybase
import logging
from boto.sqs import connect_to_region as sqs_connect
from boto.sqs.jsonmessage import JSONMessage
from boto.s3 import connect_to_region as s3_connect
from boto.s3.key import Key
from boto.exception import BotoServerError
from multiprocessing import Process as TaskClass
from multiprocessing import JoinableQueue as Queue
import signal
import random
from collections import deque
import hashlib
logger = logging.getLogger(__name__)
# Following params need to be adjusted based on payload size, bandwidth etc
MAX_ROWS_IN_FLIGHT = 4096
TASK_QUEUE_SIZE = MAX_ROWS_IN_FLIGHT * 4
class HBaseSource:
    """Generator-style reader over the HBase 'crash_reports' table (Python 2).

    Scans a key range via Thrift, reconnecting and resuming from the last
    read key on IOError, and yields (row_key, columns_dict) pairs.
    """
    def __init__(self, addr, row_range, max_rows = 2048, batch_size = 256, stop_after_nrows = -1):
        # addr: sequence of Thrift host names; one is picked at random per
        # (re)connection.  row_range: (start_key, end_key).
        self.thrift_addr = addr
        self.start_row, self.end_row = row_range
        self.max_rows = max_rows
        self.batch_size = batch_size
        self.stop_after_nrows = stop_after_nrows
        # Cap the scan size when a small row limit was requested.
        if (self.stop_after_nrows > 0) and (stop_after_nrows < max_rows):
            self.max_rows = self.stop_after_nrows + 1
    def items(self):
        """Yield (key, columns) for every row in the range, resuming scans
        across Thrift IOErrors."""
        prev_last_read_key = None
        curr_last_read_key = self.start_row
        end_row = self.end_row
        stop_after_nrows = self.stop_after_nrows
        total_read_rows = 0
        while True:
            src_tbl = happybase.Connection(random.choice(self.thrift_addr)).table('crash_reports')
            nrows = 0
            try:
                logger.debug('fetch %d rows of data via thrift', self.max_rows)
                # scan fetches rows with key in the range [row_start, row_stop)
                # this necessitates the check for repeating keys as stopping condition
                #
                logger.info("scan start")
                data = deque(src_tbl.scan(row_start = curr_last_read_key,
                                          row_stop = end_row,
                                          columns = ['raw_data', 'processed_data', 'meta_data'],
                                          limit = self.max_rows,
                                          batch_size = self.batch_size))
                # NOTE(review): data[0][0] raises IndexError on an empty scan
                # result, which the `except Exception` below re-raises --
                # confirm an empty range is impossible here.
                logger.info("scan end %d rows starting at %s", len(data), data[0][0])
                while True:
                    if not data:
                        break
                    key, val = data.popleft()
                    # NOTE(review): resumed scans restart at
                    # curr_last_read_key, but this dedup check compares
                    # against prev_last_read_key -- verify the intended key
                    # is being skipped.
                    if (key == prev_last_read_key):
                        # last record from previous batch should be ignored
                        continue
                    yield key, val
                    nrows += 1
                    total_read_rows += 1
                    if (stop_after_nrows > 0) and (stop_after_nrows == total_read_rows):
                        break
                    prev_last_read_key = curr_last_read_key
                    curr_last_read_key = key
                logger.debug('read %d rows of data from hbase ending at %s; total %s', nrows, curr_last_read_key, total_read_rows)
                # Fewer rows than requested means the range is exhausted.
                if nrows < self.max_rows:
                    print >> sys.stderr, "end of range. exiting"
                    break
            except happybase.hbase.ttypes.IOError:
                # Transient Thrift failure: reconnect and resume the scan.
                logger.exception('caught exception. retrying.')
            except Exception:
                logger.exception('unrecoverable exception.')
                raise
class SourceWorker(TaskClass):
    """Process that reads rows from an HBaseSource and enqueues
    (s3_key, payload) work items for the S3Workers (Python 2)."""
    def __init__(self, queue, source_config):
        TaskClass.__init__(self)
        # source_config is the positional argument tuple for HBaseSource.
        self.source = HBaseSource(*source_config)
        self.queue = queue
    def run(self):
        num_rows_written = 0
        total_size_written = 0
        # NOTE(review): the template has no {env} field, yet env= is passed
        # to format() below (harmless, but likely a leftover) -- confirm the
        # intended S3 layout.
        s3_path_tmpl = 'v1/{ftype}/{uuid}'
        env = 'stage'
        for key, cols in self.source.items():
            dump_names = []
            for j in cols.keys():
                # Map the HBase column to an S3 suffix; unmapped columns
                # are skipped.
                suffix = get_suffix(j)
                if not suffix:
                    #logger.info('column %s ignored for key %s', j, key)
                    continue
                if j.startswith('raw_data'):
                    dump_names.append(suffix)
                # crashstats/stage/v1/
                # format {{bucket}}/{{prefix}}/{{version}}/{{crash_type}}/{{crash_id}}
                # key[7:] drops the 7-char salt/date prefix of the HBase row
                # key to recover the crash uuid.
                skey = s3_path_tmpl.format(env = env,
                                           uuid = key[7:],
                                           ftype = suffix)
                self.queue.put((skey, cols[j]))
                total_size_written += len(cols[j])
            num_rows_written += 1
            if ((num_rows_written % 1000) == 0):
                logger.info("wrote %d rows, at %s", num_rows_written, key)
                logger.warn("qsize is %d", self.queue.qsize())
        print >> sys.stderr, "SourceWorker DONE", num_rows_written, total_size_written
class S3Worker(TaskClass):
    """Process that compares queued HBase payloads against the MD5 (etag)
    of the corresponding S3 object, printing mismatches (Python 2)."""
    def __init__(self, s3_region, s3_bucket, task_queue, result_queue):
        # NOTE(review): this runs in the PARENT process at construction
        # time, so SIGINT ends up ignored in the parent too -- if the intent
        # was child-only, it belongs in run().
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        TaskClass.__init__(self)
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.s3_region = s3_region
        self.s3_bucket = s3_bucket
        self.num_rows = 0
    def setup_s3(self):
        # Deferred to run() so the boto connection is created in the child.
        self.s3 = s3_connect(self.s3_region)
        self.bucket = self.s3.get_bucket(self.s3_bucket)
    def cmp_s3_hash(self, key, val):
        """Report if the S3 etag (quotes stripped) differs from md5(val)."""
        k = self.bucket.get_key(key)
        s3_md5 = k.etag[1:-1]
        hbase_md5 = hashlib.md5(val).hexdigest()
        self.num_rows += 1
        if s3_md5 != hbase_md5:
            print >> sys.stderr, "MISMATCH", k, key, s3_md5, hbase_md5
    def run(self):
        self.setup_s3()
        while True:
            kv = self.task_queue.get()
            # None is the shutdown sentinel pushed by main().
            if kv is None:
                print >> sys.stderr, '%s: Exiting' % self.name
                self.task_queue.task_done()
                break
            k, v = kv
            # Retry forever on transient S3 server errors.
            while True:
                try:
                    self.cmp_s3_hash(k, v)
                    break
                except BotoServerError:
                    pass
            self.task_queue.task_done()
        return
def get_suffix(colname):
    """Map an HBase column name to the S3 object-type suffix.

    Known columns map to fixed names; any other 'raw_data:*' column keeps
    the part after the column-family separator; everything else maps to
    None (caller skips it).
    """
    known_suffixes = {
        'processed_data:json': 'processed_crash',
        'raw_data:dump': 'dump',
        'meta_data:json': 'raw_crash',
        'raw_data:upload_file_minidump_browser': 'upload_file_minidump_browser',
        'raw_data:upload_file_minidump_flash1': 'upload_file_minidump_flash1',
        'raw_data:upload_file_minidump_flash2': 'upload_file_minidump_flash2',
    }
    try:
        return known_suffixes[colname]
    except KeyError:
        pass
    if colname.startswith('raw_data'):
        # Unknown raw_data columns keep whatever follows the family name.
        return colname.split(':', 1)[1]
    return None
def main(num_workers = 64):
    """Spawn S3 comparison workers, then feed them by scanning HBase key
    ranges (one per hex row-key prefix) for the given date (Python 2)."""
    if len(sys.argv) != 3:
        show_usage_and_quit()
    queue = Queue(TASK_QUEUE_SIZE)
    # start s3 workers
    workers = [S3Worker('us-west-2', 'org.mozilla.crash-stats.production.crashes', queue, None)
               for i in xrange(num_workers)]
    for i in workers:
        i.start()
    thrift_hosts = sys.argv[1].split(',')
    date = sys.argv[2]
    # start hbase workers
    # Row keys are salted with a leading hex digit; build one
    # [<d><date><d>, <d><date><d>g) range per salt digit.
    key_ranges = []
    for i in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']:
        key_ranges.append(('%s%s%s' % (i, date, i), '%s%s%sg' % (i, date, i)))
    # Ranges are processed in groups of num_hbase_workers, joining each
    # group before starting the next.
    num_hbase_workers = 1
    for i in xrange(0, len(key_ranges), num_hbase_workers):
        src_workers = []
        krng = key_ranges[i : (i + num_hbase_workers)]
        for j in range(len(krng)):
            src_workers.append(SourceWorker(queue, (thrift_hosts, krng[j], 2048, 256, 10241)))
        for w in src_workers:
            print "starting src worker", w
            w.start()
        for w in src_workers:
            w.join()
    # One None sentinel per S3 worker, then wait for the queue to drain.
    for i in workers:
        queue.put(None)
    queue.join()
def show_usage_and_quit():
    """Print a usage message on stderr and exit with status 2."""
    print >> sys.stderr, "Usage: %s hosts('host1,host2,host3') date(YYMMDD)" % (sys.argv[0])
    sys.exit(2)
if __name__ == '__main__':
logging.basicConfig(format = '%(asctime)s %(name)s:%(levelname)s: %(message)s',
level = logging.INFO)
main()
| mpl-2.0 |
mgedmin/ansible | lib/ansible/modules/network/wakeonlan.py | 8 | 4047 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: wakeonlan
version_added: 2.2
short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
description:
- The M(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
options:
mac:
description:
- MAC address to send Wake-on-LAN broadcast packet for
required: true
default: null
broadcast:
description:
- Network broadcast address to use for broadcasting magic Wake-on-LAN packet
required: false
default: 255.255.255.255
port:
description:
- UDP port to use for magic Wake-on-LAN packet
required: false
default: 7
author: "Dag Wieers (@dagwieers)"
todo:
- Add arping support to check whether the system is up (before and after)
- Enable check-mode support (when we have arping support)
- Does not have SecureOn password support
notes:
- This module sends a magic packet, without knowing whether it worked
- Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS)
- Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first) when turned off
'''
EXAMPLES = '''
# Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
- wakeonlan:
mac: '00:00:5E:00:53:66'
broadcast: 192.0.2.23
  delegate_to: localhost
- wakeonlan:
mac: 00:00:5E:00:53:66
port: 9
delegate_to: localhost
'''
RETURN='''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import socket
import struct
def wakeonlan(module, mac, broadcast, port):
    """ Send a magic Wake-on-LAN packet. """
    mac_orig = mac
    # Remove possible separator from MAC address
    # (12 hex digits + 5 separators; whatever character sits at index 2 is
    # taken as the separator, so both ':' and '-' notations work)
    if len(mac) == 12 + 5:
        mac = mac.replace(mac[2], '')
    # If we don't end up with 12 hexadecimal characters, fail
    if len(mac) != 12:
        module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig)
    # Test if it converts to an integer, otherwise fail
    try:
        int(mac, 16)
    except ValueError:
        module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig)
    # Create payload for magic packet
    # Magic packet = 6 bytes of 0xFF followed by repetitions of the MAC
    # (20 repetitions here).
    # NOTE(review): building `data` by joining struct.pack() results relies on
    # Python 2 str semantics; under Python 3 this would need a bytes buffer.
    data = ''
    padding = ''.join(['FFFFFFFFFFFF', mac * 20])
    for i in range(0, len(padding), 2):
        data = ''.join([data, struct.pack('B', int(padding[i: i + 2], 16))])
    # Broadcast payload to network
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    try:
        sock.sendto(data, (broadcast, port))
    except socket.error:
        e = get_exception()
        module.fail_json(msg=str(e))
def main():
    """Ansible entry point: parse module arguments and send the magic packet."""
    module = AnsibleModule(
        argument_spec = dict(
            mac = dict(required=True, type='str'),
            broadcast = dict(required=False, default='255.255.255.255'),
            port = dict(required=False, type='int', default=7),
        ),
    )
    mac = module.params.get('mac')
    broadcast = module.params.get('broadcast')
    port = module.params.get('port')
    wakeonlan(module, mac, broadcast, port)
    # wakeonlan() calls fail_json() on any error, so reaching this point means
    # the packet was sent. 'changed' is always True -- sending a WoL packet has
    # no observable prior state to compare against.
    module.exit_json(changed=True)
if __name__ == '__main__':
main()
| gpl-3.0 |
roglew/pappy-proxy | pappyproxy/interface/decode.py | 1 | 10668 | import html
import base64
import datetime
import gzip
import shlex
import string
import urllib.parse
from io import BytesIO, StringIO

from ..util import hexdump, printable_data, copy_to_clipboard, clipboard_contents, encode_basic_auth, parse_basic_auth
from ..console import CommandError
def print_maybe_bin(s):
    """Print bytes `s` as decoded text when fully printable, else as a hex dump."""
    is_binary = any(chr(byte) not in string.printable for byte in s)
    if is_binary:
        print(hexdump(s))
    else:
        print(s.decode())
def asciihex_encode_helper(s):
    """Hex-encode each byte of `s` as exactly two lowercase hex digits.

    Fix: the previous '{0:x}' format emitted a single digit for bytes below
    0x10, which corrupted the pairwise grouping asciihex_decode_helper relies
    on; '{0:02x}' keeps the encoding round-trippable.
    """
    return ''.join('{0:02x}'.format(c) for c in s).encode()
def asciihex_decode_helper(s):
    """Decode an ascii-hex byte string (b'6162' -> b'ab').

    Fix: the previous implementation built a str and called .encode(), which
    UTF-8-mangled any decoded byte above 0x7f into two bytes. Building raw
    bytes directly preserves all 256 byte values. A trailing unpaired hex
    digit is ignored, matching the old pairwise behavior.

    Raises CommandError on non-hex input.
    """
    try:
        return bytes(int(s[i:i + 2], 16) for i in range(0, len(s) - 1, 2))
    except Exception as e:
        raise CommandError(e)
def gzip_encode_helper(s):
    """Gzip-compress the byte string `s` and return the compressed bytes.

    Fix: `StringIO.StringIO()` raised AttributeError because the module
    imports the class itself (`from io import StringIO`); gzip output is also
    binary, so a BytesIO buffer is required.
    """
    out = BytesIO()
    with gzip.GzipFile(fileobj=out, mode="wb") as f:
        f.write(s)
    return out.getvalue()
def gzip_decode_helper(s):
    """Gunzip the byte string `s` and return the decompressed bytes.

    Fix: `StringIO.StringIO(s)` raised AttributeError because the module
    imports the class itself (`from io import StringIO`); gzip input is also
    binary, so a BytesIO buffer is required. The file handle is now closed
    deterministically via a context manager.
    """
    with gzip.GzipFile('', 'rb', 9, BytesIO(s)) as dec:
        return dec.read()
def base64_decode_helper(s):
    """Base64-decode `s`, retrying with up to 4 padding characters appended.

    Fixes two Python 3 issues in the original: b64decode signals bad padding
    with binascii.Error (a ValueError subclass), not TypeError, so the retry
    loop never ran; and the retry concatenated str '=' onto bytes input.
    Raises CommandError when no padding variant decodes.
    """
    try:
        return base64.b64decode(s)
    except (TypeError, ValueError):
        for i in range(1, 5):
            try:
                return base64.b64decode(s + b'=' * i)
            except (TypeError, ValueError):
                pass
        raise CommandError("Unable to base64 decode string")
def url_decode_helper(s):
    """Percent-decode a URL-encoded byte string."""
    return urllib.parse.unquote(s.decode()).encode()
def url_encode_helper(s):
    """Percent-encode special characters in a byte string (spaces become '+')."""
    return urllib.parse.quote_plus(s.decode()).encode()
def html_encode_helper(s):
    """Encode every byte of `s` as an HTML hexadecimal character reference."""
    return ''.join('&#x{0:x};'.format(byte) for byte in s).encode()
def html_decode_helper(s):
    """Decode HTML entities in a byte string (e.g. b'&amp;amp;' -> b'&amp;')."""
    decoded_text = html.unescape(s.decode())
    return decoded_text.encode()
def _code_helper(args, func, copy=True):
if len(args) == 0:
s = clipboard_contents().encode()
print('Will decode:')
print(printable_data(s))
s = func(s)
if copy:
try:
copy_to_clipboard(s)
except Exception as e:
print('Result cannot be copied to the clipboard. Result not copied.')
raise e
return s
else:
s = func(args[0].encode())
if copy:
try:
copy_to_clipboard(s)
except Exception as e:
print('Result cannot be copied to the clipboard. Result not copied.')
raise e
return s
def base64_decode(client, args):
    """Base64 decode a string (or the clipboard if no argument is given).

    The result is copied to the clipboard.
    """
    decoded = _code_helper(args, base64_decode_helper)
    print_maybe_bin(decoded)
def base64_encode(client, args):
    """Base64 encode a string (or the clipboard if no argument is given).

    The result is copied to the clipboard.
    """
    encoded = _code_helper(args, base64.b64encode)
    print_maybe_bin(encoded)
def url_decode(client, args):
    """URL decode a string (or the clipboard if no argument is given).

    The result is copied to the clipboard.
    """
    decoded = _code_helper(args, url_decode_helper)
    print_maybe_bin(decoded)
def url_encode(client, args):
    """URL encode special characters in a string (or the clipboard).

    The result is copied to the clipboard.
    """
    encoded = _code_helper(args, url_encode_helper)
    print_maybe_bin(encoded)
def asciihex_decode(client, args):
    """Decode an ascii-hex string (or the clipboard if no argument is given).

    The result is copied to the clipboard.
    """
    decoded = _code_helper(args, asciihex_decode_helper)
    print_maybe_bin(decoded)
def asciihex_encode(client, args):
    """Hex-encode each character of a string (or the clipboard).

    The result is copied to the clipboard.
    """
    encoded = _code_helper(args, asciihex_encode_helper)
    print_maybe_bin(encoded)
def html_decode(client, args):
    """Decode an HTML-encoded string (or the clipboard).

    The result is copied to the clipboard.
    """
    decoded = _code_helper(args, html_decode_helper)
    print_maybe_bin(decoded)
def html_encode(client, args):
    """HTML-escape a string (or the clipboard).

    The result is copied to the clipboard.
    """
    encoded = _code_helper(args, html_encode_helper)
    print_maybe_bin(encoded)
def gzip_decode(client, args):
    """Gunzip a string (or the clipboard if no argument is given).

    The result is copied to the clipboard.
    """
    decompressed = _code_helper(args, gzip_decode_helper)
    print_maybe_bin(decompressed)
def gzip_encode(client, args):
    """Gzip a string (or the clipboard if no argument is given).

    The result is NOT copied to the clipboard.
    """
    compressed = _code_helper(args, gzip_encode_helper, copy=False)
    print_maybe_bin(compressed)
def base64_decode_raw(client, args):
    """base64_decode variant: raw output (no hex dump), result not copied.

    Intended for redirecting the output to a file.
    """
    result = _code_helper(args, base64_decode_helper, copy=False)
    print(result)
def base64_encode_raw(client, args):
    """base64_encode variant: raw output (no hex dump), result not copied.

    Intended for redirecting the output to a file.
    """
    result = _code_helper(args, base64.b64encode, copy=False)
    print(result)
def url_decode_raw(client, args):
    """url_decode variant: raw output (no hex dump), result not copied.

    Intended for redirecting the output to a file.
    """
    result = _code_helper(args, url_decode_helper, copy=False)
    print(result)
def url_encode_raw(client, args):
    """url_encode variant: raw output (no hex dump), result not copied.

    Intended for redirecting the output to a file.
    """
    result = _code_helper(args, url_encode_helper, copy=False)
    print(result)
def asciihex_decode_raw(client, args):
    """asciihex_decode variant: raw output (no hex dump), result not copied.

    Intended for redirecting the output to a file.
    """
    result = _code_helper(args, asciihex_decode_helper, copy=False)
    print(result)
def asciihex_encode_raw(client, args):
    """asciihex_encode variant: raw output (no hex dump), result not copied.

    Intended for redirecting the output to a file.
    """
    result = _code_helper(args, asciihex_encode_helper, copy=False)
    print(result)
def html_decode_raw(client, args):
    """html_decode variant: raw output (no hex dump), result not copied.

    Intended for redirecting the output to a file.
    """
    result = _code_helper(args, html_decode_helper, copy=False)
    print(result)
def html_encode_raw(client, args):
    """html_encode variant: raw output (no hex dump), result not copied.

    Intended for redirecting the output to a file.
    """
    result = _code_helper(args, html_encode_helper, copy=False)
    print(result)
def gzip_decode_raw(client, args):
    """gzip_decode variant: raw output (no hex dump), result not copied.

    Intended for redirecting the output to a file.
    """
    result = _code_helper(args, gzip_decode_helper, copy=False)
    print(result)
def gzip_encode_raw(client, args):
    """gzip_encode variant: raw output (no hex dump), result not copied.

    Intended for redirecting the output to a file.
    """
    result = _code_helper(args, gzip_encode_helper, copy=False)
    print(result)
def unix_time_decode_helper(line):
unix_time = int(line.strip())
dtime = datetime.datetime.fromtimestamp(unix_time)
return dtime.strftime('%Y-%m-%d %H:%M:%S')
def unix_time_decode(client, args):
    """Convert a unix timestamp argument (or the clipboard) to a date string."""
    result = _code_helper(args, unix_time_decode_helper)
    print(result)
def http_auth_encode(client, args):
    """Encode a username/password pair as an HTTP Basic Auth value."""
    try:
        username, password = args
    except ValueError:
        raise CommandError('Usage: http_auth_encode <username> <password>')
    print(encode_basic_auth(username, password))
def http_auth_decode(client, args):
    """Decode an HTTP Basic Auth value and print the username and password.

    Fix: the original called decode_basic_auth(), which is not defined
    anywhere -- the helper imported at the top of this module is
    parse_basic_auth(), so the command always crashed with a NameError.
    Also validates the argument count, mirroring http_auth_encode.
    """
    if len(args) != 1:
        raise CommandError('Usage: http_auth_decode <encoded value>')
    username, password = parse_basic_auth(args[0])
    print(username)
    print(password)
def load_cmds(cmd):
    """Register every encode/decode command, and its short alias, on `cmd`.

    Each command maps to a (handler, completer) pair; None means no
    tab-completion. Aliases follow the pattern <op-abbrev>[r] where the
    trailing 'r' selects the *_raw variant.
    """
    cmd.set_cmds({
        'base64_decode': (base64_decode, None),
        'base64_encode': (base64_encode, None),
        'asciihex_decode': (asciihex_decode, None),
        'asciihex_encode': (asciihex_encode, None),
        'url_decode': (url_decode, None),
        'url_encode': (url_encode, None),
        'html_decode': (html_decode, None),
        'html_encode': (html_encode, None),
        'gzip_decode': (gzip_decode, None),
        'gzip_encode': (gzip_encode, None),
        'base64_decode_raw': (base64_decode_raw, None),
        'base64_encode_raw': (base64_encode_raw, None),
        'asciihex_decode_raw': (asciihex_decode_raw, None),
        'asciihex_encode_raw': (asciihex_encode_raw, None),
        'url_decode_raw': (url_decode_raw, None),
        'url_encode_raw': (url_encode_raw, None),
        'html_decode_raw': (html_decode_raw, None),
        'html_encode_raw': (html_encode_raw, None),
        'gzip_decode_raw': (gzip_decode_raw, None),
        'gzip_encode_raw': (gzip_encode_raw, None),
        'unixtime_decode': (unix_time_decode, None),
        'httpauth_encode': (http_auth_encode, None),
        'httpauth_decode': (http_auth_decode, None)
        })
    cmd.add_aliases([
        ('base64_decode', 'b64d'),
        ('base64_encode', 'b64e'),
        ('asciihex_decode', 'ahd'),
        ('asciihex_encode', 'ahe'),
        ('url_decode', 'urld'),
        ('url_encode', 'urle'),
        ('html_decode', 'htmld'),
        ('html_encode', 'htmle'),
        ('gzip_decode', 'gzd'),
        ('gzip_encode', 'gze'),
        ('base64_decode_raw', 'b64dr'),
        ('base64_encode_raw', 'b64er'),
        ('asciihex_decode_raw', 'ahdr'),
        ('asciihex_encode_raw', 'aher'),
        ('url_decode_raw', 'urldr'),
        ('url_encode_raw', 'urler'),
        ('html_decode_raw', 'htmldr'),
        ('html_encode_raw', 'htmler'),
        ('gzip_decode_raw', 'gzdr'),
        ('gzip_encode_raw', 'gzer'),
        ('unixtime_decode', 'uxtd'),
        ('httpauth_encode', 'hae'),
        ('httpauth_decode', 'had'),
    ])
| mit |
nwchandler/ansible | lib/ansible/modules/remote_management/ipmi/ipmi_boot.py | 69 | 6041 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipmi_boot
short_description: Management of order of boot devices
description:
- Use this module to manage order of boot devices
version_added: "2.2"
options:
name:
description:
- Hostname or ip address of the BMC.
required: true
port:
description:
- Remote RMCP port.
required: false
default: 623
user:
description:
- Username to use to connect to the BMC.
required: true
password:
description:
- Password to connect to the BMC.
required: true
default: null
bootdev:
description:
- Set boot device to use on next reboot
required: true
choices:
- network -- Request network boot
- hd -- Boot from hard drive
- safe -- Boot from hard drive, requesting 'safe mode'
- optical -- boot from CD/DVD/BD drive
- setup -- Boot into setup utility
- default -- remove any IPMI directed boot device request
state:
description:
      - Whether to ensure that the given boot device is set.
default: present
choices:
        - present -- Ensure the given boot device is requested
        - absent -- Ensure the given boot device is not requested
persistent:
description:
- If set, ask that system firmware uses this device beyond next boot.
Be aware many systems do not honor this.
required: false
type: bool
default: false
uefiboot:
description:
- If set, request UEFI boot explicitly.
Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option.
In practice, this flag not being set does not preclude UEFI boot on any system I've encountered.
required: false
type: bool
default: false
requirements:
- "python >= 2.6"
- pyghmi
author: "Bulat Gaifullin (gaifullinbf@gmail.com)"
'''
RETURN = '''
bootdev:
description: The boot device name which will be used beyond next boot.
returned: success
type: string
sample: default
persistent:
description: If True, system firmware will use this device beyond next boot.
returned: success
type: bool
sample: false
uefimode:
description: If True, system firmware will use UEFI boot explicitly beyond next boot.
returned: success
type: bool
sample: false
'''
EXAMPLES = '''
# Ensure bootdevice is HD.
- ipmi_boot:
name: test.testdomain.com
user: admin
password: password
bootdev: hd
# Ensure bootdevice is not Network
- ipmi_boot:
name: test.testdomain.com
user: admin
password: password
bootdev: network
state: absent
'''
try:
from pyghmi.ipmi import command
except ImportError:
command = None
from ansible.module_utils.basic import *
def main():
    """Ansible entry point: reconcile the BMC's boot device with the request."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            port=dict(default=623, type='int'),
            user=dict(required=True, no_log=True),
            password=dict(required=True, no_log=True),
            state=dict(default='present', choices=['present', 'absent']),
            bootdev=dict(required=True, choices=['network', 'hd', 'safe', 'optical', 'setup', 'default']),
            persistent=dict(default=False, type='bool'),
            uefiboot=dict(default=False, type='bool')
        ),
        supports_check_mode=True,
    )
    if command is None:
        module.fail_json(msg='the python pyghmi module is required')
    name = module.params['name']
    port = module.params['port']
    user = module.params['user']
    password = module.params['password']
    state = module.params['state']
    bootdev = module.params['bootdev']
    persistent = module.params['persistent']
    uefiboot = module.params['uefiboot']
    # `request` stays empty when the BMC is already in the desired state.
    request = dict()
    if state == 'absent' and bootdev == 'default':
        # 'absent' is implemented by reverting to 'default', so this
        # combination is contradictory.
        module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.")
    # --- run command ---
    try:
        ipmi_cmd = command.Command(
            bmc=name, userid=user, password=password, port=port
        )
        module.debug('ipmi instantiated - name: "%s"' % name)
        current = ipmi_cmd.get_bootdev()
        # uefimode may not supported by BMC, so use desired value as default
        current.setdefault('uefimode', uefiboot)
        if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot):
            request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent)
        elif state == 'absent' and current['bootdev'] == bootdev:
            # 'absent': clear the request by reverting to the default device.
            request = dict(bootdev='default')
        else:
            # Already in the desired state -- report no change.
            module.exit_json(changed=False, **current)
        if module.check_mode:
            response = dict(bootdev=request['bootdev'])
        else:
            response = ipmi_cmd.set_bootdev(**request)
        if 'error' in response:
            module.fail_json(msg=response['error'])
        # The request uses pyghmi's persist/uefiboot key names; translate them
        # back to the documented persistent/uefimode return values.
        if 'persist' in request:
            response['persistent'] = request['persist']
        if 'uefiboot' in request:
            response['uefimode'] = request['uefiboot']
        module.exit_json(changed=True, **response)
    except Exception as e:
        module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
nirmeshk/oh-mainline | vendor/packages/distribute/pkg_resources.py | 75 | 89800 | """Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
import sys, os, zipimport, time, re, imp, types
from urlparse import urlparse, urlunparse
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# This marker is used to simplify the process that checks is the
# setuptools package was installed by the Setuptools project
# or by the Distribute project, in case Setuptools creates
# a distribution with the same version.
#
# The bootstrapping script for instance, will check if this
# attribute is present to decide wether to reinstall the package
_distribute = True
def _bypass_ensure_directory(name, mode=0777):
    """Recursively create the parent directories of `name` using raw os calls.

    Uses the mkdir/isdir/split references captured at import time so it works
    even when a sandbox has since patched the os module.
    """
    # Sandbox-bypassing version of ensure_directory()
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    dirname, filename = split(name)
    if dirname and filename and not isdir(dirname):
        _bypass_ensure_directory(dirname)
        mkdir(dirname, mode)
_state_vars = {}
def _declare_state(vartype, **kw):
    """Install module globals (name -> value) and tag them with `vartype`.

    The tag selects which _sget_<vartype>/_sset_<vartype> helper pair
    __getstate__/__setstate__ use to snapshot and restore each global.
    """
    g = globals()
    for name, val in kw.iteritems():
        g[name] = val
        _state_vars[name] = vartype
def __getstate__():
    """Snapshot all registered module-level state into a plain dict."""
    state = {}
    g = globals()
    for k, v in _state_vars.iteritems():
        # dispatch to the _sget_<vartype> helper registered for this global
        state[k] = g['_sget_'+v](g[k])
    return state
def __setstate__(state):
    """Restore module-level state previously captured by __getstate__()."""
    g = globals()
    for k, v in state.iteritems():
        # dispatch to the _sset_<vartype> helper; restores in place so that
        # existing references to the global keep working
        g['_sset_'+_state_vars[k]](k, g[k], v)
    return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
    """Restore a dict-typed global in place from its saved snapshot."""
    ob.clear()
    ob.update(state)
def _sget_object(val):
    """Snapshot an object-typed global via the object's own __getstate__()."""
    return val.__getstate__()
def _sset_object(key, ob, state):
    """Restore an object-typed global in place via its __setstate__()."""
    ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
    """Return this platform's maximum compatible version.
    distutils.util.get_platform() normally reports the minimum version
    of Mac OS X that would be required to *use* extensions produced by
    distutils. But what we want when checking compatibility is to know the
    version of Mac OS X that we are *running*. To allow usage of packages that
    explicitly require a newer version of Mac OS X, we must also know the
    current version of the OS.
    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    """
    plat = get_build_platform(); m = macosVersionString.match(plat)
    if m is not None and sys.platform == "darwin":
        # Replace the build-time OS version with the version actually running.
        try:
            plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
        except ValueError:
            pass # not Mac OS X
    return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra',
'ExtractionError',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
    """An already-installed distribution's version conflicts with the requested version."""
class DistributionNotFound(ResolutionError):
    """A requested distribution could not be found."""
class UnknownExtra(ResolutionError):
    """The distribution doesn't have an "extra feature" of the given name."""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`
    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.
    """
    # Consulted by get_provider(), which looks up the factory for a module's
    # __loader__ via _find_adapter().
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq,Requirement):
        # A Requirement: return the active distribution, resolving/activating
        # it via require() if it isn't already in the working set.
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    # Otherwise treat the argument as a module name; import it on demand.
    try:
        module = sys.modules[moduleOrReq]
    except KeyError:
        __import__(moduleOrReq)
        module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    # Pick the provider factory registered for this loader's type.
    return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
    """Return the running Mac OS X version as a list of string components.

    NOTE: the mutable default argument is deliberate here -- it memoizes the
    result for the life of the process.
    """
    if not _cache:
        import platform
        version = platform.mac_ver()[0]
        # fallback for MacPorts
        if version == '':
            import plistlib
            plist = '/System/Library/CoreServices/SystemVersion.plist'
            if os.path.exists(plist):
                if hasattr(plistlib, 'readPlist'):
                    plist_content = plistlib.readPlist(plist)
                    if 'ProductVersion' in plist_content:
                        version = plist_content['ProductVersion']
        _cache.append(version.split('.'))
    return _cache[0]
def _macosx_arch(machine):
return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine)
def get_build_platform():
    """Return this platform's string for platform-specific distributions
    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    try:
        from distutils.util import get_platform
    except ImportError:
        # distutils unavailable: fall back to the stdlib sysconfig equivalent
        from sysconfig import get_platform
    plat = get_platform()
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        # Older Python builds report e.g. 'darwin-...' -- translate to the
        # 'macosx-<major>.<minor>-<arch>' form used by newer eggs.
        try:
            version = _macosx_vers()
            machine = os.uname()[4].replace(" ", "_")
            return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
                _macosx_arch(machine))
        except ValueError:
            # if someone is running a non-Mac darwin system, this will fall
            # through to the default implementation
            pass
    return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
get_platform = get_build_platform # XXX backward compat
def compatible_platforms(provided,required):
    """Can code for the `provided` platform run on the `required` platform?
    Returns true if either platform is ``None``, or the platforms are equal.
    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    if provided is None or required is None or provided==required:
        return True # easy case
    # Mac OS X special cases
    reqMac = macosVersionString.match(required)
    if reqMac:
        provMac = macosVersionString.match(provided)
        # is this a Mac package?
        if not provMac:
            # this is backwards compatibility for packages built before
            # setuptools 0.6. All packages built after this point will
            # use the new macosx designation.
            provDarwin = darwinVersionString.match(provided)
            if provDarwin:
                dversion = int(provDarwin.group(1))
                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
                # darwin kernel 7 <-> OS X 10.3, kernel 8 <-> OS X 10.4
                if dversion == 7 and macosversion >= "10.3" or \
                    dversion == 8 and macosversion >= "10.4":
                    #import warnings
                    #warnings.warn("Mac eggs should be rebuilt to "
                    #    "use the macosx designation instead of darwin.",
                    #    category=DeprecationWarning)
                    return True
            return False # egg isn't macosx or legacy darwin
        # are they the same major version and machine type?
        if provMac.group(1) != reqMac.group(1) or \
            provMac.group(3) != reqMac.group(3):
            return False
        # is the required OS major update >= the provided one?
        if int(provMac.group(2)) > int(reqMac.group(2)):
            return False
        return True
    # XXX Linux and other platforms' special cases should go here
    return False
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    # Run the script in the *caller's* global namespace: grab the calling
    # frame's globals, wipe them (keeping only __name__), and execute there.
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)
run_main = run_script # backward compatibility
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    # Progressive coercion: string -> Requirement -> Distribution.
    if isinstance(dist,basestring): dist = Requirement.parse(dist)
    if isinstance(dist,Requirement): dist = get_provider(dist)
    if not isinstance(dist,Distribution):
        raise TypeError("Expected string, Requirement, or Distribution", dist)
    return dist
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    distribution = get_distribution(dist)
    return distribution.load_entry_point(group, name)
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    distribution = get_distribution(dist)
    return distribution.get_entry_map(group)
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    distribution = get_distribution(dist)
    return distribution.get_entry_info(group, name)
class IMetadataProvider:
    """Interface declaration for objects that expose distribution metadata.

    NOTE: the method stubs intentionally omit ``self`` and have no bodies;
    this class documents the expected protocol rather than serving as a
    concrete implementation.
    """
    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""
    def get_metadata(name):
        """The named metadata resource as a string"""
    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines
       Leading and trailing whitespace is stripped from each line, and lines
       with ``#`` as the first non-blank character are omitted."""
    def metadata_isdir(name):
        """Is the named metadata a directory?  (like ``os.path.isdir()``)"""
    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""
    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources

    NOTE: like IMetadataProvider, the method stubs intentionally omit
    ``self`` -- this class documents the protocol only.
    """
    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`
        `manager` must be an ``IResourceManager``"""
    def has_resource(resource_name):
        """Does the package contain the named resource?"""
    def resource_isdir(resource_name):
        """Is the named resource a directory?  (like ``os.path.isdir()``)"""
    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""
    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        # entries: path items in insertion order; entry_keys: entry -> list of
        # project keys found there; by_key: project key -> active Distribution;
        # callbacks: subscribers notified for each newly activated dist.
        self.entries = []
        self.entry_keys = {}
        self.by_key = {}
        self.callbacks = []
        if entries is None:
            entries = sys.path
        for entry in entries:
            self.add_entry(entry)
    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it
        ``find_distributions(entry,True)`` is used to find distributions
        corresponding to the path entry, and they are added. `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)
    def __contains__(self,dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist
    def find(self, req):
        """Find a distribution matching requirement `req`
        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`. But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            raise VersionConflict(dist,req) # XXX add more info
        else:
            return dist
    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`
        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]
    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # The caller's global namespace is wiped (keeping only __name__), and
        # the located distribution executes the script directly in it.
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)
    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set
        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key]=1
                    yield self.by_key[key]
    def add(self, dist, entry=None, insert=True):
        """Add `dist` to working set, associated with `entry`
        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).
        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set. If it's added, any
        callbacks registered with the ``subscribe()`` method will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry)
        if entry is None:
            entry = dist.location
        # Record the key under both the requested entry and the dist's own
        # location, so lookups via either path item succeed.
        keys = self.entry_keys.setdefault(entry,[])
        keys2 = self.entry_keys.setdefault(dist.location,[])
        if dist.key in self.by_key:
            return # ignore hidden distros
        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        self._added_new(dist)
    def resolve(self, requirements, env=None, installer=None, replacement=True):
        """List all distributions needed to (recursively) meet `requirements`
        `requirements` must be a sequence of ``Requirement`` objects. `env`,
        if supplied, should be an ``Environment`` instance. If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set. `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.
        """
        requirements = list(requirements)[::-1] # set up the stack
        processed = {} # set of processed requirements
        best = {} # key -> dist
        to_activate = []
        while requirements:
            req = requirements.pop(0) # process dependencies breadth-first
            # distribute shim: when `replacement` is true, a request for
            # setuptools is answered with distribute instead.
            if _override_setuptools(req) and replacement:
                req = Requirement.parse('distribute')
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue
            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None:
                    if env is None:
                        env = Environment(self.entries)
                    dist = best[req.key] = env.best_match(req, self, installer)
                    if dist is None:
                        #msg = ("The '%s' distribution was not found on this "
                        # "system, and is required by this application.")
                        #raise DistributionNotFound(msg % req)
                        # unfortunately, zc.buildout uses a str(err)
                        # to get the name of the distribution here..
                        raise DistributionNotFound(req)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                raise VersionConflict(dist,req) # XXX put more info here
            requirements.extend(dist.requires(req.extras)[::-1])
            processed[req] = True
        return to_activate # return list of distros to activate
    def find_plugins(self,
        plugin_env, full_env=None, installer=None, fallback=True
        ):
        """Find all activatable distributions in `plugin_env`
        Example usage::
            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            map(working_set.add, distributions) # add plugins+libs to sys.path
            print 'Could not load', errors # display errors
        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions. If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.
        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.
        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies. `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """
        plugin_projects = list(plugin_env)
        plugin_projects.sort() # scan project names in alphabetic order
        error_info = {}
        distributions = {}
        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env
        # Resolve against a scratch copy of this working set so that failed
        # plugin resolutions don't disturb the real one.
        shadow_set = self.__class__([])
        map(shadow_set.add, self) # put all our entries in shadow_set
        for project_name in plugin_projects:
            for dist in plugin_env[project_name]:
                req = [dist.as_requirement()]
                try:
                    resolvees = shadow_set.resolve(req, env, installer)
                except ResolutionError,v:
                    error_info[dist] = v # save error info
                    if fallback:
                        continue # try the next older version of project
                    else:
                        break # give up on this project, keep going
                else:
                    map(shadow_set.add, resolvees)
                    distributions.update(dict.fromkeys(resolvees))
                    # success, no need to try any more versions of this project
                    break
        distributions = list(distributions)
        distributions.sort()
        return distributions, error_info
    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated
        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required. The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))
        for dist in needed:
            self.add(dist)
        return needed
    def subscribe(self, callback):
        """Invoke `callback` for all distributions (including existing ones)"""
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        for dist in self:
            callback(dist)
    def _added_new(self, dist):
        # Notify every subscriber of the newly activated distribution.
        for callback in self.callbacks:
            callback(dist)
    def __getstate__(self):
        # Shallow copies so the pickled state can't be mutated via the live set.
        return (self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
            self.callbacks[:])
    def __setstate__(self, (entries, keys, by_key, callbacks)):
        # NOTE: Python 2-only tuple-parameter unpacking in the signature.
        self.entries = entries[:]
        self.entry_keys = keys.copy()
        self.by_key = by_key.copy()
        self.callbacks = callbacks[:]
class Environment(object):
    """Searchable snapshot of distributions on a search path"""
    def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
        """Snapshot distributions available on a search path
        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used.
        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with. If
        unspecified, it defaults to the current platform. `python` is an
        optional string naming the desired version of Python (e.g. ``'2.4'``);
        it defaults to the current version.
        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        """
        # _distmap: project key -> list of Distributions;
        # _cache: project key -> the same list, kept sorted newest-first.
        self._distmap = {}
        self._cache = {}
        self.platform = platform
        self.python = python
        self.scan(search_path)
    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?
        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        return (self.python is None or dist.py_version is None
            or dist.py_version==self.python) \
            and compatible_platforms(dist.platform,self.platform)
    def remove(self, dist):
        """Remove `dist` from the environment"""
        self._distmap[dist.key].remove(dist)
    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment
        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used. Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path
        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)
    def __getitem__(self,project_name):
        """Return a newest-to-oldest list of distributions for `project_name`
        """
        try:
            return self._cache[project_name]
        except KeyError:
            # Lookups are effectively case-insensitive: retry lowercased.
            project_name = project_name.lower()
            if project_name not in self._distmap:
                return []
            if project_name not in self._cache:
                dists = self._cache[project_name] = self._distmap[project_name]
                _sort_dists(dists)
            return self._cache[project_name]
    def add(self,dist):
        """Add `dist` if we ``can_add()`` it and it isn't already added"""
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key,[])
            if dist not in dists:
                dists.append(dist)
                if dist.key in self._cache:
                    # keep the cached list in newest-first order
                    _sort_dists(self._cache[dist.key])
    def best_match(self, req, working_set, installer=None):
        """Find distribution best matching `req` and usable on `working_set`
        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active. (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.) If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`. If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        dist = working_set.find(req)
        if dist is not None:
            return dist
        # self[req.key] is sorted newest-first, so the first hit is the best.
        for dist in self[req.key]:
            if dist in req:
                return dist
        return self.obtain(req, installer) # try and download/install
    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)
        Obtain a distro that matches requirement (e.g. via download). In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead. This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        if installer is not None:
            return installer(requirement)
    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            if self[key]: yield key
    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other,Distribution):
            self.add(other)
        elif isinstance(other,Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self
    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        # The combined snapshot applies no platform/python filtering of its own.
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
AvailableDistributions = Environment # XXX backward compatibility
class ExtractionError(RuntimeError):
    """An error occurred extracting a resource
    The following attributes are available from instances of this exception:
    manager
        The resource manager that raised this exception
    cache_path
        The base directory for resource extraction
    original_error
        The exception instance that caused extraction to fail
    """
    # The attributes above are filled in by ResourceManager.extraction_error()
    # before the exception is raised.
class ResourceManager:
    """Manage resource extraction and packages"""
    # Base directory used for extractions; None means use get_default_cache().
    extraction_path = None
    def __init__(self):
        # Tracks every path handed out by get_cache_path(), for later cleanup.
        self.cached_files = {}
    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)
    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )
    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )
    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )
    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )
    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )
    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""
        # Wrap the in-flight exception in an ExtractionError carrying the
        # manager, cache path, and original error as attributes.
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()
        err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
        )
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err
    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`
        The parent directory of the resulting path will be created if it does
        not already exist. `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension. `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.
        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        except:
            self.extraction_error()
        self.cached_files[target_path] = 1
        return target_path
    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`
        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.
        Resource providers should call this method ONLY after successfully
        extracting a compressed resource. They must NOT call it on resources
        that are already in the filesystem.
        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        """
        if os.name == 'posix':
            # Make the resource executable (Python 2 octal literals: adds
            # r-x for everyone, then masks to permission bits only)
            mode = ((os.stat(tempname).st_mode) | 0555) & 07777
            os.chmod(tempname, mode)
    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.
        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``. (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks. See that routine's documentation for more
        details.)
        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``. You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done. There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.
        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )
        self.extraction_path = path
    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process. This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        """
        # XXX
        # NOTE(review): intentionally unimplemented upstream; currently a
        # no-op that returns None rather than the documented list.
def get_default_cache():
    """Determine the default cache location

    This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
    Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
    "Application Data" directory. On all other systems, it's "~/.python-eggs".

    Raises ``RuntimeError`` on Windows when none of the usual home-directory
    environment variables are available.
    """
    try:
        return os.environ['PYTHON_EGG_CACHE']
    except KeyError:
        pass
    if os.name!='nt':
        return os.path.expanduser('~/.python-eggs')
    app_data = 'Application Data' # XXX this may be locale-specific!
    # Candidate (env-var-tuple, subdirectory) pairs, best first; the first
    # candidate whose environment variables are all set wins.
    app_homes = [
        (('APPDATA',), None), # best option, should be locale-safe
        (('USERPROFILE',), app_data),
        (('HOMEDRIVE','HOMEPATH'), app_data),
        (('HOMEPATH',), app_data),
        (('HOME',), None),
        (('WINDIR',), app_data), # 95/98/ME
    ]
    for keys, subdir in app_homes:
        dirname = ''
        for key in keys:
            if key in os.environ:
                dirname = os.path.join(dirname, os.environ[key])
            else:
                break # a required variable is missing; try the next candidate
        else:
            if subdir:
                dirname = os.path.join(dirname,subdir)
            return os.path.join(dirname, 'Python-Eggs')
    # Bug fix: original message misspelled "environment" as "enviroment".
    raise RuntimeError(
        "Please set the PYTHON_EGG_CACHE environment variable"
    )
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name

    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    # A run of anything outside [A-Za-z0-9.] collapses to one dash.
    invalid_run = re.compile('[^A-Za-z0-9.]+')
    return invalid_run.sub('-', name)
def safe_version(version):
    """Convert an arbitrary string to a standard version string

    Spaces become dots, and all other non-alphanumeric characters become
    dashes, with runs of multiple dashes condensed to a single dash.
    """
    dotted = version.replace(' ', '.')
    # Everything left that isn't alphanumeric or a dot collapses to '-'.
    return re.sub('[^A-Za-z0-9.]+', '-', dotted)
def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name

    Any runs of non-alphanumeric characters are replaced with a single '_',
    and the result is always lowercased.
    """
    underscored = re.sub('[^A-Za-z0-9.]+', '_', extra)
    return underscored.lower()
def to_filename(name):
    """Convert a project or version name to its filename-escaped form

    Any '-' characters are currently replaced with '_'.
    """
    # Splitting on '-' and rejoining with '_' is equivalent to str.replace.
    return '_'.join(name.split('-'))
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
    # Filled in by subclasses that know their egg layout (see EggProvider).
    egg_name = None
    egg_info = None
    loader = None
    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))
    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)
    def get_resource_stream(self, manager, resource_name):
        return StringIO(self.get_resource_string(manager, resource_name))
    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))
    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))
    def has_metadata(self, name):
        return self.egg_info and self._has(self._fn(self.egg_info,name))
    # Metadata is read as raw bytes; on Python 3 it must be decoded to text.
    if sys.version_info <= (3,):
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info,name))
    else:
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info,name)).decode("utf-8")
    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
    def resource_isdir(self,resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))
    def metadata_isdir(self,name):
        return self.egg_info and self._isdir(self._fn(self.egg_info,name))
    def resource_listdir(self,resource_name):
        return self._listdir(self._fn(self.module_path,resource_name))
    def metadata_listdir(self,name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info,name))
        return []
    def run_script(self,script_name,namespace):
        # Scripts live under the egg's metadata as 'scripts/<name>'.
        script = 'scripts/'+script_name
        if not self.has_metadata(script):
            raise ResolutionError("No script named %r" % script_name)
        script_text = self.get_metadata(script).replace('\r\n','\n')
        script_text = script_text.replace('\r','\n')
        script_filename = self._fn(self.egg_info,script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            # NOTE: execfile / the exec statement below are Python 2-only.
            execfile(script_filename, namespace, namespace)
        else:
            # No real file: prime linecache so tracebacks can show source.
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text,script_filename,'exec')
            exec script_code in namespace, namespace
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _fn(self, base, resource_name):
        # Resource names use '/' separators regardless of platform.
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base
    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""
    def __init__(self,module):
        NullProvider.__init__(self,module)
        self._setup_prefix()
    def _setup_prefix(self):
        # we assume here that our metadata may be nested inside a "basket"
        # of multiple eggs; that's why we use module_path instead of .archive
        path = self.module_path
        old = None
        # Walk up the path until a '.egg' component is found (or the path
        # stops changing, i.e. we reached the filesystem root).
        while path!=old:
            if path.lower().endswith('.egg'):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                break
            old = path
            path, base = os.path.split(path)
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""
    def _has(self, path):
        return os.path.exists(path)
    def _isdir(self,path):
        return os.path.isdir(path)
    def _listdir(self,path):
        return os.listdir(path)
    def get_resource_stream(self, manager, resource_name):
        return open(self._fn(self.module_path, resource_name), 'rb')
    def _get(self, path):
        stream = open(path, 'rb')
        try:
            return stream.read()
        finally:
            stream.close()
# Plain filesystem modules have a loader of None.
register_loader_type(type(None), DefaultProvider)
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""
    _isdir = _has = lambda self,path: False
    _get = lambda self,path: ''
    _listdir = lambda self,path: []
    module_path = None
    def __init__(self):
        # Deliberately skip NullProvider.__init__: there is no module.
        pass
# Shared stateless instance.
empty_provider = EmptyProvider()
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""
    eagers = None
    def __init__(self, module):
        EggProvider.__init__(self,module)
        # Relies on zipimport's private directory cache, keyed by archive path.
        self.zipinfo = zipimport._zip_directory_cache[self.loader.archive]
        self.zip_pre = self.loader.archive+os.sep
    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath,self.zip_pre)
        )
    def _parts(self,zip_path):
        # Convert a zipfile subpath into an egg-relative path part list
        fspath = self.zip_pre+zip_path # pseudo-fs path
        if fspath.startswith(self.egg_root+os.sep):
            return fspath[len(self.egg_root)+1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath,self.egg_root)
        )
    def get_resource_filename(self, manager, resource_name):
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            # Eager resources (e.g. native libs) are all extracted together.
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)
    def _extract_resource(self, manager, zip_path):
        if zip_path in self._index():
            # A directory: recursively extract its contents.
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            return os.path.dirname(last) # return the extracted directory name
        zip_stat = self.zipinfo[zip_path]
        # zip_stat fields: [5]=DOS-packed time, [6]=DOS-packed date,
        # [3]=uncompressed size (decoded into a struct_time-style tuple below).
        t,d,size = zip_stat[5], zip_stat[6], zip_stat[3]
        date_time = (
            (d>>9)+1980, (d>>5)&0xF, d&0x1F, # ymd
            (t&0xFFFF)>>11, (t>>5)&0x3F, (t&0x1F) * 2, 0, 0, -1 # hms, etc.
        )
        timestamp = time.mktime(date_time)
        try:
            if not WRITE_SUPPORT:
                raise IOError('"os.rename" and "os.unlink" are not supported '
                              'on this platform')
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )
            if os.path.isfile(real_path):
                stat = os.stat(real_path)
                if stat.st_size==size and stat.st_mtime==timestamp:
                    # size and stamp match, don't bother extracting
                    return real_path
            # Extract to a temp name, then rename atomically into place.
            outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            utime(tmpnam, (timestamp,timestamp))
            manager.postprocess(tmpnam, real_path)
            try:
                rename(tmpnam, real_path)
            except os.error:
                if os.path.isfile(real_path):
                    stat = os.stat(real_path)
                    if stat.st_size==size and stat.st_mtime==timestamp:
                        # size and stamp match, somebody did it just ahead of
                        # us, so we're done
                        return real_path
                    elif os.name=='nt': # Windows, del old file and retry
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise
        except os.error:
            manager.extraction_error() # report a user-friendly error
        return real_path
    def _get_eager_resources(self):
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers
    def _index(self):
        # Lazily build and cache a map of directory subpath -> member names.
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind
    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        return zip_path in self.zipinfo or zip_path in self._index()
    def _isdir(self,fspath):
        return self._zipinfo_name(fspath) in self._index()
    def _listdir(self,fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))
    def _eager_to_zip(self,resource_name):
        return self._zipinfo_name(self._fn(self.egg_root,resource_name))
    def _resource_to_zip(self,resource_name):
        return self._zipinfo_name(self._fn(self.module_path,resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files
    Usage::
        metadata = FileMetadata("/path/to/PKG-INFO")
    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and will be the contents of the file at
    the provided location.
    """
    def __init__(self,path):
        self.path = path
    def has_metadata(self,name):
        return name=='PKG-INFO'
    def get_metadata(self,name):
        if name=='PKG-INFO':
            # 'rU' = universal-newlines text mode (deprecated in Python 3).
            f = open(self.path,'rU')
            metadata = f.read()
            f.close()
            return metadata
        raise KeyError("No metadata except PKG-INFO is available")
    def get_metadata_lines(self,name):
        return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories
    Usage::
        # Development eggs:
        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir,project_name=dist_name,metadata=metadata)
        # Unpacked egg directories:
        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """
    def __init__(self, path, egg_info):
        # Bypasses DefaultProvider.__init__: only the two paths are needed.
        self.module_path = path
        self.egg_info = egg_info
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""
    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        # Mirrors ZipProvider.__init__, but sources everything from the
        # zipimporter itself instead of a module object.
        self.zipinfo = zipimport._zip_directory_cache[importer.archive]
        self.zip_pre = importer.archive+os.sep
        self.loader = importer
        if importer.prefix:
            self.module_path = os.path.join(importer.archive, importer.prefix)
        else:
            self.module_path = importer.archive
        self._setup_prefix()
class ImpWrapper:
    """PEP 302 Importer that wraps Python's "normal" import algorithm"""
    # NOTE: relies on the legacy ``imp`` module (removed in Python 3.12).
    def __init__(self, path=None):
        self.path = path
    def find_module(self, fullname, path=None):
        subname = fullname.split(".")[-1]
        # A path-less wrapper can only handle top-level module names.
        if subname != fullname and self.path is None:
            return None
        if self.path is None:
            path = None
        else:
            path = [self.path]
        try:
            file, filename, etc = imp.find_module(subname, path)
        except ImportError:
            return None
        return ImpLoader(file, filename, etc)
class ImpLoader:
    """PEP 302 Loader that wraps Python's "normal" import algorithm"""
    def __init__(self, file, filename, etc):
        # `etc` is the (suffix, mode, type) tuple from imp.find_module().
        self.file = file
        self.filename = filename
        self.etc = etc
    def load_module(self, fullname):
        try:
            mod = imp.load_module(fullname, self.file, self.filename, self.etc)
        finally:
            # Always close the file handle, even if the load failed.
            if self.file: self.file.close()
        # Note: we don't set __loader__ because we want the module to look
        # normal; i.e. this is just a wrapper for standard import machinery
        return mod
def get_importer(path_item):
    """Retrieve a PEP 302 "importer" for the given path item
    If there is no importer, this returns a wrapper around the builtin import
    machinery. The returned importer is only cached if it was created by a
    path hook.
    """
    try:
        importer = sys.path_importer_cache[path_item]
    except KeyError:
        # Not cached: ask each registered path hook in turn.
        for hook in sys.path_hooks:
            try:
                importer = hook(path_item)
            except ImportError:
                pass
            else:
                break
        else:
            importer = None
        # Cache whatever the hooks produced (possibly None).
        sys.path_importer_cache.setdefault(path_item,importer)
    if importer is None:
        # Fall back to the builtin import machinery; this wrapper is
        # deliberately NOT cached.
        try:
            importer = ImpWrapper(path_item)
        except ImportError:
            pass
    return importer
# Prefer the stdlib pkgutil implementations of the above when available.
try:
    from pkgutil import get_importer, ImpImporter
except ImportError:
    pass # Python 2.3 or 2.4, use our own implementation
else:
    ImpWrapper = ImpImporter # Python 2.5, use pkgutil's implementation
    del ImpLoader, ImpImporter
# Registry mapping importer type -> distribution-finder callable.
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items
    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item. See ``pkg_resources.find_on_path`` for an example."""
    _distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    # Dispatch on the importer's type via the finder registry.
    importer = get_importer(path_item)
    finder = _find_adapter(_distribution_finders, importer)
    return finder(importer, path_item, only)
def find_in_zip(importer, path_item, only=False):
    """Yield distributions in a zipped egg, recursing into nested .eggs."""
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        return # don't yield nested distros
    for subitem in metadata.resource_listdir('/'):
        if subitem.endswith('.egg'):
            subpath = os.path.join(path_item, subitem)
            for dist in find_in_zip(zipimport.zipimporter(subpath), subpath):
                yield dist
# Zipped eggs (and eggs nested inside them) are located via zipimport.
register_finder(zipimport.zipimporter, find_in_zip)
def StringIO(*args, **kw):
    """Thunk to load the real StringIO on demand"""
    # The `global` rebinds this module-level name on first call, so later
    # calls go straight to the real class (cStringIO when available).
    global StringIO
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    return StringIO(*args,**kw)
def find_nothing(importer, path_item, only=False):
    """Fallback finder: importers of this type never hold distributions."""
    return tuple()
# Default: unknown importer types yield no distributions.
register_finder(object,find_nothing)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)
    if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
        if path_item.lower().endswith('.egg'):
            # unpacked egg
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item,'EGG-INFO')
                )
            )
        else:
            # scan for .egg and .egg-info in directory
            for entry in os.listdir(path_item):
                lower = entry.lower()
                if lower.endswith('.egg-info'):
                    fullpath = os.path.join(path_item, entry)
                    if os.path.isdir(fullpath):
                        # egg-info directory, allow getting metadata
                        metadata = PathMetadata(path_item, fullpath)
                    else:
                        # single .egg-info file (e.g. from a develop install)
                        metadata = FileMetadata(fullpath)
                    yield Distribution.from_location(
                        path_item,entry,metadata,precedence=DEVELOP_DIST
                    )
                elif not only and lower.endswith('.egg'):
                    for dist in find_distributions(os.path.join(path_item, entry)):
                        yield dist
                elif not only and lower.endswith('.egg-link'):
                    # .egg-link files hold the path of a development egg on
                    # their first non-blank line; scan that location instead.
                    for line in open(os.path.join(path_item, entry)):
                        if not line.strip(): continue
                        for item in find_distributions(os.path.join(path_item,line.rstrip())):
                            yield item
                        break
register_finder(ImpWrapper,find_on_path)
# Registries for namespace-package support (see register_namespace_handler):
# importer type -> handler, and package name -> list of child namespaces.
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages
    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `namespace_handler` is a callable like this::
        def namespace_handler(importer,path_entry,moduleName,module):
            # return a path_entry to use for child packages
    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath. For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    # Consulted (via MRO lookup) by _handle_ns().
    _namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""
    importer = get_importer(path_item)
    if importer is None:
        return None
    loader = importer.find_module(packageName)
    if loader is None:
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # Create a stub package module with an empty __path__.
        module = sys.modules[packageName] = types.ModuleType(packageName)
        module.__path__ = []; _set_parent_ns(packageName)
    elif not hasattr(module,'__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer,path_item,packageName,module)
    if subpath is not None:
        # Let the loader (re)execute the package, then restore the augmented
        # __path__ so the new subpath survives the reload.
        path = module.__path__; path.append(subpath)
        loader.load_module(packageName); module.__path__ = path
    return subpath
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""
    # The imp lock guards the registries while we mutate them (Python 2 API).
    imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            return
        path, parent = sys.path, None
        if '.' in packageName:
            # Recursively declare the parent first; child subpaths come
            # from the parent's __path__, not from sys.path.
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)
        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent,[]).append(packageName)
        _namespace_packages.setdefault(packageName,[])
        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)
    finally:
        imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    imp.acquire_lock()
    try:
        for package in _namespace_packages.get(parent,()):
            subpath = _handle_ns(package, path_item)
            # Recurse so nested namespace packages pick up the new entry too.
            if subpath: fixup_namespace_packages(subpath,package)
    finally:
        imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    for item in module.__path__:
        # Falls through to implicit None (no new subpath) on a match.
        if _normalize_cached(item)==normalized:
            break
    else:
        # Only return the path if it's not already there
        return subpath
# Filesystem and zipfile importers share the same subpath computation.
register_namespace_handler(ImpWrapper,file_ns_handler)
register_namespace_handler(zipimport.zipimporter,file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
    """Handler for importer types that never contain namespace packages."""
    return None
# Default: unknown importer types contribute no namespace subpaths.
register_namespace_handler(object,null_ns_handler)
def normalize_path(filename):
    """Canonicalize a file/dir name (symlinks resolved, case normalized)."""
    real = os.path.realpath(filename)
    return os.path.normcase(real)
def _normalize_cached(filename,_cache={}):
    """Memoizing wrapper for normalize_path(); the default dict is the cache."""
    try:
        return _cache[filename]
    except KeyError:
        normalized = normalize_path(filename)
        _cache[filename] = normalized
        return normalized
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
    if isinstance(strs,basestring): # Python 2 str/unicode
        for s in strs.splitlines():
            s = s.strip()
            if s and not s.startswith('#'): # skip blank lines/comments
                yield s
    else:
        # Recurse into (possibly nested) iterables of strings.
        for ss in strs:
            for s in yield_lines(ss):
                yield s
# Tokenizer regexes for the requirement-spec mini-language consumed by
# parse_requirements() / Requirement.parse(). Each is bound to .match so
# it can be called as f(line, pos).
LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation
DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra
VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info
COMMA = re.compile(r"\s*,").match # comma between items
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
# Parses egg basenames of the form name[-ver[-pyX.Y[-plat]]].
EGG_NAME = re.compile(
    r"(?P<name>[^-]+)"
    r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
    re.VERBOSE | re.IGNORECASE
).match
# Splits a version string into numeric / alphabetic / separator components.
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
# Pre-release spelling normalizations applied by _parse_version_parts().
replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
def _parse_version_parts(s):
    # Yield sortable chunks of version string `s`: numbers are zero-padded
    # to 8 digits, alphabetic parts are prefixed with '*' (sorting before
    # digits), and common pre-release spellings are normalized via `replace`.
    for part in component_re.split(s):
        part = replace(part,part)
        if part in ['', '.']:
            continue
        if part[:1] in '0123456789':
            yield part.zfill(8) # pad for numeric comparison
        else:
            yield '*'+part
    yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
    """Convert a version string to a chronologically-sortable key
    This is a rough cross between distutils' StrictVersion and LooseVersion;
    if you give it versions that would work with StrictVersion, then it behaves
    the same; otherwise it acts like a slightly-smarter LooseVersion. It is
    *possible* to create pathological version coding schemes that will fool
    this parser, but they should be very rare in practice.
    The returned value will be a tuple of strings. Numeric portions of the
    version are padded to 8 digits so they will compare numerically, but
    without relying on how numbers compare relative to strings. Dots are
    dropped, but dashes are retained. Trailing zeros between alpha segments
    or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
    "2.4". Alphanumeric parts are lower-cased.
    The algorithm assumes that strings like "-" and any alpha string that
    alphabetically follows "final" represents a "patch level". So, "2.4-1"
    is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
    considered newer than "2.4-1", which in turn is newer than "2.4".
    Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
    come before "final" alphabetically) are assumed to be pre-release versions,
    so that the version "2.4" is considered newer than "2.4a1".
    Finally, to handle miscellaneous cases, the strings "pre", "preview", and
    "rc" are treated as if they were "c", i.e. as though they were release
    candidates, and therefore are not as new as a version string that does not
    contain them, and "dev" is replaced with an '@' so that it sorts lower than
    than any other pre-release tag.
    """
    parts = []
    for part in _parse_version_parts(s.lower()):
        if part.startswith('*'):
            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1]=='00000000':
                parts.pop()
        parts.append(part)
    return tuple(parts)
class EntryPoint(object):
    """Object representing an advertised importable object"""
    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        # Validate/normalize extras by round-tripping through a dummy requirement.
        self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
        self.dist = dist
    def __str__(self):
        # Inverse of parse(): "name = module:attrs [extras]".
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s
    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)
    def load(self, require=True, env=None, installer=None):
        """Import and return the advertised object, requiring extras first."""
        if require: self.require(env, installer)
        entry = __import__(self.module_name, globals(),globals(), ['__name__'])
        # Walk the dotted attribute path down from the imported module.
        for attr in self.attrs:
            try:
                entry = getattr(entry,attr)
            except AttributeError:
                raise ImportError("%r has no %r attribute" % (entry,attr))
        return entry
    def require(self, env=None, installer=None):
        # Resolve and activate the distributions this entry point's extras
        # need. (Relies on Python 2's eager map() for the side effect.)
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        map(working_set.add,
            working_set.resolve(self.dist.requires(self.extras),env,installer))
    #@classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`
        Entry point syntax follows the form::
            name = some.module:some.attr [extra1,extra2]
        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        try:
            attrs = extras = ()
            name,value = src.split('=',1)
            if '[' in value:
                value,extras = value.split('[',1)
                req = Requirement.parse("x["+extras)
                if req.specs: raise ValueError
                extras = req.extras
            if ':' in value:
                value,attrs = value.split(':',1)
                if not MODULE(attrs.rstrip()):
                    raise ValueError
                attrs = attrs.rstrip().split('.')
        except ValueError:
            raise ValueError(
                "EntryPoint must be in 'name=module:attrs [extras]' format",
                src
            )
        else:
            return cls(name.strip(), value.strip(), attrs, extras, dist)
    parse = classmethod(parse)
    #@classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name]=ep
        return this
    parse_group = classmethod(parse_group)
    #@classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        if isinstance(data,dict):
            data = data.items()
        else:
            # Raw text: "[group]" section headers delimit the groups.
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
    parse_map = classmethod(parse_map)
def _remove_md5_fragment(location):
    """Strip a '#md5=...' URL fragment from `location`, for stable comparison."""
    if not location:
        return ''
    parsed = urlparse(location)
    if parsed[-1].startswith('md5='):
        # Drop the fragment (last component) and rebuild the URL.
        return urlunparse(parsed[:-1] + ('',))
    return location
class Distribution(object):
    """Wrap an actual or potential sys.path entry w/metadata"""
    def __init__(self,
        location=None, metadata=None, project_name=None, version=None,
        py_version=PY_MAJOR, platform=None, precedence = EGG_DIST
    ):
        self.project_name = safe_name(project_name or 'Unknown')
        if version is not None:
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        # Metadata access is delegated to the provider; see __getattr__.
        self._provider = metadata or empty_provider
    #@classmethod
    def from_location(cls,location,basename,metadata=None,**kw):
        # Parse name/version/pyver/platform out of an egg(-info) basename.
        project_name, version, py_version, platform = [None]*4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in (".egg",".egg-info"):
            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group(
                    'name','ver','pyver','plat'
                )
        return cls(
            location, metadata, project_name=project_name, version=version,
            py_version=py_version, platform=platform, **kw
        )
    from_location = classmethod(from_location)
    # Tuple used for ordering, equality and hashing of distributions.
    hashcmp = property(
        lambda self: (
            getattr(self,'parsed_version',()),
            self.precedence,
            self.key,
            _remove_md5_fragment(self.location),
            self.py_version,
            self.platform
        )
    )
    def __hash__(self): return hash(self.hashcmp)
    def __lt__(self, other):
        return self.hashcmp < other.hashcmp
    def __le__(self, other):
        return self.hashcmp <= other.hashcmp
    def __gt__(self, other):
        return self.hashcmp > other.hashcmp
    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # It's not a Distribution, so they are not equal
            return False
        return self.hashcmp == other.hashcmp
    def __ne__(self, other):
        return not self == other
    # These properties have to be lazy so that we don't have to load any
    # metadata until/unless it's actually needed. (i.e., some distributions
    # may not know their name or version without loading PKG-INFO)
    #@property
    def key(self):
        try:
            return self._key
        except AttributeError:
            self._key = key = self.project_name.lower()
            return key
    key = property(key)
    #@property
    def parsed_version(self):
        try:
            return self._parsed_version
        except AttributeError:
            self._parsed_version = pv = parse_version(self.version)
            return pv
    parsed_version = property(parsed_version)
    #@property
    def version(self):
        try:
            return self._version
        except AttributeError:
            # Fall back to the Version: header in PKG-INFO.
            for line in self._get_metadata('PKG-INFO'):
                if line.lower().startswith('version:'):
                    self._version = safe_version(line.split(':',1)[1].strip())
                    return self._version
            else:
                raise ValueError(
                    "Missing 'Version:' header and/or PKG-INFO file", self
                )
    version = property(version)
    #@property
    def _dep_map(self):
        # Lazily built map: extra name (or None for base deps) -> [Requirement].
        try:
            return self.__dep_map
        except AttributeError:
            dm = self.__dep_map = {None: []}
            for name in 'requires.txt', 'depends.txt':
                for extra,reqs in split_sections(self._get_metadata(name)):
                    if extra: extra = safe_extra(extra)
                    dm.setdefault(extra,[]).extend(parse_requirements(reqs))
            return dm
    _dep_map = property(_dep_map)
    def requires(self,extras=()):
        """List of Requirements needed for this distro if `extras` are used"""
        dm = self._dep_map
        deps = []
        deps.extend(dm.get(None,()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError:
                raise UnknownExtra(
                    "%s has no such extra feature %r" % (self, ext)
                )
        return deps
    def _get_metadata(self,name):
        # Yield the lines of metadata file `name`, or nothing if absent.
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line
    def activate(self,path=None):
        """Ensure distribution is importable on `path` (default=sys.path)"""
        if path is None: path = sys.path
        self.insert_on(path)
        if path is sys.path:
            fixup_namespace_packages(self.location)
            # Relies on Python 2's eager map() for the side effect.
            map(declare_namespace, self._get_metadata('namespace_packages.txt'))
    def egg_name(self):
        """Return what this distribution's standard .egg filename should be"""
        filename = "%s-%s-py%s" % (
            to_filename(self.project_name), to_filename(self.version),
            self.py_version or PY_MAJOR
        )
        if self.platform:
            filename += '-'+self.platform
        return filename
    def __repr__(self):
        if self.location:
            return "%s (%s)" % (self,self.location)
        else:
            return str(self)
    def __str__(self):
        # Tolerate missing/unparseable version metadata.
        try: version = getattr(self,'version',None)
        except ValueError: version = None
        version = version or "[unknown version]"
        return "%s %s" % (self.project_name,version)
    def __getattr__(self,attr):
        """Delegate all unrecognized public attributes to .metadata provider"""
        if attr.startswith('_'):
            raise AttributeError,attr
        return getattr(self._provider, attr)
    #@classmethod
    def from_filename(cls,filename,metadata=None, **kw):
        return cls.from_location(
            _normalize_cached(filename), os.path.basename(filename), metadata,
            **kw
        )
    from_filename = classmethod(from_filename)
    def as_requirement(self):
        """Return a ``Requirement`` that matches this distribution exactly"""
        return Requirement.parse('%s==%s' % (self.project_name, self.version))
    def load_entry_point(self, group, name):
        """Return the `name` entry point of `group` or raise ImportError"""
        ep = self.get_entry_info(group,name)
        if ep is None:
            raise ImportError("Entry point %r not found" % ((group,name),))
        return ep.load()
    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        try:
            ep_map = self._ep_map
        except AttributeError:
            # Parse entry_points.txt once and cache the result.
            ep_map = self._ep_map = EntryPoint.parse_map(
                self._get_metadata('entry_points.txt'), self
            )
        if group is not None:
            return ep_map.get(group,{})
        return ep_map
    def get_entry_info(self, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``"""
        return self.get_entry_map(group).get(name)
    def insert_on(self, path, loc = None):
        """Insert self.location in path before its nearest parent directory"""
        loc = loc or self.location
        if self.project_name == 'setuptools':
            # distribute refuses to coexist with setuptools >= 0.7.
            try:
                version = self.version
            except ValueError:
                version = ''
            if '0.7' in version:
                raise ValueError(
                    "A 0.7-series setuptools cannot be installed "
                    "with distribute. Found one at %s" % str(self.location))
        if not loc:
            return
        if path is sys.path:
            self.check_version_conflict()
        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        npath= map(_normalize_cached, path)
        bp = None
        for p, item in enumerate(npath):
            if item==nloc:
                break
            elif item==bdir and self.precedence==EGG_DIST:
                # if it's an .egg, give it precedence over its directory
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            path.append(loc)
            return
        # p is the spot where we found or inserted loc; now remove duplicates
        while 1:
            try:
                np = npath.index(nloc, p+1)
            except ValueError:
                break
            else:
                del npath[np], path[np]
                p = np # ha!
        return
    def check_version_conflict(self):
        # Warn when a top-level module of this dist was already imported
        # from a different location than the one being added to sys.path.
        if self.key=='distribute':
            return # ignore the inevitable setuptools self-conflicts :(
        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            if (modname not in sys.modules or modname in nsp
                or modname in _namespace_packages
            ):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            if fn and (normalize_path(fn).startswith(loc) or
                       fn.startswith(self.location)):
                continue
            issue_warning(
                "Module %s was already imported from %s, but %s is being added"
                " to sys.path" % (modname, fn, self.location),
            )
    def has_version(self):
        try:
            self.version
        except ValueError:
            issue_warning("Unbuilt egg for "+repr(self))
            return False
        return True
    def clone(self,**kw):
        """Copy this distribution, substituting in any changed keyword args"""
        for attr in (
            'project_name', 'version', 'py_version', 'platform', 'location',
            'precedence'
        ):
            kw.setdefault(attr, getattr(self,attr,None))
        kw.setdefault('metadata', self._provider)
        return self.__class__(**kw)
    #@property
    def extras(self):
        return [dep for dep in self._dep_map if dep]
    extras = property(extras)
def issue_warning(*args,**kw):
    """Issue a warning attributed to the first caller outside this module."""
    level = 1
    g = globals()
    try:
        # find the first stack frame that is *not* code in
        # the pkg_resources module, to use for the warning
        while sys._getframe(level).f_globals is g:
            level += 1
    except ValueError:
        pass
    from warnings import warn
    warn(stacklevel = level+1, *args, **kw)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`
    `strs` must be an instance of ``basestring``, or a (possibly-nested)
    iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))
    def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):
        # Consume a comma-separated list of ITEM tokens starting at line[p:],
        # following backslash continuations onto later lines, until
        # TERMINATOR matches. Returns (current line, new pos, items).
        items = []
        while not TERMINATOR(line,p):
            if CONTINUE(line,p):
                try:
                    # Python 2 iterator protocol (.next rather than next()).
                    line = lines.next(); p = 0
                except StopIteration:
                    raise ValueError(
                        "\\ must not appear on the last nonblank line"
                    )
            match = ITEM(line,p)
            if not match:
                raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
            items.append(match.group(*groups))
            p = match.end()
            match = COMMA(line,p)
            if match:
                p = match.end() # skip the comma
            elif not TERMINATOR(line,p):
                raise ValueError(
                    "Expected ',' or end-of-list in",line,"at",line[p:]
                )
        match = TERMINATOR(line,p)
        if match: p = match.end() # skip the terminator, if any
        return line, p, items
    for line in lines:
        match = DISTRO(line)
        if not match:
            raise ValueError("Missing distribution spec", line)
        project_name = match.group(1)
        p = match.end()
        extras = []
        match = OBRACKET(line,p)
        if match:
            # Optional "[extra1,extra2]" list after the project name.
            p = match.end()
            line, p, extras = scan_list(
                DISTRO, CBRACKET, line, p, (1,), "'extra' name"
            )
        line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
        specs = [(op,safe_version(val)) for op,val in specs]
        yield Requirement(project_name, specs, extras)
def _sort_dists(dists):
tmp = [(dist.hashcmp,dist) for dist in dists]
tmp.sort()
dists[::-1] = [d for hc,d in tmp]
class Requirement:
    def __init__(self, project_name, specs, extras):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        self.unsafe_name, project_name = project_name, safe_name(project_name)
        self.project_name, self.key = project_name, project_name.lower()
        # Sort specs by parsed version so __contains__ can scan them in order.
        index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]
        index.sort()
        self.specs = [(op,ver) for parsed,trans,op,ver in index]
        self.index, self.extras = index, tuple(map(safe_extra,extras))
        self.hashCmp = (
            self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),
            frozenset(self.extras)
        )
        self.__hash = hash(self.hashCmp)
    def __str__(self):
        specs = ','.join([''.join(s) for s in self.specs])
        extras = ','.join(self.extras)
        if extras: extras = '[%s]' % extras
        return '%s%s%s' % (self.project_name, extras, specs)
    def __eq__(self,other):
        return isinstance(other,Requirement) and self.hashCmp==other.hashCmp
    def __contains__(self,item):
        # `item` may be a Distribution, a version string, or a parsed version.
        if isinstance(item,Distribution):
            if item.key <> self.key: return False
            if self.index: item = item.parsed_version # only get if we need it
        elif isinstance(item,basestring):
            item = parse_version(item)
        last = None
        compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1
        # Run `item` through the per-operator state machine; see the
        # module-level `state_machine` table for the action codes.
        for parsed,trans,op,ver in self.index:
            action = trans[compare(item,parsed)] # Indexing: 0, 1, -1
            if action=='F': return False
            elif action=='T': return True
            elif action=='+': last = True
            elif action=='-' or last is None: last = False
        if last is None: last = True # no rules encountered
        return last
    def __hash__(self):
        return self.__hash
    def __repr__(self): return "Requirement.parse(%r)" % str(self)
    #@staticmethod
    def parse(s, replacement=True):
        reqs = list(parse_requirements(s))
        if reqs:
            if len(reqs) == 1:
                founded_req = reqs[0]
                # if asked for setuptools distribution
                # and if distribute is installed, we want to give
                # distribute instead
                if _override_setuptools(founded_req) and replacement:
                    distribute = list(parse_requirements('distribute'))
                    if len(distribute) == 1:
                        return distribute[0]
                    return founded_req
                else:
                    return founded_req
            raise ValueError("Expected only one requirement", s)
        raise ValueError("No requirements found", s)
    parse = staticmethod(parse)
# Action table for Requirement.__contains__. For each operator, the three
# characters give the action when the candidate compares equal (index 0),
# greater (index 1), or less (index -1) than the spec's version:
# 'T' match, 'F' no match, '+'/'-' tentative yes/no, '.' no effect.
state_machine = {
    # =><
    '<' : '--T',
    '<=': 'T-T',
    '>' : 'F+F',
    '>=': 'T+F',
    '==': 'T..',
    '!=': 'F++',
}
def _override_setuptools(req):
"""Return True when distribute wants to override a setuptools dependency.
We want to override when the requirement is setuptools and the version is
a variant of 0.6.
"""
if req.project_name == 'setuptools':
if not len(req.specs):
# Just setuptools: ok
return True
for comparator, version in req.specs:
if comparator in ['==', '>=', '>']:
if '0.7' in version:
# We want some setuptools not from the 0.6 series.
return False
return True
return False
def _get_mro(cls):
    """Get an mro for a type or classic class"""
    if not isinstance(cls,type):
        # Graft old-style (classic) classes onto `object` to obtain an MRO,
        # then drop the synthetic subclass itself from the result.
        class cls(cls,object): pass
        return cls.__mro__[1:]
    return cls.__mro__
def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`"""
    # Walk the MRO so the most specific registered type wins; returns
    # None implicitly if nothing in the MRO is registered.
    for t in _get_mro(getattr(ob, '__class__', type(ob))):
        if t in registry:
            return registry[t]
def ensure_directory(path):
    """Create the parent directory of `path` if it does not already exist."""
    parent = os.path.dirname(path)
    if not os.path.isdir(parent):
        os.makedirs(parent)
def split_sections(s):
    """Split a string or iterable thereof into (section,content) pairs
    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines. If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
    """
    section = None
    content = []
    for line in yield_lines(s):
        if line.startswith("["):
            if line.endswith("]"):
                # New "[name]" header: flush the previous section first.
                if section or content:
                    yield section, content
                section = line[1:-1].strip()
                content = []
            else:
                raise ValueError("Invalid section heading", line)
        else:
            content.append(line)
    # wrap up last segment
    yield section, content
def _mkstemp(*args,**kw):
    """mkstemp() variant that bypasses the sandboxed os.open replacement."""
    from tempfile import mkstemp
    old_open = os.open
    try:
        # `os_open` is the saved original os.open (bound elsewhere in this
        # module) — swap it in so mkstemp escapes the sandbox.
        os.open = os_open # temporarily bypass sandboxing
        return mkstemp(*args,**kw)
    finally:
        os.open = old_open # and then put it back
# Set up global resource manager (deliberately not state-saved)
_manager = ResourceManager()
def _initialize(g):
    """Export the ResourceManager's public methods as module-level functions."""
    for name in dir(_manager):
        if not name.startswith('_'):
            g[name] = getattr(_manager, name)
_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
_declare_state('object', working_set = WorkingSet())
try:
    # Does the main program list any requirements?
    from __main__ import __requires__
except ImportError:
    pass # No: just use the default working set based on sys.path
else:
    # Yes: ensure the requirements are met, by prefixing sys.path if necessary
    try:
        working_set.require(__requires__)
    except VersionConflict: # try it without defaults already on sys.path
        working_set = WorkingSet([]) # by starting with an empty path
        for dist in working_set.resolve(
            parse_requirements(__requires__), Environment()
        ):
            working_set.add(dist)
        for entry in sys.path: # add any missing entries from sys.path
            if entry not in working_set.entries:
                working_set.add_entry(entry)
        sys.path[:] = working_set.entries # then copy back to sys.path
# Convenience aliases: module-level API bound to the master working set.
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script # backward compatibility
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]; map(working_set.add_entry,sys.path) # match order
| agpl-3.0 |
li-xiao-nan/gyp_tools | test/mac/gyptest-infoplist-process.py | 303 | 1677 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies the Info.plist preprocessor functionality.
"""
import TestGyp
import sys
# Info.plist preprocessing is an Xcode/macOS feature, so the whole test is
# gated on darwin; on other platforms the script is a silent no-op.
if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
  CHDIR = 'infoplist-process'
  INFO_PLIST_PATH = 'Test.app/Contents/Info.plist'
  # First process both keys.
  test.set_configuration('One')
  test.run_gyp('test1.gyp', chdir=CHDIR)
  test.build('test1.gyp', test.ALL, chdir=CHDIR)
  info_plist = test.built_file_path(INFO_PLIST_PATH, chdir=CHDIR)
  test.must_exist(info_plist)
  test.must_contain(info_plist, 'Foo')
  test.must_contain(info_plist, 'Bar')
  # Then process a single key.
  test.set_configuration('Two')
  test.run_gyp('test2.gyp', chdir=CHDIR)
  test.build('test2.gyp', chdir=CHDIR)
  info_plist = test.built_file_path(INFO_PLIST_PATH, chdir=CHDIR)
  test.must_exist(info_plist)
  test.must_contain(info_plist, 'com.google.Test') # Normal expansion works.
  test.must_contain(info_plist, 'Foo (Bar)')
  test.must_contain(info_plist, 'PROCESSED_KEY2')
  # Then turn off the processor.
  test.set_configuration('Three')
  test.run_gyp('test3.gyp', chdir=CHDIR)
  test.build('test3.gyp', chdir=CHDIR)
  info_plist = test.built_file_path('Test App.app/Contents/Info.plist',
                                    chdir=CHDIR)
  test.must_exist(info_plist)
  test.must_contain(info_plist, 'com.google.Test') # Normal expansion works.
  test.must_contain(info_plist, 'PROCESSED_KEY1')
  test.must_contain(info_plist, 'PROCESSED_KEY2')
  test.pass_test()
| bsd-3-clause |
zhjunlang/kbengine | kbe/src/lib/python/Tools/scripts/gprof2html.py | 36 | 2203 | #! /usr/bin/env python3
"""Transform gprof(1) output into useful HTML."""
import re, os, sys, cgi, webbrowser
header = """\
<html>
<head>
<title>gprof output (%s)</title>
</head>
<body>
<pre>
"""
trailer = """\
</pre>
</body>
</html>
"""
def add_escapes(filename):
    """Yield the lines of *filename* with &, <, > HTML-escaped.

    Uses html.escape(quote=False), which escapes exactly the same three
    characters as the cgi.escape() this code previously relied on;
    cgi.escape was deprecated and then removed in Python 3.8.
    """
    import html  # local import keeps the module's top-level imports untouched
    with open(filename) as fp:
        for line in fp:
            yield html.escape(line, quote=False)
def main():
    """Convert gprof output (default: gprof.out) into a cross-linked HTML report."""
    filename = "gprof.out"
    if sys.argv[1:]:
        filename = sys.argv[1]
    outputfilename = filename + ".html"
    # NOTE(review): `input` shadows the builtin; harmless here but worth renaming.
    input = add_escapes(filename)
    output = open(outputfilename, "w")
    output.write(header % filename)
    # Copy preamble up to and including the flat-profile header line.
    for line in input:
        output.write(line)
        if line.startswith(" time"):
            break
    labels = {}
    # Flat profile: turn each function name into an anchor plus a link
    # into the call-graph section, remembering the names seen.
    for line in input:
        m = re.match(r"(.* )(\w+)\n", line)
        if not m:
            output.write(line)
            break
        stuff, fname = m.group(1, 2)
        labels[fname] = fname
        output.write('%s<a name="flat:%s" href="#call:%s">%s</a>\n' %
                     (stuff, fname, fname, fname))
    # Copy text between the flat profile and the call-graph table.
    for line in input:
        output.write(line)
        if line.startswith("index % time"):
            break
    # Call graph: primary lines ("[n] ...") get anchors; others get links.
    for line in input:
        m = re.match(r"(.* )(\w+)(( <cycle.*>)? \[\d+\])\n", line)
        if not m:
            output.write(line)
            if line.startswith("Index by function name"):
                break
            continue
        prefix, fname, suffix = m.group(1, 2, 3)
        if fname not in labels:
            output.write(line)
            continue
        if line.startswith("["):
            output.write('%s<a name="call:%s" href="#flat:%s">%s</a>%s\n' %
                         (prefix, fname, fname, fname, suffix))
        else:
            output.write('%s<a href="#call:%s">%s</a>%s\n' %
                         (prefix, fname, fname, suffix))
    # Function-name index: linkify every known name wherever it appears.
    for line in input:
        for part in re.findall(r"(\w+(?:\.c)?|\W+)", line):
            if part in labels:
                part = '<a href="#call:%s">%s</a>' % (part, part)
            output.write(part)
    output.write(trailer)
    output.close()
    webbrowser.open("file:" + os.path.abspath(outputfilename))
if __name__ == '__main__':
main()
| lgpl-3.0 |
pycontw/pycontw2016 | src/proposals/migrations/0038_add_new_conference.py | 1 | 1404 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2019-07-10 07:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: extends the `conference` choice list on all
    # three proposal models to include PyCon Taiwan 2019 and switches the
    # default to 'pycontw-2019'. Applied migrations must stay byte-stable,
    # so do not edit the operations below by hand.
    dependencies = [
        ('proposals', '0037_auto_20180305_1339'),
    ]
    operations = [
        migrations.AlterField(
            model_name='additionalspeaker',
            name='conference',
            field=models.SlugField(choices=[('pycontw-2016', 'PyCon Taiwan 2016'), ('pycontw-2017', 'PyCon Taiwan 2017'), ('pycontw-2018', 'PyCon Taiwan 2018'), ('pycontw-2019', 'PyCon Taiwan 2019')], default='pycontw-2019', verbose_name='conference'),
        ),
        migrations.AlterField(
            model_name='talkproposal',
            name='conference',
            field=models.SlugField(choices=[('pycontw-2016', 'PyCon Taiwan 2016'), ('pycontw-2017', 'PyCon Taiwan 2017'), ('pycontw-2018', 'PyCon Taiwan 2018'), ('pycontw-2019', 'PyCon Taiwan 2019')], default='pycontw-2019', verbose_name='conference'),
        ),
        migrations.AlterField(
            model_name='tutorialproposal',
            name='conference',
            field=models.SlugField(choices=[('pycontw-2016', 'PyCon Taiwan 2016'), ('pycontw-2017', 'PyCon Taiwan 2017'), ('pycontw-2018', 'PyCon Taiwan 2018'), ('pycontw-2019', 'PyCon Taiwan 2019')], default='pycontw-2019', verbose_name='conference'),
        ),
    ]
| mit |
zzh8829/RevOctane | bstream.py | 1 | 3007 | import io
import struct
# Mapping of type names to struct format characters for fixed-width scalars.
# NOTE(review): the bare format codes below use the platform's *native*
# byte order/alignment, not guaranteed little-endian as the name suggests;
# a '<' prefix may have been intended -- confirm on a big-endian host.
little_endian_types = {
    'int8': 'b',
    'uint8': 'B',
    'int16': 'h',
    'uint16': 'H',
    'int32': 'i',
    'uint32': 'I',
    'int64': 'q',
    'uint64': 'Q',
    'float': 'f',
    'float32': 'f',
    'double': 'd',
    'char': 'c',
    'bool': '?',
    'pad': 'x',
    'void*': 'P',
}

# Big-endian variants: the same codes with an explicit '>' prefix.
big_endian_types = { k:">"+v for k,v in little_endian_types.items()}

# Types with no struct format code; values name the BStream method
# that knows how to read them.
special_types = {
    'int12': 'read_int12',
    'uint12': 'read_int12',
    'float16': 'read_float16',
}
class BStream:
    """Binary stream reader with typed reads in either byte order.

    Wraps a file path, an existing file-like object, or a bytes buffer and
    exposes helpers for reading fixed-width scalars (via struct), strings,
    and a few non-standard widths (24-bit ints, 16-bit floats).
    """

    def __init__(self, **kwargs):
        # Exactly one source keyword is expected:
        #   file=path, stream=file-like object, or bytes=raw buffer.
        if "file" in kwargs:
            self.stream = open(kwargs["file"], "rb")
        elif "stream" in kwargs:
            self.stream = kwargs["stream"]
        elif "bytes" in kwargs:
            self.stream = io.BytesIO(kwargs["bytes"])
        else:
            raise Exception("unknown stream source")

        # Byte order used for the struct-based reads; defaults to little.
        self.endianness = kwargs.get("endianness","little")
        if self.endianness == "little":
            self.normal_types = little_endian_types
        elif self.endianness == "big":
            self.normal_types = big_endian_types

    def read(self, type_name='char'):
        """Read one value of the named type.

        If *type_name* is an int, read that many raw bytes instead.
        C-style aliases ending in '_t' (e.g. 'uint32_t') are accepted.
        """
        if isinstance(type_name,int):
            return self.unpack('%ds'%type_name)[0]
        type_name = type_name.lower()
        if type_name.endswith('_t'):
            type_name = type_name[:-2]
        # Special types dispatch to a dedicated read_* method.
        if type_name in special_types:
            return getattr(self, special_types[type_name])()
        if type_name in self.normal_types:
            return self.unpack(self.normal_types[type_name])[0]
        raise Exception("unknown type")

    def unpack(self, fmt):
        # Read exactly as many bytes as *fmt* requires and unpack them.
        return struct.unpack(fmt, self.stream.read(struct.calcsize(fmt)))

    def read_cstring(self):
        """Read a NUL-terminated UTF-8 string (the terminating NUL is consumed)."""
        string = ""
        while True:
            char = self.read('char')
            if ord(char) == 0:
                break
            string += char.decode("utf-8")
        return string

    def read_string(self):
        """Read a uint32 length prefix followed by that many UTF-8 bytes."""
        return self.unpack('%ds'%self.read('uint32_t'))[0].decode('utf-8')

    def read_all(self):
        # Read from the current position through the end of the stream.
        return self.read(self.size() - self.get_position())

    def read_int12(self):
        # NOTE(review): despite the name this reads 3 bytes (24 bits) and
        # is always unsigned -- both 'int12' and 'uint12' map here.
        # Confirm whether a signed variant was intended for 'int12'.
        return int.from_bytes(self.read(3),byteorder=self.endianness)

    def read_float16(self):
        """Read an IEEE 754 half-precision float.

        Works by widening the half's sign/exponent/fraction fields into a
        32-bit float bit pattern and unpacking that.  NOTE(review): the
        zero/infinity/NaN branches return the raw float32 bit pattern as
        an *int* rather than a float -- confirm callers expect that.
        """
        data = self.read('uint16_t')
        s = int((data >> 15) & 0x00000001) # sign
        e = int((data >> 10) & 0x0000001f) # exponent
        f = int(data & 0x000003ff) # fraction
        if e == 0:
            if f == 0:
                # Signed zero.
                return int(s << 31)
            else:
                # Subnormal half: shift the fraction up until the implicit
                # leading bit appears, adjusting the exponent to match.
                while not (f & 0x00000400):
                    f = f << 1
                    e -= 1
                e += 1
                f &= ~0x00000400
                #print(s,e,f)
        elif e == 31:
            if f == 0:
                # Infinity.
                return int((s << 31) | 0x7f800000)
            else:
                # NaN (fraction preserved in the wider payload).
                return int((s << 31) | 0x7f800000 | (f << 13))
        # Rebias the exponent (15 -> 127) and widen the fraction (10 -> 23 bits).
        e = e + (127 -15)
        f = f << 13
        buf = struct.pack('I',int((s << 31) | (e << 23) | f))
        return struct.unpack('f',buf)[0]

    def tell(self):
        return self.stream.tell()

    def seek(self, pos, whence):
        return self.stream.seek(pos, whence)

    def get_position(self):
        return self.tell()

    def set_position(self, pos, whence=0):
        return self.seek(pos, whence)

    def size(self):
        """Return the total stream size, preserving the current position."""
        pos = self.get_position()
        self.set_position(0,2)
        end = self.get_position()
        self.set_position(pos,0)
        return end

    def align(self, alignment=4):
        # Round the current position up to the next multiple of *alignment*.
        self.set_position((self.get_position() + alignment - 1) // alignment * alignment)
| mit |
viewdy/phantomjs2 | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/createbug.py | 125 | 2563 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
class CreateBug(AbstractStep):
    """Step that files a new bug for the current state, unless one exists."""

    @classmethod
    def options(cls):
        extra_options = [
            Options.cc,
            Options.component,
            Options.blocks,
        ]
        return AbstractStep.options() + extra_options

    def run(self, state):
        if state.get("bug_id"):
            # A bug already exists; nothing to create.
            return
        # Command-line options win; fall back to values carried in state.
        cc = self._options.cc or state.get("bug_cc")
        blocks = self._options.blocks or state.get("bug_blocked")
        state["bug_id"] = self._tool.bugs.create_bug(
            state["bug_title"],
            state["bug_description"],
            blocked=blocks,
            component=self._options.component,
            cc=cc)
        if blocks:
            status = self._tool.bugs.fetch_bug(blocks).status()
            if status == 'RESOLVED':
                self._tool.bugs.reopen_bug(
                    blocks,
                    "Re-opened since this is blocked by bug %s" % state["bug_id"])
| bsd-3-clause |
Fokko/incubator-airflow | airflow/migrations/versions/2e541a1dcfed_task_duration.py | 2 | 1530 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""task_duration
Revision ID: 2e541a1dcfed
Revises: 1b38cef5b76e
Create Date: 2015-10-28 20:38:41.266143
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '2e541a1dcfed'
down_revision = '1b38cef5b76e'
branch_labels = None
depends_on = None
def upgrade():
    """Widen ``task_instance.duration`` from INTEGER to FLOAT.

    FLOAT admits fractional values that the previous INTEGER column
    could not represent.
    """
    # use batch_alter_table to support SQLite workaround
    with op.batch_alter_table("task_instance") as batch_op:
        batch_op.alter_column('duration',
                              existing_type=mysql.INTEGER(display_width=11),
                              type_=sa.Float(),
                              existing_nullable=True)
def downgrade():
    """No-op: the duration column type change is not reverted."""
    pass
| apache-2.0 |
frederica07/Dragon_Programming_Process | PyOpenGL-3.0.2/OpenGL/raw/GL/EXT/multisample.py | 1 | 1356 | '''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p, constants as _cs, arrays
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_EXT_multisample'
def _f( function ):
    # Wrap *function* as a GL entry point belonging to GL_EXT_multisample.
    # NOTE(review): the final False argument presumably marks the function
    # as non-core/extension-only -- confirm against platform.createFunction.
    return _p.createFunction( function,_p.GL,'GL_EXT_multisample',False)
_p.unpack_constants( """GL_MULTISAMPLE_EXT 0x809D
GL_SAMPLE_ALPHA_TO_MASK_EXT 0x809E
GL_SAMPLE_ALPHA_TO_ONE_EXT 0x809F
GL_SAMPLE_MASK_EXT 0x80A0
GL_1PASS_EXT 0x80A1
GL_2PASS_0_EXT 0x80A2
GL_2PASS_1_EXT 0x80A3
GL_4PASS_0_EXT 0x80A4
GL_4PASS_1_EXT 0x80A5
GL_4PASS_2_EXT 0x80A6
GL_4PASS_3_EXT 0x80A7
GL_SAMPLE_BUFFERS_EXT 0x80A8
GL_SAMPLES_EXT 0x80A9
GL_SAMPLE_MASK_VALUE_EXT 0x80AA
GL_SAMPLE_MASK_INVERT_EXT 0x80AB
GL_SAMPLE_PATTERN_EXT 0x80AC
GL_MULTISAMPLE_BIT_EXT 0x20000000""", globals())
glget.addGLGetConstant( GL_MULTISAMPLE_EXT, (1,) )
glget.addGLGetConstant( GL_SAMPLE_ALPHA_TO_MASK_EXT, (1,) )
glget.addGLGetConstant( GL_SAMPLE_ALPHA_TO_ONE_EXT, (1,) )
glget.addGLGetConstant( GL_SAMPLE_MASK_EXT, (1,) )
@_f
@_p.types(None,_cs.GLclampf,_cs.GLboolean)
def glSampleMaskEXT( value,invert ):pass
@_f
@_p.types(None,_cs.GLenum)
def glSamplePatternEXT( pattern ):pass
def glInitMultisampleEXT():
    '''Return boolean indicating whether this extension is available'''
    # Local import: resolved at call time rather than at module import time.
    from OpenGL import extensions
    return extensions.hasGLExtension( EXTENSION_NAME )
| bsd-2-clause |
cjerdonek/open-rcv | openrcv/models.py | 1 | 6692 | #
# Copyright (c) 2014 Chris Jerdonek. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
"""Internal models that do not require JSON serialization.
Ballot Model
------------
For now, the "Ballot" object is not represented by a class. It is
simply a `(weight, choices)` 2-tuple, where `weight` is a number and
choices is a tuple of integer choice ID's.
"""
from contextlib import contextmanager
import logging
import tempfile
# The current module should not depend on any modules in openrcv.formats.
from openrcv import streams, utils
from openrcv.utils import ReprMixin
log = logging.getLogger(__name__)
def make_candidate_numbers(candidate_count):
    """Return an iterable of 1-based candidate numbers.

    The numbers run from 1 through ``candidate_count`` inclusive.
    """
    first = 1
    return range(first, candidate_count + first)
# TODO: allow ordering and compressing to be done separately.
def normalize_ballots_to(source, target):
    """Write a normalized copy of the ballots in `source` to `target`.

    Ballots with identical choice tuples are merged ("compressed") into a
    single ballot whose weight is the sum of the originals, and the merged
    ballots are written in lexicographic order of their choices for
    readability.

    Arguments:
      source: source ballots resource.
      target: target ballots resource.
    """
    # Accumulate the total weight for each distinct tuple of choices.
    totals = {}
    with source.reading() as ballots:
        for weight, choices in ballots:
            totals[choices] = totals.get(choices, 0) + weight

    # Emit one ballot per distinct choice tuple, in sorted order.
    with target.writing() as writer:
        for choices in sorted(totals):
            writer.send((totals[choices], choices))
def normalize_ballots(ballots_resource):
    """Normalize the given ballots in place.

    Writes the ordered/compressed ballots (see normalize_ballots_to) to a
    temporary replacement resource that then takes the place of the
    original contents when the context exits.

    Arguments:
      ballots_resource: a ballots resource.
    """
    with ballots_resource.replacement() as temp_resource:
        normalize_ballots_to(ballots_resource, temp_resource)
class BallotsResourceMixin(object):
    """Mixin adding ballot-level helpers to a resource class.

    Expects the host object to expose a ``resource`` attribute whose
    ``reading()`` context manager yields (weight, choices) pairs.
    """

    def count_ballots(self):
        """Return the total ballot weight in the resource."""
        total = 0
        with self.resource.reading() as ballots:
            for weight, _choices in ballots:
                total += weight
        return total

    def normalize(self):
        """Order and compress the ballots in place."""
        normalize_ballots(self)
class BallotsResource(streams.WrapperResource, BallotsResourceMixin):
    # Stream-backed ballots resource: WrapperResource supplies the stream
    # handling, BallotsResourceMixin the ballot-counting helpers.
    pass
class CandidatesInfo(object):
    """Holds the candidate names, addressable by 1-based candidate number."""

    def __init__(self, candidates):
        """
        Arguments:
          candidates: an iterable of the candidate names.
        """
        self.candidates = candidates

    def from_number(self, number):
        """Return the name for the given 1-based candidate number."""
        index = number - 1
        return self.candidates[index]

    def from_numbers(self, numbers):
        """Return the names for an iterable of 1-based candidate numbers."""
        return list(map(self.from_number, numbers))
class ContestInput(ReprMixin):

    """
    Attributes:
      ballots_resource: a BallotsResource object.
      candidates: an iterable of the names of all candidates, in numeric
        order of their ballot ID.
      name: contest name.
      normalize_ballots: whether ballots should be normalized before
        counting; None means "use the default" (i.e. normalize).
      seat_count: integer number of winners.
    """

    # We include an underscore at the end of id_ since id() is a built-in.

    # TODO: test defaults -- especially properties of default ballots resource.
    # TODO: instead make seat_count part of the "rules".
    def __init__(self, name=None, notes=None, candidates=None, seat_count=None,
                 ballots_resource=None, normalize_ballots=None):
        if ballots_resource is None:
            ballots_resource = streams.NullStreamResource()
        if candidates is None:
            candidates = []
        if seat_count is None:
            seat_count = 1

        self.ballots_resource = ballots_resource
        self.candidates = candidates
        self.name = name
        # Bug fix: this attribute was previously never assigned anywhere,
        # so accessing should_normalize_ballots always raised AttributeError.
        self.normalize_ballots = normalize_ballots
        self.notes = notes
        self.seat_count = seat_count

    def repr_info(self):
        return "name=%r" % (self.name, )

    def make_candidates_info(self):
        """Return a CandidatesInfo object."""
        return CandidatesInfo(self.candidates)

    # TODO: remove this.
    def get_candidate_numbers(self):
        """Return an iterable of the candidate numbers."""
        return make_candidate_numbers(len(self.candidates))

    @property
    def should_normalize_ballots(self):
        """Return whether the ballots should be normalized before counting."""
        # Default to normalizing.
        return (self.normalize_ballots is None) or self.normalize_ballots
class ContestOutcome(object):
    """Records whether a contest's count ran to completion."""

    def __init__(self, interrupted=None):
        # interrupted: whether the count was interrupted (None if unknown).
        self.interrupted = interrupted
class RoundResults(object):
    """Represents the results of a single counting round."""

    def __init__(self, candidates_info=None, elected=None, eliminated=None,
                 tied_last_place=None, totals=None, tie_break=None):
        """
        Arguments:
          totals: dict of candidate number to vote total.
        """
        # Attribute names mirror the keyword arguments exactly.
        values = dict(candidates_info=candidates_info,
                      elected=elected,
                      eliminated=eliminated,
                      tie_break=tie_break,
                      tied_last_place=tied_last_place,
                      totals=totals)
        for attr, value in values.items():
            setattr(self, attr, value)
class ContestResults(ReprMixin):
    """Represents the overall results of a contest."""

    def __init__(self, outcome=None, rounds=None):
        # outcome: a ContestOutcome; rounds: sequence of RoundResults.
        self.outcome = outcome
        self.rounds = rounds

    def repr_info(self):
        round_count = len(self.rounds)
        return "rounds={0}".format(round_count)
| mit |
zhangjunlei26/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_extensions.py | 413 | 16128 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for extensions module."""
import unittest
import zlib
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import common
from mod_pywebsocket import extensions
class ExtensionsTest(unittest.TestCase):
    """A unittest for non-class methods in extensions.py"""

    def test_parse_window_bits(self):
        # Rejected: non-integer, padded, zero-prefixed, hex, or float input.
        self.assertRaises(ValueError, extensions._parse_window_bits, None)
        self.assertRaises(ValueError, extensions._parse_window_bits, 'foobar')
        self.assertRaises(ValueError, extensions._parse_window_bits, ' 8 ')
        self.assertRaises(ValueError, extensions._parse_window_bits, 'a8a')
        self.assertRaises(ValueError, extensions._parse_window_bits, '00000')
        self.assertRaises(ValueError, extensions._parse_window_bits, '00008')
        self.assertRaises(ValueError, extensions._parse_window_bits, '0x8')
        self.assertRaises(ValueError, extensions._parse_window_bits, '9.5')
        self.assertRaises(ValueError, extensions._parse_window_bits, '8.0')
        # Bug fix: these previously passed the function object and a message
        # to assertTrue without ever calling the parser, asserting nothing.
        self.assertTrue(extensions._parse_window_bits('8'))
        self.assertTrue(extensions._parse_window_bits('15'))
        # Rejected: values outside the accepted 8..15 range.
        self.assertRaises(ValueError, extensions._parse_window_bits, '-8')
        self.assertRaises(ValueError, extensions._parse_window_bits, '0')
        self.assertRaises(ValueError, extensions._parse_window_bits, '7')
        self.assertRaises(ValueError, extensions._parse_window_bits, '16')
        self.assertRaises(
            ValueError, extensions._parse_window_bits, '10000000')
class CompressionMethodParameterParserTest(unittest.TestCase):
    """A unittest for _parse_compression_method which parses the compression
    method description used by perframe-compression and permessage-compression
    extension in their "method" extension parameter.
    """

    def test_parse_method_simple(self):
        # A bare token parses to a single method with no parameters.
        method_list = extensions._parse_compression_method('foo')
        self.assertEqual(1, len(method_list))
        method = method_list[0]
        self.assertEqual('foo', method.name())
        self.assertEqual(0, len(method.get_parameters()))

    def test_parse_method_with_parameter(self):
        # Valueless parameters parse to None; valued ones keep the string.
        method_list = extensions._parse_compression_method('foo; x; y=10')
        self.assertEqual(1, len(method_list))
        method = method_list[0]
        self.assertEqual('foo', method.name())
        self.assertEqual(2, len(method.get_parameters()))
        self.assertTrue(method.has_parameter('x'))
        self.assertEqual(None, method.get_parameter_value('x'))
        self.assertTrue(method.has_parameter('y'))
        self.assertEqual('10', method.get_parameter_value('y'))

    def test_parse_method_with_quoted_parameter(self):
        # Quoted-string values are unquoted on parse.
        method_list = extensions._parse_compression_method(
            'foo; x="Hello World"; y=10')
        self.assertEqual(1, len(method_list))
        method = method_list[0]
        self.assertEqual('foo', method.name())
        self.assertEqual(2, len(method.get_parameters()))
        self.assertTrue(method.has_parameter('x'))
        self.assertEqual('Hello World', method.get_parameter_value('x'))
        self.assertTrue(method.has_parameter('y'))
        self.assertEqual('10', method.get_parameter_value('y'))

    def test_parse_method_multiple(self):
        # Comma separates multiple methods.
        method_list = extensions._parse_compression_method('foo, bar')
        self.assertEqual(2, len(method_list))
        self.assertEqual('foo', method_list[0].name())
        self.assertEqual(0, len(method_list[0].get_parameters()))
        self.assertEqual('bar', method_list[1].name())
        self.assertEqual(0, len(method_list[1].get_parameters()))

    def test_parse_method_multiple_methods_with_quoted_parameter(self):
        method_list = extensions._parse_compression_method(
            'foo; x="Hello World", bar; y=10')
        self.assertEqual(2, len(method_list))
        self.assertEqual('foo', method_list[0].name())
        self.assertEqual(1, len(method_list[0].get_parameters()))
        self.assertTrue(method_list[0].has_parameter('x'))
        self.assertEqual('Hello World',
                         method_list[0].get_parameter_value('x'))
        self.assertEqual('bar', method_list[1].name())
        self.assertEqual(1, len(method_list[1].get_parameters()))
        self.assertTrue(method_list[1].has_parameter('y'))
        self.assertEqual('10', method_list[1].get_parameter_value('y'))

    def test_create_method_desc_simple(self):
        params = common.ExtensionParameter('foo')
        desc = extensions._create_accepted_method_desc('foo',
                                                       params.get_parameters())
        self.assertEqual('foo', desc)

    def test_create_method_desc_with_parameters(self):
        # Values containing separators are re-serialized as quoted strings.
        params = common.ExtensionParameter('foo')
        params.add_parameter('x', 'Hello, World')
        params.add_parameter('y', '10')
        desc = extensions._create_accepted_method_desc('foo',
                                                       params.get_parameters())
        self.assertEqual('foo; x="Hello, World"; y=10', desc)
class DeflateFrameExtensionProcessorParsingTest(unittest.TestCase):
    """A unittest for checking that DeflateFrameExtensionProcessor parses given
    extension parameter correctly.
    """

    def test_registry(self):
        # Both the plain and the x-webkit- prefixed extension names map to
        # the same processor class.
        processor = extensions.get_extension_processor(
            common.ExtensionParameter('deflate-frame'))
        self.assertIsInstance(processor,
                              extensions.DeflateFrameExtensionProcessor)

        processor = extensions.get_extension_processor(
            common.ExtensionParameter('x-webkit-deflate-frame'))
        self.assertIsInstance(processor,
                              extensions.DeflateFrameExtensionProcessor)

    def test_minimal_offer(self):
        processor = extensions.DeflateFrameExtensionProcessor(
            common.ExtensionParameter('perframe-deflate'))

        response = processor.get_extension_response()
        self.assertEqual('perframe-deflate', response.name())
        self.assertEqual(0, len(response.get_parameters()))

        # Defaults: maximum window size, context takeover enabled.
        self.assertEqual(zlib.MAX_WBITS,
                         processor._rfc1979_deflater._window_bits)
        self.assertFalse(processor._rfc1979_deflater._no_context_takeover)

    def test_offer_with_max_window_bits(self):
        parameter = common.ExtensionParameter('perframe-deflate')
        parameter.add_parameter('max_window_bits', '10')
        processor = extensions.DeflateFrameExtensionProcessor(parameter)

        response = processor.get_extension_response()
        self.assertEqual('perframe-deflate', response.name())
        self.assertEqual(0, len(response.get_parameters()))

        self.assertEqual(10, processor._rfc1979_deflater._window_bits)

    def test_offer_with_out_of_range_max_window_bits(self):
        # An invalid offer is rejected by returning None as the response.
        parameter = common.ExtensionParameter('perframe-deflate')
        parameter.add_parameter('max_window_bits', '0')
        processor = extensions.DeflateFrameExtensionProcessor(parameter)

        self.assertIsNone(processor.get_extension_response())

    def test_offer_with_max_window_bits_without_value(self):
        parameter = common.ExtensionParameter('perframe-deflate')
        parameter.add_parameter('max_window_bits', None)
        processor = extensions.DeflateFrameExtensionProcessor(parameter)

        self.assertIsNone(processor.get_extension_response())

    def test_offer_with_no_context_takeover(self):
        parameter = common.ExtensionParameter('perframe-deflate')
        parameter.add_parameter('no_context_takeover', None)
        processor = extensions.DeflateFrameExtensionProcessor(parameter)

        response = processor.get_extension_response()
        self.assertEqual('perframe-deflate', response.name())
        self.assertEqual(0, len(response.get_parameters()))

        self.assertTrue(processor._rfc1979_deflater._no_context_takeover)

    def test_offer_with_no_context_takeover_with_value(self):
        # no_context_takeover must not carry a value.
        parameter = common.ExtensionParameter('perframe-deflate')
        parameter.add_parameter('no_context_takeover', 'foobar')
        processor = extensions.DeflateFrameExtensionProcessor(parameter)

        self.assertIsNone(processor.get_extension_response())

    def test_offer_with_unknown_parameter(self):
        # deflate-frame ignores unknown parameters rather than rejecting.
        parameter = common.ExtensionParameter('perframe-deflate')
        parameter.add_parameter('foo', 'bar')
        processor = extensions.DeflateFrameExtensionProcessor(parameter)

        response = processor.get_extension_response()
        self.assertEqual('perframe-deflate', response.name())
        self.assertEqual(0, len(response.get_parameters()))
class PerMessageDeflateExtensionProcessorParsingTest(unittest.TestCase):
    """A unittest for checking that PerMessageDeflateExtensionProcessor parses
    given extension parameter correctly.
    """

    def test_registry(self):
        processor = extensions.get_extension_processor(
            common.ExtensionParameter('permessage-deflate'))
        self.assertIsInstance(processor,
                              extensions.PerMessageDeflateExtensionProcessor)

    def test_minimal_offer(self):
        processor = extensions.PerMessageDeflateExtensionProcessor(
            common.ExtensionParameter('permessage-deflate'))

        response = processor.get_extension_response()
        self.assertEqual('permessage-deflate', response.name())
        self.assertEqual(0, len(response.get_parameters()))

        # Defaults: maximum window size, context takeover enabled.
        self.assertEqual(zlib.MAX_WBITS,
                         processor._rfc1979_deflater._window_bits)
        self.assertFalse(processor._rfc1979_deflater._no_context_takeover)

    def test_offer_with_max_window_bits(self):
        parameter = common.ExtensionParameter('permessage-deflate')
        parameter.add_parameter('server_max_window_bits', '10')
        processor = extensions.PerMessageDeflateExtensionProcessor(parameter)

        response = processor.get_extension_response()
        self.assertEqual('permessage-deflate', response.name())
        # Unlike deflate-frame, the accepted value is echoed in the response.
        self.assertEqual([('server_max_window_bits', '10')],
                         response.get_parameters())

        self.assertEqual(10, processor._rfc1979_deflater._window_bits)

    def test_offer_with_out_of_range_max_window_bits(self):
        parameter = common.ExtensionParameter('permessage-deflate')
        parameter.add_parameter('server_max_window_bits', '0')
        processor = extensions.PerMessageDeflateExtensionProcessor(parameter)

        self.assertIsNone(processor.get_extension_response())

    def test_offer_with_max_window_bits_without_value(self):
        parameter = common.ExtensionParameter('permessage-deflate')
        parameter.add_parameter('server_max_window_bits', None)
        processor = extensions.PerMessageDeflateExtensionProcessor(parameter)

        self.assertIsNone(processor.get_extension_response())

    def test_offer_with_no_context_takeover(self):
        parameter = common.ExtensionParameter('permessage-deflate')
        parameter.add_parameter('server_no_context_takeover', None)
        processor = extensions.PerMessageDeflateExtensionProcessor(parameter)

        response = processor.get_extension_response()
        self.assertEqual('permessage-deflate', response.name())
        self.assertEqual([('server_no_context_takeover', None)],
                         response.get_parameters())

        self.assertTrue(processor._rfc1979_deflater._no_context_takeover)

    def test_offer_with_no_context_takeover_with_value(self):
        parameter = common.ExtensionParameter('permessage-deflate')
        parameter.add_parameter('server_no_context_takeover', 'foobar')
        processor = extensions.PerMessageDeflateExtensionProcessor(parameter)

        self.assertIsNone(processor.get_extension_response())

    def test_offer_with_unknown_parameter(self):
        # permessage-deflate is strict: unknown parameters reject the offer.
        parameter = common.ExtensionParameter('permessage-deflate')
        parameter.add_parameter('foo', 'bar')
        processor = extensions.PerMessageDeflateExtensionProcessor(parameter)

        self.assertIsNone(processor.get_extension_response())
class PerMessageDeflateExtensionProcessorBuildingTest(unittest.TestCase):
    """A unittest for checking that PerMessageDeflateExtensionProcessor builds
    a response based on specified options correctly.
    """

    def test_response_with_max_window_bits(self):
        parameter = common.ExtensionParameter('permessage-deflate')
        parameter.add_parameter('client_max_window_bits', None)
        processor = extensions.PerMessageDeflateExtensionProcessor(parameter)
        processor.set_client_max_window_bits(10)

        response = processor.get_extension_response()
        self.assertEqual('permessage-deflate', response.name())
        self.assertEqual([('client_max_window_bits', '10')],
                         response.get_parameters())

    def test_response_with_max_window_bits_without_client_permission(self):
        # client_max_window_bits may only be requested when the client
        # declared support for it in its offer; otherwise no response.
        processor = extensions.PerMessageDeflateExtensionProcessor(
            common.ExtensionParameter('permessage-deflate'))
        processor.set_client_max_window_bits(10)

        response = processor.get_extension_response()
        self.assertIsNone(response)

    def test_response_with_true_for_no_context_takeover(self):
        processor = extensions.PerMessageDeflateExtensionProcessor(
            common.ExtensionParameter('permessage-deflate'))
        processor.set_client_no_context_takeover(True)

        response = processor.get_extension_response()
        self.assertEqual('permessage-deflate', response.name())
        self.assertEqual([('client_no_context_takeover', None)],
                         response.get_parameters())

    def test_response_with_false_for_no_context_takeover(self):
        # False is the default and is not echoed in the response.
        processor = extensions.PerMessageDeflateExtensionProcessor(
            common.ExtensionParameter('permessage-deflate'))
        processor.set_client_no_context_takeover(False)

        response = processor.get_extension_response()
        self.assertEqual('permessage-deflate', response.name())
        self.assertEqual(0, len(response.get_parameters()))
class PerMessageCompressExtensionProcessorTest(unittest.TestCase):
    # Registry check for the generic permessage-compress extension name.
    def test_registry(self):
        processor = extensions.get_extension_processor(
            common.ExtensionParameter('permessage-compress'))
        self.assertIsInstance(processor,
                              extensions.PerMessageCompressExtensionProcessor)
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
python-acoustics/Sea | gui/analysis/actions.py | 2 | 2362 | """
The following are some actions.
"""
from PyQt4 import QtCore, QtGui
import FreeCADGui as Gui
import FreeCAD as App
import logging
class RunAnalysis(object):
    """
    Perform the SEA analysis. Solve the modal energies.
    """

    def Activated(self):
        """Solve the single selected SEA system, or tell the user what to fix."""
        import Sea
        objects = Gui.Selection.getSelection()
        # Bug fix: filter() returns a lazy object with no len() on Python 3;
        # build a list so len() works on both Python 2 and 3.
        systems = [obj for obj in objects
                   if Sea.actions.document.isSystem(obj)]
        if len(systems) == 0:
            App.Console.PrintMessage('Please select an SEA system.\n')
        elif len(systems) == 1:
            systems[0].solve()
        else:
            App.Console.PrintMessage('Please select a single SEA system.\n')

    def GetResources(self):
        """Return the FreeCAD command resources (icon, menu text, tooltip)."""
        MenuText = QtCore.QT_TRANSLATE_NOOP('Sea_RunAnalysis', 'Start calculation')
        ToolTip = QtCore.QT_TRANSLATE_NOOP('Sea_RunAnalysis', 'Calculate modal energies.')
        return {'Pixmap' : 'AnalysisRunIco', 'MenuText': MenuText, 'ToolTip': ToolTip}
class StopAnalysis(object):
    """
    Interrupt the calculation.
    """

    def Activated(self):
        """Stop the calculation on the active object, if there is one."""
        import Sea
        document = App.ActiveDocument
        if document is None:
            App.Console.PrintMessage('First create a model.\n')
        elif document.ActiveObject:
            Sea.actions.stop(document.ActiveObject)
        else:
            App.Console.PrintMessage('Please select an SEA system.\n')

    def GetResources(self):
        """Return the FreeCAD command resources (icon, menu text, tooltip)."""
        menu_text = QtCore.QT_TRANSLATE_NOOP('Sea_StopAnalysis', 'Stop calculation')
        tool_tip = QtCore.QT_TRANSLATE_NOOP('Sea_StopAnalysis', 'Stop the currently active calculation.')
        return {'Pixmap' : 'AnalysisStopIco', 'MenuText': menu_text, 'ToolTip': tool_tip}
class ClearAnalysis(object):
    """
    Clear the results of the analysis.
    """

    def Activated(self):
        # Invoked by FreeCAD when the command is triggered.
        import Sea
        Sea.actions.analysis.clear()

    def GetResources(self):
        # Icon, menu label and tooltip shown in the FreeCAD GUI.
        MenuText = QtCore.QT_TRANSLATE_NOOP('Sea_ClearAnalysis', 'Clear results')
        ToolTip = QtCore.QT_TRANSLATE_NOOP('Sea_ClearAnalysis', 'Delete the stored results of the analysis.')
        return {'Pixmap' : 'AnalysisPostIco', 'MenuText': MenuText, 'ToolTip': ToolTip}
Gui.addCommand('Sea_RunAnalysis', RunAnalysis())
Gui.addCommand('Sea_StopAnalysis', StopAnalysis())
Gui.addCommand('Sea_ClearAnalysis', ClearAnalysis())
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.