| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
stuntman723/rap-analyzer | rap_analyzer/lib/python2.7/site-packages/setuptools/depends.py | 462 | 6370 | import sys
import imp
import marshal
from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
from distutils.version import StrictVersion
from setuptools import compat
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
"""A prerequisite to building or installing a distribution"""
def __init__(self, name, requested_version, module, homepage='',
attribute=None, format=None):
if format is None and requested_version is not None:
format = StrictVersion
if format is not None:
requested_version = format(requested_version)
if attribute is None:
attribute = '__version__'
self.__dict__.update(locals())
del self.self
def full_name(self):
"""Return full package/distribution name, w/version"""
if self.requested_version is not None:
return '%s-%s' % (self.name,self.requested_version)
return self.name
def version_ok(self, version):
"""Is 'version' sufficiently up-to-date?"""
return self.attribute is None or self.format is None or \
str(version) != "unknown" and version >= self.requested_version
def get_version(self, paths=None, default="unknown"):
"""Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'.
"""
if self.attribute is None:
try:
f,p,i = find_module(self.module,paths)
if f: f.close()
return default
except ImportError:
return None
v = get_module_constant(self.module, self.attribute, default, paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v
def is_present(self, paths=None):
"""Return true if dependency is present on 'paths'"""
return self.get_version(paths) is not None
def is_current(self, paths=None):
"""Return true if dependency is present and up-to-date on 'paths'"""
version = self.get_version(paths)
if version is None:
return False
return self.version_ok(version)
def _iter_code(code):
"""Yield '(op,arg)' pair for each operation in code object 'code'"""
from array import array
from dis import HAVE_ARGUMENT, EXTENDED_ARG
    bytes = array('B', code.co_code)  # unsigned, so opcodes > 127 (e.g. EXTENDED_ARG) compare correctly
eof = len(code.co_code)
ptr = 0
extended_arg = 0
while ptr<eof:
op = bytes[ptr]
if op>=HAVE_ARGUMENT:
arg = bytes[ptr+1] + bytes[ptr+2]*256 + extended_arg
ptr += 3
if op==EXTENDED_ARG:
extended_arg = arg * compat.long_type(65536)
continue
else:
arg = None
ptr += 1
yield op,arg
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
parts = module.split('.')
while parts:
part = parts.pop(0)
f, path, (suffix,mode,kind) = info = imp.find_module(part, paths)
if kind==PKG_DIRECTORY:
parts = parts or ['__init__']
paths = [path]
elif parts:
raise ImportError("Can't find %r in %s" % (parts,module))
return info
def get_module_constant(module, symbol, default=-1, paths=None):
"""Find 'module' by searching 'paths', and extract 'symbol'
Return 'None' if 'module' does not exist on 'paths', or it does not define
'symbol'. If the module defines 'symbol' as a constant, return the
constant. Otherwise, return 'default'."""
try:
f, path, (suffix, mode, kind) = find_module(module, paths)
except ImportError:
# Module doesn't exist
return None
try:
if kind==PY_COMPILED:
f.read(8) # skip magic & date
code = marshal.load(f)
elif kind==PY_FROZEN:
code = imp.get_frozen_object(module)
elif kind==PY_SOURCE:
code = compile(f.read(), path, 'exec')
else:
# Not something we can parse; we'll have to import it. :(
if module not in sys.modules:
imp.load_module(module, f, path, (suffix, mode, kind))
return getattr(sys.modules[module], symbol, None)
finally:
if f:
f.close()
return extract_constant(code, symbol, default)
def extract_constant(code, symbol, default=-1):
"""Extract the constant value of 'symbol' from 'code'
If the name 'symbol' is bound to a constant value by the Python code
object 'code', return that value. If 'symbol' is bound to an expression,
return 'default'. Otherwise, return 'None'.
Return value is based on the first assignment to 'symbol'. 'symbol' must
be a global, or at least a non-"fast" local in the code block. That is,
only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
must be present in 'code.co_names'.
"""
if symbol not in code.co_names:
        # name's not there, can't possibly be an assignment
return None
name_idx = list(code.co_names).index(symbol)
STORE_NAME = 90
STORE_GLOBAL = 97
LOAD_CONST = 100
const = default
for op, arg in _iter_code(code):
if op==LOAD_CONST:
const = code.co_consts[arg]
elif arg==name_idx and (op==STORE_NAME or op==STORE_GLOBAL):
return const
else:
const = default
def _update_globals():
"""
Patch the globals to remove the objects not available on some platforms.
XXX it'd be better to test assertions about bytecode instead.
"""
if not sys.platform.startswith('java') and sys.platform != 'cli':
return
incompatible = 'extract_constant', 'get_module_constant'
for name in incompatible:
del globals()[name]
__all__.remove(name)
_update_globals()
| mit |
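A short usage sketch for the module above (an illustration under assumptions: it presumes a CPython 2 interpreter where this `setuptools.depends` is importable, since `_update_globals()` strips these helpers on Jython/IronPython). `extract_constant` lets setuptools read a module's `__version__` without importing the module:

```python
# Hedged sketch: extract the value bound to __version__ from compiled source.
from setuptools.depends import extract_constant

source = "__version__ = '1.4.2'\nDEBUG = True\n"
code = compile(source, '<candidate_module>', 'exec')

print(extract_constant(code, '__version__', default=-1))  # -> '1.4.2'
print(extract_constant(code, 'ABSENT', default=-1))       # -> None: name never bound
```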
gatkin/declxml | tests/test_text_encoding.py | 1 | 2177 | # -*- coding: utf-8 -*-
"""Tests handling of text encoding"""
import os
import pytest
import declxml as xml
_PROCESSOR = xml.dictionary('root', [
xml.string('value')
])
_VALUE = {
'value': u'Hello, 世界!',
}
_XML_STRING = u"""<root><value>Hello, 世界!</value></root>"""
_XML_STRING_INDENTED = u"""<?xml version="1.0" encoding="utf-8"?>
<root>
<value>Hello, 世界!</value>
</root>
"""
_ENCODINGS = [
'utf-8',
'utf-16',
'utf-32',
'cp950',
'gb18030',
]
def test_parse_from_string():
"""Parse a unicode string"""
actual = xml.parse_from_string(_PROCESSOR, _XML_STRING)
assert _VALUE == actual
@pytest.mark.parametrize('encoding', _ENCODINGS)
def test_parse_from_file(tmpdir, encoding):
"""Tests parsing an XML file"""
xml_file = tmpdir.join('data.xml')
xml_file.write_text(_XML_STRING, encoding)
actual = xml.parse_from_file(_PROCESSOR, xml_file.strpath, encoding=encoding)
assert _VALUE == actual
@pytest.mark.parametrize('encoding', _ENCODINGS)
def test_serialize_to_file(tmpdir, encoding):
    """Tests serializing a value to an XML file"""
xml_file_name = 'data.xml'
xml_file_path = os.path.join(tmpdir.strpath, xml_file_name)
xml.serialize_to_file(_PROCESSOR, _VALUE, xml_file_path, encoding=encoding)
xml_file = tmpdir.join(xml_file_name)
actual = xml_file.read_text(encoding)
assert _XML_STRING == actual
@pytest.mark.parametrize('encoding', _ENCODINGS)
def test_serialize_to_file_indented(tmpdir, encoding):
    """Tests serializing a value to an indented XML file"""
xml_file_name = 'data.xml'
xml_file_path = os.path.join(tmpdir.strpath, xml_file_name)
xml.serialize_to_file(_PROCESSOR, _VALUE, xml_file_path, indent=' ', encoding=encoding)
xml_file = tmpdir.join(xml_file_name)
actual = xml_file.read_text(encoding)
assert _XML_STRING_INDENTED == actual
def test_serialize_to_string():
"""Serialize a value to a unicode string"""
actual = xml.serialize_to_string(_PROCESSOR, _VALUE)
assert _XML_STRING == actual
def test_serialize_to_string_indent():
"""Serialize a value to a unicode string"""
actual = xml.serialize_to_string(_PROCESSOR, _VALUE, indent=' ')
assert _XML_STRING_INDENTED == actual
| mit |
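The declxml processor above is declarative and symmetric: the same processor object drives both parsing and serialization. A minimal in-memory round trip, using only the API calls already exercised by these tests:

```python
# Minimal round-trip sketch with the declxml API from the tests above.
import declxml as xml

processor = xml.dictionary('root', [xml.string('value')])
document = xml.serialize_to_string(processor, {'value': u'Hello, world!'})
assert xml.parse_from_string(processor, document) == {'value': u'Hello, world!'}
```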
bousmalis/models | swivel/vecs.py | 20 | 3115 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mmap
import numpy as np
import os
import struct
class Vecs(object):
def __init__(self, vocab_filename, rows_filename, cols_filename=None):
"""Initializes the vectors from a text vocabulary and binary data."""
with open(vocab_filename, 'r') as lines:
self.vocab = [line.split()[0] for line in lines]
self.word_to_idx = {word: idx for idx, word in enumerate(self.vocab)}
n = len(self.vocab)
with open(rows_filename, 'r') as rows_fh:
rows_fh.seek(0, os.SEEK_END)
size = rows_fh.tell()
# Make sure that the file size seems reasonable.
if size % (4 * n) != 0:
raise IOError(
'unexpected file size for binary vector file %s' % rows_filename)
# Memory map the rows.
      dim = size // (4 * n)  # explicit floor division: reshape needs an integer
rows_mm = mmap.mmap(rows_fh.fileno(), 0, prot=mmap.PROT_READ)
rows = np.matrix(
np.frombuffer(rows_mm, dtype=np.float32).reshape(n, dim))
# If column vectors were specified, then open them and add them to the row
# vectors.
if cols_filename:
with open(cols_filename, 'r') as cols_fh:
cols_mm = mmap.mmap(cols_fh.fileno(), 0, prot=mmap.PROT_READ)
cols_fh.seek(0, os.SEEK_END)
if cols_fh.tell() != size:
raise IOError('row and column vector files have different sizes')
cols = np.matrix(
np.frombuffer(cols_mm, dtype=np.float32).reshape(n, dim))
rows += cols
cols_mm.close()
# Normalize so that dot products are just cosine similarity.
self.vecs = rows / np.linalg.norm(rows, axis=1).reshape(n, 1)
rows_mm.close()
def similarity(self, word1, word2):
"""Computes the similarity of two tokens."""
idx1 = self.word_to_idx.get(word1)
idx2 = self.word_to_idx.get(word2)
    if idx1 is None or idx2 is None:  # 'not idx' would wrongly reject index 0
return None
return float(self.vecs[idx1] * self.vecs[idx2].transpose())
def neighbors(self, query):
"""Returns the nearest neighbors to the query (a word or vector)."""
if isinstance(query, basestring):
idx = self.word_to_idx.get(query)
if idx is None:
return None
query = self.vecs[idx]
neighbors = self.vecs * query.transpose()
return sorted(
zip(self.vocab, neighbors.flat),
key=lambda kv: kv[1], reverse=True)
def lookup(self, word):
"""Returns the embedding for a token, or None if no embedding exists."""
idx = self.word_to_idx.get(word)
return None if idx is None else self.vecs[idx]
| apache-2.0 |
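The one non-obvious step in `Vecs.__init__` is the closing normalization: once each row is divided by its L2 norm, the plain dot products in `similarity` and `neighbors` are exactly cosine similarities. A standalone NumPy check of that identity (toy data, nothing from Swivel):

```python
# After row-normalizing, a plain dot product equals cosine similarity.
import numpy as np

rows = np.random.rand(5, 3).astype(np.float32)        # 5 toy embeddings
vecs = rows / np.linalg.norm(rows, axis=1).reshape(5, 1)

a, b = rows[0], rows[1]
cosine = a.dot(b) / (np.linalg.norm(a) * np.linalg.norm(b))
assert np.isclose(vecs[0].dot(vecs[1]), cosine)
```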
Ninjakow/TrueSkill | lib/numpy/f2py/tests/test_size.py | 145 | 1154 | from __future__ import division, absolute_import, print_function
import os
from numpy.testing import run_module_suite, assert_equal, dec
import util
def _path(*a):
return os.path.join(*((os.path.dirname(__file__),) + a))
class TestSizeSumExample(util.F2PyTest):
sources = [_path('src', 'size', 'foo.f90')]
@dec.slow
def test_all(self):
r = self.module.foo([[1, 2]])
assert_equal(r, [3], repr(r))
r = self.module.foo([[1, 2], [3, 4]])
assert_equal(r, [3, 7], repr(r))
r = self.module.foo([[1, 2], [3, 4], [5, 6]])
assert_equal(r, [3, 7, 11], repr(r))
@dec.slow
def test_transpose(self):
r = self.module.trans([[1, 2]])
assert_equal(r, [[1], [2]], repr(r))
r = self.module.trans([[1, 2, 3], [4, 5, 6]])
assert_equal(r, [[1, 4], [2, 5], [3, 6]], repr(r))
@dec.slow
def test_flatten(self):
r = self.module.flatten([[1, 2]])
assert_equal(r, [1, 2], repr(r))
r = self.module.flatten([[1, 2, 3], [4, 5, 6]])
assert_equal(r, [1, 2, 3, 4, 5, 6], repr(r))
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
switowski/invenio | invenio/modules/deposit/validation_utils.py | 9 | 9769 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Validation functions."""
import re
from flask import current_app
from invenio.utils import persistentid as pidutils
import six
from wtforms.validators import StopValidation, ValidationError
#
# General purpose validators
#
class ListLength(object):
"""Require number of elements.
:param min_num: Minimum number of elements.
:param max_num: Maximum number of elements.
:param element_filter: Callable used to filter the list prior to testing
the number of elements. Useful to remove empty elements.
"""
def __init__(self, min_num=None, max_num=None,
element_filter=lambda x: True):
self.min = min_num
self.max = max_num
self.element_filter = element_filter
def __call__(self, form, field):
test_list = []
if self.min or self.max:
            test_list = list(filter(self.element_filter, field.data))
if self.min:
if self.min > len(test_list):
raise ValidationError(
"Minimum %s %s required." % (
self.min,
"entry is" if self.min == 1 else "entries are"
)
)
if self.max:
if self.max < len(test_list):
raise ValidationError(
"Maximum %s %s allowed." % (
self.max,
"entry is" if self.max == 1 else "entries are"
)
)
class RequiredIf(object):
"""Require field if value of another field is set to a certain value."""
def __init__(self, other_field_name, values, message=None):
self.other_field_name = other_field_name
self.values = values
self.message = message
def __call__(self, form, field):
try:
other_field = getattr(form, self.other_field_name)
other_val = other_field.data
for v in self.values:
# Check if field value is required
if (callable(v) and v(other_val)) or (other_val == v):
# Field value is required - check the value
if not field.data or \
isinstance(field.data, six.string_types) \
and not field.data.strip():
if self.message is None:
self.message = 'This field is required.'
field.errors[:] = []
raise StopValidation(self.message % {
'other_field': other_field.label.text,
'value': other_val
})
except AttributeError:
pass
class NotRequiredIf(RequiredIf):
"""Do not require field if another field contains a certain value."""
def __call__(self, form, field):
try:
other_field = getattr(form, self.other_field_name)
other_val = other_field.data
for v in self.values:
# Check if field value is not required.
if (callable(v) and v(other_val)) or (other_val == v):
raise StopValidation()
except AttributeError:
pass
class Unchangeable(object):
    """Force the field to keep its original (object) value."""
    def __call__(self, form, field):
field.data = field.object_data
def number_validate(form, field, submit=False,
error_message='It must be a number!'):
value = field.data or ''
if value == "" or value.isspace():
return
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
if not is_number(value):
raise ValidationError(error_message)
#
# DOI-related validators
#
def doi_syntax_validator(form, field):
"""DOI syntax validator. Deprecated.
:param field: validated field.
:param form: validated form.
"""
import warnings
warnings.warn("Please use DOISyntaxValidator instead.", DeprecationWarning)
return DOISyntaxValidator()(form, field)
class DOISyntaxValidator(object):
"""DOI syntax validator."""
pattern = "(^$|(doi:)?10\.\d+(.\d+)*/.*)"
def __init__(self, message=None):
"""Constructor.
:param message: message to override the default one.
"""
self.regexp = re.compile(self.pattern, re.I)
self.message = message if message else (
"The provided DOI is invalid - it should look similar to "
"'10.1234/foo.bar'.")
def __call__(self, form, field):
"""Validate.
:param field: validated field.
:param form: validated form.
"""
doi = field.data
if doi and not self.regexp.match(doi):
# no point to further validate DOI which is invalid
raise StopValidation(self.message)
class InvalidDOIPrefix(object):
    """Validate that a DOI does not use a reserved or administered prefix."""
    def __init__(self, prefix='10.5072', message=None,
                 message_testing=None):
        """Constructor.
        :param prefix: DOI prefix, e.g. 10.5072.
        """
        self.doi_prefix = prefix
        # Remove trailing slash
        if self.doi_prefix[-1] == '/':
            self.doi_prefix = self.doi_prefix[:-1]
        # 'or' defaults keep both attributes defined even when arguments are given.
        self.message_testing = message_testing or (
            "The prefix 10.5072 is invalid. This prefix "
            "is only used for testing purposes, and no DOIs with this "
            "prefix are attached to any meaningful content.")
        self.message = message or (
            'The prefix %(prefix)s is '
            'administered automatically by %(CFG_SITE_NAME)s.')
ctx = dict(
prefix=prefix,
CFG_SITE_NAME=current_app.config['CFG_SITE_NAME']
)
self.message = self.message % ctx
self.message_testing = self.message_testing % ctx
def __call__(self, form, field):
value = field.data
# Defined prefix
if value:
if value.startswith("%s/" % self.doi_prefix):
raise ValidationError(self.message)
# Testing name space
if self.doi_prefix != "10.5072" and value.startswith("10.5072/"):
raise ValidationError(self.message_testing)
class MintedDOIValidator(object):
    """Validate that an already-minted (registered) DOI is not changed."""
    def __init__(self, prefix='10.5072', message=None):
        """Constructor.
        :param prefix: DOI prefix, e.g. 10.5072.
        """
        self.doi_prefix = prefix
        # Remove trailing slash
        if self.doi_prefix[-1] == '/':
            self.doi_prefix = self.doi_prefix[:-1]
        self.message = message or 'You cannot change an already registered DOI.'
ctx = dict(
prefix=prefix,
CFG_SITE_NAME=current_app.config['CFG_SITE_NAME']
)
self.message = self.message % ctx
def __call__(self, form, field):
if field.object_data and \
field.object_data.startswith("%s/" % self.doi_prefix):
# We have a DOI and it's our own DOI.
if field.data != field.object_data:
raise ValidationError(self.message)
else:
raise StopValidation()
else:
raise ValidationError(self.message)
class PreReservedDOI(object):
"""
Validate that user did not edit pre-reserved DOI.
"""
def __init__(self, field_name, message=None, prefix='10.5072'):
self.field_name = field_name
self.message = message or 'You are not allowed to edit a ' \
'pre-reserved DOI. Click the Pre-reserve ' \
'DOI button to resolve the problem.'
self.prefix = prefix
def __call__(self, form, field):
attr_value = getattr(form, self.field_name).data
if isinstance(attr_value, dict):
attr_value = attr_value['doi']
if attr_value and field.data and field.data != attr_value \
and field.data.startswith("%s/" % self.prefix):
raise StopValidation(self.message)
# Stop further validation if DOI equals pre-reserved DOI.
if attr_value and field.data and field.data == attr_value:
raise StopValidation()
class PidValidator(object):
"""
Validate that value is a persistent identifier understood by us.
"""
def __init__(self, message=None):
self.message = message or "Not a valid persistent identifier"
def __call__(self, form, field):
schemes = pidutils.detect_identifier_schemes(field.data)
if not schemes:
raise ValidationError(self.message)
#
# Aliases
#
required_if = RequiredIf
not_required_if = NotRequiredIf
unchangeable = Unchangeable
list_length = ListLength
invalid_doi_prefix_validator = InvalidDOIPrefix
minted_doi_validator = MintedDOIValidator
pre_reserved_doi_validator = PreReservedDOI
pid_validator = PidValidator
| gpl-2.0 |
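A sketch of how these validators attach to a form: they follow the WTForms callable-validator protocol `validator(form, field)`. The form, field names, and values below are illustrative assumptions rather than Invenio code, and `Form(data=...)` presumes WTForms 2.x:

```python
# Illustrative only: wiring the validators above into a WTForms form.
from wtforms import Form, StringField

class DepositForm(Form):
    access = StringField('Access')
    embargo_date = StringField('Embargo date', validators=[
        RequiredIf('access', ['embargoed']),  # required only when embargoed
    ])
    doi = StringField('DOI', validators=[DOISyntaxValidator()])

form = DepositForm(data={'access': 'embargoed', 'doi': 'not-a-doi'})
print(form.validate())                            # False
print(form.embargo_date.errors, form.doi.errors)  # required + DOI syntax messages
```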
fcole90/nemesys-qos | nemesys/client.py | 9 | 1705 | # client.py
# -*- coding: utf8 -*-
# Copyright (c) 2010 Fondazione Ugo Bordoni.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from isp import Isp
from profile import Profile
class Client:
    # TODO: Move the certificate from the Isp to the Client
def __init__(self, id, profile, isp, geocode, username='anonymous', password='anonymous@'):
self._id = id
self._profile = profile
self._isp = isp
self._geocode = geocode
self._username = username
self._password = password
@property
def id(self):
return self._id
@property
def profile(self):
return self._profile
@property
def isp(self):
return self._isp
@property
def geocode(self):
return self._geocode
@property
def username(self):
return self._username
@property
def password(self):
return self._password
def __str__(self):
return 'id: %s; profile: %s; isp: %s; geocode: %s' % (self.id, self.profile, self.isp, self.geocode)
if __name__ == '__main__':
c = Client('fub0000000001', Profile('fub00001', 512, 512), Isp('fub000', 'fub000.pem'), '41.843646,12.485726')
print c
| gpl-3.0 |
Geode/geonode | geonode/geoserver/urls.py | 19 | 2467 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf.urls import patterns, url
urlpatterns = patterns('geonode.geoserver.views',
url(r'^rest/stores/(?P<store_type>\w+)/$', 'stores', name="stores"),
(r'^rest/styles', 'geoserver_rest_proxy', dict(proxy_path='/gs/rest/styles',
downstream_path='rest/styles')),
(r'^rest/layers', 'geoserver_rest_proxy', dict(proxy_path='/gs/rest/layers',
downstream_path='rest/layers')),
(r'^rest/sldservice', 'geoserver_rest_proxy', dict(proxy_path='/gs/rest/sldservice',
downstream_path='rest/sldservice')),
url(r'^updatelayers/$', 'updatelayers', name="updatelayers"),
url(r'^(?P<layername>[^/]*)/style$', 'layer_style', name="layer_style"),
url(r'^(?P<layername>[^/]*)/style/upload$', 'layer_style_upload', name='layer_style_upload'),
url(r'^(?P<layername>[^/]*)/style/manage$', 'layer_style_manage', name='layer_style_manage'),
url(r'^(?P<layername>[^/]*)/edit-check?$', 'feature_edit_check', name="feature_edit_check"),
url(r'^acls/?$', 'layer_acls', name='layer_acls'),
url(r'^resolve_user/?$', 'resolve_user', name='layer_resolve_user'),
url(r'^download$', 'layer_batch_download', name='layer_batch_download'),
)
| gpl-3.0 |
NejcZupec/ggrc-core | src/ggrc/services/description.py | 7 | 1743 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""RESTful service discovery API for gGRC services."""
import json
from flask import request, current_app
from flask.views import MethodView
class ServiceDescription(MethodView):
"""Flask view providing a RESTful service discovery resource for all gGRC
resources, resource collections and services.
"""
def get(self):
from ggrc import services
endpoints = {}
for entry in services.all_services():
service = getattr(services, entry.model_class.__name__)
endpoints[service.__name__] = {
'href': service.url_for(),
#TODO additional fields
#'discoveryVersion': '',
#'id': '',
#'name': '',
#'version': '',
#'title': '',
#'description': '',
#'documentationLink': '',
}
endpoints['search'] = { 'href': '/search' }
endpoints['log_event'] = {'href': '/api/log_events' }
return self.json_success_response({
'service_description': {
'name': 'gGRC-Core',
'endpoints': endpoints,
'selfLink': request.url,
#TODO additional fields
#'id': '',
#'title': '',
#'description': '',
#'documentationLink': '',
},
})
def json_success_response(
self, response_object, status=200):
headers = [
#('Last-Modified', self.http_timestamp(last_modified)),
#('Etag', self.etag(response_object)),
('Content-Type', 'application/json'),
#('Location', self.url_for())
]
return current_app.make_response(
(json.dumps(response_object), status, headers))
| apache-2.0 |
SneakersInc/sniffmypacketsv2 | src/sniffmypacketsv2/transforms/pcap-dns-domain.py | 1 | 3064 | #!/usr/bin/env python
import datetime
from common.dbconnect import mongo_connect, find_session
from common.hashmethods import *
import tldextract
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
from collections import OrderedDict
from common.entities import pcapFile
from canari.maltego.entities import Domain
from canari.maltego.message import UIMessage
from canari.framework import configure
from common.auxtools import error_logging
from canari.config import config
__author__ = 'catalyst256'
__copyright__ = 'Copyright 2014, sniffmypacketsv2 Project'
__credits__ = []
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'catalyst256'
__email__ = 'catalyst256@gmail.com'
__status__ = 'Development'
__all__ = [
'dotransform'
]
@configure(
label='Find DNS Domains',
description='Find DNS Domains in a pcap file',
uuids=['sniffMyPacketsv2.v2.pcap_2_dnsdomains'],
inputs=[('[SmP] - DNS', pcapFile)],
debug=True
)
def dotransform(request, response):
# Store the pcap file as a variable
pcap = request.value
usedb = config['working/usedb']
# Check to see if we are using the database or not
if usedb > 0:
# Connect to the database so we can insert the record created below
x = mongo_connect()
c = x['DNS']
# Hash the pcap file
try:
md5hash = md5_for_file(pcap)
except Exception as e:
return response + UIMessage(str(e))
# Get the session and/or pcap id
d = find_session(md5hash)
pcap_id = d[0]
session_id = d[1]
else:
pass
try:
pkts = rdpcap(pcap)
dns_requests = []
for p in pkts:
if p.haslayer(DNSQR):
timestamp = datetime.datetime.fromtimestamp(p.time).strftime('%Y-%m-%d %H:%M:%S.%f')
r = p[DNSQR].qname[:-1]
tld = tldextract.extract(r)
domain = tld.registered_domain
if usedb > 0:
dns = OrderedDict({'PCAP ID': pcap_id, 'Stream ID': session_id,
'Time Stamp': timestamp,
'Type': 'Request', 'IP': {'src': p[IP].src, 'dst': p[IP].dst, 'length': p[IP].len},
'Request Details': {'Query Type': p[DNSQR].qtype, 'Query Name': r, 'Domain': domain}})
t = x.DNS.find({'Time Stamp': timestamp}).count()
if t > 0:
pass
else:
c.insert(dns)
else:
pass
                if domain not in dns_requests:  # dedupe on the value we actually store
dns_requests.append(domain)
else:
pass
for d in dns_requests:
            entity = Domain(d)  # avoid reusing 'x' (the Mongo connection above)
            response += entity
return response
except Exception as e:
if usedb > 0:
error_logging(str(e), 'DNS Requests')
else:
return response + UIMessage(str(e))
| apache-2.0 |
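Stripped of the Maltego and MongoDB plumbing, the transform's core is a few lines of Scapy. A standalone sketch: `capture.pcap` is a placeholder path, and the `decode` accounts for modern Scapy returning `qname` as bytes:

```python
# Standalone core of the transform above: collect registered domains from
# the DNS question records in a capture file.
from scapy.all import rdpcap, DNSQR
import tldextract

domains = set()
for pkt in rdpcap('capture.pcap'):                # placeholder path
    if pkt.haslayer(DNSQR):
        qname = pkt[DNSQR].qname.decode(errors='ignore').rstrip('.')
        domains.add(tldextract.extract(qname).registered_domain)
print(sorted(domains))
```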
Pluto-tv/chromium-crosswalk | tools/telemetry/third_party/gsutilz/third_party/boto/boto/rds/dbsnapshot.py | 167 | 6398 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class DBSnapshot(object):
"""
    Represents an RDS DB Snapshot
Properties reference available from the AWS documentation at http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DBSnapshot.html
:ivar engine_version: Specifies the version of the database engine
:ivar license_model: License model information for the restored DB instance
:ivar allocated_storage: Specifies the allocated storage size in gigabytes (GB)
:ivar availability_zone: Specifies the name of the Availability Zone the DB Instance was located in at the time of the DB Snapshot
:ivar connection: boto.rds.RDSConnection associated with the current object
:ivar engine: Specifies the name of the database engine
:ivar id: Specifies the identifier for the DB Snapshot (DBSnapshotIdentifier)
:ivar instance_create_time: Specifies the time (UTC) when the snapshot was taken
:ivar instance_id: Specifies the the DBInstanceIdentifier of the DB Instance this DB Snapshot was created from (DBInstanceIdentifier)
:ivar master_username: Provides the master username for the DB Instance
:ivar port: Specifies the port that the database engine was listening on at the time of the snapshot
:ivar snapshot_create_time: Provides the time (UTC) when the snapshot was taken
:ivar status: Specifies the status of this DB Snapshot. Possible values are [ available, backing-up, creating, deleted, deleting, failed, modifying, rebooting, resetting-master-credentials ]
:ivar iops: Specifies the Provisioned IOPS (I/O operations per second) value of the DB instance at the time of the snapshot.
:ivar option_group_name: Provides the option group name for the DB snapshot.
:ivar percent_progress: The percentage of the estimated data that has been transferred.
:ivar snapshot_type: Provides the type of the DB snapshot.
:ivar source_region: The region that the DB snapshot was created in or copied from.
:ivar vpc_id: Provides the Vpc Id associated with the DB snapshot.
"""
def __init__(self, connection=None, id=None):
self.connection = connection
self.id = id
self.engine = None
self.engine_version = None
self.snapshot_create_time = None
self.instance_create_time = None
self.port = None
self.status = None
self.availability_zone = None
self.master_username = None
self.allocated_storage = None
self.instance_id = None
self.license_model = None
self.iops = None
self.option_group_name = None
self.percent_progress = None
self.snapshot_type = None
self.source_region = None
self.vpc_id = None
def __repr__(self):
return 'DBSnapshot:%s' % self.id
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Engine':
self.engine = value
elif name == 'EngineVersion':
self.engine_version = value
elif name == 'InstanceCreateTime':
self.instance_create_time = value
elif name == 'SnapshotCreateTime':
self.snapshot_create_time = value
elif name == 'DBInstanceIdentifier':
self.instance_id = value
elif name == 'DBSnapshotIdentifier':
self.id = value
elif name == 'Port':
self.port = int(value)
elif name == 'Status':
self.status = value
elif name == 'AvailabilityZone':
self.availability_zone = value
elif name == 'MasterUsername':
self.master_username = value
elif name == 'AllocatedStorage':
self.allocated_storage = int(value)
elif name == 'SnapshotTime':
self.time = value
elif name == 'LicenseModel':
self.license_model = value
elif name == 'Iops':
self.iops = int(value)
elif name == 'OptionGroupName':
self.option_group_name = value
elif name == 'PercentProgress':
self.percent_progress = int(value)
elif name == 'SnapshotType':
self.snapshot_type = value
elif name == 'SourceRegion':
self.source_region = value
elif name == 'VpcId':
self.vpc_id = value
else:
setattr(self, name, value)
def update(self, validate=False):
"""
Update the DB snapshot's status information by making a call to fetch
the current snapshot attributes from the service.
:type validate: bool
:param validate: By default, if EC2 returns no data about the
instance the update method returns quietly. If
the validate param is True, however, it will
raise a ValueError exception if no data is
returned from EC2.
"""
rs = self.connection.get_all_dbsnapshots(self.id)
if len(rs) > 0:
for i in rs:
if i.id == self.id:
self.__dict__.update(i.__dict__)
elif validate:
raise ValueError('%s is not a valid Snapshot ID' % self.id)
return self.status
| bsd-3-clause |
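`DBSnapshot` never touches XML itself: boto's response parser walks the document and calls `startElement`/`endElement` on the object for each tag, so `endElement` is effectively a tag-to-attribute dispatch table. A toy illustration of that contract (the `ToySnapshot` class and hand-fed tags are invented for the example):

```python
# Toy illustration of the handler contract DBSnapshot implements: the
# parser invokes endElement(name, value, connection) per closing tag.
class ToySnapshot(object):
    def __init__(self):
        self.id = None
        self.port = None

    def endElement(self, name, value, connection):
        if name == 'DBSnapshotIdentifier':
            self.id = value
        elif name == 'Port':
            self.port = int(value)

snap = ToySnapshot()
for tag, text in [('DBSnapshotIdentifier', 'snap-1'), ('Port', '5432')]:
    snap.endElement(tag, text, connection=None)
assert (snap.id, snap.port) == ('snap-1', 5432)
```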
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/IPython/core/tests/test_handlers.py | 30 | 3318 | """Tests for input handlers.
"""
#-----------------------------------------------------------------------------
# Module imports
#-----------------------------------------------------------------------------
# third party
import nose.tools as nt
# our own packages
from IPython.core import autocall
from IPython.testing import tools as tt
from IPython.testing.globalipapp import get_ipython
from IPython.utils import py3compat
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# Get the public instance of IPython
ip = get_ipython()
failures = []
num_tests = 0
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
class CallableIndexable(object):
def __getitem__(self, idx): return True
def __call__(self, *args, **kws): return True
class Autocallable(autocall.IPyAutocall):
def __call__(self):
return "called"
def run(tests):
"""Loop through a list of (pre, post) inputs, where pre is the string
handed to ipython, and post is how that string looks after it's been
transformed (i.e. ipython's notion of _i)"""
tt.check_pairs(ip.prefilter_manager.prefilter_lines, tests)
def test_handlers():
call_idx = CallableIndexable()
ip.user_ns['call_idx'] = call_idx
# For many of the below, we're also checking that leading whitespace
# turns off the esc char, which it should unless there is a continuation
# line.
run([(i,py3compat.u_format(o)) for i,o in \
[('"no change"', '"no change"'), # normal
(u"lsmagic", "get_ipython().magic({u}'lsmagic ')"), # magic
#("a = b # PYTHON-MODE", '_i'), # emacs -- avoids _in cache
]])
# Objects which are instances of IPyAutocall are *always* autocalled
autocallable = Autocallable()
ip.user_ns['autocallable'] = autocallable
# auto
ip.magic('autocall 0')
# Only explicit escapes or instances of IPyAutocallable should get
# expanded
run([
('len "abc"', 'len "abc"'),
('autocallable', 'autocallable()'),
# Don't add extra brackets (gh-1117)
('autocallable()', 'autocallable()'),
])
ip.magic('autocall 1')
run([
('len "abc"', 'len("abc")'),
('len "abc";', 'len("abc");'), # ; is special -- moves out of parens
# Autocall is turned off if first arg is [] and the object
# is both callable and indexable. Like so:
('len [1,2]', 'len([1,2])'), # len doesn't support __getitem__...
('call_idx [1]', 'call_idx [1]'), # call_idx *does*..
('call_idx 1', 'call_idx(1)'),
('len', 'len'), # only at 2 does it auto-call on single args
])
ip.magic('autocall 2')
run([
('len "abc"', 'len("abc")'),
('len "abc";', 'len("abc");'),
('len [1,2]', 'len([1,2])'),
('call_idx [1]', 'call_idx [1]'),
('call_idx 1', 'call_idx(1)'),
# This is what's different:
('len', 'len()'), # only at 2 does it auto-call on single args
])
ip.magic('autocall 1')
nt.assert_equal(failures, [])
| mit |
tmerrick1/spack | var/spack/repos/builtin/packages/xtrans/package.py | 5 | 1867 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Xtrans(AutotoolsPackage):
"""xtrans is a library of code that is shared among various X packages to
handle network protocol transport in a modular fashion, allowing a
single place to add new transport types. It is used by the X server,
libX11, libICE, the X font server, and related components."""
homepage = "http://cgit.freedesktop.org/xorg/lib/libxtrans"
url = "https://www.x.org/archive//individual/lib/xtrans-1.3.5.tar.gz"
version('1.3.5', '6e4eac1b7c6591da0753052e1eccfb58')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
| lgpl-2.1 |
XiaosongWei/crosswalk-test-suite | webapi/tct-csp-w3c-tests/csp-py/csp_default-src_self_object_blocked-manual.py | 30 | 2477 | def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
response.headers.set("Content-Security-Policy", "default-src 'self'")
response.headers.set("X-Content-Security-Policy", "default-src 'self'")
response.headers.set("X-WebKit-CSP", "default-src 'self'")
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_default-src_self_object_blocked</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#default-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="default-src 'self'"/>
<meta charset="utf-8"/>
</head>
<body>
<p>Test passes if there is <strong>no red</strong>.</p>
<object data='""" + url1 + """/tests/csp/support/red-100x100.png'/>
</body>
</html> """
| bsd-3-clause |
turbinenreiter/micropython | tests/pyb/i2c_error.py | 22 | 1119 | # test I2C errors, with polling (disabled irqs) and DMA
import pyb
from pyb import I2C
# init accelerometer
pyb.Accel()
# get I2C bus
i2c = I2C(1, I2C.MASTER)
# test polling mem_read
pyb.disable_irq()
i2c.mem_read(1, 76, 0x0a) # should succeed
pyb.enable_irq()
try:
pyb.disable_irq()
i2c.mem_read(1, 77, 0x0a) # should fail
except OSError as e:
pyb.enable_irq()
print(repr(e))
i2c.mem_read(1, 76, 0x0a) # should succeed
# test polling mem_write
pyb.disable_irq()
i2c.mem_write(1, 76, 0x0a) # should succeed
pyb.enable_irq()
try:
pyb.disable_irq()
i2c.mem_write(1, 77, 0x0a) # should fail
except OSError as e:
pyb.enable_irq()
print(repr(e))
i2c.mem_write(1, 76, 0x0a) # should succeed
# test DMA mem_read
i2c.mem_read(1, 76, 0x0a) # should succeed
try:
i2c.mem_read(1, 77, 0x0a) # should fail
except OSError as e:
print(repr(e))
i2c.mem_read(1, 76, 0x0a) # should succeed
# test DMA mem_write
i2c.mem_write(1, 76, 0x0a) # should succeed
try:
i2c.mem_write(1, 77, 0x0a) # should fail
except OSError as e:
print(repr(e))
i2c.mem_write(1, 76, 0x0a) # should succeed
| mit |
has2k1/numpy | numpy/distutils/cpuinfo.py | 173 | 22970 | #!/usr/bin/env python
"""
cpuinfo
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
__all__ = ['cpu']
import sys, re, types
import os
if sys.version_info[0] >= 3:
from subprocess import getstatusoutput
else:
from commands import getstatusoutput
import warnings
import platform
from numpy.distutils.compat import get_exception
def getoutput(cmd, successful_status=(0,), stacklevel=1):
try:
status, output = getstatusoutput(cmd)
except EnvironmentError:
e = get_exception()
warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
return False, output
if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
return True, output
return False, output
def command_info(successful_status=(0,), stacklevel=1, **kw):
info = {}
for key in kw:
ok, output = getoutput(kw[key], successful_status=successful_status,
stacklevel=stacklevel+1)
if ok:
info[key] = output.strip()
return info
def command_by_line(cmd, successful_status=(0,), stacklevel=1):
ok, output = getoutput(cmd, successful_status=successful_status,
stacklevel=stacklevel+1)
if not ok:
return
for line in output.splitlines():
yield line.strip()
def key_value_from_command(cmd, sep, successful_status=(0,),
stacklevel=1):
d = {}
for line in command_by_line(cmd, successful_status=successful_status,
stacklevel=stacklevel+1):
l = [s.strip() for s in line.split(sep, 1)]
if len(l) == 2:
d[l[0]] = l[1]
return d
class CPUInfoBase(object):
"""Holds CPU information and provides methods for requiring
the availability of various CPU features.
"""
def _try_call(self, func):
try:
return func()
except:
pass
def __getattr__(self, name):
if not name.startswith('_'):
if hasattr(self, '_'+name):
attr = getattr(self, '_'+name)
if isinstance(attr, types.MethodType):
return lambda func=self._try_call,attr=attr : func(attr)
else:
return lambda : None
raise AttributeError(name)
def _getNCPUs(self):
return 1
def __get_nbits(self):
abits = platform.architecture()[0]
nbits = re.compile('(\d+)bit').search(abits).group(1)
return nbits
def _is_32bit(self):
return self.__get_nbits() == '32'
def _is_64bit(self):
return self.__get_nbits() == '64'
class LinuxCPUInfo(CPUInfoBase):
info = None
def __init__(self):
if self.info is not None:
return
info = [ {} ]
ok, output = getoutput('uname -m')
if ok:
info[0]['uname_m'] = output.strip()
try:
fo = open('/proc/cpuinfo')
except EnvironmentError:
e = get_exception()
warnings.warn(str(e), UserWarning)
else:
for line in fo:
name_value = [s.strip() for s in line.split(':', 1)]
if len(name_value) != 2:
continue
name, value = name_value
if not info or name in info[-1]: # next processor
info.append({})
info[-1][name] = value
fo.close()
self.__class__.info = info
def _not_impl(self): pass
# Athlon
def _is_AMD(self):
return self.info[0]['vendor_id']=='AuthenticAMD'
def _is_AthlonK6_2(self):
return self._is_AMD() and self.info[0]['model'] == '2'
def _is_AthlonK6_3(self):
return self._is_AMD() and self.info[0]['model'] == '3'
def _is_AthlonK6(self):
return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None
def _is_AthlonK7(self):
return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None
def _is_AthlonMP(self):
return re.match(r'.*?Athlon\(tm\) MP\b',
self.info[0]['model name']) is not None
def _is_AMD64(self):
return self.is_AMD() and self.info[0]['family'] == '15'
def _is_Athlon64(self):
return re.match(r'.*?Athlon\(tm\) 64\b',
self.info[0]['model name']) is not None
def _is_AthlonHX(self):
return re.match(r'.*?Athlon HX\b',
self.info[0]['model name']) is not None
def _is_Opteron(self):
return re.match(r'.*?Opteron\b',
self.info[0]['model name']) is not None
def _is_Hammer(self):
return re.match(r'.*?Hammer\b',
self.info[0]['model name']) is not None
# Alpha
def _is_Alpha(self):
return self.info[0]['cpu']=='Alpha'
def _is_EV4(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4'
def _is_EV5(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5'
def _is_EV56(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56'
def _is_PCA56(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56'
# Intel
#XXX
_is_i386 = _not_impl
def _is_Intel(self):
return self.info[0]['vendor_id']=='GenuineIntel'
def _is_i486(self):
return self.info[0]['cpu']=='i486'
def _is_i586(self):
return self.is_Intel() and self.info[0]['cpu family'] == '5'
def _is_i686(self):
return self.is_Intel() and self.info[0]['cpu family'] == '6'
def _is_Celeron(self):
return re.match(r'.*?Celeron',
self.info[0]['model name']) is not None
def _is_Pentium(self):
return re.match(r'.*?Pentium',
self.info[0]['model name']) is not None
def _is_PentiumII(self):
return re.match(r'.*?Pentium.*?II\b',
self.info[0]['model name']) is not None
def _is_PentiumPro(self):
return re.match(r'.*?PentiumPro\b',
self.info[0]['model name']) is not None
def _is_PentiumMMX(self):
return re.match(r'.*?Pentium.*?MMX\b',
self.info[0]['model name']) is not None
def _is_PentiumIII(self):
return re.match(r'.*?Pentium.*?III\b',
self.info[0]['model name']) is not None
def _is_PentiumIV(self):
return re.match(r'.*?Pentium.*?(IV|4)\b',
self.info[0]['model name']) is not None
def _is_PentiumM(self):
return re.match(r'.*?Pentium.*?M\b',
self.info[0]['model name']) is not None
def _is_Prescott(self):
return self.is_PentiumIV() and self.has_sse3()
def _is_Nocona(self):
return self.is_Intel() \
and (self.info[0]['cpu family'] == '6' \
or self.info[0]['cpu family'] == '15' ) \
and (self.has_sse3() and not self.has_ssse3())\
and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None
def _is_Core2(self):
return self.is_64bit() and self.is_Intel() and \
re.match(r'.*?Core\(TM\)2\b', \
self.info[0]['model name']) is not None
def _is_Itanium(self):
return re.match(r'.*?Itanium\b',
self.info[0]['family']) is not None
def _is_XEON(self):
return re.match(r'.*?XEON\b',
self.info[0]['model name'], re.IGNORECASE) is not None
_is_Xeon = _is_XEON
# Varia
def _is_singleCPU(self):
return len(self.info) == 1
def _getNCPUs(self):
return len(self.info)
def _has_fdiv_bug(self):
return self.info[0]['fdiv_bug']=='yes'
def _has_f00f_bug(self):
return self.info[0]['f00f_bug']=='yes'
def _has_mmx(self):
return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None
def _has_sse(self):
return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None
def _has_sse2(self):
return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None
def _has_sse3(self):
return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None
def _has_ssse3(self):
return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None
def _has_3dnow(self):
return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None
def _has_3dnowext(self):
return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None
class IRIXCPUInfo(CPUInfoBase):
info = None
def __init__(self):
if self.info is not None:
return
info = key_value_from_command('sysconf', sep=' ',
successful_status=(0, 1))
self.__class__.info = info
def _not_impl(self): pass
def _is_singleCPU(self):
return self.info.get('NUM_PROCESSORS') == '1'
def _getNCPUs(self):
return int(self.info.get('NUM_PROCESSORS', 1))
def __cputype(self, n):
return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n)
def _is_r2000(self): return self.__cputype(2000)
def _is_r3000(self): return self.__cputype(3000)
def _is_r3900(self): return self.__cputype(3900)
def _is_r4000(self): return self.__cputype(4000)
def _is_r4100(self): return self.__cputype(4100)
def _is_r4300(self): return self.__cputype(4300)
def _is_r4400(self): return self.__cputype(4400)
def _is_r4600(self): return self.__cputype(4600)
def _is_r4650(self): return self.__cputype(4650)
def _is_r5000(self): return self.__cputype(5000)
def _is_r6000(self): return self.__cputype(6000)
def _is_r8000(self): return self.__cputype(8000)
def _is_r10000(self): return self.__cputype(10000)
def _is_r12000(self): return self.__cputype(12000)
def _is_rorion(self): return self.__cputype('orion')
def get_ip(self):
try: return self.info.get('MACHINE')
except: pass
def __machine(self, n):
return self.info.get('MACHINE').lower() == 'ip%s' % (n)
def _is_IP19(self): return self.__machine(19)
def _is_IP20(self): return self.__machine(20)
def _is_IP21(self): return self.__machine(21)
def _is_IP22(self): return self.__machine(22)
def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000()
def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000()
def _is_IP24(self): return self.__machine(24)
def _is_IP25(self): return self.__machine(25)
def _is_IP26(self): return self.__machine(26)
def _is_IP27(self): return self.__machine(27)
def _is_IP28(self): return self.__machine(28)
def _is_IP30(self): return self.__machine(30)
def _is_IP32(self): return self.__machine(32)
def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000()
def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000()
class DarwinCPUInfo(CPUInfoBase):
info = None
def __init__(self):
if self.info is not None:
return
info = command_info(arch='arch',
machine='machine')
info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=')
self.__class__.info = info
def _not_impl(self): pass
def _getNCPUs(self):
return int(self.info['sysctl_hw'].get('hw.ncpu', 1))
def _is_Power_Macintosh(self):
return self.info['sysctl_hw']['hw.machine']=='Power Macintosh'
def _is_i386(self):
return self.info['arch']=='i386'
def _is_ppc(self):
return self.info['arch']=='ppc'
def __machine(self, n):
return self.info['machine'] == 'ppc%s'%n
def _is_ppc601(self): return self.__machine(601)
def _is_ppc602(self): return self.__machine(602)
def _is_ppc603(self): return self.__machine(603)
def _is_ppc603e(self): return self.__machine('603e')
def _is_ppc604(self): return self.__machine(604)
def _is_ppc604e(self): return self.__machine('604e')
def _is_ppc620(self): return self.__machine(620)
def _is_ppc630(self): return self.__machine(630)
def _is_ppc740(self): return self.__machine(740)
def _is_ppc7400(self): return self.__machine(7400)
def _is_ppc7450(self): return self.__machine(7450)
def _is_ppc750(self): return self.__machine(750)
def _is_ppc403(self): return self.__machine(403)
def _is_ppc505(self): return self.__machine(505)
def _is_ppc801(self): return self.__machine(801)
def _is_ppc821(self): return self.__machine(821)
def _is_ppc823(self): return self.__machine(823)
def _is_ppc860(self): return self.__machine(860)
class SunOSCPUInfo(CPUInfoBase):
info = None
def __init__(self):
if self.info is not None:
return
info = command_info(arch='arch',
mach='mach',
uname_i='uname_i',
isainfo_b='isainfo -b',
isainfo_n='isainfo -n',
)
info['uname_X'] = key_value_from_command('uname -X', sep='=')
for line in command_by_line('psrinfo -v 0'):
m = re.match(r'\s*The (?P<p>[\w\d]+) processor operates at', line)
if m:
info['processor'] = m.group('p')
break
self.__class__.info = info
def _not_impl(self): pass
def _is_i386(self):
return self.info['isainfo_n']=='i386'
def _is_sparc(self):
return self.info['isainfo_n']=='sparc'
def _is_sparcv9(self):
return self.info['isainfo_n']=='sparcv9'
def _getNCPUs(self):
return int(self.info['uname_X'].get('NumCPU', 1))
def _is_sun4(self):
return self.info['arch']=='sun4'
def _is_SUNW(self):
return re.match(r'SUNW', self.info['uname_i']) is not None
def _is_sparcstation5(self):
return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None
def _is_ultra1(self):
return re.match(r'.*Ultra-1', self.info['uname_i']) is not None
def _is_ultra250(self):
return re.match(r'.*Ultra-250', self.info['uname_i']) is not None
def _is_ultra2(self):
return re.match(r'.*Ultra-2', self.info['uname_i']) is not None
def _is_ultra30(self):
return re.match(r'.*Ultra-30', self.info['uname_i']) is not None
def _is_ultra4(self):
return re.match(r'.*Ultra-4', self.info['uname_i']) is not None
def _is_ultra5_10(self):
return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None
def _is_ultra5(self):
return re.match(r'.*Ultra-5', self.info['uname_i']) is not None
def _is_ultra60(self):
return re.match(r'.*Ultra-60', self.info['uname_i']) is not None
def _is_ultra80(self):
return re.match(r'.*Ultra-80', self.info['uname_i']) is not None
def _is_ultraenterprice(self):
return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None
def _is_ultraenterprice10k(self):
return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None
def _is_sunfire(self):
return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None
def _is_ultra(self):
return re.match(r'.*Ultra', self.info['uname_i']) is not None
def _is_cpusparcv7(self):
return self.info['processor']=='sparcv7'
def _is_cpusparcv8(self):
return self.info['processor']=='sparcv8'
def _is_cpusparcv9(self):
return self.info['processor']=='sparcv9'
class Win32CPUInfo(CPUInfoBase):
info = None
pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor"
# XXX: what does the value of
# HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0
# mean?
def __init__(self):
if self.info is not None:
return
info = []
try:
#XXX: Bad style to use so long `try:...except:...`. Fix it!
if sys.version_info[0] >= 3:
import winreg
else:
import _winreg as winreg
prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"\
"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey)
pnum=0
while True:
try:
proc=winreg.EnumKey(chnd, pnum)
except winreg.error:
break
else:
pnum+=1
info.append({"Processor":proc})
phnd=winreg.OpenKey(chnd, proc)
pidx=0
while True:
try:
name, value, vtpe=winreg.EnumValue(phnd, pidx)
except winreg.error:
break
else:
pidx=pidx+1
info[-1][name]=value
if name=="Identifier":
srch=prgx.search(value)
if srch:
info[-1]["Family"]=int(srch.group("FML"))
info[-1]["Model"]=int(srch.group("MDL"))
info[-1]["Stepping"]=int(srch.group("STP"))
except:
print(sys.exc_info()[1], '(ignoring)')
self.__class__.info = info
def _not_impl(self): pass
# Athlon
def _is_AMD(self):
return self.info[0]['VendorIdentifier']=='AuthenticAMD'
def _is_Am486(self):
return self.is_AMD() and self.info[0]['Family']==4
def _is_Am5x86(self):
return self.is_AMD() and self.info[0]['Family']==4
def _is_AMDK5(self):
return self.is_AMD() and self.info[0]['Family']==5 \
and self.info[0]['Model'] in [0, 1, 2, 3]
def _is_AMDK6(self):
return self.is_AMD() and self.info[0]['Family']==5 \
and self.info[0]['Model'] in [6, 7]
def _is_AMDK6_2(self):
return self.is_AMD() and self.info[0]['Family']==5 \
and self.info[0]['Model']==8
def _is_AMDK6_3(self):
return self.is_AMD() and self.info[0]['Family']==5 \
and self.info[0]['Model']==9
def _is_AMDK7(self):
return self.is_AMD() and self.info[0]['Family'] == 6
# To reliably distinguish between the different types of AMD64 chips
# (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would
# require looking at the 'brand' from cpuid
def _is_AMD64(self):
return self.is_AMD() and self.info[0]['Family'] == 15
# Intel
def _is_Intel(self):
return self.info[0]['VendorIdentifier']=='GenuineIntel'
def _is_i386(self):
return self.info[0]['Family']==3
def _is_i486(self):
return self.info[0]['Family']==4
def _is_i586(self):
return self.is_Intel() and self.info[0]['Family']==5
def _is_i686(self):
return self.is_Intel() and self.info[0]['Family']==6
def _is_Pentium(self):
return self.is_Intel() and self.info[0]['Family']==5
def _is_PentiumMMX(self):
return self.is_Intel() and self.info[0]['Family']==5 \
and self.info[0]['Model']==4
def _is_PentiumPro(self):
return self.is_Intel() and self.info[0]['Family']==6 \
and self.info[0]['Model']==1
def _is_PentiumII(self):
return self.is_Intel() and self.info[0]['Family']==6 \
and self.info[0]['Model'] in [3, 5, 6]
def _is_PentiumIII(self):
return self.is_Intel() and self.info[0]['Family']==6 \
and self.info[0]['Model'] in [7, 8, 9, 10, 11]
def _is_PentiumIV(self):
return self.is_Intel() and self.info[0]['Family']==15
def _is_PentiumM(self):
return self.is_Intel() and self.info[0]['Family'] == 6 \
and self.info[0]['Model'] in [9, 13, 14]
def _is_Core2(self):
return self.is_Intel() and self.info[0]['Family'] == 6 \
and self.info[0]['Model'] in [15, 16, 17]
# Varia
def _is_singleCPU(self):
return len(self.info) == 1
def _getNCPUs(self):
return len(self.info)
def _has_mmx(self):
if self.is_Intel():
return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \
or (self.info[0]['Family'] in [6, 15])
elif self.is_AMD():
return self.info[0]['Family'] in [5, 6, 15]
else:
return False
def _has_sse(self):
if self.is_Intel():
return (self.info[0]['Family']==6 and \
self.info[0]['Model'] in [7, 8, 9, 10, 11]) \
or self.info[0]['Family']==15
elif self.is_AMD():
return (self.info[0]['Family']==6 and \
self.info[0]['Model'] in [6, 7, 8, 10]) \
or self.info[0]['Family']==15
else:
return False
def _has_sse2(self):
if self.is_Intel():
            return self.is_PentiumIV() or self.is_PentiumM() \
or self.is_Core2()
elif self.is_AMD():
return self.is_AMD64()
else:
return False
def _has_3dnow(self):
return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15]
def _has_3dnowext(self):
return self.is_AMD() and self.info[0]['Family'] in [6, 15]
if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?)
cpuinfo = LinuxCPUInfo
elif sys.platform.startswith('irix'):
cpuinfo = IRIXCPUInfo
elif sys.platform == 'darwin':
cpuinfo = DarwinCPUInfo
elif sys.platform.startswith('sunos'):
cpuinfo = SunOSCPUInfo
elif sys.platform.startswith('win32'):
cpuinfo = Win32CPUInfo
elif sys.platform.startswith('cygwin'):
cpuinfo = LinuxCPUInfo
#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices.
else:
cpuinfo = CPUInfoBase
cpu = cpuinfo()
#if __name__ == "__main__":
#
# cpu.is_blaa()
# cpu.is_Intel()
# cpu.is_Alpha()
#
# print 'CPU information:',
# for name in dir(cpuinfo):
# if name[0]=='_' and name[1]!='_':
# r = getattr(cpu,name[1:])()
# if r:
# if r!=1:
# print '%s=%s' %(name[1:],r),
# else:
# print name[1:],
# print
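# A minimal usage sketch in the same spirit (kept commented out like the demo
# above; the import path is assumed, and CPUInfoBase is assumed to map each
# '_name' method to a public 'name' probe):
#
#     from cpuinfo import cpu
#     if cpu.is_Intel() and not cpu.is_singleCPU():
#         print 'multi-processor Intel machine'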
| bsd-3-clause |
emetsger/osf.io | scripts/migration/migrate_mailing_lists_to_mailchimp_field.py | 40 | 1043 | """
Transfers subscriptions current users might have from their mailing_lists field (which is to be deprecated)
to the new mailchimp_mailing_lists field. After that is done, remove mailing_lists as a User field to clean up.
"""
import logging
import sys
from website import models
from website.app import init_app
from modularodm import Q
logger = logging.getLogger(__name__)
def main():
init_app(routes=False)
dry_run = 'dry' in sys.argv
    logger.warn('Users will have "mailchimp_mailing_lists" updated from the value of the deprecated "mailing_lists" field')
if dry_run:
logger.warn('Dry_run mode')
for user in get_users_needing_mailing_lists_update():
logger.info('User {0} "mailchimp_mailing_lists" updated'.format(user.username))
if not dry_run:
user.mailchimp_mailing_lists = user.mailing_lists
user.save()
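# Invocation sketch: passing 'dry' anywhere in sys.argv enables dry-run mode
# per the check in main above:
#   python scripts/migration/migrate_mailing_lists_to_mailchimp_field.py dry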
def get_users_needing_mailing_lists_update():
return models.User.find(
Q('mailing_lists', 'ne', {})
)
if __name__ == '__main__':
main()
| apache-2.0 |
pwoodworth/intellij-community | python/lib/Lib/pdb.py | 90 | 42362 | #! /usr/bin/env python
"""A Python debugger."""
# (See pdb.doc for documentation.)
import sys
import linecache
import cmd
import bdb
from repr import Repr
import os
import re
import pprint
import traceback
# Create a custom safe Repr instance and increase its maxstring.
# The default of 30 truncates error messages too easily.
_repr = Repr()
_repr.maxstring = 200
_saferepr = _repr.repr
__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
"post_mortem", "help"]
def find_function(funcname, filename):
cre = re.compile(r'def\s+%s\s*[(]' % funcname)
try:
fp = open(filename)
except IOError:
return None
# consumer of this info expects the first line to be 1
lineno = 1
answer = None
while 1:
line = fp.readline()
if line == '':
break
if cre.match(line):
answer = funcname, filename, lineno
break
lineno = lineno + 1
fp.close()
return answer
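# Illustrative call (file name hypothetical): find_function('main', 'pdb.py')
# scans the file for the first line matching r"def\s+main\s*(" and returns
# ('main', 'pdb.py', lineno), or None if the file is unreadable or no
# definition is found.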
# Interaction prompt line will separate file and call info from code
# text using value of line_prefix string. A newline and arrow may
# be to your liking. You can set it once pdb is imported using the
# command "pdb.line_prefix = '\n% '".
# line_prefix = ': ' # Use this to get the old situation back
line_prefix = '\n-> ' # Probably a better default
class Pdb(bdb.Bdb, cmd.Cmd):
def __init__(self, completekey='tab', stdin=None, stdout=None):
bdb.Bdb.__init__(self)
cmd.Cmd.__init__(self, completekey, stdin, stdout)
if stdout:
self.use_rawinput = 0
self.prompt = '(Pdb) '
self.aliases = {}
self.mainpyfile = ''
self._wait_for_mainpyfile = 0
# Try to load readline if it exists
try:
import readline
except ImportError:
pass
# Read $HOME/.pdbrc and ./.pdbrc
self.rcLines = []
if 'HOME' in os.environ:
envHome = os.environ['HOME']
try:
rcFile = open(os.path.join(envHome, ".pdbrc"))
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
try:
rcFile = open(".pdbrc")
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
self.commands = {} # associates a command list to breakpoint numbers
self.commands_doprompt = {} # for each bp num, tells if the prompt must be disp. after execing the cmd list
self.commands_silent = {} # for each bp num, tells if the stack trace must be disp. after execing the cmd list
self.commands_defining = False # True while in the process of defining a command list
self.commands_bnum = None # The breakpoint number for which we are defining a list
def reset(self):
bdb.Bdb.reset(self)
self.forget()
def forget(self):
self.lineno = None
self.stack = []
self.curindex = 0
self.curframe = None
def setup(self, f, t):
self.forget()
self.stack, self.curindex = self.get_stack(f, t)
self.curframe = self.stack[self.curindex][0]
self.execRcLines()
# Can be executed earlier than 'setup' if desired
def execRcLines(self):
if self.rcLines:
# Make local copy because of recursion
rcLines = self.rcLines
# executed only once
self.rcLines = []
for line in rcLines:
line = line[:-1]
if len(line) > 0 and line[0] != '#':
self.onecmd(line)
# Override Bdb methods
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self._wait_for_mainpyfile:
return
if self.stop_here(frame):
print >>self.stdout, '--Call--'
self.interaction(frame, None)
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno<= 0):
return
self._wait_for_mainpyfile = 0
if self.bp_commands(frame):
self.interaction(frame, None)
    def bp_commands(self, frame):
        """Call every command that was set for the current active breakpoint (if there is one).
        Returns True if the normal interaction function must be called, False otherwise."""
#self.currentbp is set in bdb.py in bdb.break_here if a breakpoint was hit
if getattr(self,"currentbp",False) and self.currentbp in self.commands:
currentbp = self.currentbp
self.currentbp = 0
lastcmd_back = self.lastcmd
self.setup(frame, None)
for line in self.commands[currentbp]:
self.onecmd(line)
self.lastcmd = lastcmd_back
if not self.commands_silent[currentbp]:
self.print_stack_entry(self.stack[self.curindex])
if self.commands_doprompt[currentbp]:
self.cmdloop()
self.forget()
return
return 1
def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
frame.f_locals['__return__'] = return_value
print >>self.stdout, '--Return--'
self.interaction(frame, None)
def user_exception(self, frame, (exc_type, exc_value, exc_traceback)):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
frame.f_locals['__exception__'] = exc_type, exc_value
if type(exc_type) == type(''):
exc_type_name = exc_type
else: exc_type_name = exc_type.__name__
print >>self.stdout, exc_type_name + ':', _saferepr(exc_value)
self.interaction(frame, exc_traceback)
# General interaction function
def interaction(self, frame, traceback):
self.setup(frame, traceback)
self.print_stack_entry(self.stack[self.curindex])
self.cmdloop()
self.forget()
def default(self, line):
if line[:1] == '!': line = line[1:]
locals = self.curframe.f_locals
globals = self.curframe.f_globals
try:
code = compile(line + '\n', '<stdin>', 'single')
exec code in globals, locals
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', v
def precmd(self, line):
"""Handle alias expansion and ';;' separator."""
if not line.strip():
return line
args = line.split()
while args[0] in self.aliases:
line = self.aliases[args[0]]
ii = 1
for tmpArg in args[1:]:
line = line.replace("%" + str(ii),
tmpArg)
ii = ii + 1
line = line.replace("%*", ' '.join(args[1:]))
args = line.split()
# split into ';;' separated commands
# unless it's an alias command
if args[0] != 'alias':
marker = line.find(';;')
if marker >= 0:
# queue up everything after marker
next = line[marker+2:].lstrip()
self.cmdqueue.append(next)
line = line[:marker].rstrip()
return line
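    # Illustrative expansion (alias taken from help_alias below): after
    #   alias pi for k in %1.__dict__.keys(): print "%1.",k,"=",%1.__dict__[k]
    # the input "pi self" has each %1 replaced by "self"; separately,
    # "p x ;; p y" executes "p x" now and queues "p y" on self.cmdqueue.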
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
Checks whether this line is typed at the normal prompt or in
a breakpoint command list definition.
"""
if not self.commands_defining:
return cmd.Cmd.onecmd(self, line)
else:
return self.handle_command_def(line)
def handle_command_def(self,line):
""" Handles one command line during command list definition. """
cmd, arg, line = self.parseline(line)
if cmd == 'silent':
self.commands_silent[self.commands_bnum] = True
return # continue to handle other cmd def in the cmd list
elif cmd == 'end':
self.cmdqueue = []
return 1 # end of cmd list
cmdlist = self.commands[self.commands_bnum]
if (arg):
cmdlist.append(cmd+' '+arg)
else:
cmdlist.append(cmd)
# Determine if we must stop
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
func = self.default
if func.func_name in self.commands_resuming : # one of the resuming commands.
self.commands_doprompt[self.commands_bnum] = False
self.cmdqueue = []
return 1
return
# Command definitions, called by cmdloop()
# The argument is the remaining string on the command line
# Return true to exit from the command loop
do_h = cmd.Cmd.do_help
def do_commands(self, arg):
"""Defines a list of commands associated to a breakpoint
Those commands will be executed whenever the breakpoint causes the program to stop execution."""
if not arg:
bnum = len(bdb.Breakpoint.bpbynumber)-1
else:
try:
bnum = int(arg)
except:
print >>self.stdout, "Usage : commands [bnum]\n ...\n end"
return
self.commands_bnum = bnum
self.commands[bnum] = []
self.commands_doprompt[bnum] = True
self.commands_silent[bnum] = False
prompt_back = self.prompt
self.prompt = '(com) '
self.commands_defining = True
self.cmdloop()
self.commands_defining = False
self.prompt = prompt_back
def do_break(self, arg, temporary = 0):
# break [ ([filename:]lineno | function) [, "condition"] ]
if not arg:
if self.breaks: # There's at least one
print >>self.stdout, "Num Type Disp Enb Where"
for bp in bdb.Breakpoint.bpbynumber:
if bp:
bp.bpprint(self.stdout)
return
# parse arguments; comma has lowest precedence
# and cannot occur in filename
filename = None
lineno = None
cond = None
comma = arg.find(',')
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
# parse stuff before comma: [filename:]lineno | function
colon = arg.rfind(':')
funcname = None
if colon >= 0:
filename = arg[:colon].rstrip()
f = self.lookupmodule(filename)
if not f:
print >>self.stdout, '*** ', repr(filename),
print >>self.stdout, 'not found from sys.path'
return
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError, msg:
print >>self.stdout, '*** Bad lineno:', arg
return
else:
# no colon; can be lineno or function
try:
lineno = int(arg)
except ValueError:
try:
func = eval(arg,
self.curframe.f_globals,
self.curframe.f_locals)
except:
func = arg
try:
if hasattr(func, 'im_func'):
func = func.im_func
code = func.func_code
#use co_name to identify the bkpt (function names
#could be aliased, but co_name is invariant)
funcname = code.co_name
lineno = code.co_firstlineno
filename = code.co_filename
except:
# last thing to try
(ok, filename, ln) = self.lineinfo(arg)
if not ok:
print >>self.stdout, '*** The specified object',
print >>self.stdout, repr(arg),
print >>self.stdout, 'is not a function'
print >>self.stdout, 'or was not found along sys.path.'
return
funcname = ok # ok contains a function name
lineno = int(ln)
if not filename:
filename = self.defaultFile()
# Check for reasonable breakpoint
line = self.checkline(filename, lineno)
if line:
# now set the break point
err = self.set_break(filename, line, temporary, cond, funcname)
if err: print >>self.stdout, '***', err
else:
bp = self.get_breaks(filename, line)[-1]
print >>self.stdout, "Breakpoint %d at %s:%d" % (bp.number,
bp.file,
bp.line)
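    # Illustrative break arguments (module and function names hypothetical):
    #   break 42             -> line 42 of the default file
    #   break mymod.py:42    -> line 42 of mymod.py, located via sys.path
    #   break myfunc, x > 0  -> first line of myfunc, honored only when x > 0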
# To be overridden in derived debuggers
def defaultFile(self):
"""Produce a reasonable default."""
filename = self.curframe.f_code.co_filename
if filename == '<string>' and self.mainpyfile:
filename = self.mainpyfile
return filename
do_b = do_break
def do_tbreak(self, arg):
self.do_break(arg, 1)
def lineinfo(self, identifier):
failed = (None, None, None)
# Input is identifier, may be in single quotes
idstring = identifier.split("'")
if len(idstring) == 1:
# not in single quotes
id = idstring[0].strip()
elif len(idstring) == 3:
# quoted
id = idstring[1].strip()
else:
return failed
if id == '': return failed
parts = id.split('.')
# Protection for derived debuggers
if parts[0] == 'self':
del parts[0]
if len(parts) == 0:
return failed
# Best first guess at file to look at
fname = self.defaultFile()
if len(parts) == 1:
item = parts[0]
else:
# More than one part.
# First is module, second is method/class
f = self.lookupmodule(parts[0])
if f:
fname = f
item = parts[1]
answer = find_function(item, fname)
return answer or failed
def checkline(self, filename, lineno):
"""Check whether specified line seems to be executable.
Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
line or EOF). Warning: testing is not comprehensive.
"""
line = linecache.getline(filename, lineno)
if not line:
print >>self.stdout, 'End of file'
return 0
line = line.strip()
# Don't allow setting breakpoint at a blank line
if (not line or (line[0] == '#') or
(line[:3] == '"""') or line[:3] == "'''"):
print >>self.stdout, '*** Blank or comment'
return 0
return lineno
def do_enable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.enable()
def do_disable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.disable()
def do_condition(self, arg):
# arg is breakpoint number and condition
args = arg.split(' ', 1)
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
cond = args[1]
except:
cond = None
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.cond = cond
if not cond:
print >>self.stdout, 'Breakpoint', bpnum,
print >>self.stdout, 'is now unconditional.'
def do_ignore(self,arg):
"""arg is bp number followed by ignore count."""
args = arg.split()
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
count = int(args[1].strip())
except:
count = 0
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.ignore = count
if count > 0:
reply = 'Will ignore next '
if count > 1:
reply = reply + '%d crossings' % count
else:
reply = reply + '1 crossing'
print >>self.stdout, reply + ' of breakpoint %d.' % bpnum
else:
print >>self.stdout, 'Will stop next time breakpoint',
print >>self.stdout, bpnum, 'is reached.'
def do_clear(self, arg):
"""Three possibilities, tried in this order:
clear -> clear all breaks, ask for confirmation
clear file:lineno -> clear all breaks at file:lineno
clear bpno bpno ... -> clear breakpoints by number"""
if not arg:
try:
reply = raw_input('Clear all breaks? ')
except EOFError:
reply = 'no'
reply = reply.strip().lower()
if reply in ('y', 'yes'):
self.clear_all_breaks()
return
if ':' in arg:
# Make sure it works for "clear C:\foo\bar.py:12"
i = arg.rfind(':')
filename = arg[:i]
arg = arg[i+1:]
try:
lineno = int(arg)
except ValueError:
err = "Invalid line number (%s)" % arg
else:
err = self.clear_break(filename, lineno)
if err: print >>self.stdout, '***', err
return
numberlist = arg.split()
for i in numberlist:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
err = self.clear_bpbynumber(i)
if err:
print >>self.stdout, '***', err
else:
print >>self.stdout, 'Deleted breakpoint', i
do_cl = do_clear # 'c' is already an abbreviation for 'continue'
def do_where(self, arg):
self.print_stack_trace()
do_w = do_where
do_bt = do_where
def do_up(self, arg):
if self.curindex == 0:
print >>self.stdout, '*** Oldest frame'
else:
self.curindex = self.curindex - 1
self.curframe = self.stack[self.curindex][0]
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_u = do_up
def do_down(self, arg):
if self.curindex + 1 == len(self.stack):
print >>self.stdout, '*** Newest frame'
else:
self.curindex = self.curindex + 1
self.curframe = self.stack[self.curindex][0]
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_d = do_down
def do_step(self, arg):
self.set_step()
return 1
do_s = do_step
def do_next(self, arg):
self.set_next(self.curframe)
return 1
do_n = do_next
def do_return(self, arg):
self.set_return(self.curframe)
return 1
do_r = do_return
def do_continue(self, arg):
self.set_continue()
return 1
do_c = do_cont = do_continue
def do_jump(self, arg):
if self.curindex + 1 != len(self.stack):
print >>self.stdout, "*** You can only jump within the bottom frame"
return
try:
arg = int(arg)
except ValueError:
print >>self.stdout, "*** The 'jump' command requires a line number."
else:
try:
# Do the jump, fix up our copy of the stack, and display the
# new position
self.curframe.f_lineno = arg
self.stack[self.curindex] = self.stack[self.curindex][0], arg
self.print_stack_entry(self.stack[self.curindex])
except ValueError, e:
print >>self.stdout, '*** Jump failed:', e
do_j = do_jump
def do_debug(self, arg):
sys.settrace(None)
globals = self.curframe.f_globals
locals = self.curframe.f_locals
p = Pdb(self.completekey, self.stdin, self.stdout)
p.prompt = "(%s) " % self.prompt.strip()
print >>self.stdout, "ENTERING RECURSIVE DEBUGGER"
sys.call_tracing(p.run, (arg, globals, locals))
print >>self.stdout, "LEAVING RECURSIVE DEBUGGER"
sys.settrace(self.trace_dispatch)
self.lastcmd = p.lastcmd
def do_quit(self, arg):
self._user_requested_quit = 1
self.set_quit()
return 1
do_q = do_quit
do_exit = do_quit
def do_EOF(self, arg):
print >>self.stdout
self._user_requested_quit = 1
self.set_quit()
return 1
def do_args(self, arg):
f = self.curframe
co = f.f_code
dict = f.f_locals
n = co.co_argcount
if co.co_flags & 4: n = n+1
if co.co_flags & 8: n = n+1
for i in range(n):
name = co.co_varnames[i]
print >>self.stdout, name, '=',
if name in dict: print >>self.stdout, dict[name]
else: print >>self.stdout, "*** undefined ***"
do_a = do_args
def do_retval(self, arg):
if '__return__' in self.curframe.f_locals:
print >>self.stdout, self.curframe.f_locals['__return__']
else:
print >>self.stdout, '*** Not yet returned!'
do_rv = do_retval
def _getval(self, arg):
try:
return eval(arg, self.curframe.f_globals,
self.curframe.f_locals)
except:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', repr(v)
raise
def do_p(self, arg):
try:
print >>self.stdout, repr(self._getval(arg))
except:
pass
def do_pp(self, arg):
try:
pprint.pprint(self._getval(arg), self.stdout)
except:
pass
def do_list(self, arg):
self.lastcmd = 'list'
last = None
if arg:
try:
x = eval(arg, {}, {})
if type(x) == type(()):
first, last = x
first = int(first)
last = int(last)
if last < first:
# Assume it's a count
last = first + last
else:
first = max(1, int(x) - 5)
except:
print >>self.stdout, '*** Error in argument:', repr(arg)
return
elif self.lineno is None:
first = max(1, self.curframe.f_lineno - 5)
else:
first = self.lineno + 1
if last is None:
last = first + 10
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
for lineno in range(first, last+1):
line = linecache.getline(filename, lineno)
if not line:
print >>self.stdout, '[EOF]'
break
else:
s = repr(lineno).rjust(3)
if len(s) < 4: s = s + ' '
if lineno in breaklist: s = s + 'B'
else: s = s + ' '
if lineno == self.curframe.f_lineno:
s = s + '->'
print >>self.stdout, s + '\t' + line,
self.lineno = lineno
except KeyboardInterrupt:
pass
do_l = do_list
def do_whatis(self, arg):
try:
value = eval(arg, self.curframe.f_globals,
self.curframe.f_locals)
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', repr(v)
return
code = None
# Is it a function?
try: code = value.func_code
except: pass
if code:
print >>self.stdout, 'Function', code.co_name
return
# Is it an instance method?
try: code = value.im_func.func_code
except: pass
if code:
print >>self.stdout, 'Method', code.co_name
return
# None of the above...
print >>self.stdout, type(value)
def do_alias(self, arg):
args = arg.split()
if len(args) == 0:
keys = self.aliases.keys()
keys.sort()
for alias in keys:
print >>self.stdout, "%s = %s" % (alias, self.aliases[alias])
return
if args[0] in self.aliases and len(args) == 1:
print >>self.stdout, "%s = %s" % (args[0], self.aliases[args[0]])
else:
self.aliases[args[0]] = ' '.join(args[1:])
def do_unalias(self, arg):
args = arg.split()
if len(args) == 0: return
if args[0] in self.aliases:
del self.aliases[args[0]]
#list of all the commands making the program resume execution.
commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return',
'do_quit', 'do_jump']
# Print a traceback starting at the top stack frame.
# The most recently entered frame is printed last;
# this is different from dbx and gdb, but consistent with
# the Python interpreter's stack trace.
# It is also consistent with the up/down commands (which are
# compatible with dbx and gdb: up moves towards 'main()'
# and down moves towards the most recent stack frame).
def print_stack_trace(self):
try:
for frame_lineno in self.stack:
self.print_stack_entry(frame_lineno)
except KeyboardInterrupt:
pass
def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix):
frame, lineno = frame_lineno
if frame is self.curframe:
print >>self.stdout, '>',
else:
print >>self.stdout, ' ',
print >>self.stdout, self.format_stack_entry(frame_lineno,
prompt_prefix)
# Help methods (derived from pdb.doc)
def help_help(self):
self.help_h()
def help_h(self):
print >>self.stdout, """h(elp)
Without argument, print the list of available commands.
With a command name as argument, print help about that command
"help pdb" pipes the full documentation file to the $PAGER
"help exec" gives help on the ! command"""
def help_where(self):
self.help_w()
def help_w(self):
print >>self.stdout, """w(here)
Print a stack trace, with the most recent frame at the bottom.
An arrow indicates the "current frame", which determines the
context of most commands. 'bt' is an alias for this command."""
help_bt = help_w
def help_down(self):
self.help_d()
def help_d(self):
print >>self.stdout, """d(own)
Move the current frame one level down in the stack trace
(to a newer frame)."""
def help_up(self):
self.help_u()
def help_u(self):
print >>self.stdout, """u(p)
Move the current frame one level up in the stack trace
(to an older frame)."""
def help_break(self):
self.help_b()
def help_b(self):
print >>self.stdout, """b(reak) ([file:]lineno | function) [, condition]
With a line number argument, set a break there in the current
file. With a function name, set a break at first executable line
of that function. Without argument, list all breaks. If a second
argument is present, it is a string specifying an expression
which must evaluate to true before the breakpoint is honored.
The line number may be prefixed with a filename and a colon,
to specify a breakpoint in another file (probably one that
hasn't been loaded yet). The file is searched for on sys.path;
the .py suffix may be omitted."""
def help_clear(self):
self.help_cl()
def help_cl(self):
print >>self.stdout, "cl(ear) filename:lineno"
print >>self.stdout, """cl(ear) [bpnumber [bpnumber...]]
With a space separated list of breakpoint numbers, clear
those breakpoints. Without argument, clear all breaks (but
first ask confirmation). With a filename:lineno argument,
clear all breaks at that line in that file.
Note that the argument is different from previous versions of
the debugger (in python distributions 1.5.1 and before) where
a linenumber was used instead of either filename:lineno or
breakpoint numbers."""
def help_tbreak(self):
print >>self.stdout, """tbreak same arguments as break, but breakpoint is
removed when first hit."""
def help_enable(self):
print >>self.stdout, """enable bpnumber [bpnumber ...]
Enables the breakpoints given as a space separated list of
bp numbers."""
def help_disable(self):
print >>self.stdout, """disable bpnumber [bpnumber ...]
Disables the breakpoints given as a space separated list of
bp numbers."""
def help_ignore(self):
print >>self.stdout, """ignore bpnumber count
Sets the ignore count for the given breakpoint number. A breakpoint
becomes active when the ignore count is zero. When non-zero, the
count is decremented each time the breakpoint is reached and the
breakpoint is not disabled and any associated condition evaluates
to true."""
def help_condition(self):
print >>self.stdout, """condition bpnumber str_condition
str_condition is a string specifying an expression which
must evaluate to true before the breakpoint is honored.
If str_condition is absent, any existing condition is removed;
i.e., the breakpoint is made unconditional."""
def help_step(self):
self.help_s()
def help_s(self):
print >>self.stdout, """s(tep)
Execute the current line, stop at the first possible occasion
(either in a function that is called or in the current function)."""
def help_next(self):
self.help_n()
def help_n(self):
print >>self.stdout, """n(ext)
Continue execution until the next line in the current function
is reached or it returns."""
def help_return(self):
self.help_r()
def help_r(self):
print >>self.stdout, """r(eturn)
Continue execution until the current function returns."""
def help_continue(self):
self.help_c()
def help_cont(self):
self.help_c()
def help_c(self):
print >>self.stdout, """c(ont(inue))
Continue execution, only stop when a breakpoint is encountered."""
def help_jump(self):
self.help_j()
def help_j(self):
print >>self.stdout, """j(ump) lineno
Set the next line that will be executed."""
def help_debug(self):
print >>self.stdout, """debug code
Enter a recursive debugger that steps through the code argument
(which is an arbitrary expression or statement to be executed
in the current environment)."""
def help_list(self):
self.help_l()
def help_l(self):
print >>self.stdout, """l(ist) [first [,last]]
List source code for the current file.
Without arguments, list 11 lines around the current line
or continue the previous listing.
With one argument, list 11 lines starting at that line.
With two arguments, list the given range;
if the second argument is less than the first, it is a count."""
def help_args(self):
self.help_a()
def help_a(self):
print >>self.stdout, """a(rgs)
Print the arguments of the current function."""
def help_p(self):
print >>self.stdout, """p expression
Print the value of the expression."""
def help_pp(self):
print >>self.stdout, """pp expression
Pretty-print the value of the expression."""
def help_exec(self):
print >>self.stdout, """(!) statement
Execute the (one-line) statement in the context of
the current stack frame.
The exclamation point can be omitted unless the first word
of the statement resembles a debugger command.
To assign to a global variable you must always prefix the
command with a 'global' command, e.g.:
(Pdb) global list_options; list_options = ['-l']
(Pdb)"""
def help_quit(self):
self.help_q()
def help_q(self):
print >>self.stdout, """q(uit) or exit - Quit from the debugger.
The program being executed is aborted."""
help_exit = help_q
def help_whatis(self):
print >>self.stdout, """whatis arg
Prints the type of the argument."""
def help_EOF(self):
print >>self.stdout, """EOF
Handles the receipt of EOF as a command."""
def help_alias(self):
print >>self.stdout, """alias [name [command [parameter parameter ...] ]]
Creates an alias called 'name' that executes 'command'. The command
must *not* be enclosed in quotes. Replaceable parameters are
indicated by %1, %2, and so on, while %* is replaced by all the
parameters. If no command is given, the current alias for name
is shown. If no name is given, all aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is recursively
applied to the first word of the command line; all other words
in the line are left alone.
Some useful aliases (especially when placed in the .pdbrc file) are:
#Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print "%1.",k,"=",%1.__dict__[k]
#Print instance variables in self
alias ps pi self
"""
def help_unalias(self):
print >>self.stdout, """unalias name
Deletes the specified alias."""
def help_commands(self):
print >>self.stdout, """commands [bpnumber]
(com) ...
(com) end
(Pdb)
Specify a list of commands for breakpoint number bpnumber. The
commands themselves appear on the following lines. Type a line
containing just 'end' to terminate the commands.
To remove all commands from a breakpoint, type commands and
follow it immediately with end; that is, give no commands.
With no bpnumber argument, commands refers to the last
breakpoint set.
You can use breakpoint commands to start your program up again.
Simply use the continue command, or step, or any other
command that resumes execution.
Specifying any command resuming execution (currently continue,
step, next, return, jump, quit and their abbreviations) terminates
the command list (as if that command was immediately followed by end).
This is because any time you resume execution
(even with a simple next or step), you may encounter
another breakpoint--which could have its own command list, leading to
ambiguities about which list to execute.
If you use the 'silent' command in the command list, the
usual message about stopping at a breakpoint is not printed. This may
be desirable for breakpoints that are to print a specific message and
then continue. If none of the other commands print anything, you
see no sign that the breakpoint was reached.
"""
def help_pdb(self):
help()
def lookupmodule(self, filename):
"""Helper function for break/clear parsing -- may be overridden.
lookupmodule() translates (possibly incomplete) file or module name
into an absolute file name.
"""
if os.path.isabs(filename) and os.path.exists(filename):
return filename
f = os.path.join(sys.path[0], filename)
if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
return f
root, ext = os.path.splitext(filename)
if ext == '':
filename = filename + '.py'
if os.path.isabs(filename):
return filename
for dirname in sys.path:
while os.path.islink(dirname):
dirname = os.readlink(dirname)
fullname = os.path.join(dirname, filename)
if os.path.exists(fullname):
return fullname
return None
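    # Illustrative lookups (paths hypothetical): lookupmodule('pdb') appends
    # '.py' and searches sys.path, e.g. returning '/usr/lib/python2.7/pdb.py';
    # an existing absolute path such as '/tmp/script.py' is returned as-is.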
def _runscript(self, filename):
# Start with fresh empty copy of globals and locals and tell the script
# that it's being run as __main__ to avoid scripts being able to access
# the pdb.py namespace.
globals_ = {"__name__" : "__main__", "__file__" : filename}
locals_ = globals_
        # When bdb sets tracing, a number of call and line events happen
        # BEFORE the debugger even reaches the user's code (and the exact sequence of
# events depends on python version). So we take special measures to
# avoid stopping before we reach the main script (see user_line and
# user_call for details).
self._wait_for_mainpyfile = 1
self.mainpyfile = self.canonic(filename)
self._user_requested_quit = 0
statement = 'execfile( "%s")' % filename
self.run(statement, globals=globals_, locals=locals_)
# Simplified interface
def run(statement, globals=None, locals=None):
Pdb().run(statement, globals, locals)
def runeval(expression, globals=None, locals=None):
return Pdb().runeval(expression, globals, locals)
def runctx(statement, globals, locals):
# B/W compatibility
run(statement, globals, locals)
def runcall(*args, **kwds):
return Pdb().runcall(*args, **kwds)
def set_trace():
Pdb().set_trace(sys._getframe().f_back)
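# Typical use of the simplified interface: insert
#   import pdb; pdb.set_trace()
# at the point of interest; execution stops there and the (Pdb) prompt
# appears in the caller's frame (sys._getframe().f_back above).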
# Post-Mortem interface
def post_mortem(t):
p = Pdb()
p.reset()
while t.tb_next is not None:
t = t.tb_next
p.interaction(t.tb_frame, t)
def pm():
post_mortem(sys.last_traceback)
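# Typical post-mortem use: after an uncaught exception at the interactive
# prompt, run
#   import pdb; pdb.pm()
# to debug at the innermost frame of sys.last_traceback.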
# Main program for testing
TESTCMD = 'import x; x.main()'
def test():
run(TESTCMD)
# print help
def help():
for dirname in sys.path:
fullname = os.path.join(dirname, 'pdb.doc')
if os.path.exists(fullname):
sts = os.system('${PAGER-more} '+fullname)
if sts: print '*** Pager exit status:', sts
break
else:
print 'Sorry, can\'t find the help file "pdb.doc"',
print 'along the Python search path'
def main():
if not sys.argv[1:]:
print "usage: pdb.py scriptfile [arg] ..."
sys.exit(2)
mainpyfile = sys.argv[1] # Get script filename
if not os.path.exists(mainpyfile):
print 'Error:', mainpyfile, 'does not exist'
sys.exit(1)
del sys.argv[0] # Hide "pdb.py" from argument list
# Replace pdb's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(mainpyfile)
# Note on saving/restoring sys.argv: it's a good idea when sys.argv was
# modified by the script being debugged. It's a bad idea when it was
# changed by the user from the command line. The best approach would be to
# have a "restart" command which would allow explicit specification of
# command line arguments.
pdb = Pdb()
while 1:
try:
pdb._runscript(mainpyfile)
if pdb._user_requested_quit:
break
print "The program finished and will be restarted"
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
print "The program exited via sys.exit(). Exit status: ",
print sys.exc_info()[1]
except:
traceback.print_exc()
print "Uncaught exception. Entering post mortem debugging"
print "Running 'cont' or 'step' will restart the program"
t = sys.exc_info()[2]
while t.tb_next is not None:
t = t.tb_next
pdb.interaction(t.tb_frame,t)
print "Post mortem debugger finished. The "+mainpyfile+" will be restarted"
# When invoked as main program, invoke the debugger on a script
if __name__=='__main__':
main()
| apache-2.0 |
public-ink/public-ink | server/appengine-staging/lib/unidecode/x055.py | 252 | 4599 | data = (
'You ', # 0x00
'Yan ', # 0x01
'Gu ', # 0x02
'Gu ', # 0x03
'Bai ', # 0x04
'Han ', # 0x05
'Suo ', # 0x06
'Chun ', # 0x07
'Yi ', # 0x08
'Ai ', # 0x09
'Jia ', # 0x0a
'Tu ', # 0x0b
'Xian ', # 0x0c
'Huan ', # 0x0d
'Li ', # 0x0e
'Xi ', # 0x0f
'Tang ', # 0x10
'Zuo ', # 0x11
'Qiu ', # 0x12
'Che ', # 0x13
'Wu ', # 0x14
'Zao ', # 0x15
'Ya ', # 0x16
'Dou ', # 0x17
'Qi ', # 0x18
'Di ', # 0x19
'Qin ', # 0x1a
'Ma ', # 0x1b
'Mal ', # 0x1c
'Hong ', # 0x1d
'Dou ', # 0x1e
'Kes ', # 0x1f
'Lao ', # 0x20
'Liang ', # 0x21
'Suo ', # 0x22
'Zao ', # 0x23
'Huan ', # 0x24
'Lang ', # 0x25
'Sha ', # 0x26
'Ji ', # 0x27
'Zuo ', # 0x28
'Wo ', # 0x29
'Feng ', # 0x2a
'Yin ', # 0x2b
'Hu ', # 0x2c
'Qi ', # 0x2d
'Shou ', # 0x2e
'Wei ', # 0x2f
'Shua ', # 0x30
'Chang ', # 0x31
'Er ', # 0x32
'Li ', # 0x33
'Qiang ', # 0x34
'An ', # 0x35
'Jie ', # 0x36
'Yo ', # 0x37
'Nian ', # 0x38
'Yu ', # 0x39
'Tian ', # 0x3a
'Lai ', # 0x3b
'Sha ', # 0x3c
'Xi ', # 0x3d
'Tuo ', # 0x3e
'Hu ', # 0x3f
'Ai ', # 0x40
'Zhou ', # 0x41
'Nou ', # 0x42
'Ken ', # 0x43
'Zhuo ', # 0x44
'Zhuo ', # 0x45
'Shang ', # 0x46
'Di ', # 0x47
'Heng ', # 0x48
'Lan ', # 0x49
'A ', # 0x4a
'Xiao ', # 0x4b
'Xiang ', # 0x4c
'Tun ', # 0x4d
'Wu ', # 0x4e
'Wen ', # 0x4f
'Cui ', # 0x50
'Sha ', # 0x51
'Hu ', # 0x52
'Qi ', # 0x53
'Qi ', # 0x54
'Tao ', # 0x55
'Dan ', # 0x56
'Dan ', # 0x57
'Ye ', # 0x58
'Zi ', # 0x59
'Bi ', # 0x5a
'Cui ', # 0x5b
'Chuo ', # 0x5c
'He ', # 0x5d
'Ya ', # 0x5e
'Qi ', # 0x5f
'Zhe ', # 0x60
'Pei ', # 0x61
'Liang ', # 0x62
'Xian ', # 0x63
'Pi ', # 0x64
'Sha ', # 0x65
'La ', # 0x66
'Ze ', # 0x67
'Qing ', # 0x68
'Gua ', # 0x69
'Pa ', # 0x6a
'Zhe ', # 0x6b
'Se ', # 0x6c
'Zhuan ', # 0x6d
'Nie ', # 0x6e
'Guo ', # 0x6f
'Luo ', # 0x70
'Yan ', # 0x71
'Di ', # 0x72
'Quan ', # 0x73
'Tan ', # 0x74
'Bo ', # 0x75
'Ding ', # 0x76
'Lang ', # 0x77
'Xiao ', # 0x78
'[?] ', # 0x79
'Tang ', # 0x7a
'Chi ', # 0x7b
'Ti ', # 0x7c
'An ', # 0x7d
'Jiu ', # 0x7e
'Dan ', # 0x7f
'Ke ', # 0x80
'Yong ', # 0x81
'Wei ', # 0x82
'Nan ', # 0x83
'Shan ', # 0x84
'Yu ', # 0x85
'Zhe ', # 0x86
'La ', # 0x87
'Jie ', # 0x88
'Hou ', # 0x89
'Han ', # 0x8a
'Die ', # 0x8b
'Zhou ', # 0x8c
'Chai ', # 0x8d
'Wai ', # 0x8e
'Re ', # 0x8f
'Yu ', # 0x90
'Yin ', # 0x91
'Zan ', # 0x92
'Yao ', # 0x93
'Wo ', # 0x94
'Mian ', # 0x95
'Hu ', # 0x96
'Yun ', # 0x97
'Chuan ', # 0x98
'Hui ', # 0x99
'Huan ', # 0x9a
'Huan ', # 0x9b
'Xi ', # 0x9c
'He ', # 0x9d
'Ji ', # 0x9e
'Kui ', # 0x9f
'Zhong ', # 0xa0
'Wei ', # 0xa1
'Sha ', # 0xa2
'Xu ', # 0xa3
'Huang ', # 0xa4
'Du ', # 0xa5
'Nie ', # 0xa6
'Xuan ', # 0xa7
'Liang ', # 0xa8
'Yu ', # 0xa9
'Sang ', # 0xaa
'Chi ', # 0xab
'Qiao ', # 0xac
'Yan ', # 0xad
'Dan ', # 0xae
'Pen ', # 0xaf
'Can ', # 0xb0
'Li ', # 0xb1
'Yo ', # 0xb2
'Zha ', # 0xb3
'Wei ', # 0xb4
'Miao ', # 0xb5
'Ying ', # 0xb6
'Pen ', # 0xb7
'Phos ', # 0xb8
'Kui ', # 0xb9
'Xi ', # 0xba
'Yu ', # 0xbb
'Jie ', # 0xbc
'Lou ', # 0xbd
'Ku ', # 0xbe
'Sao ', # 0xbf
'Huo ', # 0xc0
'Ti ', # 0xc1
'Yao ', # 0xc2
'He ', # 0xc3
'A ', # 0xc4
'Xiu ', # 0xc5
'Qiang ', # 0xc6
'Se ', # 0xc7
'Yong ', # 0xc8
'Su ', # 0xc9
'Hong ', # 0xca
'Xie ', # 0xcb
'Yi ', # 0xcc
'Suo ', # 0xcd
'Ma ', # 0xce
'Cha ', # 0xcf
'Hai ', # 0xd0
'Ke ', # 0xd1
'Ta ', # 0xd2
'Sang ', # 0xd3
'Tian ', # 0xd4
'Ru ', # 0xd5
'Sou ', # 0xd6
'Wa ', # 0xd7
'Ji ', # 0xd8
'Pang ', # 0xd9
'Wu ', # 0xda
'Xian ', # 0xdb
'Shi ', # 0xdc
'Ge ', # 0xdd
'Zi ', # 0xde
'Jie ', # 0xdf
'Luo ', # 0xe0
'Weng ', # 0xe1
'Wa ', # 0xe2
'Si ', # 0xe3
'Chi ', # 0xe4
'Hao ', # 0xe5
'Suo ', # 0xe6
'Jia ', # 0xe7
'Hai ', # 0xe8
'Suo ', # 0xe9
'Qin ', # 0xea
'Nie ', # 0xeb
'He ', # 0xec
'Cis ', # 0xed
'Sai ', # 0xee
'Ng ', # 0xef
'Ge ', # 0xf0
'Na ', # 0xf1
'Dia ', # 0xf2
'Ai ', # 0xf3
'[?] ', # 0xf4
'Tong ', # 0xf5
'Bi ', # 0xf6
'Ao ', # 0xf7
'Ao ', # 0xf8
'Lian ', # 0xf9
'Cui ', # 0xfa
'Zhe ', # 0xfb
'Mo ', # 0xfc
'Sou ', # 0xfd
'Sou ', # 0xfe
'Tan ', # 0xff
)
| gpl-3.0 |
harshilasu/LinkurApp | y/google-cloud-sdk/platform/gcutil/lib/google_compute_engine/gcutil_lib/gcutil_logging.py | 4 | 2573 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common code for logging across gcutil."""
import logging
from apiclient import model
import gflags as flags
from gcutil_lib import gcutil_flags
FLAGS = flags.FLAGS
_LOG_ROOT = 'gcutil-logs'
LOGGER = logging.getLogger(_LOG_ROOT)
CRITICAL = logging.CRITICAL
FATAL = logging.FATAL
ERROR = logging.ERROR
WARNING = logging.WARNING
WARN = logging.WARN
INFO = logging.INFO
DEBUG = logging.DEBUG
_LOG_LEVELS = (DEBUG, INFO, WARNING, ERROR, CRITICAL)
_LOG_LEVEL_NAMES = tuple(map(logging.getLevelName, _LOG_LEVELS))
gcutil_flags.DEFINE_case_insensitive_enum(
'log_level',
logging.getLevelName(logging.INFO),
_LOG_LEVEL_NAMES,
'Logging output level for core Google Compute Engine messages. '
'For logging output from other libraries, use library_log_level.')
gcutil_flags.DEFINE_case_insensitive_enum(
'library_log_level',
logging.getLevelName(logging.WARN),
_LOG_LEVEL_NAMES,
'Logging output level for libraries.')
if hasattr(model, 'dump_request_response'):
flags.DEFINE_boolean(
'dump_request_response',
False,
'Dump all http server requests and responses. ')
def SetupLogging():
"""Set up a logger that will have its own logging level."""
gc_handler = logging.StreamHandler()
gc_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
LOGGER.addHandler(gc_handler)
LOGGER.propagate = False
log_level_map = dict(
[(logging.getLevelName(level), level) for level in _LOG_LEVELS])
# Update library_log_level to INFO if user wants to see
# dump_request_response.
if hasattr(model, 'dump_request_response'):
model.dump_request_response = FLAGS.dump_request_response
if FLAGS.dump_request_response:
if (not FLAGS['library_log_level'].present and
not logging.getLogger().isEnabledFor(logging.INFO)):
FLAGS.library_log_level = 'INFO'
LOGGER.setLevel(log_level_map[FLAGS.log_level])
logging.getLogger().setLevel(log_level_map[FLAGS.library_log_level])
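# Illustrative effect (flag values assumed): --log_level=DEBUG
# --library_log_level=ERROR keeps gcutil's own logger verbose while the root
# logger, which libraries inherit, only emits errors.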
| gpl-3.0 |
tuxfux-hlp-notes/python-batches | archieves/batch-58/modules/sheets/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/lxmletree.py | 355 | 6215 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from lxml import etree
from ..treebuilders.etree import tag_regexp
from gettext import gettext
_ = gettext
from . import _base
from .. import ihatexml
def ensure_str(s):
if s is None:
return None
elif isinstance(s, text_type):
return s
else:
return s.decode("utf-8", "strict")
class Root(object):
def __init__(self, et):
self.elementtree = et
self.children = []
if et.docinfo.internalDTD:
self.children.append(Doctype(self,
ensure_str(et.docinfo.root_name),
ensure_str(et.docinfo.public_id),
ensure_str(et.docinfo.system_url)))
root = et.getroot()
node = root
while node.getprevious() is not None:
node = node.getprevious()
while node is not None:
self.children.append(node)
node = node.getnext()
self.text = None
self.tail = None
def __getitem__(self, key):
return self.children[key]
def getnext(self):
return None
def __len__(self):
return 1
class Doctype(object):
def __init__(self, root_node, name, public_id, system_id):
self.root_node = root_node
self.name = name
self.public_id = public_id
self.system_id = system_id
self.text = None
self.tail = None
def getnext(self):
return self.root_node.children[1]
class FragmentRoot(Root):
def __init__(self, children):
self.children = [FragmentWrapper(self, child) for child in children]
self.text = self.tail = None
def getnext(self):
return None
class FragmentWrapper(object):
def __init__(self, fragment_root, obj):
self.root_node = fragment_root
self.obj = obj
if hasattr(self.obj, 'text'):
self.text = ensure_str(self.obj.text)
else:
self.text = None
if hasattr(self.obj, 'tail'):
self.tail = ensure_str(self.obj.tail)
else:
self.tail = None
self.isstring = isinstance(obj, str) or isinstance(obj, bytes)
# Support for bytes here is Py2
if self.isstring:
self.obj = ensure_str(self.obj)
def __getattr__(self, name):
return getattr(self.obj, name)
def getnext(self):
siblings = self.root_node.children
idx = siblings.index(self)
if idx < len(siblings) - 1:
return siblings[idx + 1]
else:
return None
def __getitem__(self, key):
return self.obj[key]
def __bool__(self):
return bool(self.obj)
def getparent(self):
return None
def __str__(self):
return str(self.obj)
def __unicode__(self):
return str(self.obj)
def __len__(self):
return len(self.obj)
class TreeWalker(_base.NonRecursiveTreeWalker):
def __init__(self, tree):
if hasattr(tree, "getroot"):
tree = Root(tree)
elif isinstance(tree, list):
tree = FragmentRoot(tree)
_base.NonRecursiveTreeWalker.__init__(self, tree)
self.filter = ihatexml.InfosetFilter()
def getNodeDetails(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
return _base.TEXT, ensure_str(getattr(node, key))
elif isinstance(node, Root):
return (_base.DOCUMENT,)
elif isinstance(node, Doctype):
return _base.DOCTYPE, node.name, node.public_id, node.system_id
elif isinstance(node, FragmentWrapper) and node.isstring:
return _base.TEXT, node.obj
elif node.tag == etree.Comment:
return _base.COMMENT, ensure_str(node.text)
elif node.tag == etree.Entity:
return _base.ENTITY, ensure_str(node.text)[1:-1] # strip &;
else:
# This is assumed to be an ordinary element
match = tag_regexp.match(ensure_str(node.tag))
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = ensure_str(node.tag)
attrs = {}
for name, value in list(node.attrib.items()):
name = ensure_str(name)
value = ensure_str(value)
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
attrs, len(node) > 0 or node.text)
def getFirstChild(self, node):
assert not isinstance(node, tuple), _("Text nodes have no children")
assert len(node) or node.text, "Node has no children"
if node.text:
return (node, "text")
else:
return node[0]
def getNextSibling(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
if key == "text":
# XXX: we cannot use a "bool(node) and node[0] or None" construct here
# because node[0] might evaluate to False if it has no child element
if len(node):
return node[0]
else:
return None
else: # tail
return node.getnext()
return (node, "tail") if node.tail else node.getnext()
def getParentNode(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
if key == "text":
return node
# else: fallback to "normal" processing
return node.getparent()
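# Note on the traversal convention above: lxml stores text in element.text and
# element.tail rather than as child nodes, so this walker represents text
# positions as (element, "text") and (element, "tail") tuples and unpacks them
# in getNodeDetails/getNextSibling/getParentNode.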
| gpl-3.0 |
n4hy/gnuradio | grc/base/Platform.py | 9 | 6735 | """
Copyright 2008-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import os
import sys
from .. base import ParseXML, odict
from Element import Element as _Element
from FlowGraph import FlowGraph as _FlowGraph
from Connection import Connection as _Connection
from Block import Block as _Block
from Port import Port as _Port
from Param import Param as _Param
from Constants import BLOCK_TREE_DTD, FLOW_GRAPH_DTD
class Platform(_Element):
def __init__(self, name, version, key,
block_paths, block_dtd, default_flow_graph, generator,
license='', website=None, colors=[]):
"""
Make a platform from the arguments.
@param name the platform name
@param version the version string
@param key the unique platform key
@param block_paths the file paths to blocks in this platform
@param block_dtd the dtd validator for xml block wrappers
@param default_flow_graph the default flow graph file path
@param generator the generator class for this platform
@param colors a list of title, color_spec tuples
@param license a multi-line license (first line is copyright)
@param website the website url for this platform
@return a platform object
"""
_Element.__init__(self)
self._name = name
self._version = version
self._key = key
self._license = license
self._website = website
self._block_paths = block_paths
self._block_dtd = block_dtd
self._default_flow_graph = default_flow_graph
self._generator = generator
self._colors = colors
#create a dummy flow graph for the blocks
self._flow_graph = _Element(self)
#search for *.xml files in the given search path
xml_files = list()
for block_path in self._block_paths:
if os.path.isfile(block_path): xml_files.append(block_path)
elif os.path.isdir(block_path):
for dirpath, dirnames, filenames in os.walk(block_path):
for filename in sorted(filter(lambda f: f.endswith('.xml'), filenames)):
xml_files.append(os.path.join(dirpath, filename))
#load the blocks
self._blocks = odict()
self._blocks_n = odict()
self._block_tree_files = list()
for xml_file in xml_files:
try: #try to add the xml file as a block wrapper
ParseXML.validate_dtd(xml_file, self._block_dtd)
n = ParseXML.from_file(xml_file).find('block')
#inject block wrapper path
n['block_wrapper_path'] = xml_file
block = self.Block(self._flow_graph, n)
key = block.get_key()
#test against repeated keys
if key in self.get_block_keys():
print >> sys.stderr, 'Warning: Block with key "%s" already exists.\n\tIgnoring: %s'%(key, xml_file)
#store the block
else:
self._blocks[key] = block
self._blocks_n[key] = n
except ParseXML.XMLSyntaxError, e:
try: #try to add the xml file as a block tree
ParseXML.validate_dtd(xml_file, BLOCK_TREE_DTD)
self._block_tree_files.append(xml_file)
except ParseXML.XMLSyntaxError, e:
print >> sys.stderr, 'Warning: Block validation failed:\n\t%s\n\tIgnoring: %s'%(e, xml_file)
except Exception, e:
print >> sys.stderr, 'Warning: Block loading failed:\n\t%s\n\tIgnoring: %s'%(e, xml_file)
def parse_flow_graph(self, flow_graph_file):
"""
Parse a saved flow graph file.
Ensure that the file exists, and passes the dtd check.
@param flow_graph_file the flow graph file
@return nested data
@throws exception if the validation fails
"""
flow_graph_file = flow_graph_file or self._default_flow_graph
open(flow_graph_file, 'r') #test open
ParseXML.validate_dtd(flow_graph_file, FLOW_GRAPH_DTD)
return ParseXML.from_file(flow_graph_file)
def load_block_tree(self, block_tree):
"""
Load a block tree with categories and blocks.
Step 1: Load all blocks from the xml specification.
Step 2: Load blocks with builtin category specifications.
@param block_tree the block tree object
"""
#recursive function to load categories and blocks
def load_category(cat_n, parent=[]):
#add this category
parent = parent + [cat_n.find('name')]
block_tree.add_block(parent)
#recursive call to load sub categories
map(lambda c: load_category(c, parent), cat_n.findall('cat'))
#add blocks in this category
for block_key in cat_n.findall('block'):
if block_key not in self.get_block_keys():
print >> sys.stderr, 'Warning: Block key "%s" not found when loading category tree.'%(block_key)
continue
block = self.get_block(block_key)
#if it exists, the block's category overrides the block tree
if not block.get_category(): block_tree.add_block(parent, block)
#load the block tree
for block_tree_file in self._block_tree_files:
            #recursively add all blocks in the tree
load_category(ParseXML.from_file(block_tree_file).find('cat'))
        #add all other blocks, use the category tag
for block in self.get_blocks():
#blocks with empty categories are in the xml block tree or hidden
if not block.get_category(): continue
block_tree.add_block(block.get_category(), block)
def __str__(self): return 'Platform - %s(%s)'%(self.get_key(), self.get_name())
def is_platform(self): return True
def get_new_flow_graph(self): return self.FlowGraph(platform=self)
def get_generator(self): return self._generator
##############################################
# Access Blocks
##############################################
def get_block_keys(self): return self._blocks.keys()
def get_block(self, key): return self._blocks[key]
def get_blocks(self): return self._blocks.values()
def get_new_block(self, flow_graph, key): return self.Block(flow_graph, n=self._blocks_n[key])
def get_name(self): return self._name
def get_version(self): return self._version
def get_key(self): return self._key
def get_license(self): return self._license
def get_website(self): return self._website
def get_colors(self): return self._colors
##############################################
# Constructors
##############################################
FlowGraph = _FlowGraph
Connection = _Connection
Block = _Block
Port = _Port
Param = _Param
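    # Illustrative specialization (class names hypothetical): a concrete
    # platform can substitute its own element classes by overriding these
    # attributes, e.g.
    #   class MyPlatform(Platform):
    #       FlowGraph = MyFlowGraph
    #       Block = MyBlock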
| gpl-3.0 |
nic7aller/bip-switch | BipFlaskProject/BipFlaskProject/env/Lib/encodings/iso8859_11.py | 593 | 12591 | """ Python Character Mapping Codec iso8859_11 generated from 'MAPPINGS/ISO8859/8859-11.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-11',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
u'\u0e24' # 0xC4 -> THAI CHARACTER RU
u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
u'\u0e26' # 0xC6 -> THAI CHARACTER LU
u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
u'\ufffe' # 0xDB -> UNDEFINED
u'\ufffe' # 0xDC -> UNDEFINED
u'\ufffe' # 0xDD -> UNDEFINED
u'\ufffe' # 0xDE -> UNDEFINED
u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
u'\u0e51' # 0xF1 -> THAI DIGIT ONE
u'\u0e52' # 0xF2 -> THAI DIGIT TWO
u'\u0e53' # 0xF3 -> THAI DIGIT THREE
u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
u'\u0e56' # 0xF6 -> THAI DIGIT SIX
u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
u'\u0e59' # 0xF9 -> THAI DIGIT NINE
u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
u'\ufffe' # 0xFC -> UNDEFINED
u'\ufffe' # 0xFD -> UNDEFINED
u'\ufffe' # 0xFE -> UNDEFINED
u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
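# Illustrative round trip through the tables above (not part of the
# generated file); per the table, 0xA1 maps to THAI CHARACTER KO KAI:
#
#   >>> codecs.charmap_decode('\xa1', 'strict', decoding_table)[0]
#   u'\u0e01'
#   >>> codecs.charmap_encode(u'\u0e01', 'strict', encoding_table)[0]
#   '\xa1'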
| mit |
spinellic/Mission-Planner | Lib/site-packages/numpy/oldnumeric/ufuncs.py | 102 | 1231 | __all__ = ['less', 'cosh', 'arcsinh', 'add', 'ceil', 'arctan2', 'floor_divide',
'fmod', 'hypot', 'logical_and', 'power', 'sinh', 'remainder', 'cos',
'equal', 'arccos', 'less_equal', 'divide', 'bitwise_or',
'bitwise_and', 'logical_xor', 'log', 'subtract', 'invert',
'negative', 'log10', 'arcsin', 'arctanh', 'logical_not',
'not_equal', 'tanh', 'true_divide', 'maximum', 'arccosh',
'logical_or', 'minimum', 'conjugate', 'tan', 'greater',
'bitwise_xor', 'fabs', 'floor', 'sqrt', 'arctan', 'right_shift',
'absolute', 'sin', 'multiply', 'greater_equal', 'left_shift',
'exp', 'divide_safe']
from numpy import less, cosh, arcsinh, add, ceil, arctan2, floor_divide, \
fmod, hypot, logical_and, power, sinh, remainder, cos, \
equal, arccos, less_equal, divide, bitwise_or, bitwise_and, \
logical_xor, log, subtract, invert, negative, log10, arcsin, \
arctanh, logical_not, not_equal, tanh, true_divide, maximum, \
arccosh, logical_or, minimum, conjugate, tan, greater, bitwise_xor, \
fabs, floor, sqrt, arctan, right_shift, absolute, sin, \
multiply, greater_equal, left_shift, exp, divide as divide_safe
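# Illustrative: legacy Numeric-style code keeps working against this shim,
# e.g.
#
#   from numpy.oldnumeric.ufuncs import add, divide_safe
#   add(2, 3) # -> 5
#   divide_safe(1.0, 4.0) # -> 0.25; divide_safe is numpy.divide per the alias above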
| gpl-3.0 |
WillisXChen/django-oscar | tests/functional/customer/history_tests.py | 50 | 2344 | from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.http import HttpRequest
from oscar.test.factories import create_product
from oscar.core.compat import get_user_model
from oscar.apps.customer import history
from oscar.templatetags.history_tags import get_back_button
User = get_user_model()
COOKIE_NAME = settings.OSCAR_RECENTLY_VIEWED_COOKIE_NAME
class HistoryHelpersTest(TestCase):
def setUp(self):
self.product = create_product()
def test_viewing_product_creates_cookie(self):
response = self.client.get(self.product.get_absolute_url())
self.assertTrue(COOKIE_NAME in response.cookies)
def test_id_gets_added_to_cookie(self):
response = self.client.get(self.product.get_absolute_url())
request = HttpRequest()
request.COOKIES[COOKIE_NAME] = response.cookies[COOKIE_NAME].value
self.assertTrue(self.product.id in history.extract(request))
def test_get_back_button(self):
request = HttpRequest()
request.META['SERVER_NAME'] = 'test'
request.META['SERVER_PORT'] = 8000
request.META['HTTP_REFERER'] = 'http://www.google.com'
backbutton = get_back_button({'request': request})
self.assertEqual(backbutton, None)
request.META['HTTP_REFERER'] = 'http://test:8000/search/'
backbutton = get_back_button({'request': request})
self.assertTrue(backbutton)
self.assertEqual(backbutton['title'], 'Back to search results')
class TestAUserWhoLogsOut(TestCase):
username = 'customer'
password = 'cheeseshop'
email = 'customer@example.com'
def setUp(self):
self.product = create_product()
User.objects.create_user(username=self.username,
email=self.email, password=self.password)
self.client.login(email=self.email, password=self.password)
def test_has_their_cookies_deleted_on_logout(self):
response = self.client.get(self.product.get_absolute_url())
self.assertTrue(COOKIE_NAME in response.cookies)
response = self.client.get(reverse('customer:logout'))
self.assertTrue((COOKIE_NAME not in response.cookies)
or not self.client.cookies['oscar_recently_viewed_products'].coded_value)
| bsd-3-clause |
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks | Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/urllib3/exceptions.py | 20 | 6607 | from __future__ import absolute_import
from .packages.six.moves.http_client import IncompleteRead as httplib_IncompleteRead
# Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class HTTPWarning(Warning):
"Base warning used by this module."
pass
class PoolError(HTTPError):
"Base exception for errors caused within a pool."
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, None)
class RequestError(PoolError):
"Base exception for PoolErrors that have associated URLs."
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
class ProtocolError(HTTPError):
"Raised when something unexpected happens mid-request/response."
pass
#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError
# Leaf Exceptions
class MaxRetryError(RequestError):
"""Raised when the maximum number of retries is exceeded.
:param pool: The connection pool
:type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
:param string url: The requested Url
:param exceptions.Exception reason: The underlying error
"""
def __init__(self, pool, url, reason=None):
self.reason = reason
message = "Max retries exceeded with url: %s (Caused by %r)" % (url, reason)
RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
class NewConnectionError(ConnectTimeoutError, PoolError):
"Raised when we fail to establish a new connection. Usually ECONNREFUSED."
pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass
class ClosedPoolError(PoolError):
"Raised when a request enters a pool after the pool has been closed."
pass
class LocationValueError(ValueError, HTTPError):
"Raised when there is something wrong with a given URL input."
pass
class LocationParseError(LocationValueError):
"Raised when get_host or similar fails to parse the URL input."
def __init__(self, location):
message = "Failed to parse: %s" % location
HTTPError.__init__(self, message)
self.location = location
class ResponseError(HTTPError):
"Used as a container for an error reason supplied in a MaxRetryError."
GENERIC_ERROR = "too many error responses"
SPECIFIC_ERROR = "too many {status_code} error responses"
class SecurityWarning(HTTPWarning):
"Warned when performing security reducing actions"
pass
class SubjectAltNameWarning(SecurityWarning):
"Warned when connecting to a host with a certificate missing a SAN."
pass
class InsecureRequestWarning(SecurityWarning):
"Warned when making an unverified HTTPS request."
pass
class SystemTimeWarning(SecurityWarning):
"Warned when system time is suspected to be wrong"
pass
class InsecurePlatformWarning(SecurityWarning):
"Warned when certain SSL configuration is not available on a platform."
pass
class SNIMissingWarning(HTTPWarning):
"Warned when making a HTTPS request without SNI available."
pass
class DependencyWarning(HTTPWarning):
"""
Warned when an attempt is made to import a module with missing optional
dependencies.
"""
pass
class ResponseNotChunked(ProtocolError, ValueError):
"Response needs to be chunked in order to read it as chunks."
pass
class BodyNotHttplibCompatible(HTTPError):
"""
Body should be httplib.HTTPResponse like (have an fp attribute which
returns raw chunks) for read_chunked().
"""
pass
class IncompleteRead(HTTPError, httplib_IncompleteRead):
"""
Response length doesn't match expected Content-Length
Subclass of http_client.IncompleteRead to allow int value
for `partial` to avoid creating large objects on streamed
reads.
"""
def __init__(self, partial, expected):
super(IncompleteRead, self).__init__(partial, expected)
def __repr__(self):
return "IncompleteRead(%i bytes read, %i more expected)" % (
self.partial,
self.expected,
)
class InvalidHeader(HTTPError):
"The header provided was somehow invalid."
pass
class ProxySchemeUnknown(AssertionError, ValueError):
"ProxyManager does not support the supplied scheme"
# TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
def __init__(self, scheme):
message = "Not supported proxy scheme %s" % scheme
super(ProxySchemeUnknown, self).__init__(message)
class HeaderParsingError(HTTPError):
"Raised by assert_header_parsing, but we convert it to a log.warning statement."
def __init__(self, defects, unparsed_data):
message = "%s, unparsed data: %r" % (defects or "Unknown", unparsed_data)
super(HeaderParsingError, self).__init__(message)
class UnrewindableBodyError(HTTPError):
"urllib3 encountered an error when trying to rewind a body"
pass
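# Illustrative consumer-side handling of the hierarchy above (sketch only,
# not part of urllib3; assumes some HTTPConnectionPool ``pool``):
#
#   try:
#       pool.urlopen("GET", "/")
#   except MaxRetryError as e: # leaf: retries exhausted
#       log.warning("gave up on %s: %s", e.url, e.reason)
#   except HTTPError: # base: any other urllib3 error
#       raise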
| isc |
warszawajug/warsjawa2014 | app/buildsite.py | 1 | 1997 | # -*- coding: utf-8 -*-
import jinja2
import sys
import os
import shutil
import contents
import json
absolute_path = os.path.dirname(os.path.abspath(__file__))
reload(sys)
sys.setdefaultencoding("utf-8")
absolute_template_path = absolute_path + '/templates'
absolute_output_path = absolute_path + '/output'
print(' ++++ compiling sass...')
os.system('compass compile /warsjawa/app/')
print(' ++++ compiling templates...')
globals()['templateVariables'] = {}
contents.update_variables(globals()['templateVariables'])
template_files = [
'index.html',
'call-for-papers.html',
'thank-you-for-filling-call-for-papers-form.html',
]
loader = jinja2.FileSystemLoader(searchpath=absolute_template_path)
template_environment = jinja2.Environment(loader=loader)
for template_file in template_files:
template = template_environment.get_template(template_file)
outputText = template.render(globals()['templateVariables'])
output_file = open(absolute_output_path + '/' + template_file, 'w+')
output_file.write(outputText.encode('utf8'))
output_file.close()
workshop_data = {'time_slots':globals()['templateVariables']['time_slots'], 'workshops':globals()['templateVariables']['workshops']}
workshop_data_file = open(absolute_output_path + '/workshops.html', 'w+')
workshop_data_file.write(json.dumps(workshop_data).encode('utf8').replace('"', '\\"'))
workshop_data_file.close()
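# Note: the JSON written above has '"' escaped to '\\"'; presumably a later
# templating step re-embeds the payload inside a double-quoted string, e.g.
# '{"a": 1}' is stored on disk as '{\"a\": 1}'.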
speakers_data = globals()['templateVariables']['speakers']
speakers_data_file = open(absolute_output_path + '/speakers.html', 'w+')
speakers_data_file.write(json.dumps(speakers_data).encode('utf8').replace('"', '\\"'))
speakers_data_file.close()
print(' ++++ copying static to output')
os.system('cp -rf %s %s' % (absolute_path + '/static/*', absolute_output_path))
print (' ============================================= ')
print (os.system('ls -a /warsjawa/app'))
print (' ============================================= ')
print (os.system('ls -a /warsjawa/app/output'))
| mit |
shubhangiKishore/pattern | test/test_metrics.py | 4 | 17518 | from __future__ import print_function
from util import *
from pattern import metrics
#-------------------------------------------------------------------------
class TestProfiling(unittest.TestCase):
def setUp(self):
# Test set for accuracy, precision and recall:
self.documents = (
(None, True),
(None, True),
(None, False)
)
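# Each pair above is (document, gold_label); the predictors exercised by the
# accuracy/precision/recall tests below are plain callables document -> bool.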
def test_duration(self):
# Assert 0.1 or slightly higher.
v = metrics.duration(time.sleep, 0.1)
self.assertTrue(v > 0.1)
print("pattern.metrics.duration()")
def test_confusion_matrix(self):
# Assert 2 true positives (TP) and 1 false positive (FP).
v = metrics.confusion_matrix(lambda document: True, self.documents)
self.assertEqual(v, (2, 0, 1, 0))
# Assert 1 true negative (TN) and 2 false negatives (FN).
v = metrics.confusion_matrix(lambda document: False, self.documents)
self.assertEqual(v, (0, 1, 0, 2))
print("pattern.metrics.confusion_matrix()")
def test_accuracy(self):
# Assert 2.0/3.0 (two out of three correct predictions).
v = metrics.accuracy(lambda document: True, self.documents)
self.assertEqual(v, 2.0 / 3.0)
print("pattern.metrics.accuracy()")
def test_precision(self):
# Assert 2.0/3.0 (2 TP, 1 FP).
v = metrics.precision(lambda document: True, self.documents)
self.assertEqual(v, 2.0 / 3.0)
# Assert 0.0 (no TP).
v = metrics.precision(lambda document: False, self.documents)
self.assertEqual(v, 0.0)
print("pattern.metrics.precision()")
def test_recall(self):
# Assert 1.0 (no FN).
v = metrics.recall(lambda document: True, self.documents)
self.assertEqual(v, 1.0)
# Assert 0.0 (no TP).
v = metrics.recall(lambda document: False, self.documents)
self.assertEqual(v, 0.0)
print("pattern.metrics.recall()")
def test_F1(self):
# Assert 0.8 (F1 for precision=2/3 and recall=1).
v = metrics.F1(lambda document: True, self.documents)
self.assertEqual(v, 0.8)
self.assertEqual(
v, metrics.F(lambda document: True, self.documents, beta=1))
print("pattern.metrics.F1()")
def test_agreement(self):
# Assert 0.210 (example from
# http://en.wikipedia.org/wiki/Fleiss'_kappa).
m = [[0, 0, 0, 0, 14],
[0, 2, 6, 4, 2],
[0, 0, 3, 5, 6],
[0, 3, 9, 2, 0],
[2, 2, 8, 1, 1],
[7, 7, 0, 0, 0],
[3, 2, 6, 3, 0],
[2, 5, 3, 2, 2],
[6, 5, 2, 1, 0],
[0, 2, 2, 3, 7]]
v = metrics.agreement(m)
self.assertAlmostEqual(v, 0.210, places=3)
print("pattern.metrics.agreement()")
class TestTextMetrics(unittest.TestCase):
def setUp(self):
pass
def test_levenshtein(self):
# Assert 0 (identical strings).
v = metrics.levenshtein("gallahad", "gallahad")
self.assertEqual(v, 0)
# Assert 3 (1 insert, 1 delete, 1 replace).
v = metrics.levenshtein("gallahad", "_g_llaha")
self.assertEqual(v, 3)
print("pattern.metrics.levenshtein()")
def test_levenshtein_similarity(self):
# Assert 1.0 (identical strings).
v = metrics.levenshtein_similarity("gallahad", "gallahad")
self.assertEqual(v, 1.0)
# Assert 0.75 (2 out of 8 characters differ).
v = metrics.levenshtein_similarity("gallahad", "g_ll_had")
self.assertEqual(v, 0.75)
print("pattern.metrics.levenshtein_similarity()")
def test_dice_coefficient(self):
# Assert 1.0 (identical strings).
v = metrics.dice_coefficient("gallahad", "gallahad")
self.assertEqual(v, 1.0)
# Assert 0.25 (example from
# http://en.wikipedia.org/wiki/Dice_coefficient).
v = metrics.dice_coefficient("night", "nacht")
self.assertEqual(v, 0.25)
print("pattern.metrics.dice_coefficient()")
def test_similarity(self):
self.assertEqual(
metrics.levenshtein_similarity("night", "nacht"),
metrics.similarity("night", "nacht", metrics.LEVENSHTEIN))
self.assertEqual(
metrics.dice_coefficient("night", "nacht"),
metrics.similarity("night", "nacht", metrics.DICE))
print("pattern.metrics.similarity()")
def test_readability(self):
# Assert that technical jargon is in the "difficult" range (< 0.30).
s = "The Australian platypus is seemingly a hybrid of a mammal and reptilian creature."
v = metrics.readability(s)
self.assertTrue(v < 0.30)
# Assert that Dr. Seuss is in the "easy" range (> 0.70).
s = "'I know some good games we could play,' said the cat. " + \
"'I know some new tricks,' said the cat in the hat. " + \
"'A lot of good tricks. I will show them to you.' " + \
"'Your mother will not mind at all if I do.'"
v = metrics.readability(s)
self.assertTrue(v > 0.70)
print("pattern.metrics.readability()")
def test_intertextuality(self):
# Evaluate accuracy for plagiarism detection.
from pattern.db import Datasheet
data = Datasheet.load(
os.path.join(PATH, "corpora", "plagiarism-clough&stevenson.csv"))
data = [((txt, src), int(plagiarism) > 0)
for txt, src, plagiarism in data]
def plagiarism(txt, src):
return metrics.intertextuality([txt, src], n=3)[0, 1] > 0.05
A, P, R, F = metrics.test(lambda x: plagiarism(*x), data)
self.assertTrue(P > 0.96)
self.assertTrue(R > 0.94)
print("pattern.metrics.intertextuality()")
def test_ttr(self):
# Assert type-token ratio: words = 7, unique words = 6.
s = "The black cat \n sat on the mat."
v = metrics.ttr(s)
self.assertAlmostEqual(v, 0.86, places=2)
print("pattern.metrics.ttr()")
def test_suffixes(self):
# Assert base => inflected and reversed inflected => base suffixes.
s = [("beau", "beaux"), ("jeune", "jeunes"), ("hautain", "hautaines")]
v = metrics.suffixes(s, n=3)
v[0][2].sort() # order is not well-defined in python 3
expected = [
(2, "nes", [("n", 0.5), ("ne", 0.5)]),
(1, "aux", [("au", 1.0)])]
self.assertEqual(v, expected)
v = metrics.suffixes(s, n=2, reverse=False)
expected = [
(1, "ne", [("nes", 1.0)]),
(1, "in", [("ines", 1.0)]),
(1, "au", [("aux", 1.0)])]
self.assertEqual(v, expected)
print("pattern.metrics.suffixes()")
def test_isplit(self):
# Assert string.split() iterator.
v = metrics.isplit("test\nisplit")
self.assertTrue(not isinstance(v, list)) # TODO is this needed?
self.assertEqual(list(v), ["test", "isplit"])
print("pattern.metrics.isplit()")
def test_cooccurrence(self):
s = "The black cat sat on the mat."
v = metrics.cooccurrence(s, window=(-1, 1),
term1 = lambda w: w in ("cat",),
normalize = lambda w: w.lower().strip(".:;,!?()[]'\""))
self.assertEqual(sorted(v.keys()), ["cat"])
self.assertEqual(sorted(v["cat"].keys()), ["black", "cat", "sat"])
self.assertEqual(sorted(v["cat"].values()), [1, 1, 1])
s = [("The", "DT"), ("black", "JJ"), ("cat", "NN"),
("sat", "VB"), ("on", "IN"), ("the", "DT"), ("mat", "NN")]
v = metrics.co_occurrence(s, window=(-2, -1),
term1 = lambda token: token[
1].startswith("NN"),
term2 = lambda token: token[1].startswith("JJ"))
self.assertEqual(v, {("cat", "NN"): {("black", "JJ"): 1}})
print("pattern.metrics.cooccurrence()")
class TestInterpolation(unittest.TestCase):
def setUp(self):
pass
def test_lerp(self):
# Assert linear interpolation.
v = metrics.lerp(100, 200, 0.5)
self.assertEqual(v, 150.0)
print("pattern.metrics.lerp()")
def test_smoothstep(self):
# Assert cubic interpolation.
v1 = metrics.smoothstep(0.0, 1.0, 0.5)
v2 = metrics.smoothstep(0.0, 1.0, 0.9)
v3 = metrics.smoothstep(0.0, 1.0, 0.1)
self.assertEqual(v1, 0.5)
self.assertTrue(v2 > 0.9)
self.assertTrue(v3 < 0.1)
print("pattern.metrics.smoothstep()")
def test_smoothrange(self):
# Assert nice ranges for line charts.
v = list(metrics.smoothrange(0.0, 1.0))
[self.assertAlmostEqual(x, y, places=1) for x, y in zip(v,
[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])]
v = list(metrics.smoothrange(-2, 2))
[self.assertAlmostEqual(x, y, places=1) for x, y in zip(v,
[-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0])]
v = list(metrics.smoothrange(1, 13))
[self.assertAlmostEqual(x, y, places=1) for x, y in zip(v,
[0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0])]
print("pattern.metrics.smoothrange()")
class TestStatistics(unittest.TestCase):
def setUp(self):
pass
def test_mean(self):
# Assert (1+2+3+4) / 4 = 2.5.
v = metrics.mean([1, 2, 3, 4])
self.assertEqual(v, 2.5)
print("pattern.metrics.mean()")
def test_median(self):
# Assert 2.5 (between 2 and 3).
v = metrics.median([1, 2, 3, 4])
self.assertEqual(v, 2.5)
# Assert 3 (middle of list).
v = metrics.median([1, 2, 3, 4, 5])
self.assertEqual(v, 3)
# Assert that empty list raises ValueError.
self.assertRaises(ValueError, metrics.median, [])
print("pattern.metrics.median()")
def test_variance(self):
# Assert 2.5.
v = metrics.variance([1, 2, 3, 4, 5], sample=True)
self.assertEqual(v, 2.5)
# Assert 2.0 (population variance).
v = metrics.variance([1, 2, 3, 4, 5], sample=False)
self.assertEqual(v, 2.0)
print("pattern.metrics.variance()")
def test_standard_deviation(self):
# Assert 2.429 (sample).
v = metrics.standard_deviation([1, 5, 6, 7, 6, 8], sample=True)
self.assertAlmostEqual(v, 2.429, places=3)
# Assert 2.217 (population).
v = metrics.standard_deviation([1, 5, 6, 7, 6, 8], sample=False)
self.assertAlmostEqual(v, 2.217, places=3)
print("pattern.metrics.standard_deviation()")
def test_histogram(self):
# Assert 1 bin.
v = metrics.histogram([1, 2, 3, 4], k=0)
self.assertTrue(len(v) == 1)
# Assert 4 bins, each with one value, each with midpoint == value.
v = metrics.histogram([1, 2, 3, 4], k=4, range=(0.5, 4.5))
for i, ((start, stop), v) in enumerate(sorted(v.items())):
self.assertTrue(i + 1 == v[0])
self.assertAlmostEqual(start + (stop - start) / 2, i + 1, places=3)
# Assert 2 bins, one with all the low numbers, one with the high
# number.
v = metrics.histogram([1, 2, 3, 4, 100], k=2)
v = sorted(v.values(), key=lambda item: len(item))
self.assertTrue(v[0] == [100])
self.assertTrue(v[1] == [1, 2, 3, 4])
print("pattern.metrics.histogram()")
def test_moment(self):
# Assert 0.0 (1st central moment = 0.0).
v = metrics.moment([1, 2, 3, 4, 5], n=1)
self.assertEqual(v, 0.0)
# Assert 2.0 (2nd central moment = population variance).
v = metrics.moment([1, 2, 3, 4, 5], n=2)
self.assertEqual(v, 2.0)
print("pattern.metrics.moment()")
def test_skewness(self):
# Assert < 0.0 (few low values).
v = metrics.skewness([1, 100, 101, 102, 103])
self.assertTrue(v < 0.0)
# Assert > 0.0 (few high values).
v = metrics.skewness([1, 2, 3, 4, 100])
self.assertTrue(v > 0.0)
# Assert 0.0 (evenly distributed).
v = metrics.skewness([1, 2, 3, 4])
self.assertTrue(v == 0.0)
print("pattern.metrics.skewness()")
def test_kurtosis(self):
# Assert -1.2 for the uniform distribution.
a = 1
b = 1000
v = metrics.kurtosis([float(i - a) / (b - a) for i in range(a, b)])
self.assertAlmostEqual(v, -1.2, places=3)
print("pattern.metrics.kurtosis()")
def test_quantile(self):
# Assert 2.5 (quantile with p=0.5 == median).
v = metrics.quantile([1, 2, 3, 4], p=0.5, a=1, b=-1, c=0, d=1)
self.assertEqual(v, 2.5)
# Assert 3.0 (discontinuous sample).
v = metrics.quantile([1, 2, 3, 4], p=0.5, a=0.5, b=0, c=1, d=0)
self.assertEqual(v, 3.0)
return "pattern.metrics.quantile()"
def test_boxplot(self):
# Different a,b,c,d quantile parameters produce different results.
# By approximation, assert (53, 79.5, 84.5, 92, 98).
a = [79, 53, 82, 91, 87, 98, 80, 93]
v = metrics.boxplot(a)
self.assertEqual(v[0], min(a))
self.assertTrue(abs(v[1] - 79.5) <= 0.5)
self.assertTrue(abs(v[2] - metrics.median(a)) <= 0.5)
self.assertTrue(abs(v[3] - 92.0) <= 0.5)
self.assertEqual(v[4], max(a))
print("pattern.metrics.boxplot()")
class TestStatisticalTests(unittest.TestCase):
def setUp(self):
pass
def test_fisher_test(self):
# Assert Fisher exact test significance.
v = metrics.fisher_exact_test(a=1, b=9, c=11, d=3)
self.assertAlmostEqual(v, 0.0028, places=4)
v = metrics.fisher_exact_test(a=45, b=15, c=75, d=45)
self.assertAlmostEqual(v, 0.1307, places=4)
print("pattern.metrics.fisher_test()")
def test_chi_squared(self):
# Assert chi-squared test (upper tail).
o1, e1 = [[44, 56]], [[50, 50]]
o2, e2 = [[22, 21, 22, 27, 22, 36]], []
o3, e3 = [[48, 35, 15, 3]], [[58, 34.5, 7, 0.5]]
o4, e4 = [[36, 14], [30, 25]], []
o5, e5 = [[46, 71], [37, 83]], [[40.97, 76.02], [42.03, 77.97]]
v1 = metrics.chi2(o1, e1)
v2 = metrics.chi2(o2, e2)
v3 = metrics.chi2(o3, e3)
v4 = metrics.chi2(o4, e4)
v5 = metrics.chi2(o5, e5)
self.assertAlmostEqual(v1[0], 1.4400, places=4)
self.assertAlmostEqual(v1[1], 0.2301, places=4)
self.assertAlmostEqual(v2[0], 6.7200, places=4)
self.assertAlmostEqual(v2[1], 0.2423, places=4)
self.assertAlmostEqual(v3[0], 23.3742, places=4)
self.assertAlmostEqual(v4[0], 3.4177, places=4)
self.assertAlmostEqual(v5[0], 1.8755, places=4)
print("pattern.metrics.chi2()")
def test_chi_squared_p(self):
# Assert chi-squared P-value (upper tail).
for df, X2 in [
(1, (3.85, 5.05, 6.65, 7.90)),
(2, (6.00, 7.40, 9.25, 10.65)),
(3, (7.85, 9.40, 11.35, 12.85)),
(4, (9.50, 11.15, 13.30, 14.90)),
(5, (11.10, 12.85, 15.10, 16.80))]:
for i, x2 in enumerate(X2):
v = metrics.chi2p(x2, df, tail=metrics.UPPER)
self.assertTrue(v < (0.05, 0.025, 0.01, 0.005)[i])
print("pattern.metrics.chi2p()")
def test_kolmogorov_smirnov(self):
v = metrics.ks2([1, 2, 3], [1, 2, 4])
self.assertAlmostEqual(v[0], 0.3333, places=4)
self.assertAlmostEqual(v[1], 0.9762, places=4)
print("pattern.metrics.ks2()")
class TestSpecialFunctions(unittest.TestCase):
def setUp(self):
pass
def test_gamma(self):
# Assert complete gamma function.
v = metrics.gamma(0.5)
self.assertAlmostEqual(v, math.sqrt(math.pi), places=4)
print("pattern.metrics.gamma()")
def test_gammai(self):
# Assert incomplete gamma function.
v = metrics.gammai(a=1, x=2)
self.assertAlmostEqual(v, 0.1353, places=4)
print("pattern.metrics.gammai()")
def test_erfc(self):
# Assert complementary error function.
for x, y in [
(-3.00, 2.000),
(-2.00, 1.995),
(-1.00, 1.843),
(-0.50, 1.520),
(-0.25, 1.276),
(0.00, 1.000),
(0.25, 0.724),
(0.50, 0.480),
(1.00, 0.157),
(2.00, 0.005),
(3.00, 0.000)]:
self.assertAlmostEqual(metrics.erfc(x), y, places=3)
print("pattern.metrics.erfc()")
def test_kolmogorov(self):
# Assert Kolmogorov limit distribution.
self.assertAlmostEqual(metrics.kolmogorov(0.0), 1.0000, places=4)
self.assertAlmostEqual(metrics.kolmogorov(0.5), 0.9639, places=4)
self.assertAlmostEqual(metrics.kolmogorov(1.0), 0.2700, places=4)
self.assertAlmostEqual(metrics.kolmogorov(2.0), 0.0007, places=4)
self.assertAlmostEqual(metrics.kolmogorov(4.0), 0.0000, places=4)
print("pattern.metrics.kolmogorov()")
#-------------------------------------------------------------------------
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
theheros/kbengine | kbe/res/scripts/common/Lib/encodings/koi8_u.py | 37 | 14069 | """ Python Character Mapping Codec koi8_u generated from 'python-mappings/KOI8-U.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='koi8-u',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL
'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u2580' # 0x8B -> UPPER HALF BLOCK
'\u2584' # 0x8C -> LOWER HALF BLOCK
'\u2588' # 0x8D -> FULL BLOCK
'\u258c' # 0x8E -> LEFT HALF BLOCK
'\u2590' # 0x8F -> RIGHT HALF BLOCK
'\u2591' # 0x90 -> LIGHT SHADE
'\u2592' # 0x91 -> MEDIUM SHADE
'\u2593' # 0x92 -> DARK SHADE
'\u2320' # 0x93 -> TOP HALF INTEGRAL
'\u25a0' # 0x94 -> BLACK SQUARE
'\u2219' # 0x95 -> BULLET OPERATOR
'\u221a' # 0x96 -> SQUARE ROOT
'\u2248' # 0x97 -> ALMOST EQUAL TO
'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO
'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO
'\xa0' # 0x9A -> NO-BREAK SPACE
'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL
'\xb0' # 0x9C -> DEGREE SIGN
'\xb2' # 0x9D -> SUPERSCRIPT TWO
'\xb7' # 0x9E -> MIDDLE DOT
'\xf7' # 0x9F -> DIVISION SIGN
'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL
'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
'\u0454' # 0xA4 -> CYRILLIC SMALL LETTER UKRAINIAN IE
'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u0456' # 0xA6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0457' # 0xA7 -> CYRILLIC SMALL LETTER YI (UKRAINIAN)
'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u0491' # 0xAD -> CYRILLIC SMALL LETTER UKRAINIAN GHE WITH UPTURN
'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
'\u0404' # 0xB4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u0406' # 0xB6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0407' # 0xB7 -> CYRILLIC CAPITAL LETTER YI (UKRAINIAN)
'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u0490' # 0xBD -> CYRILLIC CAPITAL LETTER UKRAINIAN GHE WITH UPTURN
'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\xa9' # 0xBF -> COPYRIGHT SIGN
'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
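# Illustrative (not part of the generated module): once the encodings
# package registers this codec, the entry point above drives lookups, e.g.
#
#   info = getregentry() # codecs.CodecInfo(name='koi8-u', ...)
#   info.decode(b'\xc1') # -> ('\u0430', 1), CYRILLIC SMALL LETTER A
#   info.encode('\u0430') # -> (b'\xc1', 1)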
| lgpl-3.0 |
erinn/ansible | v1/ansible/utils/hashing.py | 104 | 3014 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import errors # needed by secure_hash() below, which raises errors.AnsibleError
# Note, sha1 is the only hash algorithm compatible with python2.4 and with
# FIPS-140 mode (as of 11-2014)
try:
from hashlib import sha1 as sha1
except ImportError:
from sha import sha as sha1
# Backwards compat only
try:
from hashlib import md5 as _md5
except ImportError:
try:
from md5 import md5 as _md5
except ImportError:
# Assume we're running in FIPS mode here
_md5 = None
def secure_hash_s(data, hash_func=sha1):
''' Return a secure hash hex digest of data. '''
digest = hash_func()
try:
digest.update(data)
except UnicodeEncodeError:
digest.update(data.encode('utf-8'))
return digest.hexdigest()
def secure_hash(filename, hash_func=sha1):
''' Return a secure hash hex digest of local file, None if file is not present or a directory. '''
if not os.path.exists(filename) or os.path.isdir(filename):
return None
digest = hash_func()
blocksize = 64 * 1024
try:
infile = open(filename, 'rb')
block = infile.read(blocksize)
while block:
digest.update(block)
block = infile.read(blocksize)
infile.close()
except IOError, e:
raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
return digest.hexdigest()
# The checksum algorithm must match with the algorithm in ShellModule.checksum() method
checksum = secure_hash
checksum_s = secure_hash_s
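# Illustrative:
#   checksum_s("ansible") # sha1 hexdigest of the string's bytes
#   checksum("/etc/hosts") # sha1 hexdigest of the file, or None if absent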
# Backwards compat functions. Some modules include md5s in their return values
# Continue to support that for now. As of ansible-1.8, all of those modules
# should also return "checksum" (sha1 for now)
# Do not use md5 unless it is needed for:
# 1) Optional backwards compatibility
# 2) Compliance with a third party protocol
#
# MD5 will not work on systems which are FIPS-140-2 compliant.
def md5s(data):
if not _md5:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return secure_hash_s(data, _md5)
def md5(filename):
if not _md5:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return secure_hash(filename, _md5)
| gpl-3.0 |
frouty/odoogoeen | addons/edi/__init__.py | 437 | 1157 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import controllers
from . import models
from . import edi_service
from .models.edi import EDIMixin, edi
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
NMGRL/pychron | pychron/extraction_line/ipyscript_runner.py | 2 | 1204 | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Interface
# ============= standard library imports ========================
# ============= local library imports ==========================
class IPyScriptRunner(Interface):
def get_resource(self, name):
pass
def reset_connection(self):
pass
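# Illustrative sketch of a concrete provider (hypothetical class; assumes
# the standard traits ``provides`` decorator; not part of this module):
#
# from traits.api import HasTraits, provides
#
# @provides(IPyScriptRunner)
# class LocalPyScriptRunner(HasTraits):
#     def get_resource(self, name):
#         return None # e.g. look up a shared device/valve lock by name
#     def reset_connection(self):
#         pass # nothing to do for a purely local runner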
# ============= EOF =============================================
| apache-2.0 |
gskachkov/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/closebug.py | 126 | 2748 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
class CloseBug(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.close_bug,
]
def run(self, state):
if not self._options.close_bug:
return
# Check to make sure there are no r? or r+ patches on the bug before closing.
# Assume that r- patches are just previous patches someone forgot to obsolete.
# FIXME: Should this use self.cached_lookup('bug')? It's unclear if
# state["patch"].bug_id() always equals state['bug_id'].
patches = self._tool.bugs.fetch_bug(state["patch"].bug_id()).patches()
for patch in patches:
if patch.review() == "?" or patch.review() == "+":
_log.info("Not closing bug %s as attachment %s has review=%s. Assuming there are more patches to land from this bug." % (patch.bug_id(), patch.id(), patch.review()))
return
self._tool.bugs.close_bug_as_fixed(state["patch"].bug_id(), "All reviewed patches have been landed. Closing bug.")
| bsd-3-clause |
JohnOrlando/gnuradio-bitshark | grc/base/Param.py | 5 | 6477 | """
Copyright 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from . import odict
from Element import Element
def _get_keys(lst): return [elem.get_key() for elem in lst]
def _get_elem(lst, key):
try: return lst[_get_keys(lst).index(key)]
except ValueError: raise ValueError, 'Key "%s" not found in %s.'%(key, _get_keys(lst))
class Option(Element):
def __init__(self, param, n):
Element.__init__(self, param)
self._name = n.find('name')
self._key = n.find('key')
self._opts = dict()
opts = n.findall('opt')
#test against opts when non enum
try: assert self.get_parent().is_enum() or not opts
except AssertionError: raise Exception, 'Options for non-enum types cannot have sub-options'
#extract opts
for opt in opts:
#separate the key:value
try: key, value = opt.split(':')
except: raise Exception, 'Error separating "%s" into key:value'%opt
#test against repeated keys
try: assert not self._opts.has_key(key)
except AssertionError: raise Exception, 'Key "%s" already exists in option'%key
#store the option
self._opts[key] = value
def __str__(self): return 'Option %s(%s)'%(self.get_name(), self.get_key())
def get_name(self): return self._name
def get_key(self): return self._key
##############################################
# Access Opts
##############################################
def get_opt_keys(self): return self._opts.keys()
def get_opt(self, key): return self._opts[key]
def get_opts(self): return self._opts.values()
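# Illustrative: an enum option declared in a block definition such as
#   <option><name>Complex</name><key>fc32</key><opt>type:fc32</opt></option>
# stores self._opts['type'] = 'fc32' in Option.__init__ above, retrievable
# via get_opt('type') while that option is selected on the enum Param below.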
class Param(Element):
def __init__(self, block, n):
"""
Make a new param from nested data.
@param block the parent element
@param n the nested odict
"""
#grab the data
self._name = n.find('name')
self._key = n.find('key')
value = n.find('value') or ''
self._type = n.find('type')
self._hide = n.find('hide') or ''
#build the param
Element.__init__(self, block)
#create the Option objects from the n data
self._options = list()
for option in map(lambda o: Option(param=self, n=o), n.findall('option')):
key = option.get_key()
#test against repeated keys
try: assert key not in self.get_option_keys()
except AssertionError: raise Exception, 'Key "%s" already exists in options'%key
#store the option
self.get_options().append(option)
#test the enum options
if self.is_enum():
#test against options with identical keys
try: assert len(set(self.get_option_keys())) == len(self.get_options())
except AssertionError: raise Exception, 'Options keys "%s" are not unique.'%self.get_option_keys()
#test against inconsistent keys in options
opt_keys = self.get_options()[0].get_opt_keys()
for option in self.get_options():
try: assert set(opt_keys) == set(option.get_opt_keys())
except AssertionError: raise Exception, 'Opt keys "%s" are not identical across all options.'%opt_keys
#if a value is specified, it must be in the options keys
self._value = value or self.get_option_keys()[0]
try: assert self.get_value() in self.get_option_keys()
except AssertionError: raise Exception, 'The value "%s" is not in the possible values of "%s".'%(self.get_value(), self.get_option_keys())
else: self._value = value or ''
def validate(self):
"""
Validate the param.
The value must be evaluated and type must a possible type.
"""
Element.validate(self)
try: assert self.get_type() in self.get_types()
except AssertionError: self.add_error_message('Type "%s" is not a possible type.'%self.get_type())
def get_evaluated(self): raise NotImplementedError
def to_code(self):
"""
Convert the value to code.
@throw NotImplementedError
"""
raise NotImplementedError
def get_types(self):
"""
Get a list of all possible param types.
@throw NotImplementedError
"""
raise NotImplementedError
def get_color(self): return '#FFFFFF'
def __str__(self): return 'Param - %s(%s)'%(self.get_name(), self.get_key())
def is_param(self): return True
def get_name(self): return self._name
def get_key(self): return self._key
def get_hide(self): return self.get_parent().resolve_dependencies(self._hide).strip()
def get_value(self):
value = self._value
if self.is_enum() and value not in self.get_option_keys():
value = self.get_option_keys()[0]
self.set_value(value)
return value
def set_value(self, value): self._value = str(value) #must be a string
def get_type(self): return self.get_parent().resolve_dependencies(self._type)
def is_enum(self): return self._type == 'enum'
def __repr__(self):
"""
Get the repr (nice string format) for this param.
Just return the value (special case enum).
Derived classes can handle complex formatting.
@return the string representation
"""
if self.is_enum(): return self.get_option(self.get_value()).get_name()
return self.get_value()
##############################################
# Access Options
##############################################
def get_option_keys(self): return _get_keys(self.get_options())
def get_option(self, key): return _get_elem(self.get_options(), key)
def get_options(self): return self._options
##############################################
# Access Opts
##############################################
def get_opt_keys(self): return self.get_option(self.get_value()).get_opt_keys()
def get_opt(self, key): return self.get_option(self.get_value()).get_opt(key)
def get_opts(self): return self.get_option(self.get_value()).get_opts()
##############################################
## Import/Export Methods
##############################################
def export_data(self):
"""
Export this param's key/value.
@return a nested data odict
"""
n = odict()
n['key'] = self.get_key()
n['value'] = self.get_value()
return n
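#Illustrative sketch (not from the original file; 'some_block' and the exact
#odict layout are assumptions): a Param is typically built from nested data
#shaped roughly like
# n = odict({'name': 'Sample Rate', 'key': 'samp_rate', 'type': 'enum',
#            'value': 'rate1', 'option': [<odict with name/key/opt entries>]})
# param = Param(block=some_block, n=n)
# param.get_value() #-> 'rate1'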
| gpl-3.0 |
ajnirp/servo | tests/wpt/web-platform-tests/old-tests/webdriver/command_contexts/window_handle_test.py | 141 | 1067 | import os
import sys
import random
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
repo_root = os.path.abspath(os.path.join(__file__, "../../.."))
sys.path.insert(1, os.path.join(repo_root, "tools", "webdriver"))
from webdriver import exceptions
class WindowHandleTest(base_test.WebDriverBaseTest):
def setUp(self):
self.driver.get(self.webserver.where_is("command_contexts/res/first-page.html"))
def test_window_handle_is_not_current(self):
handle = self.driver.get_window_handle()
self.assertNotEqual(handle, "current")
def test_window_handles_are_unique(self):
number_of_windows = 20
new_window_button = self.driver.find_element_by_id("open_new_window")
for i in range(0, number_of_windows):
new_window_button.click()
handles = self.driver.get_window_handles()
if len(handles) > len(set(handles)):
self.fail('At least one window handle was repeated')
if __name__ == "__main__":
unittest.main()
| mpl-2.0 |
rg3/youtube-dl | youtube_dl/extractor/hotstar.py | 6 | 5764 | # coding: utf-8
from __future__ import unicode_literals
import hashlib
import hmac
import time
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
try_get,
)
class HotStarBaseIE(InfoExtractor):
_AKAMAI_ENCRYPTION_KEY = b'\x05\xfc\x1a\x01\xca\xc9\x4b\xc4\x12\xfc\x53\x12\x07\x75\xf9\xee'
def _call_api(self, path, video_id, query_name='contentId'):
st = int(time.time())
exp = st + 6000
auth = 'st=%d~exp=%d~acl=/*' % (st, exp)
auth += '~hmac=' + hmac.new(self._AKAMAI_ENCRYPTION_KEY, auth.encode(), hashlib.sha256).hexdigest()
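# The assembled header value looks like (illustrative timestamps):
#   st=1500000000~exp=1500006000~acl=/*~hmac=<hex sha256 digest>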
response = self._download_json(
'https://api.hotstar.com/' + path,
video_id, headers={
'hotstarauth': auth,
'x-country-code': 'IN',
'x-platform-code': 'JIO',
}, query={
query_name: video_id,
'tas': 10000,
})
if response['statusCode'] != 'OK':
raise ExtractorError(
response['body']['message'], expected=True)
return response['body']['results']
class HotStarIE(HotStarBaseIE):
IE_NAME = 'hotstar'
_VALID_URL = r'https?://(?:www\.)?hotstar\.com/(?:.+?[/-])?(?P<id>\d{10})'
_TESTS = [{
# contentData
'url': 'https://www.hotstar.com/can-you-not-spread-rumours/1000076273',
'info_dict': {
'id': '1000076273',
'ext': 'mp4',
'title': 'Can You Not Spread Rumours?',
'description': 'md5:c957d8868e9bc793ccb813691cc4c434',
'timestamp': 1447248600,
'upload_date': '20151111',
'duration': 381,
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
# contentDetail
'url': 'https://www.hotstar.com/movies/radha-gopalam/1000057157',
'only_matching': True,
}, {
'url': 'http://www.hotstar.com/sports/cricket/rajitha-sizzles-on-debut-with-329/2001477583',
'only_matching': True,
}, {
'url': 'http://www.hotstar.com/1000000515',
'only_matching': True,
}]
_GEO_BYPASS = False
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
app_state = self._parse_json(self._search_regex(
r'<script>window\.APP_STATE\s*=\s*({.+?})</script>',
webpage, 'app state'), video_id)
video_data = {}
getters = list(
lambda x, k=k: x['initialState']['content%s' % k]['content']
for k in ('Data', 'Detail')
)
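# k=k binds the loop variable at definition time, so one getter reads
# 'contentData' and the other 'contentDetail'.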
for v in app_state.values():
content = try_get(v, getters, dict)
if content and content.get('contentId') == video_id:
video_data = content
break
title = video_data['title']
if video_data.get('drmProtected'):
raise ExtractorError('This video is DRM protected.', expected=True)
formats = []
format_data = self._call_api('h/v1/play', video_id)['item']
format_url = format_data['playbackUrl']
ext = determine_ext(format_url)
if ext == 'm3u8':
try:
formats.extend(self._extract_m3u8_formats(
format_url, video_id, 'mp4', m3u8_id='hls'))
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
self.raise_geo_restricted(countries=['IN'])
raise
elif ext == 'f4m':
# f4m downloads produce broken files, so skip them
pass
else:
formats.append({
'url': format_url,
'width': int_or_none(format_data.get('width')),
'height': int_or_none(format_data.get('height')),
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': video_data.get('description'),
'duration': int_or_none(video_data.get('duration')),
'timestamp': int_or_none(video_data.get('broadcastDate') or video_data.get('startDate')),
'formats': formats,
'channel': video_data.get('channelName'),
'channel_id': video_data.get('channelId'),
'series': video_data.get('showName'),
'season': video_data.get('seasonName'),
'season_number': int_or_none(video_data.get('seasonNo')),
'season_id': video_data.get('seasonId'),
'episode': title,
'episode_number': int_or_none(video_data.get('episodeNo')),
}
class HotStarPlaylistIE(HotStarBaseIE):
IE_NAME = 'hotstar:playlist'
_VALID_URL = r'https?://(?:www\.)?hotstar\.com/tv/[^/]+/s-\w+/list/[^/]+/t-(?P<id>\w+)'
_TESTS = [{
'url': 'https://www.hotstar.com/tv/savdhaan-india/s-26/list/popular-clips/t-3_2_26',
'info_dict': {
'id': '3_2_26',
},
'playlist_mincount': 20,
}, {
'url': 'https://www.hotstar.com/tv/savdhaan-india/s-26/list/extras/t-2480',
'only_matching': True,
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
collection = self._call_api('o/v1/tray/find', playlist_id, 'uqId')
entries = [
self.url_result(
'https://www.hotstar.com/%s' % video['contentId'],
ie=HotStarIE.ie_key(), video_id=video['contentId'])
for video in collection['assets']['items']
if video.get('contentId')]
return self.playlist_result(entries, playlist_id)
| unlicense |
ittailup/scrape4jobs | scrape4jobs/spiders/resumator_spider.py | 1 | 1539 | from scrapy.spider import Spider
from scrapy import signals
from scrapy.selector import Selector
import logging
import csv
from careerpagescrapers.items import Startup, StartupJob
from scrapy.log import ScrapyFileLogObserver
from scrapy.xlib.pydispatch import dispatcher
from scrapy import log
logfile = open('testlog.log', 'w')
log_observer = ScrapyFileLogObserver(logfile, level=logging.DEBUG)
log_observer.start()
class ResumatorSpider(Spider):
name = 'resumator'
allowed_domains = ['theresumator.com']
start_urls = ['']
def __init__(self, stats):
self.stats = stats
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.stats)
def parse(self, response):
sel = Selector(response)
categories = sel.xpath("//h3/text()").extract()
items = []
#extract each field list once, then pair the entries up by index
joblist = sel.xpath("//a[@class='job_title_link']")
titles = sel.xpath("//a[@class='job_title_link']/text()").extract()
urls = sel.xpath("//a[@class='job_title_link']/@href").extract()
departments = sel.xpath("//span[@class='resumator_department']/text()").extract()
for jobindex in range(len(joblist)):
listing = StartupJob()
listing['startup'] = response.url
listing['title'] = titles[jobindex]
listing['url'] = urls[jobindex]
listing['category'] = departments[jobindex]
listing['location'] = sel.xpath("((//tbody/tr)[" + str(jobindex + 1) + "]/td)[2]/text()").extract()
items.append(listing)
return items
| mit |
joegomes/deepchem | deepchem/models/tests/test_overfit.py | 1 | 35451 | """
Tests to make sure deepchem models can overfit on tiny datasets.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import tempfile
import numpy as np
import unittest
import sklearn
import shutil
import tensorflow as tf
import deepchem as dc
import scipy.io
from tensorflow.python.framework import test_util
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
class TestOverfit(test_util.TensorFlowTestCase):
"""
Test that models can overfit simple datasets.
"""
def setUp(self):
super(TestOverfit, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
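# Each test below follows the same recipe: build a tiny (usually random)
# dataset, fit a model on it, then assert the training-set score clears a
# loose threshold -- i.e. the model is able to memorize ("overfit") the data.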
def test_sklearn_regression_overfit(self):
"""Test that sklearn models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.r2_score)
sklearn_model = RandomForestRegressor()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .7
def test_sklearn_classification_overfit(self):
"""Test that sklearn models can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_sklearn_skewed_classification_overfit(self):
"""Test sklearn models can overfit 0/1 datasets with few actives."""
n_samples = 100
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
p = .05
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model = dc.models.SklearnModel(sklearn_model)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_regression_overfit(self):
"""Test that TensorFlow models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
# TODO(rbharath): This breaks with optimizer="momentum". Why?
model = dc.models.TensorflowMultiTaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tf_classification_overfit(self):
"""Test that tensorflow models can overfit simple classification datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.0003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_fittransform_regression_overfit(self):
"""Test that TensorFlow FitTransform models can overfit simple regression datasets."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
fit_transformers = [dc.trans.CoulombFitTransformer(dataset)]
regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
model = dc.models.TensorflowMultiTaskFitTransformRegressor(
n_tasks, [n_features, n_features],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=n_samples,
fit_transformers=fit_transformers,
n_evals=1)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tf_skewed_classification_overfit(self):
"""Test tensorflow models can overfit 0/1 datasets with few actives."""
n_samples = 100
n_features = 3
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
p = .05
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=100)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .75
def test_tf_skewed_missing_classification_overfit(self):
"""TF, skewed data, few actives
Test tensorflow models overfit 0/1 datasets with missing data and few
actives. This is intended to be as close to singletask MUV datasets as
possible.
"""
n_samples = 5120
n_features = 6
n_tasks = 1
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
p = .002
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.binomial(1, p, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
y_flat, w_flat = np.squeeze(y), np.squeeze(w)
y_nonzero = y_flat[w_flat != 0]
num_nonzero = np.count_nonzero(y_nonzero)
weight_nonzero = len(y_nonzero) / num_nonzero
w_flat[y_flat != 0] = weight_nonzero
w = np.reshape(w_flat, (n_samples, n_tasks))
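# The reweighting above balances the classes: each of the rare positives
# gets weight len(y_flat)/num_nonzero, so positives and negatives contribute
# comparably to the loss.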
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[1.],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=50)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .8
def test_sklearn_multitask_classification_overfit(self):
"""Test SKLearn singletask-to-multitask overfits tiny data."""
n_tasks = 10
tasks = ["task%d" % task for task in range(n_tasks)]
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, task_averager=np.mean)
def model_builder(model_dir):
sklearn_model = RandomForestClassifier()
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_multitask_classification_overfit(self):
"""Test tf multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.TensorflowMultiTaskClassifier(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.0003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_robust_multitask_classification_overfit(self):
"""Test tf robust multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.RobustMultitaskClassifier(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=25)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_tf_logreg_multitask_classification_overfit(self):
"""Test tf multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.TensorflowLogisticRegression(
n_tasks,
n_features,
learning_rate=0.5,
weight_init_stddevs=[.01],
batch_size=n_samples)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
def test_IRV_multitask_classification_overfit(self):
"""Test IRV classifier overfits tiny data."""
n_tasks = 5
n_samples = 10
n_features = 128
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.randint(2, size=(n_samples, n_features))
y = np.ones((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
IRV_transformer = dc.trans.IRVTransformer(5, n_tasks, dataset)
dataset_trans = IRV_transformer.transform(dataset)
classification_metric = dc.metrics.Metric(
dc.metrics.accuracy_score, task_averager=np.mean)
model = dc.models.TensorflowMultiTaskIRVClassifier(
n_tasks, K=5, learning_rate=0.01, batch_size=n_samples)
# Fit trained model
model.fit(dataset_trans)
model.save()
# Eval model on train
scores = model.evaluate(dataset_trans, [classification_metric])
assert scores[classification_metric.name] > .9
def test_sklearn_multitask_regression_overfit(self):
"""Test SKLearn singletask-to-multitask overfits tiny regression data."""
n_tasks = 2
tasks = ["task%d" % task for task in range(n_tasks)]
n_samples = 10
n_features = 3
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, n_tasks)
w = np.ones((n_samples, n_tasks))
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.r2_score, task_averager=np.mean)
def model_builder(model_dir):
sklearn_model = RandomForestRegressor()
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder)
# Fit trained model
model.fit(dataset)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .7
def test_tf_multitask_regression_overfit(self):
"""Test tf multitask overfits tiny data."""
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.mean_squared_error, task_averager=np.mean, mode="regression")
model = dc.models.TensorflowMultiTaskRegressor(
n_tasks,
n_features,
dropouts=[0.],
learning_rate=0.0003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=50)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .1
def test_tf_robust_multitask_regression_overfit(self):
"""Test tf robust multitask overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 10
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.mean_squared_error, task_averager=np.mean, mode="regression")
model = dc.models.RobustMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=25)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .2
def test_graph_conv_singletask_classification_overfit(self):
"""Test graph-conv multitask overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
g = tf.Graph()
sess = tf.Session(graph=g)
n_tasks = 1
n_samples = 10
n_features = 3
n_classes = 2
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
n_feat = 75
batch_size = 10
graph_model = dc.nn.SequentialGraph(n_feat)
graph_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphPool())
# Gather Projection
graph_model.add(dc.nn.Dense(128, 64, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphGather(batch_size, activation="tanh"))
model = dc.models.MultitaskGraphClassifier(
graph_model,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=20)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .65
def test_graph_conv_singletask_regression_overfit(self):
"""Test graph-conv multitask overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
g = tf.Graph()
sess = tf.Session(graph=g)
n_tasks = 1
n_samples = 10
n_features = 3
n_classes = 2
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_regression.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
regression_metric = dc.metrics.Metric(
dc.metrics.mean_squared_error, task_averager=np.mean)
n_feat = 75
batch_size = 10
graph_model = dc.nn.SequentialGraph(n_feat)
graph_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphPool())
# Gather Projection
graph_model.add(dc.nn.Dense(128, 64))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphGather(batch_size, activation="tanh"))
model = dc.models.MultitaskGraphRegressor(
graph_model,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=1e-2,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=40)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] < .2
def test_DTNN_multitask_regression_overfit(self):
"""Test deep tensor neural net overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
# Load mini log-solubility dataset.
input_file = os.path.join(self.current_dir, "example_DTNN.mat")
dataset = scipy.io.loadmat(input_file)
X = dataset['X']
y = dataset['T']
w = np.ones_like(y)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids=None)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_tasks = y.shape[1]
max_n_atoms = list(dataset.get_data_shape())[0]
batch_size = 10
graph_model = dc.nn.SequentialDTNNGraph(max_n_atoms=max_n_atoms)
graph_model.add(dc.nn.DTNNEmbedding(n_embedding=20))
graph_model.add(dc.nn.DTNNStep(n_embedding=20))
graph_model.add(dc.nn.DTNNStep(n_embedding=20))
graph_model.add(dc.nn.DTNNGather(n_embedding=20))
n_feat = 20
model = dc.models.DTNNGraphRegressor(
graph_model,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=20)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .9
def test_DAG_singletask_regression_overfit(self):
"""Test DAG regressor multitask overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_regression.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_feat = 75
batch_size = 10
transformer = dc.trans.DAGTransformer(max_atoms=50)
dataset = transformer.transform(dataset)
graph = dc.nn.SequentialDAGGraph(
n_feat, batch_size=batch_size, max_atoms=50)
graph.add(dc.nn.DAGLayer(30, n_feat, max_atoms=50))
graph.add(dc.nn.DAGGather(max_atoms=50))
model = dc.models.MultitaskGraphRegressor(
graph,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=0.005,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=50)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .8
def test_weave_singletask_classification_overfit(self):
"""Test weave model overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
n_atom_feat = 75
n_pair_feat = 14
n_feat = 128
batch_size = 10
max_atoms = 50
graph = dc.nn.SequentialWeaveGraph(
max_atoms=max_atoms, n_atom_feat=n_atom_feat, n_pair_feat=n_pair_feat)
graph.add(dc.nn.WeaveLayer(max_atoms, 75, 14))
graph.add(dc.nn.WeaveConcat(batch_size, n_output=n_feat))
graph.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph.add(dc.nn.WeaveGather(batch_size, n_input=n_feat))
model = dc.models.MultitaskGraphClassifier(
graph,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=20)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .65
def test_weave_singletask_regression_overfit(self):
"""Test weave model overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
# Load mini log-solubility dataset.
featurizer = dc.feat.WeaveFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_regression.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
regression_metric = dc.metrics.Metric(
dc.metrics.pearson_r2_score, task_averager=np.mean)
n_atom_feat = 75
n_pair_feat = 14
n_feat = 128
batch_size = 10
max_atoms = 50
graph = dc.nn.SequentialWeaveGraph(
max_atoms=max_atoms, n_atom_feat=n_atom_feat, n_pair_feat=n_pair_feat)
graph.add(dc.nn.WeaveLayer(max_atoms, 75, 14))
graph.add(dc.nn.WeaveConcat(batch_size, n_output=n_feat))
graph.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph.add(dc.nn.WeaveGather(batch_size, n_input=n_feat))
model = dc.models.MultitaskGraphRegressor(
graph,
n_tasks,
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(dataset, nb_epoch=40)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [regression_metric])
assert scores[regression_metric.name] > .9
def test_siamese_singletask_classification_overfit(self):
"""Test siamese singletask model overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
n_feat = 75
max_depth = 4
n_pos = 6
n_neg = 4
test_batch_size = 10
n_train_trials = 80
support_batch_size = n_pos + n_neg
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
support_model = dc.nn.SequentialSupportGraph(n_feat)
# Add layers
# output will be (n_atoms, 64)
support_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
# Need to add batch-norm separately to test/support due to differing
# shapes.
# output will be (n_atoms, 64)
support_model.add_test(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
# output will be (n_atoms, 64)
support_model.add_support(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add(dc.nn.GraphPool())
support_model.add_test(dc.nn.GraphGather(test_batch_size))
support_model.add_support(dc.nn.GraphGather(support_batch_size))
model = dc.models.SupportGraphClassifier(
support_model,
test_batch_size=test_batch_size,
support_batch_size=support_batch_size,
learning_rate=1e-3)
# Fit trained model. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly.
model.fit(
dataset, n_episodes_per_epoch=n_train_trials, n_pos=n_pos, n_neg=n_neg)
model.save()
# Eval model on train. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly. Note that support is *not* excluded (so we
# can measure model has memorized support). Replacement is turned off to
# ensure that support contains full training set. This checks that the
# model has mastered memorization of provided support.
scores, _ = model.evaluate(
dataset,
classification_metric,
n_trials=5,
n_pos=n_pos,
n_neg=n_neg,
exclude_support=False)
##################################################### DEBUG
# TODO(rbharath): Check if something went wrong here...
# Measure performance on 0-th task.
#assert scores[0] > .9
assert scores[0] > .75
##################################################### DEBUG
def test_attn_lstm_singletask_classification_overfit(self):
"""Test attn lstm singletask overfits tiny data."""
np.random.seed(123)
tf.set_random_seed(123)
n_tasks = 1
n_feat = 75
max_depth = 4
n_pos = 6
n_neg = 4
test_batch_size = 10
support_batch_size = n_pos + n_neg
n_train_trials = 80
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
support_model = dc.nn.SequentialSupportGraph(n_feat)
# Add layers
# output will be (n_atoms, 64)
support_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
# Need to add batch-norm separately to test/support due to differing
# shapes.
# output will be (n_atoms, 64)
support_model.add_test(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
# output will be (n_atoms, 64)
support_model.add_support(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add(dc.nn.GraphPool())
support_model.add_test(dc.nn.GraphGather(test_batch_size))
support_model.add_support(dc.nn.GraphGather(support_batch_size))
# Apply an attention lstm layer
support_model.join(
dc.nn.AttnLSTMEmbedding(test_batch_size, support_batch_size, 64,
max_depth))
model = dc.models.SupportGraphClassifier(
support_model,
test_batch_size=test_batch_size,
support_batch_size=support_batch_size,
learning_rate=1e-3)
# Fit trained model. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly.
model.fit(
dataset, n_episodes_per_epoch=n_train_trials, n_pos=n_pos, n_neg=n_neg)
model.save()
# Eval model on train. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly. Note that support is *not* excluded (so we
# can measure model has memorized support). Replacement is turned off to
# ensure that support contains full training set. This checks that the
# model has mastered memorization of provided support.
scores, _ = model.evaluate(
dataset,
classification_metric,
n_trials=5,
n_pos=n_pos,
n_neg=n_neg,
exclude_support=False)
##################################################### DEBUG
# TODO(rbharath): Check if something went wrong here...
# Measure performance on 0-th task.
#assert scores[0] > .85
assert scores[0] > .79
##################################################### DEBUG
def test_residual_lstm_singletask_classification_overfit(self):
"""Test resi-lstm multitask overfits tiny data."""
n_tasks = 1
n_feat = 75
max_depth = 4
n_pos = 6
n_neg = 4
test_batch_size = 10
support_batch_size = n_pos + n_neg
n_train_trials = 80
# Load mini log-solubility dataset.
featurizer = dc.feat.ConvMolFeaturizer()
tasks = ["outcome"]
input_file = os.path.join(self.current_dir, "example_classification.csv")
loader = dc.data.CSVLoader(
tasks=tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(input_file)
classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
support_model = dc.nn.SequentialSupportGraph(n_feat)
# Add layers
# output will be (n_atoms, 64)
support_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
# Need to add batch-norm separately to test/support due to differing
# shapes.
# output will be (n_atoms, 64)
support_model.add_test(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
# output will be (n_atoms, 64)
support_model.add_support(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
support_model.add(dc.nn.GraphPool())
support_model.add_test(dc.nn.GraphGather(test_batch_size))
support_model.add_support(dc.nn.GraphGather(support_batch_size))
# Apply a residual lstm layer
support_model.join(
dc.nn.ResiLSTMEmbedding(test_batch_size, support_batch_size, 64,
max_depth))
model = dc.models.SupportGraphClassifier(
support_model,
test_batch_size=test_batch_size,
support_batch_size=support_batch_size,
learning_rate=1e-3)
# Fit trained model. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly.
model.fit(
dataset, n_episodes_per_epoch=n_train_trials, n_pos=n_pos, n_neg=n_neg)
model.save()
# Eval model on train. Dataset has 6 positives and 4 negatives, so set
# n_pos/n_neg accordingly. Note that support is *not* excluded (so we
# can measure model has memorized support). Replacement is turned off to
# ensure that support contains full training set. This checks that the
# model has mastered memorization of provided support.
scores, _ = model.evaluate(
dataset,
classification_metric,
n_trials=5,
n_pos=n_pos,
n_neg=n_neg,
exclude_support=False)
##################################################### DEBUG
# TODO(rbharath): Check if something went wrong here...
# Measure performance on 0-th task.
#assert scores[0] > .9
assert scores[0] > .65
##################################################### DEBUG
def test_tf_progressive_regression_overfit(self):
"""Test tf progressive multitask overfits tiny data."""
np.random.seed(123)
n_tasks = 9
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.ones((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
metric = dc.metrics.Metric(dc.metrics.rms_score, task_averager=np.mean)
model = dc.models.ProgressiveMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[.1],
seed=123,
alpha_init_stddevs=[.02],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=20)
model.save()
# Eval model on train
scores = model.evaluate(dataset, [metric])
y_pred = model.predict(dataset)
assert scores[metric.name] < .2
| mit |
mavit/ansible | lib/ansible/modules/network/f5/bigip_iapplx_package.py | 8 | 11731 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_iapplx_package
short_description: Manages Javascript iApp packages on a BIG-IP
description:
- Manages Javascript iApp packages on a BIG-IP. This module will allow
you to deploy iAppLX packages to the BIG-IP and manage their lifecycle.
version_added: 2.5
options:
package:
description:
- The iAppLX package that you want to upload or remove. When C(state) is C(present),
and you intend to use this module in a C(role), it is recommended that you use
the C({{ role_path }}) variable. An example is provided in the C(EXAMPLES) section.
- When C(state) is C(absent), it is not necessary for the package to exist on the
Ansible controller. If the full path to the package is provided, the filename will
specifically be cherry picked from it to properly remove the package.
state:
description:
- Whether the iAppLX package should exist or not.
default: present
choices:
- present
- absent
notes:
- Requires the rpm tool be installed on the host. This can be accomplished through
different ways on each platform. On Debian based systems with C(apt);
C(apt-get install rpm). On Mac with C(brew); C(brew install rpm).
This command is already present on RedHat based systems.
- Requires BIG-IP >= 12.1.0 because the required functionality is missing
on versions earlier than that.
requirements:
- Requires BIG-IP >= 12.1.0
- The 'rpm' tool installed on the Ansible controller
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Add an iAppLX package
bigip_iapplx_package:
package: MyApp-0.1.0-0001.noarch.rpm
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Add an iAppLX package stored in a role
bigip_iapplx_package:
package: "{{ roles_path }}/files/MyApp-0.1.0-0001.noarch.rpm'"
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Remove an iAppLX package
bigip_iapplx_package:
package: MyApp-0.1.0-0001.noarch.rpm
password: secret
server: lb.mydomain.com
state: absent
user: admin
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
import os
import time
from ansible.module_utils.basic import AnsibleModule
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_attributes = []
returnables = []
@property
def package(self):
if self._values['package'] is None:
return None
return self._values['package']
@property
def package_file(self):
if self._values['package'] is None:
return None
return os.path.basename(self._values['package'])
@property
def package_name(self):
"""Return a valid name for the package
BIG-IP determines the package name by the content of the RPM info.
It does not use the filename. Therefore, we do the same. This method
is only used though when the file actually exists on your Ansible
controller.
If the package does not exist, then we instead use the filename
portion of the 'package' argument that is provided.
Non-existence typically occurs when using 'state' = 'absent'
:return: the package name reported by rpm, or the bare package filename
"""
cmd = ['rpm', '-qp', '--queryformat', '%{NAME}-%{VERSION}-%{RELEASE}.%{ARCH}', self.package]
rc, out, err = self._module.run_command(cmd)
if not out:
return str(self.package_file)
return out
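# Illustrative example (hypothetical file name): for MyApp-0.1.0-0001.noarch.rpm
# the query above shells out to
#   rpm -qp --queryformat '%{NAME}-%{VERSION}-%{RELEASE}.%{ARCH}' MyApp-0.1.0-0001.noarch.rpm
# which typically returns 'MyApp-0.1.0-0001.noarch', matching what BIG-IP
# reports as packageName.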
@property
def package_root(self):
if self._values['package'] is None:
return None
base = os.path.basename(self._values['package'])
result = os.path.splitext(base)
return result[0]
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = Parameters(module=self.module, params=self.module.params)
self.changes = Parameters()
def exec_module(self):
result = dict()
changed = False
state = self.want.state
version = self.client.api.tmos_version
if LooseVersion(version) < LooseVersion('12.1.0'):
raise F5ModuleError(
"This version of BIG-IP is not supported."
)
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def present(self):
if self.exists():
return False
else:
return self.create()
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def exists(self):
exists = False
packages = self.get_installed_packages_on_device()
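# If the package file is available locally, compare by the rpm-derived
# package name; otherwise fall back to matching the filename (minus its
# extension) against the installed package names.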
if os.path.exists(self.want.package):
exists = True
for package in packages:
if exists:
if self.want.package_name == package['packageName']:
return True
else:
if self.want.package_root == package['packageName']:
return True
return False
def get_installed_packages_on_device(self):
collection = self.client.api.shared.iapp.package_management_tasks_s
task = collection.package_management_task.create(
operation='QUERY'
)
status = self._wait_for_task(task)
if status == 'FINISHED':
return task.queryResponse
raise F5ModuleError(
"Failed to find the installed packages on the device"
)
def create(self):
if self.module.check_mode:
return True
if not os.path.exists(self.want.package):
if self.want.package.startswith('/'):
raise F5ModuleError(
"The specified iAppLX package was not found at {0}.".format(self.want.package)
)
else:
raise F5ModuleError(
"The specified iAppLX package was not found in {0}.".format(os.getcwd())
)
self.upload_to_device()
self.create_on_device()
self.enable_iapplx_on_device()
self.remove_package_file_from_device()
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the iApp template")
def upload_to_device(self):
upload = self.client.api.shared.file_transfer.uploads
upload.upload_file(
self.want.package
)
def remove_package_file_from_device(self):
self.client.api.tm.util.unix_rm.exec_cmd(
'run',
utilCmdArgs="/var/config/rest/downloads/{0}".format(self.want.package_file)
)
def create_on_device(self):
remote_path = "/var/config/rest/downloads/{0}".format(self.want.package_file)
collection = self.client.api.shared.iapp.package_management_tasks_s
task = collection.package_management_task.create(
operation='INSTALL',
packageFilePath=remote_path
)
status = self._wait_for_task(task)
if status == 'FINISHED':
return True
else:
raise F5ModuleError(task.errorMessage)
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the iAppLX package")
return True
def remove_from_device(self):
collection = self.client.api.shared.iapp.package_management_tasks_s
task = collection.package_management_task.create(
operation='UNINSTALL',
packageName=self.want.package_root
)
status = self._wait_for_task(task)
if status == 'FINISHED':
return True
return False
def _wait_for_task(self, task):
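# Poll the task roughly once per second for up to 60 seconds, returning
# early once it reports FINISHED or FAILED.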
for x in range(0, 60):
task.refresh()
if task.status in ['FINISHED', 'FAILED']:
return task.status
time.sleep(1)
return task.status
def enable_iapplx_on_device(self):
self.client.api.tm.util.bash.exec_cmd(
'run',
utilCmdArgs='-c "touch /var/config/rest/iapps/enable"'
)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
state=dict(
default='present',
choices=['present', 'absent']
),
package=dict()
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.required_if = [
['state', 'present', ['package']]
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
required_if=spec.required_if
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as e:
cleanup_tokens(client)
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
alivecor/tensorflow | tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py | 23 | 28748 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A decoder that performs beam search."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
__all__ = [
"BeamSearchDecoderOutput",
"BeamSearchDecoderState",
"BeamSearchDecoder",
"FinalBeamSearchDecoderOutput",
"tile_batch",
]
class BeamSearchDecoderState(
collections.namedtuple("BeamSearchDecoderState", ("cell_state", "log_probs",
"finished", "lengths"))):
pass
class BeamSearchDecoderOutput(
collections.namedtuple("BeamSearchDecoderOutput",
("scores", "predicted_ids", "parent_ids"))):
pass
class FinalBeamSearchDecoderOutput(
collections.namedtuple("FinalBeamDecoderOutput",
["predicted_ids", "beam_search_decoder_output"])):
"""Final outputs returned by the beam search after all decoding is finished.
Args:
predicted_ids: The final prediction. A tensor of shape
`[T, batch_size, beam_width]`.
beam_search_decoder_output: An instance of `BeamSearchDecoderOutput` that describes
the state of the beam search.
"""
pass
def _tile_batch(t, multiplier):
"""Core single-tensor implementation of tile_batch."""
t = ops.convert_to_tensor(t, name="t")
shape_t = array_ops.shape(t)
if t.shape.ndims is None or t.shape.ndims < 1:
raise ValueError("t must have statically known rank")
tiling = [1] * (t.shape.ndims + 1)
tiling[1] = multiplier
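# For a rank-2 input this yields tiling == [1, multiplier, 1]; only the
# freshly inserted axis 1 is repeated.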
tiled_static_batch_size = (
t.shape[0].value * multiplier if t.shape[0].value is not None else None)
tiled = array_ops.tile(array_ops.expand_dims(t, 1), tiling)
tiled = array_ops.reshape(
tiled, array_ops.concat(([shape_t[0] * multiplier], shape_t[1:]), 0))
tiled.set_shape(
tensor_shape.TensorShape(
[tiled_static_batch_size]).concatenate(t.shape[1:]))
return tiled
def tile_batch(t, multiplier, name=None):
"""Tile the batch dimension of a (possibly nested structure of) tensor(s) t.
For each tensor t in a (possibly nested structure) of tensors,
this function takes a tensor t shaped `[batch_size, s0, s1, ...]` composed of
minibatch entries `t[0], ..., t[batch_size - 1]` and tiles it to have a shape
`[batch_size * multiplier, s0, s1, ...]` composed of minibatch entries
`t[0], t[0], ..., t[1], t[1], ...` where each minibatch entry is repeated
`multiplier` times.
Args:
t: `Tensor` shaped `[batch_size, ...]`.
multiplier: Python int.
name: Name scope for any created operations.
Returns:
A (possibly nested structure of) `Tensor` shaped
`[batch_size * multiplier, ...]`.
Raises:
ValueError: if tensor(s) `t` do not have a statically known rank or
the rank is < 1.
"""
flat_t = nest.flatten(t)
with ops.name_scope(name, "tile_batch", flat_t + [multiplier]):
return nest.map_structure(lambda t_: _tile_batch(t_, multiplier), t)
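# Illustrative sketch (not part of the original file): with batch_size=2
# and multiplier=3,
#   t = tf.constant([[1., 2.], [3., 4.]])   # shape [2, 2]
#   tiled = tile_batch(t, multiplier=3)     # shape [6, 2]
#   # rows: [1,2], [1,2], [1,2], [3,4], [3,4], [3,4]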
def _check_maybe(t):
if isinstance(t, tensor_array_ops.TensorArray):
raise TypeError(
"TensorArray state is not supported by BeamSearchDecoder: %s" % t.name)
if t.shape.ndims is None:
raise ValueError(
"Expected tensor (%s) to have known rank, but ndims == None." % t)
class BeamSearchDecoder(decoder.Decoder):
"""BeamSearch sampling decoder."""
def __init__(self,
cell,
embedding,
start_tokens,
end_token,
initial_state,
beam_width,
output_layer=None,
length_penalty_weight=0.0):
"""Initialize BeamSearchDecoder.
Args:
cell: An `RNNCell` instance.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
beam_width: Python integer, the number of beams.
output_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
`tf.layers.Dense`. Optional layer to apply to the RNN output prior
to storing the result or sampling.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
Raises:
TypeError: if `cell` is not an instance of `RNNCell`,
or `output_layer` is not an instance of `tf.layers.Layer`.
ValueError: If `start_tokens` is not a vector or
`end_token` is not a scalar.
"""
if not rnn_cell_impl._like_rnncell(cell): # pylint: disable=protected-access
raise TypeError("cell must be an RNNCell, received: %s" % type(cell))
if (output_layer is not None
and not isinstance(output_layer, layers_base.Layer)):
raise TypeError(
"output_layer must be a Layer, received: %s" % type(output_layer))
self._cell = cell
self._output_layer = output_layer
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._batch_size = array_ops.size(start_tokens)
self._beam_width = beam_width
self._length_penalty_weight = length_penalty_weight
self._initial_cell_state = nest.map_structure(
self._maybe_split_batch_beams,
initial_state, self._cell.state_size)
self._start_tokens = array_ops.tile(
array_ops.expand_dims(self._start_tokens, 1), [1, self._beam_width])
self._start_inputs = self._embedding_fn(self._start_tokens)
self._finished = array_ops.zeros(
[self._batch_size, self._beam_width], dtype=dtypes.bool)
@property
def batch_size(self):
return self._batch_size
def _rnn_output_size(self):
size = self._cell.output_size
if self._output_layer is None:
return size
else:
# To use layer's compute_output_shape, we need to convert the
# RNNCell's output_size entries into shapes with an unknown
# batch size. We then pass this through the layer's
# compute_output_shape and read off all but the first (batch)
# dimensions to get the output size of the rnn with the layer
# applied to the top.
output_shape_with_unknown_batch = nest.map_structure(
lambda s: tensor_shape.TensorShape([None]).concatenate(s),
size)
layer_output_shape = self._output_layer._compute_output_shape( # pylint: disable=protected-access
output_shape_with_unknown_batch)
return nest.map_structure(lambda s: s[1:], layer_output_shape)
@property
def output_size(self):
# Return the cell output and the id
return BeamSearchDecoderOutput(
scores=tensor_shape.TensorShape([self._beam_width]),
predicted_ids=tensor_shape.TensorShape([self._beam_width]),
parent_ids=tensor_shape.TensorShape([self._beam_width]))
@property
def output_dtype(self):
# Assume the cell outputs share the dtype of the first component of
# the initial cell state; return the output_size structure filled
# with that dtype, plus int32 for the ids.
dtype = nest.flatten(self._initial_cell_state)[0].dtype
return BeamSearchDecoderOutput(
scores=nest.map_structure(lambda _: dtype, self._rnn_output_size()),
predicted_ids=dtypes.int32,
parent_ids=dtypes.int32)
def initialize(self, name=None):
"""Initialize the decoder.
Args:
name: Name scope for any created operations.
Returns:
`(finished, start_inputs, initial_state)`.
"""
finished, start_inputs = self._finished, self._start_inputs
initial_state = BeamSearchDecoderState(
cell_state=self._initial_cell_state,
log_probs=array_ops.zeros(
[self._batch_size, self._beam_width],
dtype=nest.flatten(self._initial_cell_state)[0].dtype),
finished=finished,
lengths=array_ops.zeros(
[self._batch_size, self._beam_width], dtype=dtypes.int32))
return (finished, start_inputs, initial_state)
def finalize(self, outputs, final_state, sequence_lengths):
"""Finalize and return the predicted_ids.
Args:
outputs: An instance of BeamSearchDecoderOutput.
final_state: An instance of BeamSearchDecoderState. Passed through to the
output.
sequence_lengths: An `int32` tensor shaped `[batch_size, beam_width]`.
The sequence lengths determined for each beam during decode.
Returns:
outputs: An instance of FinalBeamSearchDecoderOutput where the
predicted_ids are the result of calling _gather_tree.
final_state: The same input instance of BeamSearchDecoderState.
"""
predicted_ids = beam_search_ops.gather_tree(
outputs.predicted_ids, outputs.parent_ids,
sequence_length=sequence_lengths)
outputs = FinalBeamSearchDecoderOutput(
beam_search_decoder_output=outputs, predicted_ids=predicted_ids)
return outputs, final_state
def _merge_batch_beams(self, t, s=None):
"""Merges the tensor from a batch of beams into a batch by beams.
More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
reshape this into [batch_size*beam_width, s]
Args:
t: Tensor of dimension [batch_size, beam_width, s]
s: (Possibly known) depth shape.
Returns:
A reshaped version of t with dimension [batch_size * beam_width, s].
"""
if isinstance(s, ops.Tensor):
s = tensor_shape.as_shape(tensor_util.constant_value(s))
else:
s = tensor_shape.TensorShape(s)
t_shape = array_ops.shape(t)
static_batch_size = tensor_util.constant_value(self._batch_size)
batch_size_beam_width = (
None if static_batch_size is None
else static_batch_size * self._beam_width)
reshaped_t = array_ops.reshape(
t, array_ops.concat(
([self._batch_size * self._beam_width], t_shape[2:]), 0))
reshaped_t.set_shape(
(tensor_shape.TensorShape([batch_size_beam_width]).concatenate(s)))
return reshaped_t
def _split_batch_beams(self, t, s=None):
"""Splits the tensor from a batch by beams into a batch of beams.
More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
reshape this into [batch_size, beam_width, s]
Args:
t: Tensor of dimension [batch_size*beam_width, s].
s: (Possibly known) depth shape.
Returns:
A reshaped version of t with dimension [batch_size, beam_width, s].
Raises:
ValueError: If, after reshaping, the new tensor is not shaped
`[batch_size, beam_width, s]` (assuming batch_size and beam_width
are known statically).
"""
if isinstance(s, ops.Tensor):
s = tensor_shape.TensorShape(tensor_util.constant_value(s))
else:
s = tensor_shape.TensorShape(s)
t_shape = array_ops.shape(t)
reshaped_t = array_ops.reshape(
t, array_ops.concat(
([self._batch_size, self._beam_width], t_shape[1:]), 0))
static_batch_size = tensor_util.constant_value(self._batch_size)
expected_reshaped_shape = tensor_shape.TensorShape(
[static_batch_size, self._beam_width]).concatenate(s)
if not reshaped_t.shape.is_compatible_with(expected_reshaped_shape):
raise ValueError("Unexpected behavior when reshaping between beam width "
"and batch size. The reshaped tensor has shape: %s. "
"We expected it to have shape "
"(batch_size, beam_width, depth) == %s. Perhaps you "
"forgot to create a zero_state with "
"batch_size=encoder_batch_size * beam_width?"
% (reshaped_t.shape, expected_reshaped_shape))
reshaped_t.set_shape(expected_reshaped_shape)
return reshaped_t
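# Shape sketch (illustrative values): _merge_batch_beams and
# _split_batch_beams are inverses of each other. For batch_size=2,
# beam_width=3 and depth s=4:
#   merge: [2, 3, 4] -> [6, 4]
#   split: [6, 4]    -> [2, 3, 4]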
def _maybe_split_batch_beams(self, t, s):
"""Maybe splits the tensor from a batch by beams into a batch of beams.
We do this so that we can use nest and not run into problems with shapes.
Args:
t: Tensor of dimension [batch_size*beam_width, s]
s: Tensor, Python int, or TensorShape.
Returns:
Either a reshaped version of t with dimension
[batch_size, beam_width, s] if t's first dimension is of size
batch_size*beam_width or t if not.
Raises:
TypeError: If t is an instance of TensorArray.
ValueError: If the rank of t is not statically known.
"""
_check_maybe(t)
if t.shape.ndims >= 1:
return self._split_batch_beams(t, s)
else:
return t
def _maybe_merge_batch_beams(self, t, s):
"""Splits the tensor from a batch by beams into a batch of beams.
More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
reshape this into [batch_size, beam_width, s]
Args:
t: Tensor of dimension [batch_size*beam_width, s]
s: Tensor, Python int, or TensorShape.
Returns:
A reshaped version of t with dimension [batch_size, beam_width, s].
Raises:
TypeError: If t is an instance of TensorArray.
ValueError: If the rank of t is not statically known.
"""
_check_maybe(t)
if t.shape.ndims >= 2:
return self._merge_batch_beams(t, s)
else:
return t
def step(self, time, inputs, state, name=None):
"""Perform a decoding step.
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
batch_size = self._batch_size
beam_width = self._beam_width
end_token = self._end_token
length_penalty_weight = self._length_penalty_weight
with ops.name_scope(name, "BeamSearchDecoderStep", (time, inputs, state)):
cell_state = state.cell_state
inputs = nest.map_structure(
lambda inp: self._merge_batch_beams(inp, s=inp.shape[2:]), inputs)
cell_state = nest.map_structure(
self._maybe_merge_batch_beams,
cell_state, self._cell.state_size)
cell_outputs, next_cell_state = self._cell(inputs, cell_state)
cell_outputs = nest.map_structure(
lambda out: self._split_batch_beams(out, out.shape[1:]), cell_outputs)
next_cell_state = nest.map_structure(
self._maybe_split_batch_beams,
next_cell_state, self._cell.state_size)
if self._output_layer is not None:
cell_outputs = self._output_layer(cell_outputs)
beam_search_output, beam_search_state = _beam_search_step(
time=time,
logits=cell_outputs,
next_cell_state=next_cell_state,
beam_state=state,
batch_size=batch_size,
beam_width=beam_width,
end_token=end_token,
length_penalty_weight=length_penalty_weight)
finished = beam_search_state.finished
sample_ids = beam_search_output.predicted_ids
next_inputs = control_flow_ops.cond(
math_ops.reduce_all(finished), lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (beam_search_output, beam_search_state, next_inputs, finished)
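# --- Illustrative usage sketch (hedged; not part of the original module) ---
# A minimal example of wiring BeamSearchDecoder into decoding, assuming the
# TF 1.x tf.contrib.seq2seq namespace. `cell`, `embedding_matrix`,
# `batch_size`, `beam_width`, `sos_id`, `eos_id`, `vocab_size` and `max_len`
# are hypothetical names defined elsewhere.
#
#   decoder = BeamSearchDecoder(
#       cell=cell,
#       embedding=embedding_matrix,
#       start_tokens=tf.fill([batch_size], sos_id),
#       end_token=eos_id,
#       initial_state=cell.zero_state(batch_size * beam_width, tf.float32),
#       beam_width=beam_width,
#       output_layer=tf.layers.Dense(vocab_size, use_bias=False),
#       length_penalty_weight=0.6)
#   outputs, final_state, _ = tf.contrib.seq2seq.dynamic_decode(
#       decoder, maximum_iterations=max_len)
#   # outputs.predicted_ids collects the chosen beam ids per time step.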
def _beam_search_step(time, logits, next_cell_state, beam_state, batch_size,
beam_width, end_token, length_penalty_weight):
"""Performs a single step of Beam Search Decoding.
Args:
time: Beam search time step, should start at 0. At time 0 we assume
that all beams are equal and consider only the first beam for
continuations.
logits: Logits at the current time step. A tensor of shape
`[batch_size, beam_width, vocab_size]`
next_cell_state: The next state from the cell, e.g. an instance of
AttentionWrapperState if the cell is attentional.
beam_state: Current state of the beam search.
An instance of `BeamSearchDecoderState`.
batch_size: The batch size for this input.
beam_width: Python int. The size of the beams.
end_token: The int32 end token.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
Returns:
A tuple `(output, next_state)` of `BeamSearchDecoderOutput` and the
new `BeamSearchDecoderState` for this step.
"""
static_batch_size = tensor_util.constant_value(batch_size)
# Calculate the current lengths of the predictions
prediction_lengths = beam_state.lengths
previously_finished = beam_state.finished
# Calculate the total log probs for the new hypotheses
# Final Shape: [batch_size, beam_width, vocab_size]
step_log_probs = nn_ops.log_softmax(logits)
step_log_probs = _mask_probs(step_log_probs, end_token, previously_finished)
total_probs = array_ops.expand_dims(beam_state.log_probs, 2) + step_log_probs
# Calculate the continuation lengths by adding to all continuing beams.
vocab_size = logits.shape[-1].value or array_ops.shape(logits)[-1]
lengths_to_add = array_ops.one_hot(
indices=array_ops.tile(
array_ops.reshape(end_token, [1, 1]), [batch_size, beam_width]),
depth=vocab_size,
on_value=0,
off_value=1)
add_mask = (1 - math_ops.to_int32(previously_finished))
lengths_to_add = array_ops.expand_dims(add_mask, 2) * lengths_to_add
new_prediction_lengths = (
lengths_to_add + array_ops.expand_dims(prediction_lengths, 2))
# Calculate the scores for each beam
scores = _get_scores(
log_probs=total_probs,
sequence_lengths=new_prediction_lengths,
length_penalty_weight=length_penalty_weight)
time = ops.convert_to_tensor(time, name="time")
# During the first time step we only consider the initial beam
scores_shape = array_ops.shape(scores)
scores_flat = control_flow_ops.cond(
time > 0,
lambda: array_ops.reshape(scores, [batch_size, -1]),
lambda: scores[:, 0])
num_available_beam = control_flow_ops.cond(
time > 0, lambda: math_ops.reduce_prod(scores_shape[1:]),
lambda: math_ops.reduce_prod(scores_shape[2:]))
# Pick the next beams according to the specified successors function
next_beam_size = math_ops.minimum(
ops.convert_to_tensor(beam_width, dtype=dtypes.int32, name="beam_width"),
num_available_beam)
next_beam_scores, word_indices = nn_ops.top_k(scores_flat, k=next_beam_size)
next_beam_scores.set_shape([static_batch_size, beam_width])
word_indices.set_shape([static_batch_size, beam_width])
# Pick out the probs, beam_ids, and states according to the chosen predictions
next_beam_probs = _tensor_gather_helper(
gather_indices=word_indices,
gather_from=total_probs,
batch_size=batch_size,
range_size=beam_width * vocab_size,
gather_shape=[-1])
next_word_ids = math_ops.to_int32(word_indices % vocab_size)
next_beam_ids = math_ops.to_int32(word_indices / vocab_size)
# Append new ids to current predictions
previously_finished = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=previously_finished,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_finished = math_ops.logical_or(previously_finished,
math_ops.equal(next_word_ids, end_token))
# Calculate the length of the next predictions.
# 1. Finished beams remain unchanged
# 2. Beams that are now finished (EOS predicted) remain unchanged
# 3. Beams that are not yet finished have their length increased by 1
lengths_to_add = math_ops.to_int32(
math_ops.not_equal(next_word_ids, end_token))
lengths_to_add = (1 - math_ops.to_int32(next_finished)) * lengths_to_add
next_prediction_len = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=beam_state.lengths,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_prediction_len += lengths_to_add
# Pick out the cell_states according to the next_beam_ids. We use a
# different gather_shape here because the cell_state tensors, i.e.
# the tensors that would be gathered from, all have dimension
# greater than two and we need to preserve those dimensions.
# pylint: disable=g-long-lambda
next_cell_state = nest.map_structure(
lambda gather_from: _maybe_tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=gather_from,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[batch_size * beam_width, -1]),
next_cell_state)
# pylint: enable=g-long-lambda
next_state = BeamSearchDecoderState(
cell_state=next_cell_state,
log_probs=next_beam_probs,
lengths=next_prediction_len,
finished=next_finished)
output = BeamSearchDecoderOutput(
scores=next_beam_scores,
predicted_ids=next_word_ids,
parent_ids=next_beam_ids)
return output, next_state
def _get_scores(log_probs, sequence_lengths, length_penalty_weight):
"""Calculates scores for beam search hypotheses.
Args:
log_probs: The log probabilities with shape
`[batch_size, beam_width, vocab_size]`.
sequence_lengths: An `int32` tensor of prediction lengths, shaped like
`log_probs`.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
Returns:
The scores normalized by the length_penalty.
"""
length_penalty_ = _length_penalty(
sequence_lengths=sequence_lengths, penalty_factor=length_penalty_weight)
return log_probs / length_penalty_
def _length_penalty(sequence_lengths, penalty_factor):
"""Calculates the length penalty. See https://arxiv.org/abs/1609.08144.
Args:
sequence_lengths: The sequence lengths of all hypotheses, an `int32`
tensor.
penalty_factor: A scalar that weights the length penalty.
Returns:
The length penalty factor, a tensor of the same shape as
`sequence_lengths` (or the scalar 1.0 when the penalty is disabled).
"""
penalty_factor = ops.convert_to_tensor(penalty_factor, name="penalty_factor")
penalty_factor.set_shape(()) # penalty should be a scalar.
static_penalty = tensor_util.constant_value(penalty_factor)
if static_penalty is not None and static_penalty == 0:
return 1.0
return math_ops.div((5. + math_ops.to_float(sequence_lengths))
**penalty_factor, (5. + 1.)**penalty_factor)
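# Worked example (hedged) for penalty_factor alpha = 0.6, the value used
# in the GNMT paper: penalty(L) = ((5 + L) / 6) ** alpha, so
#   L = 1  -> 1.0
#   L = 10 -> (15 / 6) ** 0.6 ~= 1.73
# Dividing the log probabilities by this factor in _get_scores offsets the
# natural bias toward short hypotheses.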
def _mask_probs(probs, eos_token, finished):
"""Masks log probabilities.
The result is that finished beams allocate all probability mass to eos and
unfinished beams remain unchanged.
Args:
probs: Log probabilities of shape `[batch_size, beam_width, vocab_size]`
eos_token: An int32 id corresponding to the EOS token to allocate
probability to.
finished: A boolean tensor of shape `[batch_size, beam_width]` that
specifies which elements in the beam are finished already.
Returns:
A tensor of shape `[batch_size, beam_width, vocab_size]`, where unfinished
beams stay unchanged and finished beams are replaced with a tensor with all
probability on the EOS token.
"""
vocab_size = array_ops.shape(probs)[2]
finished_mask = array_ops.expand_dims(
1. - math_ops.to_float(finished), 2)
# These examples are not finished and we leave them
non_finished_examples = finished_mask * probs
# All finished examples are replaced with a vector that has all
# probability on EOS
finished_row = array_ops.one_hot(
eos_token,
vocab_size,
dtype=probs.dtype,
on_value=0.,
off_value=probs.dtype.min)
finished_examples = (1. - finished_mask) * finished_row
return finished_examples + non_finished_examples
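# Illustration (hedged): with vocab_size=3 and eos_token=2, a finished
# beam's row becomes [dtype.min, dtype.min, 0.], so every continuation
# other than EOS scores effectively -inf; unfinished rows pass through
# unchanged.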
def _maybe_tensor_gather_helper(gather_indices, gather_from, batch_size,
range_size, gather_shape):
"""Maybe applies _tensor_gather_helper.
This applies _tensor_gather_helper when the rank of gather_from is at least
as big as the length of gather_shape. This is used in conjunction with nest
so that we don't apply _tensor_gather_helper to inapplicable values like
scalars.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
or the original tensor if its dimensions are too small.
"""
_check_maybe(gather_from)
if gather_from.shape.ndims >= len(gather_shape):
return _tensor_gather_helper(
gather_indices=gather_indices,
gather_from=gather_from,
batch_size=batch_size,
range_size=range_size,
gather_shape=gather_shape)
else:
return gather_from
def _tensor_gather_helper(gather_indices, gather_from, batch_size,
range_size, gather_shape):
"""Helper for gathering the right indices from the tensor.
This works by reshaping gather_from to gather_shape (e.g. [-1]) and then
gathering from that according to the gather_indices, which are offset by
the right amounts in order to preserve the batch order.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The input batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
"""
range_ = array_ops.expand_dims(math_ops.range(batch_size) * range_size, 1)
gather_indices = array_ops.reshape(gather_indices + range_, [-1])
output = array_ops.gather(
array_ops.reshape(gather_from, gather_shape), gather_indices)
final_shape = array_ops.shape(gather_from)[:1 + len(gather_shape)]
static_batch_size = tensor_util.constant_value(batch_size)
final_static_shape = (tensor_shape.TensorShape([static_batch_size])
.concatenate(
gather_from.shape[1:1 + len(gather_shape)]))
output = array_ops.reshape(output, final_shape)
output.set_shape(final_static_shape)
return output
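# Hedged NumPy sketch of the offset-gather trick used above (numpy is
# assumed purely for illustration; it is not used by this module):
#
#   import numpy as np
#   batch_size, range_size = 2, 3
#   gather_from = np.arange(6).reshape(2, 3)       # [[0,1,2],[3,4,5]]
#   gather_indices = np.array([[2, 0], [1, 1]])    # per-batch beam picks
#   offsets = np.arange(batch_size).reshape(-1, 1) * range_size
#   flat = (gather_indices + offsets).reshape(-1)  # [2, 0, 4, 4]
#   out = gather_from.reshape(-1)[flat].reshape(2, 2)  # [[2, 0], [4, 4]]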
| apache-2.0 |
telwertowski/Books-Mac-OS-X | Versions/Books_3.0b6/Library of Congress.plugin/Contents/Resources/PyZ3950/CQLParser.py | 30 | 33090 | #!/usr/bin/python
# Author: Rob Sanderson (azaroth@liv.ac.uk)
# Distributed and Usable under the GPL
# Version: 1.7
# Most Recent Changes: contexts, new modifier style for 1.1
#
# With thanks to Adam from IndexData and Mike Taylor for their valuable input
from shlex import shlex
from xml.sax.saxutils import escape
from xml.dom.minidom import Node, parseString
from PyZ3950.SRWDiagnostics import *
# Don't use cStringIO as it borks Unicode (apparently)
from StringIO import StringIO
import types
# Parsing strictness flags
errorOnEmptyTerm = 0 # index = "" (often meaningless)
errorOnQuotedIdentifier = 0 # "/foo/bar" = "" (unnecessary BNF restriction)
errorOnDuplicatePrefix = 0 # >a=b >a=c "" (impossible due to BNF)
fullResultSetNameCheck = 1 # srw.rsn=foo and srw.rsn=foo (mutant!!)
# Base values for CQL
serverChoiceRelation = "scr"
serverChoiceIndex = "cql.serverchoice"
order = ['=', '>', '>=', '<', '<=', '<>']
modifierSeparator = "/"
booleans = ['and', 'or', 'not', 'prox']
reservedPrefixes = {"srw" : "http://www.loc.gov/zing/cql/srw-indexes/v1.0/",
"cql" : "info:srw/cql-context-set/1/cql-v1.1"}
XCQLNamespace = "http://www.loc.gov/zing/cql/xcql/"
# End of 'configurable' stuff
class PrefixableObject:
"Root object for triple and searchClause"
prefixes = {}
parent = None
config = None
def __init__(self):
self.prefixes = {}
self.parent = None
self.config = None
def toXCQL(self, depth=0):
# Just generate our prefixes
space = " " * depth
xml = ['%s<prefixes>\n' % (space)]
for p in self.prefixes.keys():
xml.append("%s <prefix>\n%s <name>%s</name>\n%s <identifier>%s</identifier>\n%s </prefix>\n" % (space, space, escape(p), space, escape(self.prefixes[p]), space))
xml.append("%s</prefixes>\n" % (space))
return ''.join(xml)
def addPrefix(self, name, identifier):
if (errorOnDuplicatePrefix and (self.prefixes.has_key(name) or reservedPrefixes.has_key(name))):
# Maybe error
diag = Diagnostic45()
diag.details = name
raise diag;
self.prefixes[name] = identifier
def resolvePrefix(self, name):
# Climb tree
if (reservedPrefixes.has_key(name)):
return reservedPrefixes[name]
elif (self.prefixes.has_key(name)):
return self.prefixes[name]
elif (self.parent is not None):
return self.parent.resolvePrefix(name)
elif (self.config is not None):
# Config is some sort of server config which specifies defaults
return self.config.resolvePrefix(name)
else:
# Top of tree, no config, no resolution->Unknown indexset
# For client we need to allow no prefix?
#diag = Diagnostic15()
#diag.details = name
#raise diag
return None
class PrefixedObject:
"Root object for relation, relationModifier and index"
prefix = ""
prefixURI = ""
value = ""
parent = None
def __init__(self, val):
# All prefixed things are case insensitive
val = val.lower()
if val and val[0] == '"' and val[-1] == '"':
if errorOnQuotedIdentifier:
diag = Diagnostic14()
diag.details = val
raise diag
else:
val = val[1:-1]
self.value = val
self.splitValue()
def __str__(self):
if (self.prefix):
return "%s.%s" % (self.prefix, self.value)
else:
return self.value
def splitValue(self):
f = self.value.find(".")
if (self.value.count('.') > 1):
diag = Diagnostic15()
diag.details = "Multiple '.' characters: %s" % (self.value)
raise(diag)
elif (f == 0):
diag = Diagnostic15()
diag.details = "Null indexset: %s" % (irt.index)
raise(diag)
elif f >= 0:
self.prefix = self.value[:f].lower()
self.value = self.value[f+1:].lower()
def resolvePrefix(self):
if (not self.prefixURI):
self.prefixURI = self.parent.resolvePrefix(self.prefix)
return self.prefixURI
class ModifiableObject:
# Treat modifiers as keys on boolean/relation?
modifiers = []
def __getitem__(self, k):
if (type(k) == types.IntType):
try:
return self.modifiers[k]
except:
return None
for m in self.modifiers:
if (str(m.type) == k or m.type.value == k):
return m
return None
class Triple (PrefixableObject):
"Object to represent a CQL triple"
leftOperand = None
boolean = None
rightOperand = None
def toXCQL(self, depth=0):
"Create the XCQL representation of the object"
space = " " * depth
if (depth == 0):
xml = ['<triple xmlns="%s">\n' % (XCQLNamespace)]
else:
xml = ['%s<triple>\n' % (space)]
if self.prefixes:
xml.append(PrefixableObject.toXCQL(self, depth+1))
xml.append(self.boolean.toXCQL(depth+1))
xml.append("%s <leftOperand>\n" % (space))
xml.append(self.leftOperand.toXCQL(depth+2))
xml.append("%s </leftOperand>\n" % (space))
xml.append("%s <rightOperand>\n" % (space))
xml.append(self.rightOperand.toXCQL(depth+2))
xml.append("%s </rightOperand>\n" % (space))
xml.append("%s</triple>\n" % (space))
return ''.join(xml)
def toCQL(self):
txt = []
if (self.prefixes):
for p in self.prefixes.keys():
if (p != ''):
txt.append('>%s="%s"' % (p, self.prefixes[p]))
else:
txt.append('>"%s"' % (self.prefixes[p]))
prefs = ' '.join(txt)
return "(%s %s %s %s)" % (prefs, self.leftOperand.toCQL(), self.boolean.toCQL(), self.rightOperand.toCQL())
else:
return "(%s %s %s)" % (self.leftOperand.toCQL(), self.boolean.toCQL(), self.rightOperand.toCQL())
def getResultSetId(self, top=None):
if fullResultSetNameCheck == 0 or self.boolean.value in ['not', 'prox']:
return ""
if top == None:
topLevel = 1
top = self;
else:
topLevel = 0
# Iterate over operands and build a list
rsList = []
if isinstance(self.leftOperand, Triple):
rsList.extend(self.leftOperand.getResultSetId(top))
else:
rsList.append(self.leftOperand.getResultSetId(top))
if isinstance(self.rightOperand, Triple):
rsList.extend(self.rightOperand.getResultSetId(top))
else:
rsList.append(self.rightOperand.getResultSetId(top))
if topLevel == 1:
# Check all elements are the same, if so we're a fubar form of present
if (len(rsList) == rsList.count(rsList[0])):
return rsList[0]
else:
return ""
else:
return rsList
class SearchClause (PrefixableObject):
"Object to represent a CQL searchClause"
index = None
relation = None
term = None
def __init__(self, ind, rel, t):
PrefixableObject.__init__(self)
self.index = ind
self.relation = rel
self.term = t
ind.parent = self
rel.parent = self
t.parent = self
def toXCQL(self, depth=0):
"Produce XCQL version of the object"
space = " " * depth
if (depth == 0):
xml = ['<searchClause xmlns="%s">\n' % (XCQLNamespace)]
else:
xml = ['%s<searchClause>\n' % (space)]
if self.prefixes:
xml.append(PrefixableObject.toXCQL(self, depth+1))
xml.append(self.index.toXCQL(depth+1))
xml.append(self.relation.toXCQL(depth+1))
xml.append(self.term.toXCQL(depth+1))
xml.append("%s</searchClause>\n" % (space))
return ''.join(xml)
def toCQL(self):
text = []
for p in self.prefixes.keys():
if (p != ''):
text.append('>%s="%s"' % (p, self.prefixes[p]))
else:
text.append('>"%s"' % (self.prefixes[p]))
text.append('%s %s "%s"' % (self.index, self.relation.toCQL(), self.term))
return ' '.join(text)
def getResultSetId(self, top=None):
idx = self.index
idx.resolvePrefix()
if (idx.prefixURI == reservedPrefixes['cql'] and idx.value.lower() == 'resultsetid'):
return self.term.value
else:
return ""
class Index(PrefixedObject):
"Object to represent a CQL index"
def toXCQL(self, depth=0):
if (depth == 0):
ns = ' xmlns="%s"' % (XCQLNamespace)
else:
ns = ""
return "%s<index%s>%s</index>\n" % (" "*depth, ns, escape(str(self)))
def toCQL(self):
return str(self)
class Relation(PrefixedObject, ModifiableObject):
"Object to represent a CQL relation"
def __init__(self, rel, mods=[]):
self.prefix = "cql"
PrefixedObject.__init__(self, rel)
self.modifiers = mods
for m in mods:
m.parent = self
def toXCQL(self, depth=0):
"Create XCQL representation of object"
if (depth == 0):
ns = ' xmlns="%s"' % (XCQLNamespace)
else:
ns = ""
space = " " * depth
xml = ["%s<relation%s>\n" % (space, ns)]
xml.append("%s <value>%s</value>\n" % (space, escape(self.value)))
if self.modifiers:
xml.append("%s <modifiers>\n" % (space))
for m in self.modifiers:
xml.append(m.toXCQL(depth+2))
xml.append("%s </modifiers>\n" % (space))
xml.append("%s</relation>\n" % (space))
return ''.join(xml)
def toCQL(self):
txt = [self.value]
txt.extend(map(str, self.modifiers))
return '/'.join(txt)
class Term:
value = ""
def __init__(self, v):
if (v <> ""):
# Unquoted literal
if v in ['>=', '<=', '>', '<', '<>', "/", '=']:
diag = Diagnostic25()
diag.details = v
raise diag
# Check existence of meaningful term
nonanchor = 0
for c in v:
if c != "^":
nonanchor = 1
break
if not nonanchor:
diag = Diagnostic32()
diag.details = "Only anchoring charater(s) in term: " + v
raise diag
# Unescape quotes
if (v[0] == '"' and v[-1] == '"'):
v = v[1:-1]
v = v.replace('\\"', '"')
if (not v and errorOnEmptyTerm):
diag = Diagnostic27()
raise diag
# Check for badly placed \s
startidx = 0
idx = v.find("\\", startidx)
while (idx > -1):
if idx + 1 >= len(v) or v[idx+1] not in ['?', '\\', '*', '^']:
diag = Diagnostic26()
diag.details = v
raise diag
startidx = idx + 2
idx = v.find("\\", startidx)
elif (errorOnEmptyTerm):
diag = Diagnostic27()
raise diag
self.value = v
def __str__(self):
return self.value
def toXCQL(self, depth=0):
if (depth == 0):
ns = ' xmlns="%s"' % (XCQLNamespace)
else:
ns = ""
return "%s<term%s>%s</term>\n" % (" "*depth, ns, escape(self.value))
class Boolean(ModifiableObject):
"Object to represent a CQL boolean"
value = ""
parent = None
def __init__(self, bool, mods=[]):
self.value = bool
self.modifiers = mods
self.parent = None
def toXCQL(self, depth=0):
"Create XCQL representation of object"
space = " " * depth
xml = ["%s<boolean>\n" % (space)]
xml.append("%s <value>%s</value>\n" % (space, escape(self.value)))
if self.modifiers:
xml.append("%s <modifiers>\n" % (space))
for m in self.modifiers:
xml.append(m.toXCQL(depth+2))
xml.append("%s </modifiers>\n" % (space))
xml.append("%s</boolean>\n" % (space))
return ''.join(xml)
def toCQL(self):
txt = [self.value]
for m in self.modifiers:
txt.append(m.toCQL())
return '/'.join(txt)
def resolvePrefix(self, name):
return self.parent.resolvePrefix(name)
class ModifierType(PrefixedObject):
# Same as index, but we'll XCQLify in ModifierClause
parent = None
prefix = "cql"
class ModifierClause:
"Object to represent a relation modifier"
parent = None
type = None
comparison = ""
value = ""
def __init__(self, type, comp="", val=""):
self.type = ModifierType(type)
self.type.parent = self
self.comparison = comp
self.value = val
def __str__(self):
if (self.value):
return "%s%s%s" % (str(self.type), self.comparison, self.value)
else:
return "%s" % (str(self.type))
def toXCQL(self, depth=0):
if (self.value):
return "%s<modifier>\n%s<type>%s</type>\n%s<comparison>%s</comparison>\n%s<value>%s</value>\n%s</modifier>\n" % (" " * depth, " " * (depth+1), escape(str(self.type)), " " * (depth+1), escape(self.comparison), " " * (depth+1), escape(self.value), " " * depth)
else:
return "%s<modifier><type>%s</type></modifier>\n" % (" " * depth, escape(str(self.type)))
def toCQL(self):
return str(self)
def resolvePrefix(self, name):
# Need to skip parent, which has its own resolvePrefix
# eg boolean or relation, neither of which is prefixable
return self.parent.parent.resolvePrefix(name)
# Requires changes for: <= >= <>, and escaped \" in "
# From shlex.py (std library for 2.2+)
class CQLshlex(shlex):
"shlex with additions for CQL parsing"
quotes = '"'
commenters = ""
nextToken = ""
def __init__(self, thing):
shlex.__init__(self, thing)
self.wordchars += "!@#$%^&*-+{}[];,.?|~`:\\"
self.wordchars += ''.join(map(chr, range(128,254)))
def read_token(self):
"Read a token from the input stream (no pushback or inclusions)"
while 1:
if (self.nextToken != ""):
self.token = self.nextToken
self.nextToken = ""
# Bah. SUPER ugly non portable
if self.token == "/":
self.state = ' '
break
nextchar = self.instream.read(1)
if nextchar == '\n':
self.lineno = self.lineno + 1
if self.debug >= 3:
print "shlex: in state ", repr(self.state), " I see character:", repr(nextchar)
if self.state is None:
self.token = '' # past end of file
break
elif self.state == ' ':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print "shlex: I see whitespace in whitespace state"
if self.token:
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif nextchar in self.wordchars:
self.token = nextchar
self.state = 'a'
elif nextchar in self.quotes:
self.token = nextchar
self.state = nextchar
elif nextchar in ['<', '>']:
self.token = nextchar
self.state = '<'
else:
self.token = nextchar
if self.token:
break # emit current token
else:
continue
elif self.state == '<':
# Only accumulate <=, >= or <>
if self.token == ">" and nextchar == "=":
self.token = self.token + nextchar
self.state = ' '
break
elif self.token == "<" and nextchar in ['>', '=']:
self.token = self.token + nextchar
self.state = ' '
break
elif not nextchar:
self.state = None
break
elif nextchar == "/":
self.state = "/"
self.nextToken = "/"
break
elif nextchar in self.wordchars:
self.state='a'
self.nextToken = nextchar
break
elif nextchar in self.quotes:
self.state=nextchar
self.nextToken = nextchar
break
else:
self.state = ' '
break
elif self.state in self.quotes:
self.token = self.token + nextchar
# Allow escaped quotes
if nextchar == self.state and self.token[-2] != '\\':
self.state = ' '
break
elif not nextchar: # end of file
if self.debug >= 2:
print "shlex: I see EOF in quotes state"
# Override SHLEX's ValueError to throw diagnostic
diag = Diagnostic14()
diag.details = self.token[:-1]
raise diag
elif self.state == 'a':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print "shlex: I see whitespace in word state"
self.state = ' '
if self.token:
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif nextchar in self.wordchars or nextchar in self.quotes:
self.token = self.token + nextchar
elif nextchar in ['>', '<']:
self.nextToken = nextchar
self.state = '<'
break
else:
self.pushback = [nextchar] + self.pushback
if self.debug >= 2:
print "shlex: I see punctuation in word state"
self.state = ' '
if self.token:
break # emit current token
else:
continue
result = self.token
self.token = ''
if self.debug > 1:
if result:
print "shlex: raw token=" + `result`
else:
print "shlex: raw token=EOF"
return result
class CQLParser:
"Token parser to create object structure for CQL"
parser = ""
currentToken = ""
nextToken = ""
def __init__(self, p):
""" Initialise with shlex parser """
self.parser = p
self.fetch_token() # Fetches to next
self.fetch_token() # Fetches to curr
def is_boolean(self, token):
"Is the token a boolean"
token = token.lower()
return token in booleans
def fetch_token(self):
""" Read ahead one token """
tok = self.parser.get_token()
self.currentToken = self.nextToken
self.nextToken = tok
def prefixes(self):
"Create prefixes dictionary"
prefs = {}
while (self.currentToken == ">"):
# Strip off maps
self.fetch_token()
if self.nextToken == "=":
# Named map
name = self.currentToken
self.fetch_token() # = is current
self.fetch_token() # id is current
identifier = self.currentToken
self.fetch_token()
else:
name = ""
identifier = self.currentToken
self.fetch_token()
if (errorOnDuplicatePrefix and prefs.has_key(name)):
# Error condition
diag = Diagnostic45()
diag.details = name
raise diag;
if len(identifier) > 1 and identifier[0] == '"' and identifier[-1] == '"':
identifier = identifier[1:-1]
prefs[name.lower()] = identifier
return prefs
def query(self):
""" Parse query """
prefs = self.prefixes()
left = self.subQuery()
while 1:
if not self.currentToken:
break;
bool = self.is_boolean(self.currentToken)
if bool:
boolobject = self.boolean()
right = self.subQuery()
# Setup Left Object
trip = tripleType()
trip.leftOperand = left
trip.boolean = boolobject
trip.rightOperand = right
left.parent = trip
right.parent = trip
boolobject.parent = trip
left = trip
else:
break;
for p in prefs.keys():
left.addPrefix(p, prefs[p])
return left
def subQuery(self):
""" Find either query or clause """
if self.currentToken == "(":
self.fetch_token() # Skip (
object = self.query()
if self.currentToken == ")":
self.fetch_token() # Skip )
else:
diag = Diagnostic13()
diag.details = self.currentToken
raise diag
else:
prefs = self.prefixes()
if (prefs):
object = self.query()
for p in prefs.keys():
object.addPrefix(p, prefs[p])
else:
object = self.clause()
return object
def clause(self):
""" Find searchClause """
bool = self.is_boolean(self.nextToken)
if not bool and not (self.nextToken in [')', '(', '']):
index = indexType(self.currentToken)
self.fetch_token() # Skip Index
rel = self.relation()
if (self.currentToken == ''):
diag = Diagnostic10()
diag.details = "Expected Term, got end of query."
raise(diag)
term = termType(self.currentToken)
self.fetch_token() # Skip Term
irt = searchClauseType(index, rel, term)
elif self.currentToken and (bool or self.nextToken in [')', '']):
irt = searchClauseType(indexType(serverChoiceIndex), relationType(serverChoiceRelation), termType(self.currentToken))
self.fetch_token()
elif self.currentToken == ">":
prefs = self.prefixes()
# iterate to get object
object = self.clause()
for p in prefs.keys():
object.addPrefix(p, prefs[p]);
return object
else:
diag = Diagnostic10()
diag.details = "Expected Boolean or Relation but got: " + self.currentToken
raise diag
return irt
def modifiers(self):
mods = []
while (self.currentToken == modifierSeparator):
self.fetch_token()
mod = self.currentToken
mod = mod.lower()
if (mod == modifierSeparator):
diag = Diagnostic20()
diag.details = "Null modifier"
raise diag
self.fetch_token()
comp = self.currentToken
if (comp in order):
self.fetch_token()
value = self.currentToken
self.fetch_token()
else:
comp = ""
value = ""
mods.append(ModifierClause(mod, comp, value))
return mods
def boolean(self):
""" Find boolean """
self.currentToken = self.currentToken.lower()
if self.currentToken in booleans:
bool = booleanType(self.currentToken)
self.fetch_token()
bool.modifiers = self.modifiers()
for b in bool.modifiers:
b.parent = bool
else:
diag = Diagnostic37()
diag.details = self.currentToken
raise diag
return bool
def relation(self):
""" Find relation """
self.currentToken = self.currentToken.lower()
rel = relationType(self.currentToken)
self.fetch_token()
rel.modifiers = self.modifiers()
for r in rel.modifiers:
r.parent = rel
return rel
class XCQLParser:
""" Parser for XCQL using some very simple DOM """
def firstChildElement(self, elem):
""" Find first child which is an Element """
for c in elem.childNodes:
if c.nodeType == Node.ELEMENT_NODE:
return c
return None
def firstChildData(self,elem):
""" Find first child which is Data """
for c in elem.childNodes:
if c.nodeType == Node.TEXT_NODE:
return c
return None
def searchClause(self, elem):
""" Process a <searchClause> """
sc = searchClauseType()
for c in elem.childNodes:
if c.nodeType == Node.ELEMENT_NODE:
if c.localName == "index":
sc.index = indexType(self.firstChildData(c).data.lower())
elif c.localName == "term":
sc.term = termType(self.firstChildData(c).data)
elif c.localName == "relation":
sc.relation = self.relation(c)
elif c.localName == "prefixes":
sc.prefixes = self.prefixes(c)
else:
raise(ValueError, c.localName)
return sc
def triple(self, elem):
""" Process a <triple> """
trip = tripleType()
for c in elem.childNodes:
if c.nodeType == Node.ELEMENT_NODE:
if c.localName == "boolean":
trip.boolean = self.boolean(c)
elif c.localName == "prefixes":
trip.prefixes = self.prefixes(c)
elif c.localName == "leftOperand":
c2 = self.firstChildElement(c)
if c2.localName == "searchClause":
trip.leftOperand = self.searchClause(c2)
else:
trip.leftOperand = self.triple(c2)
else:
c2 = self.firstChildElement(c)
if c2.localName == "searchClause":
trip.rightOperand = self.searchClause(c2)
else:
trip.rightOperand = self.triple(c2)
return trip
def relation(self, elem):
""" Process a <relation> """
rel = relationType()
for c in elem.childNodes:
if c.nodeType == Node.ELEMENT_NODE:
if c.localName == "value":
rel.value = c.firstChild.data.lower()
elif c.localName == "modifiers":
mods = []
for c2 in c.childNodes:
if c2.nodeType == Node.ELEMENT_NODE:
if c2.localName == "modifier":
for c3 in c2.childNodes:
if c3.localName == "value":
val = self.firstChildData(c2).data.lower()
mods.append(val)
rel.modifiers = mods
return rel
def boolean(self, elem):
"Process a <boolean>"
bool = booleanType()
for c in elem.childNodes:
if c.nodeType == Node.ELEMENT_NODE:
if c.localName == "value":
bool.value = self.firstChildData(c).data.lower()
else:
# Can be in any order, so we need to extract, then order
mods = {}
for c2 in c.childNodes:
if c2.nodeType == Node.ELEMENT_NODE:
if c2.localName == "modifier":
type = ""
value = ""
for c3 in c2.childNodes:
if c3.nodeType == Node.ELEMENT_NODE:
if c3.localName == "value":
value = self.firstChildData(c3).data.lower()
elif c3.localName == "type":
type = self.firstChildData(c3).data
mods[type] = value
modlist = []
for t in booleanModifierTypes[1:]:
if mods.has_key(t):
modlist.append(mods[t])
else:
modlist.append('')
bool.modifiers = modlist
return bool
def prefixes(self, elem):
"Process <prefixes>"
prefs = {}
for c in elem.childNodes:
if c.nodeType == Node.ELEMENT_NODE:
# prefix
name = ""
identifier = ""
for c2 in c.childNodes:
if c2.nodeType == Node.ELEMENT_NODE:
if c2.localName == "name":
name = self.firstChildData(c2).data.lower()
elif c2.localName == "identifier":
identifier = self.firstChildData(c2).data
prefs[name] = identifier
return prefs
def xmlparse(s):
""" API. Return a seachClause/triple object from XML string """
doc = parseString(s)
q = xcqlparse(doc.firstChild)
return q
def xcqlparse(query):
""" API. Return a searchClause/triple object from XML DOM objects"""
# Requires only properties of objects so we don't care how they're generated
p = XCQLParser()
if query.localName == "searchClause":
return p.searchClause(query)
else:
return p.triple(query)
def parse(query):
""" API. Return a searchClause/triple object from CQL string"""
try:
query = query.encode("utf-8")
except:
diag = Diagnostic10()
diag.details = "Cannot parse non utf-8 characters"
raise diag
q = StringIO(query)
lexer = CQLshlex(q)
parser = CQLParser(lexer)
object = parser.query()
if parser.currentToken != '':
diag = Diagnostic10()
diag.details = "Unprocessed tokens remain: " + repr(parser.currentToken)
raise diag
else:
del lexer
del parser
del q
return object
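# Hedged round-trip example for the parse() API (the query string is
# illustrative and exact serialisation whitespace may differ):
#
# >>> q = parse('dc.title any "fish chips"')
# >>> q.toCQL()
# 'dc.title any "fish chips"'
# >>> q.toXCQL() # emits <searchClause> XML in the XCQL namespace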
# Assign our objects to generate
tripleType = Triple
booleanType = Boolean
relationType = Relation
searchClauseType = SearchClause
modifierClauseType = ModifierClause
modifierTypeType = ModifierType
indexType = Index
termType = Term
try:
from CQLUtils import *
tripleType = CTriple
booleanType = CBoolean
relationType = CRelation
searchClauseType = CSearchClause
modifierClauseType = CModifierClause
modifierTypeType = CModifierType
indexType = CIndex
termType = CTerm
except:
# Nested scopes. Utils needs our classes to parent
# We need its classes to build (maybe)
pass
if (__name__ == "__main__"):
import sys;
s = sys.stdin.readline()
try:
q = parse(s);
except SRWDiagnostic, diag:
# Print a full version, not just str()
print "Diagnostic Generated."
print " Code: " + str(diag.code)
print " Details: " + str(diag.details)
print " Message: " + str(diag.message)
else:
print q.toXCQL()[:-1];
| mit |
r39132/airflow | tests/contrib/utils/test_weekday.py | 6 | 1607 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from enum import Enum
from airflow.contrib.utils.weekday import WeekDay
class WeekDayTest(unittest.TestCase):
def test_weekday_enum_length(self):
self.assertEqual(len(WeekDay), 7)
def test_weekday_name_value(self):
weekdays = "MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY SUNDAY"
weekdays = weekdays.split()
for i, weekday in enumerate(weekdays, start=1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertTrue(e in WeekDay)
self.assertTrue(0 < e < 8)
self.assertTrue(type(e) is WeekDay)
self.assertTrue(isinstance(e, int))
self.assertTrue(isinstance(e, Enum))
| apache-2.0 |
tmerrick1/spack | var/spack/repos/builtin/packages/subversion/package.py | 5 | 4171 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Subversion(Package):
"""Apache Subversion - an open source version control system."""
homepage = 'https://subversion.apache.org/'
url = 'http://archive.apache.org/dist/subversion/subversion-1.8.13.tar.gz'
version('1.9.7', '1a5f48acf9d0faa60e8c7aea96a9b29ab1d4dcac')
version('1.9.6', '89e1b3f9d79422c094ccb95769360d5fe7df2bb1')
version('1.9.5', 'ac9f8ee235f1b667dd6506864af8035aaedfc2d9')
version('1.9.3', 'a92bcfaec4e5038f82c74a7b5bbd2f46')
version('1.8.17', 'd1f8d45f97168d6271c58c5b25421cc32954c81b')
version('1.8.13', '8065b3698d799507fb72dd7926ed32b6')
variant('perl', default=False, description='Build with Perl bindings')
depends_on('apr')
depends_on('apr-util')
depends_on('zlib')
depends_on('sqlite')
depends_on('serf')
extends('perl', when='+perl')
depends_on('swig@1.3.24:3.0.0', when='+perl')
depends_on('perl-term-readkey', when='+perl')
# Optional: We need swig if we want the Perl, Python or Ruby
# bindings.
# depends_on('swig')
# depends_on('python')
# depends_on('perl')
# depends_on('ruby')
# Installation has race cases.
parallel = False
def install(self, spec, prefix):
# configure, build, install:
# Ref:
# http://www.linuxfromscratch.org/blfs/view/svn/general/subversion.html
options = ['--prefix=%s' % prefix]
options.append('--with-apr=%s' % spec['apr'].prefix)
options.append('--with-apr-util=%s' % spec['apr-util'].prefix)
options.append('--with-zlib=%s' % spec['zlib'].prefix)
options.append('--with-sqlite=%s' % spec['sqlite'].prefix)
options.append('--with-serf=%s' % spec['serf'].prefix)
if 'swig' in spec:
options.append('--with-swig=%s' % spec['swig'].prefix)
if 'perl' in spec:
options.append('PERL=%s' % spec['perl'].command.path)
configure(*options)
make()
if self.run_tests:
make('check')
make('install')
if spec.satisfies('+perl'):
make('swig-pl')
if self.run_tests:
make('check-swig-pl')
make('install-swig-pl-lib')
with working_dir(join_path(
'subversion', 'bindings', 'swig', 'perl', 'native')):
perl = which('perl')
perl('Makefile.PL', 'INSTALL_BASE=%s' % prefix)
make('install')
# python bindings
# make('swig-py',
# 'swig-pydir=/usr/lib/python2.7/site-packages/libsvn',
# 'swig_pydir_extra=/usr/lib/python2.7/site-packages/svn')
# make('install-swig-py',
# 'swig-pydir=/usr/lib/python2.7/site-packages/libsvn',
# 'swig_pydir_extra=/usr/lib/python2.7/site-packages/svn')
# ruby bindings
# make('swig-rb')
# make('install-swig-rb')
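# Hedged CLI sketch: with this package file on the Spack repo path, the
# Perl bindings variant would typically be requested as
#   spack install subversion +perl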
| lgpl-2.1 |
silly-wacky-3-town-toon/SOURCE-COD | Panda3D-1.10.0/python/Lib/JOD/NewJamoDrum.py | 3 | 10076 | """
A generic Jam-o-Drum input interface for the Jam-o-Drum that uses the OptiPAC
for both spinners and pads.
@author: U{Ben Buchwald <bb2@alumni.cmu.edu>}
Last Updated: 2/27/2006
"""
from direct.showbase.DirectObject import DirectObject
import string, sys, md5
from pandac.PandaModules import Filename
from pandac.PandaModules import WindowProperties
from pandac.PandaModules import ConfigVariableList
class JamoDrum(DirectObject):
"""
Class representing input from a Jam-o-Drum. To handle Jam-o-Drum input
accept the Panda messages JOD_SPIN_x and JOD_HIT_x where x is a number between
0 and 3 for the 4 stations. Spin messages also pass a parameter which is the
angle spun in degrees. Hit messages also pass a parameter which is the force
the pad was hit with in the range 0.0-1.0 (will probably be fairly low). With
or without actual Jam-o-Drum hardware this class will automatically respond
to the keys (j,k,l),(s,d,f),(w,e,r), and (u,i,o) corresponding to spin left 10
degrees, hit with full force, and spin right 10 degrees respectively for each
of the stations. You must call L{poll} periodically to receive input from the
real Jam-o-Drum hardware.
"""
def __init__(self, useJOD=None):
"""
@keyword useJOD: connected to actual drumpads and spinners to read from (default: read from config.prc)
@type useJOD: bool
"""
self.configPath = Filename("/c/jamoconfig.txt")
self.logPath = Filename("/c/jamoconfig.log")
self.clearConfig()
self.simulate()
self.log = sys.stdout
self.configMissing = 0
self.hardwareChanged = 0
if (useJOD==None):
useJOD = base.config.GetBool("want-jamodrum", True)
self.useJOD = useJOD
if (useJOD):
self.setLog(self.logPath)
self.devindices = range(1,base.win.getNumInputDevices())
self.readConfigFile(self.configPath)
self.prepareDevices()
props = WindowProperties()
props.setCursorHidden(1)
if (sys.platform == "win32"):
props.setZOrder(WindowProperties.ZTop)
base.win.requestProperties(props)
self.setLog(None)
def setLog(self, fn):
if (self.log != sys.stdout):
self.log.close()
self.log = sys.stdout
if (fn):
try:
self.log = open(fn.toOsSpecific(), "w")
except:
self.log = sys.stdout
def generateMouseDigest(self):
m = md5.md5()
for i in range(base.win.getNumInputDevices()):
m.update(base.win.getInputDeviceName(i))
m.update("\n")
return m.hexdigest()
def reportDevices(self):
for devindex in self.devindices:
self.log.write("Encoder Detected: "+base.win.getInputDeviceName(devindex)+"\n")
def clearConfig(self):
self.ratio = 8.71
self.wheelConfigs = [[0,0],[0,0],[0,0],[0,0]]
self.padConfigs = [[0,0],[0,0],[0,0],[0,0]]
def getIntVal(self, spec):
try:
return int(spec)
except:
return -1
def setWheelConfig(self, station, axis, device):
if (axis=="x") or (axis=="X"): axis=0
if (axis=="y") or (axis=="Y"): axis=1
istation = self.getIntVal(station)
iaxis = self.getIntVal(axis)
if (istation < 0) or (istation > 3):
self.log.write("Wheel Config: Invalid station index "+str(station)+"\n")
return
if (iaxis < 0) or (iaxis > 1):
self.log.write("Wheel Config: Invalid axis index "+str(axis)+"\n")
return
self.wheelConfigs[istation] = [iaxis, str(device)]
def setPadConfig(self, station, button, device):
istation = self.getIntVal(station)
ibutton = self.getIntVal(button)
if (istation < 0) or (istation > 3):
self.log.write("Pad Config: Invalid station index "+str(station)+"\n")
return
if (ibutton < 0) or (ibutton > 2):
self.log.write("Pad Config: Invalid button index "+str(button)+"\n")
return
self.padConfigs[istation] = [ibutton, device]
def readConfigFile(self, fn):
digest = self.generateMouseDigest()
self.clearConfig()
try:
file = open(fn.toOsSpecific(),"r")
lines = file.readlines()
file.close()
except:
self.configMissing = 1
self.log.write("Could not read "+fn.toOsSpecific()+"\n")
return
for line in lines:
line = line.strip(" \t\r\n")
if (line=="") or (line[0]=="#"):
continue
words = line.split(" ")
if (words[0]=="wheel"):
if (len(words)==4):
self.setWheelConfig(words[1],words[2],words[3])
else:
self.log.write("Wheel Config: invalid syntax\n")
elif (words[0]=="pad"):
if (len(words)==4):
self.setPadConfig(words[1],words[2],words[3])
else:
self.log.write("Pad Config: invalid syntax\n")
elif (words[0]=="ratio"):
try:
self.ratio = float(words[1])
except:
self.log.write("Ratio Config: invalid syntax\n")
elif (words[0]=="digest"):
if (len(words)==2):
if (digest != words[1]):
self.hardwareChanged = 1
else:
self.log.write("Digest: invalid syntax")
else:
self.log.write("Unrecognized config directive "+line+"\n")
def writeConfigFile(self, fn):
try:
file = open(fn.toOsSpecific(),"w")
file.write("ratio "+str(self.ratio)+"\n")
for i in range(4):
wheelinfo = self.wheelConfigs[i]
file.write("wheel "+str(i)+" "+str(wheelinfo[0])+" "+wheelinfo[1]+"\n")
padinfo = self.padConfigs[i]
file.write("pad "+str(i)+" "+str(padinfo[0])+" "+padinfo[1]+"\n")
file.close()
except:
self.log.write("Could not write "+fn.toOsSpecific()+"\n")
def findWheel(self, devaxis, devname):
for wheelindex in range(4):
wheelinfo = self.wheelConfigs[wheelindex]
wheelaxis = wheelinfo[0]
wheeldevice = wheelinfo[1]
if (devname == wheeldevice) and (devaxis == wheelaxis):
return wheelindex
return -1
def findPad(self, devbutton, devname):
for padindex in range(4):
padinfo = self.padConfigs[padindex]
padbutton = padinfo[0]
paddevice = padinfo[1]
if (devname == paddevice) and (devbutton == padbutton):
return padindex
return -1
def prepareDevices(self):
"""
Each axis or button will be associated with a wheel or pad.
Any axis or button not in the config list will be associated
with wheel -1 or pad -1.
"""
self.polls = []
for devindex in range(1, base.win.getNumInputDevices()):
devname = base.win.getInputDeviceName(devindex)
for devaxis in range(2):
target = self.findWheel(devaxis, devname)
self.log.write("Axis "+str(devaxis)+" of "+devname+" controls wheel "+str(target)+"\n")
self.polls.append([devaxis, devindex, target, 0])
for devbutton in range(3):
target = self.findPad(devbutton, devname)
sig = "mousedev"+str(devindex)+"-mouse"+str(devbutton+1)
self.log.write("Button "+str(devbutton)+" of "+devname+" controls pad "+str(target)+"\n")
self.ignore(sig)
self.accept(sig, self.hit, [target, 1.0])
def simulate(self,spin=10.0,hit=1.0):
"""
Accept keyboard keys to simulate Jam-o-Drum input.
@keyword spin: degrees to spin for each keystroke (default: 10.0)
@type spin: float
@keyword hit: force to hit for each keystroke (default: 1.0)
@type hit: float
"""
self.accept('k',self.hit,[0,hit])
self.accept('d',self.hit,[1,hit])
self.accept('e',self.hit,[2,hit])
self.accept('i',self.hit,[3,hit])
self.accept('j',self.spin,[0,spin])
self.accept('l',self.spin,[0,-spin])
self.accept('s',self.spin,[1,spin])
self.accept('f',self.spin,[1,-spin])
self.accept('w',self.spin,[2,-spin])
self.accept('r',self.spin,[2,spin])
self.accept('u',self.spin,[3,-spin])
self.accept('o',self.spin,[3,spin])
# end simulate
def poll(self):
"""
Call this each frame to poll actual drumpads and spinners for input.
If input occurs messages will be sent.
"""
if (not self.useJOD):
return
offsets = [0.0,0.0,0.0,0.0]
for info in self.polls:
axis = info[0]
devindex = info[1]
wheel = info[2]
last = info[3]
if (axis == 0):
pos = base.win.getPointer(devindex).getX()
else:
pos = base.win.getPointer(devindex).getY()
if (pos != last):
diff = (pos-last)/self.ratio
if (wheel < 0):
offsets[0] += diff
offsets[1] += diff
offsets[2] += diff
offsets[3] += diff
else:
offsets[wheel] += diff
info[3] = pos
for i in range(4):
if (offsets[i] != 0.0):
self.spin(i,offsets[i])
def spin(self,station,angle):
"""
Sends a JOD_SPIN_<station> message
"""
sig = "JOD_SPIN_"+str(station)
messenger.send(sig,[angle])
def hit(self,station,force):
"""
Sends a JOD_HIT_<station> message
"""
if (station < 0):
for station in range(4):
sig = "JOD_HIT_"+str(station)
messenger.send(sig,[force])
else:
sig = "JOD_HIT_"+str(station)
messenger.send(sig,[force])
# end class JamoDrum
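# --- Illustrative listener sketch (hedged; not part of the original
# module). A minimal consumer of the JOD_SPIN_x / JOD_HIT_x messages
# described in the class docstring; the Station class below is a
# hypothetical name.
#
# class Station(DirectObject):
#     def __init__(self, index):
#         self.index = index
#         self.angle = 0.0
#         self.accept("JOD_SPIN_%d" % index, self.onSpin)
#         self.accept("JOD_HIT_%d" % index, self.onHit)
#     def onSpin(self, degrees):
#         self.angle = (self.angle + degrees) % 360.0
#     def onHit(self, force):
#         print "station %d hit with force %.2f" % (self.index, force)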
| apache-2.0 |
ColdSauce/IsSittingOnButt | server/env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/response.py | 478 | 16459 | try:
import http.client as httplib
except ImportError:
import httplib
import zlib
import io
from socket import timeout as SocketTimeout
from ._collections import HTTPHeaderDict
from .exceptions import (
ProtocolError, DecodeError, ReadTimeoutError, ResponseNotChunked
)
from .packages.six import string_types as basestring, binary_type, PY3
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = binary_type()
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
return self._obj.decompress(data)
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
class GzipDecoder(object):
def __init__(self):
self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
return self._obj.decompress(data)
def _get_decoder(mode):
if mode == 'gzip':
return GzipDecoder()
return DeflateDecoder()
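# Hedged usage sketch: the object returned by _get_decoder() exposes a
# zlib-like decompress() interface, e.g. (illustrative only):
#
#   decoder = _get_decoder('gzip')
#   body = b''.join(decoder.decompress(chunk) for chunk in wire_chunks)
#
# DeflateDecoder first tries RFC 1950 zlib-wrapped data and, on failure,
# falls back to raw deflate (-zlib.MAX_WBITS) for servers that send bare
# deflate streams.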
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, the body will be decoded based on the 'content-encoding'
header (e.g. 'gzip' and 'deflate'); if False, the raw bytes are
returned instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers)
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
if body and isinstance(body, (basestring, binary_type)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
# Are we using the chunked-style of transfer encoding?
self.chunked = False
self.chunk_left = None
tr_enc = self.headers.get('transfer-encoding', '').lower()
# Don't incur the penalty of creating a list and then discarding it
encodings = (enc.strip() for enc in tr_enc.split(","))
if "chunked" in encodings:
self.chunked = True
# We certainly don't want to preload content when the response is chunked.
if not self.chunked and preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:``HTTPResponse.read`` if bytes
are encoded on the wire (e.g, compressed).
"""
return self._fp_bytes_read
def _init_decoder(self):
"""
        Set up the _decoder attribute if necessary.
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
def _decode(self, data, decode_content, flush_decoder):
"""
Decode the data passed in and potentially flush the decoder.
"""
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
content_encoding = self.headers.get('content-encoding', '').lower()
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding, e)
if flush_decoder and decode_content and self._decoder:
buf = self._decoder.decompress(binary_type())
data += buf + self._decoder.flush()
return data
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
            If True, will save the returned data such that the same result is
            returned regardless of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
self._init_decoder()
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
try:
try:
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
flush_decoder = True
except SocketTimeout:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if 'read operation timed out' not in str(e): # Defensive:
# This shouldn't happen but just in case we're missing an edge
# case, let's avoid swallowing SSL errors.
raise
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except HTTPException as e:
# This includes IncompleteRead.
raise ProtocolError('Connection broken: %r' % e, e)
self._fp_bytes_read += len(data)
data = self._decode(data, decode_content, flush_decoder)
if cache_content:
self._body = data
return data
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
            this much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
if self.chunked:
for line in self.read_chunked(amt, decode_content=decode_content):
yield line
else:
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
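    # Example (hedged usage sketch): iterate over a response without
    # preloading it; `sock_file` and `handle` are hypothetical.
    #   resp = HTTPResponse(body=sock_file, preload_content=False)
    #   for chunk in resp.stream(2**10, decode_content=True):
    #       handle(chunk)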
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
headers = r.msg
if not isinstance(headers, HTTPHeaderDict):
if PY3: # Python 3
headers = HTTPHeaderDict(headers.items())
else: # Python 2
headers = HTTPHeaderDict.from_httplib(headers)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
resp = ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
return resp
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
# This method is required for `io` module compatibility.
return True
def readinto(self, b):
# This method is required for `io` module compatibility.
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[:len(temp)] = temp
return len(temp)
def _update_chunk_length(self):
# First, we'll figure out length of a chunk and then
# we'll try to read it from socket.
if self.chunk_left is not None:
return
line = self._fp.fp.readline()
line = line.split(b';', 1)[0]
try:
self.chunk_left = int(line, 16)
except ValueError:
# Invalid chunked protocol response, abort.
self.close()
raise httplib.IncompleteRead(line)
def _handle_chunk(self, amt):
returned_chunk = None
if amt is None:
chunk = self._fp._safe_read(self.chunk_left)
returned_chunk = chunk
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
elif amt < self.chunk_left:
value = self._fp._safe_read(amt)
self.chunk_left = self.chunk_left - amt
returned_chunk = value
elif amt == self.chunk_left:
value = self._fp._safe_read(amt)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
returned_chunk = value
else: # amt > self.chunk_left
returned_chunk = self._fp._safe_read(self.chunk_left)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
return returned_chunk
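    # Worked example of the chunked framing handled above: the body
    #   "4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"
    # carries two chunks. _update_chunk_length() parses the hex sizes ("4",
    # "5", then the terminating "0"), and _handle_chunk() reads each payload
    # ("Wiki", "pedia") plus its trailing CRLF, reassembling "Wikipedia".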
def read_chunked(self, amt=None, decode_content=None):
"""
Similar to :meth:`HTTPResponse.read`, but with an additional
parameter: ``decode_content``.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
self._init_decoder()
# FIXME: Rewrite this method and make it a class with a better structured logic.
if not self.chunked:
raise ResponseNotChunked("Response is not chunked. "
"Header 'transfer-encoding: chunked' is missing.")
if self._original_response and self._original_response._method.upper() == 'HEAD':
# Don't bother reading the body of a HEAD request.
# FIXME: Can we do this somehow without accessing private httplib _method?
self._original_response.close()
return
while True:
self._update_chunk_length()
if self.chunk_left == 0:
break
chunk = self._handle_chunk(amt)
yield self._decode(chunk, decode_content=decode_content,
flush_decoder=True)
# Chunk content ends with \r\n: discard it.
while True:
line = self._fp.fp.readline()
if not line:
# Some sites may not end with '\r\n'.
break
if line == b'\r\n':
break
# We read everything; close the "file".
if self._original_response:
self._original_response.close()
self.release_conn()
| apache-2.0 |
mastizada/kuma | vendor/packages/setuptools/setuptools/command/test.py | 32 | 4442 | from setuptools import Command
from distutils.errors import DistutilsOptionError
import sys
from pkg_resources import *
from unittest import TestLoader, main
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
if module.__name__!='setuptools.tests.doctest': # ugh
tests.append(TestLoader.loadTestsFromModule(self,module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file!='__init__.py':
submodule = module.__name__+'.'+file[:-3]
else:
if resource_exists(
module.__name__, file+'/__init__.py'
):
submodule = module.__name__+'.'+file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests)!=1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
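# Example (illustrative sketch): a module can hand extra tests to
# ScanningLoader via an `additional_tests` hook; the module name below is a
# placeholder.
#   def additional_tests():
#       import doctest, unittest
#       return unittest.TestSuite([doctest.DocTestSuite('mypackage.util')])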
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=','m', "Run 'test_suite' in specified module"),
('test-suite=','s',
"Test suite to run (e.g. 'some_module.test_suite')"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
def finalize_options(self):
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module+".test_suite"
elif self.test_module:
raise DistutilsOptionError(
"You may specify a module or a suite, but not both"
)
self.test_args = [self.test_suite]
if self.verbose:
self.test_args.insert(0,'--verbose')
if self.test_loader is None:
self.test_loader = getattr(self.distribution,'test_loader',None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
def with_project_on_sys_path(self, func):
# Ensure metadata is up-to-date
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
sys.path.insert(0, normalize_path(ei_cmd.egg_base))
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
func()
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
if self.test_suite:
cmd = ' '.join(self.test_args)
if self.dry_run:
self.announce('skipping "unittest %s" (dry run)' % cmd)
else:
self.announce('running "unittest %s"' % cmd)
self.with_project_on_sys_path(self.run_tests)
def run_tests(self):
import unittest
loader_ep = EntryPoint.parse("x="+self.test_loader)
loader_class = loader_ep.load(require=False)
unittest.main(
None, None, [unittest.__file__]+self.test_args,
testLoader = loader_class()
)
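# Example invocations (hedged; the module and suite names are placeholders):
#   python setup.py test                      # run the distribution's test_suite
#   python setup.py test -m mypackage.tests   # run mypackage.tests.test_suite
#   python setup.py test -s mypackage.tests.suite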
| mpl-2.0 |
soldag/home-assistant | homeassistant/components/synology_srm/device_tracker.py | 21 | 4163 | """Device tracker for Synology SRM routers."""
import logging
import synology_srm
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_USERNAME = "admin"
DEFAULT_PORT = 8001
DEFAULT_SSL = True
DEFAULT_VERIFY_SSL = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
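# Maps raw attribute names returned by the Synology SRM API to the names this
# tracker exposes. A value of None keeps the original name; non-None values
# correct upstream spellings (e.g. "is_baned" is exposed as "is_banned").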
ATTRIBUTE_ALIAS = {
"band": None,
"connection": None,
"current_rate": None,
"dev_type": None,
"hostname": None,
"ip6_addr": None,
"ip_addr": None,
"is_baned": "is_banned",
"is_beamforming_on": None,
"is_guest": None,
"is_high_qos": None,
"is_low_qos": None,
"is_manual_dev_type": None,
"is_manual_hostname": None,
"is_online": None,
"is_parental_controled": "is_parental_controlled",
"is_qos": None,
"is_wireless": None,
"mac": None,
"max_rate": None,
"mesh_node_id": None,
"rate_quality": None,
"signalstrength": "signal_strength",
"transferRXRate": "transfer_rx_rate",
"transferTXRate": "transfer_tx_rate",
}
def get_scanner(hass, config):
"""Validate the configuration and return Synology SRM scanner."""
scanner = SynologySrmDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class SynologySrmDeviceScanner(DeviceScanner):
"""This class scans for devices connected to a Synology SRM router."""
def __init__(self, config):
"""Initialize the scanner."""
self.client = synology_srm.Client(
host=config[CONF_HOST],
port=config[CONF_PORT],
username=config[CONF_USERNAME],
password=config[CONF_PASSWORD],
https=config[CONF_SSL],
)
if not config[CONF_VERIFY_SSL]:
self.client.http.disable_https_verify()
self.devices = []
self.success_init = self._update_info()
_LOGGER.info("Synology SRM scanner initialized")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [device["mac"] for device in self.devices]
def get_extra_attributes(self, device) -> dict:
"""Get the extra attributes of a device."""
device = next(
(result for result in self.devices if result["mac"] == device), None
)
filtered_attributes = {}
if not device:
return filtered_attributes
for attribute, alias in ATTRIBUTE_ALIAS.items():
value = device.get(attribute)
if value is None:
continue
attr = alias or attribute
filtered_attributes[attr] = value
return filtered_attributes
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
filter_named = [
result["hostname"] for result in self.devices if result["mac"] == device
]
if filter_named:
return filter_named[0]
return None
def _update_info(self):
"""Check the router for connected devices."""
_LOGGER.debug("Scanning for connected devices")
try:
self.devices = self.client.core.get_network_nsm_device({"is_online": True})
except synology_srm.http.SynologyException as ex:
_LOGGER.error("Error with the Synology SRM: %s", ex)
return False
_LOGGER.debug("Found %d device(s) connected to the router", len(self.devices))
return True
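# Example configuration.yaml entry (illustrative; host and password are
# placeholders):
#   device_tracker:
#     - platform: synology_srm
#       host: 192.168.1.1
#       username: admin
#       password: YOUR_PASSWORD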
| apache-2.0 |
myang321/django | tests/gis_tests/geo3d/models.py | 302 | 1294 | from django.utils.encoding import python_2_unicode_compatible
from ..models import models
@python_2_unicode_compatible
class NamedModel(models.Model):
name = models.CharField(max_length=30)
objects = models.GeoManager()
class Meta:
abstract = True
required_db_features = ['gis_enabled']
def __str__(self):
return self.name
class City3D(NamedModel):
point = models.PointField(dim=3)
class Interstate2D(NamedModel):
line = models.LineStringField(srid=4269)
class Interstate3D(NamedModel):
line = models.LineStringField(dim=3, srid=4269)
class InterstateProj2D(NamedModel):
line = models.LineStringField(srid=32140)
class InterstateProj3D(NamedModel):
line = models.LineStringField(dim=3, srid=32140)
class Polygon2D(NamedModel):
poly = models.PolygonField(srid=32140)
class Polygon3D(NamedModel):
poly = models.PolygonField(dim=3, srid=32140)
class SimpleModel(models.Model):
objects = models.GeoManager()
class Meta:
abstract = True
required_db_features = ['gis_enabled']
class Point2D(SimpleModel):
point = models.PointField()
class Point3D(SimpleModel):
point = models.PointField(dim=3)
class MultiPoint3D(SimpleModel):
mpoint = models.MultiPointField(dim=3)
| bsd-3-clause |
OptimusGitEtna/RestSymf | Python-3.4.2/Tools/pybench/Tuples.py | 92 | 8034 | from pybench import Test
class TupleSlicing(Test):
version = 2.0
operations = 3 * 25 * 10 * 7
rounds = 500
def test(self):
r = range(25)
t = tuple(range(100))
for i in range(self.rounds):
for j in r:
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
def calibrate(self):
r = range(25)
t = tuple(range(100))
for i in range(self.rounds):
for j in r:
pass
class SmallTuples(Test):
version = 2.0
operations = 5*(1 + 3 + 6 + 2)
rounds = 90000
def test(self):
for i in range(self.rounds):
t = (1,2,3,4,5,6)
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
l = list(t)
t = tuple(l)
t = (1,2,3,4,5,6)
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
l = list(t)
t = tuple(l)
t = (1,2,3,4,5,6)
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
l = list(t)
t = tuple(l)
t = (1,2,3,4,5,6)
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
l = list(t)
t = tuple(l)
t = (1,2,3,4,5,6)
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
l = list(t)
t = tuple(l)
def calibrate(self):
for i in range(self.rounds):
pass
| mit |
mitodl/bootcamp-ecommerce | main/urls.py | 1 | 3638 | """
URLs for bootcamp
"""
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.urls import re_path, include, path
from django.contrib import admin
from django.contrib.auth import views as auth_views
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.core import urls as wagtail_urls
from wagtail.documents import urls as wagtaildocs_urls
from wagtail.images.views.serve import ServeView
from main.views import react, BackgroundImagesCSSView, cms_login_redirect_view
root_urlpatterns = [url("", include(wagtail_urls))]
urlpatterns = (
[
url(r"^status/", include("server_status.urls")),
url(r"^admin/", admin.site.urls),
url(r"^hijack/", include("hijack.urls", namespace="hijack")),
url("", include("applications.urls")),
url("", include("ecommerce.urls")),
url("", include("social_django.urls", namespace="social")),
path("", include("authentication.urls")),
path("", include("mail.urls")),
path("", include("profiles.urls")),
path("", include("klasses.urls")),
url("", include("jobma.urls")),
url(r"^logout/$", auth_views.LogoutView.as_view(), name="logout"),
url(
r"^background-images\.css$",
BackgroundImagesCSSView.as_view(),
name="background-images-css",
),
# named routes mapped to the react app
path("signin/", react, name="login"),
path("signin/password/", react, name="login-password"),
re_path(r"^signin/forgot-password/$", react, name="password-reset"),
re_path(
r"^signin/forgot-password/confirm/(?P<uid>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$",
react,
name="password-reset-confirm",
),
re_path(
r"^images/([^/]*)/(\d*)/([^/]*)/[^/]*$",
ServeView.as_view(),
name="wagtailimages_serve",
),
path("create-account/", react, name="signup"),
path("create-account/details/", react, name="signup-details"),
path("create-account/retry/", react, name="signup-retry"),
path("create-account/extra/", react, name="signup-extra"),
path("create-account/denied/", react, name="signup-denied"),
path("create-account/error/", react, name="signup-error"),
path("create-account/confirm/", react, name="register-confirm"),
path("account/inactive/", react, name="account-inactive"),
path("account/confirm-email/", react, name="account-confirm-email-change"),
path("account-settings/", react, name="account-settings"),
path("applications/", react, name="applications"),
path(
"applications/<int:application_id>/payment-history/",
react,
name="application-history",
),
re_path(r"^review/", react, name="review"),
# Wagtail
re_path(
r"^images/([^/]*)/(\d*)/([^/]*)/[^/]*$",
ServeView.as_view(),
name="wagtailimages_serve",
),
re_path(r"^cms/login", cms_login_redirect_view, name="wagtailadmin_login"),
re_path(r"^cms/", include(wagtailadmin_urls)),
re_path(r"^documents/", include(wagtaildocs_urls)),
re_path(r"^idp/", include("djangosaml2idp.urls")),
]
+ root_urlpatterns
+ (
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
)
)
handler404 = "main.views.page_404"
handler500 = "main.views.page_500"
| bsd-3-clause |
Qalthos/ansible | lib/ansible/modules/cloud/amazon/ec2_snapshot_copy.py | 39 | 5853 | #!/usr/bin/python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: ec2_snapshot_copy
short_description: copies an EC2 snapshot and returns the new Snapshot ID.
description:
- Copies an EC2 Snapshot from a source region to a destination region.
version_added: "2.4"
options:
source_region:
description:
- The source region the Snapshot should be copied from.
required: true
source_snapshot_id:
description:
- The ID of the Snapshot in source region that should be copied.
required: true
description:
description:
- An optional human-readable string describing purpose of the new Snapshot.
encrypted:
description:
- Whether or not the destination Snapshot should be encrypted.
type: bool
default: 'no'
kms_key_id:
description:
- KMS key id used to encrypt snapshot. If not specified, defaults to EBS Customer Master Key (CMK) for that account.
wait:
description:
- Wait for the copied Snapshot to be in 'Available' state before returning.
type: bool
default: 'no'
wait_timeout:
version_added: "2.6"
description:
- How long before wait gives up, in seconds.
default: 600
tags:
description:
- A hash/dictionary of tags to add to the new Snapshot; '{"key":"value"}' and '{"key":"value","key":"value"}'
author: Deepak Kothandan (@Deepakkothandan) <deepak.kdy@gmail.com>
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
'''
EXAMPLES = '''
# Basic Snapshot Copy
- ec2_snapshot_copy:
source_region: eu-central-1
region: eu-west-1
source_snapshot_id: snap-xxxxxxx
# Copy Snapshot and wait until available
- ec2_snapshot_copy:
source_region: eu-central-1
region: eu-west-1
source_snapshot_id: snap-xxxxxxx
wait: yes
wait_timeout: 1200 # Default timeout is 600
register: snapshot_id
# Tagged Snapshot copy
- ec2_snapshot_copy:
source_region: eu-central-1
region: eu-west-1
source_snapshot_id: snap-xxxxxxx
tags:
Name: Snapshot-Name
# Encrypted Snapshot copy
- ec2_snapshot_copy:
source_region: eu-central-1
region: eu-west-1
source_snapshot_id: snap-xxxxxxx
encrypted: yes
# Encrypted Snapshot copy with specified key
- ec2_snapshot_copy:
source_region: eu-central-1
region: eu-west-1
source_snapshot_id: snap-xxxxxxx
encrypted: yes
kms_key_id: arn:aws:kms:eu-central-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
'''
RETURN = '''
snapshot_id:
description: snapshot id of the newly created snapshot
returned: when snapshot copy is successful
type: str
sample: "snap-e9095e8c"
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, ec2_argument_spec, get_aws_connection_info, camel_dict_to_snake_dict)
from ansible.module_utils._text import to_native
try:
import boto3
from botocore.exceptions import ClientError, WaiterError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def copy_snapshot(module, ec2):
"""
Copies an EC2 Snapshot to another region
module : AnsibleModule object
ec2: ec2 connection object
"""
params = {
'SourceRegion': module.params.get('source_region'),
'SourceSnapshotId': module.params.get('source_snapshot_id'),
'Description': module.params.get('description')
}
if module.params.get('encrypted'):
params['Encrypted'] = True
if module.params.get('kms_key_id'):
params['KmsKeyId'] = module.params.get('kms_key_id')
try:
snapshot_id = ec2.copy_snapshot(**params)['SnapshotId']
if module.params.get('wait'):
delay = 15
            # Add one to max_attempts because wait() increments
            # its counter before assessing it for time.sleep()
max_attempts = (module.params.get('wait_timeout') // delay) + 1
ec2.get_waiter('snapshot_completed').wait(
SnapshotIds=[snapshot_id],
WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts)
)
if module.params.get('tags'):
ec2.create_tags(
Resources=[snapshot_id],
Tags=[{'Key': k, 'Value': v} for k, v in module.params.get('tags').items()]
)
except WaiterError as we:
module.fail_json(msg='An error occurred waiting for the snapshot to become available. (%s)' % str(we), exception=traceback.format_exc())
except ClientError as ce:
module.fail_json(msg=str(ce), exception=traceback.format_exc(), **camel_dict_to_snake_dict(ce.response))
module.exit_json(changed=True, snapshot_id=snapshot_id)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
source_region=dict(required=True),
source_snapshot_id=dict(required=True),
description=dict(default=''),
encrypted=dict(type='bool', default=False, required=False),
kms_key_id=dict(type='str', required=False),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=600),
tags=dict(type='dict')))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='botocore and boto3 are required.')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='ec2',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
copy_snapshot(module, client)
if __name__ == '__main__':
main()
| gpl-3.0 |
foobarbazblarg/stayclean | stayclean-2016-april/display-final-after-month-is-over.py | 1 | 2952 | #!/usr/bin/python
from participantCollection import ParticipantCollection
import re
import datetime
import pyperclip
# Edit Me!
# This script gets run on the first day of the following month, and that month's URL is
# what goes here. E.g., if this directory is the directory for February, this script gets
# run on March 1, and this URL is the URL for the March challenge page.
nextMonthURL = "https://www.reddit.com/r/pornfree/comments/4h92s5/stay_clean_may_this_thread_updated_daily_check_in/"
# If this directory is the directory for November, this script gets run on December 1,
# and currentMonthIndex gets the index of November, i.e. 11.
currentMonthIndex = datetime.date.today().month - 1
if currentMonthIndex == 0:
currentMonthIndex = 12
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
percentStillIn = int(round(100 * numberStillIn / initialNumber, 0))
def templateForParticipants():
answer = ""
for participant in participants.participantsWhoAreStillInAndHaveCheckedIn():
answer += "/u/" + participant.name
answer += "\n\n"
return answer
def templateToUse():
answer = ""
answer += "The Stay Clean CURRENT_MONTH_NAME challenge is now over. Join us for **[the NEXT_MONTH_NAME challenge](NEXT_MONTH_URL)**.\n"
answer += "\n"
answer += "**NUMBER_STILL_IN** out of INITIAL_NUMBER participants made it all the way through the challenge. That's **PERCENT_STILL_IN%**.\n"
answer += "\n"
answer += "Congratulations to these participants, all of whom were victorious:\n\n"
answer += templateForParticipants()
return answer
def stringToPrint():
answer = templateToUse()
answer = re.sub('NUMBER_STILL_IN', str(numberStillIn), answer)
answer = re.sub('INITIAL_NUMBER', str(initialNumber), answer)
answer = re.sub('PERCENT_STILL_IN', str(percentStillIn), answer)
answer = re.sub('CURRENT_MONTH_INDEX', str(currentMonthIndex), answer)
answer = re.sub('CURRENT_MONTH_NAME', currentMonthName, answer)
answer = re.sub('NEXT_MONTH_INDEX', str(nextMonthIndex), answer)
answer = re.sub('NEXT_MONTH_NAME', nextMonthName, answer)
answer = re.sub('NEXT_MONTH_URL', nextMonthURL, answer)
return answer
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
| mit |
anielsen001/scipy | scipy/ndimage/interpolation.py | 31 | 27885 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import math
import numpy
from . import _ni_support
from . import _nd_image
import warnings
__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64):
"""
Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
Parameters
----------
input : array_like
The input array.
order : int, optional
The order of the spline, default is 3.
axis : int, optional
The axis along which the spline filter is applied. Default is the last
axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array. Default is `numpy.float64`.
Returns
-------
spline_filter1d : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
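    Examples
    --------
    A minimal sketch (the filtered values depend on the spline order and are
    not reproduced here):
    >>> from scipy import ndimage
    >>> filtered = ndimage.spline_filter1d(np.arange(12.).reshape(4, 3), axis=0)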
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
def spline_filter(input, order=3, output=numpy.float64):
"""
Multi-dimensional spline filter.
For more details, see `spline_filter1d`.
See Also
--------
spline_filter1d
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output=output)
input = output
else:
output[...] = input[...]
return return_value
def _geometric_transform(input, mapping, coordinates, matrix, offset, output,
order, mode, cval, extra_arguments, extra_keywords):
"""
Wrapper around _nd_image.geometric_transform to work around
endianness issues
"""
_nd_image.geometric_transform(
input, mapping, coordinates, matrix, offset, output,
order, mode, cval, extra_arguments, extra_keywords)
if output is not None and not output.dtype.isnative:
output.byteswap(True)
return output
def geometric_transform(input, mapping, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True,
extra_arguments=(), extra_keywords={}):
"""
    Apply an arbitrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
Parameters
----------
input : array_like
The input array.
mapping : callable
A callable object that accepts a tuple of length equal to the output
array rank, and returns the corresponding input coordinates as a tuple
of length equal to the input array rank.
output_shape : tuple of ints, optional
Shape tuple.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
extra_arguments : tuple, optional
Extra arguments passed to `mapping`.
extra_keywords : dict, optional
Extra keywords passed to `mapping`.
Returns
-------
return_value : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
See Also
--------
map_coordinates, affine_transform, spline_filter1d
Examples
--------
>>> from scipy import ndimage
>>> a = np.arange(12.).reshape((4, 3))
>>> def shift_func(output_coords):
... return (output_coords[0] - 0.5, output_coords[1] - 0.5)
...
>>> ndimage.geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.362, 2.738],
[ 0. , 4.812, 6.187],
[ 0. , 8.263, 9.637]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
_geometric_transform(filtered, mapping, None, None, None, output,
order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array.
coordinates : array_like
The coordinates at which `input` is evaluated.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
map_coordinates : ndarray
The result of transforming the input. The shape of the output is
derived from that of `coordinates` by dropping the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> from scipy import ndimage
>>> a = np.arange(12.).reshape((4, 3))
>>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
array([ 2., 7.])
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
_geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset=0.0, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode.
Given an output image pixel index vector ``o``, the pixel value
is determined from the input image at position ``np.dot(matrix,o) + offset``.
A diagonal matrix can be specified by supplying a one-dimensional
array-like to the matrix parameter, in which case a more efficient
algorithm is applied.
.. versionchanged:: 0.18.0
Previously, the exact interpretation of the affine transformation
depended on whether the matrix was supplied as a one-dimensional or
two-dimensional array. If a one-dimensional array was supplied
to the matrix parameter, the output pixel value at index ``o``
was determined from the input image at position ``matrix * (o + offset)``.
Parameters
----------
input : ndarray
The input array.
matrix : ndarray
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is assumed
        that the matrix is diagonal. A more efficient algorithm is then
applied that exploits the separability of the problem.
offset : float or sequence, optional
The offset into the array where the transform is applied. If a float,
`offset` is the same for each axis. If a sequence, `offset` should
contain one value for each axis.
output_shape : tuple of ints, optional
Shape tuple.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
affine_transform : ndarray or None
The transformed input. If `output` is given as a parameter, None is
returned.
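    Examples
    --------
    An illustrative sketch: an identity matrix with a one-row offset samples
    the input one row ahead, shifting the content up by a row (interpolated
    values are not reproduced here):
    >>> from scipy import ndimage
    >>> a = np.arange(12.).reshape((4, 3))
    >>> shifted = ndimage.affine_transform(a, [[1, 0], [0, 1]], offset=(1, 0))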
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
matrix = numpy.asarray(matrix, dtype=numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype=numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
warnings.warn(
"The behaviour of affine_transform with a one-dimensional "
"array supplied for the matrix parameter has changed in "
"scipy 0.18.0."
)
_nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order,
mode, cval)
else:
_geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Shift an array.
The array is shifted using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode.
Parameters
----------
input : ndarray
The input array.
shift : float or sequence, optional
The shift along the axes. If a float, `shift` is the same for each
axis. If a sequence, `shift` should contain one value for each axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
shift : ndarray or None
The shifted input. If `output` is given as a parameter, None is
returned.
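    Examples
    --------
    A short sketch (the interpolated values are not reproduced here):
    >>> from scipy import ndimage
    >>> shifted = ndimage.shift(np.arange(10.), 0.5)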
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype=numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Parameters
----------
input : ndarray
The input array.
zoom : float or sequence, optional
The zoom factor along the axes. If a float, `zoom` is the same for each
axis. If a sequence, `zoom` should contain one value for each axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
zoom : ndarray or None
The zoomed input. If `output` is given as a parameter, None is
returned.
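    Examples
    --------
    A minimal sketch: zooming a (4, 3) array by a factor of 2 yields an
    (8, 6) result, since the output shape is computed with ``round()``:
    >>> from scipy import ndimage
    >>> ndimage.zoom(np.arange(12.).reshape(4, 3), 2).shape
    (8, 6)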
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple(
[int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
output_shape_old = tuple(
[int(ii * jj) for ii, jj in zip(input.shape, zoom)])
if output_shape != output_shape_old:
warnings.warn(
"From scipy 0.13.0, the output shape of zoom() is calculated "
"with round() instead of int() - for these inputs the size of "
"the returned array has changed.", UserWarning)
zoom_div = numpy.array(output_shape, float) - 1
zoom = (numpy.array(input.shape) - 1) / zoom_div
# Zooming to non-finite values is unpredictable, so just choose
# zoom factor 1 instead
zoom[~numpy.isfinite(zoom)] = 1
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
zoom = numpy.asarray(zoom, dtype=numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes=(1, 0), reshape=True,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Rotate an array.
The array is rotated in the plane defined by the two axes given by the
`axes` parameter using spline interpolation of the requested order.
Parameters
----------
input : ndarray
The input array.
angle : float
The rotation angle in degrees.
axes : tuple of 2 ints, optional
The two axes that define the plane of rotation. Default is the first
two axes.
reshape : bool, optional
If `reshape` is true, the output shape is adapted so that the input
array is contained completely in the output. Default is True.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
rotate : ndarray or None
The rotated input. If `output` is given as a parameter, None is
returned.
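    Examples
    --------
    A small sketch: rotating by 90 degrees with the default ``reshape=True``
    swaps the two in-plane dimensions:
    >>> from scipy import ndimage
    >>> ndimage.rotate(np.arange(12.).reshape(4, 3), 90).shape
    (3, 4)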
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
    if axes[0] < 0 or axes[1] < 0 or axes[0] >= rank or axes[1] >= rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype=numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[m11, -m21],
[-m12, m22]], dtype=numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype=numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype=numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, output,
order, mode, cval, prefilter)
else:
coordinates = []
size = numpy.product(input.shape,axis=0)
size //= input.shape[axes[0]]
size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
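# A minimal usage sketch (not part of the original source, assuming numpy is
# importable and this module is loaded): rotating a 2-D array by 90 degrees
# with the default reshape=True swaps the output dimensions.
# >>> a = numpy.arange(12.).reshape(3, 4)
# >>> rotate(a, 90).shape
# (4, 3)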
| bsd-3-clause |
mbinette91/ConstructionLCA | webapp.py | 1 | 7974 | import os
import threading
import urlparse
import time
from SimpleHTTPServer import SimpleHTTPRequestHandler
from ModelBuilder import ModelBuilder
import pickle
import sqlite3
import json
db = None;
def GetUniqueProjectId():
global db
filename = "../temp.db"
if not db:
db = {'last_project_id': 0}
if os.path.isfile(filename):
file = open(filename, 'r')
db = pickle.load(file)
file.close()
db['last_project_id'] += 1
file = open(filename, 'w')
pickle.dump(db, file);
file.close()
return db['last_project_id']
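# Example behaviour (a sketch, not from the original source): the counter is
# persisted in ../temp.db via pickle, so successive calls return 1, 2, 3, ...
# even across server restarts.
# >>> GetUniqueProjectId() # first call ever
# 1
# >>> GetUniqueProjectId()
# 2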
class ProductTreeBuilder:
IGNORED_CLASSES = ["Building", "BuildingStorey", "Space"]
def __init__(self):
self.data = []
self.last_class_name = None;
self.class_data = []
self.last_class_type = None;
self.class_type_data = [];
self.undefined_data = [];
def add_product_row(self, row):
class_name = row[2].replace("Ifc", "");
if class_name in ProductTreeBuilder.IGNORED_CLASSES:
return;
type = row[3];
product_data = [row[0], row[1]];
if class_name != self.last_class_name:
self.close_class();
self.last_class_name = class_name;
if not type:
self.undefined_data.append(product_data)
else:
if type != self.last_class_type:
self.close_class_type();
self.last_class_type = type;
self.class_type_data.append(product_data)
def close_class_type(self):
if len(self.class_type_data) != 0:
self.class_data.append([self.last_class_type, self.class_type_data]);
self.class_type_data = [];
self.last_class_type = None;
def close_undefined_type(self):
if len(self.undefined_data) != 0:
self.class_data.append(["Others", self.undefined_data]);
self.undefined_data = []
def close_class(self):
self.close_class_type();
self.close_undefined_type();
if len(self.class_data) != 0:
self.data.append([self.last_class_name, self.class_data]);
self.class_data = []
self.last_class_name = None;
def end(self):
self.close_class();
def get_tree(self):
return self.data;
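# Shape of the tree returned by get_tree(), reconstructed from the code above
# (class and material names are illustrative only):
# [["Wall", [["Concrete", [[guid, name], ...]],
# ["Others", [[guid, name], ...]]]],
# ["Slab", [...]]]
# Rows must arrive ordered by class name and material name (see the ORDER BY
# clause in the /project/info query below) for this grouping to work.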
class CustomHTTPRequestHandler(SimpleHTTPRequestHandler):
def do_GET(self):
url = urlparse.urlparse(self.path)
params = urlparse.parse_qs(url.query)
if url.path == "/project":
self.show_project(params)
if url.path == "/project/info":
self.get_project_info(params)
else: # Default
SimpleHTTPRequestHandler.do_GET(self);
def show_project(self, query):
print "Requesting /project with", query
if 'id' not in query:
self.send_response(302)
self.send_header('Location', '/')
self.end_headers()
return;
with open("_project.html") as f:
response = f.read().replace('{PROJECT_ID}', query['id'][0])
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-length", len(response))
self.end_headers()
self.wfile.write(response)
def get_project_info(self, query):
print "Requesting /project/info with", query
if 'id' not in query or 'get' not in query:
self.send_response(302)
self.send_header('Location', '/')
self.end_headers()
return;
if query['get'][0] == 'tree':
conn = sqlite3.connect('../database.db3')
conn.text_factory = str
c = conn.cursor()
c.execute('SELECT p.guid, p.name, p.class_name, m.name FROM products p LEFT JOIN materials m ON p.id=m.product_id WHERE project_id=? ORDER BY p.class_name, m.name', (query['id'][0],))
builder = ProductTreeBuilder();
for row in c.fetchall():
builder.add_product_row(row);
builder.end();
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(json.dumps(builder.get_tree(), encoding='latin1'))
return;
elif query['get'][0] == 'info':
conn = sqlite3.connect('../database.db3')
conn.text_factory = str
c = conn.cursor()
c.execute('SELECT guid,p.name,description,class_name,m.name,m.thickness,m.layer_name FROM products p LEFT JOIN materials m ON p.id=m.product_id WHERE project_id=?', (query['id'][0],))
data = []
for row in c.fetchall():
data.append({
'guid': row[0],
'name': row[1],
'description': row[2],
'className': row[3],
'material': {'name': row[4], 'thickness': row[5], 'layerName': row[6]}
})
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(json.dumps(data, encoding='latin1'))
return;
elif query['get'][0] == 'properties':
conn = sqlite3.connect('../database.db3')
conn.text_factory = str
c = conn.cursor()
c.execute('SELECT ps.id, ps.name FROM products p JOIN property_set ps ON p.id=ps.product_id WHERE project_id=? AND p.guid=?', (query['id'][0],query['product_id'][0],))
data = []
for row in c.fetchall():
properties = []
c2 = conn.cursor()
c2.execute('SELECT p.name, p.value FROM property p WHERE property_set_id=?', (row[0],))
for prop_row in c2.fetchall():
properties.append({
'name': prop_row[0],
'value': prop_row[1]
})
data.append({
'name': row[1],
'properties': properties
})
c2.close();
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(json.dumps(data, encoding='latin1'))
return;
def get_tree(self, query):
print "Requesting /project/tree with", query
if 'id' not in query:
self.send_response(302)
self.send_header('Location', '/')
self.end_headers()
return;
def do_POST(self):
url = urlparse.urlparse(self.path)
params = urlparse.parse_qs(url.query)
if url.path == "/project":
self.new_project(params)
else: # Default
SimpleHTTPRequestHandler.do_POST(self);
def new_project(self, query):
id = str(GetUniqueProjectId());
print "Creating new project with id =", id
result = self._upload_ifc(id)
if result[0]:
thread = threading.Thread(target=self._build_unity, args=(id,)) # Start in a new thread
thread.start();
self.send_response(302)
self.send_header('Location', '/project?id='+id)
self.end_headers()
else: # For debugging purposes only
print result;
self.send_response(302)
self.send_header('Location', '/?error='+result[1])
self.end_headers()
def _build_unity(self, model_id):
# This method can take a long time and should NOT be called from the main HTTPHandler's thread.
start_time = time.time()
builder = ModelBuilder(model_id);
builder.build();
def _upload_ifc(self, id):
# Inspired by https://gist.github.com/UniIsland/3346170
boundary = self.headers.plisttext.split("=")[1]
remainbytes = int(self.headers['content-length'])
line = self.rfile.readline()
remainbytes -= len(line)
if not boundary in line:
return (False, "Content NOT begin with boundary")
line = self.rfile.readline()
remainbytes -= len(line)
fn = "../tmp/IFC_" + id + ".ifc"
line = self.rfile.readline()
remainbytes -= len(line)
line = self.rfile.readline()
remainbytes -= len(line)
try:
out = open(fn, 'wb')
except IOError:
return (False, "Can't create file to write, do you have permission to write?")
preline = self.rfile.readline()
remainbytes -= len(preline)
while remainbytes > 0:
line = self.rfile.readline()
remainbytes -= len(line)
if boundary in line:
preline = preline[0:-1]
if preline.endswith('\r'):
preline = preline[0:-1]
out.write(preline)
out.close()
return (True, "File '%s' upload success!" % fn)
else:
out.write(preline)
preline = line
return (False, "Unexpect Ends of data.")
if __name__ == "__main__":
import sys
import BaseHTTPServer
os.chdir('webapp/')
HandlerClass = CustomHTTPRequestHandler
ServerClass = BaseHTTPServer.HTTPServer
Protocol = "HTTP/1.0"
port = 8000
server_address = ('127.0.0.1', port)
HandlerClass.protocol_version = Protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever() | gpl-2.0 |
jhaux/tensorflow | tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py | 80 | 8075 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import test
def BatchMatMul(a, b):
# A numpy implementation of tf.matmul().
if a.ndim < 3:
return np.dot(a, b)
# Get the number of matrices.
n = np.prod(a.shape[:-2])
assert n == np.prod(b.shape[:-2])
a_flat = np.reshape(a, tuple([n]) + a.shape[-2:])
b_flat = np.reshape(b, tuple([n]) + b.shape[-2:])
c_flat_shape = [n, a.shape[-2], b.shape[-1]]
c_flat = np.empty(c_flat_shape)
for i in range(n):
c_flat[i, :, :] = np.dot(a_flat[i, :, :], b_flat[i, :, :])
return np.reshape(c_flat, a.shape[:-1] + b_flat.shape[-1:])
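# Shape behaviour, e.g.: a.shape == (2, 3, 4, 5) and b.shape == (2, 3, 5, 6)
# yields a result of shape (2, 3, 4, 6); 2-D inputs fall through to np.dot.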
def BatchRegularizedLeastSquares(matrices, rhss, l2_regularization=0.0):
# A numpy implementation of regularized least squares solver using
# the normal equations.
matrix_dims = matrices.shape
matrices_transposed = np.swapaxes(matrices, -2, -1)
rows = matrix_dims[-2]
cols = matrix_dims[-1]
if rows >= cols:
preconditioner = l2_regularization * np.identity(cols)
gramian = BatchMatMul(matrices_transposed, matrices) + preconditioner
inverse = np.linalg.inv(gramian)
left_pseudo_inverse = BatchMatMul(inverse, matrices_transposed)
return BatchMatMul(left_pseudo_inverse, rhss)
else:
preconditioner = l2_regularization * np.identity(rows)
gramian = BatchMatMul(matrices, matrices_transposed) + preconditioner
inverse = np.linalg.inv(gramian)
right_pseudo_inverse = BatchMatMul(matrices_transposed, inverse)
return BatchMatMul(right_pseudo_inverse, rhss)
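# In matrix notation the two branches above compute, for regularizer l2:
# rows >= cols: x = inv(A^T A + l2*I) A^T b (normal equations)
# rows < cols: x = A^T inv(A A^T + l2*I) b (minimum-norm variant)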
class MatrixSolveLsOpTest(test.TestCase):
def _verifySolve(self, x, y):
for np_type in [np.float32, np.float64]:
a = x.astype(np_type)
b = y.astype(np_type)
np_ans, _, _, _ = np.linalg.lstsq(a, b)
for fast in [True, False]:
with self.test_session():
tf_ans = linalg_ops.matrix_solve_ls(a, b, fast=fast)
ans = tf_ans.eval()
self.assertEqual(np_ans.shape, tf_ans.get_shape())
self.assertEqual(np_ans.shape, ans.shape)
# Check residual norm.
tf_r = b - BatchMatMul(a, ans)
tf_r_norm = np.sum(tf_r * tf_r)
np_r = b - BatchMatMul(a, np_ans)
np_r_norm = np.sum(np_r * np_r)
self.assertAllClose(np_r_norm, tf_r_norm)
# Check solution.
self.assertAllClose(np_ans, ans, atol=1e-5, rtol=1e-5)
def _verifySolveBatch(self, x, y):
# Since numpy.linalg.lstsq does not support batch solves, as opposed
# to numpy.linalg.solve, we just perform this test for a fixed batch size
# of 2x3.
for np_type in [np.float32, np.float64]:
a = np.tile(x.astype(np_type), [2, 3, 1, 1])
b = np.tile(y.astype(np_type), [2, 3, 1, 1])
np_ans = np.empty([2, 3, a.shape[-1], b.shape[-1]])
for dim1 in range(2):
for dim2 in range(3):
np_ans[dim1, dim2, :, :], _, _, _ = np.linalg.lstsq(
a[dim1, dim2, :, :], b[dim1, dim2, :, :])
for fast in [True, False]:
with self.test_session():
tf_ans = linalg_ops.matrix_solve_ls(a, b, fast=fast).eval()
self.assertEqual(np_ans.shape, tf_ans.shape)
# Check residual norm.
tf_r = b - BatchMatMul(a, tf_ans)
tf_r_norm = np.sum(tf_r * tf_r)
np_r = b - BatchMatMul(a, np_ans)
np_r_norm = np.sum(np_r * np_r)
self.assertAllClose(np_r_norm, tf_r_norm)
# Check solution.
if fast or a.shape[-2] >= a.shape[-1]:
# We skip this test for the underdetermined case when using the
# slow path, because Eigen does not return a minimum norm solution.
# TODO(rmlarsen): Enable this check for all paths if/when we fix
# Eigen's solver.
self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)
def _verifyRegularized(self, x, y, l2_regularizer):
for np_type in [np.float32, np.float64]:
# Test with a single matrix.
a = x.astype(np_type)
b = y.astype(np_type)
np_ans = BatchRegularizedLeastSquares(a, b, l2_regularizer)
with self.test_session():
# Test matrix_solve_ls on regular matrices
tf_ans = linalg_ops.matrix_solve_ls(
a, b, l2_regularizer=l2_regularizer, fast=True).eval()
self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)
# Test with a 2x3 batch of matrices.
a = np.tile(x.astype(np_type), [2, 3, 1, 1])
b = np.tile(y.astype(np_type), [2, 3, 1, 1])
np_ans = BatchRegularizedLeastSquares(a, b, l2_regularizer)
with self.test_session():
tf_ans = linalg_ops.matrix_solve_ls(
a, b, l2_regularizer=l2_regularizer, fast=True).eval()
self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)
def testSquare(self):
# 2x2 matrices, 2x3 right-hand sides.
matrix = np.array([[1., 2.], [3., 4.]])
rhs = np.array([[1., 0., 1.], [0., 1., 1.]])
self._verifySolve(matrix, rhs)
self._verifySolveBatch(matrix, rhs)
self._verifyRegularized(matrix, rhs, l2_regularizer=0.1)
def testOverdetermined(self):
# 2x2 matrices, 2x3 right-hand sides.
matrix = np.array([[1., 2.], [3., 4.], [5., 6.]])
rhs = np.array([[1., 0., 1.], [0., 1., 1.], [1., 1., 0.]])
self._verifySolve(matrix, rhs)
self._verifySolveBatch(matrix, rhs)
self._verifyRegularized(matrix, rhs, l2_regularizer=0.1)
def testUnderdetermined(self):
# 2x2 matrices, 2x3 right-hand sides.
matrix = np.array([[1., 2., 3], [4., 5., 6.]])
rhs = np.array([[1., 0., 1.], [0., 1., 1.]])
self._verifySolve(matrix, rhs)
self._verifySolveBatch(matrix, rhs)
self._verifyRegularized(matrix, rhs, l2_regularizer=0.1)
def testWrongDimensions(self):
# The matrix and right-hand sides should have the same number of rows.
with self.test_session():
matrix = constant_op.constant([[1., 0.], [0., 1.]])
rhs = constant_op.constant([[1., 0.]])
with self.assertRaises(ValueError):
linalg_ops.matrix_solve_ls(matrix, rhs)
def testEmpty(self):
full = np.array([[1., 2.], [3., 4.], [5., 6.]])
empty0 = np.empty([3, 0])
empty1 = np.empty([0, 2])
for fast in [True, False]:
with self.test_session():
tf_ans = linalg_ops.matrix_solve_ls(empty0, empty0, fast=fast).eval()
self.assertEqual(tf_ans.shape, (0, 0))
tf_ans = linalg_ops.matrix_solve_ls(empty0, full, fast=fast).eval()
self.assertEqual(tf_ans.shape, (0, 2))
tf_ans = linalg_ops.matrix_solve_ls(full, empty0, fast=fast).eval()
self.assertEqual(tf_ans.shape, (2, 0))
tf_ans = linalg_ops.matrix_solve_ls(empty1, empty1, fast=fast).eval()
self.assertEqual(tf_ans.shape, (2, 2))
def testBatchResultSize(self):
# 3x3x3 matrices, 3x3x1 right-hand sides.
matrix = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9.] * 3).reshape(3, 3, 3)
rhs = np.array([1., 2., 3.] * 3).reshape(3, 3, 1)
answer = linalg_ops.matrix_solve(matrix, rhs)
ls_answer = linalg_ops.matrix_solve_ls(matrix, rhs)
self.assertEqual(ls_answer.get_shape(), [3, 3, 1])
self.assertEqual(answer.get_shape(), [3, 3, 1])
if __name__ == "__main__":
test.main()
| apache-2.0 |
SurfasJones/djcmsrc3 | venv/lib/python2.7/site-packages/cms/signals/permissions.py | 4 | 2716 | # -*- coding: utf-8 -*-
from cms.cache.permissions import clear_user_permission_cache
from cms.models import PageUser, PageUserGroup
from cms.compat import user_related_name
from menus.menu_pool import menu_pool
def post_save_user(instance, raw, created, **kwargs):
"""Signal called when new user is created, required only when CMS_PERMISSION.
Assigns creator of the user to PageUserInfo model, so we know who had created
this user account.
requires: CurrentUserMiddleware
"""
from cms.utils.permissions import get_current_user
# read current user from thread locals
creator = get_current_user()
if not creator or not created or creator.is_anonymous():
return
page_user = PageUser(user_ptr_id=instance.pk, created_by=creator)
page_user.__dict__.update(instance.__dict__)
page_user.save()
def post_save_user_group(instance, raw, created, **kwargs):
"""The same like post_save_user, but for Group, required only when
CMS_PERMISSION.
Assigns creator of the group to PageUserGroupInfo model, so we know who had
created this user account.
requires: CurrentUserMiddleware
"""
from cms.utils.permissions import get_current_user
# read current user from thread locals
creator = get_current_user()
if not creator or not created or creator.is_anonymous():
return
page_user = PageUserGroup(group_ptr_id=instance.pk, created_by=creator)
page_user.__dict__.update(instance.__dict__)
page_user.save()
def pre_save_user(instance, raw, **kwargs):
clear_user_permission_cache(instance)
def pre_delete_user(instance, **kwargs):
clear_user_permission_cache(instance)
def pre_save_group(instance, raw, **kwargs):
if instance.pk:
user_set = getattr(instance, user_related_name)
for user in user_set.all():
clear_user_permission_cache(user)
def pre_delete_group(instance, **kwargs):
for user in instance.user_set.all():
clear_user_permission_cache(user)
def _clear_users_permissions(instance):
if instance.user:
clear_user_permission_cache(instance.user)
if instance.group:
user_set = getattr(instance.group, user_related_name)
for user in user_set.all():
clear_user_permission_cache(user)
def pre_save_pagepermission(instance, raw, **kwargs):
_clear_users_permissions(instance)
def pre_delete_pagepermission(instance, **kwargs):
_clear_users_permissions(instance)
def pre_save_globalpagepermission(instance, raw, **kwargs):
_clear_users_permissions(instance)
menu_pool.clear(all=True)
def pre_delete_globalpagepermission(instance, **kwargs):
_clear_users_permissions(instance)
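# These receivers are connected to Django's model signals elsewhere in the
# cms.signals package; a hypothetical wiring sketch (sender names assumed,
# not taken from this module):
# from django.db.models.signals import post_save, pre_save, pre_delete
# from django.contrib.auth.models import User
# post_save.connect(post_save_user, sender=User)
# pre_save.connect(pre_save_user, sender=User)
# pre_delete.connect(pre_delete_user, sender=User)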
| mit |
maxhutch/mapcombine | examples/word_count/word_count.py | 1 | 1274 | #!/usr/bin/env python3
from argparse import ArgumentParser
from mapcombine import outer_process
parser = ArgumentParser(description='MapCombine example')
parser.add_argument('--mapreduce', default='MapReduce',
help="Module that implements map_ and reduce_")
parser.add_argument('--MR_init', default=None,
help="MR_init")
parser.add_argument('--map', default=None,
help="map")
parser.add_argument('--reduce', default=None,
help="reduce")
parser.add_argument('--filereader', default=None,
help="Module that implements DefaultFileReader")
parser.add_argument('--post', default=None,
help="Module that implements post_frame")
parser.add_argument('-t', '--thread', type=int, default=1,
help="Number of threads")
parser.add_argument('-b', '--block', type=int, default=1024,
help="Number of entries per block")
parser.add_argument('-v', '--verbose', action="store_true", default=False,
help="Verbose?")
args = parser.parse_args()
params = {}
jobs = [(args, params, 0),]
stuff = map(outer_process, jobs)
for i, res in enumerate(stuff):
print(i, res["words"]['linen'], res["words"]['Rotherhithe'])
| mit |
samarthmed/emacs-config | .python-environments/default/lib/python2.7/site-packages/epc/handler.py | 4 | 13330 | # Copyright (C) 2012- Takafumi Arakaki
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import itertools
import threading
from sexpdata import loads, dumps, Symbol, String
from .py3compat import SocketServer, Queue
from .utils import autolog, LockingDict, newthread, callwith
class BaseRemoteError(Exception):
"""
All exceptions from remote method are derived from this class.
"""
class CallerUnknown(BaseRemoteError):
"""
Error raised in remote method, but caller of the method is unknown.
"""
class EPCError(BaseRemoteError):
"""
Error returned by `epc-error` protocol.
"""
class ReturnError(BaseRemoteError):
"""
Error returned by `return-error` protocol.
"""
class EPCErrorCallerUnknown(CallerUnknown, EPCError):
"""
Same as :class:`EPCError`, but caller is unknown.
"""
class ReturnErrorCallerUnknown(CallerUnknown, ReturnError):
"""
Same as :class:`ReturnError`, but caller is unknown.
"""
class EPCClosed(Exception):
"""
Trying to send to a closed socket.
"""
def encode_string(string):
data = string.encode('utf-8')
datalen = '{0:06x}'.format(len(data) + 1).encode()
return _JOIN_BYTES([datalen, data, _NEWLINE_BYTE])
_JOIN_BYTES = ''.encode().join
_NEWLINE_BYTE = '\n'.encode()
def encode_object(obj, **kwds):
return encode_string(dumps(obj, **kwds))
def encode_message(name, *args, **kwds):
return encode_object([Symbol(name)] + list(args), **kwds)
def unpack_message(bytes):
data = loads(bytes.decode('utf-8'))
return (data[0].value(), data[1], data[2:])
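# Wire format sketch: each message is a 6-digit hex length (payload bytes
# plus the trailing newline) followed by an s-expression, e.g.
# encode_message('call', 1, Symbol('add'), [1, 2]) should produce roughly
# b'000013(call 1 add (1 2))\n' (0x13 == 19 bytes including the newline).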
def itermessage(read):
while True:
head = read(6)
if not head:
return
length = int(head, 16)
data = read(length)
if len(data) < length:
raise ValueError('need {0}-length data; got {1}'
.format(length, len(data)))
yield data
class BlockingCallback(object):
def __init__(self):
self.queue = q = Queue.Queue()
self.callback = lambda x: q.put(('return', x))
self.errback = lambda x: q.put(('error', x))
self.cbs = {'callback': self.callback, 'errback': self.errback}
def result(self, timeout):
(rtype, reply) = self.queue.get(timeout=timeout)
if rtype == 'return':
return reply
else:
raise reply
class EPCCallManager:
Dict = LockingDict # FIXME: make it configurable from server class.
"""
Dictionary class used to store callbacks.
"""
def __init__(self):
self.callbacks = self.Dict()
counter = itertools.count(1)
self.get_uid = callwith(threading.Lock())(lambda: next(counter))
# Wrapping by threading.Lock is useless for non-threading
# handler. Probably it is better to make it optional.
def call(self, handler, name, args=[], callback=None, errback=None):
uid = self.get_uid()
self.callbacks[uid] = (callback, errback)
handler._send('call', uid, Symbol(name), args)
def methods(self, handler, callback=None, errback=None):
uid = self.get_uid()
self.callbacks[uid] = (callback, errback)
handler._send('methods', uid)
def handle_return(self, uid, reply):
try:
(callback, _) = self.callbacks.pop(uid)
except (KeyError, TypeError):
raise CallerUnknown(reply)
if callback is not None:
callback(reply)
def _handle_error_reply(self, uid, reply, eclass, notfound):
try:
(_, errback) = self.callbacks.pop(uid)
except (KeyError, TypeError):
raise notfound(reply)
error = eclass(reply)
if errback is None:
raise error
else:
errback(error)
def handle_return_error(self, uid, reply):
self._handle_error_reply(uid, reply, ReturnError,
ReturnErrorCallerUnknown)
def handle_epc_error(self, uid, reply):
self._handle_error_reply(uid, reply, EPCError,
EPCErrorCallerUnknown)
class EPCHandler(SocketServer.StreamRequestHandler):
# These attribute are defined in `SocketServer.BaseRequestHandler`
# self.server : an instance of `EPCServer`
# self.request :
# self.client_address
# These attribute are defined in `SocketServer.StreamRequestHandler`
# self.connection : = self.request
# self.rfile : stream from client
# self.wfile : stream to client
@property
def logger(self):
return self.server.logger
@autolog('debug')
def setup(self):
SocketServer.StreamRequestHandler.setup(self)
self.callmanager = EPCCallManager()
self.server.add_client(self)
@autolog('debug')
def finish(self):
try:
SocketServer.StreamRequestHandler.finish(self)
finally:
self.server.remove_client(self)
def _rfile_read_safely(self, size):
try:
return self.rfile.read(size)
except (AttributeError, ValueError):
if self.rfile.closed:
# Calling read on closed socket raises
# AttributeError in 2.x and ValueError in 3.x.
# http://bugs.python.org/issue9177
raise StopIteration
else:
raise # if not, just re-raise it.
def _recv(self):
self.logger.debug('receiving...')
for data in itermessage(self._rfile_read_safely):
self.logger.debug(
'received: length = %r; data = %r', len(data), data)
yield data
self.logger.debug('receiving...')
@autolog('debug')
def _send(self, *args):
string = encode_message(*args)
try:
self.wfile.write(string)
except (AttributeError, ValueError):
# See also: :meth:`_rfile_read_safely`
raise EPCClosed
@autolog('debug')
def handle(self):
for sexp in self._recv():
self._handle(sexp)
@autolog('debug')
def _handle(self, sexp):
uid = undefined = [] # default: nil
try:
(name, uid, args) = unpack_message(sexp)
pyname = name.replace('-', '_')
getattr(self, '_validate_{0}'.format(pyname))(uid, args)
handler = getattr(self, '_handle_{0}'.format(pyname))
reply = handler(uid, *args)
if reply is not None:
self._send(*reply)
except Exception as err:
if self.handle_error(err):
return
if self.server.debugger or self.server.log_traceback:
exc_info = sys.exc_info()
self.logger.error('Unexpected error', exc_info=exc_info)
if self.server.debugger:
self.server.debugger.post_mortem(exc_info[2])
name = 'epc-error' if uid is undefined else 'return-error'
self._send(name, uid, repr(err))
@autolog('debug')
def _handle_call(self, uid, meth, args):
# See: `epc:handler-called-method`
name = meth.value()
try:
func = self.server.get_method(name)
except AttributeError:
return ['epc-error', uid,
"EPC-ERROR: No such method : {0}".format(name)]
return ['return', uid, func(*args)]
def _handle_methods(self, uid):
return ['return', uid, [
(Symbol(name), [], String(func.__doc__ or ""))
# FIXME: implement arg-specs
for (name, func)
in self.server.funcs.items()]]
def _handle_return(self, uid, reply):
self.callmanager.handle_return(uid, reply)
def _handle_return_error(self, uid, reply=None, *_):
self.callmanager.handle_return_error(uid, reply)
def _handle_epc_error(self, uid, reply=None, *_):
self.callmanager.handle_epc_error(uid, reply)
_epc_error_template = \
"(%s %d ...): Got %s arguments in the reply: %r"
def _validate_call(self, uid, args, num_expect=2, name='call'):
len_args = len(args)
if len_args == num_expect:
return
elif len_args < num_expect:
message = 'Not enough arguments {0!r}'.format(args)
else:
message = 'Too many arguments {0!r}'.format(args)
self._send("epc-error", uid, message)
raise EPCError('({0} {1} ...): {2}'.format(name, uid, message))
def _validate_methods(self, uid, args):
self._validate_call(uid, args, 0, 'methods')
def _validate_return(self, uid, args):
len_args = len(args)
error = lambda x: self._epc_error_template % ('return', uid, x, args)
if len_args == 0:
message = error('not enough')
elif len_args > 1:
message = error('too many')
else:
return
self.logger.error(message)
self._handle_epc_error(uid, message)
raise EPCError(message)
def _validate_return_error(self, uid, args):
self._log_extra_argument_error('return-error', uid, args)
def _validate_epc_error(self, uid, args):
self._log_extra_argument_error('epc-error', uid, args)
def _log_extra_argument_error(self, name, uid, args):
if len(args) > 1:
self.logger.error(self._epc_error_template,
name, uid, 'too many', args)
def handle_error(self, err):
"""
Handle error which is not handled by errback.
:type err: Exception
:arg err: An error not handled by other mechanisms.
:rtype: boolean
Returning True from this method means that the error is properly
handled, so the error is not sent to the client. Do not confuse
this with :meth:`SocketServer.BaseServer.handle_error`; this
method handles errors for each client, not for the entire
server. The default implementation logs the error and returns
True if the error comes from the remote side [#]_, and returns
False otherwise. Therefore, only errors that occur in this
handler class are sent to the remote side.
.. [#] More specifically, it returns True if `err` is an
instance of :class:`BaseRemoteError` or :class:`EPCClosed`.
"""
self.logger.error(repr(err))
if isinstance(err, (BaseRemoteError, EPCClosed)):
# BaseRemoteError: do not send error back
# EPCClosed: no exception from thread
return True
def call(self, name, *args, **kwds):
"""
Call method connected to this handler.
:type name: str
:arg name: Method name to call.
:type args: list
:arg args: Arguments for remote method to call.
:type callback: callable
:arg callback: A function to be called with returned value of
the remote method.
:type errback: callable
:arg errback: A function to be called with an error occurred
in the remote method. It is either an instance
of :class:`ReturnError` or :class:`EPCError`.
"""
self.callmanager.call(self, name, *args, **kwds)
def methods(self, *args, **kwds):
"""
Request info of callable remote methods.
Arguments for :meth:`call` except for `name` can be applied to
this function too.
"""
self.callmanager.methods(self, *args, **kwds)
@staticmethod
def _blocking_request(call, timeout, *args):
bc = BlockingCallback()
call(*args, **bc.cbs)
return bc.result(timeout=timeout)
def call_sync(self, name, args, timeout=None):
"""
Blocking version of :meth:`call`.
:type name: str
:arg name: Remote function name to call.
:type args: list
:arg args: Arguments passed to the remote function.
:type timeout: int or None
:arg timeout: Timeout in second. None means no timeout.
If the called remote function raises an exception, this method
raises an exception. If you give `timeout`, this method may
raise an `Empty` exception.
"""
return self._blocking_request(self.call, timeout, name, args)
def methods_sync(self, timeout=None):
"""
Blocking version of :meth:`methods`. See also :meth:`call_sync`.
"""
return self._blocking_request(self.methods, timeout)
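# Usage sketch (assuming a connected peer that exposes an 'add' method):
# result = handler.call_sync('add', [1, 2], timeout=5) # -> 3, or raises
# info = handler.methods_sync(timeout=5)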
class ThreadingEPCHandler(EPCHandler):
def _handle(self, sexp):
newthread(self, target=EPCHandler._handle, args=(self, sexp)).start()
| gpl-2.0 |
mims2707/bite-project | deps/gdata-python-client/tests/gdata_tests/docs/data_test.py | 39 | 15349 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'e.bidelman (Eric Bidelman)'
import unittest
import atom
from gdata import test_data
import gdata.acl.data
import gdata.data
import gdata.docs.data
import gdata.test_config as conf
class DocsEntryTest(unittest.TestCase):
def setUp(self):
self.entry = atom.core.parse(test_data.DOCUMENT_LIST_ENTRY_V3,
gdata.docs.data.Resource)
def testToAndFromStringDocsEntry(self):
self.assert_(isinstance(self.entry, gdata.docs.data.Resource))
self.assertEqual(self.entry.GetResourceType(), 'spreadsheet')
self.assert_(isinstance(self.entry.last_viewed, gdata.docs.data.LastViewed))
self.assertEqual(self.entry.last_viewed.text, '2009-03-05T07:48:21.493Z')
self.assert_(
isinstance(self.entry.last_modified_by, gdata.docs.data.LastModifiedBy))
self.assertEqual(
self.entry.last_modified_by.email.text, 'test.user@gmail.com')
self.assertEqual(self.entry.last_modified_by.name.text, 'test.user')
self.assert_(isinstance(self.entry.resource_id, gdata.docs.data.ResourceId))
self.assertEqual(self.entry.resource_id.text,
'spreadsheet:supercalifragilisticexpealidocious')
self.assert_(isinstance(self.entry.writers_can_invite,
gdata.docs.data.WritersCanInvite))
self.assertEqual(self.entry.writers_can_invite.value, 'true')
self.assert_(isinstance(self.entry.quota_bytes_used,
gdata.docs.data.QuotaBytesUsed))
self.assertEqual(self.entry.quota_bytes_used.text, '1000')
self.assertEqual(len(self.entry.feed_link), 2)
self.assert_(isinstance(self.entry.feed_link[0], gdata.data.FeedLink))
self.assertEqual(
self.entry.GetAclFeedLink().href,
('https://docs.google.com/feeds/default/private/full/'
'spreadsheet%3Asupercalifragilisticexpealidocious/acl'))
self.assertEqual(
self.entry.GetRevisionsFeedLink().href,
('https://docs.google.com/feeds/default/private/full/'
'spreadsheet%3Asupercalifragilisticexpealidocious/revisions'))
self.assertEqual(len(self.entry.InCollections()), 1)
self.assertEqual(self.entry.InCollections()[0].title, 'AFolderName')
class AclTest(unittest.TestCase):
def setUp(self):
self.acl_entry = atom.core.parse(test_data.DOCUMENT_LIST_ACL_ENTRY,
gdata.docs.data.AclEntry)
self.acl_entry_withkey = atom.core.parse(
test_data.DOCUMENT_LIST_ACL_WITHKEY_ENTRY, gdata.docs.data.AclEntry)
self.acl_entry_additional_role = atom.core.parse(
test_data.DOCUMENT_LIST_ACL_ADDITIONAL_ROLE_ENTRY,
gdata.docs.data.AclEntry)
def testToAndFromString(self):
self.assert_(isinstance(self.acl_entry, gdata.docs.data.AclEntry))
self.assert_(isinstance(self.acl_entry.role, gdata.acl.data.AclRole))
self.assert_(isinstance(self.acl_entry.scope, gdata.acl.data.AclScope))
self.assertEqual(self.acl_entry.scope.value, 'user@gmail.com')
self.assertEqual(self.acl_entry.scope.type, 'user')
self.assertEqual(self.acl_entry.role.value, 'writer')
acl_entry_str = str(self.acl_entry)
new_acl_entry = atom.core.parse(acl_entry_str, gdata.docs.data.AclEntry)
self.assert_(isinstance(new_acl_entry, gdata.docs.data.AclEntry))
self.assert_(isinstance(new_acl_entry.role, gdata.acl.data.AclRole))
self.assert_(isinstance(new_acl_entry.scope, gdata.acl.data.AclScope))
self.assertEqual(new_acl_entry.scope.value, self.acl_entry.scope.value)
self.assertEqual(new_acl_entry.scope.type, self.acl_entry.scope.type)
self.assertEqual(new_acl_entry.role.value, self.acl_entry.role.value)
def testToAndFromStringWithKey(self):
self.assert_(isinstance(self.acl_entry_withkey, gdata.docs.data.AclEntry))
self.assert_(self.acl_entry_withkey.role is None)
self.assert_(isinstance(self.acl_entry_withkey.with_key,
gdata.acl.data.AclWithKey))
self.assert_(isinstance(self.acl_entry_withkey.with_key.role,
gdata.acl.data.AclRole))
self.assert_(isinstance(self.acl_entry_withkey.scope,
gdata.acl.data.AclScope))
self.assertEqual(self.acl_entry_withkey.with_key.key, 'somekey')
self.assertEqual(self.acl_entry_withkey.with_key.role.value, 'writer')
self.assertEqual(self.acl_entry_withkey.scope.value, 'example.com')
self.assertEqual(self.acl_entry_withkey.scope.type, 'domain')
acl_entry_withkey_str = str(self.acl_entry_withkey)
new_acl_entry_withkey = atom.core.parse(acl_entry_withkey_str,
gdata.docs.data.AclEntry)
self.assert_(isinstance(new_acl_entry_withkey, gdata.docs.data.AclEntry))
self.assert_(new_acl_entry_withkey.role is None)
self.assert_(isinstance(new_acl_entry_withkey.with_key,
gdata.acl.data.AclWithKey))
self.assert_(isinstance(new_acl_entry_withkey.with_key.role,
gdata.acl.data.AclRole))
self.assert_(isinstance(new_acl_entry_withkey.scope,
gdata.acl.data.AclScope))
self.assertEqual(new_acl_entry_withkey.with_key.key,
self.acl_entry_withkey.with_key.key)
self.assertEqual(new_acl_entry_withkey.with_key.role.value,
self.acl_entry_withkey.with_key.role.value)
self.assertEqual(new_acl_entry_withkey.scope.value,
self.acl_entry_withkey.scope.value)
self.assertEqual(new_acl_entry_withkey.scope.type,
self.acl_entry_withkey.scope.type)
def testCreateNewAclEntry(self):
cat = gdata.atom.Category(
term='http://schemas.google.com/acl/2007#accessRule',
scheme='http://schemas.google.com/g/2005#kind')
acl_entry = gdata.docs.DocumentListAclEntry(category=[cat])
acl_entry.scope = gdata.docs.Scope(value='user@gmail.com', type='user')
acl_entry.role = gdata.docs.Role(value='writer')
self.assert_(isinstance(acl_entry, gdata.docs.DocumentListAclEntry))
self.assert_(isinstance(acl_entry.role, gdata.docs.Role))
self.assert_(isinstance(acl_entry.scope, gdata.docs.Scope))
self.assertEqual(acl_entry.scope.value, 'user@gmail.com')
self.assertEqual(acl_entry.scope.type, 'user')
self.assertEqual(acl_entry.role.value, 'writer')
def testAdditionalRole(self):
self.assertEqual(
self.acl_entry_additional_role.additional_role.value,
'commenter')
self.assertEqual(
self.acl_entry_additional_role.with_key.additional_role.value,
'commenter')
class AclFeedTest(unittest.TestCase):
def setUp(self):
self.feed = atom.core.parse(test_data.DOCUMENT_LIST_ACL_FEED,
gdata.docs.data.AclFeed)
def testToAndFromString(self):
for entry in self.feed.entry:
self.assert_(isinstance(entry, gdata.docs.data.AclEntry))
feed = atom.core.parse(str(self.feed), gdata.docs.data.AclFeed)
for entry in feed.entry:
self.assert_(isinstance(entry, gdata.docs.data.AclEntry))
def testConvertActualData(self):
entries = self.feed.entry
self.assert_(len(entries) == 2)
self.assertEqual(entries[0].title.text,
'Document Permission - user@gmail.com')
self.assertEqual(entries[0].role.value, 'owner')
self.assertEqual(entries[0].scope.type, 'user')
self.assertEqual(entries[0].scope.value, 'user@gmail.com')
self.assert_(entries[0].GetSelfLink() is not None)
self.assert_(entries[0].GetEditLink() is not None)
self.assertEqual(entries[1].title.text,
'Document Permission - user2@google.com')
self.assertEqual(entries[1].role.value, 'writer')
self.assertEqual(entries[1].scope.type, 'domain')
self.assertEqual(entries[1].scope.value, 'google.com')
self.assert_(entries[1].GetSelfLink() is not None)
self.assert_(entries[1].GetEditLink() is not None)
class RevisionFeedTest(unittest.TestCase):
def setUp(self):
self.feed = atom.core.parse(test_data.DOCUMENT_LIST_REVISION_FEED,
gdata.docs.data.RevisionFeed)
def testToAndFromString(self):
for entry in self.feed.entry:
self.assert_(isinstance(entry, gdata.docs.data.Revision))
feed = atom.core.parse(str(self.feed), gdata.docs.data.RevisionFeed)
for entry in feed.entry:
self.assert_(isinstance(entry, gdata.docs.data.Revision))
def testConvertActualData(self):
entries = self.feed.entry
self.assert_(len(entries) == 1)
self.assertEqual(entries[0].title.text, 'Revision 2')
self.assertEqual(entries[0].publish.value, 'true')
self.assertEqual(entries[0].publish_auto.value, 'true')
self.assertEqual(entries[0].publish_outside_domain.value, 'false')
self.assertEqual(
entries[0].GetPublishLink().href,
'https://docs.google.com/View?docid=dfr4&pageview=1&hgd=1')
self.assertEqual(
entries[0].FindPublishLink(),
'https://docs.google.com/View?docid=dfr4&pageview=1&hgd=1')
class DataClassSanityTest(unittest.TestCase):
def test_basic_element_structure(self):
conf.check_data_classes(self, [
gdata.docs.data.ResourceId, gdata.docs.data.LastModifiedBy,
gdata.docs.data.LastViewed, gdata.docs.data.WritersCanInvite,
gdata.docs.data.QuotaBytesUsed, gdata.docs.data.Publish,
gdata.docs.data.PublishAuto, gdata.docs.data.PublishOutsideDomain,
gdata.docs.data.Resource, gdata.docs.data.AclEntry, gdata.docs.data.AclFeed,
gdata.docs.data.ResourceFeed, gdata.docs.data.Revision,
gdata.docs.data.RevisionFeed])
class CategoryTest(unittest.TestCase):
def setUp(self):
self.entry = atom.core.parse(test_data.DOCUMENT_LIST_ENTRY_V3,
gdata.docs.data.Resource)
def testAddCategory(self):
entry = gdata.docs.data.Resource()
entry.AddCategory('test_scheme', 'test_term', 'test_label')
self.assertEqual(entry.GetFirstCategory('test_scheme').scheme,
'test_scheme')
self.assertEqual(entry.GetFirstCategory('test_scheme').term, 'test_term')
self.assertEqual(entry.GetFirstCategory('test_scheme').label, 'test_label')
def testGetFirstCategory(self):
entry = gdata.docs.data.Resource()
cat1 = entry.AddCategory('test_scheme', 'test_term1', 'test_label1')
cat2 = entry.AddCategory('test_scheme', 'test_term2', 'test_label2')
self.assertEqual(entry.GetFirstCategory('test_scheme'), cat1)
def testGetCategories(self):
cat1 = self.entry.AddCategory('test_scheme', 'test_term1', 'test_label1')
cat2 = self.entry.AddCategory('test_scheme', 'test_term2', 'test_label2')
cats = list(self.entry.GetCategories('test_scheme'))
self.assertTrue(cat1 in cats)
self.assertTrue(cat2 in cats)
def testRemoveCategories(self):
self.entry.RemoveCategories(gdata.docs.data.LABELS_SCHEME)
self.assertEqual(self.entry.GetLabels(), set())
def testResourceType(self):
entry = gdata.docs.data.Resource('spreadsheet')
self.assertEqual(self.entry.GetResourceType(), 'spreadsheet')
def testGetResourceType(self):
self.assertEqual(self.entry.GetResourceType(), 'spreadsheet')
def testSetResourceType(self):
self.assertEqual(self.entry.GetResourceType(), 'spreadsheet')
self.entry.SetResourceType('drawing')
self.assertEqual(self.entry.GetResourceType(), 'drawing')
def testGetLabels(self):
self.assertEqual(self.entry.GetLabels(),
set(['mine', 'private', 'restricted-download',
'shared-with-domain', 'viewed', 'starred', 'hidden',
'trashed']))
def testAddLabel(self):
entry = gdata.docs.data.Resource()
entry.AddLabel('banana')
self.assertTrue('banana' in entry.GetLabels())
def testRemoveLabel(self):
entry = gdata.docs.data.Resource()
entry.AddLabel('banana')
entry.AddLabel('orange')
self.assertTrue('banana' in entry.GetLabels())
self.assertTrue('orange' in entry.GetLabels())
entry.RemoveLabel('orange')
self.assertFalse('orange' in entry.GetLabels())
def testIsHidden(self):
self.assertTrue(self.entry.IsHidden())
def testIsNotHidden(self):
self.entry.remove_categories(gdata.docs.data.LABELS_SCHEME)
self.assertFalse(self.entry.IsHidden())
def testIsViewed(self):
self.assertTrue(self.entry.IsViewed())
def testIsNotViewed(self):
self.entry.remove_categories(gdata.docs.data.LABELS_SCHEME)
self.assertFalse(self.entry.IsViewed())
def testIsStarred(self):
self.assertTrue(self.entry.IsStarred())
def testIsNotStarred(self):
self.entry.remove_categories(gdata.docs.data.LABELS_SCHEME)
self.assertFalse(self.entry.IsStarred())
def testIsTrashed(self):
self.assertTrue(self.entry.IsTrashed())
def testIsNotTrashed(self):
self.entry.remove_categories(gdata.docs.data.LABELS_SCHEME)
self.assertFalse(self.entry.IsTrashed())
def testIsPrivate(self):
self.assertTrue(self.entry.IsPrivate())
def testIsNotPrivate(self):
self.entry.remove_categories(gdata.docs.data.LABELS_SCHEME)
self.assertFalse(self.entry.IsPrivate())
def testIsMine(self):
self.assertTrue(self.entry.IsMine())
def testIsNotMine(self):
self.entry.remove_categories(gdata.docs.data.LABELS_SCHEME)
self.assertFalse(self.entry.IsMine())
def testIsSharedWithDomain(self):
self.assertTrue(self.entry.IsSharedWithDomain())
def testIsNotSharedWithDomain(self):
self.entry.remove_categories(gdata.docs.data.LABELS_SCHEME)
self.assertFalse(self.entry.IsSharedWithDomain())
def testIsRestrictedDownload(self):
self.assertTrue(self.entry.IsRestrictedDownload())
def testIsNotRestrictedDownload(self):
self.entry.remove_categories(gdata.docs.data.LABELS_SCHEME)
self.assertFalse(self.entry.IsRestrictedDownload())
class MetadataTest(unittest.TestCase):
def setUp(self):
self.entry = atom.core.parse(test_data.DOCUMENT_LIST_METADATA,
gdata.docs.data.Metadata)
def testAdditionalRoleInfo(self):
self.assertEqual(self.entry.additional_role_info[0].kind, 'document')
def testAdditionalRoleSet(self):
self.assertEqual(
self.entry.additional_role_info[0].additional_role_set[0].primaryRole,
'reader')
def testAdditionalRole(self):
self.assertEqual(
self.entry.additional_role_info[0].additional_role_set[0].\
additional_role[0].value, 'commenter')
def suite():
return conf.build_suite(
[DataClassSanityTest, CategoryTest, DocsEntryTest,
AclTest, AclFeedTest, RevisionFeedTest, MetadataTest])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
RobinQuetin/CAIRIS-web | cairis/cairis/InternalDocumentDialog.py | 1 | 3183 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import armid
from InternalDocumentPanel import InternalDocumentPanel
from InternalDocumentParameters import InternalDocumentParameters
import DialogClassParameters
class InternalDocumentDialog(wx.Dialog):
def __init__(self,parent,parameters):
wx.Dialog.__init__(self,parent,parameters.id(),parameters.label(),style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(400,300))
self.theName = ''
self.theDescription = ''
self.theContent = ''
self.theCodes = {}
self.theMemos = {}
self.theId = -1
self.panel = 0
self.buildControls(parameters)
self.commitVerb = 'Add'
def buildControls(self,parameters):
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.panel = InternalDocumentPanel(self)
self.panel.buildControls(parameters.createFlag())
mainSizer.Add(self.panel,1,wx.EXPAND)
self.SetSizer(mainSizer)
wx.EVT_BUTTON(self,armid.INTERNALDOCUMENT_BUTTONCOMMIT_ID,self.onCommit)
def load(self,objt):
self.theId = objt.id()
self.panel.loadControls(objt)
self.commitVerb = 'Edit'
def onCommit(self,evt):
commitLabel = self.commitVerb + ' internal document'
nameCtrl = self.FindWindowById(armid.INTERNALDOCUMENT_TEXTNAME_ID)
descCtrl = self.FindWindowById(armid.INTERNALDOCUMENT_TEXTDESCRIPTION_ID)
contCtrl = self.FindWindowById(armid.INTERNALDOCUMENT_TEXTCONTENT_ID)
self.theName = nameCtrl.GetValue()
self.theDescription = descCtrl.GetValue()
self.theContent = contCtrl.GetValue()
self.theCodes = contCtrl.codes()
self.theMemos = contCtrl.memos()
if len(self.theName) == 0:
dlg = wx.MessageDialog(self,'Name cannot be empty',commitLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
if len(self.theDescription) == 0:
dlg = wx.MessageDialog(self,'Description cannot be empty',commitLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
elif (len(self.theContent) == 0):
dlg = wx.MessageDialog(self,'Content cannot be empty',commitLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
else:
self.EndModal(armid.INTERNALDOCUMENT_BUTTONCOMMIT_ID)
def parameters(self):
parameters = InternalDocumentParameters(self.theName,self.theDescription,self.theContent,self.theCodes,self.theMemos)
parameters.setId(self.theId)
return parameters
| apache-2.0 |
fafaman/django | django/forms/forms.py | 27 | 19335 | """
Form classes
"""
from __future__ import unicode_literals
import copy
from collections import OrderedDict
from django.core.exceptions import NON_FIELD_ERRORS, ValidationError
from django.forms.fields import Field, FileField
# pretty_name is imported for backwards compatibility in Django 1.9
from django.forms.utils import ErrorDict, ErrorList, pretty_name # NOQA
from django.forms.widgets import Media, MediaDefiningClass
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.html import conditional_escape, html_safe
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
__all__ = ('BaseForm', 'Form')
class DeclarativeFieldsMetaclass(MediaDefiningClass):
"""
Metaclass that collects Fields declared on the base classes.
"""
def __new__(mcs, name, bases, attrs):
# Collect fields from current class.
current_fields = []
for key, value in list(attrs.items()):
if isinstance(value, Field):
current_fields.append((key, value))
attrs.pop(key)
current_fields.sort(key=lambda x: x[1].creation_counter)
attrs['declared_fields'] = OrderedDict(current_fields)
new_class = (super(DeclarativeFieldsMetaclass, mcs)
.__new__(mcs, name, bases, attrs))
# Walk through the MRO.
declared_fields = OrderedDict()
for base in reversed(new_class.__mro__):
# Collect fields from base class.
if hasattr(base, 'declared_fields'):
declared_fields.update(base.declared_fields)
# Field shadowing.
for attr, value in base.__dict__.items():
if value is None and attr in declared_fields:
declared_fields.pop(attr)
new_class.base_fields = declared_fields
new_class.declared_fields = declared_fields
return new_class
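# Effect of the metaclass, as a sketch (CharField/EmailField come from
# django.forms; this assumes the Form class defined later in this module):
# class NameForm(Form):
# name = CharField()
# email = EmailField()
# list(NameForm.base_fields) == ['name', 'email'] # declaration order kept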
@html_safe
@python_2_unicode_compatible
class BaseForm(object):
# This is the main implementation of all the Form logic. Note that this
# class is different from Form. See the comments by the Form class for more
# information. Any improvements to the form API should be made to *this*
# class, not to the Form class.
field_order = None
prefix = None
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False, field_order=None):
self.is_bound = data is not None or files is not None
self.data = data or {}
self.files = files or {}
self.auto_id = auto_id
if prefix is not None:
self.prefix = prefix
self.initial = initial or {}
self.error_class = error_class
# Translators: This is the default suffix added to form field labels
self.label_suffix = label_suffix if label_suffix is not None else _(':')
self.empty_permitted = empty_permitted
self._errors = None # Stores the errors after clean() has been called.
# The base_fields class attribute is the *class-wide* definition of
# fields. Because a particular *instance* of the class might want to
# alter self.fields, we create self.fields here by copying base_fields.
# Instances should always modify self.fields; they should not modify
# self.base_fields.
self.fields = copy.deepcopy(self.base_fields)
self._bound_fields_cache = {}
self.order_fields(self.field_order if field_order is None else field_order)
def order_fields(self, field_order):
"""
Rearranges the fields according to field_order.
field_order is a list of field names specifying the order. Fields not
included in the list are appended in the default order for backward
compatibility with subclasses not overriding field_order. If field_order
is None, all fields are kept in the order defined in the class.
Unknown fields in field_order are ignored to allow disabling fields in
form subclasses without redefining ordering.
"""
if field_order is None:
return
fields = OrderedDict()
for key in field_order:
try:
fields[key] = self.fields.pop(key)
except KeyError: # ignore unknown fields
pass
fields.update(self.fields) # add remaining fields in original order
self.fields = fields
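# e.g. with fields declared as ['name', 'email', 'age'],
# field_order=['age', 'unknown'] yields ['age', 'name', 'email'].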
def __str__(self):
return self.as_table()
def __repr__(self):
if self._errors is None:
is_valid = "Unknown"
else:
is_valid = self.is_bound and not bool(self._errors)
return '<%(cls)s bound=%(bound)s, valid=%(valid)s, fields=(%(fields)s)>' % {
'cls': self.__class__.__name__,
'bound': self.is_bound,
'valid': is_valid,
'fields': ';'.join(self.fields),
}
def __iter__(self):
for name in self.fields:
yield self[name]
def __getitem__(self, name):
"Returns a BoundField with the given name."
try:
field = self.fields[name]
except KeyError:
raise KeyError(
"Key %r not found in '%s'" % (name, self.__class__.__name__))
if name not in self._bound_fields_cache:
self._bound_fields_cache[name] = field.get_bound_field(self, name)
return self._bound_fields_cache[name]
@property
def errors(self):
"Returns an ErrorDict for the data provided for the form"
if self._errors is None:
self.full_clean()
return self._errors
def is_valid(self):
"""
Returns True if the form has no errors. Otherwise, False. If errors are
being ignored, returns False.
"""
return self.is_bound and not self.errors
def add_prefix(self, field_name):
"""
Returns the field name with a prefix appended, if this Form has a
prefix set.
Subclasses may wish to override.
"""
return '%s-%s' % (self.prefix, field_name) if self.prefix else field_name
def add_initial_prefix(self, field_name):
"""
Add an 'initial' prefix for checking dynamic initial values
"""
return 'initial-%s' % self.add_prefix(field_name)
def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
"Helper function for outputting HTML. Used by as_table(), as_ul(), as_p()."
top_errors = self.non_field_errors() # Errors that should be displayed above all fields.
output, hidden_fields = [], []
for name, field in self.fields.items():
html_class_attr = ''
bf = self[name]
# Escape and cache in local variable.
bf_errors = self.error_class([conditional_escape(error) for error in bf.errors])
if bf.is_hidden:
if bf_errors:
top_errors.extend(
[_('(Hidden field %(name)s) %(error)s') % {'name': name, 'error': force_text(e)}
for e in bf_errors])
hidden_fields.append(six.text_type(bf))
else:
# Create a 'class="..."' attribute if the row should have any
# CSS classes applied.
css_classes = bf.css_classes()
if css_classes:
html_class_attr = ' class="%s"' % css_classes
if errors_on_separate_row and bf_errors:
output.append(error_row % force_text(bf_errors))
if bf.label:
label = conditional_escape(force_text(bf.label))
label = bf.label_tag(label) or ''
else:
label = ''
if field.help_text:
help_text = help_text_html % force_text(field.help_text)
else:
help_text = ''
output.append(normal_row % {
'errors': force_text(bf_errors),
'label': force_text(label),
'field': six.text_type(bf),
'help_text': help_text,
'html_class_attr': html_class_attr,
'css_classes': css_classes,
'field_name': bf.html_name,
})
if top_errors:
output.insert(0, error_row % force_text(top_errors))
if hidden_fields: # Insert any hidden fields in the last row.
str_hidden = ''.join(hidden_fields)
if output:
last_row = output[-1]
# Chop off the trailing row_ender (e.g. '</td></tr>') and
# insert the hidden fields.
if not last_row.endswith(row_ender):
# This can happen in the as_p() case (and possibly others
# that users write): if there are only top errors, we may
# not be able to conscript the last row for our purposes,
# so insert a new, empty row.
last_row = (normal_row % {
'errors': '',
'label': '',
'field': '',
'help_text': '',
'html_class_attr': html_class_attr,
'css_classes': '',
'field_name': '',
})
output.append(last_row)
output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
else:
# If there aren't any rows in the output, just append the
# hidden fields.
output.append(str_hidden)
return mark_safe('\n'.join(output))
def as_table(self):
"Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
return self._html_output(
normal_row='<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>',
error_row='<tr><td colspan="2">%s</td></tr>',
row_ender='</td></tr>',
help_text_html='<br /><span class="helptext">%s</span>',
errors_on_separate_row=False)
def as_ul(self):
"Returns this form rendered as HTML <li>s -- excluding the <ul></ul>."
return self._html_output(
normal_row='<li%(html_class_attr)s>%(errors)s%(label)s %(field)s%(help_text)s</li>',
error_row='<li>%s</li>',
row_ender='</li>',
help_text_html=' <span class="helptext">%s</span>',
errors_on_separate_row=False)
def as_p(self):
"Returns this form rendered as HTML <p>s."
return self._html_output(
normal_row='<p%(html_class_attr)s>%(label)s %(field)s%(help_text)s</p>',
error_row='%s',
row_ender='</p>',
help_text_html=' <span class="helptext">%s</span>',
errors_on_separate_row=True)
def non_field_errors(self):
"""
Returns an ErrorList of errors that aren't associated with a particular
field -- i.e., from Form.clean(). Returns an empty ErrorList if there
are none.
"""
return self.errors.get(NON_FIELD_ERRORS, self.error_class(error_class='nonfield'))
def add_error(self, field, error):
"""
Update the content of `self._errors`.
The `field` argument is the name of the field to which the errors
should be added. If its value is None the errors will be treated as
NON_FIELD_ERRORS.
The `error` argument can be a single error, a list of errors, or a
dictionary that maps field names to lists of errors. What we define as
an "error" can be either a simple string or an instance of
ValidationError with its message attribute set and what we define as
list or dictionary can be an actual `list` or `dict` or an instance
of ValidationError with its `error_list` or `error_dict` attribute set.
If `error` is a dictionary, the `field` argument *must* be None and
errors will be added to the fields that correspond to the keys of the
dictionary.
"""
if not isinstance(error, ValidationError):
# Normalize to ValidationError and let its constructor
# do the hard work of making sense of the input.
error = ValidationError(error)
if hasattr(error, 'error_dict'):
if field is not None:
raise TypeError(
"The argument `field` must be `None` when the `error` "
"argument contains errors for multiple fields."
)
else:
error = error.error_dict
else:
error = {field or NON_FIELD_ERRORS: error.error_list}
for field, error_list in error.items():
if field not in self.errors:
if field != NON_FIELD_ERRORS and field not in self.fields:
raise ValueError(
"'%s' has no field named '%s'." % (self.__class__.__name__, field))
if field == NON_FIELD_ERRORS:
self._errors[field] = self.error_class(error_class='nonfield')
else:
self._errors[field] = self.error_class()
self._errors[field].extend(error_list)
if field in self.cleaned_data:
del self.cleaned_data[field]
def has_error(self, field, code=None):
if code is None:
return field in self.errors
if field in self.errors:
for error in self.errors.as_data()[field]:
if error.code == code:
return True
return False
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self.cleaned_data.
"""
self._errors = ErrorDict()
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
# If the form is permitted to be empty, and none of the form data has
# changed from the initial data, short circuit any validation.
if self.empty_permitted and not self.has_changed():
return
self._clean_fields()
self._clean_form()
self._post_clean()
def _clean_fields(self):
for name, field in self.fields.items():
# value_from_datadict() gets the data from the data dictionaries.
# Each widget type knows how to retrieve its own data, because some
# widgets split data over several HTML fields.
if field.disabled:
value = self.initial.get(name, field.initial)
else:
value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name))
try:
if isinstance(field, FileField):
initial = self.initial.get(name, field.initial)
value = field.clean(value, initial)
else:
value = field.clean(value)
self.cleaned_data[name] = value
if hasattr(self, 'clean_%s' % name):
value = getattr(self, 'clean_%s' % name)()
self.cleaned_data[name] = value
except ValidationError as e:
self.add_error(name, e)
def _clean_form(self):
try:
cleaned_data = self.clean()
except ValidationError as e:
self.add_error(None, e)
else:
if cleaned_data is not None:
self.cleaned_data = cleaned_data
def _post_clean(self):
"""
An internal hook for performing additional cleaning after form cleaning
is complete. Used for model validation in model forms.
"""
pass
def clean(self):
"""
Hook for doing any extra form-wide cleaning after Field.clean() has been
called on every field. Any ValidationError raised by this method will
not be associated with a particular field; it will have a special-case
association with the field named '__all__'.
"""
return self.cleaned_data
def has_changed(self):
"""
Returns True if data differs from initial.
"""
return bool(self.changed_data)
@cached_property
def changed_data(self):
data = []
for name, field in self.fields.items():
prefixed_name = self.add_prefix(name)
data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
if not field.show_hidden_initial:
initial_value = self.initial.get(name, field.initial)
if callable(initial_value):
initial_value = initial_value()
else:
initial_prefixed_name = self.add_initial_prefix(name)
hidden_widget = field.hidden_widget()
try:
initial_value = field.to_python(hidden_widget.value_from_datadict(
self.data, self.files, initial_prefixed_name))
except ValidationError:
# Always assume data has changed if validation fails.
data.append(name)
continue
if field.has_changed(initial_value, data_value):
data.append(name)
return data
@property
def media(self):
"""
Provide a description of all media required to render the widgets on this form
"""
media = Media()
for field in self.fields.values():
media = media + field.widget.media
return media
def is_multipart(self):
"""
Returns True if the form needs to be multipart-encoded, i.e., it has
a FileInput. Otherwise, False.
"""
for field in self.fields.values():
if field.widget.needs_multipart_form:
return True
return False
def hidden_fields(self):
"""
Returns a list of all the BoundField objects that are hidden fields.
Useful for manual form layout in templates.
"""
return [field for field in self if field.is_hidden]
def visible_fields(self):
"""
Returns a list of BoundField objects that aren't hidden fields.
The opposite of the hidden_fields() method.
"""
return [field for field in self if not field.is_hidden]
class Form(six.with_metaclass(DeclarativeFieldsMetaclass, BaseForm)):
"A collection of Fields, plus their associated data."
# This is a separate class from BaseForm in order to abstract the way
# self.fields is specified. This class (Form) is the one that does the
# fancy metaclass stuff purely for the semantic sugar -- it allows one
# to define a form using declarative syntax.
# BaseForm itself has no way of designating self.fields.
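# Hedged usage sketch (not part of Django itself): a tiny form exercising
# add_error(), non_field_errors() and as_p(). `ContactForm` is hypothetical,
# and settings.configure() is only needed when running this file standalone.
if __name__ == '__main__':
    from django.conf import settings
    if not settings.configured:
        settings.configure()
    from django import forms

    class ContactForm(forms.Form):
        email = forms.EmailField()

        def clean(self):
            cleaned = super(ContactForm, self).clean()
            # A field=None error lands under NON_FIELD_ERRORS ('__all__').
            self.add_error(None, 'form-wide problem (example)')
            return cleaned

    f = ContactForm(data={'email': 'not-an-email'})
    print(f.is_valid())          # False: a field error plus the form-wide one
    print(f.non_field_errors())  # the error added with field=None
    print(f.as_p())              # as_p() renders errors on separate rows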
| bsd-3-clause |
petesburgh/or-tools | examples/python/knapsack_cp.py | 34 | 2309 | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Knapsack problem in Google CP Solver.
Simple knapsack problem.
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.constraint_solver import pywrapcp
def knapsack(solver, values, weights, n):
z = solver.IntVar(0, 10000)
x = [solver.IntVar(0, 1, "x(%i)" % i) for i in range(len(values))]
solver.Add(z >= 0)
solver.Add(z == solver.ScalProd(x, values))
solver.Add(solver.ScalProd(x, weights) <= n)
return [x, z]
def main(values, weights, n):
# Create the solver.
solver = pywrapcp.Solver("knapsack")  # label fixed: this model is a knapsack, not n-queens
#
# data
#
print "values:", values
print "weights:", weights
print "n:", n
print
# declare variables
#
# constraints
#
[x, z] = knapsack(solver, values, weights, n)
# objective
objective = solver.Maximize(z, 1)
#
# solution and search
#
solution = solver.Assignment()
solution.Add(x)
solution.Add(z)
# db: DecisionBuilder
db = solver.Phase(x,
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MAX_VALUE)
solver.NewSearch(db, [objective])
num_solutions = 0
while solver.NextSolution():
print "x:", [x[i].Value() for i in range(len(values))]
print "z:", z.Value()
print
num_solutions += 1
solver.EndSearch()
print
print "num_solutions:", num_solutions
print "failures:", solver.Failures()
print "branches:", solver.Branches()
print "WallTime:", solver.WallTime()
values = [15, 100, 90, 60, 40, 15, 10, 1, 12, 12, 100]
weights = [2, 20, 20, 30, 40, 30, 60, 10, 21, 12, 2]
n = 102
if __name__ == "__main__":
main(values, weights, n)
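# Hedged cross-check, not part of the CP model above: a classic 0/1-knapsack
# dynamic program over capacities; its optimum should match the CP solver's
# final z for the same values/weights/n.
def knapsack_dp(values, weights, capacity):
    best = [0] * (capacity + 1)
    for v, w in zip(values, weights):
        for c in range(capacity, w - 1, -1):  # iterate downwards: each item used at most once
            best[c] = max(best[c], best[c - w] + v)
    return best[capacity]
# e.g. knapsack_dp(values, weights, n) returns the optimal total value.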
| apache-2.0 |
bzbarsky/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/http_header_util.py | 694 | 6905 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for parsing and formatting headers that follow the grammar defined
in HTTP RFC http://www.ietf.org/rfc/rfc2616.txt.
"""
import urlparse
_SEPARATORS = '()<>@,;:\\"/[]?={} \t'
def _is_char(c):
"""Returns true iff c is in CHAR as specified in HTTP RFC."""
return ord(c) <= 127
def _is_ctl(c):
"""Returns true iff c is in CTL as specified in HTTP RFC."""
return ord(c) <= 31 or ord(c) == 127
class ParsingState(object):
def __init__(self, data):
self.data = data
self.head = 0
def peek(state, pos=0):
"""Peeks the character at pos from the head of data."""
if state.head + pos >= len(state.data):
return None
return state.data[state.head + pos]
def consume(state, amount=1):
"""Consumes specified amount of bytes from the head and returns the
consumed bytes. If there's not enough bytes to consume, returns None.
"""
if state.head + amount > len(state.data):
return None
result = state.data[state.head:state.head + amount]
state.head = state.head + amount
return result
def consume_string(state, expected):
"""Given a parsing state and a expected string, consumes the string from
the head. Returns True if consumed successfully. Otherwise, returns
False.
"""
pos = 0
for c in expected:
if c != peek(state, pos):
return False
pos += 1
consume(state, pos)
return True
def consume_lws(state):
"""Consumes a LWS from the head. Returns True if any LWS is consumed.
Otherwise, returns False.
LWS = [CRLF] 1*( SP | HT )
"""
original_head = state.head
consume_string(state, '\r\n')
pos = 0
while True:
c = peek(state, pos)
if c == ' ' or c == '\t':
pos += 1
else:
if pos == 0:
state.head = original_head
return False
else:
consume(state, pos)
return True
def consume_lwses(state):
"""Consumes *LWS from the head."""
while consume_lws(state):
pass
def consume_token(state):
"""Consumes a token from the head. Returns the token or None if no token
was found.
"""
pos = 0
while True:
c = peek(state, pos)
if c is None or c in _SEPARATORS or _is_ctl(c) or not _is_char(c):
if pos == 0:
return None
return consume(state, pos)
else:
pos += 1
def consume_token_or_quoted_string(state):
"""Consumes a token or a quoted-string, and returns the token or unquoted
string. If no token or quoted-string was found, returns None.
"""
original_head = state.head
if not consume_string(state, '"'):
return consume_token(state)
result = []
expect_quoted_pair = False
while True:
if not expect_quoted_pair and consume_lws(state):
result.append(' ')
continue
c = consume(state)
if c is None:
# quoted-string is not enclosed with double quotation
state.head = original_head
return None
elif expect_quoted_pair:
expect_quoted_pair = False
if _is_char(c):
result.append(c)
else:
# Non CHAR character found in quoted-pair
state.head = original_head
return None
elif c == '\\':
expect_quoted_pair = True
elif c == '"':
return ''.join(result)
elif _is_ctl(c):
# Invalid character %r found in qdtext
state.head = original_head
return None
else:
result.append(c)
def quote_if_necessary(s):
"""Quotes arbitrary string into quoted-string."""
quote = False
if s == '':
return '""'
result = []
for c in s:
if c == '"' or c in _SEPARATORS or _is_ctl(c) or not _is_char(c):
quote = True
if c == '"' or _is_ctl(c):
result.append('\\' + c)
else:
result.append(c)
if quote:
return '"' + ''.join(result) + '"'
else:
return ''.join(result)
def parse_uri(uri):
"""Parse absolute URI then return host, port and resource."""
parsed = urlparse.urlsplit(uri)
if parsed.scheme != 'wss' and parsed.scheme != 'ws':
# |uri| must be a relative URI.
# TODO(toyoshim): Should validate |uri|.
return None, None, uri
if parsed.hostname is None:
return None, None, None
port = None
try:
port = parsed.port
except ValueError, e:
# The port property raises ValueError on an invalid null port description like
# 'ws://host:/path'.
return None, None, None
if port is None:
if parsed.scheme == 'ws':
port = 80
else:
port = 443
path = parsed.path
if not path:
path += '/'
if parsed.query:
path += '?' + parsed.query
if parsed.fragment:
path += '#' + parsed.fragment
return parsed.hostname, port, path
try:
urlparse.uses_netloc.index('ws')
except ValueError, e:
# urlparse in Python2.5.1 doesn't have 'ws' and 'wss' entries.
urlparse.uses_netloc.append('ws')
urlparse.uses_netloc.append('wss')
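def _demo():
    # Hedged usage sketch (illustrative values only): unquote a quoted-string
    # and split a WebSocket URI with the helpers above.
    state = ParsingState('"a\\"b" tail')
    print consume_token_or_quoted_string(state)   # -> 'a"b'
    print quote_if_necessary('a"b')               # -> '"a\\"b"'
    print parse_uri('ws://example.com/chat?x=1')  # -> ('example.com', 80, '/chat?x=1')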
# vi:sts=4 sw=4 et
| mpl-2.0 |
NunoEdgarGub1/nupic | nupic/support/pymysqlhelpers.py | 8 | 4303 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Helper utilities for python scripts that use pymysql
import inspect
import logging
from socket import error as socket_error
import pymysql
from pymysql.constants import ER
from nupic.support.decorators import retry as make_retry_decorator
# Client mysql error codes of interest; pymysql didn't have constants for these
# at the time of this writing.
# (per https://dev.mysql.com/doc/refman/5.5/en/error-messages-client.html)
CR_CONNECTION_ERROR = 2002
""" Can't connect to local MySQL server through socket '%s' (%d) """
CR_CONN_HOST_ERROR = 2003
""" Can't connect to MySQL server on '%s' (%d) """
CR_UNKNOWN_HOST = 2005
""" Unknown MySQL server host '%s' (%d) """
CR_SERVER_GONE_ERROR = 2006
""" MySQL server has gone away """
CR_TCP_CONNECTION = 2011
""" %s via TCP/IP """
CR_SERVER_HANDSHAKE_ERR = 2012
""" Error in server handshake """
CR_SERVER_LOST = 2013
""" Lost connection to MySQL server during query """
CR_SERVER_LOST_EXTENDED = 2055
""" Lost connection to MySQL server at '%s', system error: %d """
_RETRIABLE_CLIENT_ERROR_CODES = (
CR_CONNECTION_ERROR,
CR_CONN_HOST_ERROR,
CR_UNKNOWN_HOST,
CR_SERVER_GONE_ERROR,
CR_TCP_CONNECTION,
CR_SERVER_HANDSHAKE_ERR,
CR_SERVER_LOST,
CR_SERVER_LOST_EXTENDED,
)
_RETRIABLE_SERVER_ERROR_CODES = (
ER.TABLE_DEF_CHANGED,
ER.LOCK_WAIT_TIMEOUT,
ER.LOCK_DEADLOCK,
#Maybe these also?
# ER_TOO_MANY_DELAYED_THREADS
# ER_BINLOG_PURGE_EMFILE
# ER_TOO_MANY_CONCURRENT_TRXS
# ER_CON_COUNT_ERROR
# ER_OUTOFMEMORY
)
_ALL_RETRIABLE_ERROR_CODES = set(_RETRIABLE_CLIENT_ERROR_CODES +
_RETRIABLE_SERVER_ERROR_CODES)
def retrySQL(timeoutSec=60*5, getLoggerCallback=logging.getLogger):
""" Return a closure suitable for use as a decorator for
retrying a pymysql DAO function on certain failures that warrant retries (
e.g., RDS/MySQL server down temporarily, transaction deadlock, etc.).
We share this function across multiple scripts (e.g., ClientJobsDAO,
StreamMgr) for consistent behavior.
NOTE: please ensure that the operation being retried is idempotent.
timeoutSec: How many seconds from time of initial call to stop retrying
(floating point)
getLoggerCallback:
user-supplied callback function that takes no args and
returns the logger instance to use for logging.
Usage Example:
NOTE: logging must be initialized *before* any loggers are created, else
there will be no output; see nupic.support.initLogging()
@retrySQL()
def jobInfo(self, jobID):
...
"""
def retryFilter(e, args, kwargs):
if isinstance(e, (pymysql.InternalError, pymysql.OperationalError)):
if e.args and e.args[0] in _ALL_RETRIABLE_ERROR_CODES:
return True
elif isinstance(e, pymysql.Error):
if (e.args and
inspect.isclass(e.args[0]) and issubclass(e.args[0], socket_error)):
return True
return False
retryExceptions = tuple([
pymysql.InternalError,
pymysql.OperationalError,
pymysql.Error,
])
return make_retry_decorator(
timeoutSec=timeoutSec, initialRetryDelaySec=0.1, maxRetryDelaySec=10,
retryExceptions=retryExceptions, retryFilter=retryFilter,
getLoggerCallback=getLoggerCallback)
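# Hedged usage sketch -- `fetch_job_row` and its table are illustrative only,
# not part of NuPIC. The decorated call is retried for up to 30 seconds on the
# retriable client/server error codes listed above.
@retrySQL(timeoutSec=30)
def fetch_job_row(conn, jobID):
    cur = conn.cursor()
    cur.execute('SELECT * FROM jobs WHERE id = %s', (jobID,))
    return cur.fetchone()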
| gpl-3.0 |
stackmagic/repod | repod.py | 1 | 4681 | #!/usr/bin/env python
# -*- coding: utf_8 -*-
import eyed3
import os
import sqlite3 as sl
import shutil
eyed3.require("0.7")
pod = '/media/ipod-backup'
dst = '/media/ipod-music-export'
dbf = '%s/iTunes_Control/iTunes/MediaLibrary.sqlitedb' % pod
print '>>> using database file %s' % dbf
con = sl.connect(dbf)
cur = con.cursor()
cur.execute('SELECT SQLITE_VERSION()')
data = cur.fetchone()
print 'SQLite version: %s' % data
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
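# With this row factory installed, fetchall() yields dicts keyed by column
# name, e.g. {'artist': ..., 'album': ..., 'title': ...}, instead of tuples.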
con.row_factory = dict_factory
con.text_factory = str
cur = con.cursor()
sql = """
SELECT
a.album AS album
, aa.album_artist AS artist
, g.genre AS genre
, ie.YEAR AS year
, lk.kind AS kind
, i.disc_number AS disc_number
, i.track_number AS track_number
, ie.title AS title
, bl.path AS path
, ie.location AS location
, st.user_rating AS user_rating
, st.play_count_user AS play_count
FROM
item i
, album a
, album_artist aa
, genre g
, item_extra ie
, location_kind lk
, base_location bl
, item_stats st
WHERE 1
AND i.album_pid = a.album_pid
AND i.album_artist_pid = aa.album_artist_pid
AND i.genre_id = g.genre_id
AND i.item_pid = ie.item_pid
AND i.location_kind_id = lk.location_kind_id
AND i.base_location_id = bl.base_location_id
AND i.item_pid = st.item_pid
ORDER BY
artist ASC
, year ASC
, album ASC
, disc_number ASC
, track_number ASC
"""
# get all items, skip some, cleanup others
cur.execute(sql)
rows = cur.fetchall()
keep = []
for row in rows:
if row['path'] == '':
continue
if row['location'] == '':
continue
if row['artist'] == '':
row['artist'] = 'unknown'
if row['album'] == '':
row['album'] = 'unknown'
if row['title'] == '':
row['title'] = 'unknown'
row['artist'] = row['artist'].strip().replace('/', '-')
row['album' ] = row['album' ].strip().replace('/', '-')
row['title' ] = row['title' ].strip().replace('/', '-')
keep.append(row)
#print "{path}/{location} ({user_rating: 3d}) => {artist}/{year:04d} - {album}/CD{disc_number}/{track_number:02} - {title}.{filetype}".format( **row )
print '>>> total %d files (%d filtered)' % (len(keep), len(rows) - len(keep))
con.close()
# remove files if they already exist
shutil.rmtree(dst)
# go through all files and export/retag them
count = 0
total = len(keep)
for row in keep:
count += 1
row['podDir'] = pod
row['dstDir'] = dst
row['filetype'] = row['location'].split('.')[-1]  # take the last extension; filenames may contain dots
srcFile = "{podDir}/{path}/{location}".format( **row )
dstDir = "{dstDir}/{artist}/{year:04d} - {album}/CD{disc_number}".format( **row )
dstFile = dstDir + "/{track_number:02} - {title}.{filetype}".format( **row )
if not os.path.isfile(srcFile):
continue
print "[ % 7d / % 7d ] %s (%3d) => %s" % (count, total, srcFile, row['user_rating'], dstFile)
if not os.path.isdir(dstDir):
os.makedirs(dstDir)
shutil.copyfile(srcFile, dstFile)
try:
mp3 = eyed3.load(dstFile)
except BaseException as error:
print '>>> Error processing file: %s' % error
continue
if mp3 is None or mp3.tag == None:
print '>>> skipping file because not an mp3'
continue
# get rid of all comments
for c in mp3.tag.comments:
mp3.tag.comments.remove(c.description)
# stupid unicode
mp3.tag.album = u'%s' % row['album' ].decode('UTF-8')
mp3.tag.artist = u'%s' % row['artist'].decode('UTF-8')
mp3.tag.title = u'%s' % row['title' ].decode('UTF-8')
# handle genre, which might be missing
genre = u'%s' % row['genre'].decode('UTF-8')
if mp3.tag.genre is None:
mp3.tag.genre = eyed3.id3.Genre(genre)
else:
mp3.tag.genre.name = genre
# some simple numbers
mp3.tag.disc_num = row['disc_number']
mp3.tag.play_count = row['play_count']
# requires a tuple of track# and totalcount
mp3.tag.track_num = (row['track_number'], None)
# it appears 0 is not valid as a year, haven't tested negative values ;)
if row['year'] > 0:
mp3.tag.release_date = eyed3.core.Date(row['year'])
mp3.tag.recording_date = eyed3.core.Date(row['year'])
# not sure if the popularimeter stuff works as intended
for p in mp3.tag.popularities:
mp3.tag.popularities.remove(p.email)
rating = int(2.55 * int(row['user_rating']))
mp3.tag.popularities.set('rating@mp3.com', rating, row['play_count'])
# it seems some frames can't be converted to v2.4
for name in ('TYER', 'RGAD', 'RVAD', 'TSO2'):
if name in mp3.tag.frame_set:
del mp3.tag.frame_set[name]
# commit
mp3.tag.save(version = eyed3.id3.ID3_V2_4)
| apache-2.0 |
openhardnudd/QMarkdowner | utildialog/ipaddressdialog.py | 4 | 2612 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from PyQt4 import QtGui
from PyQt4 import QtCore
from basedialog import BaseDialog
class IPaddressDialog(BaseDialog):
def __init__(self, styleoptions, parent=None):
super(IPaddressDialog, self).__init__(styleoptions, parent)
# URL content input
self.urlwidget = QtGui.QWidget()
ip_mainlayout = QtGui.QGridLayout()
self.ipLabel = QtGui.QLabel(u'输入主机ip:')
self.ipLineEdit = QtGui.QLineEdit(u'192.168.100.100')
self.ipLineEdit.setInputMask('000.000.000.000')
self.portLabel = QtGui.QLabel(u'输入主机port:')
self.portLineEdit = QtGui.QLineEdit(u'8000')
ip_mainlayout.addWidget(self.ipLabel, 0, 0)
ip_mainlayout.addWidget(self.ipLineEdit, 0, 1)
ip_mainlayout.addWidget(self.portLabel, 1, 0)
ip_mainlayout.addWidget(self.portLineEdit, 1, 1)
self.urlwidget.setLayout(ip_mainlayout)
# Confirm-button layout
self.enterwidget = QtGui.QWidget()
self.pbEnter = QtGui.QPushButton(u'确定', self)
self.pbCancel = QtGui.QPushButton(u'取消', self)
self.pbEnter.clicked.connect(self.enter)
self.pbCancel.clicked.connect(self.reject)
enterwidget_mainlayout = QtGui.QGridLayout()
enterwidget_mainlayout.addWidget(self.pbEnter, 0, 0)
enterwidget_mainlayout.addWidget(self.pbCancel, 0, 1)
self.enterwidget.setLayout(enterwidget_mainlayout)
self.layout().addWidget(self.urlwidget)
self.layout().addWidget(self.enterwidget)
self.resize(self.width(), self.height())
def enter(self):
self.accept()  # close the dialog and return 1
def ipaddressinput(options):
dialog = IPaddressDialog(options)
if dialog.exec_():
return True, unicode(dialog.ipLineEdit.text()), int(dialog.portLineEdit.text())
else:
return False, unicode(dialog.ipLineEdit.text()), int(dialog.portLineEdit.text())
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
styleoptions = {
'title': u'请输入相应的ip地址和端口号:',
'windowicon': os.sep.join([os.path.dirname(__file__), 'utildialogskin', 'images', 'bg.jpg']),
'minsize': (400, 300),
'size': (400, 300),
'logo_title': u'智能光纤云终端管理平台',
'logo_img_url': os.sep.join([os.path.dirname(__file__), 'utildialogskin', 'images', 'bg.jpg'])
}
print ipaddressinput(styleoptions)
sys.exit(app.exec_())
| mit |
mancoast/CPythonPyc_test | fail/313_test_strlit.py | 10 | 5040 | r"""Test correct treatment of various string literals by the parser.
There are four types of string literals:
'abc' -- normal str
r'abc' -- raw str
b'xyz' -- normal bytes
br'xyz' -- raw bytes
The difference between normal and raw strings is of course that in a
raw string, \ escapes (while still used to determine the end of the
literal) are not interpreted, so that r'\x00' contains four
characters: a backslash, an x, and two zeros; while '\x00' contains a
single character (code point zero).
The tricky thing is what should happen when non-ASCII bytes are used
inside literals. For bytes literals, this is considered illegal. But
for str literals, those bytes are supposed to be decoded using the
encoding declared for the file (UTF-8 by default).
We have to test this with various file encodings. We also test it with
exec()/eval(), which uses a different code path.
This file is really about correct treatment of encodings and
backslashes. It doesn't concern itself with issues like single
vs. double quotes or singly- vs. triply-quoted strings: that's dealt
with elsewhere (I assume).
"""
import os
import sys
import shutil
import tempfile
import unittest
TEMPLATE = r"""# coding: %s
a = 'x'
assert ord(a) == 120
b = '\x01'
assert ord(b) == 1
c = r'\x01'
assert list(map(ord, c)) == [92, 120, 48, 49]
d = '\x81'
assert ord(d) == 0x81
e = r'\x81'
assert list(map(ord, e)) == [92, 120, 56, 49]
f = '\u1881'
assert ord(f) == 0x1881
g = r'\u1881'
assert list(map(ord, g)) == [92, 117, 49, 56, 56, 49]
"""
def byte(i):
return bytes([i])
class TestLiterals(unittest.TestCase):
def setUp(self):
self.save_path = sys.path[:]
self.tmpdir = tempfile.mkdtemp()
sys.path.insert(0, self.tmpdir)
def tearDown(self):
sys.path = self.save_path
shutil.rmtree(self.tmpdir, ignore_errors=True)
def test_template(self):
# Check that the template doesn't contain any non-printables
# except for \n.
for c in TEMPLATE:
assert c == '\n' or ' ' <= c <= '~', repr(c)
def test_eval_str_normal(self):
self.assertEqual(eval(""" 'x' """), 'x')
self.assertEqual(eval(r""" '\x01' """), chr(1))
self.assertEqual(eval(""" '\x01' """), chr(1))
self.assertEqual(eval(r""" '\x81' """), chr(0x81))
self.assertEqual(eval(""" '\x81' """), chr(0x81))
self.assertEqual(eval(r""" '\u1881' """), chr(0x1881))
self.assertEqual(eval(""" '\u1881' """), chr(0x1881))
def test_eval_str_raw(self):
self.assertEqual(eval(""" r'x' """), 'x')
self.assertEqual(eval(r""" r'\x01' """), '\\' + 'x01')
self.assertEqual(eval(""" r'\x01' """), chr(1))
self.assertEqual(eval(r""" r'\x81' """), '\\' + 'x81')
self.assertEqual(eval(""" r'\x81' """), chr(0x81))
self.assertEqual(eval(r""" r'\u1881' """), '\\' + 'u1881')
self.assertEqual(eval(""" r'\u1881' """), chr(0x1881))
def test_eval_bytes_normal(self):
self.assertEqual(eval(""" b'x' """), b'x')
self.assertEqual(eval(r""" b'\x01' """), byte(1))
self.assertEqual(eval(""" b'\x01' """), byte(1))
self.assertEqual(eval(r""" b'\x81' """), byte(0x81))
self.assertRaises(SyntaxError, eval, """ b'\x81' """)
self.assertEqual(eval(r""" b'\u1881' """), b'\\' + b'u1881')
self.assertRaises(SyntaxError, eval, """ b'\u1881' """)
def test_eval_bytes_raw(self):
self.assertEqual(eval(""" br'x' """), b'x')
self.assertEqual(eval(r""" br'\x01' """), b'\\' + b'x01')
self.assertEqual(eval(""" br'\x01' """), byte(1))
self.assertEqual(eval(r""" br'\x81' """), b"\\" + b"x81")
self.assertRaises(SyntaxError, eval, """ br'\x81' """)
self.assertEqual(eval(r""" br'\u1881' """), b"\\" + b"u1881")
self.assertRaises(SyntaxError, eval, """ br'\u1881' """)
def check_encoding(self, encoding, extra=""):
modname = "xx_" + encoding.replace("-", "_")
fn = os.path.join(self.tmpdir, modname + ".py")
f = open(fn, "w", encoding=encoding)
try:
f.write(TEMPLATE % encoding)
f.write(extra)
finally:
f.close()
__import__(modname)
del sys.modules[modname]
def test_file_utf_8(self):
extra = "z = '\u1234'; assert ord(z) == 0x1234\n"
self.check_encoding("utf-8", extra)
def test_file_utf_8_error(self):
extra = "b'\x80'\n"
self.assertRaises(SyntaxError, self.check_encoding, "utf-8", extra)
def test_file_utf8(self):
self.check_encoding("utf8")
def test_file_iso_8859_1(self):
self.check_encoding("iso-8859-1")
def test_file_latin_1(self):
self.check_encoding("latin-1")
def test_file_latin9(self):
self.check_encoding("latin9")
if __name__ == "__main__":
# Hack so that error messages containing non-ASCII can be printed
sys.stdout._encoding = sys.stderr._encoding = "utf-8"
unittest.main()
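# Quick illustration of the distinction under test (values per the module
# docstring): len('\x00') == 1, while len(r'\x00') == 4.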
| gpl-3.0 |
yawnosnorous/python-for-android | python-build/python-libs/python-twitter/simplejson/scanner.py | 928 | 2227 | """JSON token scanner
"""
import re
try:
from simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
return _scan_once
make_scanner = c_make_scanner or py_make_scanner
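if __name__ == '__main__':
    # Hedged usage sketch: a scanner is normally built by JSONDecoder itself;
    # here we borrow a default decoder as the "context" (the import is
    # deferred to avoid a circular import at module load time).
    from simplejson.decoder import JSONDecoder
    scan = py_make_scanner(JSONDecoder())
    print scan('[1, 2.5, "x"]', 0)  # -> ([1, 2.5, u'x'], 13)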
| apache-2.0 |
elthariel/dff | testsuite/dffunittest.py | 1 | 6113 | #!/usr/bin/python
# DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009-2010 ArxSys
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
# Christophe M. <cma@digital-forensic.org>
#
import unittest, sys, os, fcntl
from cStringIO import StringIO
sys.path.insert(0, sys.path[0] + '/..')
if os.name == "posix":
try :
import dl
sys.setdlopenflags(sys.getdlopenflags() | dl.RTLD_GLOBAL)
except ImportError:
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from api.manager.manager import ApiManager
from ui.ui import ui, usage
from api.taskmanager.taskmanager import *
from ui.console.console import console
from api.loader.loader import loader
class DffUnittest(unittest.TestCase):
debugTest = 0
def setUp(self):
""" Initialize framework
Load modules
Load console without loop
Redirect stdout and stderr to readable fileobjects
"""
if not self.debugTest:
sys.stdout = StringIO()
sys.stderr = StringIO()
self._hook_streams(sys.__stdout__.fileno(), sys.__stderr__.fileno())
self.ui = ui('console')
loader().do_load(sys.path[0] + '/modules/')
self.ui.c = console()
self.tm = TaskManager()
self.vfs = vfs.vfs()
if not self.debugTest:
self._restore_streams()
# Close and re-create the StringIO() objects to drop output from the module loader
# FIXME [...] sys.stderr.close() [...] AttributeError: __RedirectIO instance has no attribute 'close'
try:
sys.stdout.close()
except AttributeError:
del sys.stdout
try:
sys.stderr.close()
except AttributeError:
del sys.stderr
sys.stdout = StringIO()
sys.stderr = StringIO()
def tearDown(self):
""" Restore stdout and stderr before end of each
tests
"""
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
def _set_nonblock(self, fileobj):
""" Set a fileobject non-blocking
"""
fd = fileobj.fileno()
n = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, n|os.O_NONBLOCK)
def _hook_streams(self, stdout, stderr):
""" Avoid output of driver in current shell
"""
self.old_stdout = os.dup(stdout)
self.old_stderr = os.dup(stderr)
self.pipeOut = os.pipe()
self.pipeErr = os.pipe()
os.close(stdout)
os.dup2(self.pipeOut[1], stdout)
os.close(stderr)
os.dup2(self.pipeErr[1], stderr)
self.driverOut = os.fdopen(self.pipeOut[0])
self.driverErr = os.fdopen(self.pipeErr[0])
self._set_nonblock(self.driverOut)
self._set_nonblock(self.driverErr)
def _close(self, *fds):
for fd in fds:
if type(fd) == file:
fd.close()
else:
try:
os.close(fd)
except:
pass
def _restore_streams(self):
""" Restore stdout and stderr for tests be able to display informations
Fetch stdout and stderr from driver and return both in a tuple
"""
try:
readOut = self.driverOut.read(4096)
except:
readOut = None
try:
readErr = self.driverErr.read(4096)
except:
readErr = None
self.driverOut.flush()
self.driverErr.flush()
os.dup2(self.old_stdout, sys.__stdout__.fileno())
os.dup2(self.old_stderr, sys.__stderr__.fileno())
self._close(self.old_stdout, self.old_stderr,
self.driverOut, self.driverErr,
self.pipeOut[1], self.pipeErr[1],
self.pipeOut[0], self.pipeErr[0])
return (readOut, readErr)
def _getResultsArrayByProcName(self, procName):
""" Return array of results from last processus name procName execution.
"""
val_map = None
outTest = []
for i in range(len(self.tm.lprocessus) - 1, -1, -1):  # include index 0, as in _getEnvObjByProcName
# outTest.append(self.tm.lprocessus[i].name)
if self.tm.lprocessus[i].name == procName:
val_map = self.tm.env.get_val_map(self.tm.lprocessus[i].res.val_m)
break
if val_map:
for type, name, val in val_map:
if name == 'result':
outArray = []
for line in val.split('\n'):
if len(line.rstrip()):
outArray.append(line.rstrip())
return outArray
raise UserWarning
def _readExpectedOutputAsArray(self, filePath):
outArray = []
with open(filePath, 'r') as f:
for line in f:
if len(line.rstrip()):
outArray.append(line.rstrip())
return outArray
def _getOutputArray(self, stringBuff):
sArray = stringBuff.split('\n')
outArray = []
for oneLine in sArray:
if len(oneLine) > 1:
outArray.append(oneLine)
return outArray
def _getEnvObjByProcName(self, procName):
""" Return environement array from last processus named procName
execution.
"""
val_map = None
outTest = []
for i in range(len(self.tm.lprocessus) - 1, -1, -1):
outTest.append(self.tm.lprocessus[i].name)
if self.tm.lprocessus[i].name == procName:
return self.tm.lprocessus[i].args
raise UserWarning
| gpl-2.0 |
picrust/picrust | scripts/start_parallel_picrust_jobs_sge.py | 1 | 3682 | #!/usr/bin/env python
"""A simple qsub based cluster submission script for SGE."""
__author__ = "Jens Reeder"
__copyright__ = "Copyright 2011-2013, The PICRUSt Project"
__credits__ = ["Jens Reeder",
"Rob Knight",
"Greg Caporaso",
"Morgan Langille"]
__license__ = "GPL"
__version__ = "1.1.4"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
__status__ = "Development"
from optparse import OptionParser
from os.path import exists
from os import remove, rename, rmdir, makedirs
from cogent.util.misc import app_path
from cogent.app.util import get_tmp_filename
from picrust.make_cluster_jobs import make_sge_jobs, submit_cluster_jobs
from cogent.util.option_parsing import parse_command_line_parameters, make_option
script_info = {}
script_info['brief_description'] = "Starts multiple jobs in parallel on SGE/qsub based multiprocessor systems."
script_info['script_description'] = "This script is designed to start multiple jobs in parallel on cluster systems with a SGE/qsub based scheduling system."
script_info['script_usage'] = [\
("Example",\
"Start each command listed in test_jobs.txt in parallel. The run id for these jobs will be RUNID. ",\
"%prog -ms test_jobs.txt RUNID")]
script_info['output_description']= "No output is created."
script_info['required_options'] = []
script_info['optional_options'] = [\
make_option('-m','--make_jobs',action='store_true',\
help='make the job files [default: %default]'),
make_option('-s','--submit_jobs',action='store_true',\
help='submit the job files [default: %default]'),
make_option('-d','--delay',action='store',type='int',default=0,
help='Number of seconds to pause between launching each job [default: %default]'),
make_option('-q','--queue',action='store',\
type='string',dest='queue', \
help='name of queue to submit to '+\
' [default: %default]'),
make_option('-j','--job_dir', action='store',\
type='string',dest='job_dir',\
help='directory to store the jobs '+\
'[default: %default]', default="jobs/"),
make_option('-n','--num_jobs',action='store',type='int',\
help='Number of jobs to group commands into. [default: %default]',\
default=100)
]
script_info['version'] = __version__
script_info['disallow_positional_arguments'] = False
def main():
option_parser, opts, args =\
parse_command_line_parameters(**script_info)
if opts.submit_jobs and not opts.make_jobs:
option_parser.error('Must pass -m if passing -s. (Sorry about this, '+\
'it\'s for backwards-compatibility.)')
min_args = 2
if len(args) != min_args:
option_parser.error('Program requires <commands file> and <job prefix>')
if (len(args[1])>10 or len(args[1])==0):
option_parser.error('job prefix must be 1-10 characters long')
commands = list(open(args[0]))
job_prefix = args[1]
if(not exists(opts.job_dir)):
try:
makedirs(opts.job_dir)
except OSError:
exit(" Jobs directory can not be created. "
+"Check for permissions or file with the same name: %s\n"
% opts.job_dir)
if (opts.make_jobs):
filenames = make_sge_jobs(commands, job_prefix, opts.queue, opts.job_dir,opts.num_jobs)
else:
exit("Should we ever get here???")
if (opts.submit_jobs):
submit_cluster_jobs(filenames, opts.verbose, delay=opts.delay)
if __name__ == "__main__":
main()
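# Hedged example of the expected <commands file> (one shell command per line;
# the command names below are illustrative, not prescribed by this script):
#   predict_traits.py -i otus_part1.biom -o predictions_part1.tab
#   predict_traits.py -i otus_part2.biom -o predictions_part2.tab
# The commands are grouped into --num_jobs SGE job scripts under --job_dir.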
| gpl-3.0 |
sanjeevtripurari/hue | desktop/core/src/desktop/api.py | 14 | 10396 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import time
from collections import defaultdict
from django.utils import html
from django.utils.translation import ugettext as _
import desktop.conf
from desktop.lib.django_util import JsonResponse
from desktop.lib.i18n import force_unicode
from desktop.models import Document, DocumentTag
LOG = logging.getLogger(__name__)
def _get_docs(user):
history_tag = DocumentTag.objects.get_history_tag(user)
query = Document.objects.get_docs(user) \
.exclude(tags__in=[history_tag])
# Work around Oracle not supporting SELECT DISTINCT with the CLOB type.
if desktop.conf.DATABASE.ENGINE.get() == 'django.db.backends.oracle':
query = query.only('id')
else:
query = query.defer(None)
docs = query.order_by('-last_modified')[:100]
if desktop.conf.DATABASE.ENGINE.get() == 'django.db.backends.oracle':
ids = [doc.id for doc in docs]
docs = Document.objects.filter(id__in=ids).defer(None)
docs = docs \
.select_related('owner', 'content_type') \
.prefetch_related('tags', 'documentpermission_set')
return docs
def massaged_tags_for_json(docs, user):
"""
var TAGS_DEFAULTS = {
'history': {'name': 'History', 'id': 1, 'docs': [1], 'type': 'history'},
'trash': {'name': 'Trash', 'id': 3, 'docs': [2]},
'mine': [{'name': 'default', 'id': 2, 'docs': [3]}, {'name': 'web', 'id': 3, 'docs': [3]}],
'notmine': [{'name': 'example', 'id': 20, 'docs': [10]}, {'name': 'ex2', 'id': 30, 'docs': [10, 11]}]
};
"""
ts = {
'trash': {},
'history': {},
'mine': [],
'notmine': [],
}
sharers = defaultdict(list)
trash_tag = DocumentTag.objects.get_trash_tag(user)
history_tag = DocumentTag.objects.get_history_tag(user)
tag_doc_mapping = defaultdict(set) # List of documents available in each tag
for doc in docs:
for tag in doc.tags.all():
tag_doc_mapping[tag].add(doc)
ts['trash'] = massaged_tags(trash_tag, tag_doc_mapping)
ts['history'] = massaged_tags(history_tag, tag_doc_mapping)
tags = list(set(tag_doc_mapping.keys() + [tag for tag in DocumentTag.objects.get_tags(user=user)])) # List of all personal and shared tags
for tag in tags:
massaged_tag = massaged_tags(tag, tag_doc_mapping)
if tag == trash_tag:
ts['trash'] = massaged_tag
elif tag == history_tag:
ts['history'] = massaged_tag
elif tag.owner == user:
ts['mine'].append(massaged_tag)
else:
sharers[tag.owner].append(massaged_tag)
ts['notmine'] = [{'name': sharer.username, 'projects': projects} for sharer, projects in sharers.iteritems()]
# Remove from my tags the trashed and history ones
mine_filter = set(ts['trash']['docs'] + ts['history']['docs'])
for tag in ts['mine']:
tag['docs'] = [doc_id for doc_id in tag['docs'] if doc_id not in mine_filter]
return ts
def massaged_tags(tag, tag_doc_mapping):
return {
'id': tag.id,
'name': html.conditional_escape(tag.tag),
'owner': tag.owner.username,
'docs': [doc.id for doc in tag_doc_mapping[tag]] # could fetch these with one grouped query
}
def massage_permissions(document):
"""
Returns the permissions for a given document as a dictionary
"""
read_perms = document.list_permissions(perm='read')
write_perms = document.list_permissions(perm='write')
return {
'perms': {
'read': {
'users': [{'id': perm_user.id, 'username': perm_user.username} \
for perm_user in read_perms.users.all()],
'groups': [{'id': perm_group.id, 'name': perm_group.name} \
for perm_group in read_perms.groups.all()]
},
'write': {
'users': [{'id': perm_user.id, 'username': perm_user.username} \
for perm_user in write_perms.users.all()],
'groups': [{'id': perm_group.id, 'name': perm_group.name} \
for perm_group in write_perms.groups.all()]
}
}
}
def massaged_documents_for_json(documents, user):
"""
var DOCUMENTS_DEFAULTS = {
'1': {
'id': 1,
'name': 'my query history', 'description': '', 'url': '/beeswax/execute/design/83', 'icon': '/static/beeswax/art/icon_beeswax_24.png',
'lastModified': '03/11/14 16:06:49', 'owner': 'admin', 'lastModifiedInMillis': 1394579209.0, 'isMine': true
},
'2': {
'id': 2,
'name': 'my query 2 trashed', 'description': '', 'url': '/beeswax/execute/design/83', 'icon': '/static/beeswax/art/icon_beeswax_24.png',
'lastModified': '03/11/14 16:06:49', 'owner': 'admin', 'lastModifiedInMillis': 1394579209.0, 'isMine': true
},
'3': {
'id': 3,
'name': 'my query 3 tagged twice', 'description': '', 'url': '/beeswax/execute/design/83', 'icon': '/static/beeswax/art/icon_beeswax_24.png',
'lastModified': '03/11/14 16:06:49', 'owner': 'admin', 'lastModifiedInMillis': 1394579209.0, 'isMine': true
},
'10': {
'id': 10,
'name': 'my query 3 shared', 'description': '', 'url': '/beeswax/execute/design/83', 'icon': '/static/beeswax/art/icon_beeswax_24.png',
'lastModified': '03/11/14 16:06:49', 'owner': 'admin', 'lastModifiedInMillis': 1394579209.0, 'isMine': true
},
'11': {
'id': 11,
'name': 'my query 4 shared', 'description': '', 'url': '/beeswax/execute/design/83', 'icon': '/static/beeswax/art/icon_beeswax_24.png',
'lastModified': '03/11/14 16:06:49', 'owner': 'admin', 'lastModifiedInMillis': 1394579209.0, 'isMine': true
}
};
"""
docs = {}
for document in documents:
try:
url = document.content_object.get_absolute_url()
except:
LOG.exception('failed to get absolute url')
# If app of document is disabled
url = ''
docs[document.id] = massage_doc_for_json(document, user, url)
return docs
def get_document(request):
if request.method == 'POST':
return Http404()
elif request.method == 'GET':
doc_id = request.GET['id']
doc = Document.objects.get(id=doc_id)
response = massage_doc_for_json(doc, request.user)
return JsonResponse(response)
def massage_doc_for_json(document, user, url=''):
read_perms = document.list_permissions(perm='read')
write_perms = document.list_permissions(perm='write')
massaged_doc = {
'id': document.id,
'contentType': html.conditional_escape(document.content_type.name),
'icon': document.icon,
'name': html.conditional_escape(document.name),
'url': html.conditional_escape(url),
'description': html.conditional_escape(document.description),
'tags': [{'id': tag.id, 'name': html.conditional_escape(tag.tag)} \
for tag in document.tags.all()],
'owner': document.owner.username,
'isMine': document.owner == user,
'lastModified': document.last_modified.strftime("%x %X"),
'lastModifiedInMillis': time.mktime(document.last_modified.timetuple())
}
permissions = massage_permissions(document)
massaged_doc.update(permissions)
return massaged_doc
def add_tag(request):
response = {'status': -1, 'message': ''}
if request.method == 'POST':
try:
tag = DocumentTag.objects.create_tag(request.user, request.POST['name'])
response['name'] = request.POST['name']
response['id'] = tag.id
response['docs'] = []
response['owner'] = request.user.username
response['status'] = 0
except Exception, e:
response['message'] = force_unicode(e)
else:
response['message'] = _('POST request only')
return JsonResponse(response)
def tag(request):
response = {'status': -1, 'message': ''}
if request.method == 'POST':
request_json = json.loads(request.POST['data'])
try:
tag = DocumentTag.objects.tag(request.user, request_json['doc_id'], request_json.get('tag'), request_json.get('tag_id'))
response['tag_id'] = tag.id
response['status'] = 0
except Exception, e:
response['message'] = force_unicode(e)
else:
response['message'] = _('POST request only')
return JsonResponse(response)
def update_tags(request):
response = {'status': -1, 'message': ''}
if request.method == 'POST':
request_json = json.loads(request.POST['data'])
try:
doc = DocumentTag.objects.update_tags(request.user, request_json['doc_id'], request_json['tag_ids'])
response['doc'] = massage_doc_for_json(doc, request.user)
response['status'] = 0
except Exception, e:
response['message'] = force_unicode(e)
else:
response['message'] = _('POST request only')
return JsonResponse(response)
def remove_tag(request):
response = {'status': -1, 'message': _('Error')}
if request.method == 'POST':
try:
DocumentTag.objects.delete_tag(request.POST['tag_id'], request.user)
response['message'] = _('Project removed!')
response['status'] = 0
except Exception, e:
response['message'] = force_unicode(e)
else:
response['message'] = _('POST request only')
return JsonResponse(response)
def update_permissions(request):
response = {'status': -1, 'message': _('Error')}
if request.method == 'POST':
data = json.loads(request.POST['data'])
doc_id = request.POST['doc_id']
try:
doc = Document.objects.get_doc(doc_id, request.user)
doc.sync_permissions(data)
response['message'] = _('Permissions updated!')
response['status'] = 0
response['doc'] = massage_doc_for_json(doc, request.user)
except Exception, e:
LOG.exception(e.message)
response['message'] = force_unicode(e)
else:
response['message'] = _('POST request only')
return JsonResponse(response)
| apache-2.0 |
mattcieslak/DSI2 | dsi2/ui/ltpa_result.py | 1 | 9331 | #!/usr/bin/env python
from traits.api import HasTraits, Instance, Array, Bool, Dict, \
on_trait_change, Delegate, List, Color, Any, Instance, Int, File, \
Button, Enum, Str, DelegatesTo, Property, CFloat,Range
from traitsui.api import View, Item, HGroup, VGroup, \
Group, Handler, HSplit, VSplit, RangeEditor, Include, Action, MenuBar, Menu, \
TableEditor, ObjectColumn, Separator
from traitsui.extras.checkbox_column import CheckboxColumn
from ..volumes.scalar_volume import ScalarVolumes
from tvtk.pyface.scene import Scene
from mayavi.core.ui.api import SceneEditor
from traitsui.color_column import ColorColumn
from mayavi.core.api import PipelineBase, Source
from mayavi import mlab
from traitsui.editors.tabular_editor import TabularEditor
from traitsui.tabular_adapter import TabularAdapter
from traitsui.file_dialog import save_file, open_file
from tvtk.pyface.scene import Scene
from tvtk.api import tvtk
from mayavi.core.ui.api import SceneEditor
from mayavi.tools.mlab_scene_model import MlabSceneModel
import os
import numpy as np
from dsi2.volumes.scalar_volume import ScalarVolume
from dsi2.streamlines.track_dataset import TrackDataset
import cPickle as pickle
ltpa_result_table = TableEditor(
columns =
[
ObjectColumn(name="name"),
CheckboxColumn(name="visible"),
ObjectColumn(name="coord_opacity"),
ObjectColumn(name="tracksA_opacity"),
CheckboxColumn(name="tracksA_visible"),
ObjectColumn(name="tracksB_opacity"),
CheckboxColumn(name="tracksB_visible"),
ColorColumn(name="colorA", width=5),
ColorColumn(name="colorB", width=5),
ObjectColumn(name="coord_shape"),
ObjectColumn(name="coord_radius"),
],
auto_size=False,
)
class CoordinatesGraphic(HasTraits):
# Data
scalars = Array
indices = Array
radius = CFloat(0.5)
# Holds the mayavi objects
source = Instance(Source,transient=True)
glyph = Instance(PipelineBase, transient=True)
glyph_drawn = Bool(False, transient=True)
splatter = Instance(PipelineBase,transient=True)
glyph_opacity = Range(high=1.0,low=0.0,value=0.3)
# MayaVi data options
color_map = Enum(
[ "Blues", "Oranges", "pink", "Greens"] )
render_type = Enum(["static_spheres","sized_cubes",
"static_cubes","splatter"])
static_color = Color
visible = Bool(True)
def set_visibility(self, visibility):
if not visibility:
if not self.glyph_drawn: return
else:
if not self.glyph_drawn:
self.render()
# Set visibility of all items
for viz in [self.glyph, self.splatter]:
if viz:
viz.visible = visibility
def render(self):
if not self.visible: return
try:
color = self.static_color.toTuple()
except AttributeError:  # PyQt4's QColor has no toTuple(); fall back to channel accessors
color = (self.static_color.red(),self.static_color.green(),self.static_color.blue())
static_color = color[0]/255., color[1]/255., color[2]/255.
if self.render_type == "sized_cubes":
self.glyph = mlab.pipeline.glyph(
self.source, colormap=self.color_map, mode="cube" )
elif self.render_type == "splatter":
self.splatter = mlab.pipeline.gaussian_splatter(self.source)
self.glyph = mlab.pipeline.volume(
self.splatter,
color=static_color)
elif self.render_type == "static_cubes":
self.source = mlab.pipeline.scalar_scatter(
self.indices[:,0],self.indices[:,1],self.indices[:,2])
self.glyph = mlab.pipeline.glyph(
self.source, color=static_color, mode="cube" )
elif self.render_type == "static_spheres":
self.source = mlab.pipeline.scalar_scatter(
self.indices[:,0],self.indices[:,1],self.indices[:,2])
self.glyph = mlab.pipeline.glyph(
self.source, color=static_color,
mode="sphere" )
self.glyph.glyph.glyph_source.glyph_source.radius = self.radius
self.glyph.actor.property.opacity = self.glyph_opacity
self.glyph_drawn = True
def _color_map_changed(self):
self.clear()
self.render()
instance_view = View(
Group(
Item("filepath"),
Group(Item("visible"),Item("glyph"),Item("splatter"),Item("source"),orientation="horizontal"),
Item("static_color"),
Item("b_render"),
orientation="vertical")
)
class LTPAResult(HasTraits):
name=Str("LTPA Result")
# 3d MayaVi scene that will display slices and streamlines
scene3d = Instance(MlabSceneModel,transient=True)
# Data objects
result_coords = Array
result_coord_scalars = Array
coords_apply_to = Enum("A","B")
tracksA = Instance(TrackDataset)
tracksB = Instance(TrackDataset)
#graphics options
coord_shape = Enum("sphere", "cube")
coord_radius = CFloat(1.0)
colorA = Color("red")
colorB = Color("blue")
showA_as = Enum("splatter","tracks")
showB_as = Enum("splatter","tracks")
coord_group = Enum("A","B")
coord_opacity = Range(0.0,1.0,0.5)
visible = Bool(False)
tracksA_opacity = Range(0.0,1.0,0.5)
tracksA_visible = Bool(True)
tracksB_opacity = Range(0.0,1.0,0.5)
tracksB_visible = Bool(True)
# graphics objects
coord_graphic = Instance(CoordinatesGraphic,transient=True)
def __init__(self,**traits):
super(LTPAResult,self).__init__(**traits)
# prepare track datasets for plotting
for tds in [self.tracksA, self.tracksB]:
tds.render_tracks = True
tds.tracks_drawn = False
tds.dynamic_color_clusters = False
self.tracksA.static_color = self.colorA
self.tracksB.static_color = self.colorB
def _coord_graphic_default(self):
"""
Looks at the contents of this result object
"""
if self.coords_apply_to == "A":
c = self.colorA
else:
c = self.colorB
return CoordinatesGraphic(
indices = self.result_coords,
static_color=c,
scalars = self.result_coord_scalars,
radius=self.coord_radius
)
def _coord_opacity_changed(self):
self.coord_graphic.glyph.actor.property.opacity = self.coord_opacity
def _visible_changed(self):
"""
"""
for tds in [self.tracksA, self.tracksB]:
tds.set_track_visibility(self.visible)
self._tracksA_opacity_changed()
self._tracksB_opacity_changed()
self.coord_graphic.set_visibility(self.visible)
def _tracksA_opacity_changed(self):
if self.tracksA.tracks_drawn:
self.tracksA.src.actor.property.opacity = self.tracksA_opacity
def _tracksA_visible_changed(self):
if self.tracksA.tracks_drawn:
self.tracksA.set_track_visibility(self.tracksA_visible)
def _tracksB_opacity_changed(self):
if self.tracksB.tracks_drawn:
self.tracksB.src.actor.property.opacity = self.tracksB_opacity
def _tracksB_visible_changed(self):
if self.tracksB.tracks_drawn:
self.tracksB.set_track_visibility(self.tracksB_visible)
class LTPAResults(HasTraits):
scene3d_inited = Bool(False)
results = List(Instance(LTPAResult))
scene3d = Instance(MlabSceneModel, (),transient=True)
def __init__(self,**traits):
super(LTPAResults,self).__init__(**traits)
for res in self.results:
res.scene3d = self.scene3d
traits_view = View(
Group(
Item("results", editor=ltpa_result_table),
show_labels=False
)
)
test_view = View(
Group(
Item("scene3d",
editor=SceneEditor(scene_class=Scene),
height=500, width=500),
Item("results", editor=ltpa_result_table),
show_labels=False
),
resizable=True
)
@on_trait_change('scene3d.activated')
def display_scene3d(self):
if self.scene3d_inited: return
for res in self.results:
res.visible = True
def load_ltpa_results(results_pth):
if not os.path.exists(results_pth):
raise ValueError("No such file " + results_pth)
fop = open(results_pth,"rb")
try:
res = pickle.load(fop)
except Exception, e:
print "Unable to load", results_pth, "because of\n", e
return LTPAResults()
# When loading from a pickle, the __init__ isn't properly run,
# so explicitly run the __init__ code here before returning the result.
#for result in res.results:
# for tds in [result.tracksA, result.tracksB]:
# tds.render_tracks = True
# tds.tracks_drawn = False
# tds.dynamic_color_clusters = False
# result.tracksA.static_color = result.colorA
# result.tracksB.static_color = result.colorB
return res
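if __name__ == "__main__":
    # Hedged usage sketch: the pickle path is illustrative. This loads saved
    # LTPA results and opens the interactive Mayavi view declared above as
    # `test_view` (assumes a working Mayavi/TraitsUI installation).
    results = load_ltpa_results("ltpa_results.pkl")
    results.configure_traits(view="test_view")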
| gpl-3.0 |
chemelnucfin/tensorflow | tensorflow/python/keras/engine/training_eager_test.py | 5 | 14079 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class TrainingTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_dynamic_model_has_trainable_weights(self):
if not context.executing_eagerly():
# Only test Eager modes, as Graph mode is not relevant for dynamic models.
return
class DynamicModel(keras.Model):
def __init__(self):
super(DynamicModel, self).__init__(dynamic=True)
self.dense = keras.layers.Dense(
1, kernel_initializer='zeros', bias_initializer='ones')
def call(self, inputs):
return self.dense(inputs)
model = DynamicModel()
model.compile(
'rmsprop', 'mae',
run_eagerly=True,
experimental_run_tf_function=testing_utils.should_run_tf_function())
hist = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(hist.history['loss'][-1], 1)
self.assertEqual(len(model.trainable_weights), 2)
loss = model.train_on_batch(np.zeros((1, 1)), np.zeros((1, 1)))
# The loss must have been updated if the trainable weights are taken into
# account during tracking.
self.assertLess(loss, 1)
@keras_parameterized.run_with_all_model_types(exclude_models='sequential')
@keras_parameterized.run_all_keras_modes
def test_model_methods_with_eager_tensors_multi_io(self):
if not context.executing_eagerly():
# Only test V2 Function and V2 Eager modes, as V1 Graph mode with
# symbolic tensors has different requirements.
return
input_a = keras.layers.Input(shape=(3,), name='input_a')
input_b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
dropout = keras.layers.Dropout(0.5, name='dropout')
model = testing_utils.get_multi_io_model(
[input_a, dense], [input_b, dense, dropout])
optimizer = rmsprop.RMSprop(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
metrics = ['mae', metrics_module.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics,
loss_weights=loss_weights,
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function(),
sample_weight_mode=None)
input_a = array_ops.zeros(shape=(10, 3))
input_b = array_ops.zeros(shape=(10, 3))
target_a = array_ops.zeros(shape=(10, 4))
target_b = array_ops.zeros(shape=(10, 4))
model.fit(
[input_a, input_b], [target_a, target_b],
epochs=1,
batch_size=5,
verbose=0)
# Test: no shuffle.
model.fit(
[input_a, input_b], [target_a, target_b],
epochs=1,
batch_size=5,
verbose=0,
shuffle=False)
# Test: validation data.
model.fit([input_a, input_b], [target_a, target_b],
epochs=1, batch_size=2, verbose=0,
validation_data=([input_a, input_b], [target_a, target_b]))
model.train_on_batch([input_a, input_b], [target_a, target_b])
model.predict([input_a, input_b], batch_size=5)
model.evaluate([input_a, input_b], [target_a, target_b],
batch_size=2, verbose=0)
model.test_on_batch([input_a, input_b], [target_a, target_b])
# Test: mix np and tensors.
input_b = np.zeros(shape=(10, 3)).astype('float32')
target_b = np.zeros(shape=(10, 4)).astype('float32')
model.fit(
[input_a, input_b], [target_a, target_b],
epochs=1,
batch_size=5,
verbose=0)
model.fit([input_a, input_b], [target_a, target_b],
epochs=1, batch_size=2, verbose=0,
validation_data=([input_a, input_b], [target_a, target_b]))
model.fit(
[input_a, input_b], [target_a, target_b],
epochs=1,
batch_size=5,
verbose=0,
shuffle=False)
model.train_on_batch([input_a, input_b], [target_a, target_b])
model.predict([input_a, input_b], batch_size=5)
model.evaluate([input_a, input_b], [target_a, target_b],
batch_size=2, verbose=0)
model.test_on_batch([input_a, input_b], [target_a, target_b])
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_model_methods_with_eager_tensors_single_io(self):
if not context.executing_eagerly():
# Only test V2 Function and V2 Eager modes, as V1 Graph mode with
# symbolic tensors has different requirements.
return
model = testing_utils.get_small_mlp(10, 4, 3)
optimizer = rmsprop.RMSprop(learning_rate=0.001)
loss = 'mse'
metrics = ['mae', metrics_module.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = array_ops.zeros(shape=(10, 3))
targets = array_ops.zeros(shape=(10, 4))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0)
model.fit(inputs, targets, epochs=1, batch_size=3, verbose=0, shuffle=False)
model.fit(inputs, targets, epochs=1, batch_size=4, verbose=0,
validation_data=(inputs, targets))
model.evaluate(inputs, targets, batch_size=2, verbose=0)
model.predict(inputs, batch_size=2)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
@keras_parameterized.run_with_all_model_types
def test_model_fit_and_validation_with_missing_arg_errors(self):
model = testing_utils.get_small_mlp(10, 4, 3)
model.compile(optimizer=rmsprop.RMSprop(learning_rate=0.001),
loss='mse',
run_eagerly=True)
x = array_ops.zeros(shape=(10, 3))
y = array_ops.zeros(shape=(10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).repeat(10).batch(5)
validation_dataset = dataset_ops.Dataset.from_tensor_slices(
(x, y)).repeat().batch(5) # Infinite dataset.
model.fit(dataset, epochs=1, verbose=0)
    # `validation_steps` is required when validating with an infinite dataset.
with self.assertRaisesRegexp(ValueError,
'specify the `validation_steps` argument.'):
model.fit(dataset, steps_per_epoch=2, epochs=1, verbose=0,
validation_data=validation_dataset)
with self.assertRaisesRegexp(ValueError,
'specify the `validation_steps` argument.'):
model.fit(dataset, steps_per_epoch=2, epochs=1, verbose=0,
validation_data=validation_dataset)
# TODO(b/120931266): Enable test on subclassed models after bug causing an
# extra dimension to be added to predict outputs is fixed.
@keras_parameterized.run_with_all_model_types(exclude_models='subclass')
def test_generator_methods(self):
model = testing_utils.get_small_mlp(10, 4, 3)
optimizer = rmsprop.RMSprop(learning_rate=0.001)
model.compile(
optimizer,
loss='mse',
metrics=['mae', metrics_module.CategoricalAccuracy()],
run_eagerly=True)
x = np.random.random((10, 3))
y = np.random.random((10, 4))
def numpy_iterator():
while True:
yield x, y
model.fit_generator(numpy_iterator(), steps_per_epoch=3, epochs=1)
model.evaluate_generator(numpy_iterator(), steps=3)
def inference_numpy_iterator():
while True:
yield x
out = model.predict_generator(inference_numpy_iterator(), steps=3)
self.assertEqual(out.shape, (30, 4))
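    # Hedged note: 3 generator steps of the 10-sample batch flow through the
    # 4-unit output layer, hence the (3 * 10, 4) == (30, 4) prediction shape.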
class CorrectnessTest(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_loss_correctness(self):
# Test that training loss is the same in eager and graph
# (by comparing it to a reference value in a deterministic case)
layers = [
keras.layers.Dense(3, activation='relu',
kernel_initializer='ones'),
keras.layers.Dense(2, activation='softmax', kernel_initializer='ones')]
model = testing_utils.get_model_from_layers(layers, input_shape=(4,))
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=rmsprop.RMSprop(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((100, 4))
np.random.seed(123)
y = np.random.randint(0, 1, size=(100, 1))
history = model.fit(x, y, epochs=1, batch_size=10)
self.assertAlmostEqual(history.history['loss'][-1], 0.5836, 4)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_loss_correctness_with_iterator(self):
# Test that training loss is the same in eager and graph
# (by comparing it to a reference value in a deterministic case)
layers = [
keras.layers.Dense(3, activation='relu',
kernel_initializer='ones'),
keras.layers.Dense(2, activation='softmax', kernel_initializer='ones')]
model = testing_utils.get_model_from_layers(layers, input_shape=(4,))
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=rmsprop.RMSprop(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((100, 4), dtype=np.float32)
np.random.seed(123)
y = np.random.randint(0, 1, size=(100, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
history = model.fit(dataset, epochs=1, steps_per_epoch=10)
self.assertAlmostEqual(history.history['loss'][-1], 0.5836, 4)
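    # Hedged note: repeat(100).batch(10) with steps_per_epoch=10 consumes
    # exactly 100 samples, i.e. one pass over x, so the reference loss matches
    # test_loss_correctness above.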
def test_loss_in_call(self):
class HasLoss(keras.layers.Layer):
def call(self, x):
self.add_loss(x)
return x
layer = HasLoss()
layer(1.) # Plain-value inputs are only valid in eager mode.
self.assertEqual(1, len(layer.losses))
@parameterized.named_parameters([
('_None', contextlib.contextmanager(lambda: iter([None])), 0., 4.),
('_0', lambda: keras.backend.learning_phase_scope(0), 4., 4.),
('_1', lambda: keras.backend.learning_phase_scope(1), 0., 0.),
])
def test_nested_model_learning_phase(self, nested_scope_fn,
expected_training_loss,
expected_validation_loss):
"""Tests that learning phase is correctly set in an intermediate layer."""
def _make_unregularized_model():
inputs = keras.Input((4,))
      # Zero out activations when `training=True`. (Dropout scales retained
      # units by 1 / (1 - rate), so the rate is set just below 1.0 rather
      # than exactly 1.0.)
x = keras.layers.Dropout(1. - 1. / (1 << 24))(inputs)
x = keras.layers.Dense(
10,
activation='relu',
trainable=False,
bias_initializer='zeros',
kernel_initializer='ones')(
x) # Just sum together all the activations.
outputs = keras.layers.Dense(3)(x)
return keras.Model(inputs, outputs)
def _regularize_model(unregularized_model):
inputs = keras.Input(unregularized_model.inputs[0].shape[1:])
with nested_scope_fn():
logits = unregularized_model(inputs)
outputs = keras.activations.softmax(logits)
model = keras.Model(inputs, outputs)
# Regularize the most recent activations of a post-dropout layer.
sample_activations = unregularized_model.get_layer(
index=-2).get_output_at(-1)
regularization_loss = keras.backend.mean(sample_activations)
model.add_loss(regularization_loss)
model.add_metric(
regularization_loss, aggregation='mean', name='regularization_loss')
return model
# Make and compile models.
model = _regularize_model(_make_unregularized_model())
model.compile('sgd', 'sparse_categorical_crossentropy')
# Prepare fake data.
x = np.ones((20, 4)).astype(np.float32)
y = np.random.randint(0, 3, size=(20,)).astype(np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(2)
evaluation_results = dict(zip(model.metrics_names, model.evaluate(dataset)))
# Rate of dropout depends on the learning phase.
self.assertEqual(evaluation_results['regularization_loss'],
expected_validation_loss)
history = model.fit(dataset, epochs=2, validation_data=dataset).history
self.assertAllEqual(history['regularization_loss'],
[expected_training_loss] * 2)
self.assertAllEqual(history['val_regularization_loss'],
[expected_validation_loss] * 2)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| apache-2.0 |
DavidjohnBlodgett/RackHD | test/tests/api/v2_0/schema_tests.py | 16 | 2321 | from config.api2_0_config import *
from modules.logger import Log
from on_http_api2_0 import ApiApi as api20
from on_http_api2_0 import rest
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_true
from proboscis.asserts import fail
from proboscis import test
from json import loads,dumps
LOG = Log(__name__)
@test(groups=['schemas_api2.tests'])
class SchemaTests(object):
def __init__(self):
self.__client = config.api_client
self.__schemaList = None
def __get_data(self):
return loads(self.__client.last_response.data)
@test(groups=['2.0.list_schemas'])
def test_list_schemas(self):
""" Testing GET /api/2.0/schemas """
api20().schemas_get()
schemas = self.__get_data()
LOG.debug(schemas,json=True)
assert_not_equal(0, len(schemas), message='Schema list was empty')
self.__schemaList = schemas
@test(groups=['2.0.get_schema'], depends_on_groups=['2.0.list_schemas'])
def test_get_schema(self):
""" Testing GET /api/2.0/schemas/{identifier} """
assert_not_equal(None, self.__schemaList)
for member in self.__schemaList:
assert_not_equal(None,member)
dataId = member.split('/api/2.0/schemas/')[1]
api20().schemas_id_get(dataId)
schema_ref = self.__get_data()
LOG.debug(schema_ref,json=True)
assert_true('title' in schema_ref.keys(), message='title not found in schema')
assert_true('definitions' in schema_ref.keys(), message='definitions not found in schema')
@test(groups=['2.0.get_schema_invalid'], depends_on_groups=['2.0.list_schemas'])
def test_get_schema_invalid(self):
""" Testing GET /api/2.0/schemas/{identifier} 404s properly """
assert_not_equal(None, self.__schemaList)
for member in self.__schemaList:
assert_not_equal(None,member)
try:
api20().schemas_id_get(member + '-invalid')
fail(message='did not raise exception')
except rest.ApiException as e:
assert_equal(404, e.status, message='unexpected response {0}, expected 404'.format(e.status))
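                # A single invalid identifier suffices to exercise the 404
                # path, so stop after the first schema.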
break
| apache-2.0 |
JMJAC/Rango | tango_project/rango/models.py | 1 | 1073 | from django.db import models
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
class Category(models.Model):
name = models.CharField(max_length=128, unique=True)
likes = models.IntegerField(default=0)
views = models.IntegerField(default=0)
slug = models.SlugField(unique=True)
class Meta:
verbose_name_plural = 'Categories'
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(Category, self).save(*args, **kwargs)
class Page(models.Model):
category = models.ForeignKey(Category)
title = models.CharField(max_length=128)
url = models.URLField(max_length=256)
views = models.IntegerField(default=0)
def __str__(self):
return self.title
class UserProfile(models.Model):
user = models.OneToOneField(User)
website = models.URLField(blank=True)
picture = models.ImageField(upload_to='profile_images', blank=True)
def __str__(self):
        return self.user.username
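# Hedged illustration (not part of the original app): Category.save() derives
# the slug from the name via slugify, so, with a configured Django settings
# module:
#
#   c = Category(name='Python Books')
#   c.save()
#   assert c.slug == 'python-books'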
| apache-2.0 |
ChromiumWebApps/chromium | tools/win/link_limiter/build_link_limiter.py | 169 | 2766 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import os
import shutil
import subprocess
import sys
import tempfile
BUILD_DIR = 'build'
def run_with_vsvars(cmd, tmpdir=None):
fd, filename = tempfile.mkstemp('.bat', text=True)
with os.fdopen(fd, 'w') as f:
print >> f, '@echo off'
print >> f, r'call "%VS100COMNTOOLS%\vsvars32.bat"'
if tmpdir:
print >> f, r'cd %s' % tmpdir
print >> f, cmd
try:
p = subprocess.Popen([filename], shell=True, stdout=subprocess.PIPE,
universal_newlines=True)
out, _ = p.communicate()
return p.returncode, out
finally:
os.unlink(filename)
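# A hedged usage sketch of run_with_vsvars (not in the original script): it
# wraps an arbitrary command in a throwaway batch file so the command runs
# with the VS2010 environment loaded, e.g.:
#   rc, out = run_with_vsvars('cl /nologo /help')
#   if rc:
#     print out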
def get_vc_dir():
_, out = run_with_vsvars('echo VCINSTALLDIR=%VCINSTALLDIR%')
for line in out.splitlines(): # pylint: disable-msg=E1103
if line.startswith('VCINSTALLDIR='):
return line[len('VCINSTALLDIR='):]
return None
def build(infile):
if not os.path.exists(BUILD_DIR):
os.makedirs(BUILD_DIR)
outfile = 'limiter.exe'
outpath = os.path.join(BUILD_DIR, outfile)
cpptime = os.path.getmtime(infile)
if not os.path.exists(outpath) or cpptime > os.path.getmtime(outpath):
print 'Building %s...' % outfile
rc, out = run_with_vsvars(
'cl /nologo /Ox /Zi /W4 /WX /D_UNICODE /DUNICODE'
' /D_CRT_SECURE_NO_WARNINGS /EHsc %s /link /out:%s'
% (os.path.join('..', infile), outfile), BUILD_DIR)
if rc:
print out
print 'Failed to build %s' % outfile
sys.exit(1)
else:
print '%s already built' % outfile
return outpath
def main():
# Switch to our own dir.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
if sys.argv[-1] == 'clean':
if os.path.exists(BUILD_DIR):
shutil.rmtree(BUILD_DIR)
for exe in glob.glob('*.exe'):
os.unlink(exe)
return 0
vcdir = os.environ.get('VCINSTALLDIR')
if not vcdir:
vcdir = get_vc_dir()
if not vcdir:
print 'Could not get VCINSTALLDIR. Run vsvars32.bat?'
return 1
os.environ['PATH'] += (';' + os.path.join(vcdir, 'bin') +
';' + os.path.join(vcdir, r'..\Common7\IDE'))
# Verify that we can find link.exe.
link = os.path.join(vcdir, 'bin', 'link.exe')
if not os.path.exists(link):
print 'link.exe not found at %s' % link
return 1
exe_name = build('limiter.cc')
for shim_exe in ('lib.exe', 'link.exe'):
newpath = '%s__LIMITER.exe' % shim_exe
shutil.copyfile(exe_name, newpath)
print '%s shim built. Use with msbuild like: "/p:LinkToolExe=%s"' \
% (shim_exe, os.path.abspath(newpath))
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
xuegang/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/package/subt_limit_sanity/functional/test_functional.py | 9 | 1430 | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
under the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
from mpp.lib.gppkg.gppkg import Gppkg
from mpp.models import SQLTestCase
from tinctest.lib import run_shell_command
class SubtLimitTestCase(SQLTestCase):
"""
@optimizer_mode off
@tags gppkg
"""
sql_dir = 'sql/'
ans_dir = 'expected'
out_dir = 'output/'
@classmethod
def setUpClass(cls):
"""
Checking if plperl package installed, otherwise install the package
"""
super(SubtLimitTestCase, cls).setUpClass()
cmd = 'gpssh --version'
res = {'rc':0, 'stderr':'', 'stdout':''}
run_shell_command (cmd, 'check product version', res)
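        # run_shell_command fills the res dict in place; res['stdout'] below
        # carries the output of `gpssh --version`, which is passed to
        # gppkg_install, presumably to select a matching plperl package.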
gppkg = Gppkg()
product_version = res['stdout']
gppkg.gppkg_install(product_version, 'plperl')
| apache-2.0 |
syphar/django | tests/queries/tests.py | 7 | 158949 | from __future__ import unicode_literals
import datetime
import pickle
import unittest
from collections import OrderedDict
from operator import attrgetter
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DEFAULT_DB_ALIAS, connection
from django.db.models import Count, F, Q
from django.db.models.sql.constants import LOUTER
from django.db.models.sql.where import NothingNode, WhereNode
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from django.utils import six
from django.utils.six.moves import range
from .models import (
FK1, Annotation, Article, Author, BaseA, Book, CategoryItem,
CategoryRelationship, Celebrity, Channel, Chapter, Child, ChildObjectA,
Classroom, Company, Cover, CustomPk, CustomPkTag, Detail, DumbCategory,
Eaten, Employment, ExtraInfo, Fan, Food, Identifier, Individual, Item, Job,
JobResponsibilities, Join, LeafA, LeafB, LoopX, LoopZ, ManagedModel,
Member, ModelA, ModelB, ModelC, ModelD, MyObject, NamedCategory, Node,
Note, NullableName, Number, ObjectA, ObjectB, ObjectC, OneToOneCategory,
Order, OrderItem, Page, Paragraph, Person, Plaything, PointerA, Program,
ProxyCategory, ProxyObjectA, ProxyObjectB, Ranking, Related,
RelatedIndividual, RelatedObject, Report, ReservedName, Responsibility,
School, SharedConnection, SimpleCategory, SingleObject, SpecialCategory,
Staff, StaffUser, Student, Tag, Task, Ticket21203Child, Ticket21203Parent,
Ticket23605A, Ticket23605B, Ticket23605C, TvChef, Valid, X,
)
class Queries1Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name='t1', category=generic)
cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic)
cls.t3 = Tag.objects.create(name='t3', parent=cls.t1)
t4 = Tag.objects.create(name='t4', parent=cls.t3)
cls.t5 = Tag.objects.create(name='t5', parent=cls.t3)
cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
n2 = Note.objects.create(note='n2', misc='bar', id=2)
cls.n3 = Note.objects.create(note='n3', misc='foo', id=3)
ann1 = Annotation.objects.create(name='a1', tag=cls.t1)
ann1.notes.add(cls.n1)
ann2 = Annotation.objects.create(name='a2', tag=t4)
ann2.notes.add(n2, cls.n3)
# Create these out of order so that sorting by 'id' will be different to sorting
# by 'info'. Helps detect some problems later.
cls.e2 = ExtraInfo.objects.create(info='e2', note=n2, value=41)
e1 = ExtraInfo.objects.create(info='e1', note=cls.n1, value=42)
cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
cls.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
a3 = Author.objects.create(name='a3', num=3003, extra=cls.e2)
cls.a4 = Author.objects.create(name='a4', num=4004, extra=cls.e2)
cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
cls.i1 = Item.objects.create(name='one', created=cls.time1, modified=cls.time1, creator=cls.a1, note=cls.n3)
cls.i1.tags.set([cls.t1, cls.t2])
cls.i2 = Item.objects.create(name='two', created=cls.time2, creator=cls.a2, note=n2)
cls.i2.tags.set([cls.t1, cls.t3])
cls.i3 = Item.objects.create(name='three', created=time3, creator=cls.a2, note=cls.n3)
i4 = Item.objects.create(name='four', created=time4, creator=cls.a4, note=cls.n3)
i4.tags.set([t4])
cls.r1 = Report.objects.create(name='r1', creator=cls.a1)
Report.objects.create(name='r2', creator=a3)
Report.objects.create(name='r3')
# Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
# will be rank3, rank2, rank1.
cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2)
Cover.objects.create(title="first", item=i4)
Cover.objects.create(title="second", item=cls.i2)
def test_subquery_condition(self):
qs1 = Tag.objects.filter(pk__lte=0)
qs2 = Tag.objects.filter(parent__in=qs1)
qs3 = Tag.objects.filter(parent__in=qs2)
self.assertEqual(qs3.query.subq_aliases, {'T', 'U', 'V'})
self.assertIn('v0', str(qs3.query).lower())
qs4 = qs3.filter(parent__in=qs1)
self.assertEqual(qs4.query.subq_aliases, {'T', 'U', 'V'})
# It is possible to reuse U for the second subquery, no need to use W.
self.assertNotIn('w0', str(qs4.query).lower())
# So, 'U0."id"' is referenced twice.
        self.assertEqual(str(qs4.query).lower().count('u0'), 2)
def test_ticket1050(self):
self.assertQuerysetEqual(
Item.objects.filter(tags__isnull=True),
['<Item: three>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__id__isnull=True),
['<Item: three>']
)
def test_ticket1801(self):
self.assertQuerysetEqual(
Author.objects.filter(item=self.i2),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(item=self.i3),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3),
['<Author: a2>']
)
def test_ticket2306(self):
# Checking that no join types are "left outer" joins.
query = Item.objects.filter(tags=self.t2).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).order_by('name'),
['<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(creator__name='fred') | Q(tags=self.t2)),
['<Item: one>']
)
# Each filter call is processed "at once" against a single table, so this is
# different from the previous example as it tries to find tags that are two
# things at once (rather than two tags).
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)),
[]
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1), Q(creator__name='fred') | Q(tags=self.t2)),
[]
)
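        # Hedged sketch of the SQL shape (simplified, not the exact generated
        # query): chained .filter(tags=t1).filter(tags=t2) joins the m2m table
        # twice, roughly
        #   JOIN item_tags T1 ... JOIN item_tags T2
        #   WHERE T1.tag_id = <t1> AND T2.tag_id = <t2>
        # whereas Q(tags=t1) & Q(tags=t2) uses one join whose single tag_id
        # column cannot equal two distinct values, so nothing matches.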
qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
self.assertQuerysetEqual(list(qs), ['<Author: a2>'])
        self.assertEqual(qs.query.count_active_tables(), 2)
qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
self.assertEqual(qs.query.count_active_tables(), 3)
def test_ticket4464(self):
self.assertQuerysetEqual(
Item.objects.filter(tags=self.t1).filter(tags=self.t2),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name'),
['<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
['<Item: two>']
)
# Make sure .distinct() works with slicing (this was broken in Oracle).
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).order_by('name')[:3],
['<Item: one>', '<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name')[:3],
['<Item: one>', '<Item: two>']
)
def test_tickets_2080_3592(self):
self.assertQuerysetEqual(
Author.objects.filter(item__name='one') | Author.objects.filter(name='a3'),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(item__name='one') | Q(name='a3')),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(name='a3') | Q(item__name='one')),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(item__name='three') | Q(report__name='r3')),
['<Author: a2>']
)
def test_ticket6074(self):
# Merging two empty result sets shouldn't leave a queryset with no constraints
# (which would match everything).
self.assertQuerysetEqual(Author.objects.filter(Q(id__in=[])), [])
self.assertQuerysetEqual(
Author.objects.filter(Q(id__in=[]) | Q(id__in=[])),
[]
)
def test_tickets_1878_2939(self):
self.assertEqual(Item.objects.values('creator').distinct().count(), 3)
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
xx = Item(name='four', created=self.time1, creator=self.a2, note=self.n1)
xx.save()
self.assertEqual(
Item.objects.exclude(name='two').values('creator', 'name').distinct().count(),
4
)
self.assertEqual(
(
Item.objects
.exclude(name='two')
.extra(select={'foo': '%s'}, select_params=(1,))
.values('creator', 'name', 'foo')
.distinct()
.count()
),
4
)
self.assertEqual(
(
Item.objects
.exclude(name='two')
.extra(select={'foo': '%s'}, select_params=(1,))
.values('creator', 'name')
.distinct()
.count()
),
4
)
xx.delete()
def test_ticket7323(self):
self.assertEqual(Item.objects.values('creator', 'name').count(), 4)
def test_ticket2253(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
self.assertQuerysetEqual(
q1,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(q2, ['<Item: one>'])
self.assertQuerysetEqual(
(q1 | q2).order_by('name'),
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual((q1 & q2).order_by('name'), ['<Item: one>'])
q1 = Item.objects.filter(tags=self.t1)
q2 = Item.objects.filter(note=self.n3, tags=self.t2)
q3 = Item.objects.filter(creator=self.a4)
self.assertQuerysetEqual(
((q1 & q2) | q3).order_by('name'),
['<Item: four>', '<Item: one>']
)
def test_order_by_tables(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
list(q2)
combined_query = (q1 & q2).order_by('name').query
self.assertEqual(len([
t for t in combined_query.tables if combined_query.alias_refcount[t]
]), 1)
def test_order_by_join_unref(self):
"""
This test is related to the above one, testing that there aren't
old JOINs in the query.
"""
qs = Celebrity.objects.order_by('greatest_fan__fan_of')
self.assertIn('OUTER JOIN', str(qs.query))
qs = qs.order_by('id')
self.assertNotIn('OUTER JOIN', str(qs.query))
def test_get_clears_ordering(self):
"""
get() should clear ordering for optimization purposes.
"""
with CaptureQueriesContext(connection) as captured_queries:
Author.objects.order_by('name').get(pk=self.a1.pk)
self.assertNotIn('order by', captured_queries[0]['sql'].lower())
def test_tickets_4088_4306(self):
self.assertQuerysetEqual(
Report.objects.filter(creator=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__num=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(Report.objects.filter(creator__id=1001), [])
self.assertQuerysetEqual(
Report.objects.filter(creator__id=self.a1.id),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__name='a1'),
['<Report: r1>']
)
def test_ticket4510(self):
self.assertQuerysetEqual(
Author.objects.filter(report__name='r1'),
['<Author: a1>']
)
def test_ticket7378(self):
self.assertQuerysetEqual(self.a1.report_set.all(), ['<Report: r1>'])
def test_tickets_5324_6704(self):
self.assertQuerysetEqual(
Item.objects.filter(tags__name='t4'),
['<Item: four>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct(),
['<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse(),
['<Item: two>', '<Item: three>', '<Item: one>']
)
self.assertQuerysetEqual(
Author.objects.exclude(item__name='one').distinct().order_by('name'),
['<Author: a2>', '<Author: a3>', '<Author: a4>']
)
# Excluding across a m2m relation when there is more than one related
# object associated was problematic.
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1').order_by('name'),
['<Item: four>', '<Item: three>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1').exclude(tags__name='t4'),
['<Item: three>']
)
# Excluding from a relation that cannot be NULL should not use outer joins.
query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
# Similarly, when one of the joins cannot possibly, ever, involve NULL
# values (Author -> ExtraInfo, in the following), it should never be
# promoted to a left outer join. So the following query should only
# involve one "left outer" join (Author -> Item is 0-to-many).
qs = Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3))
self.assertEqual(
len([
x for x in qs.query.alias_map.values()
if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias]
]),
1
)
# The previous changes shouldn't affect nullable foreign key joins.
self.assertQuerysetEqual(
Tag.objects.filter(parent__isnull=True).order_by('name'),
['<Tag: t1>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent__isnull=True).order_by('name'),
['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
def test_ticket2091(self):
t = Tag.objects.get(name='t4')
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[t]),
['<Item: four>']
)
def test_avoid_infinite_loop_on_too_many_subqueries(self):
x = Tag.objects.filter(pk=1)
local_recursion_limit = 127
msg = 'Maximum recursion depth exceeded: too many subqueries.'
with self.assertRaisesMessage(RuntimeError, msg):
for i in six.moves.range(local_recursion_limit * 2):
x = Tag.objects.filter(pk__in=x)
def test_reasonable_number_of_subq_aliases(self):
x = Tag.objects.filter(pk=1)
for _ in range(20):
x = Tag.objects.filter(pk__in=x)
self.assertEqual(
x.query.subq_aliases, {
'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD',
'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN',
}
)
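        # Hedged reading of the expected set above: bump_prefix walks single
        # letters from 'T' through 'Z' and then moves to two-letter prefixes
        # ('AA', 'AB', ...), keeping every nested subquery's aliases unique.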
def test_heterogeneous_qs_combination(self):
# Combining querysets built on different models should behave in a well-defined
# fashion. We raise an error.
with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'):
Author.objects.all() & Tag.objects.all()
with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'):
Author.objects.all() | Tag.objects.all()
def test_ticket3141(self):
self.assertEqual(Author.objects.extra(select={'foo': '1'}).count(), 4)
self.assertEqual(
Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count(),
4
)
def test_ticket2400(self):
self.assertQuerysetEqual(
Author.objects.filter(item__isnull=True),
['<Author: a3>']
)
self.assertQuerysetEqual(
Tag.objects.filter(item__isnull=True),
['<Tag: t5>']
)
def test_ticket2496(self):
self.assertQuerysetEqual(
Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1],
['<Item: four>']
)
def test_error_raised_on_filter_with_dictionary(self):
with self.assertRaisesMessage(FieldError, 'Cannot parse keyword query as dict'):
Note.objects.filter({'note': 'n1', 'misc': 'foo'})
def test_tickets_2076_7256(self):
# Ordering on related tables should be possible, even if the table is
# not otherwise involved.
self.assertQuerysetEqual(
Item.objects.order_by('note__note', 'name'),
['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
)
# Ordering on a related field should use the remote model's default
# ordering as a final step.
self.assertQuerysetEqual(
Author.objects.order_by('extra', '-name'),
['<Author: a2>', '<Author: a1>', '<Author: a4>', '<Author: a3>']
)
# Using remote model default ordering can span multiple models (in this
# case, Cover is ordered by Item's default, which uses Note's default).
self.assertQuerysetEqual(
Cover.objects.all(),
['<Cover: first>', '<Cover: second>']
)
# If the remote model does not have a default ordering, we order by its 'id'
# field.
self.assertQuerysetEqual(
Item.objects.order_by('creator', 'name'),
['<Item: one>', '<Item: three>', '<Item: two>', '<Item: four>']
)
# Ordering by a many-valued attribute (e.g. a many-to-many or reverse
# ForeignKey) is legal, but the results might not make sense. That
# isn't Django's problem. Garbage in, garbage out.
self.assertQuerysetEqual(
Item.objects.filter(tags__isnull=False).order_by('tags', 'id'),
['<Item: one>', '<Item: two>', '<Item: one>', '<Item: two>', '<Item: four>']
)
# If we replace the default ordering, Django adjusts the required
# tables automatically. Item normally requires a join with Note to do
# the default ordering, but that isn't needed here.
qs = Item.objects.order_by('name')
self.assertQuerysetEqual(
qs,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertEqual(len(qs.query.tables), 1)
def test_tickets_2874_3002(self):
qs = Item.objects.select_related().order_by('note__note', 'name')
self.assertQuerysetEqual(
qs,
['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
)
# This is also a good select_related() test because there are multiple
# Note entries in the SQL. The two Note items should be different.
        self.assertEqual(repr(qs[0].note), '<Note: n2>')
self.assertEqual(repr(qs[0].creator.extra.note), '<Note: n1>')
def test_ticket3037(self):
self.assertQuerysetEqual(
Item.objects.filter(Q(creator__name='a3', name='two') | Q(creator__name='a4', name='four')),
['<Item: four>']
)
def test_tickets_5321_7070(self):
# Ordering columns must be included in the output columns. Note that
# this means results that might otherwise be distinct are not (if there
# are multiple values in the ordering cols), as in this example. This
# isn't a bug; it's a warning to be careful with the selection of
# ordering columns.
self.assertSequenceEqual(
Note.objects.values('misc').distinct().order_by('note', '-misc'),
[{'misc': 'foo'}, {'misc': 'bar'}, {'misc': 'foo'}]
)
def test_ticket4358(self):
# If you don't pass any fields to values(), relation fields are
# returned as "foo_id" keys, not "foo". For consistency, you should be
# able to pass "foo_id" in the fields list and have it work, too. We
# actually allow both "foo" and "foo_id".
# The *_id version is returned by default.
self.assertIn('note_id', ExtraInfo.objects.values()[0])
# You can also pass it in explicitly.
self.assertSequenceEqual(ExtraInfo.objects.values('note_id'), [{'note_id': 1}, {'note_id': 2}])
# ...or use the field name.
self.assertSequenceEqual(ExtraInfo.objects.values('note'), [{'note': 1}, {'note': 2}])
def test_ticket2902(self):
# Parameters can be given to extra_select, *if* you use an OrderedDict.
# (First we need to know which order the keys fall in "naturally" on
# your system, so we can put things in the wrong way around from
# normal. A normal dict would thus fail.)
s = [('a', '%s'), ('b', '%s')]
params = ['one', 'two']
        if list({'a': 1, 'b': 2}) == ['a', 'b']:
s.reverse()
params.reverse()
# This slightly odd comparison works around the fact that PostgreSQL will
# return 'one' and 'two' as strings, not Unicode objects. It's a side-effect of
# using constants here and not a real concern.
d = Item.objects.extra(select=OrderedDict(s), select_params=params).values('a', 'b')[0]
self.assertEqual(d, {'a': 'one', 'b': 'two'})
# Order by the number of tags attached to an item.
        qs = (
            Item.objects
            .extra(select={
                'count': 'select count(*) from queries_item_tags where queries_item_tags.item_id = queries_item.id'
            })
            .order_by('-count')
        )
        self.assertEqual([o.count for o in qs], [2, 2, 1, 0])
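        # Hedged note: the extra-select alias 'count' is a correlated
        # subquery evaluated per item row, which is why order_by('-count')
        # works without an explicit join or annotation.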
def test_ticket6154(self):
# Multiple filter statements are joined using "AND" all the time.
self.assertQuerysetEqual(
Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3)),
['<Author: a1>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(extra__note=self.n1) | Q(item__note=self.n3)).filter(id=self.a1.id),
['<Author: a1>']
)
def test_ticket6981(self):
self.assertQuerysetEqual(
Tag.objects.select_related('parent').order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
def test_ticket9926(self):
self.assertQuerysetEqual(
Tag.objects.select_related("parent", "category").order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.select_related('parent', "parent__category").order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
def test_tickets_6180_6203(self):
# Dates with limits and/or counts
self.assertEqual(Item.objects.count(), 4)
self.assertEqual(Item.objects.datetimes('created', 'month').count(), 1)
self.assertEqual(Item.objects.datetimes('created', 'day').count(), 2)
self.assertEqual(len(Item.objects.datetimes('created', 'day')), 2)
self.assertEqual(Item.objects.datetimes('created', 'day')[0], datetime.datetime(2007, 12, 19, 0, 0))
def test_tickets_7087_12242(self):
# Dates with extra select columns
self.assertQuerysetEqual(
Item.objects.datetimes('created', 'day').extra(select={'a': 1}),
['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
)
self.assertQuerysetEqual(
Item.objects.extra(select={'a': 1}).datetimes('created', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
)
name = "one"
self.assertQuerysetEqual(
Item.objects.datetimes('created', 'day').extra(where=['name=%s'], params=[name]),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
self.assertQuerysetEqual(
Item.objects.extra(where=['name=%s'], params=[name]).datetimes('created', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
def test_ticket7155(self):
# Nullable dates
self.assertQuerysetEqual(
Item.objects.datetimes('modified', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
def test_ticket7098(self):
# Make sure semi-deprecated ordering by related models syntax still
# works.
self.assertSequenceEqual(
Item.objects.values('note__note').order_by('queries_note.note', 'id'),
[{'note__note': 'n2'}, {'note__note': 'n3'}, {'note__note': 'n3'}, {'note__note': 'n3'}]
)
def test_ticket7096(self):
# Make sure exclude() with multiple conditions continues to work.
self.assertQuerysetEqual(
Tag.objects.filter(parent=self.t1, name='t3').order_by('name'),
['<Tag: t3>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent=self.t1, name='t3').order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct(),
['<Item: four>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name'),
['<Item: four>', '<Item: three>']
)
# More twisted cases, involving nested negations.
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name='t1', name='one')),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(~Q(tags__name='t1', name='one'), name='two'),
['<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two'),
['<Item: four>', '<Item: one>', '<Item: three>']
)
def test_tickets_7204_7506(self):
# Make sure querysets with related fields can be pickled. If this
# doesn't crash, it's a Good Thing.
pickle.dumps(Item.objects.all())
def test_ticket7813(self):
# We should also be able to pickle things that use select_related().
# The only tricky thing here is to ensure that we do the related
# selections properly after unpickling.
qs = Item.objects.select_related()
query = qs.query.get_compiler(qs.db).as_sql()[0]
query2 = pickle.loads(pickle.dumps(qs.query))
self.assertEqual(
query2.get_compiler(qs.db).as_sql()[0],
query
)
def test_deferred_load_qs_pickling(self):
# Check pickling of deferred-loading querysets
qs = Item.objects.defer('name', 'creator')
q2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(list(qs), list(q2))
q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL))
self.assertEqual(list(qs), list(q3))
def test_ticket7277(self):
self.assertQuerysetEqual(
self.n1.annotation_set.filter(
Q(tag=self.t5) | Q(tag__children=self.t5) | Q(tag__children__children=self.t5)
),
['<Annotation: a1>']
)
def test_tickets_7448_7707(self):
# Complex objects should be converted to strings before being used in
# lookups.
self.assertQuerysetEqual(
Item.objects.filter(created__in=[self.time1, self.time2]),
['<Item: one>', '<Item: two>']
)
def test_ticket7235(self):
# An EmptyQuerySet should not raise exceptions if it is filtered.
Eaten.objects.create(meal='m')
q = Eaten.objects.none()
with self.assertNumQueries(0):
self.assertQuerysetEqual(q.all(), [])
self.assertQuerysetEqual(q.filter(meal='m'), [])
self.assertQuerysetEqual(q.exclude(meal='m'), [])
self.assertQuerysetEqual(q.complex_filter({'pk': 1}), [])
self.assertQuerysetEqual(q.select_related('food'), [])
self.assertQuerysetEqual(q.annotate(Count('food')), [])
self.assertQuerysetEqual(q.order_by('meal', 'food'), [])
self.assertQuerysetEqual(q.distinct(), [])
self.assertQuerysetEqual(
q.extra(select={'foo': "1"}),
[]
)
q.query.low_mark = 1
with self.assertRaisesMessage(AssertionError, 'Cannot change a query once a slice has been taken'):
q.extra(select={'foo': "1"})
self.assertQuerysetEqual(q.reverse(), [])
self.assertQuerysetEqual(q.defer('meal'), [])
self.assertQuerysetEqual(q.only('meal'), [])
def test_ticket7791(self):
# There were "issues" when ordering and distinct-ing on fields related
# via ForeignKeys.
self.assertEqual(
len(Note.objects.order_by('extrainfo__info').distinct()),
3
)
# Pickling of QuerySets using datetimes() should work.
qs = Item.objects.datetimes('created', 'month')
pickle.loads(pickle.dumps(qs))
def test_ticket9997(self):
# If a ValuesList or Values queryset is passed as an inner query, we
# make sure it's only requesting a single value and use that as the
# thing to select.
self.assertQuerysetEqual(
Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name')),
['<Tag: t2>', '<Tag: t3>']
)
# Multi-valued values() and values_list() querysets should raise errors.
with self.assertRaisesMessage(TypeError, 'Cannot use multi-field values as a filter value.'):
Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name', 'id'))
with self.assertRaisesMessage(TypeError, 'Cannot use multi-field values as a filter value.'):
Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values_list('name', 'id'))
def test_ticket9985(self):
# qs.values_list(...).values(...) combinations should work.
self.assertSequenceEqual(
Note.objects.values_list("note", flat=True).values("id").order_by("id"),
[{'id': 1}, {'id': 2}, {'id': 3}]
)
self.assertQuerysetEqual(
Annotation.objects.filter(notes__in=Note.objects.filter(note="n1").values_list('note').values('id')),
['<Annotation: a1>']
)
def test_ticket10205(self):
# When bailing out early because of an empty "__in" filter, we need
# to set things up correctly internally so that subqueries can continue properly.
self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0)
def test_ticket10432(self):
# Testing an empty "__in" filter with a generator as the value.
def f():
return iter([])
n_obj = Note.objects.all()[0]
def g():
for i in [n_obj.pk]:
yield i
self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), [])
self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj])
def test_ticket10742(self):
# Queries used in an __in clause don't execute subqueries
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.filter(pk__in=subq)
self.assertQuerysetEqual(qs, ['<Author: a1>', '<Author: a2>'])
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.exclude(pk__in=subq)
self.assertQuerysetEqual(qs, ['<Author: a3>', '<Author: a4>'])
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
subq = Author.objects.filter(num__lt=3000)
self.assertQuerysetEqual(
Author.objects.filter(Q(pk__in=subq) & Q(name='a1')),
['<Author: a1>']
)
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
def test_ticket7076(self):
# Excluding shouldn't eliminate NULL entries.
self.assertQuerysetEqual(
Item.objects.exclude(modified=self.time1).order_by('name'),
['<Item: four>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent__name=self.t1.name),
['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
)
def test_ticket7181(self):
# Ordering by related tables should accommodate nullable fields (this
# test is a little tricky, since NULL ordering is database dependent.
# Instead, we just count the number of results).
self.assertEqual(len(Tag.objects.order_by('parent__name')), 5)
# Empty querysets can be merged with others.
self.assertQuerysetEqual(
Note.objects.none() | Note.objects.all(),
['<Note: n1>', '<Note: n2>', '<Note: n3>']
)
self.assertQuerysetEqual(
Note.objects.all() | Note.objects.none(),
['<Note: n1>', '<Note: n2>', '<Note: n3>']
)
self.assertQuerysetEqual(Note.objects.none() & Note.objects.all(), [])
self.assertQuerysetEqual(Note.objects.all() & Note.objects.none(), [])
def test_ticket9411(self):
# Make sure bump_prefix() (an internal Query method) doesn't (re-)break. It's
# sufficient that this query runs without error.
qs = Tag.objects.values_list('id', flat=True).order_by('id')
qs.query.bump_prefix(qs.query)
first = qs[0]
self.assertEqual(list(qs), list(range(first, first + 5)))
def test_ticket8439(self):
# Complex combinations of conjunctions, disjunctions and nullable
# relations.
self.assertQuerysetEqual(
Author.objects.filter(Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name='xyz')),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(report=self.r1, name='xyz') | Q(item__note__extrainfo=self.e2)),
['<Author: a2>']
)
self.assertQuerysetEqual(
Annotation.objects.filter(Q(tag__parent=self.t1) | Q(notes__note='n1', name='a1')),
['<Annotation: a1>']
)
xx = ExtraInfo.objects.create(info='xx', note=self.n3)
self.assertQuerysetEqual(
Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)),
['<Note: n1>', '<Note: n3>']
)
q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query
self.assertEqual(
len([x for x in q.alias_map.values() if x.join_type == LOUTER and q.alias_refcount[x.table_alias]]),
1
)
def test_ticket17429(self):
"""
Ensure that Meta.ordering=None works the same as Meta.ordering=[]
"""
original_ordering = Tag._meta.ordering
Tag._meta.ordering = None
try:
self.assertQuerysetEqual(
Tag.objects.all(),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
ordered=False
)
finally:
Tag._meta.ordering = original_ordering
def test_exclude(self):
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4'),
[repr(i) for i in Item.objects.filter(~Q(tags__name='t4'))])
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name='t4') | Q(tags__name='t3')),
[repr(i) for i in Item.objects.filter(~(Q(tags__name='t4') | Q(tags__name='t3')))])
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name='t4') | ~Q(tags__name='t3')),
[repr(i) for i in Item.objects.filter(~(Q(tags__name='t4') | ~Q(tags__name='t3')))])
def test_nested_exclude(self):
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name='t4')),
[repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))])
def test_double_exclude(self):
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name='t4')),
[repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))])
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name='t4')),
[repr(i) for i in Item.objects.filter(~Q(~Q(tags__name='t4')))])
def test_exclude_in(self):
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name__in=['t4', 't3'])),
[repr(i) for i in Item.objects.filter(~Q(tags__name__in=['t4', 't3']))])
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name__in=['t4', 't3'])),
[repr(i) for i in Item.objects.filter(~~Q(tags__name__in=['t4', 't3']))])
def test_ticket_10790_1(self):
# Querying direct fields with isnull should trim the left outer join.
# It also should not create INNER JOIN.
q = Tag.objects.filter(parent__isnull=True)
self.assertQuerysetEqual(q, ['<Tag: t1>'])
self.assertNotIn('JOIN', str(q.query))
q = Tag.objects.filter(parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
)
self.assertNotIn('JOIN', str(q.query))
q = Tag.objects.exclude(parent__isnull=True)
self.assertQuerysetEqual(
q,
['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
)
self.assertNotIn('JOIN', str(q.query))
q = Tag.objects.exclude(parent__isnull=False)
self.assertQuerysetEqual(q, ['<Tag: t1>'])
self.assertNotIn('JOIN', str(q.query))
q = Tag.objects.exclude(parent__parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'],
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
self.assertNotIn('INNER JOIN', str(q.query))
def test_ticket_10790_2(self):
# Querying across several tables should strip only the last outer join,
# while preserving the preceding inner joins.
q = Tag.objects.filter(parent__parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Tag: t4>', '<Tag: t5>'],
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 1)
# Querying without isnull should not convert anything to left outer join.
q = Tag.objects.filter(parent__parent=self.t1)
self.assertQuerysetEqual(
q,
['<Tag: t4>', '<Tag: t5>'],
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 1)
def test_ticket_10790_3(self):
# Querying via indirect fields should populate the left outer join
q = NamedCategory.objects.filter(tag__isnull=True)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
# join to dumbcategory ptr_id
self.assertEqual(str(q.query).count('INNER JOIN'), 1)
self.assertQuerysetEqual(q, [])
# Querying across several tables should strip only the last join, while
# preserving the preceding left outer joins.
q = NamedCategory.objects.filter(tag__parent__isnull=True)
self.assertEqual(str(q.query).count('INNER JOIN'), 1)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
self.assertQuerysetEqual(q, ['<NamedCategory: Generic>'])
def test_ticket_10790_4(self):
# Querying across m2m field should not strip the m2m table from join.
q = Author.objects.filter(item__tags__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a2>', '<Author: a3>'],
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 2)
self.assertNotIn('INNER JOIN', str(q.query))
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>'],
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3)
self.assertNotIn('INNER JOIN', str(q.query))
def test_ticket_10790_5(self):
# Querying with isnull=False across m2m field should not create outer joins
q = Author.objects.filter(item__tags__isnull=False)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 2)
q = Author.objects.filter(item__tags__parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a2>', '<Author: a4>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 3)
q = Author.objects.filter(item__tags__parent__parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Author: a4>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 4)
def test_ticket_10790_6(self):
# Querying with isnull=True across m2m field should not create inner joins
# and strip last outer join
q = Author.objects.filter(item__tags__parent__parent__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>',
'<Author: a2>', '<Author: a3>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 4)
self.assertEqual(str(q.query).count('INNER JOIN'), 0)
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3)
self.assertEqual(str(q.query).count('INNER JOIN'), 0)
def test_ticket_10790_7(self):
# Reverse querying with isnull should not strip the join
q = Author.objects.filter(item__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a3>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
self.assertEqual(str(q.query).count('INNER JOIN'), 0)
q = Author.objects.filter(item__isnull=False)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 1)
def test_ticket_10790_8(self):
# Querying with combined q-objects should also strip the left outer join
q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1))
self.assertQuerysetEqual(
q,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q.query).count('INNER JOIN'), 0)
def test_ticket_10790_combine(self):
# Combining queries should not re-populate the left outer join
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__isnull=False)
q3 = q1 | q2
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
)
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
q3 = q1 & q2
self.assertQuerysetEqual(q3, [])
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
q2 = Tag.objects.filter(parent=self.t1)
q3 = q1 | q2
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
q3 = q2 | q1
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__parent__isnull=True)
q3 = q1 | q2
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
q3 = q2 | q1
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1)
self.assertEqual(str(q3.query).count('INNER JOIN'), 0)
def test_ticket19672(self):
self.assertQuerysetEqual(
Report.objects.filter(Q(creator__isnull=False) & ~Q(creator__extra__value=41)),
['<Report: r1>']
)
def test_ticket_20250(self):
# A negated Q along with an annotated queryset failed in Django 1.4
qs = Author.objects.annotate(Count('item'))
qs = qs.filter(~Q(extra__value=0))
self.assertIn('SELECT', str(qs.query))
self.assertQuerysetEqual(
qs,
['<Author: a1>', '<Author: a2>', '<Author: a3>', '<Author: a4>']
)
def test_lookup_constraint_fielderror(self):
msg = (
"Cannot resolve keyword 'unknown_field' into field. Choices are: "
"annotation, category, category_id, children, id, item, "
"managedmodel, name, note, parent, parent_id"
)
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.filter(unknown_field__name='generic')
class Queries2Tests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=4)
Number.objects.create(num=8)
Number.objects.create(num=12)
def test_ticket4289(self):
        # A slight variation on restricting the filtering choices by the
        # lookup constraints.
self.assertQuerysetEqual(Number.objects.filter(num__lt=4), [])
self.assertQuerysetEqual(Number.objects.filter(num__gt=8, num__lt=12), [])
self.assertQuerysetEqual(
Number.objects.filter(num__gt=8, num__lt=13),
['<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)),
['<Number: 8>']
)
def test_ticket12239(self):
# Custom lookups are registered to round float values correctly on gte
# and lt IntegerField queries.
self.assertQuerysetEqual(
Number.objects.filter(num__gt=11.9),
['<Number: 12>']
)
self.assertQuerysetEqual(Number.objects.filter(num__gt=12), [])
self.assertQuerysetEqual(Number.objects.filter(num__gt=12.0), [])
self.assertQuerysetEqual(Number.objects.filter(num__gt=12.1), [])
self.assertQuerysetEqual(
Number.objects.filter(num__lt=12),
['<Number: 4>', '<Number: 8>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lt=12.0),
['<Number: 4>', '<Number: 8>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lt=12.1),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__gte=11.9),
['<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__gte=12),
['<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__gte=12.0),
['<Number: 12>']
)
self.assertQuerysetEqual(Number.objects.filter(num__gte=12.1), [])
self.assertQuerysetEqual(Number.objects.filter(num__gte=12.9), [])
self.assertQuerysetEqual(
Number.objects.filter(num__lte=11.9),
['<Number: 4>', '<Number: 8>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12.0),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12.1),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12.9),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
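        # In short (added summary of the behavior exercised above): a
        # fractional bound against an IntegerField is snapped to the integer
        # bound that preserves the real-number comparison: num__gt=11.9 and
        # num__gte=11.9 both behave like num__gte=12, while num__lt=12.1 and
        # num__lte=12.1 both behave like num__lte=12.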
def test_ticket7759(self):
# Count should work with a partially read result set.
count = Number.objects.count()
qs = Number.objects.all()
def run():
for obj in qs:
return qs.count() == count
self.assertTrue(run())
class Queries3Tests(TestCase):
def test_ticket7107(self):
# This shouldn't create an infinite loop.
self.assertQuerysetEqual(Valid.objects.all(), [])
def test_ticket8683(self):
# An error should be raised when QuerySet.datetimes() is passed the
# wrong type of field.
with self.assertRaisesMessage(AssertionError, "'name' isn't a DateField, TimeField, or DateTimeField."):
Item.objects.datetimes('name', 'month')
def test_ticket22023(self):
with self.assertRaisesMessage(TypeError, "Cannot call only() after .values() or .values_list()"):
Valid.objects.values().only()
with self.assertRaisesMessage(TypeError, "Cannot call defer() after .values() or .values_list()"):
Valid.objects.values().defer()
class Queries4Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name='t1', category=generic)
n1 = Note.objects.create(note='n1', misc='foo')
n2 = Note.objects.create(note='n2', misc='bar')
e1 = ExtraInfo.objects.create(info='e1', note=n1)
e2 = ExtraInfo.objects.create(info='e2', note=n2)
cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
cls.a3 = Author.objects.create(name='a3', num=3003, extra=e2)
cls.r1 = Report.objects.create(name='r1', creator=cls.a1)
cls.r2 = Report.objects.create(name='r2', creator=cls.a3)
cls.r3 = Report.objects.create(name='r3')
Item.objects.create(name='i1', created=datetime.datetime.now(), note=n1, creator=cls.a1)
Item.objects.create(name='i2', created=datetime.datetime.now(), note=n1, creator=cls.a3)
def test_ticket24525(self):
tag = Tag.objects.create()
anth100 = tag.note_set.create(note='ANTH', misc='100')
math101 = tag.note_set.create(note='MATH', misc='101')
s1 = tag.annotation_set.create(name='1')
s2 = tag.annotation_set.create(name='2')
s1.notes.set([math101, anth100])
s2.notes.set([math101])
result = math101.annotation_set.all() & tag.annotation_set.exclude(notes__in=[anth100])
self.assertEqual(list(result), [s2])
def test_ticket11811(self):
unsaved_category = NamedCategory(name="Other")
msg = 'Unsaved model instance <NamedCategory: Other> cannot be used in an ORM query.'
with self.assertRaisesMessage(ValueError, msg):
Tag.objects.filter(pk=self.t1.pk).update(category=unsaved_category)
def test_ticket14876(self):
        # Note: when combining the queries we need to have information
        # available about the join type of the trimmed "creator__isnull"
        # join. If we don't have that information, the join is created as
        # an INNER JOIN and the results will be incorrect.
q1 = Report.objects.filter(Q(creator__isnull=True) | Q(creator__extra__info='e1'))
q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(Q(creator__extra__info='e1'))
self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
self.assertEqual(str(q1.query), str(q2.query))
q1 = Report.objects.filter(Q(creator__extra__info='e1') | Q(creator__isnull=True))
q2 = Report.objects.filter(Q(creator__extra__info='e1')) | Report.objects.filter(Q(creator__isnull=True))
self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
self.assertEqual(str(q1.query), str(q2.query))
q1 = Item.objects.filter(Q(creator=self.a1) | Q(creator__report__name='r1')).order_by()
q2 = (
Item.objects
.filter(Q(creator=self.a1)).order_by() | Item.objects.filter(Q(creator__report__name='r1'))
.order_by()
)
self.assertQuerysetEqual(q1, ["<Item: i1>"])
self.assertEqual(str(q1.query), str(q2.query))
q1 = Item.objects.filter(Q(creator__report__name='e1') | Q(creator=self.a1)).order_by()
q2 = (
Item.objects.filter(Q(creator__report__name='e1')).order_by() |
Item.objects.filter(Q(creator=self.a1)).order_by()
)
self.assertQuerysetEqual(q1, ["<Item: i1>"])
self.assertEqual(str(q1.query), str(q2.query))
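        # Added sketch: the trimmed "creator__isnull" join must stay promoted
        # to a LEFT OUTER JOIN in the combined query, otherwise r3 (which has
        # no creator) would be dropped from the disjunction.
        q = Report.objects.filter(Q(creator__isnull=True) | Q(creator__extra__info='e1'))
        self.assertIn('LEFT OUTER JOIN', str(q.query))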
def test_combine_join_reuse(self):
        # Test that we correctly recreate joins having identical connections
        # in the rhs query when the querysets are ORed together. Related to
        # ticket #18748.
Report.objects.create(name='r4', creator=self.a1)
q1 = Author.objects.filter(report__name='r5')
q2 = Author.objects.filter(report__name='r4').filter(report__name='r1')
combined = q1 | q2
self.assertEqual(str(combined.query).count('JOIN'), 2)
self.assertEqual(len(combined), 1)
self.assertEqual(combined[0].name, 'a1')
def test_ticket7095(self):
# Updates that are filtered on the model being updated are somewhat
# tricky in MySQL. This exercises that case.
ManagedModel.objects.create(data='mm1', tag=self.t1, public=True)
self.assertEqual(ManagedModel.objects.update(data='mm'), 1)
# A values() or values_list() query across joined models must use outer
# joins appropriately.
# Note: In Oracle, we expect a null CharField to return '' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_charfield_repr = ''
else:
expected_null_charfield_repr = None
self.assertSequenceEqual(
Report.objects.values_list("creator__extra__info", flat=True).order_by("name"),
['e1', 'e2', expected_null_charfield_repr],
)
# Similarly for select_related(), joins beyond an initial nullable join
# must use outer joins so that all results are included.
self.assertQuerysetEqual(
Report.objects.select_related("creator", "creator__extra").order_by("name"),
['<Report: r1>', '<Report: r2>', '<Report: r3>']
)
# When there are multiple paths to a table from another table, we have
# to be careful not to accidentally reuse an inappropriate join when
# using select_related(). We used to return the parent's Detail record
# here by mistake.
d1 = Detail.objects.create(data="d1")
d2 = Detail.objects.create(data="d2")
m1 = Member.objects.create(name="m1", details=d1)
m2 = Member.objects.create(name="m2", details=d2)
Child.objects.create(person=m2, parent=m1)
obj = m1.children.select_related("person__details")[0]
self.assertEqual(obj.person.details.data, 'd2')
def test_order_by_resetting(self):
# Calling order_by() with no parameters removes any existing ordering on the
# model. But it should still be possible to add new ordering after that.
qs = Author.objects.order_by().order_by('name')
self.assertIn('ORDER BY', qs.query.get_compiler(qs.db).as_sql()[0])
def test_order_by_reverse_fk(self):
        # It is possible to order by the reverse of a foreign key, although
        # that can lead to duplicate results.
c1 = SimpleCategory.objects.create(name="category1")
c2 = SimpleCategory.objects.create(name="category2")
CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c1)
self.assertSequenceEqual(SimpleCategory.objects.order_by('categoryitem', 'pk'), [c1, c2, c1])
def test_ticket10181(self):
        # Avoid raising an EmptyResultSet if an inner query is provably
        # empty (and hence, not executed).
self.assertQuerysetEqual(
Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])),
[]
)
def test_ticket15316_filter_false(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.filter(category__specialcategory__isnull=False)
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_exclude_false(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
ci1 = CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_filter_true(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
ci1 = CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.filter(category__specialcategory__isnull=True)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_exclude_true(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")
CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.exclude(category__specialcategory__isnull=True)
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_one2one_filter_false(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=False).order_by('pk')
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_one2one_exclude_false(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
CategoryItem.objects.create(category=c0)
CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_one2one_filter_true(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
CategoryItem.objects.create(category=c0)
CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_one2one_exclude_true(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=True).order_by('pk')
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
class Queries5Tests(TestCase):
@classmethod
def setUpTestData(cls):
# Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the
# Meta.ordering will be rank3, rank2, rank1.
n1 = Note.objects.create(note='n1', misc='foo', id=1)
n2 = Note.objects.create(note='n2', misc='bar', id=2)
e1 = ExtraInfo.objects.create(info='e1', note=n1)
e2 = ExtraInfo.objects.create(info='e2', note=n2)
a1 = Author.objects.create(name='a1', num=1001, extra=e1)
a2 = Author.objects.create(name='a2', num=2002, extra=e1)
a3 = Author.objects.create(name='a3', num=3003, extra=e2)
cls.rank1 = Ranking.objects.create(rank=2, author=a2)
Ranking.objects.create(rank=1, author=a3)
Ranking.objects.create(rank=3, author=a1)
def test_ordering(self):
# Cross model ordering is possible in Meta, too.
self.assertQuerysetEqual(
Ranking.objects.all(),
['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
)
self.assertQuerysetEqual(
Ranking.objects.all().order_by('rank'),
['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
)
        # Ordering of extra() pieces is possible, too, and you can mix extra
        # fields and model fields in the ordering.
self.assertQuerysetEqual(
Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank']),
['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
)
qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
self.assertEqual(
[o.good for o in qs.extra(order_by=('-good',))],
[True, False, False]
)
self.assertQuerysetEqual(
qs.extra(order_by=('-good', 'id')),
['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
)
# Despite having some extra aliases in the query, we can still omit
# them in a values() query.
dicts = qs.values('id', 'rank').order_by('id')
self.assertEqual(
[d['rank'] for d in dicts],
[2, 1, 3]
)
def test_ticket7256(self):
# An empty values() call includes all aliases, including those from an
# extra()
qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
dicts = qs.values().order_by('id')
for d in dicts:
del d['id']
del d['author_id']
self.assertEqual(
[sorted(d.items()) for d in dicts],
[[('good', 0), ('rank', 2)], [('good', 0), ('rank', 1)], [('good', 1), ('rank', 3)]]
)
def test_ticket7045(self):
# Extra tables used to crash SQL construction on the second use.
qs = Ranking.objects.extra(tables=['django_site'])
qs.query.get_compiler(qs.db).as_sql()
        # The test passes if this doesn't raise an exception.
qs.query.get_compiler(qs.db).as_sql()
def test_ticket9848(self):
# Make sure that updates which only filter on sub-tables don't
# inadvertently update the wrong records (bug #9848).
# Make sure that the IDs from different tables don't happen to match.
self.assertQuerysetEqual(
Ranking.objects.filter(author__name='a1'),
['<Ranking: 3: a1>']
)
self.assertEqual(
Ranking.objects.filter(author__name='a1').update(rank='4'),
1
)
r = Ranking.objects.filter(author__name='a1')[0]
self.assertNotEqual(r.id, r.author.id)
self.assertEqual(r.rank, 4)
r.rank = 3
r.save()
self.assertQuerysetEqual(
Ranking.objects.all(),
['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
)
def test_ticket5261(self):
# Test different empty excludes.
self.assertQuerysetEqual(
Note.objects.exclude(Q()),
['<Note: n1>', '<Note: n2>']
)
self.assertQuerysetEqual(
Note.objects.filter(~Q()),
['<Note: n1>', '<Note: n2>']
)
self.assertQuerysetEqual(
Note.objects.filter(~Q() | ~Q()),
['<Note: n1>', '<Note: n2>']
)
self.assertQuerysetEqual(
Note.objects.exclude(~Q() & ~Q()),
['<Note: n1>', '<Note: n2>']
)
def test_extra_select_literal_percent_s(self):
# Allow %%s to escape select clauses
self.assertEqual(
Note.objects.extra(select={'foo': "'%%s'"})[0].foo,
'%s'
)
self.assertEqual(
Note.objects.extra(select={'foo': "'%%s bar %%s'"})[0].foo,
'%s bar %s'
)
self.assertEqual(
Note.objects.extra(select={'foo': "'bar %%s'"})[0].foo,
'bar %s'
)
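        # Added note: the doubling is needed because the extra() select SQL
        # goes through the same placeholder interpolation as query
        # parameters before it reaches the database, so a literal "%s" must
        # be written as "%%s" to survive as "%s" in the result.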
class SelectRelatedTests(TestCase):
def test_tickets_3045_3288(self):
# Once upon a time, select_related() with circular relations would loop
# infinitely if you forgot to specify "depth". Now we set an arbitrary
# default upper bound.
self.assertQuerysetEqual(X.objects.all(), [])
self.assertQuerysetEqual(X.objects.select_related(), [])
class SubclassFKTests(TestCase):
def test_ticket7778(self):
# Model subclasses could not be deleted if a nullable foreign key
# relates to a model that relates back.
num_celebs = Celebrity.objects.count()
tvc = TvChef.objects.create(name="Huey")
self.assertEqual(Celebrity.objects.count(), num_celebs + 1)
Fan.objects.create(fan_of=tvc)
Fan.objects.create(fan_of=tvc)
tvc.delete()
# The parent object should have been deleted as well.
self.assertEqual(Celebrity.objects.count(), num_celebs)
class CustomPkTests(TestCase):
def test_ticket7371(self):
self.assertQuerysetEqual(Related.objects.order_by('custom'), [])
class NullableRelOrderingTests(TestCase):
def test_ticket10028(self):
# Ordering by model related to nullable relations(!) should use outer
# joins, so that all results are included.
Plaything.objects.create(name="p1")
self.assertQuerysetEqual(
Plaything.objects.all(),
['<Plaything: p1>']
)
def test_join_already_in_query(self):
# Ordering by model related to nullable relations should not change
# the join type of already existing joins.
Plaything.objects.create(name="p1")
s = SingleObject.objects.create(name='s')
r = RelatedObject.objects.create(single=s, f=1)
Plaything.objects.create(name="p2", others=r)
qs = Plaything.objects.all().filter(others__isnull=False).order_by('pk')
self.assertNotIn('JOIN', str(qs.query))
qs = Plaything.objects.all().filter(others__f__isnull=False).order_by('pk')
self.assertIn('INNER', str(qs.query))
qs = qs.order_by('others__single__name')
        # The ordering by others__single__name will add one new join (to
        # single) and that join must be a LEFT join. The already existing
        # join to related objects must be kept INNER. So, we have both an
        # INNER and a LEFT join in the query.
self.assertEqual(str(qs.query).count('LEFT'), 1)
self.assertEqual(str(qs.query).count('INNER'), 1)
self.assertQuerysetEqual(
qs,
['<Plaything: p2>']
)
class DisjunctiveFilterTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
ExtraInfo.objects.create(info='e1', note=cls.n1)
def test_ticket7872(self):
# Another variation on the disjunctive filtering theme.
# For the purposes of this regression test, it's important that there is no
# Join object related to the LeafA we create.
LeafA.objects.create(data='first')
self.assertQuerysetEqual(LeafA.objects.all(), ['<LeafA: first>'])
self.assertQuerysetEqual(
LeafA.objects.filter(Q(data='first') | Q(join__b__data='second')),
['<LeafA: first>']
)
def test_ticket8283(self):
# Checking that applying filters after a disjunction works correctly.
self.assertQuerysetEqual(
(ExtraInfo.objects.filter(note=self.n1) | ExtraInfo.objects.filter(info='e2')).filter(note=self.n1),
['<ExtraInfo: e1>']
)
self.assertQuerysetEqual(
(ExtraInfo.objects.filter(info='e2') | ExtraInfo.objects.filter(note=self.n1)).filter(note=self.n1),
['<ExtraInfo: e1>']
)
class Queries6Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
t1 = Tag.objects.create(name='t1', category=generic)
Tag.objects.create(name='t2', parent=t1, category=generic)
t3 = Tag.objects.create(name='t3', parent=t1)
t4 = Tag.objects.create(name='t4', parent=t3)
Tag.objects.create(name='t5', parent=t3)
n1 = Note.objects.create(note='n1', misc='foo', id=1)
ann1 = Annotation.objects.create(name='a1', tag=t1)
ann1.notes.add(n1)
Annotation.objects.create(name='a2', tag=t4)
def test_parallel_iterators(self):
# Test that parallel iterators work.
qs = Tag.objects.all()
i1, i2 = iter(qs), iter(qs)
self.assertEqual(repr(next(i1)), '<Tag: t1>')
self.assertEqual(repr(next(i1)), '<Tag: t2>')
self.assertEqual(repr(next(i2)), '<Tag: t1>')
self.assertEqual(repr(next(i2)), '<Tag: t2>')
self.assertEqual(repr(next(i2)), '<Tag: t3>')
self.assertEqual(repr(next(i1)), '<Tag: t3>')
qs = X.objects.all()
self.assertFalse(qs)
self.assertFalse(qs)
def test_nested_queries_sql(self):
        # Nested queries should not evaluate the inner query as part of
        # constructing the SQL (so we should see a nested query here,
        # indicated by two occurrences of "SELECT" in the generated SQL).
qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
self.assertEqual(
qs.query.get_compiler(qs.db).as_sql()[0].count('SELECT'),
2
)
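        # Added sketch: rendering the SQL must not hit the database at all -
        # the inner queryset is embedded as a subquery, not evaluated to a
        # list of values.
        with self.assertNumQueries(0):
            qs.query.get_compiler(qs.db).as_sql()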
def test_tickets_8921_9188(self):
# Incorrect SQL was being generated for certain types of exclude()
# queries that crossed multi-valued relations (#8921, #9188 and some
# preemptively discovered cases).
self.assertQuerysetEqual(
PointerA.objects.filter(connection__pointerb__id=1),
[]
)
self.assertQuerysetEqual(
PointerA.objects.exclude(connection__pointerb__id=1),
[]
)
self.assertQuerysetEqual(
Tag.objects.exclude(children=None),
['<Tag: t1>', '<Tag: t3>']
)
# This example is tricky because the parent could be NULL, so only checking
# parents with annotations omits some results (tag t1, in this case).
self.assertQuerysetEqual(
Tag.objects.exclude(parent__annotation__name="a1"),
['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
)
        # The annotation->tag link is single-valued and the tag->children
        # link is multi-valued. So we have to split the exclude filter in
        # the middle and then optimize the inner query without losing
        # results.
self.assertQuerysetEqual(
Annotation.objects.exclude(tag__children__name="t2"),
['<Annotation: a2>']
)
        # Nested queries are possible (although they should be used with
        # care, since they can have performance problems on backends like
        # MySQL).
self.assertQuerysetEqual(
Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
['<Annotation: a1>']
)
def test_ticket3739(self):
# The all() method on querysets returns a copy of the queryset.
q1 = Tag.objects.order_by('name')
self.assertIsNot(q1, q1.all())
def test_ticket_11320(self):
qs = Tag.objects.exclude(category=None).exclude(category__name='foo')
self.assertEqual(str(qs.query).count(' INNER JOIN '), 1)
class RawQueriesTests(TestCase):
def setUp(self):
Note.objects.create(note='n1', misc='foo', id=1)
def test_ticket14729(self):
        # Test the representation of a raw query with one or more parameters
        # passed as a list.
query = "SELECT * FROM queries_note WHERE note = %s"
params = ['n1']
qs = Note.objects.raw(query, params=params)
self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1>")
query = "SELECT * FROM queries_note WHERE note = %s and misc = %s"
params = ['n1', 'foo']
qs = Note.objects.raw(query, params=params)
self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1 and misc = foo>")
class GeneratorExpressionTests(TestCase):
def test_ticket10432(self):
# Using an empty generator expression as the rvalue for an "__in"
# lookup is legal.
self.assertQuerysetEqual(
Note.objects.filter(pk__in=(x for x in ())),
[]
)
class ComparisonTests(TestCase):
def setUp(self):
self.n1 = Note.objects.create(note='n1', misc='foo', id=1)
e1 = ExtraInfo.objects.create(info='e1', note=self.n1)
self.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
def test_ticket8597(self):
# Regression tests for case-insensitive comparisons
Item.objects.create(name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
Item.objects.create(name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
self.assertQuerysetEqual(
Item.objects.filter(name__iexact="A_b"),
['<Item: a_b>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__iexact="x%Y"),
['<Item: x%y>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__istartswith="A_b"),
['<Item: a_b>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__iendswith="A_b"),
['<Item: a_b>']
)
class ExistsSql(TestCase):
def test_exists(self):
with CaptureQueriesContext(connection) as captured_queries:
self.assertFalse(Tag.objects.exists())
        # OK - so the exists query worked - but did it include too many columns?
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]['sql']
id, name = connection.ops.quote_name('id'), connection.ops.quote_name('name')
self.assertNotIn(id, qstr)
self.assertNotIn(name, qstr)
def test_ticket_18414(self):
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='two', created=datetime.datetime.now())
self.assertTrue(Article.objects.exists())
self.assertTrue(Article.objects.distinct().exists())
self.assertTrue(Article.objects.distinct()[1:3].exists())
self.assertFalse(Article.objects.distinct()[1:1].exists())
@skipUnlessDBFeature('can_distinct_on_fields')
def test_ticket_18414_distinct_on(self):
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='two', created=datetime.datetime.now())
self.assertTrue(Article.objects.distinct('name').exists())
self.assertTrue(Article.objects.distinct('name')[1:2].exists())
self.assertFalse(Article.objects.distinct('name')[2:3].exists())
class QuerysetOrderedTests(unittest.TestCase):
"""
    Tests for the QuerySet.ordered attribute.
"""
def test_no_default_or_explicit_ordering(self):
self.assertIs(Annotation.objects.all().ordered, False)
def test_cleared_default_ordering(self):
self.assertIs(Tag.objects.all().ordered, True)
self.assertIs(Tag.objects.all().order_by().ordered, False)
def test_explicit_ordering(self):
self.assertIs(Annotation.objects.all().order_by('id').ordered, True)
def test_order_by_extra(self):
self.assertIs(Annotation.objects.all().extra(order_by=['id']).ordered, True)
def test_annotated_ordering(self):
qs = Annotation.objects.annotate(num_notes=Count('notes'))
self.assertIs(qs.ordered, False)
self.assertIs(qs.order_by('num_notes').ordered, True)
@skipUnlessDBFeature('allow_sliced_subqueries')
class SubqueryTests(TestCase):
@classmethod
def setUpTestData(cls):
DumbCategory.objects.create(id=1)
DumbCategory.objects.create(id=2)
DumbCategory.objects.create(id=3)
DumbCategory.objects.create(id=4)
def test_ordered_subselect(self):
"Subselects honor any manual ordering"
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])
self.assertEqual(set(query.values_list('id', flat=True)), {3, 4})
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[:2])
self.assertEqual(set(query.values_list('id', flat=True)), {3, 4})
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2])
self.assertEqual(set(query.values_list('id', flat=True)), {3})
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])
self.assertEqual(set(query.values_list('id', flat=True)), {1, 2})
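        # Added sketch: the slice is applied inside the subselect itself, so
        # the generated SQL contains a nested (sliced) SELECT rather than a
        # pre-evaluated list of ids.
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])
        self.assertEqual(str(query.query).count('SELECT'), 2)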
def test_slice_subquery_and_query(self):
"""
Slice a query that has a sliced subquery
"""
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])[0:2]
self.assertEqual({x.id for x in query}, {3, 4})
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:3])[1:3]
self.assertEqual({x.id for x in query}, {3})
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])[1:]
self.assertEqual({x.id for x in query}, {2})
def test_related_sliced_subquery(self):
"""
Related objects constraints can safely contain sliced subqueries.
refs #22434
"""
generic = NamedCategory.objects.create(id=5, name="Generic")
t1 = Tag.objects.create(name='t1', category=generic)
t2 = Tag.objects.create(name='t2', category=generic)
ManagedModel.objects.create(data='mm1', tag=t1, public=True)
mm2 = ManagedModel.objects.create(data='mm2', tag=t2, public=True)
query = ManagedModel.normal_manager.filter(
tag__in=Tag.objects.order_by('-id')[:1]
)
self.assertEqual({x.id for x in query}, {mm2.id})
def test_sliced_delete(self):
"Delete queries can safely contain sliced subqueries"
DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:1]).delete()
self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 2, 3})
DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2]).delete()
self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 3})
DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:]).delete()
self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {3})
class CloneTests(TestCase):
def test_evaluated_queryset_as_argument(self):
"#13227 -- If a queryset is already evaluated, it can still be used as a query arg"
n = Note(note='Test1', misc='misc')
n.save()
e = ExtraInfo(info='good', note=n)
e.save()
n_list = Note.objects.all()
# Evaluate the Note queryset, populating the query cache
list(n_list)
# Use the note queryset in a query, and evaluate
# that query in a way that involves cloning.
self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, 'good')
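        # Added sketch: the evaluation above populated the result cache, and
        # using the queryset as an __in value clones its query instead of
        # touching the cached results.
        self.assertIsNotNone(n_list._result_cache)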
def test_no_model_options_cloning(self):
"""
Test that cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta)
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model options shouldn't be cloned.")
try:
Note.objects.filter(pk__lte=F('pk') + 1).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
def test_no_fields_cloning(self):
"""
Test that cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta.get_field("misc"))
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model fields shouldn't be cloned")
try:
Note.objects.filter(note=F('misc')).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
class EmptyQuerySetTests(TestCase):
def test_emptyqueryset_values(self):
# #14366 -- Calling .values() on an empty QuerySet and then cloning
# that should not cause an error
self.assertQuerysetEqual(
Number.objects.none().values('num').order_by('num'), []
)
def test_values_subquery(self):
self.assertQuerysetEqual(
Number.objects.filter(pk__in=Number.objects.none().values("pk")),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(pk__in=Number.objects.none().values_list("pk")),
[]
)
def test_ticket_19151(self):
# #19151 -- Calling .values() or .values_list() on an empty QuerySet
# should return an empty QuerySet and not cause an error.
q = Author.objects.none()
self.assertQuerysetEqual(q.values(), [])
self.assertQuerysetEqual(q.values_list(), [])
class ValuesQuerysetTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=72)
def test_flat_values_list(self):
qs = Number.objects.values_list("num")
qs = qs.values_list("num", flat=True)
self.assertSequenceEqual(qs, [72])
def test_extra_values(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select=OrderedDict([('value_plus_x', 'num+%s'),
('value_minus_x', 'num-%s')]),
select_params=(1, 2))
qs = qs.order_by('value_minus_x')
qs = qs.values('num')
self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_values_order_twice(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'})
qs = qs.order_by('value_minus_one').order_by('value_plus_one')
qs = qs.values('num')
self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_values_order_multiple(self):
        # PostgreSQL doesn't allow constants in ORDER BY, so check for that.
qs = Number.objects.extra(select={
'value_plus_one': 'num+1',
'value_minus_one': 'num-1',
'constant_value': '1'
})
qs = qs.order_by('value_plus_one', 'value_minus_one', 'constant_value')
qs = qs.values('num')
self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_values_order_in_extra(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(
select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'},
order_by=['value_minus_one'])
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_select_params_values_order_in_extra(self):
# testing for 23259 issue
qs = Number.objects.extra(
select={'value_plus_x': 'num+%s'},
select_params=[1],
order_by=['value_plus_x'])
qs = qs.filter(num=72)
qs = qs.values('num')
self.assertSequenceEqual(qs, [{'num': 72}])
def test_extra_multiple_select_params_values_order_by(self):
# testing for 23259 issue
qs = Number.objects.extra(select=OrderedDict([('value_plus_x', 'num+%s'),
('value_minus_x', 'num-%s')]),
select_params=(72, 72))
qs = qs.order_by('value_minus_x')
qs = qs.filter(num=1)
qs = qs.values('num')
self.assertSequenceEqual(qs, [])
def test_extra_values_list(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
qs = qs.order_by('value_plus_one')
qs = qs.values_list('num')
self.assertSequenceEqual(qs, [(72,)])
def test_flat_extra_values_list(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
qs = qs.order_by('value_plus_one')
qs = qs.values_list('num', flat=True)
self.assertSequenceEqual(qs, [72])
def test_field_error_values_list(self):
# see #23443
msg = "Cannot resolve keyword %r into field. Join on 'name' not permitted." % 'foo'
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.values_list('name__foo')
class QuerySetSupportsPythonIdioms(TestCase):
@classmethod
def setUpTestData(cls):
some_date = datetime.datetime(2014, 5, 16, 12, 1)
for i in range(1, 8):
Article.objects.create(
name="Article {}".format(i), created=some_date)
def get_ordered_articles(self):
return Article.objects.all().order_by('name')
def test_can_get_items_using_index_and_slice_notation(self):
self.assertEqual(self.get_ordered_articles()[0].name, 'Article 1')
self.assertQuerysetEqual(
self.get_ordered_articles()[1:3],
["<Article: Article 2>", "<Article: Article 3>"]
)
def test_slicing_with_steps_can_be_used(self):
self.assertQuerysetEqual(
self.get_ordered_articles()[::2], [
"<Article: Article 1>",
"<Article: Article 3>",
"<Article: Article 5>",
"<Article: Article 7>"
]
)
@unittest.skipUnless(six.PY2, "Python 2 only -- Python 3 doesn't have longs.")
def test_slicing_works_with_longs(self):
# NOQA: long undefined on PY3
self.assertEqual(self.get_ordered_articles()[long(0)].name, 'Article 1') # NOQA
self.assertQuerysetEqual(self.get_ordered_articles()[long(1):long(3)], # NOQA
["<Article: Article 2>", "<Article: Article 3>"])
self.assertQuerysetEqual(
self.get_ordered_articles()[::long(2)], [ # NOQA
"<Article: Article 1>",
"<Article: Article 3>",
"<Article: Article 5>",
"<Article: Article 7>"
]
)
# And can be mixed with ints.
self.assertQuerysetEqual(self.get_ordered_articles()[1:long(3)], # NOQA
["<Article: Article 2>", "<Article: Article 3>"])
def test_slicing_without_step_is_lazy(self):
with self.assertNumQueries(0):
self.get_ordered_articles()[0:5]
    def test_slicing_with_steps_is_not_lazy(self):
with self.assertNumQueries(1):
self.get_ordered_articles()[0:5:3]
def test_slicing_can_slice_again_after_slicing(self):
self.assertQuerysetEqual(
self.get_ordered_articles()[0:5][0:2],
["<Article: Article 1>", "<Article: Article 2>"]
)
self.assertQuerysetEqual(self.get_ordered_articles()[0:5][4:], ["<Article: Article 5>"])
self.assertQuerysetEqual(self.get_ordered_articles()[0:5][5:], [])
# Some more tests!
self.assertQuerysetEqual(
self.get_ordered_articles()[2:][0:2],
["<Article: Article 3>", "<Article: Article 4>"]
)
self.assertQuerysetEqual(
self.get_ordered_articles()[2:][:2],
["<Article: Article 3>", "<Article: Article 4>"]
)
self.assertQuerysetEqual(self.get_ordered_articles()[2:][2:3], ["<Article: Article 5>"])
# Using an offset without a limit is also possible.
self.assertQuerysetEqual(
self.get_ordered_articles()[5:],
["<Article: Article 6>", "<Article: Article 7>"]
)
def test_slicing_cannot_filter_queryset_once_sliced(self):
with self.assertRaisesMessage(AssertionError, "Cannot filter a query once a slice has been taken."):
Article.objects.all()[0:5].filter(id=1, )
def test_slicing_cannot_reorder_queryset_once_sliced(self):
with self.assertRaisesMessage(AssertionError, "Cannot reorder a query once a slice has been taken."):
Article.objects.all()[0:5].order_by('id', )
def test_slicing_cannot_combine_queries_once_sliced(self):
with self.assertRaisesMessage(AssertionError, "Cannot combine queries once a slice has been taken."):
Article.objects.all()[0:1] & Article.objects.all()[4:5]
def test_slicing_negative_indexing_not_supported_for_single_element(self):
"""hint: inverting your ordering might do what you need"""
with self.assertRaisesMessage(AssertionError, "Negative indexing is not supported."):
Article.objects.all()[-1]
def test_slicing_negative_indexing_not_supported_for_range(self):
"""hint: inverting your ordering might do what you need"""
with self.assertRaisesMessage(AssertionError, "Negative indexing is not supported."):
Article.objects.all()[0:-5]
def test_can_get_number_of_items_in_queryset_using_standard_len(self):
self.assertEqual(len(Article.objects.filter(name__exact='Article 1')), 1)
def test_can_combine_queries_using_and_and_or_operators(self):
s1 = Article.objects.filter(name__exact='Article 1')
s2 = Article.objects.filter(name__exact='Article 2')
self.assertQuerysetEqual(
(s1 | s2).order_by('name'),
["<Article: Article 1>", "<Article: Article 2>"]
)
self.assertQuerysetEqual(s1 & s2, [])
class WeirdQuerysetSlicingTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=1)
Number.objects.create(num=2)
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='two', created=datetime.datetime.now())
Article.objects.create(name='three', created=datetime.datetime.now())
Article.objects.create(name='four', created=datetime.datetime.now())
food = Food.objects.create(name='spam')
Eaten.objects.create(meal='spam with eggs', food=food)
def test_tickets_7698_10202(self):
# People like to slice with '0' as the high-water mark.
self.assertQuerysetEqual(Article.objects.all()[0:0], [])
self.assertQuerysetEqual(Article.objects.all()[0:0][:10], [])
self.assertEqual(Article.objects.all()[:0].count(), 0)
with self.assertRaisesMessage(AssertionError, 'Cannot change a query once a slice has been taken.'):
Article.objects.all()[:0].latest('created')
def test_empty_resultset_sql(self):
# ticket #12192
self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1]))
def test_empty_sliced_subquery(self):
self.assertEqual(Eaten.objects.filter(food__in=Food.objects.all()[0:0]).count(), 0)
def test_empty_sliced_subquery_exclude(self):
self.assertEqual(Eaten.objects.exclude(food__in=Food.objects.all()[0:0]).count(), 1)
def test_zero_length_values_slicing(self):
n = 42
with self.assertNumQueries(0):
self.assertQuerysetEqual(Article.objects.values()[n:n], [])
self.assertQuerysetEqual(Article.objects.values_list()[n:n], [])
class EscapingTests(TestCase):
def test_ticket_7302(self):
# Reserved names are appropriately escaped
ReservedName.objects.create(name='a', order=42)
ReservedName.objects.create(name='b', order=37)
self.assertQuerysetEqual(
ReservedName.objects.all().order_by('order'),
['<ReservedName: b>', '<ReservedName: a>']
)
self.assertQuerysetEqual(
ReservedName.objects.extra(select={'stuff': 'name'}, order_by=('order', 'stuff')),
['<ReservedName: b>', '<ReservedName: a>']
)
class ToFieldTests(TestCase):
def test_in_query(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
set(Eaten.objects.filter(food__in=[apple, pear])),
{lunch, dinner},
)
def test_in_subquery(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(
set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple'))),
{lunch}
)
self.assertEqual(
set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple').values('eaten__meal'))),
set()
)
self.assertEqual(
set(Food.objects.filter(eaten__in=Eaten.objects.filter(meal='lunch'))),
{apple}
)
def test_reverse_in(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch_apple = Eaten.objects.create(food=apple, meal="lunch")
        dinner_pear = Eaten.objects.create(food=pear, meal="dinner")
        self.assertEqual(
            set(Food.objects.filter(eaten__in=[lunch_apple, dinner_pear])),
            {apple, pear}
        )
def test_single_object(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=apple, meal="dinner")
self.assertEqual(
set(Eaten.objects.filter(food=apple)),
{lunch, dinner}
)
def test_single_object_reverse(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(
set(Food.objects.filter(eaten=lunch)),
{apple}
)
def test_recursive_fk(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(
list(Node.objects.filter(parent=node1)),
[node2]
)
def test_recursive_fk_reverse(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(
list(Node.objects.filter(node=node2)),
[node1]
)
class IsNullTests(TestCase):
def test_primary_key(self):
custom = CustomPk.objects.create(name='pk')
null = Related.objects.create()
notnull = Related.objects.create(custom=custom)
self.assertSequenceEqual(Related.objects.filter(custom__isnull=False), [notnull])
self.assertSequenceEqual(Related.objects.filter(custom__isnull=True), [null])
def test_to_field(self):
apple = Food.objects.create(name="apple")
Eaten.objects.create(food=apple, meal="lunch")
Eaten.objects.create(meal="lunch")
self.assertQuerysetEqual(
Eaten.objects.filter(food__isnull=False),
['<Eaten: apple at lunch>']
)
self.assertQuerysetEqual(
Eaten.objects.filter(food__isnull=True),
['<Eaten: None at lunch>']
)
class ConditionalTests(TestCase):
"""Tests whose execution depend on different environment conditions like
Python version or DB backend features"""
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
t1 = Tag.objects.create(name='t1', category=generic)
Tag.objects.create(name='t2', parent=t1, category=generic)
t3 = Tag.objects.create(name='t3', parent=t1)
Tag.objects.create(name='t4', parent=t3)
Tag.objects.create(name='t5', parent=t3)
def test_infinite_loop(self):
# If you're not careful, it's possible to introduce infinite loops via
# default ordering on foreign keys in a cycle. We detect that.
with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'):
list(LoopX.objects.all()) # Force queryset evaluation with list()
with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'):
list(LoopZ.objects.all()) # Force queryset evaluation with list()
# Note that this doesn't cause an infinite loop, since the default
# ordering on the Tag model is empty (and thus defaults to using "id"
# for the related field).
self.assertEqual(len(Tag.objects.order_by('parent')), 5)
# ... but you can still order in a non-recursive fashion among linked
# fields (the previous test failed because the default ordering was
# recursive).
self.assertQuerysetEqual(
LoopX.objects.all().order_by('y__x__y__x__id'),
[]
)
# When grouping without specifying ordering, we add an explicit "ORDER BY NULL"
# portion in MySQL to prevent unnecessary sorting.
@skipUnlessDBFeature('requires_explicit_null_ordering_when_grouping')
def test_null_ordering_added(self):
query = Tag.objects.values_list('parent_id', flat=True).order_by().query
query.group_by = ['parent_id']
sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
fragment = "ORDER BY "
pos = sql.find(fragment)
self.assertEqual(sql.find(fragment, pos + 1), -1)
self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment))
    # SQLite 3 does not support passing in more than 1000 parameters except
    # by changing a parameter at compile time.
@skipUnlessDBFeature('supports_1000_query_parameters')
def test_ticket14244(self):
# Test that the "in" lookup works with lists of 1000 items or more.
# The numbers amount is picked to force three different IN batches
# for Oracle, yet to be less than 2100 parameter limit for MSSQL.
numbers = list(range(2050))
Number.objects.all().delete()
Number.objects.bulk_create(Number(num=num) for num in numbers)
self.assertEqual(
Number.objects.filter(num__in=numbers[:1000]).count(),
1000
)
self.assertEqual(
Number.objects.filter(num__in=numbers[:1001]).count(),
1001
)
self.assertEqual(
Number.objects.filter(num__in=numbers[:2000]).count(),
2000
)
self.assertEqual(
Number.objects.filter(num__in=numbers).count(),
len(numbers)
)
class UnionTests(TestCase):
"""
Tests for the union of two querysets. Bug #12252.
"""
@classmethod
def setUpTestData(cls):
objectas = []
objectbs = []
objectcs = []
a_info = ['one', 'two', 'three']
for name in a_info:
o = ObjectA(name=name)
o.save()
objectas.append(o)
b_info = [('un', 1, objectas[0]), ('deux', 2, objectas[0]), ('trois', 3, objectas[2])]
for name, number, objecta in b_info:
o = ObjectB(name=name, num=number, objecta=objecta)
o.save()
objectbs.append(o)
c_info = [('ein', objectas[2], objectbs[2]), ('zwei', objectas[1], objectbs[1])]
for name, objecta, objectb in c_info:
o = ObjectC(name=name, objecta=objecta, objectb=objectb)
o.save()
objectcs.append(o)
def check_union(self, model, Q1, Q2):
filter = model.objects.filter
self.assertEqual(set(filter(Q1) | filter(Q2)), set(filter(Q1 | Q2)))
self.assertEqual(set(filter(Q2) | filter(Q1)), set(filter(Q1 | Q2)))
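    # check_union() asserts the property under test in two directions:
    # filtering by Q1 and Q2 separately and ORing the querysets must give
    # the same rows as filtering by Q1 | Q2 directly, regardless of the
    # order of the operands.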
def test_A_AB(self):
Q1 = Q(name='two')
Q2 = Q(objectb__name='deux')
self.check_union(ObjectA, Q1, Q2)
def test_A_AB2(self):
Q1 = Q(name='two')
Q2 = Q(objectb__name='deux', objectb__num=2)
self.check_union(ObjectA, Q1, Q2)
def test_AB_ACB(self):
Q1 = Q(objectb__name='deux')
Q2 = Q(objectc__objectb__name='deux')
self.check_union(ObjectA, Q1, Q2)
def test_BAB_BAC(self):
Q1 = Q(objecta__objectb__name='deux')
Q2 = Q(objecta__objectc__name='ein')
self.check_union(ObjectB, Q1, Q2)
def test_BAB_BACB(self):
Q1 = Q(objecta__objectb__name='deux')
Q2 = Q(objecta__objectc__objectb__name='trois')
self.check_union(ObjectB, Q1, Q2)
def test_BA_BCA__BAB_BAC_BCA(self):
Q1 = Q(objecta__name='one', objectc__objecta__name='two')
Q2 = Q(objecta__objectc__name='ein', objectc__objecta__name='three', objecta__objectb__name='trois')
self.check_union(ObjectB, Q1, Q2)
class DefaultValuesInsertTest(TestCase):
def test_no_extra_params(self):
"""
        Can create an instance of a model with only the PK field (#17056).
"""
DumbCategory.objects.create()
class ExcludeTests(TestCase):
@classmethod
def setUpTestData(cls):
f1 = Food.objects.create(name='apples')
Food.objects.create(name='oranges')
Eaten.objects.create(food=f1, meal='dinner')
j1 = Job.objects.create(name='Manager')
r1 = Responsibility.objects.create(description='Playing golf')
j2 = Job.objects.create(name='Programmer')
r2 = Responsibility.objects.create(description='Programming')
JobResponsibilities.objects.create(job=j1, responsibility=r1)
JobResponsibilities.objects.create(job=j2, responsibility=r2)
def test_to_field(self):
self.assertQuerysetEqual(
Food.objects.exclude(eaten__meal='dinner'),
['<Food: oranges>'])
self.assertQuerysetEqual(
Job.objects.exclude(responsibilities__description='Playing golf'),
['<Job: Programmer>'])
self.assertQuerysetEqual(
Responsibility.objects.exclude(jobs__name='Manager'),
['<Responsibility: Programming>'])
def test_ticket14511(self):
alex = Person.objects.get_or_create(name='Alex')[0]
jane = Person.objects.get_or_create(name='Jane')[0]
oracle = Company.objects.get_or_create(name='Oracle')[0]
google = Company.objects.get_or_create(name='Google')[0]
microsoft = Company.objects.get_or_create(name='Microsoft')[0]
intel = Company.objects.get_or_create(name='Intel')[0]
def employ(employer, employee, title):
Employment.objects.get_or_create(employee=employee, employer=employer, title=title)
employ(oracle, alex, 'Engineer')
employ(oracle, alex, 'Developer')
employ(google, alex, 'Engineer')
employ(google, alex, 'Manager')
employ(microsoft, alex, 'Manager')
employ(intel, alex, 'Manager')
employ(microsoft, jane, 'Developer')
employ(intel, jane, 'Manager')
alex_tech_employers = alex.employers.filter(
employment__title__in=('Engineer', 'Developer')).distinct().order_by('name')
self.assertSequenceEqual(alex_tech_employers, [google, oracle])
alex_nontech_employers = alex.employers.exclude(
employment__title__in=('Engineer', 'Developer')).distinct().order_by('name')
self.assertSequenceEqual(alex_nontech_employers, [google, intel, microsoft])
class ExcludeTest17600(TestCase):
"""
    Some regression tests for ticket #17600. Some of these likely duplicate
    other existing tests.
"""
@classmethod
def setUpTestData(cls):
# Create a few Orders.
cls.o1 = Order.objects.create(pk=1)
cls.o2 = Order.objects.create(pk=2)
cls.o3 = Order.objects.create(pk=3)
# Create some OrderItems for the first order with homogeneous
# status_id values
cls.oi1 = OrderItem.objects.create(order=cls.o1, status=1)
cls.oi2 = OrderItem.objects.create(order=cls.o1, status=1)
cls.oi3 = OrderItem.objects.create(order=cls.o1, status=1)
# Create some OrderItems for the second order with heterogeneous
# status_id values
cls.oi4 = OrderItem.objects.create(order=cls.o2, status=1)
cls.oi5 = OrderItem.objects.create(order=cls.o2, status=2)
cls.oi6 = OrderItem.objects.create(order=cls.o2, status=3)
        # Create some OrderItems for the third order with heterogeneous
        # status_id values
cls.oi7 = OrderItem.objects.create(order=cls.o3, status=2)
cls.oi8 = OrderItem.objects.create(order=cls.o3, status=3)
cls.oi9 = OrderItem.objects.create(order=cls.o3, status=4)
def test_exclude_plain(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertQuerysetEqual(
Order.objects.exclude(items__status=1),
['<Order: 3>'])
def test_exclude_plain_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertQuerysetEqual(
Order.objects.exclude(items__status=1).distinct(),
['<Order: 3>'])
def test_exclude_with_q_object_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertQuerysetEqual(
Order.objects.exclude(Q(items__status=1)).distinct(),
['<Order: 3>'])
def test_exclude_with_q_object_no_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertQuerysetEqual(
Order.objects.exclude(Q(items__status=1)),
['<Order: 3>'])
def test_exclude_with_q_is_equal_to_plain_exclude(self):
"""
Using exclude(condition) and exclude(Q(condition)) should
yield the same QuerySet
"""
self.assertEqual(
list(Order.objects.exclude(items__status=1).distinct()),
list(Order.objects.exclude(Q(items__status=1)).distinct()))
def test_exclude_with_q_is_equal_to_plain_exclude_variation(self):
"""
Using exclude(condition) and exclude(Q(condition)) should
yield the same QuerySet
"""
self.assertEqual(
list(Order.objects.exclude(items__status=1)),
list(Order.objects.exclude(Q(items__status=1)).distinct()))
@unittest.expectedFailure
def test_only_orders_with_all_items_having_status_1(self):
"""
        This should only return orders having ALL items set to status 1, or
        orders not having any items at all. The correct way to write
        this query in SQL seems to be using two nested subqueries.
"""
self.assertQuerysetEqual(
Order.objects.exclude(~Q(items__status=1)).distinct(),
['<Order: 1>'])
class Exclude15786(TestCase):
"""Regression test for #15786"""
def test_ticket15786(self):
c1 = SimpleCategory.objects.create(name='c1')
c2 = SimpleCategory.objects.create(name='c2')
OneToOneCategory.objects.create(category=c1)
OneToOneCategory.objects.create(category=c2)
rel = CategoryRelationship.objects.create(first=c1, second=c2)
self.assertEqual(
CategoryRelationship.objects.exclude(
first__onetoonecategory=F('second__onetoonecategory')
).get(), rel
)
class NullInExcludeTest(TestCase):
@classmethod
def setUpTestData(cls):
NullableName.objects.create(name='i1')
NullableName.objects.create()
def test_null_in_exclude_qs(self):
none_val = '' if connection.features.interprets_empty_strings_as_nulls else None
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=[]),
['i1', none_val], attrgetter('name'))
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=['i1']),
[none_val], attrgetter('name'))
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=['i3']),
['i1', none_val], attrgetter('name'))
inner_qs = NullableName.objects.filter(name='i1').values_list('name')
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=inner_qs),
[none_val], attrgetter('name'))
        # Check that the inner queryset wasn't executed - it should be
        # turned into a subquery above.
self.assertIs(inner_qs._result_cache, None)
@unittest.expectedFailure
def test_col_not_in_list_containing_null(self):
"""
The following case is not handled properly because
SQL's COL NOT IN (list containing null) handling is too weird to
abstract away.
"""
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=[None]),
['i1'], attrgetter('name'))
def test_double_exclude(self):
self.assertEqual(
list(NullableName.objects.filter(~~Q(name='i1'))),
list(NullableName.objects.filter(Q(name='i1'))))
self.assertNotIn(
'IS NOT NULL',
str(NullableName.objects.filter(~~Q(name='i1')).query))
class EmptyStringsAsNullTest(TestCase):
"""
Test that filtering on non-null character fields works as expected.
The reason for these tests is that Oracle treats '' as NULL, and this
can cause problems in query construction. Refs #17957.
"""
@classmethod
def setUpTestData(cls):
cls.nc = NamedCategory.objects.create(name='')
def test_direct_exclude(self):
self.assertQuerysetEqual(
NamedCategory.objects.exclude(name__in=['nonexisting']),
[self.nc.pk], attrgetter('pk')
)
def test_joined_exclude(self):
self.assertQuerysetEqual(
DumbCategory.objects.exclude(namedcategory__name__in=['nonexisting']),
[self.nc.pk], attrgetter('pk')
)
def test_21001(self):
foo = NamedCategory.objects.create(name='foo')
self.assertQuerysetEqual(
NamedCategory.objects.exclude(name=''),
[foo.pk], attrgetter('pk')
)
class ProxyQueryCleanupTest(TestCase):
def test_evaluated_proxy_count(self):
"""
Test that generating the query string doesn't alter the query's state
in irreversible ways. Refs #18248.
"""
ProxyCategory.objects.create()
qs = ProxyCategory.objects.all()
self.assertEqual(qs.count(), 1)
str(qs.query)
self.assertEqual(qs.count(), 1)
class WhereNodeTest(TestCase):
class DummyNode(object):
def as_sql(self, compiler, connection):
return 'dummy', []
class MockCompiler(object):
def compile(self, node):
return node.as_sql(self, connection)
def __call__(self, name):
return connection.ops.quote_name(name)
def test_empty_full_handling_conjunction(self):
compiler = WhereNodeTest.MockCompiler()
w = WhereNode(children=[NothingNode()])
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('', []))
w = WhereNode(children=[self.DummyNode(), self.DummyNode()])
self.assertEqual(w.as_sql(compiler, connection), ('(dummy AND dummy)', []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy AND dummy)', []))
w = WhereNode(children=[NothingNode(), self.DummyNode()])
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('', []))
def test_empty_full_handling_disjunction(self):
compiler = WhereNodeTest.MockCompiler()
w = WhereNode(children=[NothingNode()], connector='OR')
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('', []))
w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector='OR')
self.assertEqual(w.as_sql(compiler, connection), ('(dummy OR dummy)', []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy OR dummy)', []))
w = WhereNode(children=[NothingNode(), self.DummyNode()], connector='OR')
self.assertEqual(w.as_sql(compiler, connection), ('dummy', []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy)', []))
def test_empty_nodes(self):
compiler = WhereNodeTest.MockCompiler()
empty_w = WhereNode()
w = WhereNode(children=[empty_w, empty_w])
self.assertEqual(w.as_sql(compiler, connection), ('', []))
w.negate()
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.connector = 'OR'
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ('', []))
w = WhereNode(children=[empty_w, NothingNode()], connector='OR')
self.assertEqual(w.as_sql(compiler, connection), ('', []))
w = WhereNode(children=[empty_w, NothingNode()], connector='AND')
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
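# Hedged illustration (mirrors the assertions above, not new behavior): an
# always-false child empties an AND node via EmptyResultSet, but is simply
# dropped from an OR node. Illustrative only, never invoked.
def _where_node_sketch():
compiler = WhereNodeTest.MockCompiler()
w = WhereNode(children=[NothingNode(), WhereNodeTest.DummyNode()], connector='OR')
return w.as_sql(compiler, connection) # expected: ('dummy', [])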
class QuerySetExceptionTests(TestCase):
def test_iter_exceptions(self):
qs = ExtraInfo.objects.only('author')
with self.assertRaises(AttributeError):
list(qs)
def test_invalid_qs_list(self):
# Test for #19895 - second iteration over invalid queryset
# raises errors.
qs = Article.objects.order_by('invalid_column')
msg = "Cannot resolve keyword 'invalid_column' into field."
with self.assertRaisesMessage(FieldError, msg):
list(qs)
with self.assertRaisesMessage(FieldError, msg):
list(qs)
def test_invalid_order_by(self):
msg = "Invalid order_by arguments: ['*']"
if six.PY2:
msg = msg.replace("[", "[u")
with self.assertRaisesMessage(FieldError, msg):
list(Article.objects.order_by('*'))
def test_invalid_queryset_model(self):
msg = 'Cannot use QuerySet for "Article": Use a QuerySet for "ExtraInfo".'
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.filter(extra=Article.objects.all()))
class NullJoinPromotionOrTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.d1 = ModelD.objects.create(name='foo')
d2 = ModelD.objects.create(name='bar')
cls.a1 = ModelA.objects.create(name='a1', d=cls.d1)
c = ModelC.objects.create(name='c')
b = ModelB.objects.create(name='b', c=c)
cls.a2 = ModelA.objects.create(name='a2', b=b, d=d2)
def test_ticket_17886(self):
# The first Q-object is generating the match, the rest of the filters
# should not remove the match even if they do not match anything. The
# problem here was that b__name generates a LOUTER JOIN, then
# b__c__name generates a join to c, which the ORM tried to promote but
# failed as that join isn't nullable.
q_obj = (
Q(d__name='foo') |
Q(b__name='foo') |
Q(b__c__name='foo')
)
qset = ModelA.objects.filter(q_obj)
self.assertEqual(list(qset), [self.a1])
# We generate one INNER JOIN to D. The join is direct and not nullable
# so we can use INNER JOIN for it. However, we can NOT use INNER JOIN
# for the b->c join, as a->b is nullable.
self.assertEqual(str(qset.query).count('INNER JOIN'), 1)
def test_isnull_filter_promotion(self):
qs = ModelA.objects.filter(Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(list(qs), [self.a2])
def test_null_join_demotion(self):
qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True))
self.assertIn(' INNER JOIN ', str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False))
self.assertIn(' INNER JOIN ', str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True))
self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False))
self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
def test_ticket_21366(self):
n = Note.objects.create(note='n', misc='m')
e = ExtraInfo.objects.create(info='info', note=n)
a = Author.objects.create(name='Author1', num=1, extra=e)
Ranking.objects.create(rank=1, author=a)
r1 = Report.objects.create(name='Foo', creator=a)
r2 = Report.objects.create(name='Bar')
Report.objects.create(name='Bar', creator=a)
qs = Report.objects.filter(
Q(creator__ranking__isnull=True) |
Q(creator__ranking__rank=1, name='Foo')
)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
self.assertEqual(str(qs.query).count(' JOIN '), 2)
self.assertSequenceEqual(qs.order_by('name'), [r2, r1])
def test_ticket_21748(self):
i1 = Identifier.objects.create(name='i1')
i2 = Identifier.objects.create(name='i2')
i3 = Identifier.objects.create(name='i3')
Program.objects.create(identifier=i1)
Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
self.assertSequenceEqual(Identifier.objects.filter(program=None, channel=None), [i3])
self.assertSequenceEqual(Identifier.objects.exclude(program=None, channel=None).order_by('name'), [i1, i2])
def test_ticket_21748_double_negated_and(self):
i1 = Identifier.objects.create(name='i1')
i2 = Identifier.objects.create(name='i2')
Identifier.objects.create(name='i3')
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
# Check the ~~Q() (or equivalently .exclude(~Q)) works like Q() for
# join promotion.
qs1_doubleneg = Identifier.objects.exclude(~Q(program__id=p1.id, channel__id=c1.id)).order_by('pk')
qs1_filter = Identifier.objects.filter(program__id=p1.id, channel__id=c1.id).order_by('pk')
self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(str(qs1_filter.query).count('JOIN'),
str(qs1_doubleneg.query).count('JOIN'))
self.assertEqual(2, str(qs1_doubleneg.query).count('INNER JOIN'))
self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
str(qs1_doubleneg.query).count('INNER JOIN'))
def test_ticket_21748_double_negated_or(self):
i1 = Identifier.objects.create(name='i1')
i2 = Identifier.objects.create(name='i2')
Identifier.objects.create(name='i3')
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
# Test OR + doubleneg. The expected result is that channel is LOUTER
# joined, program INNER joined
qs1_filter = Identifier.objects.filter(
Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id)
).order_by('pk')
qs1_doubleneg = Identifier.objects.exclude(
~Q(Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id))
).order_by('pk')
self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(str(qs1_filter.query).count('JOIN'),
str(qs1_doubleneg.query).count('JOIN'))
self.assertEqual(1, str(qs1_doubleneg.query).count('INNER JOIN'))
self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
str(qs1_doubleneg.query).count('INNER JOIN'))
def test_ticket_21748_complex_filter(self):
i1 = Identifier.objects.create(name='i1')
i2 = Identifier.objects.create(name='i2')
Identifier.objects.create(name='i3')
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
# Finally, a more complex case, one time in a way where each
# NOT is pushed to lowest level in the boolean tree, and
# another query where this isn't done.
qs1 = Identifier.objects.filter(
~Q(~Q(program__id=p2.id, channel__id=c1.id) & Q(program__id=p1.id))
).order_by('pk')
qs2 = Identifier.objects.filter(
Q(Q(program__id=p2.id, channel__id=c1.id) | ~Q(program__id=p1.id))
).order_by('pk')
self.assertQuerysetEqual(qs1, qs2, lambda x: x)
self.assertEqual(str(qs1.query).count('JOIN'),
str(qs2.query).count('JOIN'))
self.assertEqual(0, str(qs1.query).count('INNER JOIN'))
self.assertEqual(str(qs1.query).count('INNER JOIN'),
str(qs2.query).count('INNER JOIN'))
class ReverseJoinTrimmingTest(TestCase):
def test_reverse_trimming(self):
# Check that we don't accidentally trim reverse joins - we can't know
# if there is anything on the other side of the join, so trimming
# reverse joins can't be done, ever.
t = Tag.objects.create()
qs = Tag.objects.filter(annotation__tag=t.pk)
self.assertIn('INNER JOIN', str(qs.query))
self.assertEqual(list(qs), [])
class JoinReuseTest(TestCase):
"""
Test that the queries reuse joins sensibly (for example, direct joins
are always reused).
"""
def test_fk_reuse(self):
qs = Annotation.objects.filter(tag__name='foo').filter(tag__name='bar')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_fk_reuse_select_related(self):
qs = Annotation.objects.filter(tag__name='foo').select_related('tag')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_fk_reuse_annotation(self):
qs = Annotation.objects.filter(tag__name='foo').annotate(cnt=Count('tag__name'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_fk_reuse_disjunction(self):
qs = Annotation.objects.filter(Q(tag__name='foo') | Q(tag__name='bar'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_fk_reuse_order_by(self):
qs = Annotation.objects.filter(tag__name='foo').order_by('tag__name')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_revo2o_reuse(self):
qs = Detail.objects.filter(member__name='foo').filter(member__name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_revfk_noreuse(self):
qs = Author.objects.filter(report__name='r4').filter(report__name='r1')
self.assertEqual(str(qs.query).count('JOIN'), 2)
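# Hedged sketch of the reuse rule stated in the class docstring: chained
# filters over the same direct FK share one JOIN, while chained filters over
# a reverse FK need one JOIN each, since different related rows may satisfy
# each condition. Illustrative only, never invoked.
def _join_reuse_sketch():
direct = Annotation.objects.filter(tag__name='foo').filter(tag__name='bar')
reverse = Author.objects.filter(report__name='r4').filter(report__name='r1')
return str(direct.query).count('JOIN'), str(reverse.query).count('JOIN') # (1, 2)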
class DisjunctionPromotionTests(TestCase):
def test_disjunction_promotion_select_related(self):
fk1 = FK1.objects.create(f1='f1', f2='f2')
basea = BaseA.objects.create(a=fk1)
qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2))
self.assertEqual(str(qs.query).count(' JOIN '), 0)
qs = qs.select_related('a', 'b')
self.assertEqual(str(qs.query).count(' INNER JOIN '), 0)
self.assertEqual(str(qs.query).count(' LEFT OUTER JOIN '), 2)
with self.assertNumQueries(1):
self.assertSequenceEqual(qs, [basea])
self.assertEqual(qs[0].a, fk1)
self.assertIs(qs[0].b, None)
def test_disjunction_promotion1(self):
# Pre-existing join, add two ORed filters to the same join,
# all joins can be INNER JOINS.
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(Q(b__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
# Reverse the order of AND and OR filters.
qs = BaseA.objects.filter(Q(b__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
def test_disjunction_promotion2(self):
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
# Now we have two different joins in an ORed condition, these
# must be OUTER joins. The pre-existing join should remain INNER.
qs = qs.filter(Q(b__f1='foo') | Q(c__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
# Reverse case.
qs = BaseA.objects.filter(Q(b__f1='foo') | Q(c__f2='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
def test_disjunction_promotion3(self):
qs = BaseA.objects.filter(a__f2='bar')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
# The ANDed a__f2 filter allows us to keep using INNER JOIN
# even inside the ORed case. If the join to a returns nothing,
# the ANDed filter for a__f2 can't be true.
qs = qs.filter(Q(a__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
def test_disjunction_promotion3_demote(self):
# This one needs demotion logic: the first filter causes a to be
# outer joined, the second filter makes it inner join again.
qs = BaseA.objects.filter(
Q(a__f1='foo') | Q(b__f2='foo')).filter(a__f2='bar')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
def test_disjunction_promotion4_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('JOIN'), 0)
# Demote needed for the "a" join. It is marked as outer join by
# above filter (even if it is trimmed away).
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
def test_disjunction_promotion4(self):
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
def test_disjunction_promotion5_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
# Note that the above filters on a force the join to an
# inner join even if it is trimmed.
self.assertEqual(str(qs.query).count('JOIN'), 0)
qs = qs.filter(Q(a__f1='foo') | Q(b__f1='foo'))
# So, now the a__f1 join doesn't need promotion.
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
# But b__f1 does.
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
qs = BaseA.objects.filter(Q(a__f1='foo') | Q(b__f1='foo'))
# Now the join to a is created as LOUTER
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
def test_disjunction_promotion6(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('JOIN'), 0)
qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
def test_disjunction_promotion7(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('JOIN'), 0)
qs = BaseA.objects.filter(Q(a__f1='foo') | (Q(b__f1='foo') & Q(a__f1='bar')))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
qs = BaseA.objects.filter(
(Q(a__f1='foo') | Q(b__f1='foo')) & (Q(a__f1='bar') | Q(c__f1='foo'))
)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
qs = BaseA.objects.filter(
(Q(a__f1='foo') | (Q(a__f1='bar')) & (Q(b__f1='bar') | Q(c__f1='foo')))
)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
def test_disjunction_promotion_fexpression(self):
qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(a__f2=F('b__f2')) | Q(c__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | (Q(pk=1) & Q(pk=2)))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
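# Hedged distillation of the promotion/demotion behavior above as one
# illustrative helper (never invoked): ORing across two nullable relations
# promotes both joins to LEFT OUTER, and a later ANDed filter on one
# relation demotes that join back to INNER.
def _promotion_demotion_sketch():
promoted = BaseA.objects.filter(Q(a__f1='foo') | Q(b__f1='foo'))
demoted = promoted.filter(a__f2='bar')
return (str(promoted.query).count('LEFT OUTER JOIN'), # 2
str(demoted.query).count('LEFT OUTER JOIN')) # 1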
class ManyToManyExcludeTest(TestCase):
def test_exclude_many_to_many(self):
Identifier.objects.create(name='extra')
program = Program.objects.create(identifier=Identifier.objects.create(name='program'))
channel = Channel.objects.create(identifier=Identifier.objects.create(name='channel'))
channel.programs.add(program)
# channel contains 'program', so all Identifiers except that one
# should be returned
self.assertQuerysetEqual(
Identifier.objects.exclude(program__channel=channel).order_by('name'),
['<Identifier: channel>', '<Identifier: extra>']
)
self.assertQuerysetEqual(
Identifier.objects.exclude(program__channel=None).order_by('name'),
['<Identifier: program>']
)
def test_ticket_12823(self):
pg3 = Page.objects.create(text='pg3')
pg2 = Page.objects.create(text='pg2')
pg1 = Page.objects.create(text='pg1')
pa1 = Paragraph.objects.create(text='pa1')
pa1.page.set([pg1, pg2])
pa2 = Paragraph.objects.create(text='pa2')
pa2.page.set([pg2, pg3])
pa3 = Paragraph.objects.create(text='pa3')
ch1 = Chapter.objects.create(title='ch1', paragraph=pa1)
ch2 = Chapter.objects.create(title='ch2', paragraph=pa2)
ch3 = Chapter.objects.create(title='ch3', paragraph=pa3)
b1 = Book.objects.create(title='b1', chapter=ch1)
b2 = Book.objects.create(title='b2', chapter=ch2)
b3 = Book.objects.create(title='b3', chapter=ch3)
q = Book.objects.exclude(chapter__paragraph__page__text='pg1')
self.assertNotIn('IS NOT NULL', str(q.query))
self.assertEqual(len(q), 2)
self.assertNotIn(b1, q)
self.assertIn(b2, q)
self.assertIn(b3, q)
class RelabelCloneTest(TestCase):
def test_ticket_19964(self):
my1 = MyObject.objects.create(data='foo')
my1.parent = my1
my1.save()
my2 = MyObject.objects.create(data='bar', parent=my1)
parents = MyObject.objects.filter(parent=F('id'))
children = MyObject.objects.filter(parent__in=parents).exclude(parent=F('id'))
self.assertEqual(list(parents), [my1])
# Evaluating the children query (which has parents as part of it) does
# not change results for the parents query.
self.assertEqual(list(children), [my2])
self.assertEqual(list(parents), [my1])
class Ticket20101Tests(TestCase):
def test_ticket_20101(self):
"""
Tests QuerySet ORed combining in exclude subquery case.
"""
t = Tag.objects.create(name='foo')
a1 = Annotation.objects.create(tag=t, name='a1')
a2 = Annotation.objects.create(tag=t, name='a2')
a3 = Annotation.objects.create(tag=t, name='a3')
n = Note.objects.create(note='foo', misc='bar')
qs1 = Note.objects.exclude(annotation__in=[a1, a2])
qs2 = Note.objects.filter(annotation__in=[a3])
self.assertIn(n, qs1)
self.assertNotIn(n, qs2)
self.assertIn(n, (qs1 | qs2))
class EmptyStringPromotionTests(TestCase):
def test_empty_string_promotion(self):
qs = RelatedObject.objects.filter(single__name='')
if connection.features.interprets_empty_strings_as_nulls:
self.assertIn('LEFT OUTER JOIN', str(qs.query))
else:
self.assertNotIn('LEFT OUTER JOIN', str(qs.query))
class ValuesSubqueryTests(TestCase):
def test_values_in_subquery(self):
# Check that if a values() queryset is used, then the given values
# will be used instead of forcing use of the relation's field.
o1 = Order.objects.create(id=-2)
o2 = Order.objects.create(id=-1)
oi1 = OrderItem.objects.create(order=o1, status=0)
oi1.status = oi1.pk
oi1.save()
OrderItem.objects.create(order=o2, status=0)
# The query below should match o1 as it has related order_item
# with id == status.
self.assertSequenceEqual(Order.objects.filter(items__in=OrderItem.objects.values_list('status')), [o1])
class DoubleInSubqueryTests(TestCase):
def test_double_subquery_in(self):
lfa1 = LeafA.objects.create(data='foo')
lfa2 = LeafA.objects.create(data='bar')
lfb1 = LeafB.objects.create(data='lfb1')
lfb2 = LeafB.objects.create(data='lfb2')
Join.objects.create(a=lfa1, b=lfb1)
Join.objects.create(a=lfa2, b=lfb2)
leaf_as = LeafA.objects.filter(data='foo').values_list('pk', flat=True)
joins = Join.objects.filter(a__in=leaf_as).values_list('b__id', flat=True)
qs = LeafB.objects.filter(pk__in=joins)
self.assertSequenceEqual(qs, [lfb1])
class Ticket18785Tests(TestCase):
def test_ticket_18785(self):
# Test join trimming from ticket18785
qs = Item.objects.exclude(
note__isnull=False
).filter(
name='something', creator__extra__isnull=True
).order_by()
self.assertEqual(1, str(qs.query).count('INNER JOIN'))
self.assertEqual(0, str(qs.query).count('OUTER JOIN'))
class Ticket20788Tests(TestCase):
def test_ticket_20788(self):
Paragraph.objects.create()
paragraph = Paragraph.objects.create()
page = paragraph.page.create()
chapter = Chapter.objects.create(paragraph=paragraph)
Book.objects.create(chapter=chapter)
paragraph2 = Paragraph.objects.create()
Page.objects.create()
chapter2 = Chapter.objects.create(paragraph=paragraph2)
book2 = Book.objects.create(chapter=chapter2)
sentences_not_in_pub = Book.objects.exclude(chapter__paragraph__page=page)
self.assertSequenceEqual(sentences_not_in_pub, [book2])
class Ticket12807Tests(TestCase):
def test_ticket_12807(self):
p1 = Paragraph.objects.create()
p2 = Paragraph.objects.create()
# The ORed condition below should have no effect on the query - the
# ~Q(pk__in=[]) will always be True.
qs = Paragraph.objects.filter((Q(pk=p2.pk) | ~Q(pk__in=[])) & Q(pk=p1.pk))
self.assertSequenceEqual(qs, [p1])
class RelatedLookupTypeTests(TestCase):
error = 'Cannot query "%s": Must be "%s" instance.'
@classmethod
def setUpTestData(cls):
cls.oa = ObjectA.objects.create(name="oa")
cls.poa = ProxyObjectA.objects.get(name="oa")
cls.coa = ChildObjectA.objects.create(name="coa")
cls.wrong_type = Order.objects.create(id=cls.oa.pk)
cls.ob = ObjectB.objects.create(name="ob", objecta=cls.oa, num=1)
ProxyObjectB.objects.create(name="pob", objecta=cls.oa, num=2)
cls.pob = ProxyObjectB.objects.all()
ObjectC.objects.create(childobjecta=cls.coa)
def test_wrong_type_lookup(self):
"""
A ValueError is raised when the incorrect object type is passed to a
query lookup.
"""
# Passing incorrect object type
with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
ObjectB.objects.get(objecta=self.wrong_type)
with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
ObjectB.objects.filter(objecta__in=[self.wrong_type])
with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
ObjectB.objects.filter(objecta=self.wrong_type)
with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)):
ObjectA.objects.filter(objectb__in=[self.wrong_type, self.ob])
# Passing an object of the class on which query is done.
with self.assertRaisesMessage(ValueError, self.error % (self.ob, ObjectA._meta.object_name)):
ObjectB.objects.filter(objecta__in=[self.poa, self.ob])
with self.assertRaisesMessage(ValueError, self.error % (self.ob, ChildObjectA._meta.object_name)):
ObjectC.objects.exclude(childobjecta__in=[self.coa, self.ob])
def test_wrong_backward_lookup(self):
"""
A ValueError is raised when the incorrect object type is passed to a
query lookup for backward relations.
"""
with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)):
ObjectA.objects.filter(objectb__in=[self.oa, self.ob])
with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)):
ObjectA.objects.exclude(objectb=self.oa)
with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)):
ObjectA.objects.get(objectb=self.wrong_type)
def test_correct_lookup(self):
"""
When passing proxy model objects, child objects, or parent objects,
lookups work fine.
"""
out_a = ['<ObjectA: oa>', ]
out_b = ['<ObjectB: ob>', '<ObjectB: pob>']
out_c = ['<ObjectC: >']
# proxy model objects
self.assertQuerysetEqual(ObjectB.objects.filter(objecta=self.poa).order_by('name'), out_b)
self.assertQuerysetEqual(ObjectA.objects.filter(objectb__in=self.pob).order_by('pk'), out_a * 2)
# child objects
self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.coa]), [])
self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.poa, self.coa]).order_by('name'), out_b)
self.assertQuerysetEqual(
ObjectB.objects.filter(objecta__in=iter([self.poa, self.coa])).order_by('name'),
out_b
)
# parent objects
self.assertQuerysetEqual(ObjectC.objects.exclude(childobjecta=self.oa), out_c)
# QuerySet related object type checking shouldn't issue queries
# (the querysets aren't evaluated here, hence zero queries) (#23266).
with self.assertNumQueries(0):
ObjectB.objects.filter(objecta__in=ObjectA.objects.all())
def test_values_queryset_lookup(self):
"""
#23396 - Ensure ValueQuerySets are not checked for compatibility with the lookup field
"""
# Make sure the num and objecta field values match.
ob = ObjectB.objects.get(name='ob')
ob.num = ob.objecta.pk
ob.save()
pob = ObjectB.objects.get(name='pob')
pob.num = pob.objecta.pk
pob.save()
self.assertQuerysetEqual(ObjectB.objects.filter(
objecta__in=ObjectB.objects.all().values_list('num')
).order_by('pk'), ['<ObjectB: ob>', '<ObjectB: pob>'])
class Ticket14056Tests(TestCase):
def test_ticket_14056(self):
s1 = SharedConnection.objects.create(data='s1')
s2 = SharedConnection.objects.create(data='s2')
s3 = SharedConnection.objects.create(data='s3')
PointerA.objects.create(connection=s2)
expected_ordering = (
[s1, s3, s2] if connection.features.nulls_order_largest
else [s2, s1, s3]
)
self.assertSequenceEqual(SharedConnection.objects.order_by('-pointera__connection', 'pk'), expected_ordering)
class Ticket20955Tests(TestCase):
def test_ticket_20955(self):
jack = Staff.objects.create(name='jackstaff')
jackstaff = StaffUser.objects.create(staff=jack)
jill = Staff.objects.create(name='jillstaff')
jillstaff = StaffUser.objects.create(staff=jill)
task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task")
task_get = Task.objects.get(pk=task.pk)
# Load data so that assertNumQueries doesn't complain about the get
# version's queries.
task_get.creator.staffuser.staff
task_get.owner.staffuser.staff
qs = Task.objects.select_related(
'creator__staffuser__staff', 'owner__staffuser__staff')
self.assertEqual(str(qs.query).count(' JOIN '), 6)
task_select_related = qs.get(pk=task.pk)
with self.assertNumQueries(0):
self.assertEqual(task_select_related.creator.staffuser.staff,
task_get.creator.staffuser.staff)
self.assertEqual(task_select_related.owner.staffuser.staff,
task_get.owner.staffuser.staff)
class Ticket21203Tests(TestCase):
def test_ticket_21203(self):
p = Ticket21203Parent.objects.create(parent_bool=True)
c = Ticket21203Child.objects.create(parent=p)
qs = Ticket21203Child.objects.select_related('parent').defer('parent__created')
self.assertSequenceEqual(qs, [c])
self.assertIs(qs[0].parent.parent_bool, True)
class ValuesJoinPromotionTests(TestCase):
def test_values_no_promotion_for_existing(self):
qs = Node.objects.filter(parent__parent__isnull=False)
self.assertIn(' INNER JOIN ', str(qs.query))
qs = qs.values('parent__parent__id')
self.assertIn(' INNER JOIN ', str(qs.query))
# Make sure there is a left outer join without the filter.
qs = Node.objects.values('parent__parent__id')
self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = ObjectB.objects.values('objecta__name')
self.assertIn(' INNER JOIN ', str(qs.query))
def test_ticket_21376(self):
a = ObjectA.objects.create()
ObjectC.objects.create(objecta=a)
qs = ObjectC.objects.filter(
Q(objecta=a) | Q(objectb__objecta=a),
)
qs = qs.filter(
Q(objectb=1) | Q(objecta=a),
)
self.assertEqual(qs.count(), 1)
tblname = connection.ops.quote_name(ObjectB._meta.db_table)
self.assertIn(' LEFT OUTER JOIN %s' % tblname, str(qs.query))
class ForeignKeyToBaseExcludeTests(TestCase):
def test_ticket_21787(self):
sc1 = SpecialCategory.objects.create(special_name='sc1', name='sc1')
sc2 = SpecialCategory.objects.create(special_name='sc2', name='sc2')
sc3 = SpecialCategory.objects.create(special_name='sc3', name='sc3')
c1 = CategoryItem.objects.create(category=sc1)
CategoryItem.objects.create(category=sc2)
self.assertSequenceEqual(SpecialCategory.objects.exclude(categoryitem__id=c1.pk).order_by('name'), [sc2, sc3])
self.assertSequenceEqual(SpecialCategory.objects.filter(categoryitem__id=c1.pk), [sc1])
class ReverseM2MCustomPkTests(TestCase):
def test_ticket_21879(self):
cpt1 = CustomPkTag.objects.create(id='cpt1', tag='cpt1')
cp1 = CustomPk.objects.create(name='cp1', extra='extra')
cp1.custompktag_set.add(cpt1)
self.assertSequenceEqual(CustomPk.objects.filter(custompktag=cpt1), [cp1])
self.assertSequenceEqual(CustomPkTag.objects.filter(custom_pk=cp1), [cpt1])
class Ticket22429Tests(TestCase):
def test_ticket_22429(self):
sc1 = School.objects.create()
st1 = Student.objects.create(school=sc1)
sc2 = School.objects.create()
st2 = Student.objects.create(school=sc2)
cr = Classroom.objects.create(school=sc1)
cr.students.add(st1)
queryset = Student.objects.filter(~Q(classroom__school=F('school')))
self.assertSequenceEqual(queryset, [st2])
class Ticket23605Tests(TestCase):
def test_ticket_23605(self):
# Test filtering on a complicated q-object from the ticket's report.
# The query structure is such that we have multiple nested subqueries.
# The original problem was that the inner queries weren't relabeled
# correctly.
# See also #24090.
a1 = Ticket23605A.objects.create()
a2 = Ticket23605A.objects.create()
c1 = Ticket23605C.objects.create(field_c0=10000.0)
Ticket23605B.objects.create(
field_b0=10000.0, field_b1=True,
modelc_fk=c1, modela_fk=a1)
complex_q = Q(pk__in=Ticket23605A.objects.filter(
Q(
# True for a1 as field_b0 = 10000, field_c0=10000
# False for a2 as no ticket23605b found
ticket23605b__field_b0__gte=1000000 /
F("ticket23605b__modelc_fk__field_c0")
) &
# True for a1 (field_b1=True)
Q(ticket23605b__field_b1=True) & ~Q(ticket23605b__pk__in=Ticket23605B.objects.filter(
~(
# Same filters as above commented filters, but
# double-negated (one for Q() above, one for
# parentheses). So, again a1 match, a2 not.
Q(field_b1=True) &
Q(field_b0__gte=1000000 / F("modelc_fk__field_c0"))
)
))).filter(ticket23605b__field_b1=True))
qs1 = Ticket23605A.objects.filter(complex_q)
self.assertSequenceEqual(qs1, [a1])
qs2 = Ticket23605A.objects.exclude(complex_q)
self.assertSequenceEqual(qs2, [a2])
class TestTicket24279(TestCase):
def test_ticket_24279(self):
School.objects.create()
qs = School.objects.filter(Q(pk__in=()) | Q())
self.assertQuerysetEqual(qs, [])
class TestInvalidValuesRelation(TestCase):
def test_invalid_values(self):
with self.assertRaises(ValueError):
Annotation.objects.filter(tag='abc')
with self.assertRaises(ValueError):
Annotation.objects.filter(tag__in=[123, 'abc'])
class TestTicket24605(TestCase):
def test_ticket_24605(self):
"""
Subquery table names should be quoted.
"""
i1 = Individual.objects.create(alive=True)
RelatedIndividual.objects.create(related=i1)
i2 = Individual.objects.create(alive=False)
RelatedIndividual.objects.create(related=i2)
i3 = Individual.objects.create(alive=True)
i4 = Individual.objects.create(alive=False)
self.assertSequenceEqual(Individual.objects.filter(Q(alive=False), Q(related_individual__isnull=True)), [i4])
self.assertSequenceEqual(
Individual.objects.exclude(Q(alive=False), Q(related_individual__isnull=True)).order_by('pk'),
[i1, i2, i3]
)
class Ticket23622Tests(TestCase):
@skipUnlessDBFeature('can_distinct_on_fields')
def test_ticket_23622(self):
"""
Make sure __pk__in and __in work the same for related fields when
using a distinct on subquery.
"""
a1 = Ticket23605A.objects.create()
a2 = Ticket23605A.objects.create()
c1 = Ticket23605C.objects.create(field_c0=0.0)
Ticket23605B.objects.create(
modela_fk=a1, field_b0=123,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1, field_b0=23,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1, field_b0=234,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1, field_b0=12,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2, field_b0=567,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2, field_b0=76,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2, field_b0=7,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2, field_b0=56,
field_b1=True,
modelc_fk=c1,
)
qx = (
Q(ticket23605b__pk__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) &
Q(ticket23605b__field_b0__gte=300)
)
qy = (
Q(ticket23605b__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) &
Q(ticket23605b__field_b0__gte=300)
)
self.assertEqual(
set(Ticket23605A.objects.filter(qx).values_list('pk', flat=True)),
set(Ticket23605A.objects.filter(qy).values_list('pk', flat=True))
)
self.assertSequenceEqual(Ticket23605A.objects.filter(qx), [a2])
| bsd-3-clause |
Desarrollo-CeSPI/meran | dev-plugins/node64/lib/node/wafadmin/Logs.py | 4 | 4722 | #!/usr/bin/env python
# Meran - MERAN UNLP is an ILS (Integrated Library System) which provides Catalog,
# Circulation and User's Management. It's written in Perl, and uses Apache2
# Web-Server, MySQL database and Sphinx 2 indexing.
# Copyright (C) 2009-2013 Grupo de desarrollo de Meran CeSPI-UNLP
#
# This file is part of Meran.
#
# Meran is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Meran is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Meran. If not, see <http://www.gnu.org/licenses/>.
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
import ansiterm
import os, re, logging, traceback, sys
from Constants import *
zones = ''
verbose = 0
colors_lst = {
'USE' : True,
'BOLD' :'\x1b[01;1m',
'RED' :'\x1b[01;31m',
'GREEN' :'\x1b[32m',
'YELLOW':'\x1b[33m',
'PINK' :'\x1b[35m',
'BLUE' :'\x1b[01;34m',
'CYAN' :'\x1b[36m',
'NORMAL':'\x1b[0m',
'cursor_on' :'\x1b[?25h',
'cursor_off' :'\x1b[?25l',
}
got_tty = False
term = os.environ.get('TERM', 'dumb')
if term not in ['dumb', 'emacs']:
try:
got_tty = sys.stderr.isatty() or (sys.platform == 'win32' and term in ['xterm', 'msys'])
except AttributeError:
pass
import Utils
if not got_tty or 'NOCOLOR' in os.environ:
colors_lst['USE'] = False
# test
#if sys.platform == 'win32':
# colors_lst['USE'] = True
def get_color(cl):
if not colors_lst['USE']: return ''
return colors_lst.get(cl, '')
class foo(object):
def __getattr__(self, a):
return get_color(a)
def __call__(self, a):
return get_color(a)
colors = foo()
re_log = re.compile(r'(\w+): (.*)', re.M)
class log_filter(logging.Filter):
def __init__(self, name=None):
pass
def filter(self, rec):
rec.c1 = colors.PINK
rec.c2 = colors.NORMAL
rec.zone = rec.module
if rec.levelno >= logging.INFO:
if rec.levelno >= logging.ERROR:
rec.c1 = colors.RED
elif rec.levelno >= logging.WARNING:
rec.c1 = colors.YELLOW
else:
rec.c1 = colors.GREEN
return True
zone = ''
m = re_log.match(rec.msg)
if m:
zone = rec.zone = m.group(1)
rec.msg = m.group(2)
if zones:
return getattr(rec, 'zone', '') in zones or '*' in zones
elif not verbose > 2:
return False
return True
class formatter(logging.Formatter):
def __init__(self):
logging.Formatter.__init__(self, LOG_FORMAT, HOUR_FORMAT)
def format(self, rec):
if rec.levelno >= logging.WARNING or rec.levelno == logging.INFO:
try:
return '%s%s%s' % (rec.c1, rec.msg.decode('utf-8'), rec.c2)
except:
return rec.c1+rec.msg+rec.c2
return logging.Formatter.format(self, rec)
def debug(*k, **kw):
if verbose:
k = list(k)
k[0] = k[0].replace('\n', ' ')
logging.debug(*k, **kw)
def error(*k, **kw):
logging.error(*k, **kw)
if verbose > 1:
if isinstance(k[0], Utils.WafError):
st = k[0].stack
else:
st = traceback.extract_stack()
if st:
st = st[:-1]
buf = []
for filename, lineno, name, line in st:
buf.append(' File "%s", line %d, in %s' % (filename, lineno, name))
if line:
buf.append(' %s' % line.strip())
if buf: logging.error("\n".join(buf))
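# Hedged usage sketch (not part of the original module): messages follow the
# "zone: text" convention parsed by re_log above, and log_filter shows a
# debug record only when its zone is listed in the module-level `zones`:
#
# import Logs
# Logs.zones = ['task']
# Logs.verbose = 1
# Logs.debug('task: compiling foo.c') # shown, zone matches
# Logs.debug('env: loading cache') # filtered out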
warn = logging.warn
info = logging.info
def init_log():
log = logging.getLogger()
log.handlers = []
log.filters = []
hdlr = logging.StreamHandler()
hdlr.setFormatter(formatter())
log.addHandler(hdlr)
log.addFilter(log_filter())
log.setLevel(logging.DEBUG)
# may be initialized more than once
init_log() | gpl-3.0 |
tboyce021/home-assistant | homeassistant/components/pvpc_hourly_pricing/__init__.py | 19 | 1708 | """The pvpc_hourly_pricing integration to collect Spain official electric prices."""
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_NAME
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from .const import ATTR_TARIFF, DEFAULT_NAME, DEFAULT_TARIFF, DOMAIN, PLATFORM, TARIFFS
UI_CONFIG_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME, default=DEFAULT_NAME): str,
vol.Required(ATTR_TARIFF, default=DEFAULT_TARIFF): vol.In(TARIFFS),
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: cv.ensure_list(UI_CONFIG_SCHEMA)}, extra=vol.ALLOW_EXTRA
)
async def async_setup(hass: HomeAssistant, config: dict):
"""
Set up the electricity price sensor from configuration.yaml.
```yaml
pvpc_hourly_pricing:
- name: PVPC manual ve
tariff: electric_car
- name: PVPC manual nocturna
tariff: discrimination
timeout: 3
```
"""
for conf in config.get(DOMAIN, []):
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, data=conf, context={"source": config_entries.SOURCE_IMPORT}
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: config_entries.ConfigEntry):
"""Set up pvpc hourly pricing from a config entry."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, PLATFORM)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: config_entries.ConfigEntry):
"""Unload a config entry."""
return await hass.config_entries.async_forward_entry_unload(entry, PLATFORM)
| apache-2.0 |
raghavs1108/DataPlotter | pyqtgraph/exporters/Exporter.py | 38 | 5393 | from ..widgets.FileDialog import FileDialog
from ..Qt import QtGui, QtCore, QtSvg
from ..python2_3 import asUnicode, basestring
from ..GraphicsScene import GraphicsScene
import os, re
LastExportDirectory = None
class Exporter(object):
"""
Abstract class used for exporting graphics to file / printer / whatever.
"""
allowCopy = False # subclasses set this to True if they can use the copy buffer
Exporters = []
@classmethod
def register(cls):
"""
Used to register Exporter classes to appear in the export dialog.
"""
Exporter.Exporters.append(cls)
def __init__(self, item):
"""
Initialize with the item to be exported.
Can be an individual graphics item or a scene.
"""
object.__init__(self)
self.item = item
def parameters(self):
"""Return the parameters used to configure this exporter."""
raise Exception("Abstract method must be overridden in subclass.")
def export(self, fileName=None, toBytes=False, copy=False):
"""
If *fileName* is None, pop-up a file dialog.
If *toBytes* is True, return a bytes object rather than writing to file.
If *copy* is True, export to the copy buffer rather than writing to file.
"""
raise Exception("Abstract method must be overridden in subclass.")
def fileSaveDialog(self, filter=None, opts=None):
## Show a file dialog, call self.export(fileName) when finished.
if opts is None:
opts = {}
self.fileDialog = FileDialog()
self.fileDialog.setFileMode(QtGui.QFileDialog.AnyFile)
self.fileDialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
if filter is not None:
if isinstance(filter, basestring):
self.fileDialog.setNameFilter(filter)
elif isinstance(filter, list):
self.fileDialog.setNameFilters(filter)
global LastExportDirectory
exportDir = LastExportDirectory
if exportDir is not None:
self.fileDialog.setDirectory(exportDir)
self.fileDialog.show()
self.fileDialog.opts = opts
self.fileDialog.fileSelected.connect(self.fileSaveFinished)
return
def fileSaveFinished(self, fileName):
fileName = asUnicode(fileName)
global LastExportDirectory
LastExportDirectory = os.path.split(fileName)[0]
## If file name does not match selected extension, append it now
ext = os.path.splitext(fileName)[1].lower().lstrip('.')
selectedExt = re.search(r'\*\.(\w+)\b', asUnicode(self.fileDialog.selectedNameFilter()))
if selectedExt is not None:
selectedExt = selectedExt.groups()[0].lower()
if ext != selectedExt:
fileName = fileName + '.' + selectedExt.lstrip('.')
self.export(fileName=fileName, **self.fileDialog.opts)
def getScene(self):
if isinstance(self.item, GraphicsScene):
return self.item
else:
return self.item.scene()
def getSourceRect(self):
if isinstance(self.item, GraphicsScene):
w = self.item.getViewWidget()
return w.viewportTransform().inverted()[0].mapRect(w.rect())
else:
return self.item.sceneBoundingRect()
def getTargetRect(self):
if isinstance(self.item, GraphicsScene):
return self.item.getViewWidget().rect()
else:
return self.item.mapRectToDevice(self.item.boundingRect())
def setExportMode(self, export, opts=None):
"""
Call setExportMode(export, opts) on all items that will
be painted during the export. This informs the item
that it is about to be painted for export, allowing it to
alter its appearance temporarily
*export* - bool; must be True before exporting and False afterward
*opts* - dict; common parameters are 'antialias' and 'background'
"""
if opts is None:
opts = {}
for item in self.getPaintItems():
if hasattr(item, 'setExportMode'):
item.setExportMode(export, opts)
def getPaintItems(self, root=None):
"""Return a list of all items that should be painted in the correct order."""
if root is None:
root = self.item
preItems = []
postItems = []
if isinstance(root, QtGui.QGraphicsScene):
childs = [i for i in root.items() if i.parentItem() is None]
rootItem = []
else:
childs = root.childItems()
rootItem = [root]
childs.sort(key=lambda a: a.zValue())
while len(childs) > 0:
ch = childs.pop(0)
tree = self.getPaintItems(ch)
if int(ch.flags() & ch.ItemStacksBehindParent) > 0 or (ch.zValue() < 0 and int(ch.flags() & ch.ItemNegativeZStacksBehindParent) > 0):
preItems.extend(tree)
else:
postItems.extend(tree)
return preItems + rootItem + postItems
def render(self, painter, targetRect, sourceRect, item=None):
self.getScene().render(painter, QtCore.QRectF(targetRect), QtCore.QRectF(sourceRect))
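# A hedged, hypothetical subclass sketch (the class name and filter string
# are illustrative, not pyqtgraph API) showing the two abstract methods a
# concrete exporter must override; kept commented out so importing this
# module is unaffected:
#
# class DummyExporter(Exporter):
# def parameters(self):
# return None # no configurable options
# def export(self, fileName=None, toBytes=False, copy=False):
# if fileName is None:
# return self.fileSaveDialog(filter='Dummy (*.dummy)')
# with open(fileName, 'w') as fh:
# fh.write(str(self.getSourceRect()))
# DummyExporter.register()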
| mit |
jazzband/site | migrations/versions/17164a7d1c2e_.py | 1 | 3712 | """
Revision ID: 17164a7d1c2e
Revises: cc0e3906ecfb
Create Date: 2017-09-28 19:53:27.500788
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "17164a7d1c2e"
down_revision = "cc0e3906ecfb"
def upgrade():
op.create_table(
"project_members",
sa.Column("user_id", sa.Integer(), nullable=True),
sa.Column("project_id", sa.Integer(), nullable=True),
sa.Column("date_joined", sa.DateTime(), nullable=True),
sa.Column("is_lead", sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(["project_id"], ["projects.id"]),
sa.ForeignKeyConstraint(["user_id"], ["users.id"]),
)
op.create_index(
op.f("ix_project_members_is_lead"), "project_members", ["is_lead"], unique=False
)
op.create_table(
"project_uploads",
sa.Column("synced_at", sa.DateTime(), nullable=False),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("project_id", sa.Integer(), nullable=True),
sa.Column("version", sa.Text(), nullable=True),
sa.Column("path", sa.Text(), nullable=True),
sa.Column("filename", sa.Text(), nullable=True),
sa.Column("signature", sa.Text(), nullable=False),
sa.Column("size", sa.Integer(), nullable=True),
sa.Column("md5_digest", sa.Text(), nullable=False),
sa.Column("sha256_digest", sa.Text(), nullable=False),
sa.Column("blake2_256_digest", sa.Text(), nullable=False),
sa.Column("upload_time", sa.DateTime(), nullable=True),
sa.CheckConstraint("blake2_256_digest ~* '^[A-F0-9]{64}$'"),
sa.CheckConstraint("sha256_digest ~* '^[A-F0-9]{64}$'"),
sa.ForeignKeyConstraint(["project_id"], ["projects.id"]),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("blake2_256_digest"),
sa.UniqueConstraint("md5_digest"),
sa.UniqueConstraint("sha256_digest"),
sa.UniqueConstraint("signature"),
)
op.create_index(
op.f("ix_project_uploads_filename"),
"project_uploads",
["filename"],
unique=True,
)
op.create_index(
op.f("ix_project_uploads_path"), "project_uploads", ["path"], unique=True
)
op.create_index(
op.f("ix_project_uploads_version"), "project_uploads", ["version"], unique=False
)
op.create_index(
"project_uploads_project_version",
"project_uploads",
["project_id", "version"],
unique=False,
)
op.add_column(
"projects", sa.Column("client_id", postgresql.UUID(as_uuid=True), nullable=True)
)
op.add_column(
"projects",
sa.Column("secret_key", postgresql.UUID(as_uuid=True), nullable=True),
)
op.create_index("release_name_idx", "projects", ["name"], unique=False)
op.create_index(
"release_name_is_active_idx", "projects", ["name", "is_active"], unique=False
)
def downgrade():
op.drop_index("release_name_is_active_idx", table_name="projects")
op.drop_index("release_name_idx", table_name="projects")
op.drop_column("projects", "secret_key")
op.drop_column("projects", "client_id")
op.drop_index("project_uploads_project_version", table_name="project_uploads")
op.drop_index(op.f("ix_project_uploads_version"), table_name="project_uploads")
op.drop_index(op.f("ix_project_uploads_path"), table_name="project_uploads")
op.drop_index(op.f("ix_project_uploads_filename"), table_name="project_uploads")
op.drop_table("project_uploads")
op.drop_index(op.f("ix_project_members_is_lead"), table_name="project_members")
op.drop_table("project_members")
| mit |
xxsergzzxx/python-for-android | python3-alpha/extra_modules/gdata/apps/service.py | 47 | 20661 | #!/usr/bin/python
#
# Copyright (C) 2007 SIOS Technology, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'tmatsuo@sios.com (Takashi MATSUO)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import urllib.request, urllib.parse, urllib.error
import gdata
import atom.service
import gdata.service
import gdata.apps
import atom
API_VER="2.0"
HTTP_OK=200
UNKOWN_ERROR=1000
USER_DELETED_RECENTLY=1100
USER_SUSPENDED=1101
DOMAIN_USER_LIMIT_EXCEEDED=1200
DOMAIN_ALIAS_LIMIT_EXCEEDED=1201
DOMAIN_SUSPENDED=1202
DOMAIN_FEATURE_UNAVAILABLE=1203
ENTITY_EXISTS=1300
ENTITY_DOES_NOT_EXIST=1301
ENTITY_NAME_IS_RESERVED=1302
ENTITY_NAME_NOT_VALID=1303
INVALID_GIVEN_NAME=1400
INVALID_FAMILY_NAME=1401
INVALID_PASSWORD=1402
INVALID_USERNAME=1403
INVALID_HASH_FUNCTION_NAME=1404
INVALID_HASH_DIGGEST_LENGTH=1405
INVALID_EMAIL_ADDRESS=1406
INVALID_QUERY_PARAMETER_VALUE=1407
TOO_MANY_RECIPIENTS_ON_EMAIL_LIST=1500
DEFAULT_QUOTA_LIMIT='2048'
class Error(Exception):
pass
class AppsForYourDomainException(Error):
def __init__(self, response):
Error.__init__(self, response)
try:
self.element_tree = ElementTree.fromstring(response['body'])
self.error_code = int(self.element_tree[0].attrib['errorCode'])
self.reason = self.element_tree[0].attrib['reason']
self.invalidInput = self.element_tree[0].attrib['invalidInput']
except:
self.error_code = UNKOWN_ERROR
class AppsService(gdata.service.GDataService):
"""Client for the Google Apps Provisioning service."""
def __init__(self, email=None, password=None, domain=None, source=None,
server='apps-apis.google.com', additional_headers=None,
**kwargs):
"""Creates a client for the Google Apps Provisioning service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
domain: string (optional) The Google Apps domain name.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'apps-apis.google.com'.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
gdata.service.GDataService.__init__(
self, email=email, password=password, service='apps', source=source,
server=server, additional_headers=additional_headers, **kwargs)
self.ssl = True
self.port = 443
self.domain = domain
def _baseURL(self):
return "/a/feeds/%s" % self.domain
def AddAllElementsFromAllPages(self, link_finder, func):
"""retrieve all pages and add all elements"""
next = link_finder.GetNextLink()
while next is not None:
next_feed = self.Get(next.href, converter=func)
for a_entry in next_feed.entry:
link_finder.entry.append(a_entry)
next = next_feed.GetNextLink()
return link_finder
def RetrievePageOfEmailLists(self, start_email_list_name=None,
num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY,
backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve one page of email list"""
uri = "%s/emailList/%s" % (self._baseURL(), API_VER)
if start_email_list_name is not None:
uri += "?startEmailListName=%s" % start_email_list_name
try:
return gdata.apps.EmailListFeedFromString(str(self.GetWithRetries(
uri, num_retries=num_retries, delay=delay, backoff=backoff)))
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
def GetGeneratorForAllEmailLists(
self, num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve a generator for all emaillists in this domain."""
first_page = self.RetrievePageOfEmailLists(num_retries=num_retries,
delay=delay,
backoff=backoff)
return self.GetGeneratorFromLinkFinder(
first_page, gdata.apps.EmailListRecipientFeedFromString,
num_retries=num_retries, delay=delay, backoff=backoff)
def RetrieveAllEmailLists(self):
"""Retrieve all email list of a domain."""
ret = self.RetrievePageOfEmailLists()
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.EmailListFeedFromString)
def RetrieveEmailList(self, list_name):
"""Retreive a single email list by the list's name."""
uri = "%s/emailList/%s/%s" % (
self._baseURL(), API_VER, list_name)
try:
return self.Get(uri, converter=gdata.apps.EmailListEntryFromString)
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
def RetrieveEmailLists(self, recipient):
"""Retrieve All Email List Subscriptions for an Email Address."""
uri = "%s/emailList/%s?recipient=%s" % (
self._baseURL(), API_VER, recipient)
try:
ret = gdata.apps.EmailListFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.EmailListFeedFromString)
def RemoveRecipientFromEmailList(self, recipient, list_name):
"""Remove recipient from email list."""
uri = "%s/emailList/%s/%s/recipient/%s" % (
self._baseURL(), API_VER, list_name, recipient)
try:
self.Delete(uri)
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
def RetrievePageOfRecipients(self, list_name, start_recipient=None,
num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY,
backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve one page of recipient of an email list. """
uri = "%s/emailList/%s/%s/recipient" % (
self._baseURL(), API_VER, list_name)
if start_recipient is not None:
uri += "?startRecipient=%s" % start_recipient
try:
return gdata.apps.EmailListRecipientFeedFromString(str(
self.GetWithRetries(
uri, num_retries=num_retries, delay=delay, backoff=backoff)))
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
def GetGeneratorForAllRecipients(
self, list_name, num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve a generator for all recipients of a particular emaillist."""
first_page = self.RetrievePageOfRecipients(list_name,
num_retries=num_retries,
delay=delay,
backoff=backoff)
return self.GetGeneratorFromLinkFinder(
first_page, gdata.apps.EmailListRecipientFeedFromString,
num_retries=num_retries, delay=delay, backoff=backoff)
def RetrieveAllRecipients(self, list_name):
"""Retrieve all recipient of an email list."""
ret = self.RetrievePageOfRecipients(list_name)
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.EmailListRecipientFeedFromString)
def AddRecipientToEmailList(self, recipient, list_name):
"""Add a recipient to a email list."""
uri = "%s/emailList/%s/%s/recipient" % (
self._baseURL(), API_VER, list_name)
recipient_entry = gdata.apps.EmailListRecipientEntry()
recipient_entry.who = gdata.apps.Who(email=recipient)
try:
return gdata.apps.EmailListRecipientEntryFromString(
str(self.Post(recipient_entry, uri)))
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
def DeleteEmailList(self, list_name):
"""Delete a email list"""
uri = "%s/emailList/%s/%s" % (self._baseURL(), API_VER, list_name)
try:
self.Delete(uri)
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
def CreateEmailList(self, list_name):
"""Create a email list. """
uri = "%s/emailList/%s" % (self._baseURL(), API_VER)
email_list_entry = gdata.apps.EmailListEntry()
email_list_entry.email_list = gdata.apps.EmailList(name=list_name)
try:
return gdata.apps.EmailListEntryFromString(
str(self.Post(email_list_entry, uri)))
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
def DeleteNickname(self, nickname):
"""Delete a nickname"""
uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname)
try:
self.Delete(uri)
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
def RetrievePageOfNicknames(self, start_nickname=None,
num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY,
backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve one page of nicknames in the domain"""
uri = "%s/nickname/%s" % (self._baseURL(), API_VER)
if start_nickname is not None:
uri += "?startNickname=%s" % start_nickname
try:
return gdata.apps.NicknameFeedFromString(str(self.GetWithRetries(
uri, num_retries=num_retries, delay=delay, backoff=backoff)))
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
def GetGeneratorForAllNicknames(
self, num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve a generator for all nicknames in this domain."""
first_page = self.RetrievePageOfNicknames(num_retries=num_retries,
delay=delay,
backoff=backoff)
return self.GetGeneratorFromLinkFinder(
first_page, gdata.apps.NicknameFeedFromString, num_retries=num_retries,
delay=delay, backoff=backoff)
def RetrieveAllNicknames(self):
"""Retrieve all nicknames in the domain"""
ret = self.RetrievePageOfNicknames()
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.NicknameFeedFromString)
def GetGeneratorForAllNicknamesOfAUser(
self, user_name, num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve a generator for all nicknames of a particular user."""
uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name)
try:
first_page = gdata.apps.NicknameFeedFromString(str(self.GetWithRetries(
uri, num_retries=num_retries, delay=delay, backoff=backoff)))
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
return self.GetGeneratorFromLinkFinder(
first_page, gdata.apps.NicknameFeedFromString, num_retries=num_retries,
delay=delay, backoff=backoff)
def RetrieveNicknames(self, user_name):
"""Retrieve nicknames of the user"""
uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name)
try:
ret = gdata.apps.NicknameFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.NicknameFeedFromString)
def RetrieveNickname(self, nickname):
"""Retrieve a nickname.
Args:
nickname: string The nickname to retrieve
Returns:
gdata.apps.NicknameEntry
"""
uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname)
try:
return gdata.apps.NicknameEntryFromString(str(self.Get(uri)))
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
def CreateNickname(self, user_name, nickname):
"""Create a nickname"""
uri = "%s/nickname/%s" % (self._baseURL(), API_VER)
nickname_entry = gdata.apps.NicknameEntry()
nickname_entry.login = gdata.apps.Login(user_name=user_name)
nickname_entry.nickname = gdata.apps.Nickname(name=nickname)
try:
return gdata.apps.NicknameEntryFromString(
str(self.Post(nickname_entry, uri)))
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
def DeleteUser(self, user_name):
"""Delete a user account"""
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
try:
return self.Delete(uri)
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
def UpdateUser(self, user_name, user_entry):
"""Update a user account."""
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
try:
return gdata.apps.UserEntryFromString(str(self.Put(user_entry, uri)))
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
def CreateUser(self, user_name, family_name, given_name, password,
suspended='false', quota_limit=None,
password_hash_function=None,
change_password=None):
"""Create a user account. """
uri = "%s/user/%s" % (self._baseURL(), API_VER)
user_entry = gdata.apps.UserEntry()
user_entry.login = gdata.apps.Login(
user_name=user_name, password=password, suspended=suspended,
hash_function_name=password_hash_function,
change_password=change_password)
user_entry.name = gdata.apps.Name(family_name=family_name,
given_name=given_name)
if quota_limit is not None:
user_entry.quota = gdata.apps.Quota(limit=str(quota_limit))
try:
return gdata.apps.UserEntryFromString(str(self.Post(user_entry, uri)))
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
  def SuspendUser(self, user_name):
    """Suspend the user account and return the (possibly updated) UserEntry."""
    user_entry = self.RetrieveUser(user_name)
if user_entry.login.suspended != 'true':
user_entry.login.suspended = 'true'
user_entry = self.UpdateUser(user_name, user_entry)
return user_entry
  def RestoreUser(self, user_name):
    """Restore (un-suspend) the user account and return the (possibly updated) UserEntry."""
    user_entry = self.RetrieveUser(user_name)
if user_entry.login.suspended != 'false':
user_entry.login.suspended = 'false'
user_entry = self.UpdateUser(user_name, user_entry)
return user_entry
def RetrieveUser(self, user_name):
"""Retrieve an user account.
Args:
user_name: string The user name to retrieve
Returns:
gdata.apps.UserEntry
"""
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
try:
return gdata.apps.UserEntryFromString(str(self.Get(uri)))
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
def RetrievePageOfUsers(self, start_username=None,
num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY,
backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve one page of users in this domain."""
uri = "%s/user/%s" % (self._baseURL(), API_VER)
if start_username is not None:
uri += "?startUsername=%s" % start_username
try:
return gdata.apps.UserFeedFromString(str(self.GetWithRetries(
uri, num_retries=num_retries, delay=delay, backoff=backoff)))
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
def GetGeneratorForAllUsers(self,
num_retries=gdata.service.DEFAULT_NUM_RETRIES,
delay=gdata.service.DEFAULT_DELAY,
backoff=gdata.service.DEFAULT_BACKOFF):
"""Retrieve a generator for all users in this domain."""
first_page = self.RetrievePageOfUsers(num_retries=num_retries, delay=delay,
backoff=backoff)
return self.GetGeneratorFromLinkFinder(
first_page, gdata.apps.UserFeedFromString, num_retries=num_retries,
delay=delay, backoff=backoff)
def RetrieveAllUsers(self):
"""Retrieve all users in this domain. OBSOLETE"""
ret = self.RetrievePageOfUsers()
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.UserFeedFromString)
class PropertyService(gdata.service.GDataService):
"""Client for the Google Apps Property service."""
def __init__(self, email=None, password=None, domain=None, source=None,
server='apps-apis.google.com', additional_headers=None):
gdata.service.GDataService.__init__(self, email=email, password=password,
service='apps', source=source,
server=server,
additional_headers=additional_headers)
self.ssl = True
self.port = 443
self.domain = domain
def AddAllElementsFromAllPages(self, link_finder, func):
"""retrieve all pages and add all elements"""
next = link_finder.GetNextLink()
while next is not None:
next_feed = self.Get(next.href, converter=func)
for a_entry in next_feed.entry:
link_finder.entry.append(a_entry)
next = next_feed.GetNextLink()
return link_finder
  def _GetPropertyEntry(self, properties):
    property_entry = gdata.apps.PropertyEntry()
    props = []
    for name, value in properties.items():
      if name is not None and value is not None:
        props.append(gdata.apps.Property(name=name, value=value))
    property_entry.property = props
    return property_entry
  def _PropertyEntry2Dict(self, property_entry):
    properties = {}
    for prop in property_entry.property:
      properties[prop.name] = prop.value
    return properties
def _GetPropertyFeed(self, uri):
try:
return gdata.apps.PropertyFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError as e:
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
def _GetPropertiesList(self, uri):
property_feed = self._GetPropertyFeed(uri)
# pagination
property_feed = self.AddAllElementsFromAllPages(
property_feed, gdata.apps.PropertyFeedFromString)
properties_list = []
for property_entry in property_feed.entry:
properties_list.append(self._PropertyEntry2Dict(property_entry))
return properties_list
def _GetProperties(self, uri):
try:
return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString(
str(self.Get(uri))))
except gdata.service.RequestError as e:
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
def _PostProperties(self, uri, properties):
property_entry = self._GetPropertyEntry(properties)
try:
return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString(
str(self.Post(property_entry, uri))))
except gdata.service.RequestError as e:
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
def _PutProperties(self, uri, properties):
property_entry = self._GetPropertyEntry(properties)
try:
return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString(
str(self.Put(property_entry, uri))))
except gdata.service.RequestError as e:
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
def _DeleteProperties(self, uri):
try:
self.Delete(uri)
except gdata.service.RequestError as e:
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
def _bool2str(b):
  """Convert a Python boolean to the lowercase string the API expects."""
  if b is None:
    return None
  return str(b is True).lower()
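# A hedged usage sketch for the provisioning methods above. The enclosing
# client class name (AppsService) and ProgrammaticLogin are assumed from the
# public gdata.apps.service / gdata.service API and are not shown in this
# excerpt:
#
#   service = AppsService(email='admin@example.com', password='secret',
#                         domain='example.com')
#   service.ProgrammaticLogin()
#   service.CreateUser('jdoe', 'Doe', 'John', 'password1')
#   service.CreateEmailList('staff')
#   service.AddRecipientToEmailList('jdoe@example.com', 'staff')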
| apache-2.0 |
htuch/envoy | examples/grpc-bridge/service/gen/kv_pb2.py | 24 | 9527 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: kv.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='kv.proto',
package='kv',
syntax='proto3',
serialized_pb=_b('\n\x08kv.proto\x12\x02kv\"\x19\n\nGetRequest\x12\x0b\n\x03key\x18\x01 \x01(\t\"\x1c\n\x0bGetResponse\x12\r\n\x05value\x18\x01 \x01(\t\"(\n\nSetRequest\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\x19\n\x0bSetResponse\x12\n\n\x02ok\x18\x01 \x01(\x08\x32T\n\x02KV\x12&\n\x03Get\x12\x0e.kv.GetRequest\x1a\x0f.kv.GetResponse\x12&\n\x03Set\x12\x0e.kv.SetRequest\x1a\x0f.kv.SetResponseb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GETREQUEST = _descriptor.Descriptor(
name='GetRequest',
full_name='kv.GetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='kv.GetRequest.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=16,
serialized_end=41,
)
_GETRESPONSE = _descriptor.Descriptor(
name='GetResponse',
full_name='kv.GetResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='kv.GetResponse.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=43,
serialized_end=71,
)
_SETREQUEST = _descriptor.Descriptor(
name='SetRequest',
full_name='kv.SetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='kv.SetRequest.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='kv.SetRequest.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=113,
)
_SETRESPONSE = _descriptor.Descriptor(
name='SetResponse',
full_name='kv.SetResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ok', full_name='kv.SetResponse.ok', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=115,
serialized_end=140,
)
DESCRIPTOR.message_types_by_name['GetRequest'] = _GETREQUEST
DESCRIPTOR.message_types_by_name['GetResponse'] = _GETRESPONSE
DESCRIPTOR.message_types_by_name['SetRequest'] = _SETREQUEST
DESCRIPTOR.message_types_by_name['SetResponse'] = _SETRESPONSE
GetRequest = _reflection.GeneratedProtocolMessageType('GetRequest', (_message.Message,), dict(
DESCRIPTOR = _GETREQUEST,
__module__ = 'kv_pb2'
# @@protoc_insertion_point(class_scope:kv.GetRequest)
))
_sym_db.RegisterMessage(GetRequest)
GetResponse = _reflection.GeneratedProtocolMessageType('GetResponse', (_message.Message,), dict(
DESCRIPTOR = _GETRESPONSE,
__module__ = 'kv_pb2'
# @@protoc_insertion_point(class_scope:kv.GetResponse)
))
_sym_db.RegisterMessage(GetResponse)
SetRequest = _reflection.GeneratedProtocolMessageType('SetRequest', (_message.Message,), dict(
DESCRIPTOR = _SETREQUEST,
__module__ = 'kv_pb2'
# @@protoc_insertion_point(class_scope:kv.SetRequest)
))
_sym_db.RegisterMessage(SetRequest)
SetResponse = _reflection.GeneratedProtocolMessageType('SetResponse', (_message.Message,), dict(
DESCRIPTOR = _SETRESPONSE,
__module__ = 'kv_pb2'
# @@protoc_insertion_point(class_scope:kv.SetResponse)
))
_sym_db.RegisterMessage(SetResponse)
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class KVStub(object):
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Get = channel.unary_unary(
'/kv.KV/Get',
request_serializer=GetRequest.SerializeToString,
response_deserializer=GetResponse.FromString,
)
self.Set = channel.unary_unary(
'/kv.KV/Set',
request_serializer=SetRequest.SerializeToString,
response_deserializer=SetResponse.FromString,
)
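# A hedged client-side sketch; the channel target is illustrative and the
# request classes are the generated messages above:
#
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = KVStub(channel)
#   stub.Set(SetRequest(key='greeting', value='hello'))
#   reply = stub.Get(GetRequest(key='greeting'))
#   print(reply.value)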
class KVServicer(object):
def Get(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Set(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_KVServicer_to_server(servicer, server):
rpc_method_handlers = {
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=GetRequest.FromString,
response_serializer=GetResponse.SerializeToString,
),
'Set': grpc.unary_unary_rpc_method_handler(
servicer.Set,
request_deserializer=SetRequest.FromString,
response_serializer=SetResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'kv.KV', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
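# A hedged server-side sketch: subclass KVServicer and register the instance.
# The in-memory dict backend and the port are illustrative assumptions:
#
#   from concurrent import futures
#   class InMemoryKV(KVServicer):
#       store = {}
#       def Get(self, request, context):
#           return GetResponse(value=self.store.get(request.key, ''))
#       def Set(self, request, context):
#           self.store[request.key] = request.value
#           return SetResponse(ok=True)
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#   add_KVServicer_to_server(InMemoryKV(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()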
class BetaKVServicer(object):
def Get(self, request, context):
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def Set(self, request, context):
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaKVStub(object):
def Get(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
raise NotImplementedError()
Get.future = None
def Set(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
raise NotImplementedError()
Set.future = None
def beta_create_KV_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
request_deserializers = {
('kv.KV', 'Get'): GetRequest.FromString,
('kv.KV', 'Set'): SetRequest.FromString,
}
response_serializers = {
('kv.KV', 'Get'): GetResponse.SerializeToString,
('kv.KV', 'Set'): SetResponse.SerializeToString,
}
method_implementations = {
('kv.KV', 'Get'): face_utilities.unary_unary_inline(servicer.Get),
('kv.KV', 'Set'): face_utilities.unary_unary_inline(servicer.Set),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_KV_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
request_serializers = {
('kv.KV', 'Get'): GetRequest.SerializeToString,
('kv.KV', 'Set'): SetRequest.SerializeToString,
}
response_deserializers = {
('kv.KV', 'Get'): GetResponse.FromString,
('kv.KV', 'Set'): SetResponse.FromString,
}
cardinalities = {
'Get': cardinality.Cardinality.UNARY_UNARY,
'Set': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'kv.KV', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
iguzu/gae-django | tests/regressiontests/string_lookup/models.py | 20 | 2936 | # -*- coding: utf-8 -*-
from django.db import models
class Foo(models.Model):
name = models.CharField(max_length=50)
friend = models.CharField(max_length=50, blank=True)
def __unicode__(self):
return "Foo %s" % self.name
class Bar(models.Model):
name = models.CharField(max_length=50)
normal = models.ForeignKey(Foo, related_name='normal_foo')
fwd = models.ForeignKey("Whiz")
back = models.ForeignKey("Foo")
def __unicode__(self):
return "Bar %s" % self.place.name
class Whiz(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return "Whiz %s" % self.name
class Child(models.Model):
parent = models.OneToOneField('Base')
name = models.CharField(max_length=50)
def __unicode__(self):
return "Child %s" % self.name
class Base(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return "Base %s" % self.name
class Article(models.Model):
name = models.CharField(max_length=50)
text = models.TextField()
submitted_from = models.IPAddressField(blank=True, null=True)
def __str__(self):
return "Article %s" % self.name
__test__ = {'API_TESTS': ur"""
# Regression test for #1661 and #1662: Check that string form referencing of
# models works, both as pre and post reference, on all RelatedField types.
>>> f1 = Foo(name="Foo1")
>>> f1.save()
>>> f2 = Foo(name="Foo2")
>>> f2.save()
>>> w1 = Whiz(name="Whiz1")
>>> w1.save()
>>> b1 = Bar(name="Bar1", normal=f1, fwd=w1, back=f2)
>>> b1.save()
>>> b1.normal
<Foo: Foo Foo1>
>>> b1.fwd
<Whiz: Whiz Whiz1>
>>> b1.back
<Foo: Foo Foo2>
>>> base1 = Base(name="Base1")
>>> base1.save()
>>> child1 = Child(name="Child1", parent=base1)
>>> child1.save()
>>> child1.parent
<Base: Base Base1>
# Regression tests for #3937: make sure we can use unicode characters in
# queries.
# BUG: These tests fail on MySQL, but it's a problem with the test setup. A
# properly configured UTF-8 database can handle this.
>>> fx = Foo(name='Bjorn', friend=u'François')
>>> fx.save()
>>> Foo.objects.get(friend__contains=u'\xe7')
<Foo: Foo Bjorn>
# We can also do the above query using UTF-8 strings.
>>> Foo.objects.get(friend__contains='\xc3\xa7')
<Foo: Foo Bjorn>
# Regression tests for #5087: make sure we can perform queries on TextFields.
>>> a = Article(name='Test', text='The quick brown fox jumps over the lazy dog.')
>>> a.save()
>>> Article.objects.get(text__exact='The quick brown fox jumps over the lazy dog.')
<Article: Article Test>
>>> Article.objects.get(text__contains='quick brown fox')
<Article: Article Test>
# Regression test for #708: "like" queries on IP address fields require casting
# to text (on PostgreSQL).
>>> Article(name='IP test', text='The body', submitted_from='192.0.2.100').save()
>>> Article.objects.filter(submitted_from__contains='192.0.2')
[<Article: Article IP test>]
"""}
| bsd-3-clause |
eicher31/compassion-switzerland | partner_communication_switzerland/tests/test_sms_provider.py | 3 | 1574 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# @author: Quentin Gigon <gigon.quentin@gmail.com>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import mock
from odoo.tests import HttpCase
class TestSmsProvider(HttpCase):
_name = "my.classs"
def setUp(self):
super(TestSmsProvider, self).setUp()
self.env['ir.config_parameter'] \
.set_param('web.external.url', 'base')
external_url = self.env['ir.config_parameter'] \
.get_param('web.external.url')
self.assertEqual(external_url, 'base')
self.partner = self.env.ref('base.res_partner_1')
@mock.patch('odoo.addons.sms_939.wizards.sms_sender_wizard.smsbox_send')
def test_sms_provider(self, smsbox_send):
wizard = self.env['partner.communication.generate.wizard'].create({
'name': 'test',
'force_language': 'fr_CH',
'sms_provider_id': self.env.ref('sms_939.small_account_id').id,
'send_mode': 'sms',
'partner_ids': [(6, 0, self.partner.ids)],
})
result = wizard.generate()
communication_job = self.env['partner.communication.job'].search([
('id', '=', result['domain'][0][2][0])
])
self.assertEqual(self.env.ref('sms_939.small_account_id').id,
communication_job.sms_provider_id.id)
| agpl-3.0 |
infobloxopen/neutron | neutron/db/migration/alembic_migrations/firewall_init_ops.py | 61 | 3536 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial schema operations for firewall service plugin
from alembic import op
import sqlalchemy as sa
action_types = sa.Enum('allow', 'deny', name='firewallrules_action')
def upgrade():
op.create_table(
'firewall_policies',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('description', sa.String(length=1024), nullable=True),
sa.Column('shared', sa.Boolean(), nullable=True),
sa.Column('audited', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'firewalls',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('description', sa.String(length=1024), nullable=True),
sa.Column('shared', sa.Boolean(), nullable=True),
sa.Column('admin_state_up', sa.Boolean(), nullable=True),
sa.Column('status', sa.String(length=16), nullable=True),
sa.Column('firewall_policy_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['firewall_policy_id'],
['firewall_policies.id'],
name='firewalls_ibfk_1'),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'firewall_rules',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('description', sa.String(length=1024), nullable=True),
sa.Column('firewall_policy_id', sa.String(length=36), nullable=True),
sa.Column('shared', sa.Boolean(), nullable=True),
sa.Column('protocol', sa.String(length=40), nullable=True),
sa.Column('ip_version', sa.Integer(), nullable=False),
sa.Column('source_ip_address', sa.String(length=46), nullable=True),
sa.Column('destination_ip_address', sa.String(length=46),
nullable=True),
sa.Column('source_port_range_min', sa.Integer(), nullable=True),
sa.Column('source_port_range_max', sa.Integer(), nullable=True),
sa.Column('destination_port_range_min', sa.Integer(), nullable=True),
sa.Column('destination_port_range_max', sa.Integer(), nullable=True),
sa.Column('action', action_types, nullable=True),
sa.Column('enabled', sa.Boolean(), nullable=True),
sa.Column('position', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['firewall_policy_id'],
['firewall_policies.id'],
name='firewall_rules_ibfk_1'),
sa.PrimaryKeyConstraint('id'))
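# Note: initial-schema modules like this one are normally run through alembic
# via the neutron-db-manage wrapper (e.g. `neutron-db-manage upgrade head`);
# migrations of this era define no downgrade counterpart.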
| apache-2.0 |
soumide1102/pycbc | pycbc/distributions/arbitrary.py | 2 | 12257 | # Copyright (C) 2016 Miriam Cabero Mueller, Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides classes for evaluating arbitrary distributions from
a file.
"""
import h5py
import numpy
import scipy.stats
from pycbc.distributions import bounded
import pycbc.transforms
class Arbitrary(bounded.BoundedDist):
"""A distribution constructed from a set of parameter values using a kde.
Bounds may be optionally provided to limit the range.
Parameters
----------
bounds : {None, dict}
Independent bounds on one or more parameters may be provided to limit
the range of the kde.
\**params :
The keyword arguments should provide the names of the parameters and
a list of their parameter values. If multiple parameters are provided,
a single kde will be produced with dimension equal to the number of
parameters.
"""
name = 'arbitrary'
def __init__(self, bounds=None, **kwargs):
# initialize the bounds
if bounds is None:
bounds = {}
bounds.update({p: None for p in kwargs if p not in bounds})
super(Arbitrary, self).__init__(**bounds)
# check that all parameters specified in bounds have samples
if set(self.params) != set(kwargs.keys()):
raise ValueError("Must provide samples for all parameters given "
"in the bounds dictionary")
        # if bounds are provided, use a logit transform to map the points
        # to +/- infinity
self._transforms = {}
self._tparams = {}
for param,bnds in self.bounds.items():
if numpy.isfinite(bnds[1] - bnds[0]):
tparam = 'logit'+param
samples = kwargs[param]
t = pycbc.transforms.Logit(param, tparam, domain=bnds)
self._transforms[tparam] = t
self._tparams[param] = tparam
                # remove any sample points that fall outside of the bounds
                inside = bnds.__contains__(samples)
                if not inside.all():
                    samples = samples[inside]
# transform the sample points
kwargs[param] = t.transform({param: samples})[tparam]
elif not (~numpy.isfinite(bnds[0]) and ~numpy.isfinite(bnds[1])):
raise ValueError("if specifying bounds, both bounds must "
"be finite")
# build the kde
self._kde = self.get_kde_from_arrays(*[kwargs[p] for p in self.params])
@property
def params(self):
return self._params
@property
def kde(self):
return self._kde
def _pdf(self, **kwargs):
"""Returns the pdf at the given values. The keyword arguments must
contain all of parameters in self's params. Unrecognized arguments are
ignored.
"""
for p in self._params:
if p not in kwargs.keys():
raise ValueError('Missing parameter {} to construct pdf.'
.format(p))
if kwargs in self:
# transform into the kde space
jacobian = 1.
for param, tparam in self._tparams.items():
t = self._transforms[tparam]
try:
samples = t.transform({param: kwargs[param]})
except ValueError as e:
# can get a value error if the value is exactly == to
# the bounds, in which case, just return 0.
if kwargs[param] in self.bounds[param]:
return 0.
else:
raise ValueError(e)
kwargs[param] = samples[tparam]
# update the jacobian for the transform; if p is the pdf
# in the params frame (the one we want) and p' is the pdf
# in the transformed frame (the one that's calculated) then:
# p = J * p', where J is the Jacobian of going from p to p'
jacobian *= t.jacobian(samples)
# for scipy < 0.15.0, gaussian_kde.pdf = gaussian_kde.evaluate
this_pdf = jacobian * self._kde.evaluate([kwargs[p]
for p in self._params])
if len(this_pdf) == 1:
return float(this_pdf)
else:
return this_pdf
else:
return 0.
def _logpdf(self, **kwargs):
"""Returns the log of the pdf at the given values. The keyword
arguments must contain all of parameters in self's params.
Unrecognized arguments are ignored.
"""
if kwargs not in self:
return -numpy.inf
else:
return numpy.log(self._pdf(**kwargs))
def rvs(self, size=1, param=None):
"""Gives a set of random values drawn from the kde.
Parameters
----------
size : {1, int}
The number of values to generate; default is 1.
param : {None, string}
If provided, will just return values for the given parameter.
Otherwise, returns random values for each parameter.
Returns
-------
structured array
The random values in a numpy structured array. If a param was
specified, the array will only have an element corresponding to the
given parameter. Otherwise, the array will have an element for each
parameter in self's params.
"""
if param is not None:
dtype = [(param, float)]
else:
dtype = [(p, float) for p in self.params]
size = int(size)
arr = numpy.zeros(size, dtype=dtype)
draws = self._kde.resample(size)
draws = {param: draws[ii,:] for ii,param in enumerate(self.params)}
for (param,_) in dtype:
try:
# transform back to param space
tparam = self._tparams[param]
tdraws = {tparam: draws[param]}
draws[param] = self._transforms[tparam].inverse_transform(
tdraws)[param]
except KeyError:
pass
arr[param] = draws[param]
return arr
@staticmethod
def get_kde_from_arrays(*arrays):
"""Constructs a KDE from the given arrays.
\*arrays :
Each argument should be a 1D numpy array to construct the kde from.
The resulting KDE will have dimension given by the number of
parameters.
"""
return scipy.stats.gaussian_kde(numpy.vstack(arrays))
@classmethod
def from_config(cls, cp, section, variable_args):
"""Raises a NotImplementedError; to load from a config file, use
`FromFile`.
"""
raise NotImplementedError("This class does not support loading from a "
"config file. Use `FromFile` instead.")
class FromFile(Arbitrary):
"""A distribution that reads the values of the parameter(s) from an hdf
file, computes the kde to construct the pdf, and draws random variables
from it.
Parameters
----------
filename : str
The path to an hdf file containing the values of the parameters that
want to be used to construct the distribution. Each parameter should
be a separate dataset in the hdf file, and all datasets should have
the same size. For example, to give a prior for mass1 and mass2 from
file f, f['mass1'] and f['mass2'] contain the n values for each
parameter.
\**params :
The keyword arguments should provide the names of the parameters to be
read from the file and (optionally) their bounds. If no parameters are
provided, it will use all the parameters found in the file. To provide
bounds, specify e.g. mass1=[10,100]. Otherwise, mass1=None.
Attributes
----------
name : 'fromfile'
The name of the distribution.
filename : str
The path to the file containing values for the parameter(s).
params : list
Parameters to read from file.
norm : float
The normalization of the multi-dimensional pdf.
lognorm : float
The log of the normalization.
kde :
The kde obtained from the values in the file.
"""
name = 'fromfile'
def __init__(self, filename=None, **params):
if filename is None:
raise ValueError('A file must be specified for this distribution.')
self._filename = filename
# Get the parameter names to pass to get_kde_from_file
if len(params) == 0:
ps = None
else:
ps = params.keys()
param_vals = self.get_arrays_from_file(filename, params=ps)
super(FromFile, self).__init__(bounds=params, **param_vals)
@property
def filename(self):
return self._filename
@staticmethod
def get_arrays_from_file(params_file, params=None):
"""Reads the values of one or more parameters from an hdf file and
returns as a dictionary.
Parameters
----------
params_file : str
The hdf file that contains the values of the parameters.
params : {None, list}
If provided, will just retrieve the given parameter names.
Returns
-------
dict
A dictionary of the parameters mapping `param_name -> array`.
"""
        try:
            f = h5py.File(params_file, 'r')
        except IOError:
            raise ValueError('File not found.')
if params is not None:
if not isinstance(params, list):
params = [params]
for p in params:
if p not in f.keys():
raise ValueError('Parameter {} is not in {}'
.format(p, params_file))
else:
params = [str(k) for k in f.keys()]
params_values = {p:f[p][:] for p in params}
f.close()
return params_values
@classmethod
def from_config(cls, cp, section, variable_args):
"""Returns a distribution based on a configuration file.
The parameters
for the distribution are retrieved from the section titled
"[`section`-`variable_args`]" in the config file.
The file to construct the distribution from must be provided by setting
`filename`. Boundary arguments can be provided in the same way as
described in `get_param_bounds_from_config`.
.. code-block:: ini
[{section}-{tag}]
name = fromfile
filename = ra_prior.hdf
min-ra = 0
max-ra = 6.28
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the distribution
options.
section : str
Name of the section in the configuration file.
variable_args : str
The names of the parameters for this distribution, separated by
`prior.VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
Returns
-------
BoundedDist
A distribution instance from the pycbc.inference.prior module.
"""
return bounded.bounded_from_config(cls, cp, section, variable_args,
bounds_required=False)
__all__ = ['Arbitrary', 'FromFile']
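# A minimal usage sketch, assuming 'ra_prior.hdf' is a file holding a 1D
# dataset named 'ra' of prior samples and that the bounded base class exposes
# rvs()/pdf() as elsewhere in pycbc.distributions (names illustrative):
#
#   dist = FromFile(filename='ra_prior.hdf', ra=[0., 6.28])
#   draws = dist.rvs(size=1000)   # structured array with an 'ra' field
#   weight = dist.pdf(ra=1.5)     # kde density, zero outside the bounds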
| gpl-3.0 |
TheWylieStCoyote/gnuradio | gr-vocoder/python/vocoder/cvsd.py | 2 | 3118 | #!/usr/bin/env python
#
# Copyright 2007,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr, filter, blocks
from . import vocoder_swig
class cvsd_encode_fb(gr.hier_block2):
    '''
    This is a wrapper for the CVSD encoder that performs the interpolation and
    filtering necessary to work with the vocoder. It scales an incoming float
    (+-1) up to 32000 (slightly below the maximum short value), interpolates
    and low-pass filters it, converts it to a short, and then vocodes it.
    The incoming sampling rate can be anything, though, of course, the higher
    the sampling rate and the higher the interpolation rate are, the better
    the sound quality.
    '''
def __init__(self, resample=8, bw=0.5):
'''
When using the CVSD vocoder, appropriate sampling rates are from 8k to 64k with resampling rates
from 1 to 8. A rate of 8k with a resampling rate of 8 provides a good quality signal.
'''
gr.hier_block2.__init__(self, "cvsd_encode",
gr.io_signature(1, 1, gr.sizeof_float), # Input signature
gr.io_signature(1, 1, gr.sizeof_char)) # Output signature
scale_factor = 32000.0
self.interp = resample
src_scale = blocks.multiply_const_ff(scale_factor)
taps = filter.firdes.low_pass(self.interp, self.interp, bw, 2*bw)
interp = filter.interp_fir_filter_fff(self.interp, taps)
f2s = blocks.float_to_short()
enc = vocoder_swig.cvsd_encode_sb()
self.connect(self, src_scale, interp, f2s, enc, self)
class cvsd_decode_bf(gr.hier_block2):
    '''
    This is a wrapper for the CVSD decoder that performs the decimation and
    filtering necessary to work with the vocoder. It decodes the incoming
    CVSD bit stream to shorts, converts them to floats, decimates and
    low-pass filters them, and scales the result down by 1/32000 (slightly
    below the maximum value) to avoid clipping.
    The sampling rate can be anything, though, of course, the higher the
    sampling rate and the higher the decimation rate are, the better the
    sound quality.
    '''
def __init__(self, resample=8, bw=0.5):
'''
When using the CVSD vocoder, appropriate sampling rates are from 8k to 64k with resampling rates
from 1 to 8. A rate of 8k with a resampling rate of 8 provides a good quality signal.
'''
gr.hier_block2.__init__(self, "cvsd_decode",
gr.io_signature(1, 1, gr.sizeof_char), # Input signature
gr.io_signature(1, 1, gr.sizeof_float)) # Output signature
scale_factor = 32000.0
self.decim = resample
dec = vocoder_swig.cvsd_decode_bs()
s2f = blocks.short_to_float()
taps = filter.firdes.low_pass(1, 1, bw, 2*bw)
decim = filter.fir_filter_fff(self.decim, taps)
sink_scale = blocks.multiply_const_ff(1.0 / scale_factor)
self.connect(self, dec, s2f, decim, sink_scale, self)
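# A hedged loopback sketch wiring the two blocks above back to back; the
# vector source/sink come from gr-blocks and the test signal is illustrative:
#
#   from gnuradio import gr, blocks
#   tb = gr.top_block()
#   src = blocks.vector_source_f([0.5] * 8000)
#   snk = blocks.vector_sink_f()
#   tb.connect(src, cvsd_encode_fb(resample=8), cvsd_decode_bf(resample=8), snk)
#   tb.run()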
| gpl-3.0 |
kyl191/ansible | test/units/parsing/test_data_loader.py | 64 | 3283 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import PY3
from yaml.scanner import ScannerError
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, mock_open
from ansible.errors import AnsibleParserError
from ansible.parsing import DataLoader
from ansible.parsing.yaml.objects import AnsibleMapping
class TestDataLoader(unittest.TestCase):
def setUp(self):
# FIXME: need to add tests that utilize vault_password
self._loader = DataLoader()
def tearDown(self):
pass
@patch.object(DataLoader, '_get_file_contents')
def test_parse_json_from_file(self, mock_def):
mock_def.return_value = ("""{"a": 1, "b": 2, "c": 3}""", True)
output = self._loader.load_from_file('dummy_json.txt')
self.assertEqual(output, dict(a=1,b=2,c=3))
@patch.object(DataLoader, '_get_file_contents')
def test_parse_yaml_from_file(self, mock_def):
mock_def.return_value = ("""
a: 1
b: 2
c: 3
""", True)
output = self._loader.load_from_file('dummy_yaml.txt')
self.assertEqual(output, dict(a=1,b=2,c=3))
@patch.object(DataLoader, '_get_file_contents')
def test_parse_fail_from_file(self, mock_def):
mock_def.return_value = ("""
TEXT:
***
NOT VALID
""", True)
self.assertRaises(AnsibleParserError, self._loader.load_from_file, 'dummy_yaml_bad.txt')
class TestDataLoaderWithVault(unittest.TestCase):
def setUp(self):
self._loader = DataLoader()
self._loader.set_vault_password('ansible')
def tearDown(self):
pass
@patch.multiple(DataLoader, path_exists=lambda s, x: True, is_file=lambda s, x: True)
def test_parse_from_vault_1_1_file(self):
vaulted_data = """$ANSIBLE_VAULT;1.1;AES256
33343734386261666161626433386662623039356366656637303939306563376130623138626165
6436333766346533353463636566313332623130383662340a393835656134633665333861393331
37666233346464636263636530626332623035633135363732623332313534306438393366323966
3135306561356164310a343937653834643433343734653137383339323330626437313562306630
3035
"""
if PY3:
builtins_name = 'builtins'
else:
builtins_name = '__builtin__'
with patch(builtins_name + '.open', mock_open(read_data=vaulted_data)):
output = self._loader.load_from_file('dummy_vault.txt')
self.assertEqual(output, dict(foo='bar'))
| gpl-3.0 |
pdellaert/ansible | lib/ansible/modules/windows/win_netbios.py | 23 | 2309 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Thomas Moore (@tmmruk)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_netbios
version_added: '2.9'
short_description: Manage NetBIOS over TCP/IP settings on Windows.
description:
- Enables or disables NetBIOS on Windows network adapters.
- Can be used to protect a system against NBT-NS poisoning and avoid NBNS broadcast storms.
- Settings can be applied system wide or per adapter.
options:
state:
description:
    - Whether NetBIOS should be enabled, disabled, or default (use the DHCP server's setting, or enable NetBIOS if a static IP address is assigned).
choices:
- enabled
- disabled
- default
required: yes
type: str
adapter_names:
description:
- List of adapter names for which to manage NetBIOS settings. If this option is omitted then configuration is applied to all adapters on the system.
- The adapter name used is the connection caption in the Network Control Panel or via C(Get-NetAdapter), eg C(Ethernet 2).
type: list
required: no
author:
- Thomas Moore (@tmmruk)
notes:
- Changing NetBIOS settings does not usually require a reboot and will take effect immediately.
- UDP port 137/138/139 will no longer be listening once NetBIOS is disabled.
'''
EXAMPLES = r'''
- name: Disable NetBIOS system wide
win_netbios:
state: disabled
- name: Disable NetBIOS on Ethernet2
win_netbios:
state: disabled
adapter_names:
- Ethernet2
- name: Enable NetBIOS on Public and Backup adapters
win_netbios:
state: enabled
adapter_names:
- Public
- Backup
- name: Set NetBIOS to system default on all adapters
win_netbios:
state: default
'''
RETURN = r'''
reboot_required:
description: Boolean value stating whether a system reboot is required.
returned: always
type: bool
sample: true
'''
| gpl-3.0 |
tudorvio/nova | nova/tests/unit/virt/xenapi/image/test_bittorrent.py | 51 | 4959 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mox3 import mox
import six
from nova import context
from nova import test
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi.image import bittorrent
from nova.virt.xenapi import vm_utils
class TestBittorrentStore(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(TestBittorrentStore, self).setUp()
self.store = bittorrent.BittorrentStore()
self.mox = mox.Mox()
self.flags(torrent_base_url='http://foo',
connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.context = context.RequestContext(
'user', 'project', auth_token='foobar')
fake.reset()
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
self.session = driver._session
self.stubs.Set(
vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')
def test_download_image(self):
instance = {'uuid': '00000000-0000-0000-0000-000000007357'}
params = {'image_id': 'fake_image_uuid',
'sr_path': '/fake/sr/path',
'torrent_download_stall_cutoff': 600,
'torrent_listen_port_end': 6891,
'torrent_listen_port_start': 6881,
'torrent_max_last_accessed': 86400,
'torrent_max_seeder_processes_per_host': 1,
'torrent_seed_chance': 1.0,
'torrent_seed_duration': 3600,
'torrent_url': 'http://foo/fake_image_uuid.torrent',
'uuid_stack': ['uuid1']}
self.stubs.Set(vm_utils, '_make_uuid_stack',
lambda *a, **kw: ['uuid1'])
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.session.call_plugin_serialized(
'bittorrent', 'download_vhd', **params)
self.mox.ReplayAll()
self.store.download_image(self.context, self.session,
instance, 'fake_image_uuid')
self.mox.VerifyAll()
def test_upload_image(self):
self.assertRaises(NotImplementedError, self.store.upload_image,
self.context, self.session, mox.IgnoreArg, 'fake_image_uuid',
['fake_vdi_uuid'])
class LookupTorrentURLTestCase(test.NoDBTestCase):
def setUp(self):
super(LookupTorrentURLTestCase, self).setUp()
self.store = bittorrent.BittorrentStore()
self.image_id = 'fakeimageid'
def test_default_fetch_url_no_base_url_set(self):
self.flags(torrent_base_url=None,
group='xenserver')
exc = self.assertRaises(
RuntimeError, self.store._lookup_torrent_url_fn)
self.assertEqual('Cannot create default bittorrent URL without'
' xenserver.torrent_base_url configuration option'
' set.',
six.text_type(exc))
def test_default_fetch_url_base_url_is_set(self):
self.flags(torrent_base_url='http://foo',
group='xenserver')
lookup_fn = self.store._lookup_torrent_url_fn()
self.assertEqual('http://foo/fakeimageid.torrent',
lookup_fn(self.image_id))
def test_invalid_base_url_warning_logged(self):
self.flags(torrent_base_url='www.foo.com',
group='xenserver')
# Make sure a warning is logged when an invalid base URL is set,
# where invalid means it does not contain any slash characters
warnings = []
def fake_warn(msg):
warnings.append(msg)
self.stubs.Set(bittorrent.LOG, 'warn', fake_warn)
lookup_fn = self.store._lookup_torrent_url_fn()
self.assertEqual('fakeimageid.torrent',
lookup_fn(self.image_id))
self.assertTrue(any('does not contain a slash character' in msg for
msg in warnings),
'_lookup_torrent_url_fn() did not log a warning '
'message when the torrent_base_url did not contain a '
'slash character.')
| apache-2.0 |
ysekky/GPy | GPy/inference/latent_function_inference/pep.py | 6 | 3521 | from .posterior import Posterior
from ...util.linalg import jitchol, tdot, dtrtrs, dtrtri, pdinv
from ...util import diag
import numpy as np
from . import LatentFunctionInference
log_2_pi = np.log(2*np.pi)
class PEP(LatentFunctionInference):
'''
    Sparse Gaussian processes using Power-Expectation Propagation for
    regression: alpha ~ 0 recovers VarDTC and alpha = 1 recovers FITC.
Reference: A Unifying Framework for Sparse Gaussian Process Approximation using
Power Expectation Propagation, https://arxiv.org/abs/1605.07066
'''
const_jitter = 1e-6
def __init__(self, alpha):
super(PEP, self).__init__()
self.alpha = alpha
def inference(self, kern, X, Z, likelihood, Y, mean_function=None, Y_metadata=None):
assert mean_function is None, "inference with a mean function not implemented"
num_inducing, _ = Z.shape
num_data, output_dim = Y.shape
        # make sure the noise is not heteroscedastic
        sigma_n = likelihood.gaussian_variance(Y_metadata)
        if sigma_n.size > 1:
            raise NotImplementedError("no heteroscedastic noise with this implementation of PEP")
Kmm = kern.K(Z)
Knn = kern.Kdiag(X)
Knm = kern.K(X, Z)
U = Knm
#factor Kmm
diag.add(Kmm, self.const_jitter)
Kmmi, L, Li, _ = pdinv(Kmm)
#compute beta_star, the effective noise precision
LiUT = np.dot(Li, U.T)
sigma_star = sigma_n + self.alpha * (Knn - np.sum(np.square(LiUT),0))
beta_star = 1./sigma_star
# Compute and factor A
A = tdot(LiUT*np.sqrt(beta_star)) + np.eye(num_inducing)
LA = jitchol(A)
# back substitute to get b, P, v
URiy = np.dot(U.T*beta_star,Y)
tmp, _ = dtrtrs(L, URiy, lower=1)
b, _ = dtrtrs(LA, tmp, lower=1)
tmp, _ = dtrtrs(LA, b, lower=1, trans=1)
v, _ = dtrtrs(L, tmp, lower=1, trans=1)
tmp, _ = dtrtrs(LA, Li, lower=1, trans=0)
P = tdot(tmp.T)
alpha_const_term = (1.0-self.alpha) / self.alpha
#compute log marginal
log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \
-np.sum(np.log(np.diag(LA)))*output_dim + \
0.5*output_dim*(1+alpha_const_term)*np.sum(np.log(beta_star)) + \
-0.5*np.sum(np.square(Y.T*np.sqrt(beta_star))) + \
0.5*np.sum(np.square(b)) + 0.5*alpha_const_term*num_data*np.log(sigma_n)
#compute dL_dR
Uv = np.dot(U, v)
dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - (1.0+alpha_const_term)/beta_star + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) \
+ np.sum(np.square(Uv), 1))*beta_star**2
# Compute dL_dKmm
vvT_P = tdot(v.reshape(-1,1)) + P
dL_dK = 0.5*(Kmmi - vvT_P)
KiU = np.dot(Kmmi, U.T)
dL_dK += self.alpha * np.dot(KiU*dL_dR, KiU.T)
# Compute dL_dU
vY = np.dot(v.reshape(-1,1),Y.T)
dL_dU = vY - np.dot(vvT_P, U.T)
dL_dU *= beta_star
dL_dU -= self.alpha * 2.*KiU*dL_dR
dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)
dL_dthetaL += 0.5*alpha_const_term*num_data / sigma_n
grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':dL_dR * self.alpha, 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL}
#construct a posterior object
post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L)
return post, log_marginal, grad_dict
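# A hedged sketch of plugging this inference object into a sparse GP; the
# GPy.core.SparseGP construction is assumed from GPy's public API and is not
# exercised by this module:
#
#   import numpy as np, GPy
#   X = np.random.rand(100, 1)
#   Y = np.sin(6 * X) + 0.1 * np.random.randn(100, 1)
#   Z = np.random.rand(10, 1)
#   m = GPy.core.SparseGP(X, Y, Z, GPy.kern.RBF(1),
#                         GPy.likelihoods.Gaussian(),
#                         inference_method=PEP(alpha=0.5))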
| bsd-3-clause |
austgl/python-gflags | gflags_validators.py | 488 | 6977 | #!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over a one or more flags.
See 'FLAGS VALIDATORS' in gflags.py's docstring for a usage manual.
"""
__author__ = 'olexiy@google.com (Olexiy Oryeshko)'
class Error(Exception):
"""Thrown If validator constraint is not satisfied."""
class Validator(object):
"""Base class for flags validators.
Users should NOT overload these classes, and use gflags.Register...
methods instead.
"""
  # Used to assign each validator a unique insertion_index
validators_count = 0
def __init__(self, checker, message):
"""Constructor to create all validators.
Args:
checker: function to verify the constraint.
Input of this method varies, see SimpleValidator and
DictionaryValidator for a detailed description.
message: string, error message to be shown to the user
"""
self.checker = checker
self.message = message
Validator.validators_count += 1
# Used to assert validators in the order they were registered (CL/18694236)
self.insertion_index = Validator.validators_count
def Verify(self, flag_values):
"""Verify that constraint is satisfied.
flags library calls this method to verify Validator's constraint.
Args:
flag_values: gflags.FlagValues, containing all flags
Raises:
Error: if constraint is not satisfied.
"""
param = self._GetInputToCheckerFunction(flag_values)
if not self.checker(param):
raise Error(self.message)
def GetFlagsNames(self):
"""Return the names of the flags checked by this validator.
Returns:
[string], names of the flags
"""
raise NotImplementedError('This method should be overloaded')
def PrintFlagsWithValues(self, flag_values):
raise NotImplementedError('This method should be overloaded')
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: gflags.FlagValues, containing all flags.
Returns:
Return type depends on the specific validator.
"""
raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
"""Validator behind RegisterValidator() method.
Validates that a single flag passes its checker function. The checker function
takes the flag value and returns True (if value looks fine) or, if flag value
is not valid, either returns False or raises an Exception."""
def __init__(self, flag_name, checker, message):
"""Constructor.
Args:
flag_name: string, name of the flag.
checker: function to verify the validator.
input - value of the corresponding flag (string, boolean, etc).
output - Boolean. Must return True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise Error.
message: string, error message to be shown to the user if validator's
condition is not satisfied
"""
super(SimpleValidator, self).__init__(checker, message)
self.flag_name = flag_name
def GetFlagsNames(self):
return [self.flag_name]
def PrintFlagsWithValues(self, flag_values):
return 'flag --%s=%s' % (self.flag_name, flag_values[self.flag_name].value)
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: gflags.FlagValues
Returns:
value of the corresponding flag.
"""
return flag_values[self.flag_name].value
class DictionaryValidator(Validator):
"""Validator behind RegisterDictionaryValidator method.
Validates that flag values pass their common checker function. The checker
function takes flag values and returns True (if values look fine) or,
if values are not valid, either returns False or raises an Exception.
"""
def __init__(self, flag_names, checker, message):
"""Constructor.
Args:
flag_names: [string], containing names of the flags used by checker.
checker: function to verify the validator.
input - dictionary, with keys() being flag_names, and value for each
key being the value of the corresponding flag (string, boolean, etc).
output - Boolean. Must return True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise Error.
message: string, error message to be shown to the user if validator's
condition is not satisfied
"""
super(DictionaryValidator, self).__init__(checker, message)
self.flag_names = flag_names
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: gflags.FlagValues
Returns:
dictionary, with keys() being self.lag_names, and value for each key
being the value of the corresponding flag (string, boolean, etc).
"""
return dict([key, flag_values[key].value] for key in self.flag_names)
def PrintFlagsWithValues(self, flag_values):
prefix = 'flags '
flags_with_values = []
for key in self.flag_names:
flags_with_values.append('%s=%s' % (key, flag_values[key].value))
return prefix + ', '.join(flags_with_values)
def GetFlagsNames(self):
return self.flag_names
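# A hedged usage sketch: these classes are normally created through the
# gflags.RegisterValidator front end rather than instantiated directly; the
# flag name and bounds below are illustrative:
#
#   import gflags
#   gflags.DEFINE_integer('port', 8080, 'Port to listen on.')
#   gflags.RegisterValidator('port',
#                            lambda value: 1024 <= value <= 65535,
#                            message='--port must be in [1024, 65535].')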
| bsd-3-clause |
Peratham/tensor-sc | scripts/anomaly.py | 2 | 1531 | import snap
import numpy as np
G = snap.PNGraph.New()
set1 = range(0, 2)
set2 = range(2, 4)
set3 = range(4, 6)
set4 = range(6, 22)
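# Sets 1-3 are three tiny blocks wired into a dense directed 3-cycle
# (1 -> 2 -> 3 -> 1) with probability p below, while set4 is a larger random
# background the cycle nodes attach to; the planted cycle is the anomaly.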
for i in set1 + set2 + set3 + set4:
G.AddNode(i)
def add_edges(side1, side2, p):
for i in side1:
for j in side2:
if np.random.random() < p and i != j:
G.AddEdge(i, j)
# Choose random edges to hook up with
def AddRandEdges(side1, side2, num, forward):
for i in side1:
ends = [int(x) for x in np.random.choice(side2, num)]
for j in ends:
if forward:
if not G.IsEdge(i, j) and i != j:
G.AddEdge(i, j)
else:
if not G.IsEdge(j, i) and i != j:
G.AddEdge(j, i)
p = 1
add_edges(set1, set2, p)
add_edges(set2, set3, p)
add_edges(set3, set1, p)
num = 4
AddRandEdges(set1, set4, num, True)
AddRandEdges(set2, set4, num, True)
AddRandEdges(set3, set4, num, True)
num = 2
AddRandEdges(set1, set4, num, False)
AddRandEdges(set2, set4, num, False)
AddRandEdges(set3, set4, num, False)
p = 0.25
add_edges(set4, set4, p)
if 1:
in_avg = np.mean([G.GetNI(i).GetInDeg() for i in set1 + set2 + set3])
out_avg = np.mean([G.GetNI(i).GetOutDeg() for i in set1 + set2 + set3])
print in_avg, out_avg
if 1:
in_avg = np.mean([G.GetNI(i).GetInDeg() for i in set4])
out_avg = np.mean([G.GetNI(i).GetOutDeg() for i in set4])
print in_avg, out_avg
out_file = '../data/anomaly.txt'
print 'data:', out_file
snap.SaveEdgeList(G, out_file)
| bsd-2-clause |
StealthMicro/OctoPi-Makerbot | env/Lib/site-packages/werkzeug/local.py | 84 | 13416 | # -*- coding: utf-8 -*-
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.wsgi import ClosingIterator
from werkzeug._internal import _patch_wrapper
# since each thread has its own greenlet we can just use those as identifiers
# for the context. If greenlets are not available we fall back to the
# current thread ident.
try:
from greenlet import getcurrent as get_ident
except ImportError: # pragma: no cover
try:
from thread import get_ident
except ImportError: # pragma: no cover
from dummy_thread import get_ident
def release_local(local):
"""Releases the contents of the local for the current context.
This makes it possible to use locals without a manager.
Example::
>>> loc = Local()
>>> loc.foo = 42
>>> release_local(loc)
>>> hasattr(loc, 'foo')
False
With this function one can release :class:`Local` objects as well
as :class:`LocalStack` objects. However it is not possible to
release data held by proxies that way, one always has to retain
a reference to the underlying local object in order to be able
to release it.
.. versionadded:: 0.6.1
"""
local.__release_local__()
class Local(object):
__slots__ = ('__storage__', '__ident_func__')
def __init__(self):
object.__setattr__(self, '__storage__', {})
object.__setattr__(self, '__ident_func__', get_ident)
def __iter__(self):
return iter(self.__storage__.items())
def __call__(self, proxy):
"""Create a proxy for a name."""
return LocalProxy(self, proxy)
def __release_local__(self):
self.__storage__.pop(self.__ident_func__(), None)
def __getattr__(self, name):
try:
return self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
ident = self.__ident_func__()
storage = self.__storage__
try:
storage[ident][name] = value
except KeyError:
storage[ident] = {name: value}
def __delattr__(self, name):
try:
del self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
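# Example usage (a minimal sketch; attributes are stored per
# thread/greenlet ident, so each context sees only its own values):
#
#     loc = Local()
#     loc.user = 'alice'    # visible only in the current context
#     release_local(loc)    # drop this context's storage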
class LocalStack(object):
"""This class works similar to a :class:`Local` but keeps a stack
of objects instead. This is best explained with an example::
>>> ls = LocalStack()
>>> ls.push(42)
>>> ls.top
42
>>> ls.push(23)
>>> ls.top
23
>>> ls.pop()
23
>>> ls.top
42
They can be force released by using a :class:`LocalManager` or with
the :func:`release_local` function but the correct way is to pop the
item from the stack after using. When the stack is empty it will
no longer be bound to the current context (and as such released).
Calling the stack without arguments returns a proxy that resolves to
the topmost item on the stack.
.. versionadded:: 0.6.1
"""
def __init__(self):
self._local = Local()
def __release_local__(self):
self._local.__release_local__()
def _get__ident_func__(self):
return self._local.__ident_func__
def _set__ident_func__(self, value):
object.__setattr__(self._local, '__ident_func__', value)
__ident_func__ = property(_get__ident_func__, _set__ident_func__)
del _get__ident_func__, _set__ident_func__
def __call__(self):
def _lookup():
rv = self.top
if rv is None:
raise RuntimeError('object unbound')
return rv
return LocalProxy(_lookup)
def push(self, obj):
"""Pushes a new item to the stack"""
rv = getattr(self._local, 'stack', None)
if rv is None:
self._local.stack = rv = []
rv.append(obj)
return rv
def pop(self):
"""Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
"""
stack = getattr(self._local, 'stack', None)
if stack is None:
return None
elif len(stack) == 1:
release_local(self._local)
return stack[-1]
else:
return stack.pop()
@property
def top(self):
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
try:
return self._local.stack[-1]
except (AttributeError, IndexError):
return None
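# Example usage (a minimal sketch): calling the stack returns a proxy that
# always resolves to the topmost item, raising RuntimeError when empty:
#
#     ls = LocalStack()
#     current = ls()              # LocalProxy over ls.top
#     ls.push({'user': 'alice'})
#     assert current['user'] == 'alice'
#     ls.pop()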
class LocalManager(object):
"""Local objects cannot manage themselves. For that you need a local
manager. You can pass a local manager multiple locals or add them later
by appending them to `manager.locals`. Every time the manager cleans up,
it will clean up all the data left in the locals for this context.
The `ident_func` parameter can be added to override the default ident
function for the wrapped locals.
.. versionchanged:: 0.6.1
Instead of a manager the :func:`release_local` function can be used
as well.
.. versionchanged:: 0.7
`ident_func` was added.
"""
def __init__(self, locals=None, ident_func=None):
if locals is None:
self.locals = []
elif isinstance(locals, Local):
self.locals = [locals]
else:
self.locals = list(locals)
if ident_func is not None:
self.ident_func = ident_func
for local in self.locals:
object.__setattr__(local, '__ident_func__', ident_func)
else:
self.ident_func = get_ident
def get_ident(self):
"""Return the context identifier the local objects use internally for
this context. You cannot override this method to change the behavior
but use it to link other context local objects (such as SQLAlchemy's
scoped sessions) to the Werkzeug locals.
.. versionchanged:: 0.7
You can pass a different ident function to the local manager that
will then be propagated to all the locals passed to the
constructor.
"""
return self.ident_func()
def cleanup(self):
"""Manually clean up the data in the locals for this context. Call
this at the end of the request or use `make_middleware()`.
"""
for local in self.locals:
release_local(local)
def make_middleware(self, app):
"""Wrap a WSGI application so that cleaning up happens after
request end.
"""
def application(environ, start_response):
return ClosingIterator(app(environ, start_response), self.cleanup)
return application
def middleware(self, func):
"""Like `make_middleware` but for decorating functions.
Example usage::
@manager.middleware
def application(environ, start_response):
...
The difference to `make_middleware` is that the function passed
will have all the arguments copied from the inner application
(name, docstring, module).
"""
return _patch_wrapper(func, self.make_middleware(func))
def __repr__(self):
return '<%s storages: %d>' % (
self.__class__.__name__,
len(self.locals)
)
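# Example usage (a sketch of the intended WSGI wiring; the application
# body is illustrative):
#
#     local = Local()
#     local_manager = LocalManager([local])
#
#     def application(environ, start_response):
#         local.request = environ
#         start_response('200 OK', [('Content-Type', 'text/plain')])
#         return ['Hello World!']
#
#     application = local_manager.make_middleware(application)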
class LocalProxy(object):
"""Acts as a proxy for a werkzeug local. Forwards all operations to
a proxied object. The only operations not supported for forwarding
are right handed operands and any kind of assignment.
Example usage::
from werkzeug.local import Local
l = Local()
# these are proxies
request = l('request')
user = l('user')
from werkzeug.local import LocalStack
_response_local = LocalStack()
# this is a proxy
response = _response_local()
Whenever something is bound to l.user / l.request the proxy objects
will forward all operations. If no object is bound a :exc:`RuntimeError`
will be raised.
To create proxies to :class:`Local` or :class:`LocalStack` objects,
call the object as shown above. If you want to have a proxy to an
object looked up by a function, you can (as of Werkzeug 0.6.1) pass
a function to the :class:`LocalProxy` constructor::
session = LocalProxy(lambda: get_current_request().session)
.. versionchanged:: 0.6.1
The class can be instantiated with a callable as well now.
"""
__slots__ = ('__local', '__dict__', '__name__')
def __init__(self, local, name=None):
object.__setattr__(self, '_LocalProxy__local', local)
object.__setattr__(self, '__name__', name)
def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
if not hasattr(self.__local, '__release_local__'):
return self.__local()
try:
return getattr(self.__local, self.__name__)
except AttributeError:
raise RuntimeError('no object bound to %s' % self.__name__)
@property
def __dict__(self):
try:
return self._get_current_object().__dict__
except RuntimeError:
raise AttributeError('__dict__')
def __repr__(self):
try:
obj = self._get_current_object()
except RuntimeError:
return '<%s unbound>' % self.__class__.__name__
return repr(obj)
def __nonzero__(self):
try:
return bool(self._get_current_object())
except RuntimeError:
return False
def __unicode__(self):
try:
return unicode(self._get_current_object())
except RuntimeError:
return repr(self)
def __dir__(self):
try:
return dir(self._get_current_object())
except RuntimeError:
return []
def __getattr__(self, name):
if name == '__members__':
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key, value):
self._get_current_object()[key] = value
def __delitem__(self, key):
del self._get_current_object()[key]
def __setslice__(self, i, j, seq):
self._get_current_object()[i:j] = seq
def __delslice__(self, i, j):
del self._get_current_object()[i:j]
__setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n)
__str__ = lambda x: str(x._get_current_object())
__lt__ = lambda x, o: x._get_current_object() < o
__le__ = lambda x, o: x._get_current_object() <= o
__eq__ = lambda x, o: x._get_current_object() == o
__ne__ = lambda x, o: x._get_current_object() != o
__gt__ = lambda x, o: x._get_current_object() > o
__ge__ = lambda x, o: x._get_current_object() >= o
__cmp__ = lambda x, o: cmp(x._get_current_object(), o)
__hash__ = lambda x: hash(x._get_current_object())
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
__len__ = lambda x: len(x._get_current_object())
__getitem__ = lambda x, i: x._get_current_object()[i]
__iter__ = lambda x: iter(x._get_current_object())
__contains__ = lambda x, i: i in x._get_current_object()
__getslice__ = lambda x, i, j: x._get_current_object()[i:j]
__add__ = lambda x, o: x._get_current_object() + o
__sub__ = lambda x, o: x._get_current_object() - o
__mul__ = lambda x, o: x._get_current_object() * o
__floordiv__ = lambda x, o: x._get_current_object() // o
__mod__ = lambda x, o: x._get_current_object() % o
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
__pow__ = lambda x, o: x._get_current_object() ** o
__lshift__ = lambda x, o: x._get_current_object() << o
__rshift__ = lambda x, o: x._get_current_object() >> o
__and__ = lambda x, o: x._get_current_object() & o
__xor__ = lambda x, o: x._get_current_object() ^ o
__or__ = lambda x, o: x._get_current_object() | o
__div__ = lambda x, o: x._get_current_object().__div__(o)
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
__neg__ = lambda x: -(x._get_current_object())
__pos__ = lambda x: +(x._get_current_object())
__abs__ = lambda x: abs(x._get_current_object())
__invert__ = lambda x: ~(x._get_current_object())
__complex__ = lambda x: complex(x._get_current_object())
__int__ = lambda x: int(x._get_current_object())
__long__ = lambda x: long(x._get_current_object())
__float__ = lambda x: float(x._get_current_object())
__oct__ = lambda x: oct(x._get_current_object())
__hex__ = lambda x: hex(x._get_current_object())
__index__ = lambda x: x._get_current_object().__index__()
__coerce__ = lambda x, o: x.__coerce__(x, o)
__enter__ = lambda x: x.__enter__()
__exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)
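# Example (a minimal sketch): when the real object is needed, e.g. for
# isinstance() checks or to hand it to another context, unwrap the proxy:
#
#     real_request = request._get_current_object()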
| agpl-3.0 |
tgalal/yowsup | yowsup/layers/protocol_groups/protocolentities/iq_result_groups_info.py | 59 | 4256 | from yowsup.structs import ProtocolTreeNode
from yowsup.layers.protocol_iq.protocolentities import ResultIqProtocolEntity
class InfoGroupsResultIqProtocolEntity(ResultIqProtocolEntity):
'''
<iq type="result" from="{{GROUP_ID}}" id="{{IQ_ID}}">
<group subject="{{GROUPSUBJ}}" creation="{{GROUP_CREATION_TIME}}"
creator="{{CREATOR_JID}}" s_t="{{SUBJECT_SET_TIMESTAMP}}" id="{{GROUP_ID}}"
s_o="{{SUBJECT_OWNER_JID}}">
<participant jid="{{PARTICIPANT_JID}}" type="admin"></participant>
<participant jid="{{PARTICIPANT_JID}}"></participant>
<participant jid="{{PARTICIPANT_JID}}"></participant>
</group>
</iq>
'''
TYPE_PARTICIPANT_ADMIN = "admin"
def __init__(self, _id, _from,
groupId, creationTimestamp, creatorJid,
subject, subjectTime, subjectOwnerJid,
participants):
super(InfoGroupsResultIqProtocolEntity, self).__init__(_id = _id, _from = _from)
self.setGroupProps(groupId, creationTimestamp, creatorJid,
subject, subjectTime, subjectOwnerJid, participants)
def setGroupProps(self, groupId, creationTimestamp, creatorJid,
subject, subjectTime, subjectOwnerJid,
participants):
assert type(participants) is dict, "Participants must be a dict {jid => type?}"
self.groupId = groupId
self.creationTimestamp = int(creationTimestamp)
self.creatorJid = creatorJid
self.subject = subject
self.subjectTime = int(subjectTime)
self.subjectOwnerJid = subjectOwnerJid
self.participants = participants
def getParticipants(self):
return self.participants
def getSubject(self):
return self.subject
def getGroupId(self):
return self.groupId
def getCreationTimestamp(self):
return self.creationTimestamp
def getCreatorJid(self, full = True):
return self.creatorJid if full else self.creatorJid.split('@')[0]
def getSubjectTimestamp(self):
return self.subjectTime
def getSubjectOwnerJid(self, full = True):
return self.subjectOwnerJid if full else self.subjectOwnerJid.split('@')[0]
def getGroupAdmins(self, full = True):
admins = []
for jid, _type in self.participants.items():
if _type == self.__class__.TYPE_PARTICIPANT_ADMIN:
admins.append(jid if full else jid.split('@')[0])
return admins
def __str__(self):
out = super(InfoGroupsResultIqProtocolEntity, self).__str__()
out += "Group ID: %s\n" % self.groupId
out += "Created: %s\n" % self.creationTimestamp
out += "Creator JID: %s\n" % self.creatorJid
out += "Subject: %s\n" % self.subject
out += "Subject Timestamp: %s\n" % self.subjectTime
out += "Subject owner JID: %s\n" % self.subjectOwnerJid
out += "Participants: %s\n" % self.participants
return out
def toProtocolTreeNode(self):
node = super(InfoGroupsResultIqProtocolEntity, self).toProtocolTreeNode()
groupNode = ProtocolTreeNode("group", {
"subject": self.getSubject(),
"creation": str(self.getCreationTimestamp()),
"creator": self.getCreatorJid(),
"s_t": self.getSubjectTimestamp(),
"s_o": self.getSubjectOwnerJid(),
"id": self.getGroupId()
})
participants = []
for jid, _type in self.getParticipants().items():
pnode = ProtocolTreeNode("participant", {"jid": jid})
if _type:
pnode["type"] = _type
participants.append(pnode)
groupNode.addChildren(participants)
node.addChild(groupNode)
return node
@staticmethod
def fromProtocolTreeNode(node):
groupNode = node.getChild("group")
participants = {}
for p in groupNode.getAllChildren("participant"):
participants[p["jid"]] = p["type"]
return InfoGroupsResultIqProtocolEntity(
node["id"], node["from"],
groupNode["id"], groupNode["creation"], groupNode["creator"], groupNode["subject"],
groupNode["s_t"], groupNode["s_o"], participants
)
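# Round-trip sketch (all jids, ids and timestamps below are hypothetical):
#
#     entity = InfoGroupsResultIqProtocolEntity(
#         "iq-1", "12345-67890@g.us",
#         "12345-67890", 1400000000, "creator@s.whatsapp.net",
#         "My Group", 1400000100, "creator@s.whatsapp.net",
#         {"creator@s.whatsapp.net": "admin", "member@s.whatsapp.net": None})
#     node = entity.toProtocolTreeNode()  # serializes back to the <iq> above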
| gpl-3.0 |
nirb/whatsapp | build/lib/yowsup/layers/protocol_groups/protocolentities/iq_groups_leave.py | 4 | 1130 | from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from .iq_groups import GroupsIqProtocolEntity
class LeaveGroupsIqProtocolEntity(GroupsIqProtocolEntity):
'''
<iq type="set" id="{{id}}" xmlns="w:g", to="g.us">
<leave>
<group id="{{gjid}}"></group>
<leave>
</iq>
'''
def __init__(self, jids, _id = None):
super(LeaveGroupsIqProtocolEntity, self).__init__(to = "g.us", _id = _id, _type = "set")
self.setProps(jids)
def setProps(self, jids):
self.jids = jids
def toProtocolTreeNode(self):
node = super(LeaveGroupsIqProtocolEntity, self).toProtocolTreeNode()
leaveNode = ProtocolTreeNode("leave",{}, [ProtocolTreeNode("group", {"id": jid}) for jid in self.jids])
node.addChild(leaveNode)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = GroupsIqProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = LeaveGroupsIqProtocolEntity
entity.setProps([group.getAttributeValue("id") for group in node.getChild("leave").getAllChildren()])
return entity
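# Usage sketch (the group jid is hypothetical):
#
#     entity = LeaveGroupsIqProtocolEntity(["12345-67890@g.us"])
#     node = entity.toProtocolTreeNode()  # <iq type="set"><leave>...</leave></iq>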
| gpl-3.0 |
ProjectSWGCore/NGECore2 | scripts/mobiles/talus/aakuan_warder.py | 2 | 1640 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('aakuan_warder')
mobileTemplate.setLevel(52)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("aakuans")
mobileTemplate.setAssistRange(0)
mobileTemplate.setStalker(True)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_dressed_aakuan_warder_zabrak_male_01.iff')
templates.add('object/mobile/shared_dressed_aakuan_warder_zabrak_female_01.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/polearm/shared_polearm_vibro_axe.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('meleeHit')
mobileTemplate.setAttacks(attacks)
lootPoolNames_1 = ['Junk']
lootPoolChances_1 = [100]
lootGroupChance_1 = 100
mobileTemplate.addToLootGroups(lootPoolNames_1,lootPoolChances_1,lootGroupChance_1)
core.spawnService.addMobileTemplate('aakuan_warder', mobileTemplate)
return | lgpl-3.0 |
google-research/language | language/conpono/evals/discriminative_eval.py | 1 | 11487 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Evaluate coherent permutation discrimination model on baseline."""
from collections import namedtuple # pylint: disable=g-importing-member
import csv
import json
import random
import time
from absl import app
from absl import flags
from bert import modeling
from bert import tokenization
import numpy as np
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_slices", 25, "Divide dataset into this many slices.")
flags.DEFINE_integer("slice_index", 0, "Evaluate this slice.")
flags.DEFINE_bool("do_reduce", False, "Collect eval numbers and aggregate.")
flags.DEFINE_string("output_dir", None, "The directory to write the output.")
flags.DEFINE_string("model_weights", None, "The pretrained BERT weights.")
flags.DEFINE_string("vocab_file", "", "BERT vocab file.")
flags.DEFINE_string("bert_config", "", "BERT config file.")
flags.DEFINE_string("data_dir", "", "Data directory.")
# pylint: disable=invalid-name
PARA_BREAK = "<para_break>"
model_weights = FLAGS.model_weights
vocab_file = FLAGS.vocab_file
def _restore_checkpoint(init_checkpoint):
"""Restore parameters from checkpoint."""
tvars = tf.trainable_variables()
(assignment_map,
_) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
def read_data(data_file):
"""Read permutation discrimination eval dataset.
Args:
data_file: data file to read from
Returns:
A list of examples (21 permutations where the first is the correct one).
"""
perms = []
with tf.gfile.Open(data_file, "r") as handle:
tsvreader = csv.reader(handle, delimiter="\t")
for line in tsvreader:
if line:
perms.append(line)
examples = []
for perm in perms:
target = perm[1].split("<PUNC>")
distractors = [
list(para.split("<PUNC>")) for para in perm[2].split("<BREAK>")
]
examples.append([target] + distractors)
return examples
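# Expected row layout (inferred from the indexing above; the first column
# is presumably an example id): perm[1] is the correct paragraph with
# sentences joined by <PUNC>, and perm[2] holds the permuted distractors,
# paragraphs joined by <BREAK> and sentences by <PUNC>.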
def read_permutations():
perm_file = FLAGS.data_dir + "/wikiAperm.test.txt"
return read_data(perm_file)
def disc_coherence_scores(examples, tokenizer):
"""Get discriminative coherence scores."""
tf.reset_default_graph()
placeholders, model = create_cpc_model_and_placeholders(2)
with tf.compat.v1.Session() as sess:
_restore_checkpoint(FLAGS.model_weights)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init_op)
# To classify which of the candidate paragraphs is coherent, we compute the
# probability of each paragraph and then select the one with the highest
# likelihood as the coherent paragraph.
# Since our model is bi-directional, we estimate the probability of a
# paragraph by taking the mean of the probabilities of each triple of
# sentences, where we set the middle sentence as the context and the
# sentences before and after as the targets.
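# E.g. for a 5-sentence paragraph [s0, ..., s4] the scored triples are
# (s0, s1, s2), (s1, s2, s3), (s2, s3, s4): context s_{i+1}, targets
# (s_i, s_{i+2}); the per-triple probabilities are summed into a
# paragraph score below.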
all_scores = []
for paragraphs in examples:
scores = []
for paragraph in paragraphs:
partial_paragraph_probs = []
for i in range(len(paragraph) - 2):
sents = paragraph[i:i + 3]
context = sents[1]
targets = sents[:1] + sents[2:3]
e = create_cpc_input_from_text(
tokenizer, context, targets, [], group_size=2)
input_map = {
placeholders.input_ids: e.tokens,
placeholders.input_mask: e.mask,
placeholders.segment_ids: e.seg_ids,
placeholders.softmax_mask: [True] * 8
}
results = sess.run([model.logits], feed_dict=input_map)
# partial_prob = np.sum(np.diagonal(results[0][0][3:5, :]))
diag = np.diagonal(results[0][0][3:5, :])
diag = 1 / (1 + np.exp(-diag))
partial_prob = np.sum(diag)
partial_paragraph_probs.append(partial_prob)
scores.append(np.sum(partial_paragraph_probs))
all_scores.append(scores)
print("Finished", len(all_scores))
tf.logging.info("Finished %d" % len(all_scores))
acc = np.sum([list(np.argsort(score)).index(0) for score in all_scores
]) / float(len(all_scores) * 20)
return acc
def create_cpc_model(model, num_choices, is_training):
"""Creates a classification model.
Args:
model: the BERT model from modeling.py
num_choices: number of negatives samples + 1
is_training: training mode (bool)
Returns:
tuple of (loss, per_example_loss, logits, probabilities) for model
"""
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
with tf.variable_scope("cpc_loss"):
softmax_weights = tf.get_variable(
"softmax_weights", [hidden_size, 8],
initializer=tf.truncated_normal_initializer(stddev=0.02))
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
matmul_out = tf.matmul(output_layer, softmax_weights)
logits = tf.reshape(matmul_out, (-1, num_choices, 8))
logits = tf.transpose(logits, perm=[0, 2, 1])
probabilities = tf.nn.softmax(logits, axis=-1)
return (logits, probabilities)
def create_cpc_model_and_placeholders(num_choices):
"""Build model and placeholders."""
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config)
is_training = False
use_one_hot_embeddings = False
seq_length = 512
Placeholders = namedtuple("Placeholders", [
"input_ids", "input_mask", "segment_ids", "labels", "label_types",
"softmax_mask"
])
input_ids = tf.placeholder(dtype=tf.int32, shape=[None, seq_length])
input_mask = tf.placeholder(dtype=tf.int32, shape=[None, seq_length])
segment_ids = tf.placeholder(dtype=tf.int32, shape=[None, seq_length])
labels = tf.placeholder(dtype=tf.int32, shape=[None, 8])
label_types = tf.placeholder(dtype=tf.int32, shape=[None, 8])
softmax_mask = tf.placeholder(dtype=tf.bool, shape=[None])
placeholders = Placeholders(input_ids, input_mask, segment_ids, labels,
label_types, softmax_mask)
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
logits, probabilities = create_cpc_model(model, num_choices, False)
Model = namedtuple("Model", ["logits", "probabilities"])
model = Model(logits, probabilities)
return placeholders, model
def create_cpc_input_from_text(tokenizer,
context,
sents,
labels,
group_size=32,
max_seq_length=512):
"""Parse text into BERT input."""
Input = namedtuple("Input", ["tokens", "mask", "seg_ids", "labels"])
context = tokenizer.tokenize(context)
sents = [tokenizer.tokenize(sent) for sent in sents]
rng = random.Random()
for sent in sents:
truncate_seq_pair(context, sent, max_seq_length - 3, rng)
tokens_list, input_mask_list, seg_id_list = [], [], []
for sent in sents:
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in context:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for token in sent:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_mask = [1] * len(tokens)
tokens = tokenizer.convert_tokens_to_ids(tokens)
while len(tokens) < max_seq_length:
tokens.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(tokens) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
tokens_list.append(tokens)
input_mask_list.append(input_mask)
seg_id_list.append(segment_ids)
zero_list = [0] * max_seq_length
while len(tokens_list) < group_size:
tokens_list.append(zero_list)
input_mask_list.append(zero_list)
seg_id_list.append(zero_list)
return Input(tokens_list, input_mask_list, seg_id_list, labels)
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info("Evaluating slice %d" % FLAGS.slice_index)
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case=True)
print("Trying to read...")
examples = read_permutations()
print("Load data successful")
if FLAGS.do_reduce:
acc = 0
found = [False] * FLAGS.num_slices
output_filename = FLAGS.output_dir + "/disc_acc.%d.json"
while not all(found):
for i in range(FLAGS.num_slices):
if tf.gfile.Exists(output_filename % i):
found[i] = True
time.sleep(5)
total = 0
numerator = 0
for i in range(FLAGS.num_slices):
with tf.gfile.Open(output_filename % i, "r") as handle:
data = json.load(handle)
total += data["length"]
numerator += data["acc"] * data["length"]
acc = numerator / total
tf.logging.info("Accuracy: " + str(acc))
with tf.gfile.Open(FLAGS.output_dir + "/result.txt", "w") as handle:
handle.write("Accuracy: " + str(acc))
else:
slice_size = int(len(examples) / FLAGS.num_slices)
start = slice_size * FLAGS.slice_index
end = slice_size * (FLAGS.slice_index + 1)
if FLAGS.slice_index == (FLAGS.num_slices - 1):
examples_to_eval = examples[start:]
else:
examples_to_eval = examples[start:end]
accuracy = disc_coherence_scores(examples_to_eval, tokenizer)
tf.logging.info(accuracy)
output_filename = FLAGS.output_dir + "/disc_acc.%d.json" % FLAGS.slice_index
with tf.gfile.Open(output_filename, "w") as handle:
handle.write(
json.dumps({
"length": len(examples_to_eval),
"acc": accuracy
}))
if __name__ == "__main__":
flags.mark_flag_as_required("num_slices")
flags.mark_flag_as_required("slice_index")
flags.mark_flag_as_required("output_dir")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config")
flags.mark_flag_as_required("data_dir")
app.run(main)
| apache-2.0 |
frouty/odoogoeen | addons/sale_order_dates/__openerp__.py | 50 | 1739 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Dates on Sales Order',
'version': '1.0',
'category': 'Sales Management',
'description': """
Add additional date information to the sales order.
===================================================
You can add the following additional dates to a sales order:
------------------------------------------------------------
* Requested Date
* Commitment Date
* Effective Date
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/sale_order_dates.jpeg'],
'depends': ['sale_stock'],
'data': ['sale_order_dates_view.xml'],
'demo': [],
'test': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |