repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
apavlenko/opencv | 3rdparty/jinja2/visitor.py | 1401 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
    """Walks the abstract syntax tree and call visitor functions for every
    node found.  The visitor functions may return values which will be
    forwarded by the `visit` method.

    Per default the visitor functions for the nodes are ``'visit_'`` +
    class name of the node.  So a `TryFinally` node visit function would
    be `visit_TryFinally`.  This behavior can be changed by overriding
    the `get_visitor` function.  If no visitor function exists for a node
    (return value `None`) the `generic_visit` visitor is used instead.
    """

    def get_visitor(self, node):
        """Return the visitor function for this node or `None` if no visitor
        exists for this node.  In that case the generic visit function is
        used instead.
        """
        # Dispatch on the node's class name, e.g. visit_TryFinally.
        return getattr(self, 'visit_' + node.__class__.__name__, None)

    def visit(self, node, *args, **kwargs):
        """Visit a node."""
        handler = self.get_visitor(node)
        if handler is None:
            # No dedicated visitor registered: fall back to the generic walk.
            return self.generic_visit(node, *args, **kwargs)
        return handler(node, *args, **kwargs)

    def generic_visit(self, node, *args, **kwargs):
        """Called if no explicit visitor function exists for a node."""
        for child in node.iter_child_nodes():
            self.visit(child, *args, **kwargs)
class NodeTransformer(NodeVisitor):
    """Walks the abstract syntax tree and allows modifications of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node.  If the return
    value of the visitor function is `None` the node will be removed
    from the previous location otherwise it's replaced with the return
    value.  The return value may be the original node in which case no
    replacement takes place.
    """

    def generic_visit(self, node, *args, **kwargs):
        for field, old_value in node.iter_fields():
            if isinstance(old_value, list):
                replacement = []
                for child in old_value:
                    if isinstance(child, Node):
                        child = self.visit(child, *args, **kwargs)
                        if child is None:
                            # Visitor asked for this node to be removed.
                            continue
                        if not isinstance(child, Node):
                            # Visitor expanded one node into several.
                            replacement.extend(child)
                            continue
                    replacement.append(child)
                # Mutate the existing list in place so other references see it.
                old_value[:] = replacement
            elif isinstance(old_value, Node):
                new_node = self.visit(old_value, *args, **kwargs)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node

    def visit_list(self, node, *args, **kwargs):
        """As transformers may return lists in some places this method
        can be used to enforce a list as return value.
        """
        result = self.visit(node, *args, **kwargs)
        return result if isinstance(result, list) else [result]
| bsd-3-clause |
apollo13/ansible | test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_global/lldp_global.py | 47 | 3926 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The vyos lldp_global fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from re import findall, M
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
utils,
)
from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.lldp_global.lldp_global import (
Lldp_globalArgs,
)
class Lldp_globalFacts(object):
    """The vyos lldp_global fact class.

    Collects the lldp configuration from the device, parses it, and
    populates the facts tree based on that configuration.
    """

    def __init__(self, module, subspec="config", options="options"):
        self._module = module
        self.argument_spec = Lldp_globalArgs.argument_spec
        spec = deepcopy(self.argument_spec)
        # Narrow the argspec down to the requested sub-section, if any.
        if not subspec:
            facts_argument_spec = spec
        elif options:
            facts_argument_spec = spec[subspec][options]
        else:
            facts_argument_spec = spec[subspec]
        self.generated_spec = utils.generate_dict(facts_argument_spec)

    def populate_facts(self, connection, ansible_facts, data=None):
        """Populate the facts for lldp_global.

        :param connection: the device connection
        :param ansible_facts: Facts dictionary
        :param data: previously collected conf
        :rtype: dictionary
        :returns: facts
        """
        if not data:
            data = connection.get_config()
        objs = {}
        lldp_output = findall(r"^set service lldp (\S+)", data, M)
        if lldp_output:
            # Render each distinct lldp sub-option and merge the results.
            for option in set(lldp_output):
                option_lines = findall(r" %s .+$" % option, data, M)
                rendered = self.render_config(option_lines)
                if rendered:
                    objs.update(rendered)
        lldp_service = findall(r"^set service (lldp)?('lldp')", data, M)
        if lldp_service or lldp_output:
            # Any lldp configuration at all means the service is enabled.
            objs.update({"enable": True})
        params = utils.validate_config(self.argument_spec, {"config": objs})
        facts = {"lldp_global": utils.remove_empties(params["config"])}
        ansible_facts["ansible_network_resources"].update(facts)
        return ansible_facts

    def render_config(self, conf):
        """Render config as dictionary structure and delete keys
        from spec for null values.

        :param conf: The configuration lines
        :rtype: dictionary
        :returns: The generated config
        """
        protocol_conf = "\n".join(
            line for line in conf if "legacy-protocols" in line
        )
        att_conf = "\n".join(
            line for line in conf if "legacy-protocols" not in line
        )
        config = self.parse_attribs(["snmp", "address"], att_conf)
        config["legacy_protocols"] = self.parse_protocols(protocol_conf)
        return utils.remove_empties(config)

    def parse_protocols(self, conf):
        """Return the configured legacy protocol names (quotes stripped),
        or None when nothing is configured.
        """
        if not conf:
            return None
        matches = findall(r"^.*legacy-protocols (.+)", conf, M)
        if not matches:
            return None
        return [match.strip("'") for match in matches]

    def parse_attribs(self, attribs, conf):
        """Look up each named attribute in the config, stripping quotes;
        drop empty values via remove_empties.
        """
        config = {}
        for attrib in attribs:
            value = utils.parse_conf_arg(conf, attrib)
            config[attrib] = value.strip("'") if value else None
        return utils.remove_empties(config)
| gpl-3.0 |
jrwdunham/old | onlinelinguisticdatabase/tests/functional/test_speakers.py | 1 | 16641 | # Copyright 2016 Joel Dunham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import simplejson as json
from time import sleep
from nose.tools import nottest
from onlinelinguisticdatabase.tests import TestController, url
import onlinelinguisticdatabase.model as model
from onlinelinguisticdatabase.model.meta import Session
import onlinelinguisticdatabase.lib.helpers as h
from onlinelinguisticdatabase.model import Speaker
log = logging.getLogger(__name__)
################################################################################
# Functions for creating & retrieving test data
################################################################################
class TestSpeakersController(TestController):
    """Functional tests for the speakers resource: index/search, create,
    new, update, delete, show, and edit.

    NOTE(review): every test is decorated with ``@nottest`` in this
    revision, so none of them run under nose -- confirm that is intentional.
    """

    @nottest
    def test_index(self):
        """Tests that GET /speakers returns an array of all speakers and that order_by and pagination parameters work correctly."""
        # Add 100 speakers.
        def create_speaker_from_index(index):
            # Build (but do not persist) a Speaker whose fields embed the index.
            speaker = model.Speaker()
            speaker.first_name = u'John%d' % index
            speaker.last_name = u'Doe%d' % index
            speaker.dialect = u'dialect %d' % index
            speaker.page_content = u'page content %d' % index
            return speaker
        speakers = [create_speaker_from_index(i) for i in range(1, 101)]
        Session.add_all(speakers)
        Session.commit()
        speakers = h.get_speakers(True)
        speakers_count = len(speakers)
        # Test that GET /speakers gives us all of the speakers.
        response = self.app.get(url('speakers'), headers=self.json_headers,
                                extra_environ=self.extra_environ_view)
        resp = json.loads(response.body)
        assert len(resp) == speakers_count
        assert resp[0]['first_name'] == u'John1'
        assert resp[0]['id'] == speakers[0].id
        assert response.content_type == 'application/json'
        # Test the paginator GET params.
        paginator = {'items_per_page': 23, 'page': 3}
        response = self.app.get(url('speakers'), paginator, headers=self.json_headers,
                                extra_environ=self.extra_environ_view)
        resp = json.loads(response.body)
        assert len(resp['items']) == 23
        # Page 3 at 23 items/page starts at index 46.
        assert resp['items'][0]['first_name'] == speakers[46].first_name
        # Test the order_by GET params.
        order_by_params = {'order_by_model': 'Speaker', 'order_by_attribute': 'first_name',
                           'order_by_direction': 'desc'}
        response = self.app.get(url('speakers'), order_by_params,
                                headers=self.json_headers, extra_environ=self.extra_environ_view)
        resp = json.loads(response.body)
        result_set = sorted([s.first_name for s in speakers], reverse=True)
        assert result_set == [s['first_name'] for s in resp]
        # Test the order_by *with* paginator.
        params = {'order_by_model': 'Speaker', 'order_by_attribute': 'first_name',
                  'order_by_direction': 'desc', 'items_per_page': 23, 'page': 3}
        response = self.app.get(url('speakers'), params,
                                headers=self.json_headers, extra_environ=self.extra_environ_view)
        resp = json.loads(response.body)
        assert result_set[46] == resp['items'][0]['first_name']
        # Expect a 400 error when the order_by_direction param is invalid
        order_by_params = {'order_by_model': 'Speaker', 'order_by_attribute': 'first_name',
                           'order_by_direction': 'descending'}
        response = self.app.get(url('speakers'), order_by_params, status=400,
                                headers=self.json_headers, extra_environ=self.extra_environ_view)
        resp = json.loads(response.body)
        assert resp['errors']['order_by_direction'] == u"Value must be one of: asc; desc (not u'descending')"
        assert response.content_type == 'application/json'
        # Expect the default BY id ASCENDING ordering when the order_by_model/Attribute
        # param is invalid.
        order_by_params = {'order_by_model': 'Speakerist', 'order_by_attribute': 'prenom',
                           'order_by_direction': 'desc'}
        response = self.app.get(url('speakers'), order_by_params,
                                headers=self.json_headers, extra_environ=self.extra_environ_view)
        resp = json.loads(response.body)
        assert resp[0]['id'] == speakers[0].id
        # Expect a 400 error when the paginator GET params are empty
        # or are integers less than 1
        paginator = {'items_per_page': u'a', 'page': u''}
        response = self.app.get(url('speakers'), paginator, headers=self.json_headers,
                                extra_environ=self.extra_environ_view, status=400)
        resp = json.loads(response.body)
        assert resp['errors']['items_per_page'] == u'Please enter an integer value'
        assert resp['errors']['page'] == u'Please enter a value'
        assert response.content_type == 'application/json'
        paginator = {'items_per_page': 0, 'page': -1}
        response = self.app.get(url('speakers'), paginator, headers=self.json_headers,
                                extra_environ=self.extra_environ_view, status=400)
        resp = json.loads(response.body)
        assert resp['errors']['items_per_page'] == u'Please enter a number that is 1 or greater'
        assert resp['errors']['page'] == u'Please enter a number that is 1 or greater'
        assert response.content_type == 'application/json'

    @nottest
    def test_create(self):
        """Tests that POST /speakers creates a new speaker
        or returns an appropriate error if the input is invalid.
        """
        original_speaker_count = Session.query(Speaker).count()
        # Create a valid one
        params = self.speaker_create_params.copy()
        params.update({
            'first_name': u'John',
            'last_name': u'Doe',
            'page_content': u'page_content',
            'dialect': u'dialect'
        })
        params = json.dumps(params)
        response = self.app.post(url('speakers'), params, self.json_headers, self.extra_environ_admin)
        resp = json.loads(response.body)
        new_speaker_count = Session.query(Speaker).count()
        assert new_speaker_count == original_speaker_count + 1
        assert resp['first_name'] == u'John'
        assert resp['dialect'] == u'dialect'
        assert response.content_type == 'application/json'
        # Invalid because first_name is too long
        params = self.speaker_create_params.copy()
        params.update({
            'first_name': u'John' * 400,
            'last_name': u'Doe',
            'page_content': u'page_content',
            'dialect': u'dialect'
        })
        params = json.dumps(params)
        response = self.app.post(url('speakers'), params, self.json_headers, self.extra_environ_admin, status=400)
        resp = json.loads(response.body)
        assert resp['errors']['first_name'] == u'Enter a value not more than 255 characters long'
        assert response.content_type == 'application/json'

    @nottest
    def test_new(self):
        """Tests that GET /speakers/new returns an empty JSON object."""
        response = self.app.get(url('new_speaker'), headers=self.json_headers,
                                extra_environ=self.extra_environ_contrib)
        resp = json.loads(response.body)
        assert resp == {}
        assert response.content_type == 'application/json'

    @nottest
    def test_update(self):
        """Tests that PUT /speakers/id updates the speaker with id=id."""
        # Create a speaker to update.
        params = self.speaker_create_params.copy()
        params.update({
            'first_name': u'first_name',
            'last_name': u'last_name',
            'page_content': u'page_content',
            'dialect': u'dialect'
        })
        params = json.dumps(params)
        response = self.app.post(url('speakers'), params, self.json_headers,
                                self.extra_environ_admin)
        resp = json.loads(response.body)
        speaker_count = Session.query(Speaker).count()
        speaker_id = resp['id']
        original_datetime_modified = resp['datetime_modified']
        # Update the speaker
        sleep(1)  # sleep for a second to ensure that MySQL registers a different datetime_modified for the update
        params = self.speaker_create_params.copy()
        params.update({
            'first_name': u'first_name',
            'last_name': u'last_name',
            'page_content': u'page_content',
            'dialect': u'updated dialect.'
        })
        params = json.dumps(params)
        response = self.app.put(url('speaker', id=speaker_id), params, self.json_headers,
                                self.extra_environ_admin)
        resp = json.loads(response.body)
        datetime_modified = resp['datetime_modified']
        new_speaker_count = Session.query(Speaker).count()
        assert speaker_count == new_speaker_count
        assert datetime_modified != original_datetime_modified
        assert response.content_type == 'application/json'
        # Attempt an update with no new input and expect to fail
        sleep(1)  # sleep for a second to ensure that MySQL could register a different datetime_modified for the update
        response = self.app.put(url('speaker', id=speaker_id), params, self.json_headers,
                                self.extra_environ_admin, status=400)
        resp = json.loads(response.body)
        speaker_count = new_speaker_count
        new_speaker_count = Session.query(Speaker).count()
        our_speaker_datetime_modified = Session.query(Speaker).get(speaker_id).datetime_modified
        # A no-op update must not touch datetime_modified or add rows.
        assert our_speaker_datetime_modified.isoformat() == datetime_modified
        assert speaker_count == new_speaker_count
        assert resp['error'] == u'The update request failed because the submitted data were not new.'
        assert response.content_type == 'application/json'

    @nottest
    def test_delete(self):
        """Tests that DELETE /speakers/id deletes the speaker with id=id."""
        # Create a speaker to delete.
        params = self.speaker_create_params.copy()
        params.update({
            'first_name': u'first_name',
            'last_name': u'last_name',
            'page_content': u'page_content',
            'dialect': u'dialect'
        })
        params = json.dumps(params)
        response = self.app.post(url('speakers'), params, self.json_headers,
                                self.extra_environ_admin)
        resp = json.loads(response.body)
        speaker_count = Session.query(Speaker).count()
        speaker_id = resp['id']
        # Now delete the speaker
        response = self.app.delete(url('speaker', id=speaker_id), headers=self.json_headers,
                                extra_environ=self.extra_environ_admin)
        resp = json.loads(response.body)
        new_speaker_count = Session.query(Speaker).count()
        assert new_speaker_count == speaker_count - 1
        assert resp['id'] == speaker_id
        assert response.content_type == 'application/json'
        # Trying to get the deleted speaker from the db should return None
        deleted_speaker = Session.query(Speaker).get(speaker_id)
        assert deleted_speaker == None
        assert response.content_type == 'application/json'
        # Delete with an invalid id
        id = 9999999999999
        response = self.app.delete(url('speaker', id=id),
                                headers=self.json_headers, extra_environ=self.extra_environ_admin,
                                status=404)
        assert u'There is no speaker with id %s' % id in json.loads(response.body)['error']
        assert response.content_type == 'application/json'
        # Delete without an id
        response = self.app.delete(url('speaker', id=''), status=404,
                                headers=self.json_headers, extra_environ=self.extra_environ_admin)
        assert json.loads(response.body)['error'] == 'The resource could not be found.'
        assert response.content_type == 'application/json'

    @nottest
    def test_show(self):
        """Tests that GET /speakers/id returns the speaker with id=id or an appropriate error."""
        # Create a speaker to show.
        params = self.speaker_create_params.copy()
        params.update({
            'first_name': u'first_name',
            'last_name': u'last_name',
            'page_content': u'page_content',
            'dialect': u'dialect'
        })
        params = json.dumps(params)
        response = self.app.post(url('speakers'), params, self.json_headers,
                                self.extra_environ_admin)
        resp = json.loads(response.body)
        speaker_id = resp['id']
        # Try to get a speaker using an invalid id
        id = 100000000000
        response = self.app.get(url('speaker', id=id),
                                headers=self.json_headers, extra_environ=self.extra_environ_admin,
                                status=404)
        resp = json.loads(response.body)
        assert u'There is no speaker with id %s' % id in json.loads(response.body)['error']
        assert response.content_type == 'application/json'
        # No id
        response = self.app.get(url('speaker', id=''), status=404,
                                headers=self.json_headers, extra_environ=self.extra_environ_admin)
        assert json.loads(response.body)['error'] == 'The resource could not be found.'
        assert response.content_type == 'application/json'
        # Valid id
        response = self.app.get(url('speaker', id=speaker_id), headers=self.json_headers,
                                extra_environ=self.extra_environ_admin)
        resp = json.loads(response.body)
        assert resp['first_name'] == u'first_name'
        assert resp['dialect'] == u'dialect'
        assert response.content_type == 'application/json'

    @nottest
    def test_edit(self):
        """Tests that GET /speakers/id/edit returns a JSON object of data necessary to edit the speaker with id=id.
        The JSON object is of the form {'speaker': {...}, 'data': {...}} or
        {'error': '...'} (with a 404 status code) depending on whether the id is
        valid or invalid/unspecified, respectively.
        """
        # Create a speaker to edit.
        params = self.speaker_create_params.copy()
        params.update({
            'first_name': u'first_name',
            'last_name': u'last_name',
            'page_content': u'page_content',
            'dialect': u'dialect'
        })
        params = json.dumps(params)
        response = self.app.post(url('speakers'), params, self.json_headers,
                                self.extra_environ_admin)
        resp = json.loads(response.body)
        speaker_id = resp['id']
        # Not logged in: expect 401 Unauthorized
        response = self.app.get(url('edit_speaker', id=speaker_id), status=401)
        resp = json.loads(response.body)
        assert resp['error'] == u'Authentication is required to access this resource.'
        assert response.content_type == 'application/json'
        # Invalid id
        id = 9876544
        response = self.app.get(url('edit_speaker', id=id),
                                headers=self.json_headers, extra_environ=self.extra_environ_admin,
                                status=404)
        assert u'There is no speaker with id %s' % id in json.loads(response.body)['error']
        assert response.content_type == 'application/json'
        # No id
        response = self.app.get(url('edit_speaker', id=''), status=404,
                                headers=self.json_headers, extra_environ=self.extra_environ_admin)
        assert json.loads(response.body)['error'] == 'The resource could not be found.'
        assert response.content_type == 'application/json'
        # Valid id
        response = self.app.get(url('edit_speaker', id=speaker_id),
                                headers=self.json_headers, extra_environ=self.extra_environ_admin)
        resp = json.loads(response.body)
        assert resp['speaker']['first_name'] == u'first_name'
        assert resp['data'] == {}
        assert response.content_type == 'application/json'
| apache-2.0 |
open-health-hub/the_centile_app | node_modules/ewdjs/node_modules/socket.io/node_modules/engine.io/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 2214 | 1347 | #!/usr/bin/env python
import re
import json
# http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
    """Return the Unicode symbol for *codePoint*, or the string 'Error'
    when the value is outside the Unicode range."""
    # BMP code points map directly to a single code unit.
    if 0x0000 <= codePoint <= 0xFFFF:
        return unichr(codePoint)
    # Astral code points are encoded as a UTF-16 surrogate pair.
    if 0x010000 <= codePoint <= 0x10FFFF:
        offset = codePoint - 0x10000
        high = 0xD800 + int(offset / 0x400)
        low = 0xDC00 + int(offset % 0x400)
        return unichr(high) + unichr(low)
    return 'Error'
def hexify(codePoint):
    """Render a code point as a zero-padded `U+XXXXXX` label."""
    digits = hex(codePoint)[2:].upper()
    return 'U+' + digits.zfill(6)
def writeFile(filename, contents):
    # Echo the file name so the generator shows progress (Python 2 print
    # statement -- this script is Python 2 only).
    print filename
    with open(filename, 'w') as f:
        # Normalize: strip surrounding whitespace, end with a single newline.
        f.write(contents.strip() + '\n')
# Build one record per Unicode code point: the code point number, the
# decoded symbol, and its UTF-8 byte sequence (a latin1-decoded string,
# so each char holds one raw byte).
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
    symbol = unisymbol(codePoint)
    # http://stackoverflow.com/a/17199950/96656
    bytes = symbol.encode('utf8').decode('latin1')  # NOTE: shadows the `bytes` builtin
    data.append({
        'codePoint': codePoint,
        'decoded': symbol,
        'encoded': bytes
    });
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace('  ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
    r'\\u([a-fA-F0-9]{4})',
    lambda match: r'\u{}'.format(match.group(1).upper()),
    jsonData
)
writeFile('data.json', jsonData)
| gpl-3.0 |
bsmedberg/socorro | socorro/unittest/cron/sampleapp.py | 16 | 1051 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
A sample app that is able to exit and spit out specific messages on stdout
and stderr exactly as asked for.
This is used for testing the socorro.cron.base.SubprocessMixin class
To test this app run it like this::
$ ./sampleapp.py 1 foo bar 1> out.log 2> err.log
$ echo $?
1
$ cat out.log
foo
$ cat err.log
bar
"""
if __name__ == '__main__':
    import sys
    from optparse import OptionParser
    parser = OptionParser()
    # Exit status the process should terminate with.
    parser.add_option("--exit", dest="exit_code", type="int", default=0)
    # Text to echo on stdout / stderr, respectively.
    parser.add_option("-o", dest="out", default="")
    parser.add_option("-e", dest="err", default="")
    options, args = parser.parse_args()
    if options.out:
        # Python 2 "print chevron" syntax: write to an explicit stream.
        print >>sys.stdout, options.out
    if options.err:
        print >>sys.stderr, options.err
    sys.exit(options.exit_code)
| mpl-2.0 |
iivic/BoiseStateX | lms/djangoapps/shoppingcart/models.py | 9 | 89079 | """ Models for the shopping cart and assorted purchase types """
from collections import namedtuple
from datetime import datetime
from datetime import timedelta
from decimal import Decimal
import json
import analytics
from io import BytesIO
from django.db.models import Q
import pytz
import logging
import smtplib
import StringIO
import csv
from boto.exception import BotoServerError # this is a super-class of SESError and catches connection errors
from django.dispatch import receiver
from django.db import models
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _, ugettext_lazy
from django.db import transaction
from django.db.models import Sum, Count
from django.db.models.signals import post_save, post_delete
from django.core.urlresolvers import reverse
from model_utils.managers import InheritanceManager
from model_utils.models import TimeStampedModel
from django.core.mail.message import EmailMessage
from xmodule.modulestore.django import modulestore
from eventtracking import tracker
from courseware.courses import get_course_by_id
from config_models.models import ConfigurationModel
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_string
from student.models import CourseEnrollment, UNENROLL_DONE
from util.query import use_read_replica_if_available
from xmodule_django.models import CourseKeyField
from .exceptions import (
InvalidCartItem,
PurchasedCallbackException,
ItemAlreadyInCartException,
AlreadyEnrolledInCourseException,
CourseDoesNotExistException,
MultipleCouponsNotAllowedException,
InvalidStatusToRetire,
UnexpectedOrderItemStatus,
ItemNotFoundInCartException
)
from microsite_configuration import microsite
from shoppingcart.pdf import PDFInvoice
log = logging.getLogger("shoppingcart")

# Choices for Order.status (stored value, human-readable value).
ORDER_STATUSES = (
    # The user is selecting what he/she wants to purchase.
    ('cart', 'cart'),
    # The user has been sent to the external payment processor.
    # At this point, the order should NOT be modified.
    # If the user returns to the payment flow, he/she will start a new order.
    ('paying', 'paying'),
    # The user has successfully purchased the items in the order.
    ('purchased', 'purchased'),
    # The user's order has been refunded.
    ('refunded', 'refunded'),
    # The user's order went through, but the order was erroneously left
    # in 'cart'.
    ('defunct-cart', 'defunct-cart'),
    # The user's order went through, but the order was erroneously left
    # in 'paying'.
    ('defunct-paying', 'defunct-paying'),
)

# maps order statuses to their defunct states
ORDER_STATUS_MAP = {
    'cart': 'defunct-cart',
    'paying': 'defunct-paying',
}

# we need a tuple to represent the primary key of various OrderItem subclasses
OrderItemSubclassPK = namedtuple('OrderItemSubclassPK', ['cls', 'pk'])  # pylint: disable=invalid-name
class OrderTypes(object):
    """
    This class specifies the purchase order types.
    """
    # An individual buying for him/herself.
    PERSONAL = 'personal'
    # A bulk purchase on behalf of an organization.
    BUSINESS = 'business'
    # Choices tuple for Django CharField (stored value, display value).
    ORDER_TYPES = (
        (PERSONAL, 'personal'),
        (BUSINESS, 'business'),
    )
class Order(models.Model):
    """
    This is the model for an order. Before purchase, an Order and its related OrderItems are used
    as the shopping cart.
    FOR ANY USER, THERE SHOULD ONLY EVER BE ZERO OR ONE ORDER WITH STATUS='cart'.
    """
    user = models.ForeignKey(User, db_index=True)
    currency = models.CharField(default="usd", max_length=8)  # lower case ISO currency codes
    # Lifecycle state of the order; see ORDER_STATUSES for the legal values.
    status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES)
    purchase_time = models.DateTimeField(null=True, blank=True)
    refunded_time = models.DateTimeField(null=True, blank=True)
    # Now we store data needed to generate a reasonable receipt
    # These fields only make sense after the purchase
    bill_to_first = models.CharField(max_length=64, blank=True)
    bill_to_last = models.CharField(max_length=64, blank=True)
    bill_to_street1 = models.CharField(max_length=128, blank=True)
    bill_to_street2 = models.CharField(max_length=128, blank=True)
    bill_to_city = models.CharField(max_length=64, blank=True)
    bill_to_state = models.CharField(max_length=8, blank=True)
    bill_to_postalcode = models.CharField(max_length=16, blank=True)
    bill_to_country = models.CharField(max_length=64, blank=True)
    bill_to_ccnum = models.CharField(max_length=8, blank=True)  # last 4 digits
    bill_to_cardtype = models.CharField(max_length=32, blank=True)
    # a JSON dump of the CC processor response, for completeness
    processor_reply_dump = models.TextField(blank=True)
    # bulk purchase registration code workflow billing details
    company_name = models.CharField(max_length=255, null=True, blank=True)
    company_contact_name = models.CharField(max_length=255, null=True, blank=True)
    company_contact_email = models.CharField(max_length=255, null=True, blank=True)
    recipient_name = models.CharField(max_length=255, null=True, blank=True)
    recipient_email = models.CharField(max_length=255, null=True, blank=True)
    customer_reference_number = models.CharField(max_length=63, null=True, blank=True)
    # 'personal' vs 'business'; see OrderTypes and update_order_type().
    order_type = models.CharField(max_length=32, default='personal', choices=OrderTypes.ORDER_TYPES)
    @classmethod
    def get_cart_for_user(cls, user):
        """
        Always use this to preserve the property that at most 1 order per user has status = 'cart'
        """
        # find the newest element in the db
        try:
            cart_order = cls.objects.filter(user=user, status='cart').order_by('-id')[:1].get()
        except ObjectDoesNotExist:
            # if nothing exists in the database, create a new cart
            cart_order, _created = cls.objects.get_or_create(user=user, status='cart')
        return cart_order
@classmethod
def does_user_have_cart(cls, user):
"""
Returns a boolean whether a shopping cart (Order) exists for the specified user
"""
return cls.objects.filter(user=user, status='cart').exists()
@classmethod
def user_cart_has_items(cls, user, item_types=None):
"""
Returns true if the user (anonymous user ok) has
a cart with items in it. (Which means it should be displayed.
If a item_type is passed in, then we check to see if the cart has at least one of
those types of OrderItems
"""
if not user.is_authenticated():
return False
cart = cls.get_cart_for_user(user)
if not item_types:
# check to see if the cart has at least some item in it
return cart.has_items()
else:
# if the caller is explicitly asking to check for particular types
for item_type in item_types:
if cart.has_items(item_type):
return True
return False
    @classmethod
    def remove_cart_item_from_order(cls, item, user):
        """
        Removes the item from the cart if the item.order.status == 'cart'.
        Also removes any code redemption associated with the order_item
        """
        # Only pre-purchase orders (carts) may be modified.
        if item.order.status == 'cart':
            log.info("order item %s removed for user %s", str(item.id), user)
            item.delete()
            # remove any redemption entry associated with the item
            CouponRedemption.remove_code_redemption_from_item(item, user)
@property
def total_cost(self):
"""
Return the total cost of the cart. If the order has been purchased, returns total of
all purchased and not refunded items.
"""
return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status)) # pylint: disable=no-member
def has_items(self, item_type=None):
"""
Does the cart have any items in it?
If an item_type is passed in then we check to see if there are any items of that class type
"""
if not item_type:
return self.orderitem_set.exists() # pylint: disable=no-member
else:
items = self.orderitem_set.all().select_subclasses() # pylint: disable=no-member
for item in items:
if isinstance(item, item_type):
return True
return False
def reset_cart_items_prices(self):
"""
Reset the items price state in the user cart
"""
for item in self.orderitem_set.all(): # pylint: disable=no-member
if item.is_discounted:
item.unit_cost = item.list_price
item.save()
def clear(self):
"""
Clear out all the items in the cart
"""
self.orderitem_set.all().delete() # pylint: disable=no-member
    @transaction.commit_on_success
    def start_purchase(self):
        """
        Start the purchase process. This will set the order status to "paying",
        at which point it should no longer be modified.
        Future calls to `Order.get_cart_for_user()` will filter out orders with
        status "paying", effectively creating a new (empty) cart.
        """
        if self.status == 'cart':
            self.status = 'paying'
            self.save()
            # Give each concrete OrderItem subclass a chance to run its own
            # start-of-purchase hook.
            for item in OrderItem.objects.filter(order=self).select_subclasses():
                item.start_purchase()
def update_order_type(self):
    """
    Update the order type based on item quantities.
    This method will inspect the quantity associated with the OrderItem.
    In the application, it is implied that when qty > 1, then the user is to purchase
    'RegistrationCodes' which are randomly generated strings that users can distribute to
    others in order for them to enroll in paywalled courses.
    Converts items between PaidCourseRegistration and CourseRegCodeItem
    accordingly, preserving discounted prices, and returns a list of
    {"oldId": ..., "newId": ...} dicts mapping each replaced item id to its
    replacement.
    The UI/UX may change in the future to make the switching between PaidCourseRegistration
    and CourseRegCodeItems a more explicit UI gesture from the purchaser
    """
    cart_items = self.orderitem_set.all()  # pylint: disable=no-member
    # the order is a "business" order iff any line item has qty > 1
    is_order_type_business = False
    for cart_item in cart_items:
        if cart_item.qty > 1:
            is_order_type_business = True
    items_to_delete = []
    old_to_new_id_map = []
    if is_order_type_business:
        # replace each PaidCourseRegistration with an equivalent CourseRegCodeItem
        for cart_item in cart_items:
            if hasattr(cart_item, 'paidcourseregistration'):
                course_reg_code_item = CourseRegCodeItem.add_to_order(self, cart_item.paidcourseregistration.course_id, cart_item.qty)
                # update the discounted prices if coupon redemption applied
                course_reg_code_item.list_price = cart_item.list_price
                course_reg_code_item.unit_cost = cart_item.unit_cost
                course_reg_code_item.save()
                items_to_delete.append(cart_item)
                old_to_new_id_map.append({"oldId": cart_item.id, "newId": course_reg_code_item.id})
    else:
        # replace each CourseRegCodeItem with an equivalent PaidCourseRegistration
        for cart_item in cart_items:
            if hasattr(cart_item, 'courseregcodeitem'):
                paid_course_registration = PaidCourseRegistration.add_to_order(self, cart_item.courseregcodeitem.course_id)
                # update the discounted prices if coupon redemption applied
                paid_course_registration.list_price = cart_item.list_price
                paid_course_registration.unit_cost = cart_item.unit_cost
                paid_course_registration.save()
                items_to_delete.append(cart_item)
                old_to_new_id_map.append({"oldId": cart_item.id, "newId": paid_course_registration.id})
    # delete the replaced items only after all replacements succeeded
    for item in items_to_delete:
        item.delete()
    self.order_type = OrderTypes.BUSINESS if is_order_type_business else OrderTypes.PERSONAL
    self.save()
    return old_to_new_id_map
def generate_pdf_receipt(self, order_items):
    """
    Render a PDF receipt for the given order items and return the
    BytesIO buffer it was written into.
    """
    items_data = [
        {
            'item_description': item.pdf_receipt_display_name,
            'quantity': item.qty,
            'list_price': item.get_list_price(),
            'discount': item.get_list_price() - item.unit_cost,
            'item_total': item.qty * item.unit_cost,
        }
        for item in order_items
    ]
    pdf_buffer = BytesIO()
    receipt = PDFInvoice(
        items_data=items_data,
        item_id=str(self.id),  # pylint: disable=no-member
        date=self.purchase_time,
        is_invoice=False,
        total_cost=self.total_cost,
        payment_received=self.total_cost,
        balance=0
    )
    receipt.generate_pdf(pdf_buffer)
    return pdf_buffer
def generate_registration_codes_csv(self, orderitems, site_name):
    """
    Generate a CSV listing every registration code purchased in this order.
    Columns: Course Name, Registration Code, redemption URL.
    Returns a (csv_file, course_info) pair, where ``csv_file`` is a StringIO
    buffer holding the CSV and ``course_info`` is a list of
    (display_name, " (start-end)") tuples, one per order item.
    """
    course_info = []
    csv_file = StringIO.StringIO()
    csv_writer = csv.writer(csv_file)
    csv_writer.writerow(['Course Name', 'Registration Code', 'URL'])
    for item in orderitems:
        course_id = item.course_id
        # use the course_id fetched above; the original re-read it with a
        # redundant getattr(item, 'course_id')
        course = get_course_by_id(course_id, depth=0)
        registration_codes = CourseRegistrationCode.objects.filter(course_id=course_id, order=self)
        course_info.append((course.display_name, ' (' + course.start_datetime_text() + '-' + course.end_datetime_text() + ')'))
        for registration_code in registration_codes:
            redemption_url = reverse('register_code_redemption', args=[registration_code.code])
            url = '{base_url}{redemption_url}'.format(base_url=site_name, redemption_url=redemption_url)
            # csv module is byte-oriented in Python 2, so encode explicitly
            csv_writer.writerow([unicode(course.display_name).encode("utf-8"), registration_code.code, url])
    return csv_file, course_info
def send_confirmation_emails(self, orderitems, is_order_type_business, csv_file, pdf_file, site_name, courses_info):
    """
    Send an order-confirmation e-mail to each interested party.
    Recipients: the purchasing user, plus (when set on the order) the company
    contact and the invoice recipient. Business orders are sent as HTML with
    the registration-code CSV and PDF receipt attached; single-seat orders are
    plain text. Mail-backend failures are logged and swallowed so an e-mail
    problem never breaks the purchase flow.
    """
    # each entry is a (display name, email address, recipient_type) triple
    recipient_list = [(self.user.username, getattr(self.user, 'email'), 'user')]  # pylint: disable=no-member
    if self.company_contact_email:
        recipient_list.append((self.company_contact_name, self.company_contact_email, 'company_contact'))
    joined_course_names = ""
    if self.recipient_email:
        recipient_list.append((self.recipient_name, self.recipient_email, 'email_recipient'))
        courses_names_with_dates = [course_info[0] + course_info[1] for course_info in courses_info]
        joined_course_names = " " + ", ".join(courses_names_with_dates)
    if not is_order_type_business:
        subject = _("Order Payment Confirmation")
    else:
        subject = _('Confirmation and Registration Codes for the following courses: {course_name_list}').format(
            course_name_list=joined_course_names
        )
    dashboard_url = '{base_url}{dashboard}'.format(
        base_url=site_name,
        dashboard=reverse('dashboard')
    )
    try:
        from_address = microsite.get_value(
            'email_from_address',
            settings.PAYMENT_SUPPORT_EMAIL
        )
        # Send a unique email for each recipient. Don't put all email addresses in a single email.
        for recipient in recipient_list:
            message = render_to_string(
                'emails/business_order_confirmation_email.txt' if is_order_type_business else 'emails/order_confirmation_email.txt',
                {
                    'order': self,
                    'recipient_name': recipient[0],
                    'recipient_type': recipient[2],
                    'site_name': site_name,
                    'order_items': orderitems,
                    'course_names': ", ".join([course_info[0] for course_info in courses_info]),
                    'dashboard_url': dashboard_url,
                    'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
                    'order_placed_by': '{username} ({email})'.format(username=self.user.username, email=getattr(self.user, 'email')),  # pylint: disable=no-member
                    'has_billing_info': settings.FEATURES['STORE_BILLING_INFO'],
                    'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
                    'payment_support_email': microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL),
                    'payment_email_signature': microsite.get_value('payment_email_signature'),
                }
            )
            email = EmailMessage(
                subject=subject,
                body=message,
                from_email=from_address,
                to=[recipient[1]]
            )
            # Only the business order is HTML formatted. A single seat order confirmation is plain text.
            if is_order_type_business:
                email.content_subtype = "html"
                if csv_file:
                    email.attach(u'RegistrationCodesRedemptionUrls.csv', csv_file.getvalue(), 'text/csv')
                if pdf_file is not None:
                    email.attach(u'Receipt.pdf', pdf_file.getvalue(), 'application/pdf')
                else:
                    # receipt generation failed earlier; attach an apology instead
                    file_buffer = StringIO.StringIO(_('pdf download unavailable right now, please contact support.'))
                    email.attach(u'pdf_not_available.txt', file_buffer.getvalue(), 'text/plain')
            email.send()
    except (smtplib.SMTPException, BotoServerError):  # sadly need to handle diff. mail backends individually
        log.error('Failed sending confirmation e-mail for order %d', self.id)  # pylint: disable=no-member
def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='',
             country='', ccnum='', cardtype='', processor_reply_dump=''):
    """
    Call to mark this order as purchased. Iterates through its OrderItems and calls
    their purchased_callback
    `first` - first name of person billed (e.g. John)
    `last` - last name of person billed (e.g. Smith)
    `street1` - first line of a street address of the billing address (e.g. 11 Cambridge Center)
    `street2` - second line of a street address of the billing address (e.g. Suite 101)
    `city` - city of the billing address (e.g. Cambridge)
    `state` - code of the state, province, or territory of the billing address (e.g. MA)
    `postalcode` - postal code of the billing address (e.g. 02142)
    `country` - country code of the billing address (e.g. US)
    `ccnum` - last 4 digits of the credit card number of the credit card billed (e.g. 1111)
    `cardtype` - 3-digit code representing the card type used (e.g. 001)
    `processor_reply_dump` - all the parameters returned by the processor
    """
    if self.status == 'purchased':
        # guard against double purchase: running this twice would re-fire
        # item callbacks and re-send confirmation e-mails
        log.error(
            u"`purchase` method called on order {}, but order is already purchased.".format(self.id)  # pylint: disable=no-member
        )
        return
    self.status = 'purchased'
    self.purchase_time = datetime.now(pytz.utc)
    self.bill_to_first = first
    self.bill_to_last = last
    self.bill_to_city = city
    self.bill_to_state = state
    self.bill_to_country = country
    self.bill_to_postalcode = postalcode
    # full street address and card details are only persisted when the
    # deployment opts in via the STORE_BILLING_INFO feature flag
    if settings.FEATURES['STORE_BILLING_INFO']:
        self.bill_to_street1 = street1
        self.bill_to_street2 = street2
        self.bill_to_ccnum = ccnum
        self.bill_to_cardtype = cardtype
        self.processor_reply_dump = processor_reply_dump
    # save these changes on the order, then we can tell when we are in an
    # inconsistent state
    self.save()
    # this should return all of the objects with the correct types of the
    # subclasses
    orderitems = OrderItem.objects.filter(order=self).select_subclasses()
    site_name = microsite.get_value('SITE_NAME', settings.SITE_NAME)
    if self.order_type == OrderTypes.BUSINESS:
        self.update_order_type()
    for item in orderitems:
        item.purchase_item()
    csv_file = None
    courses_info = []
    if self.order_type == OrderTypes.BUSINESS:
        #
        # Generate the CSV file that contains all of the RegistrationCodes that have already been
        # generated when the purchase has transacted
        #
        csv_file, courses_info = self.generate_registration_codes_csv(orderitems, site_name)
    try:
        pdf_file = self.generate_pdf_receipt(orderitems)
    except Exception:  # pylint: disable=broad-except
        # a receipt failure must not block the purchase itself
        log.exception('Exception at creating pdf file.')
        pdf_file = None
    try:
        self.send_confirmation_emails(
            orderitems, self.order_type == OrderTypes.BUSINESS,
            csv_file, pdf_file, site_name, courses_info
        )
    except Exception:  # pylint: disable=broad-except
        # Catch all exceptions here, since the Django view implicitly
        # wraps this in a transaction. If the order completes successfully,
        # we don't want to roll back just because we couldn't send
        # the confirmation email.
        log.exception('Error occurred while sending payment confirmation email')
    self._emit_order_event('Completed Order', orderitems)
def refund(self):
    """
    Mark this order as refunded and emit the corresponding analytics event.
    As of right now this only changes the status; no money movement happens here.
    """
    self.status = 'refunded'
    self.save()
    refunded_items = OrderItem.objects.filter(order=self).select_subclasses()
    self._emit_order_event('Refunded Order', refunded_items)
def _emit_order_event(self, event_name, orderitems):
    """
    Emit an analytics event with the given name for this Order. Will iterate over all associated
    OrderItems and add them as products in the event as well.
    Never raises: any failure is logged, since analytics must not break the
    calling operation.
    """
    try:
        # only track when analytics is configured for this deployment
        if settings.LMS_SEGMENT_KEY:
            tracking_context = tracker.get_tracker().resolve_context()
            analytics.track(self.user.id, event_name, {  # pylint: disable=no-member
                'orderId': self.id,  # pylint: disable=no-member
                'total': str(self.total_cost),
                'currency': self.currency,
                'products': [item.analytics_data() for item in orderitems]
            }, context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            })
    except Exception:  # pylint: disable=broad-except
        # Capturing all exceptions thrown while tracking analytics events. We do not want
        # an operation to fail because of an analytics event, so we will capture these
        # errors in the logs.
        log.exception(
            u'Unable to emit {event} event for user {user} and order {order}'.format(
                event=event_name, user=self.user.id, order=self.id)  # pylint: disable=no-member
        )
def add_billing_details(self, company_name='', company_contact_name='', company_contact_email='', recipient_name='',
                        recipient_email='', customer_reference_number=''):
    """
    This function is called after the user selects a purchase type of "Business" and
    is asked to enter the optional billing details. The billing details are updated
    for that order.
    company_name - Name of purchasing organization
    company_contact_name - Name of the key contact at the company the sale was made to
    company_contact_email - Email of the key contact at the company the sale was made to
    recipient_name - Name of the company should the invoice be sent to
    recipient_email - Email of the company should the invoice be sent to
    customer_reference_number - purchase order number of the organization associated with this Order
    All fields default to the empty string, so omitted details blank out any
    previously stored value.
    """
    self.company_name = company_name
    self.company_contact_name = company_contact_name
    self.company_contact_email = company_contact_email
    self.recipient_name = recipient_name
    self.recipient_email = recipient_email
    self.customer_reference_number = customer_reference_number
    self.save()
def generate_receipt_instructions(self):
    """
    Collect receipt instructions from every item in the order; these are
    displayed on the receipt page (e.g. "visit your dashboard to see your
    new courses").
    Returns a pair: a dict keyed by OrderItemSubclassPK mapping each item
    to its set of HTML instruction snippets, and a de-duplicated set of
    all snippets across the whole order.
    """
    instruction_dict = {}
    all_instructions = set()
    for order_item in OrderItem.objects.filter(order=self).select_subclasses():
        pk_with_class, html_snippets = order_item.generate_receipt_instructions()
        instruction_dict[pk_with_class] = html_snippets
        all_instructions.update(html_snippets)
    return instruction_dict, all_instructions
def retire(self):
    """
    "Retire" an order that went through to the payment service but
    (erroneously) never had its status updated.
    Preconditions:
    1) the order status is 'cart' or 'paying', otherwise
       InvalidStatusToRetire is raised;
    2) every order item's status matches the order's status, otherwise
       UnexpectedOrderItemStatus is raised.
    Orders whose status is already a retired one are a no-op.
    """
    if self.status in ORDER_STATUS_MAP.values():
        # already retired: nothing to do
        return
    if self.status not in ORDER_STATUS_MAP:
        raise InvalidStatusToRetire(
            "order status {order_status} is not 'paying' or 'cart'".format(
                order_status=self.status
            )
        )
    # validate every item first so we never retire a half-consistent order
    for order_item in self.orderitem_set.all():  # pylint: disable=no-member
        if order_item.status != self.status:
            raise UnexpectedOrderItemStatus(
                "order_item status is different from order status"
            )
    self.status = ORDER_STATUS_MAP[self.status]
    self.save()
    for order_item in self.orderitem_set.all():  # pylint: disable=no-member
        order_item.retire()
def find_item_by_course_id(self, course_id):
    """
    Return every OrderItem in this order whose course_id equals ``course_id``.
    Raises ItemNotFoundInCartException when no such item is present.
    """
    candidates = OrderItem.objects.filter(order=self).select_subclasses()
    matches = [
        item for item in candidates
        if getattr(item, 'course_id', None) and item.course_id == course_id
    ]
    if not matches:
        raise ItemNotFoundInCartException
    return matches
class OrderItem(TimeStampedModel):
    """
    This is the basic interface for order items.
    Order items are line items that fill up the shopping carts and orders.
    Each implementation of OrderItem should provide its own purchased_callback as
    a method.
    """
    # InheritanceManager lets queries return instances of the concrete subclass
    objects = InheritanceManager()
    order = models.ForeignKey(Order, db_index=True)
    # this is denormalized, but convenient for SQL queries for reports, etc. user should always be = order.user
    user = models.ForeignKey(User, db_index=True)
    # this is denormalized, but convenient for SQL queries for reports, etc. status should always be = order.status
    status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES, db_index=True)
    qty = models.IntegerField(default=1)
    # amount actually charged per unit (after any discount)
    unit_cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
    # pre-discount price; nullable on legacy rows (see is_discounted / get_list_price)
    list_price = models.DecimalField(decimal_places=2, max_digits=30, null=True)
    line_desc = models.CharField(default="Misc. Item", max_length=1024)
    currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
    fulfilled_time = models.DateTimeField(null=True, db_index=True)
    refund_requested_time = models.DateTimeField(null=True, db_index=True)
    service_fee = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
    # general purpose field, not user-visible. Used for reporting
    report_comments = models.TextField(default="")
    @property
    def line_cost(self):
        """ Return the total cost of this OrderItem (qty * unit_cost). """
        return self.qty * self.unit_cost
    @classmethod
    def add_to_order(cls, order, *args, **kwargs):
        """
        A suggested convenience function for subclasses.
        NOTE: This does not add anything to the cart. That is left up to the
        subclasses to implement for themselves
        Raises InvalidCartItem when the item's currency differs from that of
        a non-empty order.
        """
        # this is a validation step to verify that the currency of the item we
        # are adding is the same as the currency of the order we are adding it
        # to
        currency = kwargs.get('currency', 'usd')
        if order.currency != currency and order.orderitem_set.exists():
            raise InvalidCartItem(_("Trying to add a different currency into the cart"))
    @transaction.commit_on_success
    def purchase_item(self):
        """
        This is basically a wrapper around purchased_callback that handles
        modifying the OrderItem itself
        """
        self.purchased_callback()
        self.status = 'purchased'
        self.fulfilled_time = datetime.now(pytz.utc)
        self.save()
    def start_purchase(self):
        """
        Start the purchase process. This will set the order item status to "paying",
        at which point it should no longer be modified.
        """
        self.status = 'paying'
        self.save()
    def purchased_callback(self):
        """
        This is called on each inventory item in the shopping cart when the
        purchase goes through.
        Subclasses must override this; the base implementation raises.
        """
        raise NotImplementedError
    def generate_receipt_instructions(self):
        """
        This is called on each item in a purchased order to generate receipt instructions.
        This should return a list of `ReceiptInstruction`s in HTML string
        Default implementation is to return an empty set
        """
        return self.pk_with_subclass, set([])
    @property
    def pk_with_subclass(self):
        """
        Returns a named tuple that annotates the pk of this instance with its class, to fully represent
        a pk of a subclass (inclusive) of OrderItem
        """
        return OrderItemSubclassPK(type(self), self.pk)
    @property
    def is_discounted(self):
        """
        Returns True if a discount coupon has been applied to the OrderItem and False otherwise.
        Earlier, the OrderItems were stored with an empty list_price if a discount had not been applied.
        Now we consider the item to be non discounted if list_price is None or list_price == unit_cost. In
        these lines, an item is discounted if list_price is non-None and list_price and unit_cost mismatch.
        This should work with both new and old records.
        """
        return self.list_price and self.list_price != self.unit_cost
    def get_list_price(self):
        """
        Returns the unit_cost if no discount has been applied, or the list_price if it is defined.
        """
        return self.list_price if self.list_price else self.unit_cost
    @property
    def single_item_receipt_template(self):
        """
        The template that should be used when there's only one item in the order
        """
        return 'shoppingcart/receipt.html'
    @property
    def single_item_receipt_context(self):
        """
        Extra variables needed to render the template specified in
        `single_item_receipt_template`
        """
        return {}
    def additional_instruction_text(self, **kwargs):  # pylint: disable=unused-argument
        """
        Individual instructions for this order item.
        Currently, only used for emails.
        """
        return ''
    @property
    def pdf_receipt_display_name(self):
        """
        How to display this item on a PDF printed receipt file.
        This can be overridden by the subclasses of OrderItem
        Raises for items without a course_id; non-course subclasses must
        override this property.
        """
        course_key = getattr(self, 'course_id', None)
        if course_key:
            course = get_course_by_id(course_key, depth=0)
            return course.display_name
        else:
            raise Exception(
                "Not Implemented. OrderItems that are not Course specific should have"
                " a overridden pdf_receipt_display_name property"
            )
    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.
        The default implementation returns defaults for most attributes. When no name or
        category is specified by the implementation, the string 'N/A' is placed for the
        name and category. This should be handled appropriately by all implementations.
        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        return {
            'id': self.id,  # pylint: disable=no-member
            'sku': type(self).__name__,
            'name': 'N/A',
            'price': str(self.unit_cost),
            'quantity': self.qty,
            'category': 'N/A',
        }
    def retire(self):
        """
        Called by the `retire` method defined in the `Order` class. Retires
        an order item if its (and its order's) status was erroneously not
        updated to "purchased" after the order was processed.
        """
        self.status = ORDER_STATUS_MAP[self.status]
        self.save()
class Invoice(TimeStampedModel):
    """
    This table capture all the information needed to support "invoicing"
    which is when a user wants to purchase Registration Codes,
    but will not do so via a Credit Card transaction.
    """
    company_name = models.CharField(max_length=255, db_index=True)
    company_contact_name = models.CharField(max_length=255)
    company_contact_email = models.CharField(max_length=255)
    recipient_name = models.CharField(max_length=255)
    recipient_email = models.CharField(max_length=255)
    address_line_1 = models.CharField(max_length=255)
    address_line_2 = models.CharField(max_length=255, null=True, blank=True)
    address_line_3 = models.CharField(max_length=255, null=True, blank=True)
    city = models.CharField(max_length=255, null=True)
    state = models.CharField(max_length=255, null=True)
    zip = models.CharField(max_length=15, null=True)
    country = models.CharField(max_length=64, null=True)
    # This field has been deprecated.
    # The total amount can now be calculated as the sum
    # of each invoice item associated with the invoice.
    # For backwards compatibility, this field is maintained
    # and written to during invoice creation.
    total_amount = models.FloatField()
    # This field has been deprecated in order to support
    # invoices for items that are not course-related.
    # Although this field is still maintained for backwards
    # compatibility, you should use CourseRegistrationCodeInvoiceItem
    # to look up the course ID for purchased redeem codes.
    course_id = CourseKeyField(max_length=255, db_index=True)
    internal_reference = models.CharField(
        max_length=255,
        null=True,
        blank=True,
        help_text=ugettext_lazy("Internal reference code for this invoice.")
    )
    customer_reference_number = models.CharField(
        max_length=63,
        null=True,
        blank=True,
        help_text=ugettext_lazy("Customer's reference code for this invoice.")
    )
    # invalidated invoices are retained but excluded from course totals
    is_valid = models.BooleanField(default=True)
    @classmethod
    def get_invoice_total_amount_for_course(cls, course_key):
        """
        Return the sum of `total_amount` over all valid invoices for the
        given course, or 0 when there are none.
        """
        result = cls.objects.filter(course_id=course_key, is_valid=True).aggregate(total=Sum('total_amount'))
        total = result.get('total', 0)
        # the aggregate yields None when no rows matched; normalize to 0
        return total if total else 0
    def generate_pdf_invoice(self, course, course_price, quantity, sale_price):
        """
        Generates the pdf invoice for the given course
        and returns the pdf_buffer.
        """
        # per-unit discount = course_price - (sale_price / quantity);
        # the division binds tighter than the subtraction here.
        # NOTE(review): if sale_price and quantity are both ints this is
        # Python 2 floor division -- confirm callers pass a float price.
        discount_per_item = float(course_price) - sale_price / quantity
        list_price = course_price - discount_per_item
        items_data = [{
            'item_description': course.display_name,
            'quantity': quantity,
            'list_price': list_price,
            'discount': discount_per_item,
            'item_total': quantity * list_price
        }]
        pdf_buffer = BytesIO()
        PDFInvoice(
            items_data=items_data,
            item_id=str(self.id),  # pylint: disable=no-member
            date=datetime.now(pytz.utc),
            is_invoice=True,
            total_cost=float(self.total_amount),
            payment_received=0,
            balance=float(self.total_amount)
        ).generate_pdf(pdf_buffer)
        return pdf_buffer
    def snapshot(self):
        """Create a snapshot of the invoice.
        A snapshot is a JSON-serializable representation
        of the invoice's state, including its line items
        and associated transactions (payments/refunds).
        This is useful for saving the history of changes
        to the invoice.
        Returns:
            dict
        """
        return {
            'internal_reference': self.internal_reference,
            'customer_reference': self.customer_reference_number,
            'is_valid': self.is_valid,
            'contact_info': {
                'company_name': self.company_name,
                'company_contact_name': self.company_contact_name,
                'company_contact_email': self.company_contact_email,
                'recipient_name': self.recipient_name,
                'recipient_email': self.recipient_email,
                'address_line_1': self.address_line_1,
                'address_line_2': self.address_line_2,
                'address_line_3': self.address_line_3,
                'city': self.city,
                'state': self.state,
                'zip': self.zip,
                'country': self.country,
            },
            'items': [
                item.snapshot()
                for item in InvoiceItem.objects.filter(invoice=self).select_subclasses()
            ],
            'transactions': [
                trans.snapshot()
                for trans in InvoiceTransaction.objects.filter(invoice=self)
            ],
        }
    def __unicode__(self):
        """Display as the internal reference (or "No label") plus creation date."""
        label = (
            unicode(self.internal_reference)
            if self.internal_reference
            else u"No label"
        )
        created = (
            self.created.strftime("%Y-%m-%d")  # pylint: disable=no-member
            if self.created
            else u"No date"
        )
        return u"{label} ({date_created})".format(
            label=label, date_created=created
        )
# Allowed (stored value, display value) choices for InvoiceTransaction.status.
INVOICE_TRANSACTION_STATUSES = (
    # A payment/refund is in process, but money has not yet been transferred
    ('started', 'started'),
    # A payment/refund has completed successfully
    # This should be set ONLY once money has been successfully exchanged.
    ('completed', 'completed'),
    # A payment/refund was promised, but was cancelled before
    # money had been transferred. An example would be
    # cancelling a refund check before the recipient has
    # a chance to deposit it.
    ('cancelled', 'cancelled')
)
class InvoiceTransaction(TimeStampedModel):
    """Record payment and refund information for invoices.
    There are two expected use cases:
    1) We send an invoice to someone, and they send us a check.
       We then manually create an invoice transaction to represent
       the payment.
    2) We send an invoice to someone, and they pay us. Later, we
       need to issue a refund for the payment. We manually
       create a transaction with a negative amount to represent
       the refund.
    """
    invoice = models.ForeignKey(Invoice)
    amount = models.DecimalField(
        default=0.0, decimal_places=2, max_digits=30,
        help_text=ugettext_lazy(
            "The amount of the transaction. Use positive amounts for payments"
            " and negative amounts for refunds."
        )
    )
    currency = models.CharField(
        default="usd",
        max_length=8,
        help_text=ugettext_lazy("Lower-case ISO currency codes")
    )
    comments = models.TextField(
        null=True,
        blank=True,
        help_text=ugettext_lazy("Optional: provide additional information for this transaction")
    )
    status = models.CharField(
        max_length=32,
        default='started',
        choices=INVOICE_TRANSACTION_STATUSES,
        help_text=ugettext_lazy(
            "The status of the payment or refund. "
            "'started' means that payment is expected, but money has not yet been transferred. "
            "'completed' means that the payment or refund was received. "
            "'cancelled' means that payment or refund was expected, but was cancelled before money was transferred. "
        )
    )
    created_by = models.ForeignKey(User)
    last_modified_by = models.ForeignKey(User, related_name='last_modified_by_user')
    @classmethod
    def get_invoice_transaction(cls, invoice_id):
        """
        if found Returns the Invoice Transaction object for the given invoice_id
        else returns None
        """
        # NOTE(review): 'refunded' is not one of INVOICE_TRANSACTION_STATUSES
        # ('started'/'completed'/'cancelled') -- confirm whether the second
        # Q clause can ever match.
        # NOTE(review): .get() will raise MultipleObjectsReturned (uncaught)
        # if more than one matching transaction exists -- confirm uniqueness.
        try:
            return cls.objects.get(Q(invoice_id=invoice_id), Q(status='completed') | Q(status='refunded'))
        except InvoiceTransaction.DoesNotExist:
            return None
    @classmethod
    def get_total_amount_of_paid_course_invoices(cls, course_key):
        """
        Return the summed amount of completed, positive (payment) transactions
        for invoices of the given course, or 0 when there are none.
        """
        result = cls.objects.filter(amount__gt=0, invoice__course_id=course_key, status='completed').aggregate(
            total=Sum('amount')
        )
        total = result.get('total', 0)
        # the aggregate yields None when no rows matched; normalize to 0
        return total if total else 0
    def snapshot(self):
        """Create a snapshot of the invoice transaction.
        The returned dictionary is JSON-serializable.
        Returns:
            dict
        """
        return {
            'amount': unicode(self.amount),
            'currency': self.currency,
            'comments': self.comments,
            'status': self.status,
            'created_by': self.created_by.username,  # pylint: disable=no-member
            'last_modified_by': self.last_modified_by.username  # pylint: disable=no-member
        }
class InvoiceItem(TimeStampedModel):
    """
    This is the basic interface for invoice items.
    Each invoice item represents a "line" in the invoice.
    For example, in an invoice for course registration codes,
    there might be an invoice item representing 10 registration
    codes for the DemoX course.
    """
    # InheritanceManager lets queries return instances of the concrete subclass
    objects = InheritanceManager()
    invoice = models.ForeignKey(Invoice, db_index=True)
    qty = models.IntegerField(
        default=1,
        help_text=ugettext_lazy("The number of items sold.")
    )
    unit_price = models.DecimalField(
        default=0.0,
        decimal_places=2,
        max_digits=30,
        help_text=ugettext_lazy("The price per item sold, including discounts.")
    )
    currency = models.CharField(
        default="usd",
        max_length=8,
        help_text=ugettext_lazy("Lower-case ISO currency codes")
    )
    def snapshot(self):
        """Create a snapshot of the invoice item.
        The returned dictionary is JSON-serializable.
        Returns:
            dict
        """
        return {
            'qty': self.qty,
            'unit_price': unicode(self.unit_price),
            'currency': self.currency
        }
class CourseRegistrationCodeInvoiceItem(InvoiceItem):
    """
    This is an invoice item that represents a payment for
    a course registration.
    """
    course_id = CourseKeyField(max_length=128, db_index=True)
    def snapshot(self):
        """Create a snapshot of the invoice item.
        This is the same as a snapshot for other invoice items,
        with the addition of a `course_id` field.
        Returns:
            dict
        """
        snapshot = super(CourseRegistrationCodeInvoiceItem, self).snapshot()
        snapshot['course_id'] = unicode(self.course_id)
        return snapshot
class InvoiceHistory(models.Model):
    """History of changes to invoices.
    This table stores snapshots of invoice state,
    including the associated line items and transactions
    (payments/refunds).
    Entries in the table are created, but never deleted
    or modified.
    We use Django signals to save history entries on change
    events. These signals are fired within a database
    transaction, so the history record is created only
    if the invoice change is successfully persisted.
    """
    timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
    invoice = models.ForeignKey(Invoice)
    # JSON-serialized representation of the current state
    # of the invoice, including its line items and
    # transactions (payments/refunds).
    snapshot = models.TextField(blank=True)
    @classmethod
    def save_invoice_snapshot(cls, invoice):
        """Save a snapshot of the invoice's current state.
        Arguments:
            invoice (Invoice): The invoice to save.
        """
        cls.objects.create(
            invoice=invoice,
            snapshot=json.dumps(invoice.snapshot())
        )
    @staticmethod
    def snapshot_receiver(sender, instance, **kwargs):  # pylint: disable=unused-argument
        """Signal receiver that saves a snapshot of an invoice.
        Arguments:
            sender: Not used, but required by Django signals.
            instance (Invoice, InvoiceItem, or InvoiceTransaction)
        """
        if isinstance(instance, Invoice):
            InvoiceHistory.save_invoice_snapshot(instance)
        elif hasattr(instance, 'invoice'):
            # invoice items and transactions snapshot their parent invoice
            InvoiceHistory.save_invoice_snapshot(instance.invoice)
    class Meta(object):
        get_latest_by = "timestamp"
# Hook up Django signals to record changes in the history table.
# We record any change to an invoice, invoice item, or transaction.
# We also record any deletion of a transaction, since users can delete
# transactions via Django admin.
# Note that we need to include *each* InvoiceItem subclass
# here, since Django signals do not fire automatically for subclasses
# of the "sender" class.
post_save.connect(InvoiceHistory.snapshot_receiver, sender=Invoice)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=CourseRegistrationCodeInvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
post_delete.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
class CourseRegistrationCode(models.Model):
    """
    This table contains registration codes
    With registration code, a user can register for a course for free
    """
    code = models.CharField(max_length=32, db_index=True, unique=True)
    course_id = CourseKeyField(max_length=255, db_index=True)
    created_by = models.ForeignKey(User, related_name='created_by_user')
    created_at = models.DateTimeField(auto_now_add=True)
    # set only for codes generated via a purchase order
    # (see order_generated_registration_codes); invoice codes leave it NULL
    order = models.ForeignKey(Order, db_index=True, null=True, related_name="purchase_order")
    mode_slug = models.CharField(max_length=100, null=True)
    is_valid = models.BooleanField(default=True)
    # For backwards compatibility, we maintain the FK to "invoice"
    # In the future, we will remove this in favor of the FK
    # to "invoice_item" (which can be used to look up the invoice).
    invoice = models.ForeignKey(Invoice, null=True)
    invoice_item = models.ForeignKey(CourseRegistrationCodeInvoiceItem, null=True)
    @classmethod
    def order_generated_registration_codes(cls, course_id):
        """
        Returns the registration codes that were generated
        via bulk purchase scenario.
        """
        return cls.objects.filter(order__isnull=False, course_id=course_id)
    @classmethod
    def invoice_generated_registration_codes(cls, course_id):
        """
        Returns the registration codes that were generated
        via invoice.
        """
        return cls.objects.filter(invoice__isnull=False, course_id=course_id)
class RegistrationCodeRedemption(models.Model):
    """
    This model contains the registration-code redemption info

    One row records a single user redeeming a single CourseRegistrationCode.
    """
    # Null when the code was invoice-generated (no shopping-cart order).
    order = models.ForeignKey(Order, db_index=True, null=True)
    registration_code = models.ForeignKey(CourseRegistrationCode, db_index=True)
    redeemed_by = models.ForeignKey(User, db_index=True)
    redeemed_at = models.DateTimeField(auto_now_add=True, null=True)
    # Enrollment that resulted from this redemption, when known.
    course_enrollment = models.ForeignKey(CourseEnrollment, null=True)
    @classmethod
    def registration_code_used_for_enrollment(cls, course_enrollment):
        """
        Returns RegistrationCodeRedemption object if registration code
        has been used during the course enrollment else Returns None.
        """
        # theoretically there could be more than one (e.g. someone self-unenrolls
        # then re-enrolls with a different regcode)
        reg_codes = cls.objects.filter(course_enrollment=course_enrollment).order_by('-redeemed_at')
        if reg_codes:
            # return the first one. In all normal use cases of registration codes
            # the user will only have one
            return reg_codes[0]
        return None
    @classmethod
    def is_registration_code_redeemed(cls, course_reg_code):
        """
        Checks the existence of the registration code
        in the RegistrationCodeRedemption

        Returns:
            bool: True if any redemption row exists for the given code string.
        """
        return cls.objects.filter(registration_code__code=course_reg_code).exists()
    @classmethod
    def get_registration_code_redemption(cls, code, course_id):
        """
        Returns the registration code redemption object if found else returns None.
        """
        # NOTE(review): .get() assumes at most one redemption per (code, course);
        # a duplicate would raise MultipleObjectsReturned, which is not caught here.
        try:
            code_redemption = cls.objects.get(registration_code__code=code, registration_code__course_id=course_id)
        except cls.DoesNotExist:
            code_redemption = None
        return code_redemption
    @classmethod
    def create_invoice_generated_registration_redemption(cls, course_reg_code, user):  # pylint: disable=invalid-name
        """
        This function creates a RegistrationCodeRedemption entry in case the registration codes were invoice generated
        and thus the order_id is missing.

        Returns:
            RegistrationCodeRedemption: the saved redemption row (order left null).
        """
        code_redemption = RegistrationCodeRedemption(registration_code=course_reg_code, redeemed_by=user)
        code_redemption.save()
        return code_redemption
class SoftDeleteCouponManager(models.Manager):
    """Manager that can narrow coupon querysets to active (is_active=True) rows."""
    def get_query_set(self):
        """Return a queryset over every coupon, active or not."""
        return super(SoftDeleteCouponManager, self).get_query_set()
    def get_active_coupons_query_set(self):
        """Return a queryset containing only coupons whose is_active flag is set."""
        all_coupons = super(SoftDeleteCouponManager, self).get_query_set()
        return all_coupons.filter(is_active=True)
class Coupon(models.Model):
    """
    This table contains coupon codes
    A user can get a discount offer on course if provide coupon code
    """
    # Not unique: the same code string may exist for several courses.
    code = models.CharField(max_length=32, db_index=True)
    description = models.CharField(max_length=255, null=True, blank=True)
    course_id = CourseKeyField(max_length=255)
    # Whole-number percentage, e.g. 20 for a 20% discount.
    percentage_discount = models.IntegerField(default=0)
    created_by = models.ForeignKey(User)
    created_at = models.DateTimeField(auto_now_add=True)
    # Soft-delete flag; see SoftDeleteCouponManager below.
    is_active = models.BooleanField(default=True)
    expiration_date = models.DateTimeField(null=True, blank=True)
    def __unicode__(self):
        return "[Coupon] code: {} course: {}".format(self.code, self.course_id)
    objects = SoftDeleteCouponManager()
    @property
    def display_expiry_date(self):
        """
        return the coupon expiration date in the readable format
        """
        # NOTE(review): displays one day *before* the stored date — presumably
        # expiration_date is stored as the first invalid moment (exclusive
        # bound); confirm against the coupon-creation code.
        return (self.expiration_date - timedelta(days=1)).strftime("%B %d, %Y") if self.expiration_date else None
class CouponRedemption(models.Model):
    """
    This table contain coupon redemption info

    One row is created per (order, user, coupon) when a coupon is applied
    to a cart; rows are removed again when the item or cart is emptied.
    """
    order = models.ForeignKey(Order, db_index=True)
    user = models.ForeignKey(User, db_index=True)
    coupon = models.ForeignKey(Coupon, db_index=True)

    @classmethod
    def remove_code_redemption_from_item(cls, item, user):
        """
        If an item removed from shopping cart then we will remove
        the corresponding redemption info of coupon code.

        Arguments:
            item (OrderItem): the cart item being removed.
            user (User): owner of the cart.
        """
        # Robustness fix: default to None so an item without a course_id
        # attribute falls through to the CourseKeyField.Empty lookup instead
        # of raising AttributeError.
        order_item_course_id = getattr(item, 'course_id', None)
        try:
            # Try to remove redemption information of coupon code, If exist.
            coupon_redemption = cls.objects.get(
                user=user,
                coupon__course_id=order_item_course_id if order_item_course_id else CourseKeyField.Empty,
                order=item.order_id
            )
            coupon_redemption.delete()
            log.info(
                u'Coupon "%s" redemption entry removed for user "%s" for order item "%s"',
                coupon_redemption.coupon.code,
                user,
                str(item.id),
            )
        except CouponRedemption.DoesNotExist:
            log.debug(u'Code redemption does not exist for order item id=%s.', str(item.id))

    @classmethod
    def remove_coupon_redemption_from_cart(cls, user, cart):
        """
        This method delete coupon redemption

        Deletes every redemption row the user holds against the given cart.
        """
        coupon_redemption = cls.objects.filter(user=user, order=cart)
        if coupon_redemption:
            coupon_redemption.delete()
            log.info(u'Coupon redemption entry removed for user %s for order %s', user, cart.id)

    @classmethod
    def get_discount_price(cls, percentage_discount, value):
        """
        return discounted price against coupon

        Arguments:
            percentage_discount (int): whole-number percentage, e.g. 20.
            value (Decimal): the original price.

        Returns:
            Decimal: ``value`` minus the discount, rounded to cents.
        """
        # Bug fix: compute the discount entirely in Decimal arithmetic.
        # The previous implementation constructed a Decimal from the binary
        # float ``percentage_discount / 100.00``, carrying float
        # representation error into currency math before rounding.
        discount = (Decimal(percentage_discount) * value / Decimal(100)).quantize(Decimal('0.01'))
        return value - discount

    @classmethod
    def add_coupon_redemption(cls, coupon, order, cart_items):
        """
        add coupon info into coupon_redemption model

        Applies ``coupon`` to the first matching course item in
        ``cart_items``: records a redemption row, moves the item's current
        unit_cost to list_price, and sets the discounted unit_cost.

        Raises:
            MultipleCouponsNotAllowedException: if any coupon (including this
                same one) is already applied to the order.

        Returns:
            bool: True if the discount was applied to an item.
        """
        is_redemption_applied = False
        coupon_redemptions = cls.objects.filter(order=order, user=order.user)
        for coupon_redemption in coupon_redemptions:
            if coupon_redemption.coupon.code != coupon.code or coupon_redemption.coupon.id == coupon.id:
                # Fix: log.warning, not log.exception — we are not inside an
                # ``except`` block here, so there is no traceback to attach.
                log.warning(
                    u"Coupon redemption already exist for user '%s' against order id '%s'",
                    order.user.username,
                    order.id,
                )
                raise MultipleCouponsNotAllowedException
        for item in cart_items:
            if getattr(item, 'course_id', None):
                if item.course_id == coupon.course_id:
                    coupon_redemption = cls(order=order, user=order.user, coupon=coupon)
                    coupon_redemption.save()
                    discount_price = cls.get_discount_price(coupon.percentage_discount, item.unit_cost)
                    item.list_price = item.unit_cost
                    item.unit_cost = discount_price
                    item.save()
                    log.info(
                        u"Discount generated for user %s against order id '%s'",
                        order.user.username,
                        order.id,
                    )
                    is_redemption_applied = True
                    return is_redemption_applied
        return is_redemption_applied

    @classmethod
    def get_top_discount_codes_used(cls, course_id):
        """
        Returns the top discount codes used.
        QuerySet = [
            {
                'coupon__percentage_discount': 22,
                'coupon__code': '12',
                'coupon__used_count': '2',
            },
            {
                ...
            }
        ]
        """
        return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).values(
            'coupon__code', 'coupon__percentage_discount'
        ).annotate(coupon__used_count=Count('coupon__code')).order_by('-coupon__used_count')

    @classmethod
    def get_total_coupon_code_purchases(cls, course_id):
        """
        returns total seats purchases using coupon codes
        """
        return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).aggregate(Count('coupon'))
class PaidCourseRegistration(OrderItem):
    """
    This is an inventory item for paying for a course registration
    """
    course_id = CourseKeyField(max_length=128, db_index=True)
    mode = models.SlugField(default=CourseMode.DEFAULT_MODE_SLUG)
    # Enrollment created when the purchase completes (see purchased_callback).
    course_enrollment = models.ForeignKey(CourseEnrollment, null=True)
    @classmethod
    def get_self_purchased_seat_count(cls, course_key, status='purchased'):
        """
        returns the count of paid_course items filter by course_id and status.
        """
        return cls.objects.filter(course_id=course_key, status=status).count()
    @classmethod
    def get_course_item_for_user_enrollment(cls, user, course_id, course_enrollment):
        """
        Returns PaidCourseRegistration object if user has payed for
        the course enrollment else Returns None
        """
        try:
            # latest('id') picks the most recent purchase if several match.
            return cls.objects.filter(course_id=course_id, user=user, course_enrollment=course_enrollment,
                                      status='purchased').latest('id')
        except PaidCourseRegistration.DoesNotExist:
            return None
    @classmethod
    def contained_in_order(cls, order, course_id):
        """
        Is the course defined by course_id contained in the order?
        """
        # select_subclasses narrows order items to this subclass before the
        # isinstance check.
        return course_id in [
            item.course_id
            for item in order.orderitem_set.all().select_subclasses("paidcourseregistration")
            if isinstance(item, cls)
        ]
    @classmethod
    def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
        """
        This will return the total amount of money that a purchased course generated
        """
        total_cost = 0
        # field='qty * unit_cost' makes the SUM aggregate over the line total
        # rather than the unit price.
        result = cls.objects.filter(course_id=course_key, status=status).aggregate(
            total=Sum('unit_cost', field='qty * unit_cost')
        )
        if result['total'] is not None:
            total_cost = result['total']
        return total_cost
    @classmethod
    @transaction.commit_on_success
    def add_to_order(cls, order, course_id, mode_slug=CourseMode.DEFAULT_MODE_SLUG, cost=None, currency=None):
        """
        A standardized way to create these objects, with sensible defaults filled in.
        Will update the cost if called on an order that already carries the course.
        Returns the order item

        Raises:
            CourseDoesNotExistException: the course cannot be found.
            ItemAlreadyInCartException: the course is already in this cart.
            AlreadyEnrolledInCourseException: the user is already enrolled.
        """
        # First a bunch of sanity checks:
        # actually fetch the course to make sure it exists, use this to
        # throw errors if it doesn't.
        course = modulestore().get_course(course_id)
        if not course:
            log.error("User {} tried to add non-existent course {} to cart id {}"
                      .format(order.user.email, course_id, order.id))
            raise CourseDoesNotExistException
        if cls.contained_in_order(order, course_id):
            log.warning(
                u"User %s tried to add PaidCourseRegistration for course %s, already in cart id %s",
                order.user.email,
                course_id,
                order.id,
            )
            raise ItemAlreadyInCartException
        if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
            log.warning("User {} trying to add course {} to cart id {}, already registered"
                        .format(order.user.email, course_id, order.id))
            raise AlreadyEnrolledInCourseException
        ### Validations done, now proceed
        ### handle default arguments for mode_slug, cost, currency
        course_mode = CourseMode.mode_for_course(course_id, mode_slug)
        if not course_mode:
            # user could have specified a mode that's not set, in that case return the DEFAULT_MODE
            course_mode = CourseMode.DEFAULT_MODE
        if not cost:
            cost = course_mode.min_price
        if not currency:
            currency = course_mode.currency
        super(PaidCourseRegistration, cls).add_to_order(order, course_id, cost, currency=currency)
        item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)
        item.status = order.status
        item.mode = course_mode.slug
        item.qty = 1
        item.unit_cost = cost
        item.list_price = cost
        item.line_desc = _(u'Registration for Course: {course_name}').format(
            course_name=course.display_name_with_default)
        item.currency = currency
        order.currency = currency
        item.report_comments = item.csv_report_comments
        order.save()
        item.save()
        log.info("User {} added course registration {} to cart: order {}"
                 .format(order.user.email, course_id, order.id))
        return item
    def purchased_callback(self):
        """
        When purchased, this should enroll the user in the course. We are assuming that
        course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found
        in CourseEnrollmentAllowed will the user be allowed to enroll. Otherwise requiring payment
        would in fact be quite silly since there's a clear back door.
        """
        if not modulestore().has_course(self.course_id):
            msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
            log.error(msg)
            raise PurchasedCallbackException(msg)
        # enroll in course and link to the enrollment_id
        self.course_enrollment = CourseEnrollment.enroll(user=self.user, course_key=self.course_id, mode=self.mode)
        self.save()
        log.info("Enrolled {0} in paid course {1}, paid ${2}"
                 .format(self.user.email, self.course_id, self.line_cost))  # pylint: disable=no-member
    def generate_receipt_instructions(self):
        """
        Generates instructions when the user has purchased a PaidCourseRegistration.
        Basically tells the user to visit the dashboard to see their new classes

        Returns:
            tuple of (pk_with_subclass, set of one notification string).
        """
        notification = _(
            u"Please visit your {link_start}dashboard{link_end} "
            u"to see your new course."
        ).format(
            link_start=u'<a href="{url}">'.format(url=reverse('dashboard')),
            link_end=u'</a>',
        )
        return self.pk_with_subclass, set([notification])
    @property
    def csv_report_comments(self):
        """
        Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
        Otherwise returns the annotation
        """
        try:
            return PaidCourseRegistrationAnnotation.objects.get(course_id=self.course_id).annotation
        except PaidCourseRegistrationAnnotation.DoesNotExist:
            return u""
    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.
        If the Order Item is associated with a course, additional fields will be populated with
        course information. If there is a mode associated, the mode data is included in the SKU.
        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(PaidCourseRegistration, self).analytics_data()
        sku = data['sku']
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)
        if self.mode:
            data['sku'] = sku + u'.' + unicode(self.mode)
        return data
class CourseRegCodeItem(OrderItem):
    """
    This is an inventory item for paying for
    generating course registration codes
    """
    course_id = CourseKeyField(max_length=128, db_index=True)
    mode = models.SlugField(default=CourseMode.DEFAULT_MODE_SLUG)

    @classmethod
    def get_bulk_purchased_seat_count(cls, course_key, status='purchased'):
        """
        returns the sum of bulk purchases seats.
        """
        total = 0
        result = cls.objects.filter(course_id=course_key, status=status).aggregate(total=Sum('qty'))
        if result['total'] is not None:
            total = result['total']
        return total

    @classmethod
    def contained_in_order(cls, order, course_id):
        """
        Is the course defined by course_id contained in the order?
        """
        # select_subclasses narrows order items to this subclass before the
        # isinstance check.
        return course_id in [
            item.course_id
            for item in order.orderitem_set.all().select_subclasses("courseregcodeitem")
            if isinstance(item, cls)
        ]

    @classmethod
    def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
        """
        This will return the total amount of money that a purchased course generated
        """
        total_cost = 0
        # field='qty * unit_cost' makes the SUM aggregate over the line total
        # rather than the unit price.
        result = cls.objects.filter(course_id=course_key, status=status).aggregate(
            total=Sum('unit_cost', field='qty * unit_cost')
        )
        if result['total'] is not None:
            total_cost = result['total']
        return total_cost

    @classmethod
    @transaction.commit_on_success
    def add_to_order(cls, order, course_id, qty, mode_slug=CourseMode.DEFAULT_MODE_SLUG, cost=None, currency=None):  # pylint: disable=arguments-differ
        """
        A standardized way to create these objects, with sensible defaults filled in.
        Will update the cost if called on an order that already carries the course.
        Returns the order item

        Raises:
            CourseDoesNotExistException: the course cannot be found.
            ItemAlreadyInCartException: the course is already in this cart.
            AlreadyEnrolledInCourseException: the user is already enrolled.
        """
        # First a bunch of sanity checks:
        # actually fetch the course to make sure it exists, use this to
        # throw errors if it doesn't.
        course = modulestore().get_course(course_id)
        if not course:
            log.error("User {} tried to add non-existent course {} to cart id {}"
                      .format(order.user.email, course_id, order.id))
            raise CourseDoesNotExistException
        if cls.contained_in_order(order, course_id):
            # Bug fix: the message previously said "PaidCourseRegistration"
            # (copy/paste from that class); this item type is CourseRegCodeItem.
            log.warning("User {} tried to add CourseRegCodeItem for course {}, already in cart id {}"
                        .format(order.user.email, course_id, order.id))
            raise ItemAlreadyInCartException
        if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
            log.warning("User {} trying to add course {} to cart id {}, already registered"
                        .format(order.user.email, course_id, order.id))
            raise AlreadyEnrolledInCourseException
        ### Validations done, now proceed
        ### handle default arguments for mode_slug, cost, currency
        course_mode = CourseMode.mode_for_course(course_id, mode_slug)
        if not course_mode:
            # user could have specified a mode that's not set, in that case return the DEFAULT_MODE
            course_mode = CourseMode.DEFAULT_MODE
        if not cost:
            cost = course_mode.min_price
        if not currency:
            currency = course_mode.currency
        super(CourseRegCodeItem, cls).add_to_order(order, course_id, cost, currency=currency)
        item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)  # pylint: disable=unused-variable
        item.status = order.status
        item.mode = course_mode.slug
        item.unit_cost = cost
        item.list_price = cost
        item.qty = qty
        item.line_desc = _(u'Enrollment codes for Course: {course_name}').format(
            course_name=course.display_name_with_default)
        item.currency = currency
        order.currency = currency
        item.report_comments = item.csv_report_comments
        order.save()
        item.save()
        log.info("User {} added course registration {} to cart: order {}"
                 .format(order.user.email, course_id, order.id))
        return item

    def purchased_callback(self):
        """
        The purchase is completed, this OrderItem type will generate Registration Codes that will
        be redeemed by users
        """
        if not modulestore().has_course(self.course_id):
            msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
            log.error(msg)
            raise PurchasedCallbackException(msg)
        total_registration_codes = int(self.qty)
        # we need to import here because of a circular dependency
        # we should ultimately refactor code to have save_registration_code in this models.py
        # file, but there's also a shared dependency on a random string generator which
        # is in another PR (for another feature)
        from instructor.views.api import save_registration_code
        for i in range(total_registration_codes):  # pylint: disable=unused-variable
            save_registration_code(self.user, self.course_id, self.mode, order=self.order)
        log.info("Enrolled {0} in paid course {1}, paid ${2}"
                 .format(self.user.email, self.course_id, self.line_cost))  # pylint: disable=no-member

    @property
    def csv_report_comments(self):
        """
        Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
        Otherwise returns the annotation
        """
        try:
            return CourseRegCodeItemAnnotation.objects.get(course_id=self.course_id).annotation
        except CourseRegCodeItemAnnotation.DoesNotExist:
            return u""

    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.
        If the OrderItem is associated with a course, additional fields will be populated with
        course information. If a mode is available, it will be included in the SKU.
        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(CourseRegCodeItem, self).analytics_data()
        sku = data['sku']
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)
        if self.mode:
            data['sku'] = sku + u'.' + unicode(self.mode)
        return data
class CourseRegCodeItemAnnotation(models.Model):
    """
    Maps a course_id to a free-form annotation string.

    Stanford's reports for paid courses must carry the payment account
    associated with each course, and there is no "SKU"/stock-item concept
    where that association could live — this table retrofits it.
    """
    course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
    annotation = models.TextField(null=True)
    def __unicode__(self):
        """Render as "<deprecated course id string> : <annotation>"."""
        # pylint: disable=no-member
        parts = (self.course_id.to_deprecated_string(), self.annotation)
        return u"{} : {}".format(*parts)
class PaidCourseRegistrationAnnotation(models.Model):
    """
    Associates a course_id with an extra annotation string.

    Needed because Stanford's paid-course reports must include the payment
    account for each course; lacking any "SKU"/stock-item abstraction to
    hold that mapping, this table serves as the retrofit.
    """
    course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
    annotation = models.TextField(null=True)
    def __unicode__(self):
        """Render as "<deprecated course id string> : <annotation>"."""
        # pylint: disable=no-member
        course_key_str = self.course_id.to_deprecated_string()
        return u"{} : {}".format(course_key_str, self.annotation)
class CertificateItem(OrderItem):
    """
    This is an inventory item for purchasing certificates
    """
    course_id = CourseKeyField(max_length=128, db_index=True)
    course_enrollment = models.ForeignKey(CourseEnrollment)
    # Course mode slug the certificate is issued for (e.g. 'verified').
    mode = models.SlugField()
    @receiver(UNENROLL_DONE)
    def refund_cert_callback(sender, course_enrollment=None, skip_refund=False, **kwargs):  # pylint: disable=no-self-argument,unused-argument
        """
        When a CourseEnrollment object calls its unenroll method, this function checks to see if that unenrollment
        occurred in a verified certificate that was within the refund deadline. If so, it actually performs the
        refund.
        Returns the refunded certificate on a successful refund; else, it returns nothing.
        """
        # Only refund verified cert unenrollments that are within bounds of the expiration date
        if (not course_enrollment.refundable()) or skip_refund:
            return
        # NOTE(review): ``user_id`` is filtered with a User instance rather
        # than a primary key — Django resolves this via the object's pk;
        # confirm this is intentional.
        target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified')
        try:
            target_cert = target_certs[0]
        except IndexError:
            log.error(
                u"Matching CertificateItem not found while trying to refund. User %s, Course %s",
                course_enrollment.user,
                course_enrollment.course_id,
            )
            return
        target_cert.status = 'refunded'
        target_cert.refund_requested_time = datetime.now(pytz.utc)
        target_cert.save()
        target_cert.order.refund()
        order_number = target_cert.order_id
        # send billing an email so they can handle refunding
        subject = _("[Refund] User-Requested Refund")
        message = "User {user} ({user_email}) has requested a refund on Order #{order_number}.".format(user=course_enrollment.user,
                                                                                                       user_email=course_enrollment.user.email,
                                                                                                       order_number=order_number)
        to_email = [settings.PAYMENT_SUPPORT_EMAIL]
        from_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
        try:
            send_mail(subject, message, from_email, to_email, fail_silently=False)
        except Exception as exception:  # pylint: disable=broad-except
            # Log-and-continue: a failed notification email must not undo the
            # refund that has already been recorded above.
            err_str = ('Failed sending email to billing to request a refund for verified certificate'
                       ' (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})\n{exception}')
            log.error(err_str.format(
                user=course_enrollment.user,
                course=course_enrollment.course_id,
                ce_id=course_enrollment.id,
                order=order_number,
                exception=exception,
            ))
        return target_cert
    @classmethod
    @transaction.commit_on_success
    def add_to_order(cls, order, course_id, cost, mode, currency='usd'):
        """
        Add a CertificateItem to an order
        Returns the CertificateItem object after saving
        `order` - an order that this item should be added to, generally the cart order
        `course_id` - the course that we would like to purchase as a CertificateItem
        `cost` - the amount the user will be paying for this CertificateItem
        `mode` - the course mode that this certificate is going to be issued for
        This item also creates a new enrollment if none exists for this user and this course.
        Example Usage:
            cart = Order.get_cart_for_user(user)
            CertificateItem.add_to_order(cart, 'edX/Test101/2013_Fall', 30, 'verified')

        Raises:
            InvalidCartItem: ``mode`` is not configured for the course.
        """
        super(CertificateItem, cls).add_to_order(order, course_id, cost, currency=currency)
        course_enrollment = CourseEnrollment.get_or_create_enrollment(order.user, course_id)
        # do some validation on the enrollment mode
        valid_modes = CourseMode.modes_for_course_dict(course_id)
        if mode in valid_modes:
            mode_info = valid_modes[mode]
        else:
            msg = u"Mode {mode} does not exist for {course_id}".format(mode=mode, course_id=course_id)
            log.error(msg)
            raise InvalidCartItem(
                _(u"Mode {mode} does not exist for {course_id}").format(mode=mode, course_id=course_id)
            )
        item, _created = cls.objects.get_or_create(
            order=order,
            user=order.user,
            course_id=course_id,
            course_enrollment=course_enrollment,
            mode=mode,
        )
        item.status = order.status
        item.qty = 1
        item.unit_cost = cost
        item.list_price = cost
        course_name = modulestore().get_course(course_id).display_name
        # Translators: In this particular case, mode_name refers to a
        # particular mode (i.e. Honor Code Certificate, Verified Certificate, etc)
        # by which a user could enroll in the given course.
        item.line_desc = _("{mode_name} for course {course}").format(
            mode_name=mode_info.name,
            course=course_name
        )
        item.currency = currency
        order.currency = currency
        order.save()
        item.save()
        return item
    def purchased_callback(self):
        """
        When purchase goes through, activate and update the course enrollment for the correct mode
        """
        self.course_enrollment.change_mode(self.mode)
        self.course_enrollment.activate()
    def additional_instruction_text(self):
        """
        Return extra receipt text: a verification reminder (only for verified
        enrollments) followed by refund instructions.
        """
        verification_reminder = ""
        is_enrollment_mode_verified = self.course_enrollment.is_verified_enrollment()  # pylint: disable=E1101
        if is_enrollment_mode_verified:
            domain = microsite.get_value('SITE_NAME', settings.SITE_NAME)
            path = reverse('verify_student_verify_now', kwargs={'course_id': unicode(self.course_id)})
            verification_url = "http://{domain}{path}".format(domain=domain, path=path)
            verification_reminder = _(
                "If you haven't verified your identity yet, please start the verification process ({verification_url})."
            ).format(verification_url=verification_url)
        refund_reminder = _(
            "You have up to two weeks into the course to unenroll and receive a full refund."
            "To receive your refund, contact {billing_email}. "
            "Please include your order number in your email. "
            "Please do NOT include your credit card information."
        ).format(
            billing_email=settings.PAYMENT_SUPPORT_EMAIL
        )
        # Need this to be unicode in case the reminder strings
        # have been translated and contain non-ASCII unicode
        return u"{verification_reminder} {refund_reminder}".format(
            verification_reminder=verification_reminder,
            refund_reminder=refund_reminder
        )
    @classmethod
    def verified_certificates_count(cls, course_id, status):
        """Return a queryset of CertificateItem for every verified enrollment in course_id with the given status."""
        # NOTE(review): despite the docstring, this returns the integer from
        # .count(), not a queryset.
        return use_read_replica_if_available(
            CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status).count())
    # TODO combine these three methods into one
    @classmethod
    def verified_certificates_monetary_field_sum(cls, course_id, status, field_to_aggregate):
        """
        Returns a Decimal indicating the total sum of field_to_aggregate for all verified certificates with a particular status.
        Sample usages:
        - status 'refunded' and field_to_aggregate 'unit_cost' will give the total amount of money refunded for course_id
        - status 'purchased' and field_to_aggregate 'service_fees' gives the sum of all service fees for purchased certificates
        etc
        """
        query = use_read_replica_if_available(
            CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status)).aggregate(Sum(field_to_aggregate))[field_to_aggregate + '__sum']
        if query is None:
            return Decimal(0.00)
        else:
            return query
    @classmethod
    def verified_certificates_contributing_more_than_minimum(cls, course_id):
        """
        Count 'purchased' verified certificates for course_id whose unit_cost
        exceeds the minimum verified-track price (in USD).
        """
        return use_read_replica_if_available(
            CertificateItem.objects.filter(
                course_id=course_id,
                mode='verified',
                status='purchased',
                unit_cost__gt=(CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd')))).count()
    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.
        If the CertificateItem is associated with a course, additional fields will be populated with
        course information. If there is a mode associated with the certificate, it is included in the SKU.
        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(CertificateItem, self).analytics_data()
        sku = data['sku']
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)
        if self.mode:
            data['sku'] = sku + u'.' + unicode(self.mode)
        return data
class DonationConfiguration(ConfigurationModel):
    """Configure whether donations are enabled on the site."""
    # ConfigurationModel already supplies the "enabled" flag and change
    # history; no additional fields are required.
    pass
class Donation(OrderItem):
"""A donation made by a user.
Donations can be made for a specific course or to the organization as a whole.
Users can choose the donation amount.
"""
# Types of donations
DONATION_TYPES = (
("general", "A general donation"),
("course", "A donation to a particular course")
)
# The type of donation
donation_type = models.CharField(max_length=32, default="general", choices=DONATION_TYPES)
# If a donation is made for a specific course, then store the course ID here.
# If the donation is made to the organization as a whole,
# set this field to CourseKeyField.Empty
course_id = CourseKeyField(max_length=255, db_index=True)
@classmethod
@transaction.commit_on_success
def add_to_order(cls, order, donation_amount, course_id=None, currency='usd'):
"""Add a donation to an order.
Args:
order (Order): The order to add this donation to.
donation_amount (Decimal): The amount the user is donating.
Keyword Args:
course_id (CourseKey): If provided, associate this donation with a particular course.
currency (str): The currency used for the the donation.
Raises:
InvalidCartItem: The provided course ID is not valid.
Returns:
Donation
"""
# This will validate the currency but won't actually add the item to the order.
super(Donation, cls).add_to_order(order, currency=currency)
# Create a line item description, including the name of the course
# if this is a per-course donation.
# This will raise an exception if the course can't be found.
description = cls._line_item_description(course_id=course_id)
params = {
"order": order,
"user": order.user,
"status": order.status,
"qty": 1,
"unit_cost": donation_amount,
"currency": currency,
"line_desc": description
}
if course_id is not None:
params["course_id"] = course_id
params["donation_type"] = "course"
else:
params["donation_type"] = "general"
return cls.objects.create(**params)
def purchased_callback(self):
"""Donations do not need to be fulfilled, so this method does nothing."""
pass
def generate_receipt_instructions(self):
"""Provide information about tax-deductible donations in the receipt.
Returns:
tuple of (Donation, unicode)
"""
return self.pk_with_subclass, set([self._tax_deduction_msg()])
def additional_instruction_text(self, **kwargs): # pylint: disable=unused-argument
"""Provide information about tax-deductible donations in the confirmation email.
Returns:
unicode
"""
return self._tax_deduction_msg()
def _tax_deduction_msg(self):
"""Return the translated version of the tax deduction message.
Returns:
unicode
"""
return _(
u"We greatly appreciate this generous contribution and your support of the {platform_name} mission. "
u"This receipt was prepared to support charitable contributions for tax purposes. "
u"We confirm that neither goods nor services were provided in exchange for this gift."
).format(platform_name=settings.PLATFORM_NAME)
@classmethod
def _line_item_description(cls, course_id=None):
"""Create a line-item description for the donation.
Includes the course display name if provided.
Keyword Arguments:
course_id (CourseKey)
Raises:
CourseDoesNotExistException: The course ID is not valid.
Returns:
unicode
"""
# If a course ID is provided, include the display name of the course
# in the line item description.
if course_id is not None:
course = modulestore().get_course(course_id)
if course is None:
msg = u"Could not find a course with the ID '{course_id}'".format(course_id=course_id)
log.error(msg)
raise CourseDoesNotExistException(
_(u"Could not find a course with the ID '{course_id}'").format(course_id=course_id)
)
return _(u"Donation for {course}").format(course=course.display_name)
# The donation is for the organization as a whole, not a specific course
else:
return _(u"Donation for {platform_name}").format(platform_name=settings.PLATFORM_NAME)
@property
def single_item_receipt_context(self):
return {
'receipt_has_donation_item': True,
}
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the donation is associated with a course, additional fields will be populated with
course information. When no name or category is specified by the implementation, the
platform name is used as a default value for required event fields, to declare that
the Order is specific to the platform, rather than a specific product name or category.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(Donation, self).analytics_data()
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
else:
data['name'] = settings.PLATFORM_NAME
data['category'] = settings.PLATFORM_NAME
return data
@property
def pdf_receipt_display_name(self):
    """
    Label used for this donation on a printed PDF receipt.
    """
    description = self._line_item_description(course_id=self.course_id)
    return description
| agpl-3.0 |
sxpert/ansible-modules-core | cloud/openstack/os_client_config.py | 75 | 2295 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os_client_config
from os_client_config import exceptions
DOCUMENTATION = '''
---
module: os_client_config
short_description: Get OpenStack Client config
description:
- Get I(openstack) client config data from clouds.yaml or environment
version_added: "2.0"
notes:
- Facts are placed in the C(openstack.clouds) variable.
options:
clouds:
description:
- List of clouds to limit the return list to. No value means return
information on all configured clouds
required: false
default: []
requirements: [ os-client-config ]
author: "Monty Taylor (@emonty)"
'''
EXAMPLES = '''
# Get list of clouds that do not support security groups
- os_client_config:
- debug: var={{ item }}
with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}"
# Get the information back just about the mordred cloud
- os_client_config:
clouds:
- mordred
'''
def main():
    """Gather OpenStack client configuration and return it as Ansible facts.

    Reads clouds.yaml / environment data via os-client-config and exposes
    the cloud configurations under the ``openstack.clouds`` fact. The
    optional ``clouds`` parameter restricts the result to the named clouds.
    """
    module = AnsibleModule(argument_spec=dict(
        # type='list' lets Ansible coerce a comma-separated string into a
        # real list; without it the membership test below would compare
        # cloud names against individual characters of the string.
        clouds=dict(required=False, type='list', default=[]),
    ))
    p = module.params

    try:
        config = os_client_config.OpenStackConfig()
        clouds = []
        for cloud in config.get_all_clouds():
            # An empty filter list means "return every configured cloud".
            if not p['clouds'] or cloud.name in p['clouds']:
                cloud.config['name'] = cloud.name
                clouds.append(cloud.config)
        module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds)))
    except exceptions.OpenStackConfigException as e:
        module.fail_json(msg=str(e))
# import module snippets
# Old-style Ansible modules bring AnsibleModule into scope via this star
# import at the bottom of the file and invoke main() unconditionally.
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
jhseu/tensorflow | tensorflow/python/kernel_tests/record_input_test.py | 30 | 6186 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for record_input_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.framework import test_util
from tensorflow.python.framework.errors_impl import NotFoundError
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RecordInputOpTest(test.TestCase):
  """Tests for `data_flow_ops.RecordInput`."""

  def generateTestData(self,
                       prefix,
                       n,
                       m,
                       compression_type=tf_record.TFRecordCompressionType.NONE):
    # Writes `n` TFRecord files with `m` records each into the temp dir.
    # Each record's payload is its global index zero-padded to 10 digits,
    # so every record across all files is unique.
    options = tf_record.TFRecordOptions(compression_type)
    for i in range(n):
      f = os.path.join(self.get_temp_dir(), prefix + "." + str(i))
      w = tf_record.TFRecordWriter(f, options=options)
      for j in range(m):
        w.write("{0:0{width}}".format(i * m + j, width=10).encode("utf-8"))
      w.close()

  def testRecordInputSimple(self):
    # One file, one record: the record should be yielded verbatim.
    with self.cached_session() as sess:
      self.generateTestData("basic", 1, 1)
      yield_op = data_flow_ops.RecordInput(
          file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
          parallelism=1,
          buffer_size=1,
          batch_size=1,
          name="record_input").get_yield_op()
      self.assertEqual(self.evaluate(yield_op), b"0000000000")

  def testRecordInputSimpleGzip(self):
    # Same as the simple case, but the input files are GZIP-compressed and
    # RecordInput is told to decompress them.
    with self.cached_session() as sess:
      self.generateTestData(
          "basic",
          1,
          1,
          compression_type=tf_record.TFRecordCompressionType.GZIP)
      yield_op = data_flow_ops.RecordInput(
          file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
          parallelism=1,
          buffer_size=1,
          batch_size=1,
          name="record_input",
          compression_type=tf_record.TFRecordCompressionType.GZIP).get_yield_op(
          )
      self.assertEqual(self.evaluate(yield_op), b"0000000000")

  def testRecordInputSimpleZlib(self):
    # Same as the simple case with ZLIB compression.
    with self.cached_session() as sess:
      self.generateTestData(
          "basic",
          1,
          1,
          compression_type=tf_record.TFRecordCompressionType.ZLIB)
      yield_op = data_flow_ops.RecordInput(
          file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
          parallelism=1,
          buffer_size=1,
          batch_size=1,
          name="record_input",
          compression_type=tf_record.TFRecordCompressionType.ZLIB).get_yield_op(
          )
      self.assertEqual(self.evaluate(yield_op), b"0000000000")

  @test_util.run_deprecated_v1
  def testRecordInputEpochs(self):
    files = 100
    records_per_file = 100
    batches = 2
    with self.cached_session() as sess:
      self.generateTestData("basic", files, records_per_file)
      records = data_flow_ops.RecordInput(
          file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
          parallelism=2,
          buffer_size=2000,
          batch_size=1,
          shift_ratio=0.33,
          seed=10,
          name="record_input",
          batches=batches)
      yield_op = records.get_yield_op()

      # cycle over 3 epochs and make sure we never duplicate
      for _ in range(3):
        epoch_set = set()
        for _ in range(int(files * records_per_file / batches)):
          op_list = self.evaluate(yield_op)
          # NOTE(review): `is` relies on small-int interning; `==` is the
          # intent here.
          self.assertTrue(len(op_list) is batches)
          for r in op_list:
            self.assertTrue(r[0] not in epoch_set)
            epoch_set.add(r[0])

  @test_util.run_deprecated_v1
  def testDoesNotDeadlock(self):
    # Iterate multiple times to cause deadlock if there is a chance it can occur
    for _ in range(30):
      with self.cached_session() as sess:
        self.generateTestData("basic", 1, 1)
        records = data_flow_ops.RecordInput(
            file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
            parallelism=1,
            buffer_size=100,
            batch_size=1,
            name="record_input")
        yield_op = records.get_yield_op()
        for _ in range(50):
          self.evaluate(yield_op)

  @test_util.run_deprecated_v1
  def testEmptyGlob(self):
    # A pattern matching no files should surface NotFoundError on evaluation.
    with self.cached_session() as sess:
      record_input = data_flow_ops.RecordInput(file_pattern="foo")
      yield_op = record_input.get_yield_op()
      self.evaluate(variables.global_variables_initializer())
      with self.assertRaises(NotFoundError):
        self.evaluate(yield_op)

  @test_util.run_deprecated_v1
  def testBufferTooSmall(self):
    # NOTE(review): this body is identical to testRecordInputEpochs except
    # for the dataset size; buffer_size stays at 2000 despite the test
    # name. Presumably the intent is a buffer smaller than the dataset
    # still yields every record exactly once per epoch — TODO confirm.
    files = 10
    records_per_file = 10
    batches = 2
    with self.cached_session() as sess:
      self.generateTestData("basic", files, records_per_file)
      records = data_flow_ops.RecordInput(
          file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
          parallelism=2,
          buffer_size=2000,
          batch_size=1,
          shift_ratio=0.33,
          seed=10,
          name="record_input",
          batches=batches)
      yield_op = records.get_yield_op()

      # cycle over 3 epochs and make sure we never duplicate
      for _ in range(3):
        epoch_set = set()
        for _ in range(int(files * records_per_file / batches)):
          op_list = self.evaluate(yield_op)
          self.assertTrue(len(op_list) is batches)
          for r in op_list:
            self.assertTrue(r[0] not in epoch_set)
            epoch_set.add(r[0])
# Run the test suite when executed as a script.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/end_to_end_ml/solutions/serving/application/lib/werkzeug/urls.py | 7 | 39322 | # -*- coding: utf-8 -*-
"""
werkzeug.urls
~~~~~~~~~~~~~
``werkzeug.urls`` used to provide several wrapper functions for Python 2
urlparse, whose main purpose were to work around the behavior of the Py2
stdlib and its lack of unicode support. While this was already a somewhat
inconvenient situation, it got even more complicated because Python 3's
``urllib.parse`` actually does handle unicode properly. In other words,
this module would wrap two libraries with completely different behavior. So
now this module contains a 2-and-3-compatible backport of Python 3's
``urllib.parse``, which is mostly API-compatible.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import os
import re
from collections import namedtuple
from ._compat import fix_tuple_repr
from ._compat import implements_to_string
from ._compat import make_literal_wrapper
from ._compat import normalize_string_tuple
from ._compat import PY2
from ._compat import text_type
from ._compat import to_native
from ._compat import to_unicode
from ._compat import try_coerce_native
from ._internal import _decode_idna
from ._internal import _encode_idna
# A regular expression for what a valid schema looks like
_scheme_re = re.compile(r"^[a-zA-Z0-9+-.]+$")

# Characters that are safe in any part of an URL.
_always_safe = frozenset(
    bytearray(
        b"abcdefghijklmnopqrstuvwxyz"
        b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        b"0123456789"
        b"-._~"
    )
)

_hexdigits = "0123456789ABCDEFabcdef"
# Maps every two-character hex code (as bytes, all case combinations) to
# its byte value, e.g. b"2F" -> 47 and b"2f" -> 47.
_hextobyte = dict(
    ((a + b).encode(), int(a + b, 16)) for a in _hexdigits for b in _hexdigits
)
# Precomputed percent-quoted form (b"%XX") for every possible byte value.
_bytetohex = [("%%%02X" % char).encode("ascii") for char in range(256)]

# Base tuple type shared by URL and BytesURL.
_URLTuple = fix_tuple_repr(
    namedtuple("_URLTuple", ["scheme", "netloc", "path", "query", "fragment"])
)
class BaseURL(_URLTuple):
    """Superclass of :py:class:`URL` and :py:class:`BytesURL`."""

    __slots__ = ()

    def replace(self, **kwargs):
        """Return an URL with the same values, except for those parameters
        given new values by whichever keyword arguments are specified."""
        return self._replace(**kwargs)

    @property
    def host(self):
        """The host part of the URL if available, otherwise `None`. The
        host is either the hostname or the IP address mentioned in the
        URL. It will not contain the port.
        """
        return self._split_host()[0]

    @property
    def ascii_host(self):
        """Works exactly like :attr:`host` but will return a result that
        is restricted to ASCII. If it finds a netloc that is not ASCII
        it will attempt to idna decode it. This is useful for socket
        operations when the URL might include internationalized characters.
        """
        rv = self.host
        if rv is not None and isinstance(rv, text_type):
            try:
                rv = _encode_idna(rv)
            except UnicodeError:
                # Hosts that cannot be IDNA-encoded are reduced to their
                # ASCII subset instead of propagating the error.
                rv = rv.encode("ascii", "ignore")
        return to_native(rv, "ascii", "ignore")

    @property
    def port(self):
        """The port in the URL as an integer if it was present, `None`
        otherwise. This does not fill in default ports.
        """
        try:
            rv = int(to_native(self._split_host()[1]))
            if 0 <= rv <= 65535:
                return rv
        except (ValueError, TypeError):
            # Missing or non-numeric port: fall through to implicit None.
            pass

    @property
    def auth(self):
        """The authentication part in the URL if available, `None`
        otherwise.
        """
        return self._split_netloc()[0]

    @property
    def username(self):
        """The username if it was part of the URL, `None` otherwise.
        This undergoes URL decoding and will always be a unicode string.
        """
        rv = self._split_auth()[0]
        if rv is not None:
            return _url_unquote_legacy(rv)

    @property
    def raw_username(self):
        """The username if it was part of the URL, `None` otherwise.
        Unlike :attr:`username` this one is not being decoded.
        """
        return self._split_auth()[0]

    @property
    def password(self):
        """The password if it was part of the URL, `None` otherwise.
        This undergoes URL decoding and will always be a unicode string.
        """
        rv = self._split_auth()[1]
        if rv is not None:
            return _url_unquote_legacy(rv)

    @property
    def raw_password(self):
        """The password if it was part of the URL, `None` otherwise.
        Unlike :attr:`password` this one is not being decoded.
        """
        return self._split_auth()[1]

    def decode_query(self, *args, **kwargs):
        """Decodes the query part of the URL. This is a shortcut for
        calling :func:`url_decode` on the query argument. The arguments and
        keyword arguments are forwarded to :func:`url_decode` unchanged.
        """
        return url_decode(self.query, *args, **kwargs)

    def join(self, *args, **kwargs):
        """Joins this URL with another one. This is just a convenience
        function for calling into :meth:`url_join` and then parsing the
        return value again.
        """
        return url_parse(url_join(self, *args, **kwargs))

    def to_url(self):
        """Returns a URL string or bytes depending on the type of the
        information stored. This is just a convenience function
        for calling :meth:`url_unparse` for this URL.
        """
        return url_unparse(self)

    def decode_netloc(self):
        """Decodes the netloc part into a string."""
        rv = _decode_idna(self.host or "")
        # An IPv6 literal must be re-bracketed before a port is appended.
        if ":" in rv:
            rv = "[%s]" % rv
        port = self.port
        if port is not None:
            rv = "%s:%d" % (rv, port)
        auth = ":".join(
            filter(
                None,
                [
                    _url_unquote_legacy(self.raw_username or "", "/:%@"),
                    _url_unquote_legacy(self.raw_password or "", "/:%@"),
                ],
            )
        )
        if auth:
            rv = "%s@%s" % (auth, rv)
        return rv

    def to_uri_tuple(self):
        """Returns a :class:`BytesURL` tuple that holds a URI. This will
        encode all the information in the URL properly to ASCII using the
        rules a web browser would follow.

        It's usually more interesting to directly call :meth:`iri_to_uri` which
        will return a string.
        """
        return url_parse(iri_to_uri(self).encode("ascii"))

    def to_iri_tuple(self):
        """Returns a :class:`URL` tuple that holds a IRI. This will try
        to decode as much information as possible in the URL without
        losing information similar to how a web browser does it for the
        URL bar.

        It's usually more interesting to directly call :meth:`uri_to_iri` which
        will return a string.
        """
        return url_parse(uri_to_iri(self))

    def get_file_location(self, pathformat=None):
        """Returns a tuple with the location of the file in the form
        ``(server, location)``. If the netloc is empty in the URL or
        points to localhost, it's represented as ``None``.

        The `pathformat` by default is autodetection but needs to be set
        when working with URLs of a specific system. The supported values
        are ``'windows'`` when working with Windows or DOS paths and
        ``'posix'`` when working with posix paths.

        If the URL does not point to a local file, the server and location
        are both represented as ``None``.

        :param pathformat: The expected format of the path component.
                           Currently ``'windows'`` and ``'posix'`` are
                           supported. Defaults to ``None`` which is
                           autodetect.
        """
        if self.scheme != "file":
            return None, None

        path = url_unquote(self.path)
        host = self.netloc or None

        if pathformat is None:
            if os.name == "nt":
                pathformat = "windows"
            else:
                pathformat = "posix"

        if pathformat == "windows":
            # Turn "/C|/path" or "/C:/path" into the drive form "C:/path".
            if path[:1] == "/" and path[1:2].isalpha() and path[2:3] in "|:":
                path = path[1:2] + ":" + path[3:]
            windows_share = path[:3] in ("\\" * 3, "/" * 3)
            import ntpath

            path = ntpath.normpath(path)
            # Windows shared drives are represented as ``\\host\\directory``.
            # That results in a URL like ``file://///host/directory``, and a
            # path like ``///host/directory``. We need to special-case this
            # because the path contains the hostname.
            if windows_share and host is None:
                parts = path.lstrip("\\").split("\\", 1)
                if len(parts) == 2:
                    host, path = parts
                else:
                    host = parts[0]
                    path = ""
        elif pathformat == "posix":
            import posixpath

            path = posixpath.normpath(path)
        else:
            raise TypeError("Invalid path format %s" % repr(pathformat))

        if host in ("127.0.0.1", "::1", "localhost"):
            host = None

        return host, path

    def _split_netloc(self):
        # Split "auth@host:port" into (auth, host:port); auth may be None.
        if self._at in self.netloc:
            return self.netloc.split(self._at, 1)
        return None, self.netloc

    def _split_auth(self):
        # Split the auth part into (username, password); either may be None.
        auth = self._split_netloc()[0]
        if not auth:
            return None, None
        if self._colon not in auth:
            return auth, None
        return auth.split(self._colon, 1)

    def _split_host(self):
        # Split "host:port" into (host, port), handling bracketed IPv6
        # literals like "[::1]:8080".
        rv = self._split_netloc()[1]
        if not rv:
            return None, None

        if not rv.startswith(self._lbracket):
            if self._colon in rv:
                return rv.split(self._colon, 1)
            return rv, None

        idx = rv.find(self._rbracket)
        if idx < 0:
            return rv, None

        host = rv[1:idx]
        rest = rv[idx + 1 :]
        if rest.startswith(self._colon):
            return host, rest[1:]
        return host, None
@implements_to_string
class URL(BaseURL):
    """Represents a parsed URL. This behaves like a regular tuple but
    also has some extra attributes that give further insight into the
    URL.
    """

    __slots__ = ()
    _at = "@"
    _colon = ":"
    _lbracket = "["
    _rbracket = "]"

    def __str__(self):
        return self.to_url()

    def encode_netloc(self):
        """Encodes the netloc part to an ASCII safe URL as bytes."""
        host = self.ascii_host or ""
        if ":" in host:
            # Re-bracket IPv6 literals so the port separator is unambiguous.
            host = "[%s]" % host
        if self.port is not None:
            host = "%s:%d" % (host, self.port)
        credentials = [
            url_quote(part or "", "utf-8", "strict", "/:%")
            for part in (self.raw_username, self.raw_password)
        ]
        auth = ":".join([c for c in credentials if c])
        if auth:
            host = "%s@%s" % (auth, host)
        return to_native(host)

    def encode(self, charset="utf-8", errors="replace"):
        """Encodes the URL to a tuple made out of bytes. The charset is
        only being used for the path, query and fragment.
        """
        scheme = self.scheme.encode("ascii")
        netloc = self.encode_netloc()
        path = self.path.encode(charset, errors)
        query = self.query.encode(charset, errors)
        fragment = self.fragment.encode(charset, errors)
        return BytesURL(scheme, netloc, path, query, fragment)
class BytesURL(BaseURL):
    """Represents a parsed URL in bytes."""

    __slots__ = ()
    _at = b"@"
    _colon = b":"
    _lbracket = b"["
    _rbracket = b"]"

    def __str__(self):
        return self.to_url().decode("utf-8", "replace")

    def encode_netloc(self):
        """Returns the netloc unchanged as bytes."""
        return self.netloc

    def decode(self, charset="utf-8", errors="replace"):
        """Decodes the URL to a tuple made out of strings. The charset is
        only being used for the path, query and fragment.
        """
        decoded = (
            self.scheme.decode("ascii"),
            self.decode_netloc(),
            self.path.decode(charset, errors),
            self.query.decode(charset, errors),
            self.fragment.decode(charset, errors),
        )
        return URL(*decoded)
# Cache of percent-decoding tables keyed by the frozenset of unsafe bytes.
_unquote_maps = {frozenset(): _hextobyte}


def _unquote_to_bytes(string, unsafe=""):
    """Percent-decode *string* into bytes, leaving escapes for bytes in
    *unsafe* (and malformed escapes) quoted.
    """
    if isinstance(string, text_type):
        string = string.encode("utf-8")
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode("utf-8")

    unsafe = frozenset(bytearray(unsafe))
    pieces = iter(string.split(b"%"))
    result = bytearray(next(pieces, b""))

    # Build (and memoize) a decode table that omits the unsafe bytes.
    if unsafe not in _unquote_maps:
        _unquote_maps[unsafe] = {
            h: b for h, b in _hextobyte.items() if b not in unsafe
        }
    hex_to_byte = _unquote_maps[unsafe]

    for piece in pieces:
        code = piece[:2]
        decoded = hex_to_byte.get(code)
        if decoded is not None:
            result.append(decoded)
            result.extend(piece[2:])
        else:
            # Not a decodable escape: keep the "%" and the raw piece.
            result.append(37)  # %
            result.extend(piece)

    return bytes(result)
def _url_encode_impl(obj, charset, encode_keys, sort, key):
    """Yield ``key=value`` pairs for *obj*, percent-quoted and encoded
    with *charset*; pairs whose value is `None` are skipped.
    """
    from .datastructures import iter_multi_items

    pairs = iter_multi_items(obj)
    if sort:
        pairs = sorted(pairs, key=key)
    for field, value in pairs:
        if value is None:
            continue
        if not isinstance(field, bytes):
            field = text_type(field).encode(charset)
        if not isinstance(value, bytes):
            value = text_type(value).encode(charset)
        yield "=".join((_fast_url_quote_plus(field), _fast_url_quote_plus(value)))
def _url_unquote_legacy(value, unsafe=""):
    """Best-effort unquote: try strict UTF-8 first and fall back to
    latin1 so arbitrary byte sequences never raise.
    """
    try:
        return url_unquote(value, charset="utf-8", errors="strict", unsafe=unsafe)
    except UnicodeError:
        return url_unquote(value, charset="latin1", unsafe=unsafe)
def url_parse(url, scheme=None, allow_fragments=True):
    """Parses a URL from a string into a :class:`URL` tuple. If the URL
    is lacking a scheme it can be provided as second argument. Otherwise,
    it is ignored. Optionally fragments can be stripped from the URL
    by setting `allow_fragments` to `False`.

    The inverse of this function is :func:`url_unparse`.

    :param url: the URL to parse.
    :param scheme: the default schema to use if the URL is schemaless.
    :param allow_fragments: if set to `False` a fragment will be removed
                            from the URL.
    """
    # `s` wraps literals so the same code path handles str and bytes URLs.
    s = make_literal_wrapper(url)
    is_text_based = isinstance(url, text_type)

    if scheme is None:
        scheme = s("")
    netloc = query = fragment = s("")
    i = url.find(s(":"))
    if i > 0 and _scheme_re.match(to_native(url[:i], errors="replace")):
        # make sure "iri" is not actually a port number (in which case
        # "scheme" is really part of the path)
        rest = url[i + 1 :]
        if not rest or any(c not in s("0123456789") for c in rest):
            # not a port number
            scheme, url = url[:i].lower(), rest

    if url[:2] == s("//"):
        # The netloc extends to the first "/", "?" or "#" after the "//".
        delim = len(url)
        for c in s("/?#"):
            wdelim = url.find(c, 2)
            if wdelim >= 0:
                delim = min(delim, wdelim)
        netloc, url = url[2:delim], url[delim:]
        # A "[" or "]" without its partner means a malformed IPv6 literal.
        if (s("[") in netloc and s("]") not in netloc) or (
            s("]") in netloc and s("[") not in netloc
        ):
            raise ValueError("Invalid IPv6 URL")

    if allow_fragments and s("#") in url:
        url, fragment = url.split(s("#"), 1)
    if s("?") in url:
        url, query = url.split(s("?"), 1)

    result_type = URL if is_text_based else BytesURL
    return result_type(scheme, netloc, url, query, fragment)
def _make_fast_url_quote(charset="utf-8", errors="strict", safe="/:", unsafe=""):
    """Precompile the translation table for a URL encoding function.

    Unlike :func:`url_quote`, the generated function only takes the
    string to quote.

    :param charset: The charset to encode the result with.
    :param errors: How to handle encoding errors.
    :param safe: An optional sequence of safe characters to never encode.
    :param unsafe: An optional sequence of unsafe characters to always encode.
    """
    if isinstance(safe, text_type):
        safe = safe.encode(charset, errors)
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode(charset, errors)

    safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))
    # One table entry per byte value: either the literal character or "%XX".
    table = [chr(c) if c in safe else "%%%02X" % c for c in range(256)]

    if PY2:
        # On Python 2 iterating a str yields characters, so go through
        # bytearray to get integer byte values.
        def quote(string):
            return "".join([table[c] for c in bytearray(string)])

    else:

        def quote(string):
            return "".join([table[c] for c in string])

    return quote
# Precompiled quoters for the common default cases.
_fast_url_quote = _make_fast_url_quote()
_fast_quote_plus = _make_fast_url_quote(safe=" ", unsafe="+")


def _fast_url_quote_plus(string):
    # Spaces were marked safe above so they survive quoting and can be
    # converted to "+" here.
    return _fast_quote_plus(string).replace(" ", "+")
def url_quote(string, charset="utf-8", errors="strict", safe="/:", unsafe=""):
    """URL encode a single string with a given encoding.

    :param s: the string to quote.
    :param charset: the charset to be used.
    :param safe: an optional sequence of safe characters.
    :param unsafe: an optional sequence of unsafe characters.

    .. versionadded:: 0.9.2
       The `unsafe` parameter was added.
    """
    # Normalize everything down to bytes before quoting.
    if not isinstance(string, (text_type, bytes, bytearray)):
        string = text_type(string)
    if isinstance(string, text_type):
        string = string.encode(charset, errors)
    if isinstance(safe, text_type):
        safe = safe.encode(charset, errors)
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode(charset, errors)

    safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))
    quoted = bytearray()
    for byte in bytearray(string):
        if byte in safe:
            quoted.append(byte)
        else:
            quoted.extend(_bytetohex[byte])
    return to_native(bytes(quoted))
def url_quote_plus(string, charset="utf-8", errors="strict", safe=""):
    """URL encode a single string with the given encoding and convert
    whitespace to "+".

    :param s: The string to quote.
    :param charset: The charset to be used.
    :param safe: An optional sequence of safe characters.
    """
    # Treat " " as safe (and "+" as unsafe) so spaces survive quoting and
    # can then be replaced with "+".
    quoted = url_quote(string, charset, errors, safe + " ", "+")
    return quoted.replace(" ", "+")
def url_unparse(components):
    """The reverse operation to :meth:`url_parse`. This accepts arbitrary
    as well as :class:`URL` tuples and returns a URL as a string.

    :param components: the parsed URL as tuple which should be converted
                       into a URL string.
    """
    scheme, netloc, path, query, fragment = normalize_string_tuple(components)
    s = make_literal_wrapper(scheme)
    url = s("")

    # We generally treat file:///x and file:/x the same which is also
    # what browsers seem to do. This also allows us to ignore a schema
    # register for netloc utilization or having to differenciate between
    # empty and missing netloc.
    if netloc or (scheme and path.startswith(s("/"))):
        if path and path[:1] != s("/"):
            path = s("/") + path
        url = s("//") + (netloc or s("")) + path
    elif path:
        url += path
    if scheme:
        url = scheme + s(":") + url
    if query:
        url = url + s("?") + query
    if fragment:
        url = url + s("#") + fragment
    return url
def url_unquote(string, charset="utf-8", errors="replace", unsafe=""):
    """URL decode a single string with a given encoding. If the charset
    is set to `None` no unicode decoding is performed and raw bytes
    are returned.

    :param s: the string to unquote.
    :param charset: the charset of the query string. If set to `None`
        no unicode decoding will take place.
    :param errors: the error handling for the charset decoding.
    """
    raw = _unquote_to_bytes(string, unsafe)
    if charset is None:
        return raw
    return raw.decode(charset, errors)
def url_unquote_plus(s, charset="utf-8", errors="replace"):
    """URL decode a single string with the given `charset` and decode "+" to
    whitespace.

    Per default encoding errors are ignored. If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
    :exc:`HTTPUnicodeError` is raised.

    :param s: The string to unquote.
    :param charset: the charset of the query string. If set to `None`
        no unicode decoding will take place.
    :param errors: The error handling for the `charset` decoding.
    """
    # Pick the "+"/" " literals matching the input type (str vs bytes).
    if isinstance(s, text_type):
        plus, space = u"+", u" "
    else:
        plus, space = b"+", b" "
    return url_unquote(s.replace(plus, space), charset, errors)
def url_fix(s, charset="utf-8"):
    r"""Sometimes you get an URL by a user that just isn't a real URL because
    it contains unsafe characters like ' ' and so on. This function can fix
    some of the problems in a similar way browsers handle data entered by the
    user:

    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
    'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'

    :param s: the string with the URL to fix.
    :param charset: The target charset for the URL if the url was given as
        unicode string.
    """
    # First step is to switch to unicode processing and to convert
    # backslashes (which are invalid in URLs anyways) to slashes. This is
    # consistent with what Chrome does.
    s = to_unicode(s, charset, "replace").replace("\\", "/")

    # For the specific case that we look like a malformed windows URL
    # we want to fix this up manually:
    if s.startswith("file://") and s[7:8].isalpha() and s[8:10] in (":/", "|/"):
        s = "file:///" + s[7:]

    url = url_parse(s)
    # The safe sets keep already-quoted sequences and structural characters
    # ("&", "=", etc. in the query/fragment) intact.
    path = url_quote(url.path, charset, safe="/%+$!*'(),")
    qs = url_quote_plus(url.query, charset, safe=":&%=+$!*'(),")
    anchor = url_quote_plus(url.fragment, charset, safe=":&%=+$!*'(),")
    return to_native(url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor)))
# not-unreserved characters remain quoted when unquoting to IRI
_to_iri_unsafe = "".join([chr(c) for c in range(128) if c not in _always_safe])


def _codec_error_url_quote(e):
    """Used in :func:`uri_to_iri` after unquoting to re-quote any
    invalid bytes.
    """
    # Re-quote the undecodable slice and resume decoding after it.
    out = _fast_url_quote(e.object[e.start : e.end])

    if PY2:
        out = out.decode("utf-8")

    return out, e.end


# Registered once at import time; referenced by name in uri_to_iri().
codecs.register_error("werkzeug.url_quote", _codec_error_url_quote)
def uri_to_iri(uri, charset="utf-8", errors="werkzeug.url_quote"):
    """Convert a URI to an IRI. All valid UTF-8 characters are unquoted,
    leaving all reserved and invalid characters quoted. If the URL has
    a domain, it is decoded from Punycode.

    >>> uri_to_iri("http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF")
    'http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF'

    :param uri: The URI to convert.
    :param charset: The encoding to encode unquoted bytes with.
    :param errors: Error handler to use during ``bytes.encode``. By
        default, invalid bytes are left quoted.

    .. versionchanged:: 0.15
        All reserved and invalid characters remain quoted. Previously,
        only some reserved characters were preserved, and invalid bytes
        were replaced instead of left quoted.

    .. versionadded:: 0.6
    """
    if isinstance(uri, tuple):
        uri = url_unparse(uri)

    uri = url_parse(to_unicode(uri, charset))
    # The default error handler ("werkzeug.url_quote") re-quotes any bytes
    # that do not decode, instead of replacing or dropping them.
    path = url_unquote(uri.path, charset, errors, _to_iri_unsafe)
    query = url_unquote(uri.query, charset, errors, _to_iri_unsafe)
    fragment = url_unquote(uri.fragment, charset, errors, _to_iri_unsafe)
    return url_unparse((uri.scheme, uri.decode_netloc(), path, query, fragment))
# reserved characters remain unquoted when quoting to URI
_to_uri_safe = ":/?#[]@!$&'()*+,;=%"


def iri_to_uri(iri, charset="utf-8", errors="strict", safe_conversion=False):
    """Convert an IRI to a URI. All non-ASCII and unsafe characters are
    quoted. If the URL has a domain, it is encoded to Punycode.

    >>> iri_to_uri('http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF')
    'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF'

    :param iri: The IRI to convert.
    :param charset: The encoding of the IRI.
    :param errors: Error handler to use during ``bytes.encode``.
    :param safe_conversion: Return the URL unchanged if it only contains
        ASCII characters and no whitespace. See the explanation below.

    There is a general problem with IRI conversion with some protocols
    that are in violation of the URI specification. Consider the
    following two IRIs::

        magnet:?xt=uri:whatever
        itms-services://?action=download-manifest

    After parsing, we don't know if the scheme requires the ``//``,
    which is dropped if empty, but conveys different meanings in the
    final URL if it's present or not. In this case, you can use
    ``safe_conversion``, which will return the URL unchanged if it only
    contains ASCII characters and no whitespace. This can result in a
    URI with unquoted characters if it was not already quoted correctly,
    but preserves the URL's semantics. Werkzeug uses this for the
    ``Location`` header for redirects.

    .. versionchanged:: 0.15
        All reserved characters remain unquoted. Previously, only some
        reserved characters were left unquoted.

    .. versionchanged:: 0.9.6
       The ``safe_conversion`` parameter was added.

    .. versionadded:: 0.6
    """
    if isinstance(iri, tuple):
        iri = url_unparse(iri)

    if safe_conversion:
        # If we're not sure if it's safe to convert the URL, and it only
        # contains ASCII characters, return it unconverted.
        try:
            native_iri = to_native(iri)
            ascii_iri = native_iri.encode("ascii")

            # Only return if it doesn't have whitespace. (Why?)
            if len(ascii_iri.split()) == 1:
                return native_iri
        except UnicodeError:
            # Non-ASCII input: fall through to the full conversion below.
            pass

    iri = url_parse(to_unicode(iri, charset, errors))
    path = url_quote(iri.path, charset, errors, _to_uri_safe)
    query = url_quote(iri.query, charset, errors, _to_uri_safe)
    fragment = url_quote(iri.fragment, charset, errors, _to_uri_safe)
    return to_native(
        url_unparse((iri.scheme, iri.encode_netloc(), path, query, fragment))
    )
def url_decode(
    s,
    charset="utf-8",
    decode_keys=False,
    include_empty=True,
    errors="replace",
    separator="&",
    cls=None,
):
    """
    Parse a querystring and return it as :class:`MultiDict`. There is a
    difference in key decoding on different Python versions. On Python 3
    keys will always be fully decoded whereas on Python 2, keys will
    remain bytestrings if they fit into ASCII. On 2.x keys can be forced
    to be unicode by setting `decode_keys` to `True`.

    If the charset is set to `None` no unicode decoding will happen and
    raw bytes will be returned.

    Per default a missing value for a key will default to an empty key. If
    you don't want that behavior you can set `include_empty` to `False`.

    Per default encoding errors are ignored. If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
    `HTTPUnicodeError` is raised.

    .. versionchanged:: 0.5
       In previous versions ";" and "&" could be used for url decoding.
       This changed in 0.5 where only "&" is supported. If you want to
       use ";" instead a different `separator` can be provided.

       The `cls` parameter was added.

    :param s: a string with the query string to decode.
    :param charset: the charset of the query string. If set to `None`
        no unicode decoding will take place.
    :param decode_keys: Used on Python 2.x to control whether keys should
        be forced to be unicode objects. If set to `True`
        then keys will be unicode in all cases. Otherwise,
        they remain `str` if they fit into ASCII.
    :param include_empty: Set to `False` if you don't want empty values to
        appear in the dict.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``
    :param cls: an optional dict class to use. If this is not specified
        or `None` the default :class:`MultiDict` is used.
    """
    if cls is None:
        from .datastructures import MultiDict

        cls = MultiDict
    # Make the separator's type (str vs bytes) match the input's so the
    # split below works for both.
    if isinstance(s, text_type) and not isinstance(separator, text_type):
        separator = separator.decode(charset or "ascii")
    elif isinstance(s, bytes) and not isinstance(separator, bytes):
        separator = separator.encode(charset or "ascii")
    return cls(
        _url_decode_impl(
            s.split(separator), charset, decode_keys, include_empty, errors
        )
    )
def url_decode_stream(
    stream,
    charset="utf-8",
    decode_keys=False,
    include_empty=True,
    errors="replace",
    separator="&",
    cls=None,
    limit=None,
    return_iterator=False,
):
    """Like :func:`url_decode` but reads the querystring from a stream.

    *stream* and *limit* behave as in functions such as
    :func:`~werkzeug.wsgi.make_line_iter`.  The generator of pairs is
    fed directly to *cls*, so the data can be consumed while it is
    still being parsed.

    .. versionadded:: 0.8

    :param stream: a stream with the encoded querystring.
    :param charset: the charset of the query string.  If set to `None`
                    no unicode decoding will take place.
    :param decode_keys: Python 2 only -- force keys to unicode when `True`,
                        otherwise ASCII-safe keys remain `str`.
    :param include_empty: set to `False` to drop keys without a value.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``.
    :param cls: an optional dict class to use; defaults to
                :class:`MultiDict`.
    :param limit: the content length of the URL data; not necessary if a
                  limited stream is provided.
    :param return_iterator: when `True` the *cls* argument is ignored
                            and an iterator over all decoded pairs is
                            returned instead.
    """
    from .wsgi import make_chunk_iter

    decoder = _url_decode_impl(
        make_chunk_iter(stream, separator, limit),
        charset,
        decode_keys,
        include_empty,
        errors,
    )
    if return_iterator:
        return decoder
    if cls is None:
        from .datastructures import MultiDict

        cls = MultiDict
    return cls(decoder)
def _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors):
    """Yield decoded ``(key, value)`` tuples from an iterable of raw
    ``key=value`` chunks.  Empty chunks are skipped; chunks without an
    ``=`` are kept with an empty value unless *include_empty* is false.
    """
    for raw_pair in pair_iter:
        if not raw_pair:
            continue
        lit = make_literal_wrapper(raw_pair)
        eq = lit("=")
        if eq not in raw_pair:
            # a bare key with no "=value" part
            if not include_empty:
                continue
            key, value = raw_pair, lit("")
        else:
            key, value = raw_pair.split(eq, 1)
        key = url_unquote_plus(key, charset, errors)
        if PY2 and charset is not None and not decode_keys:
            # keep ASCII-safe keys as native str on Python 2
            key = try_coerce_native(key)
        yield key, url_unquote_plus(value, charset, errors)
def url_encode(
    obj, charset="utf-8", encode_keys=False, sort=False, key=None, separator=b"&"
):
    """URL encode a dict/`MultiDict` into a query string.

    Values that are `None` do not appear in the result.  By default only
    values are encoded into the target charset; set ``encode_keys=True``
    to support unicode keys as well (ignored on Python 3).  With
    ``sort=True`` the items are ordered by *key* or the default sorting
    algorithm.

    .. versionadded:: 0.5
        `sort`, `key`, and `separator` were added.

    :param obj: the object to encode into a query string.
    :param charset: the charset of the query string.
    :param encode_keys: set to `True` if you have unicode keys (ignored
                        on Python 3.x).
    :param sort: set to `True` to sort the parameters by *key*.
    :param separator: the separator to be used for the pairs.
    :param key: an optional sort function; see :func:`sorted`.
    """
    pieces = _url_encode_impl(obj, charset, encode_keys, sort, key)
    return to_native(separator, "ascii").join(pieces)
def url_encode_stream(
    obj,
    stream=None,
    charset="utf-8",
    encode_keys=False,
    sort=False,
    key=None,
    separator=b"&",
):
    """Like :meth:`url_encode` but writes the results to a stream.

    When *stream* is `None` a generator over all encoded pairs is
    returned instead, and *separator* is ignored.

    .. versionadded:: 0.8

    :param obj: the object to encode into a query string.
    :param stream: a stream to write the encoded object into, or `None`
                   to get an iterator over the encoded pairs.
    :param charset: the charset of the query string.
    :param encode_keys: set to `True` if you have unicode keys (ignored
                        on Python 3.x).
    :param sort: set to `True` to sort the parameters by *key*.
    :param separator: the separator to be used for the pairs.
    :param key: an optional sort function; see :func:`sorted`.
    """
    separator = to_native(separator, "ascii")
    pieces = _url_encode_impl(obj, charset, encode_keys, sort, key)
    if stream is None:
        return pieces
    # Write the separator between pairs, but not before the first one.
    first = True
    for piece in pieces:
        if not first:
            stream.write(separator)
        stream.write(piece)
        first = False
def url_join(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter (relative-reference resolution in the
    spirit of RFC 3986 section 5).

    :param base: the base URL for the join operation.
    :param url: the URL to join.
    :param allow_fragments: indicates whether fragments should be allowed.
    """
    if isinstance(base, tuple):
        base = url_unparse(base)
    if isinstance(url, tuple):
        url = url_unparse(url)

    # Coerce both arguments to the same string type (bytes vs. text) and
    # get a wrapper that produces literals of that type.
    base, url = normalize_string_tuple((base, url))
    s = make_literal_wrapper(base)

    if not base:
        return url
    if not url:
        return base

    bscheme, bnetloc, bpath, bquery, bfragment = url_parse(
        base, allow_fragments=allow_fragments
    )
    scheme, netloc, path, query, fragment = url_parse(url, bscheme, allow_fragments)
    # A different scheme means *url* is already absolute in its own right.
    if scheme != bscheme:
        return url
    if netloc:
        return url_unparse((scheme, netloc, path, query, fragment))
    netloc = bnetloc

    # Decide which path segments to merge: absolute path replaces the base
    # path entirely; an empty path keeps the base path (and base query if
    # the reference has none); a relative path is appended to the base
    # path's directory.
    if path[:1] == s("/"):
        segments = path.split(s("/"))
    elif not path:
        segments = bpath.split(s("/"))
        if not query:
            query = bquery
    else:
        segments = bpath.split(s("/"))[:-1] + path.split(s("/"))

    # If the rightmost part is "./" we want to keep the slash but
    # remove the dot.
    if segments[-1] == s("."):
        segments[-1] = s("")

    # Resolve ".." and "."
    segments = [segment for segment in segments if segment != s(".")]
    # Repeatedly collapse the first "parent/.." pair found; the inner
    # while/else breaks out of the outer loop once no pair remains.
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if segments[i] == s("..") and segments[i - 1] not in (s(""), s("..")):
                del segments[i - 1 : i + 1]
                break
            i += 1
        else:
            break

    # Remove trailing ".." if the URL is absolute
    unwanted_marker = [s(""), s("..")]
    while segments[:2] == unwanted_marker:
        del segments[1]

    path = s("/").join(segments)
    return url_unparse((scheme, netloc, path, query, fragment))
class Href(object):
    """Implements a callable that constructs URLs with the given base. The
    function can be called with any number of positional and keyword
    arguments which then are used to assemble the URL.  Works with URLs
    and posix paths.

    Positional arguments are appended as individual segments to
    the path of the URL:

    >>> href = Href('/foo')
    >>> href('bar', 23)
    '/foo/bar/23'
    >>> href('foo', bar=23)
    '/foo/foo?bar=23'

    If any of the arguments (positional or keyword) evaluates to `None` it
    will be skipped.  If no keyword arguments are given the last argument
    can be a :class:`dict` or :class:`MultiDict` (or any other dict subclass),
    otherwise the keyword arguments are used for the query parameters, cutting
    off the first trailing underscore of the parameter name:

    >>> href(is_=42)
    '/foo?is=42'
    >>> href({'foo': 'bar'})
    '/foo?foo=bar'

    Combining of both methods is not allowed:

    >>> href({'foo': 'bar'}, bar=42)
    Traceback (most recent call last):
      ...
    TypeError: keyword arguments and query-dicts can't be combined

    Accessing attributes on the href object creates a new href object with
    the attribute name as prefix:

    >>> bar_href = href.bar
    >>> bar_href("blub")
    '/foo/bar/blub'

    If `sort` is set to `True` the items are sorted by `key` or the default
    sorting algorithm:

    >>> href = Href("/", sort=True)
    >>> href(a=1, b=2, c=3)
    '/?a=1&b=2&c=3'

    .. versionadded:: 0.5
        `sort` and `key` were added.
    """

    def __init__(self, base="./", charset="utf-8", sort=False, key=None):
        if not base:
            base = "./"
        self.base = base
        self.charset = charset
        self.sort = sort
        self.key = key

    def __getattr__(self, name):
        # Refuse dunder lookups so protocol probes (copy, pickle, ...)
        # raise AttributeError instead of building bogus child URLs.
        if name[:2] == "__":
            raise AttributeError(name)
        base = self.base
        if base[-1:] != "/":
            base += "/"
        return Href(url_join(base, name), self.charset, self.sort, self.key)

    def __call__(self, *path, **query):
        # A trailing dict positional argument is the query -- but only if
        # no keyword arguments were given as well.
        if path and isinstance(path[-1], dict):
            if query:
                raise TypeError("keyword arguments and query-dicts can't be combined")
            query, path = path[-1], path[:-1]
        elif query:
            # Strip exactly one trailing underscore so reserved words
            # like ``is_`` can be used as parameter names.
            query = dict(
                [(k.endswith("_") and k[:-1] or k, v) for k, v in query.items()]
            )
        # `None` segments are skipped; each remaining segment is quoted.
        path = "/".join(
            [
                to_unicode(url_quote(x, self.charset), "ascii")
                for x in path
                if x is not None
            ]
        ).lstrip("/")
        rv = self.base
        if path:
            if not rv.endswith("/"):
                rv += "/"
            rv = url_join(rv, "./" + path)
        if query:
            rv += "?" + to_unicode(
                url_encode(query, self.charset, sort=self.sort, key=self.key), "ascii"
            )
        return to_native(rv)
| apache-2.0 |
alphaBenj/zipline | zipline/pipeline/api_utils.py | 4 | 1654 | """
Utilities for creating public APIs (e.g. argument validation decorators).
"""
from zipline.utils.input_validation import preprocess
def restrict_to_dtype(dtype, message_template):
    """
    Build a decorator restricting a Term method to Terms of *dtype*.

    This is conceptually similar to
    zipline.utils.input_validation.expect_dtypes, but provides more
    flexibility for error messages that specifically target Term
    methods.

    Parameters
    ----------
    dtype : numpy.dtype
        The dtype on which the decorated method may be called.
    message_template : str
        A template for the error message to be raised.
        `message_template.format` will be called with keyword arguments
        `method_name`, `expected_dtype`, and `received_dtype`.

    Examples
    --------
    @restrict_to_dtype(
        dtype=float64_dtype,
        message_template=(
            "{method_name}() was called on a factor of dtype {received_dtype}."
            "{method_name}() requires factors of dtype{expected_dtype}."
        ),
    )
    def some_factor_method(self, ...):
        self.stuff_that_requires_being_float64(...)
    """
    def processor(term_method, _, term_instance):
        # Validate the dtype of ``self`` before the method body runs.
        actual_dtype = term_instance.dtype
        if actual_dtype == dtype:
            return term_instance
        raise TypeError(
            message_template.format(
                method_name=term_method.__name__,
                expected_dtype=dtype.name,
                received_dtype=actual_dtype,
            )
        )

    return preprocess(self=processor)
| apache-2.0 |
a-parhom/edx-platform | openedx/core/djangoapps/external_auth/tests/test_shib.py | 3 | 30178 | # -*- coding: utf-8 -*-
"""
Tests for Shibboleth Authentication
@jbau
"""
import unittest
from importlib import import_module
from urllib import urlencode
from ddt import ddt, data
from django.conf import settings
from django.http import HttpResponseRedirect
from django.test import TestCase
from django.test.client import RequestFactory, Client as DjangoTestClient
from django.test.utils import override_settings
from django.urls import reverse
from django.contrib.auth.models import AnonymousUser, User
from openedx.core.djangoapps.external_auth.models import ExternalAuthMap
from openedx.core.djangoapps.external_auth.views import (
shib_login, course_specific_login, course_specific_register, _flatten_to_ascii
)
from openedx.core.djangoapps.user_api import accounts as accounts_settings
from mock import patch
from six import text_type
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.views import change_enrollment
from student.models import UserProfile, CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore import ModuleStoreEnum
# Shib is supposed to provide 'REMOTE_USER', 'givenName', 'sn', 'mail', 'Shib-Identity-Provider'
# attributes via request.META.  We can count on 'Shib-Identity-Provider' and 'REMOTE_USER' being present
# b/c of how mod_shib works, but should test the behavior with the rest of the attributes present/missing.
# For the sake of python convention we'll make all of these variable names ALL_CAPS.
# These values would all be returned from request.META, so they need to be str, not unicode.
# Each optional attribute is exercised as missing (None), empty (''), and populated.
IDP = 'https://idp.stanford.edu/'
REMOTE_USER = 'test_user@stanford.edu'
MAILS = [None, '', 'test_user@stanford.edu']  # unicode shouldn't be in emails, would fail django's email validator
DISPLAYNAMES = [None, '', 'Jason 包']
GIVENNAMES = [None, '', 'jasön; John; bob']  # At Stanford, the givenNames can be a list delimited by ';'
SNS = [None, '', '包; smith']  # At Stanford, the sns can be a list delimited by ';'
def gen_all_identities():
    """
    Yield every combination of test identity inputs.

    Each generated item is a dict modeling what a shib IdP could
    potentially pass to django via request.META, i.e. setting (or not
    setting) request.META['givenName'], etc.
    """
    def _make_meta(mail, display_name, given_name, surname):
        """Build one test identity dict; ``None`` means 'attribute absent'."""
        meta = {'Shib-Identity-Provider': IDP,
                'REMOTE_USER': REMOTE_USER}
        for attr, value in (('displayName', display_name),
                            ('mail', mail),
                            ('givenName', given_name),
                            ('sn', surname)):
            if value is not None:
                meta[attr] = value
        return meta

    for mail in MAILS:
        for given_name in GIVENNAMES:
            for surname in SNS:
                for display_name in DISPLAYNAMES:
                    yield _make_meta(mail, display_name, given_name, surname)
@ddt
@override_settings(SESSION_ENGINE='django.contrib.sessions.backends.cache')
class ShibSPTest(CacheIsolationTestCase):
    """
    Tests for the Shibboleth SP, which communicates via request.META
    (Apache environment variables set by mod_shib)
    """
    shard = 3

    ENABLED_CACHES = ['default']

    request_factory = RequestFactory()

    def setUp(self):
        super(ShibSPTest, self).setUp()
        self.test_user_id = ModuleStoreEnum.UserID.test

    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    def test_exception_shib_login(self):
        """
        Tests that we get the error page when there is no REMOTE_USER
        or Shib-Identity-Provider in request.META
        """
        no_remote_user_response = self.client.get(reverse('shib-login'), HTTP_SHIB_IDENTITY_PROVIDER=IDP)
        self.assertEqual(no_remote_user_response.status_code, 403)
        self.assertIn("identity server did not return your ID information", no_remote_user_response.content)

        no_idp_response = self.client.get(reverse('shib-login'), HTTP_REMOTE_USER=REMOTE_USER)
        self.assertEqual(no_idp_response.status_code, 403)
        self.assertIn("identity server did not return your ID information", no_idp_response.content)

    def _assert_shib_login_is_logged(self, audit_log_call, remote_user):
        """Asserts that shibboleth login attempt is being logged"""
        remote_user = _flatten_to_ascii(remote_user)  # django usernames have to be ascii
        method_name, args, _kwargs = audit_log_call
        self.assertEquals(method_name, 'info')
        self.assertEquals(len(args), 1)
        self.assertIn(u'logged in via Shibboleth', args[0])
        self.assertIn(remote_user, args[0])

    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    def test_shib_login(self):
        """
        Tests that:
          * shib credentials that match an existing ExternalAuthMap with a linked active user logs the user in
          * shib credentials that match an existing ExternalAuthMap with a linked inactive user shows error page
          * shib credentials that match an existing ExternalAuthMap without a linked user and also match the email
            of an existing user without an existing ExternalAuthMap links the two and log the user in
          * shib credentials that match an existing ExternalAuthMap without a linked user and also match the email
            of an existing user that already has an ExternalAuthMap causes an error (403)
          * shib credentials that do not match an existing ExternalAuthMap causes the registration form to appear
        """
        # pylint: disable=too-many-statements
        user_w_map = UserFactory.create(email='withmap@stanford.edu')
        extauth = ExternalAuthMap(external_id='withmap@stanford.edu',
                                  external_email='',
                                  external_domain='shib:https://idp.stanford.edu/',
                                  external_credentials="",
                                  user=user_w_map)
        user_wo_map = UserFactory.create(email='womap@stanford.edu')
        user_w_map.save()
        user_wo_map.save()
        extauth.save()

        inactive_user = UserFactory.create(email='inactive@stanford.edu')
        inactive_user.is_active = False
        inactive_extauth = ExternalAuthMap(external_id='inactive@stanford.edu',
                                           external_email='',
                                           external_domain='shib:https://idp.stanford.edu/',
                                           external_credentials="",
                                           user=inactive_user)
        inactive_user.save()
        inactive_extauth.save()

        idps = ['https://idp.stanford.edu/', 'https://someother.idp.com/']
        remote_users = ['withmap@stanford.edu', 'womap@stanford.edu',
                        'testuser2@someother_idp.com', 'inactive@stanford.edu']

        # Cross product of IdP and remote user; each combination drives a
        # different branch of the assertions below.
        for idp in idps:
            for remote_user in remote_users:
                self.client.logout()
                with patch('openedx.core.djangoapps.external_auth.views.AUDIT_LOG') as mock_audit_log:
                    response = self.client.get(
                        reverse('shib-login'),
                        **{
                            'Shib-Identity-Provider': idp,
                            'mail': remote_user,
                            'REMOTE_USER': remote_user,
                        }
                    )
                audit_log_calls = mock_audit_log.method_calls

                if idp == "https://idp.stanford.edu/" and remote_user == 'withmap@stanford.edu':
                    # existing map + active linked user: logged in
                    self.assertRedirects(response, '/dashboard')
                    self.assertEquals(int(self.client.session['_auth_user_id']), user_w_map.id)
                    # verify logging:
                    self.assertEquals(len(audit_log_calls), 2)
                    self._assert_shib_login_is_logged(audit_log_calls[0], remote_user)
                    method_name, args, _kwargs = audit_log_calls[1]
                    self.assertEquals(method_name, 'info')
                    self.assertEquals(len(args), 1)
                    self.assertIn(u'Login success', args[0])
                    self.assertIn(remote_user, args[0])
                elif idp == "https://idp.stanford.edu/" and remote_user == 'inactive@stanford.edu':
                    # existing map + inactive linked user: error page
                    self.assertEqual(response.status_code, 403)
                    self.assertIn("Account not yet activated: please look for link in your email", response.content)
                    # verify logging:
                    self.assertEquals(len(audit_log_calls), 2)
                    self._assert_shib_login_is_logged(audit_log_calls[0], remote_user)
                    method_name, args, _kwargs = audit_log_calls[1]
                    self.assertEquals(method_name, 'warning')
                    self.assertEquals(len(args), 1)
                    self.assertIn(u'is not active after external login', args[0])
                    # self.assertEquals(remote_user, args[1])
                elif idp == "https://idp.stanford.edu/" and remote_user == 'womap@stanford.edu':
                    # matching email but no map yet: map gets created, user logged in
                    self.assertIsNotNone(ExternalAuthMap.objects.get(user=user_wo_map))
                    self.assertRedirects(response, '/dashboard')
                    self.assertEquals(int(self.client.session['_auth_user_id']), user_wo_map.id)
                    # verify logging:
                    self.assertEquals(len(audit_log_calls), 2)
                    self._assert_shib_login_is_logged(audit_log_calls[0], remote_user)
                    method_name, args, _kwargs = audit_log_calls[1]
                    self.assertEquals(method_name, 'info')
                    self.assertEquals(len(args), 1)
                    self.assertIn(u'Login success', args[0])
                    self.assertIn(remote_user, args[0])
                elif idp == "https://someother.idp.com/" and remote_user in \
                        ['withmap@stanford.edu', 'womap@stanford.edu', 'inactive@stanford.edu']:
                    # email already owned by a different external account: 403
                    self.assertEqual(response.status_code, 403)
                    self.assertIn("You have already created an account using an external login", response.content)
                    # no audit logging calls
                    self.assertEquals(len(audit_log_calls), 0)
                else:
                    # unknown credentials: registration form is shown
                    self.assertEqual(response.status_code, 200)
                    self.assertContains(response,
                                        (u"Preferences for {platform_name}"
                                         .format(platform_name=settings.PLATFORM_NAME)))
                    # no audit logging calls
                    self.assertEquals(len(audit_log_calls), 0)

    def _test_auto_activate_user_with_flag(self, log_user_string="inactive@stanford.edu"):
        """
        Tests that FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'] means extauth automatically
        linked users, activates them, and logs them in
        """
        inactive_user = UserFactory.create(email='inactive@stanford.edu')
        if not log_user_string:
            # when PII is squelched, the log line identifies the user by id
            log_user_string = "user.id: {}".format(inactive_user.id)
        inactive_user.is_active = False
        inactive_user.save()
        request = self.request_factory.get('/shib-login')
        request.session = import_module(settings.SESSION_ENGINE).SessionStore()  # empty session
        request.META.update({
            'Shib-Identity-Provider': 'https://idp.stanford.edu/',
            'REMOTE_USER': 'inactive@stanford.edu',
            'mail': 'inactive@stanford.edu'
        })
        request.user = AnonymousUser()
        with patch('openedx.core.djangoapps.external_auth.views.AUDIT_LOG') as mock_audit_log:
            response = shib_login(request)
        audit_log_calls = mock_audit_log.method_calls
        # reload user from db, since the view function works via db side-effects
        inactive_user = User.objects.get(id=inactive_user.id)
        self.assertIsNotNone(ExternalAuthMap.objects.get(user=inactive_user))
        self.assertTrue(inactive_user.is_active)
        self.assertIsInstance(response, HttpResponseRedirect)
        self.assertEqual(request.user, inactive_user)
        self.assertEqual(response['Location'], '/dashboard')
        # verify logging:
        self.assertEquals(len(audit_log_calls), 3)
        self._assert_shib_login_is_logged(audit_log_calls[0], log_user_string)
        method_name, args, _kwargs = audit_log_calls[2]
        self.assertEquals(method_name, 'info')
        self.assertEquals(len(args), 1)
        self.assertIn(u'Login success', args[0])
        self.assertIn(log_user_string, args[0])

    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    @patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': True, 'SQUELCH_PII_IN_LOGS': False})
    def test_extauth_auto_activate_user_with_flag_no_squelch(self):
        """
        Wrapper to run base_test_extauth_auto_activate_user_with_flag with {'SQUELCH_PII_IN_LOGS': False}
        """
        self._test_auto_activate_user_with_flag(log_user_string="inactive@stanford.edu")

    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    @patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': True, 'SQUELCH_PII_IN_LOGS': True})
    def test_extauth_auto_activate_user_with_flag_squelch(self):
        """
        Wrapper to run base_test_extauth_auto_activate_user_with_flag with {'SQUELCH_PII_IN_LOGS': True}
        """
        self._test_auto_activate_user_with_flag(log_user_string=None)

    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    @data(*gen_all_identities())
    def test_registration_form(self, identity):
        """
        Tests the registration form showing up with the proper parameters.

        Uses django test client for its session support
        """
        client = DjangoTestClient()
        # identity k/v pairs will show up in request.META
        response = client.get(path='/shib-login/', data={}, follow=False, **identity)
        self.assertEquals(response.status_code, 200)
        # email input is only rendered when shib did not supply a mail attribute
        mail_input_html = '<input class="" id="email" type="email" name="email"'
        if not identity.get('mail'):
            self.assertContains(response, mail_input_html)
        else:
            self.assertNotContains(response, mail_input_html)
        sn_empty = not identity.get('sn')
        given_name_empty = not identity.get('givenName')
        displayname_empty = not identity.get('displayName')
        # name input is only rendered when no name-related attribute was supplied
        fullname_input_html = '<input id="name" type="text" name="name"'
        if sn_empty and given_name_empty and displayname_empty:
            self.assertContains(response, fullname_input_html)
        else:
            self.assertNotContains(response, fullname_input_html)

    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    @data(*gen_all_identities())
    def test_registration_form_submit(self, identity):
        """
        Tests user creation after the registration form that pops is submitted.  If there is no shib
        ExternalAuthMap in the session, then the created user should take the username and email from the
        request.

        Uses django test client for its session support
        """
        # First we pop the registration form
        self.client.get(path='/shib-login/', data={}, follow=False, **identity)
        # Then we have the user answer the registration form
        # These are unicode because request.POST returns unicode
        postvars = {'email': u'post_email@stanford.edu',
                    'username': u'post_username',  # django usernames can't be unicode
                    'password': u'post_pássword',
                    'name': u'post_náme',
                    'terms_of_service': u'true',
                    'honor_code': u'true'}

        with patch('openedx.core.djangoapps.user_authn.views.register.AUDIT_LOG') as mock_audit_log:
            self.client.post('/create_account', data=postvars)

        mail = identity.get('mail')

        # verify logging of login happening during account creation:
        audit_log_calls = mock_audit_log.method_calls
        self.assertEquals(len(audit_log_calls), 3)
        method_name, args, _kwargs = audit_log_calls[0]
        self.assertEquals(method_name, 'info')
        self.assertEquals(len(args), 2)
        self.assertIn(u'User registered with external_auth', args[0])
        self.assertEquals(u'post_username', args[1])
        method_name, args, _kwargs = audit_log_calls[1]
        self.assertEquals(method_name, 'info')
        self.assertEquals(len(args), 3)
        self.assertIn(u'Updated ExternalAuthMap for ', args[0])
        self.assertEquals(u'post_username', args[1])
        self.assertEquals(u'test_user@stanford.edu', args[2].external_id)
        method_name, args, _kwargs = audit_log_calls[2]
        self.assertEquals(method_name, 'info')
        self.assertEquals(len(args), 1)
        self.assertIn(u'Login success on new account creation', args[0])
        self.assertIn(u'post_username', args[0])

        user = User.objects.get(id=self.client.session['_auth_user_id'])

        # check that the created user has the right email, either taken from shib or user input
        if mail:
            self.assertEqual(user.email, mail)
            self.assertEqual(list(User.objects.filter(email=postvars['email'])), [])
            self.assertIsNotNone(User.objects.get(email=mail))  # get enforces only 1 such user
        else:
            self.assertEqual(user.email, postvars['email'])
            self.assertEqual(list(User.objects.filter(email=mail)), [])
            self.assertIsNotNone(User.objects.get(email=postvars['email']))  # get enforces only 1 such user

        # check that the created user profile has the right name, either taken from shib or user input
        profile = UserProfile.objects.get(user=user)
        external_name = self.client.session['ExternalAuthMap'].external_name
        displayname_empty = not identity.get('displayName')
        if displayname_empty:
            if len(external_name.strip()) < accounts_settings.NAME_MIN_LENGTH:
                self.assertEqual(profile.name, postvars['name'])
            else:
                self.assertEqual(profile.name, external_name.strip())
                self.assertNotIn(u';', profile.name)
        else:
            self.assertEqual(profile.name, self.client.session['ExternalAuthMap'].external_name)
            self.assertEqual(profile.name, identity.get('displayName').decode('utf-8'))
@ddt
@override_settings(SESSION_ENGINE='django.contrib.sessions.backends.cache')
class ShibSPTestModifiedCourseware(ModuleStoreTestCase):
    """
    Tests for the Shibboleth SP which modify the courseware
    """
    ENABLED_CACHES = ['default', 'mongo_metadata_inheritance', 'loc_cache']

    request_factory = RequestFactory()

    def setUp(self):
        super(ShibSPTestModifiedCourseware, self).setUp()
        self.test_user_id = ModuleStoreEnum.UserID.test

    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    @data(None, "", "shib:https://idp.stanford.edu/")
    def test_course_specific_login_and_reg(self, domain):
        """
        Tests that the correct course specific login and registration urls work for shib
        """
        course = CourseFactory.create(
            org='MITx',
            number='999',
            display_name='Robot Super Course',
            user_id=self.test_user_id,
        )

        # Test for cases where course is found
        # set domains
        # temporarily set the branch to draft-preferred so we can update the course
        with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
            course.enrollment_domain = domain
            self.store.update_item(course, self.test_user_id)

        # setting location to test that GET params get passed through
        login_request = self.request_factory.get('/course_specific_login/MITx/999/Robot_Super_Course' +
                                                 '?course_id=MITx/999/Robot_Super_Course' +
                                                 '&enrollment_action=enroll')
        # NOTE(review): _reg_request is built for symmetry but both views are
        # exercised with login_request below; the asserted redirect targets
        # depend only on the query string, which matches login_request.
        _reg_request = self.request_factory.get('/course_specific_register/MITx/999/Robot_Super_Course' +
                                                '?course_id=MITx/999/course/Robot_Super_Course' +
                                                '&enrollment_action=enroll')

        login_response = course_specific_login(login_request, 'MITx/999/Robot_Super_Course')
        reg_response = course_specific_register(login_request, 'MITx/999/Robot_Super_Course')

        if domain and "shib" in domain:
            # shib-gated course: both views redirect to the shib login
            self.assertIsInstance(login_response, HttpResponseRedirect)
            self.assertEqual(login_response['Location'],
                             reverse('shib-login') +
                             '?course_id=MITx/999/Robot_Super_Course' +
                             '&enrollment_action=enroll')
            # Fixed copy-paste bug: assert on reg_response here (the original
            # checked login_response twice).
            self.assertIsInstance(reg_response, HttpResponseRedirect)
            self.assertEqual(reg_response['Location'],
                             reverse('shib-login') +
                             '?course_id=MITx/999/Robot_Super_Course' +
                             '&enrollment_action=enroll')
        else:
            # open course: both views redirect to the standard pages
            self.assertIsInstance(login_response, HttpResponseRedirect)
            self.assertEqual(login_response['Location'],
                             reverse('signin_user') +
                             '?course_id=MITx/999/Robot_Super_Course' +
                             '&enrollment_action=enroll')
            # Fixed copy-paste bug: assert on reg_response here as well.
            self.assertIsInstance(reg_response, HttpResponseRedirect)
            self.assertEqual(reg_response['Location'],
                             reverse('register_user') +
                             '?course_id=MITx/999/Robot_Super_Course' +
                             '&enrollment_action=enroll')

        # Now test for non-existent course
        # setting location to test that GET params get passed through
        login_request = self.request_factory.get('/course_specific_login/DNE/DNE/DNE' +
                                                 '?course_id=DNE/DNE/DNE' +
                                                 '&enrollment_action=enroll')
        _reg_request = self.request_factory.get('/course_specific_register/DNE/DNE/DNE' +
                                                '?course_id=DNE/DNE/DNE/Robot_Super_Course' +
                                                '&enrollment_action=enroll')

        login_response = course_specific_login(login_request, 'DNE/DNE/DNE')
        reg_response = course_specific_register(login_request, 'DNE/DNE/DNE')

        # unknown courses always fall back to the standard pages
        self.assertIsInstance(login_response, HttpResponseRedirect)
        self.assertEqual(login_response['Location'],
                         reverse('signin_user') +
                         '?course_id=DNE/DNE/DNE' +
                         '&enrollment_action=enroll')
        # Fixed copy-paste bug: assert on reg_response, not login_response.
        self.assertIsInstance(reg_response, HttpResponseRedirect)
        self.assertEqual(reg_response['Location'],
                         reverse('register_user') +
                         '?course_id=DNE/DNE/DNE' +
                         '&enrollment_action=enroll')

    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    def test_enrollment_limit_by_domain(self):
        """
        Tests that the enrollmentDomain setting is properly limiting enrollment to those who have
        the proper external auth
        """
        # create 2 course, one with limited enrollment one without
        shib_course = CourseFactory.create(
            org='Stanford',
            number='123',
            display_name='Shib Only',
            enrollment_domain='shib:https://idp.stanford.edu/',
            user_id=self.test_user_id,
        )
        open_enroll_course = CourseFactory.create(
            org='MITx',
            number='999',
            display_name='Robot Super Course',
            enrollment_domain='',
            user_id=self.test_user_id,
        )

        # create 3 kinds of students, external_auth matching shib_course, external_auth not matching, no external auth
        shib_student = UserFactory.create()
        shib_student.save()
        extauth = ExternalAuthMap(external_id='testuser@stanford.edu',
                                  external_email='',
                                  external_domain='shib:https://idp.stanford.edu/',
                                  external_credentials="",
                                  user=shib_student)
        extauth.save()

        other_ext_student = UserFactory.create()
        other_ext_student.username = "teststudent2"
        other_ext_student.email = "teststudent2@other.edu"
        other_ext_student.save()
        extauth = ExternalAuthMap(external_id='testuser1@other.edu',
                                  external_email='',
                                  external_domain='shib:https://other.edu/',
                                  external_credentials="",
                                  user=other_ext_student)
        extauth.save()

        int_student = UserFactory.create()
        int_student.username = "teststudent3"
        int_student.email = "teststudent3@gmail.com"
        int_student.save()

        # Tests the two case for courses, limited and not
        for course in [shib_course, open_enroll_course]:
            for student in [shib_student, other_ext_student, int_student]:
                request = self.request_factory.post(
                    '/change_enrollment',
                    data={'enrollment_action': 'enroll', 'course_id': text_type(course.id)}
                )
                request.user = student
                response = change_enrollment(request)
                # If course is not limited or student has correct shib extauth then enrollment should be allowed
                if course is open_enroll_course or student is shib_student:
                    self.assertEqual(response.status_code, 200)
                    self.assertTrue(CourseEnrollment.is_enrolled(student, course.id))
                else:
                    self.assertEqual(response.status_code, 400)
                    self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))

    @unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
    def test_shib_login_enrollment(self):
        """
        A functionality test that a student with an existing shib login
        can auto-enroll in a class with GET or POST params.  Also tests the direction functionality of
        the 'next' GET/POST param
        """
        student = UserFactory.create()
        extauth = ExternalAuthMap(external_id='testuser@stanford.edu',
                                  external_email='',
                                  external_domain='shib:https://idp.stanford.edu/',
                                  external_credentials="",
                                  internal_password="password",
                                  user=student)
        student.set_password("password")
        student.save()
        extauth.save()

        course = CourseFactory.create(
            org='Stanford',
            number='123',
            display_name='Shib Only',
            enrollment_domain='shib:https://idp.stanford.edu/',
            user_id=self.test_user_id,
        )

        # use django test client for sessions and url processing
        # no enrollment before trying
        self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))
        self.client.logout()
        params = [
            ('course_id', text_type(course.id)),
            ('enrollment_action', 'enroll'),
            ('next', '/testredirect')
        ]
        request_kwargs = {'path': '/shib-login/',
                          'data': dict(params),
                          'follow': False,
                          'REMOTE_USER': 'testuser@stanford.edu',
                          'Shib-Identity-Provider': 'https://idp.stanford.edu/',
                          'HTTP_ACCEPT': "text/html"}
        response = self.client.get(**request_kwargs)
        # successful login is a redirect to the URL that handles auto-enrollment
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['location'],
                         '/account/finish_auth?{}'.format(urlencode(params)))
class ShibUtilFnTest(TestCase):
    """
    Tests util functions in shib module
    """
    def test__flatten_to_ascii(self):
        # Python 2 only: relies on the `unicode` builtin and implicit
        # byte-string literals for the STR_DIACRI fixture.
        DIACRITIC = u"àèìòùÀÈÌÒÙáéíóúýÁÉÍÓÚÝâêîôûÂÊÎÔÛãñõÃÑÕäëïöüÿÄËÏÖÜŸåÅçÇ"  # pylint: disable=invalid-name
        STR_DIACRI = "àèìòùÀÈÌÒÙáéíóúýÁÉÍÓÚÝâêîôûÂÊÎÔÛãñõÃÑÕäëïöüÿÄËÏÖÜŸåÅçÇ"  # pylint: disable=invalid-name
        FLATTENED = u"aeiouAEIOUaeiouyAEIOUYaeiouAEIOUanoANOaeiouyAEIOUYaAcC"  # pylint: disable=invalid-name
        self.assertEqual(_flatten_to_ascii('jasön'), 'jason')  # umlaut
        self.assertEqual(_flatten_to_ascii('Jason包'), 'Jason')  # mandarin, so it just gets dropped
        self.assertEqual(_flatten_to_ascii('abc'), 'abc')  # pass through
        # unicode input must come back as unicode with diacritics stripped
        unicode_test = _flatten_to_ascii(DIACRITIC)
        self.assertEqual(unicode_test, FLATTENED)
        self.assertIsInstance(unicode_test, unicode)
        # byte-string input must come back as a byte string
        str_test = _flatten_to_ascii(STR_DIACRI)
        self.assertEqual(str_test, FLATTENED)
        self.assertIsInstance(str_test, str)
| agpl-3.0 |
zouyapeng/horizon_change | openstack_dashboard/test/tests/utils.py | 31 | 1368 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack_dashboard.test import helpers as test
from openstack_dashboard.utils import filters
class UtilsFilterTests(test.TestCase):
    """Exercise ``filters.get_int_or_uuid`` with valid and invalid inputs."""

    def test_accept_valid_integer(self):
        value = 100
        self.assertEqual(value, filters.get_int_or_uuid(value))

    def test_accept_valid_integer_string(self):
        value = '100'
        self.assertEqual(int(value), filters.get_int_or_uuid(value))

    def test_accept_valid_uuid(self):
        value = str(uuid.uuid4())
        self.assertEqual(value, filters.get_int_or_uuid(value))

    def test_reject_random_string(self):
        self.assertRaises(ValueError, filters.get_int_or_uuid, '55WbJTpJDf')
| apache-2.0 |
firmlyjin/brython | www/src/Lib/_string.py | 625 | 1112 | """string helper module"""
import re
class __loader__(object):
    # Placeholder kept for layout compatibility with CPython's _string module.
    pass
def formatter_field_name_split(fieldname):
    """Split a format-string field name into its dotted components.

    Returns ``(first, rest)`` where ``first`` is the leading component
    (converted to ``int`` when purely numeric) and ``rest`` is an iterator
    of ``(item, is_attr)`` tuples for the remaining '.'-separated parts.

    Bug fixes versus the previous version:
    - it iterated the *characters* of ``fieldname`` (``for _name in
      fieldname``), so any multi-character name was split into letters;
    - ``re.match('\\d+', item)`` matched prefixes like ``"1a"`` and then
      ``int("1a")`` raised ValueError; ``str.isdigit`` checks the whole part.
    """
    _list = []
    for _item in fieldname.split('.'):
        # is_attr stays False for every part: bracket indexing ('[n]') and
        # real attribute/index distinction are still unsupported here, as
        # in the original ("fix me") implementation.
        is_attr = False
        if _item.isdigit():
            _list.append((int(_item), is_attr))
        else:
            _list.append((_item, is_attr))
    return _list[0][0], iter(_list[1:])
def formatter_parser(*args, **kw):
    """parse the argument as a format string

    Returns a list of ``(literal_text, field_name, text_after_'!', None)``
    tuples, one per ``{...}`` replacement field (plus trailing literals).

    Bug fix: the previous version used ``_m.groups(0)`` / ``_m.groups(1)``,
    which return the *tuple of all groups* (the argument is only the
    default for unmatched groups), so ``_name``/``_flags`` were tuples
    instead of strings. ``group(1)`` / ``group(2)`` select one group.
    """
    assert len(args) == 1
    assert isinstance(args[0], str)
    _result = []
    for _match in re.finditer(r"([^{]*)?(\{[^}]*\})?", args[0]):
        _pre, _fmt = _match.groups()
        if _fmt is None:
            _result.append((_pre, None, None, None))
        elif _fmt == '{}':
            _result.append((_pre, '', '', None))
        else:
            _m = re.match(r"\{([^!]*)!?(.*)?\}", _fmt)
            _name = _m.group(1)
            _flags = _m.group(2)
            _result.append((_pre, _name, _flags, None))
    return _result
| bsd-3-clause |
jaygattuso/set_pattern_finder | sorter.py | 1 | 1359 | import os
import operator
def get_seq(f_path, start, end):
    """Return the bytes in ``[start, end)`` of the file at *f_path*.

    Seeks directly to *start* instead of loading the entire file into
    memory (the previous version read the whole file just to slice it,
    and shadowed the built-in name ``bytes``). ``max(..., 0)`` preserves
    the original slice semantics: ``end < start`` yields ``b''`` rather
    than letting ``read()`` with a negative size consume the whole file.
    """
    with open(f_path, "rb") as handle:
        handle.seek(start)
        return handle.read(max(end - start, 0))
def get_seq_reversed(f_path, start, end):
    """Return bytes ``[start:end]`` of the file's content read back-to-front."""
    with open(f_path, "rb") as handle:
        reversed_content = handle.read()[::-1]
    return reversed_content[start:end]
def process(folder, start=0, end=16, show_all=False):
    """Count the byte patterns at offsets [start:end] of extensionless files.

    Walks *folder* recursively and returns a dict mapping each byte
    sequence to its occurrence count. With show_all, prints each pattern
    as hex as it is found. (Python 2 module: uses print statements.)
    """
    seqs = {}
    for root, subs, files in os.walk(folder):
        for f in files:
            f_path = os.path.join(root, f)
            if os.path.isfile(f_path):
                filename, file_extension = os.path.splitext(f_path)
                # only files without an extension are sampled
                if file_extension == "":
                    seq = get_seq(f_path, start, end)
                    if seq != "":
                        if seq not in seqs:
                            seqs[seq] = 1
                        else:
                            seqs[seq] += 1
                        if show_all:
                            print "".join("{:02x}".format(ord(c)) for c in seq), repr( seq), f_path
    if show_all:
        print
    return seqs
def show_results(seqs):
sorted_seqs = sorted(seqs.items(), key=operator.itemgetter(1), reverse=True)
for s in sorted_seqs:
byte_pattern = "".join("{:02x}".format(ord(c)) for c in s[0])
if cap < s[1]:
print "Count: {}\t Patterns: {} {}".format(s[1], byte_pattern, repr(s[0]))
print
print "Total unique patterns: {}".format(len(seqs))
### Folder of interest
folder = r""
### start offset
start = 0
### end offset
end = 2
seqs = process(folder, start, end, show_all=True)
# NOTE(review): show_results() as defined above accepts only one argument;
# this call passes cap=10 and raises TypeError unless a `cap` parameter
# (threshold for printing a pattern) is added to show_results.
show_results(seqs, cap=10)
| gpl-3.0 |
sangheestyle/elasticsearch-dsl-py | elasticsearch_dsl_fake/faceted_search.py | 1 | 7770 | from datetime import timedelta, datetime
from six import iteritems, itervalues
from functools import partial
from .search import Search
from .aggs import A
from .utils import AttrDict
from .result import Response
from .query import Q
__all__ = ['FacetedSearch', 'HistogramFacet', 'TermsFacet', 'DateHistogramFacet', 'RangeFacet']
class Facet(object):
    """
    A facet on faceted search. Wraps an aggregation and provides functionality
    to create a filter for selected values and return a list of facet values
    from the result of the aggregation.

    Subclasses set ``agg_type`` and typically override ``get_value_filter``.
    """
    # name of the elasticsearch aggregation type (e.g. 'terms'); set by subclasses
    agg_type = None

    def __init__(self, **kwargs):
        # kwargs are forwarded verbatim to the aggregation definition
        self.filter_values = ()
        self._params = kwargs

    def get_aggregation(self):
        """
        Return the aggregation object.
        """
        return A(self.agg_type, **self._params)

    def add_filter(self, filter_values):
        """
        Construct a filter and remember the values for use in get_values.
        Returns None when no values are selected.
        """
        self.filter_values = filter_values

        if not filter_values:
            return

        # OR the per-value filters together so any selected value matches
        f = self.get_value_filter(filter_values[0])
        for v in filter_values[1:]:
            f |= self.get_value_filter(v)
        return f

    def get_value_filter(self, filter_value):
        """
        Construct a filter for an individual value
        """
        pass

    def is_filtered(self, key):
        """
        Is a filter active on the given key.
        """
        return key in self.filter_values

    def get_value(self, bucket):
        """
        return a value representing a bucket. Its key as default.
        """
        return bucket['key']

    def get_values(self, data):
        """
        Turn the raw bucket data into a list of tuples containing the key,
        number of documents and a flag indicating whether this value has been
        selected or not.
        """
        out = []
        for bucket in data:
            key = self.get_value(bucket)
            out.append((
                key,
                bucket['doc_count'],
                self.is_filtered(key)
            ))
        return out
class TermsFacet(Facet):
    """Facet backed by a ``terms`` aggregation."""
    agg_type = 'terms'

    def add_filter(self, filter_values):
        """ Create a terms filter instead of bool containing term filters. """
        self.filter_values = filter_values
        if not filter_values:
            return None
        return Q('terms', **{self._params['field']: filter_values})
class RangeFacet(Facet):
    """Facet backed by a ``range`` aggregation with named buckets."""
    agg_type = 'range'

    def __init__(self, ranges, **kwargs):
        super(RangeFacet, self).__init__(**kwargs)
        self._params['ranges'] = list(map(self._range_to_dict, ranges))
        self._params['keyed'] = False
        self._ranges = dict(ranges)

    def _range_to_dict(self, named_range):
        """Turn ``(key, (from, to))`` into the aggregation's dict form."""
        key, bounds = named_range
        lower, upper = bounds
        out = {'key': key}
        if lower is not None:
            out['from'] = lower
        if upper is not None:
            out['to'] = upper
        return out

    def get_value_filter(self, filter_value):
        """Build a ``range`` query for the bucket named *filter_value*."""
        lower, upper = self._ranges[filter_value]
        limits = {}
        if lower is not None:
            limits['from'] = lower
        if upper is not None:
            limits['to'] = upper
        return Q('range', **{
            self._params['field']: limits
        })
class HistogramFacet(Facet):
    """Facet backed by a ``histogram`` aggregation."""
    agg_type = 'histogram'

    def get_value_filter(self, filter_value):
        """Select documents falling into the bucket starting at *filter_value*."""
        field = self._params['field']
        bucket_range = {
            'gte': filter_value,
            'lt': filter_value + self._params['interval'],
        }
        return Q('range', **{field: bucket_range})
class DateHistogramFacet(Facet):
    """Facet backed by a ``date_histogram`` aggregation."""
    agg_type = 'date_histogram'

    # Maps each supported 'interval' param to a function computing the
    # exclusive upper bound of a bucket that starts at datetime ``d``.
    DATE_INTERVALS = {
        'month': lambda d: (d+timedelta(days=32)).replace(day=1),  # first day of the next month
        'week': lambda d: d+timedelta(days=7),
        'day': lambda d: d+timedelta(days=1),
        'hour': lambda d: d+timedelta(hours=1),
    }

    def __init__(self, **kwargs):
        # include empty buckets so the date axis has no gaps
        kwargs.setdefault("min_doc_count", 0)
        super(DateHistogramFacet, self).__init__(**kwargs)

    def get_value(self, bucket):
        # elasticsearch returns date_histogram bucket keys as epoch milliseconds
        return datetime.utcfromtimestamp(int(bucket['key']) / 1000)

    def get_value_filter(self, filter_value):
        # select documents inside the bucket that starts at filter_value
        return Q('range', **{
            self._params['field']: {
                'gte': filter_value,
                'lt': self.DATE_INTERVALS[self._params['interval']](filter_value)
            }
        })
class FacetedResponse(Response):
    """Search response that also exposes parsed facet values.

    NOTE: attributes are stored via ``super(AttrDict, self).__setattr__`` to
    bypass AttrDict's attribute interception; do not assign them directly.
    """
    def __init__(self, search, *args, **kwargs):
        super(FacetedResponse, self).__init__(*args, **kwargs)
        super(AttrDict, self).__setattr__('_search', search)

    @property
    def query_string(self):
        # the raw query text the FacetedSearch was constructed with
        return self._search._query

    @property
    def facets(self):
        # lazily computed from the '_filter_<name>' aggregations, then cached
        if not hasattr(self, '_facets'):
            super(AttrDict, self).__setattr__('_facets', AttrDict({}))
            for name, facet in iteritems(self._search.facets):
                self._facets[name] = facet.get_values(self.aggregations['_filter_' + name][name]['buckets'])
        return self._facets
class FacetedSearch(object):
    """
    Abstract base for faceted search. Subclasses configure ``index``,
    ``doc_types``, ``fields`` and ``facets``; instances are created with an
    optional query string and a mapping of selected facet filters, then
    ``execute()`` runs the combined search.
    """
    index = '_all'
    doc_types = ['_all']
    # fields searched by the default multi_match query (and highlighted)
    fields = ('*', )
    # mapping of facet name -> Facet instance; defined by subclasses
    facets = {}

    def __init__(self, query=None, filters=None):
        """
        :arg query: query text passed to ``self.query()``
        :arg filters: mapping of facet name -> selected value(s)

        Bug fix: ``filters`` previously defaulted to the mutable ``{}``
        (a shared default argument); ``None`` is the sentinel now and the
        behavior is otherwise unchanged.
        """
        self._query = query
        self._filters = {}
        for name, value in iteritems(filters or {}):
            self.add_filter(name, value)

    def add_filter(self, name, filter_values):
        """
        Add a filter for a facet.
        """
        # normalize the value into a list
        if not isinstance(filter_values, (tuple, list)):
            if filter_values in (None, ''):
                return
            filter_values = [filter_values, ]

        # get the filter from the facet
        f = self.facets[name].add_filter(filter_values)
        if f is None:
            return

        self._filters[name] = f

    def search(self):
        """
        Construct the Search object.
        """
        return Search(doc_type=self.doc_types, index=self.index)

    def query(self, search, query):
        """
        Add query part to ``search``.

        Override this if you wish to customize the query used.
        """
        if query:
            return search.query('multi_match', fields=self.fields, query=query)
        return search

    def aggregate(self, search):
        """
        Add aggregations representing the facets selected, including potential
        filters.

        Each facet is wrapped in a filter aggregation combining every *other*
        facet's filter, so a facet's own selection does not narrow its counts.
        """
        for f, facet in iteritems(self.facets):
            agg = facet.get_aggregation()
            agg_filter = Q('match_all')
            # renamed from ``filter`` to avoid shadowing the builtin
            for field, field_filter in iteritems(self._filters):
                if f == field:
                    continue
                agg_filter &= field_filter
            search.aggs.bucket(
                '_filter_' + f,
                'filter',
                filter=agg_filter
            ).bucket(f, agg)

    def filter(self, search):
        """
        Add a ``post_filter`` to the search request narrowing the results based
        on the facet filters.
        """
        post_filter = Q('match_all')
        for f in itervalues(self._filters):
            post_filter &= f
        return search.post_filter(post_filter)

    def highlight(self, search):
        """
        Add highlighting for all the fields
        """
        return search.highlight(*self.fields)

    def build_search(self):
        """
        Construct the ``Search`` object.
        """
        s = self.search()
        s = self.query(s, self._query)
        s = self.filter(s)
        s = self.highlight(s)
        self.aggregate(s)
        return s

    def execute(self):
        # run the search once and cache the FacetedResponse
        if not hasattr(self, '_response'):
            s = self.build_search()
            self._response = s.execute(response_class=partial(FacetedResponse, self))

        return self._response
| apache-2.0 |
tomlof/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 31 | 13747 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset and which features a point in time split between the train and
test sets. The compressed dataset size is around 14 Mb compressed. Once
uncompressed the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from .base import _pkl_filepath
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)

# Official "by date" train/test split of the 20 newsgroups corpus.
URL = ("http://people.csail.mit.edu/jrennie/"
       "20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
# Zipped-pickle cache file written into the scikit-learn data home.
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
    """Download the 20 newsgroups data and store it as a zipped pickle.

    Downloads the archive into *target_dir*, extracts it, pickles the
    train/test file lists into *cache_path* and removes the scratch
    directory. Returns the cache dict with 'train' and 'test' entries.
    """
    archive_path = os.path.join(target_dir, ARCHIVE_NAME)
    train_path = os.path.join(target_dir, TRAIN_FOLDER)
    test_path = os.path.join(target_dir, TEST_FOLDER)

    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    if os.path.exists(archive_path):
        # Download is not complete as the .tar.gz file is removed after
        # download.
        logger.warning("Download was incomplete, downloading again.")
        os.remove(archive_path)

    logger.warning("Downloading dataset from %s (14 MB)", URL)
    opener = urlopen(URL)
    try:
        with open(archive_path, 'wb') as f:
            f.write(opener.read())
    finally:
        # bug fix: the HTTP response was never closed (socket leak);
        # try/finally instead of a with-statement for Python 2 urllib2 compat
        opener.close()

    logger.info("Decompressing %s", archive_path)
    archive = tarfile.open(archive_path, "r:gz")
    try:
        archive.extractall(path=target_dir)
    finally:
        # bug fix: the TarFile was never closed (file-handle leak)
        archive.close()
    os.remove(archive_path)

    # Store a zipped pickle
    cache = dict(train=load_files(train_path, encoding='latin1'),
                 test=load_files(test_path, encoding='latin1'))
    compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
    with open(cache_path, 'wb') as f:
        f.write(compressed_content)

    shutil.rmtree(target_dir)
    return cache
def strip_newsgroup_header(text):
    """
    Given text in "news" format, strip the headers, by removing everything
    before the first blank line. Returns '' when no blank line exists.
    """
    marker = text.find('\n\n')
    if marker == -1:
        return ''
    return text[marker + 2:]
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
                       r'|^In article|^Quoted from|^\||^>)')


def strip_newsgroup_quoting(text):
    """
    Given text in "news" format, strip lines beginning with the quote
    characters > or |, plus lines that often introduce a quoted section
    (for example, because they contain the string 'writes:'.)
    """
    kept_lines = []
    for line in text.split('\n'):
        if _QUOTE_RE.search(line) is None:
            kept_lines.append(line)
    return '\n'.join(kept_lines)
def strip_newsgroup_footer(text):
    """
    Given text in "news" format, attempt to remove a signature block.

    As a rough heuristic, we assume that signatures are set apart by either
    a blank line or a line made of hyphens, and that it is the last such line
    in the file (disregarding blank lines at the end).
    """
    lines = text.strip().split('\n')
    for idx in reversed(range(len(lines))):
        if lines[idx].strip().strip('-') == '':
            # a separator on the very first line means nothing precedes it
            if idx > 0:
                return '\n'.join(lines[:idx])
            return text
    return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
                       shuffle=True, random_state=42,
                       remove=(),
                       download_if_missing=True):
    """Load the filenames and data from the 20 newsgroups dataset.

    Read more in the :ref:`User Guide <20newsgroups>`.

    Parameters
    ----------
    subset : 'train' or 'test', 'all', optional
        Select the dataset to load: 'train' for the training set, 'test'
        for the test set, 'all' for both, with shuffled ordering.

    data_home : optional, default: None
        Specify a download and cache folder for the datasets. If None,
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    categories : None or collection of string or unicode
        If None (default), load all the categories.
        If not None, list of category names to load (other categories
        ignored).

    shuffle : bool, optional
        Whether or not to shuffle the data: might be important for models that
        make the assumption that the samples are independent and identically
        distributed (i.i.d.), such as stochastic gradient descent.

    random_state : numpy random number generator or seed integer
        Used to shuffle the dataset.

    download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
        instead of trying to download the data from the source site.

    remove : tuple
        May contain any subset of ('headers', 'footers', 'quotes'). Each of
        these are kinds of text that will be detected and removed from the
        newsgroup posts, preventing classifiers from overfitting on
        metadata.

        'headers' removes newsgroup headers, 'footers' removes blocks at the
        ends of posts that look like signatures, and 'quotes' removes lines
        that appear to be quoting another post.

        'headers' follows an exact standard; the other filters are not always
        correct.

    Returns
    -------
    data : Bunch with ``data``, ``target``, ``filenames``, ``target_names``
        and ``description`` attributes.
    """
    data_home = get_data_home(data_home=data_home)
    cache_path = _pkl_filepath(data_home, CACHE_NAME)
    twenty_home = os.path.join(data_home, "20news_home")
    cache = None
    if os.path.exists(cache_path):
        try:
            with open(cache_path, 'rb') as f:
                compressed_content = f.read()
            uncompressed_content = codecs.decode(
                compressed_content, 'zlib_codec')
            # NOTE(review): unpickling is only safe because this cache file is
            # written locally by download_20newsgroups; never point cache_path
            # at untrusted data.
            cache = pickle.loads(uncompressed_content)
        except Exception as e:
            # a corrupt cache is not fatal; fall through to re-download
            print(80 * '_')
            print('Cache loading failed')
            print(80 * '_')
            print(e)

    if cache is None:
        if download_if_missing:
            logger.info("Downloading 20news dataset. "
                        "This may take a few minutes.")
            cache = download_20newsgroups(target_dir=twenty_home,
                                          cache_path=cache_path)
        else:
            raise IOError('20Newsgroups dataset not found')

    if subset in ('train', 'test'):
        data = cache[subset]
    elif subset == 'all':
        data_lst = list()
        target = list()
        filenames = list()
        for subset in ('train', 'test'):
            data = cache[subset]
            data_lst.extend(data.data)
            target.extend(data.target)
            filenames.extend(data.filenames)

        data.data = data_lst
        data.target = np.array(target)
        data.filenames = np.array(filenames)
    else:
        raise ValueError(
            "subset can only be 'train', 'test' or 'all', got '%s'" % subset)

    data.description = 'the 20 newsgroups by date dataset'

    if 'headers' in remove:
        data.data = [strip_newsgroup_header(text) for text in data.data]
    if 'footers' in remove:
        data.data = [strip_newsgroup_footer(text) for text in data.data]
    if 'quotes' in remove:
        data.data = [strip_newsgroup_quoting(text) for text in data.data]

    if categories is not None:
        labels = [(data.target_names.index(cat), cat) for cat in categories]
        # Sort the categories to have the ordering of the labels
        labels.sort()
        labels, categories = zip(*labels)
        mask = np.in1d(data.target, labels)
        data.filenames = data.filenames[mask]
        data.target = data.target[mask]
        # searchsorted to have continuous labels
        data.target = np.searchsorted(labels, data.target)
        data.target_names = list(categories)
        # Use an object array to shuffle: avoids memory copy
        data_lst = np.array(data.data, dtype=object)
        data_lst = data_lst[mask]
        data.data = data_lst.tolist()

    if shuffle:
        random_state = check_random_state(random_state)
        indices = np.arange(data.target.shape[0])
        random_state.shuffle(indices)
        data.filenames = data.filenames[indices]
        data.target = data.target[indices]
        # Use an object array to shuffle: avoids memory copy
        data_lst = np.array(data.data, dtype=object)
        data_lst = data_lst[indices]
        data.data = data_lst.tolist()

    return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
    """Load the 20 newsgroups dataset and transform it into tf-idf vectors.

    This is a convenience function; the tf-idf transformation is done using the
    default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
    advanced usage (stopword filtering, n-gram extraction, etc.), combine
    fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.

    Read more in the :ref:`User Guide <20newsgroups>`.

    Parameters
    ----------
    subset : 'train' or 'test', 'all', optional
        Select the dataset to load: 'train' for the training set, 'test'
        for the test set, 'all' for both, with shuffled ordering.

    data_home : optional, default: None
        Specify an download and cache folder for the datasets. If None,
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    remove : tuple
        May contain any subset of ('headers', 'footers', 'quotes'). Each of
        these are kinds of text that will be detected and removed from the
        newsgroup posts, preventing classifiers from overfitting on
        metadata.

        'headers' removes newsgroup headers, 'footers' removes blocks at the
        ends of posts that look like signatures, and 'quotes' removes lines
        that appear to be quoting another post.

    Returns
    -------
    bunch : Bunch object
        bunch.data: sparse matrix, shape [n_samples, n_features]
        bunch.target: array, shape [n_samples]
        bunch.target_names: list, length [n_classes]
    """
    data_home = get_data_home(data_home=data_home)
    filebase = '20newsgroup_vectorized'
    if remove:
        filebase += 'remove-' + ('-'.join(remove))
    target_file = _pkl_filepath(data_home, filebase + ".pkl")

    # we shuffle but use a fixed seed for the memoization
    data_train = fetch_20newsgroups(data_home=data_home,
                                    subset='train',
                                    categories=None,
                                    shuffle=True,
                                    random_state=12,
                                    remove=remove)

    data_test = fetch_20newsgroups(data_home=data_home,
                                   subset='test',
                                   categories=None,
                                   shuffle=True,
                                   random_state=12,
                                   remove=remove)

    if os.path.exists(target_file):
        # reuse the memoized vectorization from a previous call
        X_train, X_test = joblib.load(target_file)
    else:
        vectorizer = CountVectorizer(dtype=np.int16)
        X_train = vectorizer.fit_transform(data_train.data).tocsr()
        X_test = vectorizer.transform(data_test.data).tocsr()
        joblib.dump((X_train, X_test), target_file, compress=9)

    # the data is stored as int16 for compactness
    # but normalize needs floats
    X_train = X_train.astype(np.float64)
    X_test = X_test.astype(np.float64)
    normalize(X_train, copy=False)
    normalize(X_test, copy=False)

    target_names = data_train.target_names

    if subset == "train":
        data = X_train
        target = data_train.target
    elif subset == "test":
        data = X_test
        target = data_test.target
    elif subset == "all":
        data = sp.vstack((X_train, X_test)).tocsr()
        target = np.concatenate((data_train.target, data_test.target))
    else:
        raise ValueError("%r is not a valid subset: should be one of "
                         "['train', 'test', 'all']" % subset)

    return Bunch(data=data, target=target, target_names=target_names)
| bsd-3-clause |
kaplun/invenio | modules/websearch/lib/websearch_external_collections_unit_tests.py | 16 | 4127 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Testing functions for the external collections search.
More tests of the page getter module can be done with
websearch_external_collections_getter_tests.py
"""
__revision__ = "$Id$"
from invenio.testutils import InvenioTestCase
from invenio.websearch_external_collections_searcher import external_collections_dictionary
from invenio.websearch_external_collections_getter import HTTPAsyncPageGetter, async_download
from invenio.testutils import make_test_suite, run_test_suite, nottest
def download_and_parse():
    """Try to make a query that always return results on all search engines.
    Check that a page is well returned and that the result can be parsed.

    This test is not included in the general test suite.

    This test give false positive if any of the external server is non working or too slow.

    Returns a list of human-readable error strings (empty on success).
    """
    test = [['+', 'ieee', '', 'w']]
    errors = []

    external_collections = external_collections_dictionary.values()
    urls = [engine.build_search_url(test) for engine in external_collections]
    pagegetters = [HTTPAsyncPageGetter(url) for url in urls]
    dummy = async_download(pagegetters, None, None, 30)

    for (page, engine, url) in zip(pagegetters, external_collections, urls):
        if not url:
            errors.append("Unable to build url for : " + engine.name)
            continue
        if len(page.data) == 0:
            errors.append("Zero sized page with : " + engine.name)
            continue
        if engine.parser:
            results = engine.parser.parse_and_get_results(page.data)
            num_results = engine.parser.parse_num_results()
            if len(results) == 0:
                errors.append("Unable to parse results for : " + engine.name)
                continue
            if not num_results:
                errors.append("Unable to parse (None returned) number of results for : " + engine.name)
            try:
                num_results = int(num_results)
            except (TypeError, ValueError):
                # bug fix: a bare ``except:`` here also swallowed
                # KeyboardInterrupt/SystemExit; only conversion failures of
                # num_results are expected at this point.
                errors.append("Unable to parse (not a number) number of results for : " + engine.name)

    return errors
@nottest
def build_search_urls_test():
    """Build some classical urls from basic_search_units.

    Manual smoke test (hence @nottest): prints the URL generated by each
    configured external search engine. Python 2 module (print statements).
    """
    print "Testing external_search_engines build_search_url functions."
    # each entry is a list of basic search units: [operator, term, field, type]
    tests = [ [['+', 'ellis', 'author', 'w'], ['+', 'unification', 'title', 'w'],
               ['-', 'Ross', 'author', 'w'], ['+', 'large', '', 'w'], ['-', 'helloworld', '', 'w']],
              [['+', 'ellis', 'author', 'w'], ['+', 'unification', 'title', 'w']],
              [['+', 'ellis', 'author', 'w']],
              [['-', 'Ross', 'author', 'w']] ]
    for engine in external_collections_dictionary.values():
        print engine.name
        for test in tests:
            url = engine.build_search_url(test)
            print " Url: " + str(url)
class ExtCollTests(InvenioTestCase):
    """Test cases for websearch_external_collections_*"""

    @nottest
    def test_download_and_parse(self):
        """websearch_external_collections - download_and_parse (not reliable, see docstring)"""
        # hits live external services, so it is excluded from automated runs
        self.assertEqual([], download_and_parse())
# FIXME: the above tests not plugged into global unit test suite
# ExtCollTests is deliberately left out of the suite: it depends on live
# external search services (see download_and_parse docstring).
TEST_SUITE = make_test_suite() #ExtCollTests,)

if __name__ == "__main__":
    build_search_urls_test()
    run_test_suite(TEST_SUITE)
| gpl-2.0 |
CityGenerator/Megacosm-Generator | integration_tests/test_mundaneitems.py | 1 | 1111 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from megacosm.generators import MundaneItem
import unittest2 as unittest
from pprint import pprint
import redis
from config import IntegrationTestConfiguration
class TestMundaneItemIntegration(unittest.TestCase):
    """Integration tests for MundaneItem against a live Redis fixture (Python 2)."""

    def setUp(self):
        # connect to the Redis instance configured for integration tests
        self.redis = redis.from_url(IntegrationTestConfiguration.REDIS_URL)

    def tearDown(self):
        """Tear stuff Down."""
        #self.redis.flushall()

    def test_kinds(self):
        """ """
        # Render every (kind, template) combination stored in Redis and make
        # sure no unexpanded template markers leak into the final output.
        for kind in self.redis.lrange('mundaneitem_kind',0,-1):
            print "kind: "+kind
            for template in self.redis.lrange('mundaneitem_template',0,-1):
                print "template: "+template
                mundaneitem = MundaneItem(self.redis, {'kind':kind, 'template':template} )
                rendered_kind= mundaneitem.render_template(kind)
                self.assertEqual(rendered_kind, str(mundaneitem.kind))
                self.assertNotIn('{', str(mundaneitem))
                self.assertNotIn('}', str(mundaneitem))
                self.assertNotIn('params', str(mundaneitem))
| gpl-2.0 |
twig/django-taggit | taggit/south_migrations/0002_unique_tagnames.py | 36 | 2027 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration (auto-generated): make Tag.name unique.

    Do not edit the frozen ``models`` dict by hand; it is a snapshot of the
    app state at the time the migration was generated.
    """

    def forwards(self, orm):
        # Adding unique constraint on 'Tag', fields ['name']
        db.create_unique('taggit_tag', ['name'])

    def backwards(self, orm):
        # Removing unique constraint on 'Tag', fields ['name']
        db.delete_unique('taggit_tag', ['name'])

    models = {
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
        }
    }

    complete_apps = ['taggit']
| bsd-3-clause |
SynapticNulship/Anibots | sim_py/anibots_breve_exp0.py | 1 | 6082 | # Anibots (anigraf robots) physical/visual sim
#
# Copyright (c) 2007-2012 Samuel H. Kenyon. <sam@synapticnulship.com>
# http://synapticnulship.com
# This is open source, made available under the MIT License (see the
# accompanying file LICENSE).
#
# This python script connects my anibots C++ program (with the help of SWIG)
# to the Breve simulation environment. It also defines low-level physical
# actions triggered by higher-level anigraf actions.
#
# For final project 9.343J Fall 2006
# Experiment 0: The block task
#
# To run: execute breve with this file as arg
from anibots import *
import breve
class AnibotPhysicsSim( breve.PhysicalControl ):
    def __init__( self ):
        # NOTE(review): calls breve.Control.__init__ rather than the direct
        # parent breve.PhysicalControl.__init__ -- confirm this is intended.
        breve.Control.__init__( self )
        self.bots = breve.objectList()
        # number of physics-sim iterations each high-level anigraf action lasts
        self.actionDuration = 45
        self.iterCount=self.actionDuration
        self.videoLog = breve.Movie()
        self.block = None
        #configure the anibots
        self.env = None
        self.numBots = 2
        self.iterations = 20
        # lookahead depth for the anigraf mental simulation
        self.kDepth = 3
        self.takeTurns = True
        # graph/edge/weight data files driving the anigraf agents
        self.anibotConfig = AnibotConfig("02b-graf.dat", "02b-pedge.dat","02b-pweights.dat")
        self.anibotConfig.proxify = True
        # bool proxyWeightsProportional;
        # float proxyWeightsProportion;
        #bool randomizeEdges
        self.anibotConfig.randomize = False
        #self.anibotConfig.quant = 11
        #self.anibotConfig.quantDiff = 1
        #anibotConfig.quantIrregular;
        #self.anibotConfig.randMin = 0
        #self.anibotConfig.randMax = 10
        self.anibotConfig.singleTops = True;
        AnibotPhysicsSim.init( self )
    def init( self ):
        """Set up the anigraf mental simulation and the Breve physics scene.

        Python 2 module (print statements). Creates the AgentEnv, two anibot
        bodies, the block they push, lighting/camera, and starts video capture.
        """
        print '''Setting up Anibot environment'''
        # start the anibots environment (mental simulation)
        self.env = AgentEnv("anibots_breve_exp0.py",self.kDepth,self.takeTurns)
        self.env.NewAnibot(self.numBots,self.anibotConfig)
        self.env.InitTransaction(0,1)
        print '''Setting up Physics Sim.'''
        #start the visual/physical environment in Breve
        self.setDisplayText( "Anibots Sim", -1.0, 0.8, 1 )
        self.setRandomSeedFromDevRandom()
        self.enableFastPhysics()
        self.setFastPhysicsIterations( 15 )
        #self.setGravity( breve.vector(0.0,-3.0,0.0) )
        self.enableLighting()
        self.enableSmoothDrawing()
        self.moveLight( breve.vector( 20, 30, 20 ) )
        floor = breve.createInstances( breve.Floor, 1 )
        floor.catchShadows()
        #floor.setE( 1.000000 )
        # frictionless floor so the block slides freely
        floor.setMu(0.0)
        #floor.showAxis()
        self.cloudTexture = breve.createInstances( breve.Image, 1 ).load( 'images/clouds.png' )
        self.enableShadowVolumes()
        self.enableReflections()
        self.setBackgroundColor( breve.vector( 0.400000, 0.600000, 0.900000 ) )
        self.setBackgroundTextureImage( self.cloudTexture )
        #self.offsetCamera( breve.vector( 3, 13, -13 ) )
        self.pointCamera( breve.vector( 0, 0, 0 ), breve.vector( 20, 20, 60 ) )
        #the virtual bodies
        self.bots = breve.createInstances( breve.AnibotBody, 2 )
        self.bots[0].move( breve.vector( -7.5, self.bots[0].radius, 14 ) )
        self.bots[1].move( breve.vector( 7.5, self.bots[0].radius, 14 ) )
        self.bots[0].setColor(breve.vector( 0.1, 0.8 , 0.1 ))
        self.bots[1].setColor(breve.vector( 0.0, 0.0 , 0.8 ))
        #the block
        self.block = breve.createInstances( breve.Mobile, 1 )
        shape = breve.createInstances( breve.Cube, 1 ).initWith( breve.vector(15,3,4) )
        shape.setMass(0.5)
        self.block.setShape(shape)
        self.block.setColor(breve.vector( 1.0, 0.5 ,0.0 ))
        self.block.move( breve.vector( 0.0, 1.5 ,0.0 ) )
        self.block.setMu(0.0)
        #block.setE(0.1)
        self.block.enablePhysics()
        print self.block.getMass()
        self.watch( self.block )
        # record the run to an MPEG video
        self.videoLog.record("anibots-02b.mpg")
        s2 = "block dist: " + str(self.block.getLocation()[2])
        #block.setForce( breve.vector( 500.0, 500.0 , 500.0 ) )
        #block.setVelocity( breve.vector( 0, 0, -10 ) )
        #self.watch( self.bots[0] )
def iterate( self ):
self.iterCount = self.iterCount + 1
if self.iterCount > self.actionDuration:
action="foo"
self.iterCount=0
self.env.Next()
for i in range(self.numBots):
if self.env.CurrentAction(i) == 67:
action = "center"
self.bots[i].moveX(0.0)
elif self.env.CurrentAction(i) == 76:
action = "left"
self.bots[i].moveX(-3.7)
elif self.env.CurrentAction(i) == 82:
action = "right"
self.bots[i].moveX(3.7)
else:
action = "unknown"
self.bots[i].moveX(8.0)
#this avoids spheres being in exact same place:
#if i > 0:
# if self.env.CurrentAction(i-1) == self.env.CurrentAction(i):
# self.bots[i].moveXextra(1.6)
#j=4
s = "bot"+ str(i)+" action: " + action;
print s
self.setDisplayText(s, -1.0, 0.7-(i/10.0), i+2 )
s2 = "block dist: %.2f" % (-self.block.getLocation()[2])
self.setDisplayText(s2, -1.0, 0.5, 6)
breve.Control.iterate( self )
breve.AnibotPhysicsSim = AnibotPhysicsSim
class AnibotBody( breve.Mobile ):
    """Physical body of one anibot: a dense sphere that constantly drifts
    toward -z and is steered laterally by the controller via moveX()."""

    def __init__( self ):
        breve.Mobile.__init__( self )
        self.radius = 1.5
        AnibotBody.init( self )

    def init( self ):
        shape = breve.createInstances( breve.Sphere, 1 ).initWith( self.radius )
        shape.setDensity(100)
        self.setShape( shape )
        #self.setShape( breve.createInstances( breve.Cube, 1 ).initWith( breve.vector(self.radius,self.radius,self.radius) ))
        self.setColor( breve.randomExpression( breve.vector( 1.000000, 1.000000, 1.000000 ) ) )
        #self.move( breve.vector( breve.randomExpression(8.0)-4.0, self.radius, breve.randomExpression(20.0) + 8.0 ) )
        self.move( breve.vector( 0.0, self.radius, 14.0 ) )
        print self.getMass()
        self.enablePhysics()
        #self.setVelocity( breve.vector( 0.0, 0.0, -2.0 ) )
        #self.setForce( breve.vector( 0.0, 0.0, -100.0 ) )

    def moveX( self, x ):
        """Teleport to lateral position x; note this also lunges 2 units
        forward in z each call (z+2), not a pure sideways move."""
        #if self.getLocation()[0] != x:
        z = self.getLocation()[2]
        self.move( breve.vector( x, self.radius, z+2 ) )

    def moveXextra( self, m ):
        """Scale the current x position by factor m (used to keep two bots
        from occupying the same spot; currently commented out in iterate)."""
        x = self.getLocation()[0] * m
        z = self.getLocation()[2]
        self.move( breve.vector( x, self.radius, z ) )

    def iterate( self ):
        # Constant forward drift; re-applied every frame so physics can't damp it.
        #print self.getVelocity()
        self.setVelocity( breve.vector( 0.0, 0.0, -2.0 ) )
breve.AnibotBody = AnibotBody

# Create an instance of our controller object to initialize the simulation
AnibotPhysicsSim()
| mit |
lancezlin/ml_template_py | lib/python2.7/site-packages/IPython/utils/tests/test_importstring.py | 35 | 1247 | """Tests for IPython.utils.importstring."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.utils.importstring import import_item
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_import_plain():
    """A plain top-level module resolved by import_item is the same object
    that a normal ``import`` statement yields."""
    import os
    imported = import_item('os')
    nt.assert_true(imported is os)
def test_import_nested():
    """import_item resolves dotted stdlib paths to the real sub-module."""
    from os import path
    resolved = import_item('os.path')
    nt.assert_true(resolved is path)
def test_import_raises():
    """A dotted name whose attribute does not exist raises ImportError."""
    bad_name = 'IPython.foobar'
    nt.assert_raises(ImportError, import_item, bad_name)
| mit |
FlorianLudwig/odoo | addons/sales_team/res_config.py | 366 | 1922 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
class sales_team_configuration(osv.TransientModel):
    # Transient settings model extending sale.config.settings with the
    # single/multiple sales-teams toggle.
    _name = 'sale.config.settings'
    _inherit = ['sale.config.settings']

    def set_group_multi_salesteams(self, cr, uid, ids, context=None):
        """ This method is automatically called by res_config as it begins
            with set. It is used to implement the 'one group or another'
            behavior. We have to perform some group manipulation by hand
            because in res_config.execute(), set_* methods are called
            after group_*; therefore writing on an hidden res_config file
            could not work.
            If group_multi_salesteams is checked: remove group_mono_salesteams
            from group_user, remove the users. Otherwise, just add
            group_mono_salesteams in group_user.
            The inverse logic about group_multi_salesteams is managed by the
            normal behavior of 'group_multi_salesteams' field.
        """
        def ref(xml_id):
            # Resolve an external id "module.name" to its browse record.
            mod, xml = xml_id.split('.', 1)
            return self.pool['ir.model.data'].get_object(cr, uid, mod, xml, context)

        for obj in self.browse(cr, uid, ids, context=context):
            config_group = ref('base.group_mono_salesteams')
            base_group = ref('base.group_user')
            if obj.group_multi_salesteams:
                # (3, id) = unlink; drop the mono group and evict its users.
                base_group.write({'implied_ids': [(3, config_group.id)]})
                config_group.write({'users': [(3, u.id) for u in base_group.users]})
            else:
                # (4, id) = link; re-imply the mono group for all users.
                base_group.write({'implied_ids': [(4, config_group.id)]})
        return True

    _columns = {
        'group_multi_salesteams': fields.boolean("Organize Sales activities into multiple Sales Teams",
            implied_group='base.group_multi_salesteams',
            help="""Allows you to use Sales Teams to manage your leads and opportunities."""),
    }
| agpl-3.0 |
intel-analytics/analytics-zoo | pyzoo/test/zoo/chronos/model/forecast/test_tcmf_forecaster.py | 1 | 14341 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
from zoo.chronos.model.forecast.tcmf_forecaster import TCMFForecaster
from unittest import TestCase
import tempfile
import pandas as pd
class TestChronosModelTCMFForecaster(TestCase):
    """Integration tests for TCMFForecaster covering the ndarray, xshards
    (distributed data) and ray-distributed training/prediction paths."""

    def setUp(self):
        """Build a fresh forecaster and random data before each test."""
        self.model = TCMFForecaster()
        self.num_samples = 300
        self.horizon = np.random.randint(1, 50)   # randomized per test run
        self.seq_len = 480
        self.data = np.random.rand(self.num_samples, self.seq_len)
        self.id = np.arange(self.num_samples)
        self.data_new = np.random.rand(self.num_samples, self.horizon)
        # Minimal epoch/iteration counts keep the fit fast for testing.
        self.fit_params = dict(val_len=12,
                               start_date="2020-1-1",
                               freq="5min",
                               y_iters=1,
                               init_FX_epoch=1,
                               max_FX_epoch=1,
                               max_TCN_epoch=1,
                               alt_iters=2)

    def test_forecast_tcmf_ndarray(self):
        """fit / predict / save+load / evaluate / fit_incremental on dict-of-ndarray input."""
        ndarray_input = {'id': self.id, 'y': self.data}
        self.model.fit(ndarray_input, **self.fit_params)
        assert not self.model.is_xshards_distributed()
        # test predict
        yhat = self.model.predict(horizon=self.horizon)
        # test save load
        with tempfile.TemporaryDirectory() as tempdirname:
            self.model.save(tempdirname)
            loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
        yhat_loaded = loaded_model.predict(horizon=self.horizon)
        yhat_id = yhat_loaded["id"]
        np.testing.assert_equal(yhat_id, self.id)
        yhat = yhat["prediction"]
        yhat_loaded = yhat_loaded["prediction"]
        assert yhat.shape == (self.num_samples, self.horizon)
        np.testing.assert_array_almost_equal(yhat, yhat_loaded, decimal=4)
        # test evaluate
        target_value = dict({"y": self.data_new})
        assert self.model.evaluate(target_value=target_value, metric=['mse'])
        # test fit_incremental
        self.model.fit_incremental({'y': self.data_new})  # 1st time
        self.model.fit_incremental({'y': self.data_new})  # 2nd time
        yhat_incr = self.model.predict(horizon=self.horizon)
        yhat_incr = yhat_incr["prediction"]
        assert yhat_incr.shape == (self.num_samples, self.horizon)
        # incremental fitting must actually change the predictions
        np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, yhat, yhat_incr)

    def test_tcmf_ndarray_covariates_dti(self):
        """Same flow as the ndarray test but with covariates and a DatetimeIndex."""
        ndarray_input = {'id': self.id, 'y': self.data}
        self.model.fit(ndarray_input,
                       covariates=np.random.rand(3, self.seq_len),
                       dti=pd.date_range('20130101', periods=self.seq_len),
                       **self.fit_params)
        future_covariates = np.random.randn(3, self.horizon)
        future_dti = pd.date_range('20130101', periods=self.horizon)
        # test predict
        yhat = self.model.predict(horizon=self.horizon,
                                  future_covariates=future_covariates,
                                  future_dti=future_dti,
                                  )
        # test save load
        with tempfile.TemporaryDirectory() as tempdirname:
            self.model.save(tempdirname)
            loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
        yhat_loaded = loaded_model.predict(horizon=self.horizon,
                                           future_covariates=future_covariates,
                                           future_dti=future_dti,
                                           )
        yhat_id = yhat_loaded["id"]
        np.testing.assert_equal(yhat_id, self.id)
        yhat = yhat["prediction"]
        yhat_loaded = yhat_loaded["prediction"]
        assert yhat.shape == (self.num_samples, self.horizon)
        np.testing.assert_array_almost_equal(yhat, yhat_loaded, decimal=4)
        # test evaluate
        target_value = dict({"y": self.data_new})
        assert self.model.evaluate(target_value=target_value,
                                   target_covariates=future_covariates,
                                   target_dti=future_dti,
                                   metric=['mse'])
        # test fit_incremental
        self.model.fit_incremental({'y': self.data_new},
                                   covariates_incr=future_covariates,
                                   dti_incr=future_dti,)
        yhat_incr = self.model.predict(horizon=self.horizon,
                                       future_covariates=future_covariates,
                                       future_dti=future_dti,
                                       )
        yhat_incr = yhat_incr["prediction"]
        assert yhat_incr.shape == (self.num_samples, self.horizon)
        np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, yhat, yhat_incr)

    def test_forecast_ndarray_error(self):
        """Invalid ndarray inputs must raise with the documented messages."""
        # is_xshards_distributed before fit
        with self.assertRaises(Exception) as context:
            self.model.is_xshards_distributed()
        self.assertTrue('You should run fit before calling is_xshards_distributed()'
                        in str(context.exception))
        # fit
        input = dict({'data': self.data})
        with self.assertRaises(Exception) as context:
            self.model.fit(input)
        self.assertTrue("key `y` doesn't exist in x" in str(context.exception))
        input = dict({'y': "abc"})
        with self.assertRaises(Exception) as context:
            self.model.fit(input)
        self.assertTrue("the value of y should be an ndarray" in str(context.exception))
        id_diff = np.arange(200)   # mismatched id length (200 vs 300 rows)
        input = dict({'id': id_diff, 'y': self.data})
        with self.assertRaises(Exception) as context:
            self.model.fit(input)
        self.assertTrue("the length of the id array should be equal to the number of"
                        in str(context.exception))
        input_right = dict({'id': self.id, 'y': self.data})
        self.model.fit(input_right, **self.fit_params)
        with self.assertRaises(Exception) as context:
            self.model.fit(input_right)
        self.assertTrue('This model has already been fully trained' in str(context.exception))
        # fit_incremental with different ids than fit
        data_id_diff = {'id': self.id - 1, 'y': self.data_new}
        with self.assertRaises(ValueError) as context:
            self.model.fit_incremental(data_id_diff)
        self.assertTrue('The input ids in fit_incremental differs from input ids in fit'
                        in str(context.exception))
        # evaluate with a missing `y` key
        target_value_fake = dict({"data": self.data_new})
        with self.assertRaises(Exception) as context:
            self.model.evaluate(target_value=target_value_fake, metric=['mse'])
        self.assertTrue("key `y` doesn't exist in x" in str(context.exception))

    def test_forecast_tcmf_without_id(self):
        """The whole flow also works when the input dict has no 'id' array."""
        # construct data
        input = dict({'y': self.data})
        self.model.fit(input, **self.fit_params)
        assert not self.model.is_xshards_distributed()
        with tempfile.TemporaryDirectory() as tempdirname:
            self.model.save(tempdirname)
            loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
        yhat = self.model.predict(horizon=self.horizon)
        yhat_loaded = loaded_model.predict(horizon=self.horizon)
        assert "id" not in yhat_loaded
        yhat = yhat["prediction"]
        yhat_loaded = yhat_loaded["prediction"]
        assert yhat.shape == (self.num_samples, self.horizon)
        np.testing.assert_array_almost_equal(yhat, yhat_loaded, decimal=4)
        target_value = dict({"y": self.data_new})
        self.model.evaluate(target_value=target_value, metric=['mse'])
        self.model.fit_incremental({'y': self.data_new})  # 1st time
        yhat_incr = self.model.predict(horizon=self.horizon)
        yhat_incr = yhat_incr["prediction"]
        assert yhat_incr.shape == (self.num_samples, self.horizon)
        np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, yhat, yhat_incr)
        # supplying an id in fit_incremental after fitting without one must fail
        data_new_id = {'id': self.id, 'y': self.data_new}
        with self.assertRaises(ValueError) as context:
            self.model.fit_incremental(data_new_id)
        self.assertTrue('Got valid id in fit_incremental and invalid id in fit.'
                        in str(context.exception))

    def test_forecast_tcmf_xshards(self):
        """Distributed-data path: fit/predict on an orca XShards of pandas frames."""
        from zoo.orca import OrcaContext
        import zoo.orca.data.pandas
        import pandas as pd
        OrcaContext.pandas_read_backend = "pandas"

        def preprocessing(df, id_name, y_name):
            # Shape each pandas shard into the {'id': ..., 'y': ...} dict the model expects.
            id = df.index
            data = df.to_numpy()
            result = dict({id_name: id, y_name: data})
            return result

        def postprocessing(pred_results, output_dt_col_name):
            # Turn a prediction dict back into a long-format DataFrame
            # (one row per id/datetime pair).
            id_arr = pred_results["id"]
            pred_results = pred_results["prediction"]
            pred_results = np.concatenate((np.expand_dims(id_arr, axis=1), pred_results), axis=1)
            final_df = pd.DataFrame(pred_results, columns=["id"] + output_dt_col_name)
            final_df.id = final_df.id.astype("int")
            final_df = final_df.set_index("id")
            final_df.columns.name = "datetime"
            final_df = final_df.unstack().reset_index().rename({0: "prediction"}, axis=1)
            return final_df

        def get_pred(d):
            return d["prediction"]

        with tempfile.NamedTemporaryFile() as temp:
            data = np.random.rand(300, 480)
            df = pd.DataFrame(data)
            df.to_csv(temp.name)
            shard = zoo.orca.data.pandas.read_csv(temp.name)
            shard.cache()
            # missing `y` key must be rejected
            shard_train = shard.transform_shard(preprocessing, 'id', 'data')
            with self.assertRaises(Exception) as context:
                self.model.fit(shard_train)
            self.assertTrue("key `y` doesn't exist in x" in str(context.exception))
            # missing `id` key must be rejected
            shard_train = shard.transform_shard(preprocessing, 'cid', 'y')
            with self.assertRaises(Exception) as context:
                self.model.fit(shard_train)
            self.assertTrue("key `id` doesn't exist in x" in str(context.exception))
            with self.assertRaises(Exception) as context:
                self.model.is_xshards_distributed()
            self.assertTrue('You should run fit before calling is_xshards_distributed()'
                            in str(context.exception))
            shard_train = shard.transform_shard(preprocessing, 'id', 'y')
            self.model.fit(shard_train, **self.fit_params)
            assert self.model.is_xshards_distributed()
            with self.assertRaises(Exception) as context:
                self.model.fit(shard_train)
            self.assertTrue('This model has already been fully trained' in str(context.exception))
            # fit_incremental is not supported on the xshards path
            with self.assertRaises(Exception) as context:
                self.model.fit_incremental(shard_train)
            self.assertTrue('NotImplementedError' in context.exception.__class__.__name__)
            with tempfile.TemporaryDirectory() as tempdirname:
                self.model.save(tempdirname + "/model")
                loaded_model = TCMFForecaster.load(tempdirname + "/model", is_xshards_distributed=True)
            horizon = np.random.randint(1, 50)
            yhat_shard_origin = self.model.predict(horizon=horizon)
            yhat_list_origin = yhat_shard_origin.collect()
            yhat_list_origin = list(map(get_pred, yhat_list_origin))
            yhat_shard = loaded_model.predict(horizon=horizon)
            yhat_list = yhat_shard.collect()
            yhat_list = list(map(get_pred, yhat_list))
            yhat_origin = np.concatenate(yhat_list_origin)
            yhat = np.concatenate(yhat_list)
            assert yhat.shape == (300, horizon)
            np.testing.assert_equal(yhat, yhat_origin)
            output_dt_col_name = pd.date_range(start='2020-05-01', periods=horizon, freq='H').to_list()
            yhat_df_shards = yhat_shard.transform_shard(postprocessing, output_dt_col_name)
            final_df_list = yhat_df_shards.collect()
            final_df = pd.concat(final_df_list)
            final_df.sort_values("datetime", inplace=True)
            assert final_df.shape == (300 * horizon, 3)
            # restore the default backend for other tests
            OrcaContext.pandas_read_backend = "spark"

    def test_forecast_tcmf_distributed(self):
        """Ray-distributed training/prediction with num_workers > 1."""
        input = dict({'id': self.id, 'y': self.data})
        from zoo.orca import init_orca_context, stop_orca_context
        init_orca_context(cores=4, spark_log_level="INFO", init_ray_on_spark=True,
                          object_store_memory="1g")
        self.model.fit(input, num_workers=4, **self.fit_params)
        with tempfile.TemporaryDirectory() as tempdirname:
            self.model.save(tempdirname)
            loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
        yhat = self.model.predict(horizon=self.horizon, num_workers=4)
        yhat_loaded = loaded_model.predict(horizon=self.horizon, num_workers=4)
        yhat_id = yhat_loaded["id"]
        np.testing.assert_equal(yhat_id, self.id)
        yhat = yhat["prediction"]
        yhat_loaded = yhat_loaded["prediction"]
        assert yhat.shape == (self.num_samples, self.horizon)
        np.testing.assert_equal(yhat, yhat_loaded)
        self.model.fit_incremental({'y': self.data_new})
        yhat_incr = self.model.predict(horizon=self.horizon)
        yhat_incr = yhat_incr["prediction"]
        assert yhat_incr.shape == (self.num_samples, self.horizon)
        np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, yhat, yhat_incr)
        target_value = dict({"y": self.data_new})
        assert self.model.evaluate(target_value=target_value, metric=['mse'])
        stop_orca_context()


if __name__ == "__main__":
    pytest.main([__file__])
| apache-2.0 |
cmvelo/ansible-modules-core | cloud/openstack/os_client_config.py | 75 | 2295 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os_client_config
from os_client_config import exceptions
DOCUMENTATION = '''
---
module: os_client_config
short_description: Get OpenStack Client config
description:
- Get I(openstack) client config data from clouds.yaml or environment
version_added: "2.0"
notes:
- Facts are placed in the C(openstack.clouds) variable.
options:
clouds:
description:
- List of clouds to limit the return list to. No value means return
information on all configured clouds
required: false
default: []
requirements: [ os-client-config ]
author: "Monty Taylor (@emonty)"
'''
EXAMPLES = '''
# Get list of clouds that do not support security groups
- os_client_config:
- debug: var={{ item }}
with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}"
# Get the information back just about the mordred cloud
- os_client_config:
clouds:
- mordred
'''
def main():
    """Ansible entry point: publish os-client-config cloud data as facts.

    Reads the optional ``clouds`` module parameter (a list of cloud names
    used to filter the result; empty means "all clouds") and exits with the
    matching cloud configs under the ``openstack.clouds`` fact.
    """
    module = AnsibleModule(argument_spec=dict(
        clouds=dict(required=False, default=[]),
    ))
    p = module.params

    try:
        config = os_client_config.OpenStackConfig()
        clouds = []
        for cloud in config.get_all_clouds():
            # No filter given means return every configured cloud.
            if not p['clouds'] or cloud.name in p['clouds']:
                cloud.config['name'] = cloud.name
                clouds.append(cloud.config)
        module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds)))
    except exceptions.OpenStackConfigException as e:
        module.fail_json(msg=str(e))

# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
tod31/pyload | module/lib/beaker/ext/database.py | 46 | 6000 | import cPickle
import logging
import pickle
from datetime import datetime
from beaker.container import OpenResourceNamespaceManager, Container
from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter
from beaker.synchronization import file_synchronizer, null_synchronizer
from beaker.util import verify_directory, SyncDict
log = logging.getLogger(__name__)
sa = None
pool = None
types = None
class DatabaseNamespaceManager(OpenResourceNamespaceManager):
    """Beaker namespace manager that persists each cache namespace as a
    single pickled row in a SQLAlchemy-managed database table.

    The in-memory view of the namespace is a plain dict (``self.hash``);
    ``do_open``/``do_close`` load and store it from/to the row keyed by
    the namespace name.
    """

    # Process-wide registries so engines/metadata and Table objects are
    # created once per (url + table_name) key and shared between managers.
    metadatas = SyncDict()
    tables = SyncDict()

    @classmethod
    def _init_dependencies(cls):
        """Import SQLAlchemy lazily so beaker can be used without it installed."""
        global sa, pool, types
        if sa is not None:
            return
        try:
            import sqlalchemy as sa
            import sqlalchemy.pool as pool
            from sqlalchemy import types
        except ImportError:
            raise InvalidCacheBackendError("Database cache backend requires "
                                           "the 'sqlalchemy' library")

    def __init__(self, namespace, url=None, sa_opts=None, optimistic=False,
                 table_name='beaker_cache', data_dir=None, lock_dir=None,
                 **params):
        """Creates a database namespace manager

        ``url``
            SQLAlchemy compliant db url
        ``sa_opts``
            A dictionary of SQLAlchemy keyword options to initialize the engine
            with.
        ``optimistic``
            Use optimistic session locking, note that this will result in an
            additional select when updating a cache value to compare version
            numbers.
        ``table_name``
            The table name to use in the database for the cache.
        """
        OpenResourceNamespaceManager.__init__(self, namespace)

        if sa_opts is None:
            sa_opts = params

        # Prefer an explicit lock_dir; otherwise derive one from data_dir.
        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_db_lock"
        if self.lock_dir:
            verify_directory(self.lock_dir)

        # Check to see if the table's been created before
        url = url or sa_opts['sa.url']
        table_key = url + table_name

        def make_cache():
            # Check to see if we have a connection pool open already
            meta_key = url + table_name

            def make_meta():
                # SQLAlchemy pops the url, this ensures it sticks around
                # later
                sa_opts['sa.url'] = url
                engine = sa.engine_from_config(sa_opts, 'sa.')
                meta = sa.MetaData()
                meta.bind = engine
                return meta
            meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta)
            # Create the table object and cache it now
            cache = sa.Table(table_name, meta,
                             sa.Column('id', types.Integer, primary_key=True),
                             sa.Column('namespace', types.String(255), nullable=False),
                             sa.Column('accessed', types.DateTime, nullable=False),
                             sa.Column('created', types.DateTime, nullable=False),
                             sa.Column('data', types.PickleType, nullable=False),
                             sa.UniqueConstraint('namespace')
                             )
            cache.create(checkfirst=True)
            return cache
        self.hash = {}
        self._is_new = False
        self.loaded = False
        self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)

    def get_access_lock(self):
        # Plain reads/writes are unsynchronized; the database row is the
        # shared source of truth.
        return null_synchronizer()

    def get_creation_lock(self, key):
        # Value-creation races are serialized through a file lock.
        return file_synchronizer(
            identifier="databasecontainer/funclock/%s" % self.namespace,
            lock_dir=self.lock_dir)

    def do_open(self, flags):
        """Load the namespace row into ``self.hash`` (once per manager)."""
        # If we already loaded the data, don't bother loading it again
        if self.loaded:
            self.flags = flags
            return
        cache = self.cache
        result = sa.select([cache.c.data],
                           cache.c.namespace == self.namespace
                           ).execute().fetchone()
        if not result:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                self.hash = result['data']
            except (IOError, OSError, EOFError, cPickle.PickleError,
                    pickle.PickleError):
                # Corrupt/unreadable pickle: discard and start a fresh namespace.
                log.debug("Couldn't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True

    def do_close(self):
        """Persist ``self.hash`` back to the database if opened writable."""
        # Only flush when opened for create ('c') or write ('w').
        if self.flags is not None and self.flags in ('c', 'w'):
            cache = self.cache
            if self._is_new:
                cache.insert().execute(namespace=self.namespace, data=self.hash,
                                       accessed=datetime.now(),
                                       created=datetime.now())
                self._is_new = False
            else:
                cache.update(cache.c.namespace == self.namespace).execute(
                    data=self.hash, accessed=datetime.now())
        self.flags = None

    def do_remove(self):
        """Delete the namespace row and reset the in-memory dict."""
        cache = self.cache
        cache.delete(cache.c.namespace == self.namespace).execute()
        self.hash = {}

        # We can retain the fact that we did a load attempt, but since the
        # file is gone this will be a new namespace should it be saved.
        self._is_new = True

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        # dict.has_key() is deprecated (and removed in Python 3); use `in`.
        return key in self.hash

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()
class DatabaseContainer(Container):
    # Container flavor whose backing store is the database namespace manager.
    namespace_manager = DatabaseNamespaceManager
| gpl-3.0 |
havatv/QGIS | tests/src/python/test_qgsfieldmappingwidget.py | 30 | 13209 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsFieldMapping widget and model.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alessandro Pasotti'
__date__ = '16/03/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (
QgsFields,
QgsField,
QgsFieldConstraints,
QgsProperty
)
from qgis.gui import (
QgsFieldMappingWidget,
QgsFieldMappingModel,
)
from qgis.PyQt.Qt import Qt
from qgis.PyQt.QtCore import (
QCoreApplication,
QVariant,
QModelIndex,
QItemSelectionModel,
)
from qgis.PyQt.QtGui import (
QColor
)
from qgis.testing import start_app, unittest
class TestPyQgsFieldMappingModel(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        # Give this test class its own settings scope so runs don't interfere.
        QCoreApplication.setOrganizationName("QGIS_Test")
        QCoreApplication.setOrganizationDomain(cls.__name__)
        QCoreApplication.setApplicationName(cls.__name__)
        start_app()
    def setUp(self):
        """Run before each test"""
        # Two source fields: a string and an int (length 10, precision 8).
        source_fields = QgsFields()
        f = QgsField('source_field1', QVariant.String)
        self.assertTrue(source_fields.append(f))
        f = QgsField('source_field2', QVariant.Int, 'integer', 10, 8)
        self.assertTrue(source_fields.append(f))
        # Three destination fields: int + two strings.
        destination_fields = QgsFields()
        f = QgsField('destination_field1', QVariant.Int, 'integer', 10, 8)
        self.assertTrue(destination_fields.append(f))
        f = QgsField('destination_field2', QVariant.String)
        self.assertTrue(destination_fields.append(f))
        f = QgsField('destination_field3', QVariant.String)
        self.assertTrue(destination_fields.append(f))
        self.source_fields = source_fields
        self.destination_fields = destination_fields
    def _showDialog(self, widget):
        """Used during development"""
        # Helper to visually inspect a widget in a modal dialog; not called
        # by the automated tests.
        from qgis.PyQt.QtWidgets import QDialog, QVBoxLayout
        d = QDialog()
        l = QVBoxLayout()
        l.addWidget(widget)
        d.setLayout(l)
        d.exec()
    def testModel(self):
        """Test the mapping model"""
        model = QgsFieldMappingModel(self.source_fields, self.destination_fields)
        self.assertEqual(model.rowCount(QModelIndex()), 3)
        # Out-of-range index yields no data.
        self.assertIsNone(model.data(model.index(9999, 0), Qt.DisplayRole))
        # We now have this default mapping:
        # source exp     | destination fld
        # -------------------------------------------
        # source_field2  | destination_field1
        # source_field1  | destination_field2
        # NOT SET (NULL) | destination_field3
        self.assertEqual(model.data(model.index(0, 0), Qt.DisplayRole), '"source_field2"')
        self.assertEqual(model.data(model.index(0, 1), Qt.DisplayRole), 'destination_field1')
        # Columns 3/4 are the destination field's length and precision.
        self.assertEqual(model.data(model.index(0, 3), Qt.DisplayRole), 10)
        self.assertEqual(model.data(model.index(0, 4), Qt.DisplayRole), 8)
        self.assertEqual(model.data(model.index(1, 0), Qt.DisplayRole), '"source_field1"')
        self.assertEqual(model.data(model.index(1, 1), Qt.DisplayRole), 'destination_field2')
        self.assertEqual(model.data(model.index(2, 0), Qt.DisplayRole), QVariant())
        self.assertEqual(model.data(model.index(2, 1), Qt.DisplayRole), 'destination_field3')
        # Test expression scope
        ctx = model.contextGenerator().createExpressionContext()
        self.assertTrue('source_field1' in ctx.fields().names())
        # Test add fields
        model.appendField(QgsField('destination_field4', QVariant.String))
        self.assertEqual(model.rowCount(QModelIndex()), 4)
        self.assertEqual(model.data(model.index(3, 1), Qt.DisplayRole), 'destination_field4')
        # Test remove field
        model.removeField(model.index(3, 0))
        self.assertEqual(model.rowCount(QModelIndex()), 3)
        self.assertEqual(model.data(model.index(2, 1), Qt.DisplayRole), 'destination_field3')
        # Test edit fields
        mapping = model.mapping()
        self.assertEqual(mapping[0].field.name(), 'destination_field1')
        self.assertEqual(mapping[1].field.name(), 'destination_field2')
        self.assertEqual(mapping[2].field.name(), 'destination_field3')
        self.assertEqual(mapping[0].originalName, 'destination_field1')
        self.assertEqual(mapping[1].originalName, 'destination_field2')
        self.assertEqual(mapping[2].originalName, 'destination_field3')
        # Test move up or down: moves at the edges or out of range must fail.
        self.assertFalse(model.moveUp(model.index(0, 0)))
        self.assertFalse(model.moveUp(model.index(100, 0)))
        self.assertFalse(model.moveDown(model.index(2, 0)))
        self.assertFalse(model.moveDown(model.index(100, 0)))
        self.assertTrue(model.moveDown(model.index(0, 0)))
        mapping = model.mapping()
        self.assertEqual(mapping[1].field.name(), 'destination_field1')
        self.assertEqual(mapping[0].field.name(), 'destination_field2')
        self.assertEqual(mapping[2].field.name(), 'destination_field3')
        self.assertEqual(mapping[1].originalName, 'destination_field1')
        self.assertEqual(mapping[0].originalName, 'destination_field2')
        self.assertEqual(mapping[2].originalName, 'destination_field3')
        self.assertTrue(model.moveUp(model.index(1, 0)))
        mapping = model.mapping()
        self.assertEqual(mapping[0].field.name(), 'destination_field1')
        self.assertEqual(mapping[1].field.name(), 'destination_field2')
        self.assertEqual(mapping[2].field.name(), 'destination_field3')
        self.assertEqual(mapping[0].originalName, 'destination_field1')
        self.assertEqual(mapping[1].originalName, 'destination_field2')
        self.assertEqual(mapping[2].originalName, 'destination_field3')
        self.assertTrue(model.moveUp(model.index(2, 0)))
        mapping = model.mapping()
        self.assertEqual(mapping[0].field.name(), 'destination_field1')
        self.assertEqual(mapping[2].field.name(), 'destination_field2')
        self.assertEqual(mapping[1].field.name(), 'destination_field3')
        self.assertEqual(mapping[0].originalName, 'destination_field1')
        self.assertEqual(mapping[2].originalName, 'destination_field2')
        self.assertEqual(mapping[1].originalName, 'destination_field3')
    def testSetSourceFields(self):
        """Test that changing source fields also empty expressions are updated"""
        model = QgsFieldMappingModel(self.source_fields, self.destination_fields)
        # destination_field3 starts unmapped (no source expression).
        self.assertEqual(model.data(model.index(2, 0), Qt.DisplayRole), QVariant())
        self.assertEqual(model.data(model.index(2, 1), Qt.DisplayRole), 'destination_field3')
        f = QgsField('source_field3', QVariant.String)
        fields = self.source_fields
        fields.append(f)
        model.setSourceFields(fields)
        # The new source field fills the previously empty mapping slot.
        self.assertEqual(model.data(model.index(0, 0), Qt.DisplayRole), '"source_field2"')
        self.assertEqual(model.data(model.index(0, 1), Qt.DisplayRole), 'destination_field1')
        self.assertEqual(model.data(model.index(1, 0), Qt.DisplayRole), '"source_field1"')
        self.assertEqual(model.data(model.index(1, 1), Qt.DisplayRole), 'destination_field2')
        self.assertEqual(model.data(model.index(2, 0), Qt.DisplayRole), '"source_field3"')
        self.assertEqual(model.data(model.index(2, 1), Qt.DisplayRole), 'destination_field3')
    def testProperties(self):
        """Mapping expressions are exposed both through mapping() and as a
        QgsProperty map, and can be written back via setFieldPropertyMap()."""
        model = QgsFieldMappingModel(self.source_fields, self.destination_fields)
        # Assign one expression per destination field: a literal, a plain
        # field reference, and a real expression using a variable.
        model.setDestinationFields(self.destination_fields, {'destination_field1': '5',
                                                             'destination_field2': 'source_field2',
                                                             'destination_field3': 'source_field2 * @myvar'})
        mapping = model.mapping()
        self.assertEqual(mapping[0].field.name(), 'destination_field1')
        self.assertEqual(mapping[1].field.name(), 'destination_field2')
        self.assertEqual(mapping[2].field.name(), 'destination_field3')
        self.assertEqual(mapping[0].expression, '5')
        self.assertEqual(mapping[1].expression, 'source_field2')
        self.assertEqual(mapping[2].expression, 'source_field2 * @myvar')
        # fieldPropertyMap() distinguishes plain field references
        # (fromField) from everything else (fromExpression).
        self.assertEqual(model.fieldPropertyMap(), {'destination_field1': QgsProperty.fromExpression('5'),
                                                    'destination_field2': QgsProperty.fromField('source_field2'),
                                                    'destination_field3': QgsProperty.fromExpression(
                                                        'source_field2 * @myvar'),
                                                    })
        # A freshly constructed model (no explicit expressions) appears to
        # match source to destination fields automatically -- see asserts.
        model = QgsFieldMappingModel(self.source_fields, self.destination_fields)
        self.assertEqual(model.fieldPropertyMap(), {'destination_field1': QgsProperty.fromField('source_field2'),
                                                    'destination_field2': QgsProperty.fromField('source_field1'),
                                                    'destination_field3': QgsProperty.fromExpression(''),
                                                    })
        # Writing a property map updates the model; note that a static value
        # property (fromValue) is read back as an expression property.
        model.setFieldPropertyMap({
            'destination_field1': QgsProperty.fromField('source_field1'),
            'destination_field2': QgsProperty.fromExpression('55*6'),
            'destination_field3': QgsProperty.fromValue(6),
        })
        self.assertEqual(model.fieldPropertyMap(), {
            'destination_field1': QgsProperty.fromField('source_field1'),
            'destination_field2': QgsProperty.fromExpression('55*6'),
            'destination_field3': QgsProperty.fromExpression('6'),
        })
def testWidget(self):
"""Test widget operations"""
widget = QgsFieldMappingWidget()
for i in range(10):
widget.appendField(QgsField(str(i)))
self.assertTrue(widget.model().rowCount(QModelIndex()), 10)
def _compare(widget, expected):
actual = []
for field in widget.mapping():
actual.append(int(field.originalName))
self.assertEqual(actual, expected)
_compare(widget, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
selection_model = widget.selectionModel()
selection_model.clear()
for i in range(0, 10, 2):
selection_model.select(widget.model().index(i, 0), QItemSelectionModel.Select)
self.assertTrue(widget.moveSelectedFieldsDown())
_compare(widget, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8])
selection_model.clear()
for i in range(1, 10, 2):
selection_model.select(widget.model().index(i, 0), QItemSelectionModel.Select)
self.assertTrue(widget.moveSelectedFieldsUp())
_compare(widget, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
selection_model.clear()
for i in range(0, 10, 2):
selection_model.select(widget.model().index(i, 0), QItemSelectionModel.Select)
self.assertTrue(widget.removeSelectedFields())
_compare(widget, [1, 3, 5, 7, 9])
# Test set destination fields
widget.setSourceFields(self.source_fields)
widget.setDestinationFields(self.destination_fields)
mapping = widget.mapping()
self.assertEqual(mapping[0].field.name(), 'destination_field1')
self.assertEqual(mapping[1].field.name(), 'destination_field2')
self.assertEqual(mapping[2].field.name(), 'destination_field3')
self.assertEqual(mapping[0].originalName, 'destination_field1')
self.assertEqual(mapping[1].originalName, 'destination_field2')
self.assertEqual(mapping[2].originalName, 'destination_field3')
# Test constraints
f = QgsField('constraint_field', QVariant.Int)
constraints = QgsFieldConstraints()
constraints.setConstraint(QgsFieldConstraints.ConstraintNotNull, QgsFieldConstraints.ConstraintOriginProvider)
constraints.setConstraint(QgsFieldConstraints.ConstraintExpression,
QgsFieldConstraints.ConstraintOriginProvider)
constraints.setConstraint(QgsFieldConstraints.ConstraintUnique, QgsFieldConstraints.ConstraintOriginProvider)
f.setConstraints(constraints)
fields = QgsFields()
fields.append(f)
widget.setDestinationFields(fields)
self.assertEqual(widget.model().data(widget.model().index(0, 5, QModelIndex()), Qt.DisplayRole),
"Constraints active")
self.assertEqual(widget.model().data(widget.model().index(0, 5, QModelIndex()), Qt.ToolTipRole),
"Unique<br>Not null<br>Expression")
self.assertEqual(widget.model().data(widget.model().index(0, 5, QModelIndex()), Qt.BackgroundColorRole),
QColor(255, 224, 178))
# self._showDialog(widget)
# Run the QGIS field-mapping tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
bixbydev/Bixby | google/dist/gdata-2.0.18/tests/gdata_tests/spreadsheets/data_test.py | 23 | 29150 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import gdata.spreadsheets.data
import gdata.test_config as conf
import atom.core
# Canned Atom entry for a single spreadsheet from the spreadsheets meta-feed;
# parsed by SpreadsheetEntryTest below.
SPREADSHEET = """<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:gd="http://schemas.google.com/g/2005"
gd:etag='"BxAUSQUJRCp7ImBq"'>
<id>http://spreadsheets.google.com/feeds/spreadsheets/private/full/key</id>
<updated>2006-11-17T18:24:18.231Z</updated>
<title type="text">Groceries R Us</title>
<content type="text">Groceries R Us</content>
<link rel="http://schemas.google.com/spreadsheets/2006#worksheetsfeed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/spreadsheets/private/full/key"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>fitz@gmail.com</email>
</author>
</entry>"""
# Canned worksheets feed with one worksheet entry; parsed by
# WorksheetEntryTest below.
WORKSHEETS_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/"
xmlns:gs="http://schemas.google.com/spreadsheets/2006"
xmlns:gd="http://schemas.google.com/g/2005"
gd:etag='W/"D0cERnk-eip7ImA9WBBXGEg."'>
<id>http://spreadsheets.google.com/feeds/worksheets/key/private/full</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<title type="text">Groceries R Us</title>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<link
rel="http://schemas.google.com/g/2005#post" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>fitz@gmail.com</email>
</author>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>1</openSearch:itemsPerPage>
<entry gd:etag='"YDwqeyI."'>
<id>http://spreadsheets.google.com/feeds/worksheets/0/private/full/1</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<title type="text">Sheet1</title>
<content type="text">Sheet1</content>
<link rel="http://schemas.google.com/spreadsheets/2006#listfeed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/0/1/private/full"/>
<link rel="http://schemas.google.com/spreadsheets/2006#cellsfeed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/0/1/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/0/private/full/1"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/.../0/.../1/version"/>
<gs:rowCount>100</gs:rowCount>
<gs:colCount>20</gs:colCount>
</entry>
</feed>"""
# Minimal entry a client would POST to create a new worksheet.
NEW_WORKSHEET = """<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:gs="http://schemas.google.com/spreadsheets/2006">
<title>Expenses</title>
<gs:rowCount>50</gs:rowCount>
<gs:colCount>10</gs:colCount>
</entry>"""

# Worksheet entry as returned after an edit (note: fragment without
# namespace declarations; not meant to be parsed standalone).
EDIT_WORKSHEET = """<entry>
<id>
http://spreadsheets.google.com/feeds/worksheets/k/private/full/w
</id>
<updated>2007-07-30T18:51:30.666Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#worksheet"/>
<title type="text">Income</title>
<content type="text">Expenses</content>
<link rel="http://schemas.google.com/spreadsheets/2006#listfeed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full"/>
<link rel="http://schemas.google.com/spreadsheets/2006#cellsfeed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/k/w/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/k/private/full/w"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/.../k/private/full/w/v"/>
<gs:rowCount>45</gs:rowCount>
<gs:colCount>15</gs:colCount>
</entry>"""
# Minimal entry a client would POST to create a new table.
NEW_TABLE = """<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:gs="http://schemas.google.com/spreadsheets/2006">
<title type='text'>Table 1</title>
<summary type='text'>This is a list of all who have registered to vote and
whether or not they qualify to vote.</summary>
<gs:worksheet name='Sheet1' />
<gs:header row='1' />
<gs:data numRows='0' startRow='2'>
<gs:column index='B' name='Birthday' />
<gs:column index='C' name='Age' />
<gs:column index='A' name='Name' />
<gs:column index='D' name='CanVote' />
</gs:data>
</entry>"""

# Canned tables feed with two table entries (different header rows and
# insertion modes).
TABLES_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/"
xmlns:gs="http://schemas.google.com/spreadsheets/2006"
xmlns:gd="http://schemas.google.com/g/2005"
gd:etag='W/"DEQHQn84fCt7ImA9WxJTGEU."'>
<id>
http://spreadsheets.google.com/feeds/key/tables</id>
<updated>2009-04-28T02:38:53.134Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/spreadsheets/2006#table' />
<title>Sample table and record feed</title>
<link rel='alternate' type='text/html'
href='http://spreadsheets.google.com/ccc?key=key' />
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/tables' />
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/tables' />
<link rel='self' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/tables' />
<author>
<name>Liz</name>
<email>liz@gmail.com</email>
</author>
<openSearch:totalResults>2</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<entry gd:etag='"HBcUVgtWASt7ImBq"'>
<id>
http://spreadsheets.google.com/feeds/key/tables/0</id>
<updated>2009-04-28T01:20:32.707Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">
2009-04-28T01:20:32.707Z</app:edited>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/spreadsheets/2006#table' />
<title>Table 1</title>
<summary>This is a list of all who have registered to vote and
whether or not they qualify to vote.</summary>
<content type='application/atom+xml;type=feed'
src='http://spreadsheets.google.com/feeds/key/records/0' />
<link rel='self' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/tables/0' />
<link rel='edit' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/tables/0' />
<gs:worksheet name='Sheet1' />
<gs:header row='1' />
<gs:data insertionMode='overwrite' numRows='2' startRow='2'>
<gs:column index='B' name='Birthday' />
<gs:column index='C' name='Age' />
<gs:column index='A' name='Name' />
<gs:column index='D' name='CanVote' />
</gs:data>
</entry>
<entry gd:etag='"HBcUVgdCGyt7ImBq"'>
<id>
http://spreadsheets.google.com/feeds/key/tables/1</id>
<updated>2009-04-28T01:20:38.313Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">
2009-04-28T01:20:38.313Z</app:edited>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/spreadsheets/2006#table' />
<title>Table 2</title>
<summary>List of detailed information about each voter.</summary>
<content type='application/atom+xml;type=feed'
src='http://spreadsheets.google.com/feeds/key/records/1' />
<link rel='self' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/tables/1' />
<link rel='edit' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/tables/1' />
<gs:worksheet name='Sheet1' />
<gs:header row='30' />
<gs:data insertionMode='overwrite' numRows='10' startRow='34'>
<gs:column index='C' name='Last' />
<gs:column index='B' name='First' />
<gs:column index='D' name='DOB' />
<gs:column index='E' name='Driver License?' />
</gs:data>
</entry>
</feed>"""
# Minimal entry a client would POST to add a record (row) to a table.
NEW_RECORD = """<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:gs="http://schemas.google.com/spreadsheets/2006">
<title>Darcy</title>
<gs:field name='Birthday'>2/10/1785</gs:field>
<gs:field name='Age'>28</gs:field>
<gs:field name='Name'>Darcy</gs:field>
<gs:field name='CanVote'>No</gs:field>
</entry>"""
# Canned records feed with two record entries; parsed by RecordEntryTest
# to exercise field lookup by column index and by column name.
RECORDS_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/"
xmlns:gs="http://schemas.google.com/spreadsheets/2006"
xmlns:gd="http://schemas.google.com/g/2005"
gd:etag='W/"DEQHQn84fCt7ImA9WxJTGEU."'>
<id>http://spreadsheets.google.com/feeds/key/records/0</id>
<updated>2009-04-28T02:38:53.134Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/spreadsheets/2006#record' />
<title>Table 1</title>
<link rel='alternate' type='text/html'
href='http://spreadsheets.google.com/pub?key=key' />
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/records/0' />
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/records/0' />
<link rel='self' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/records/0' />
<author>
<name>Liz</name>
<email>liz@gmail.com</email>
</author>
<openSearch:totalResults>2</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<entry gd:etag='"UB8DTlJAKSt7ImA-WkUT"'>
<id>
http://spreadsheets.google.com/feeds/key/records/0/cn6ca</id>
<updated>2009-04-28T02:38:53.134Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">
2009-04-28T02:38:53.134Z</app:edited>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/spreadsheets/2006#record' />
<title>Darcy</title>
<content>Birthday: 2/10/1785, Age: 28, Name: Darcy,
CanVote: No</content>
<link rel='self' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/records/0/cn6ca' />
<link rel='edit' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/records/0/cn6ca' />
<gs:field index='B' name='Birthday'>2/10/1785</gs:field>
<gs:field index='C' name='Age'>28</gs:field>
<gs:field index='A' name='Name'>Darcy</gs:field>
<gs:field index='D' name='CanVote'>No</gs:field>
</entry>
<entry gd:etag='"UVBFUEcNRCt7ImA9DU8."'>
<id>
http://spreadsheets.google.com/feeds/key/records/0/cokwr</id>
<updated>2009-04-28T02:38:53.134Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">
2009-04-28T02:38:53.134Z</app:edited>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/spreadsheets/2006#record' />
<title>Jane</title>
<content>Birthday: 1/6/1791, Age: 22, Name: Jane,
CanVote: Yes</content>
<link rel='self' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/records/0/cokwr' />
<link rel='edit' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/records/0/cokwr' />
<gs:field index='B' name='Birthday'>1/6/1791</gs:field>
<gs:field index='C' name='Age'>22</gs:field>
<gs:field index='A' name='Name'>Jane</gs:field>
<gs:field index='D' name='CanVote'>Yes</gs:field>
</entry>
</feed>"""
# Canned list feed (gsx: extended columns) with two row entries.
LIST_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/"
xmlns:gsx="http://schemas.google.com/spreadsheets/2006/extended"
xmlns:gd="http://schemas.google.com/g/2005"
gd:etag='W/"D0cERnk-eip7ImA9WBBXGEg."'>
<id>
http://spreadsheets.google.com/feeds/list/key/worksheetId/private/full
</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<title type="text">Sheet1</title>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full"/>
<link rel="http://schemas.google.com/g/2005#post"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>fitz@gmail.com</email>
</author>
<openSearch:totalResults>8</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>8</openSearch:itemsPerPage>
<entry gd:etag='"S0wCTlpIIip7ImA0X0QI"'>
<id>http://spreadsheets.google.com/feeds/list/k/w/private/full/r</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#list"/>
<title type="text">Bingley</title>
<content type="text">Hours: 10, Items: 2, IPM: 0.0033</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full/r"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full/r/v"/>
<gsx:name>Bingley</gsx:name>
<gsx:hours>10</gsx:hours>
<gsx:items>2</gsx:items>
<gsx:ipm>0.0033</gsx:ipm>
</entry>
<entry gd:etag='"AxQDSXxjfyp7ImA0ChJVSBI."'>
<id>
http://spreadsheets.google.com/feeds/list/k/w/private/full/rowId
</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#list"/>
<title type="text">Charlotte</title>
<content type="text">Hours: 60, Items: 18000, IPM: 5</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full/r"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full/r/v"/>
<gsx:name>Charlotte</gsx:name>
<gsx:hours>60</gsx:hours>
<gsx:items>18000</gsx:items>
<gsx:ipm>5</gsx:ipm>
</entry>
</feed>"""
# Row entry a client would POST to the list feed; parsed by ListEntryTest.
NEW_ROW = """<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:gsx="http://schemas.google.com/spreadsheets/2006/extended">
<gsx:hours>1</gsx:hours>
<gsx:ipm>1</gsx:ipm>
<gsx:items>60</gsx:items>
<gsx:name>Elizabeth Bennet</gsx:name>
</entry>"""

# Row entry as returned after an update (hours/items changed; note the
# content element still shows the pre-update values).
UPDATED_ROW = """<entry gd:etag='"S0wCTlpIIip7ImA0X0QI"'
xmlns="http://www.w3.org/2005/Atom"
xmlns:gd="http://schemas.google.com/g/2005"
xmlns:gsx="http://schemas.google.com/spreadsheets/2006/extended">
<id>http://spreadsheets.google.com/feeds/list/k/w/private/full/rowId</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#list"/>
<title type="text">Bingley</title>
<content type="text">Hours: 10, Items: 2, IPM: 0.0033</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full/r"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full/r/v"/>
<gsx:name>Bingley</gsx:name>
<gsx:hours>20</gsx:hours>
<gsx:items>4</gsx:items>
<gsx:ipm>0.0033</gsx:ipm>
</entry>"""
# Canned cells feed with three cell entries. Bug fix: the #post <link>
# element was unterminated (its href attribute and "/>" were missing),
# which made the whole fixture malformed XML.
CELLS_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/"
xmlns:gs="http://schemas.google.com/spreadsheets/2006"
xmlns:gd="http://schemas.google.com/g/2005"
gd:etag='W/"D0cERnk-eip7ImA9WBBXGEg."'>
<id>
http://spreadsheets.google.com/feeds/cells/key/worksheetId/private/full
</id>
<updated>2006-11-17T18:27:32.543Z</updated>
<title type="text">Sheet1</title>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/k/w/private/full"/>
<link rel="http://schemas.google.com/g/2005#post"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/k/w/private/full"/>
<link rel="http://schemas.google.com/g/2005#batch"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/k/w/private/full/batch"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/k/w/private/full"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>fitz@gmail.com</email>
</author>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>36</openSearch:itemsPerPage>
<gs:rowCount>100</gs:rowCount>
<gs:colCount>20</gs:colCount>
<entry gd:etag='"ImA9D1APFyp7"'>
<id>
http://spreadsheets.google.com/feeds/cells/k/w/private/full/R1C1
</id>
<updated>2006-11-17T18:27:32.543Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#cell"/>
<title type="text">A1</title>
<content type="text">Name</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/k/w/pr/full/R1C1"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/./cells/k/w/pr/full/R1C1/bgvjf"/>
<gs:cell row="1" col="1" inputValue="Name">Name</gs:cell>
</entry>
<entry gd:etag='"YD0PS1YXByp7Ig.."'>
<id>
http://spreadsheets.google.com/feeds/cells/k/w/private/full/R1C2
</id>
<updated>2006-11-17T18:27:32.543Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#cell"/>
<title type="text">B1</title>
<content type="text">Hours</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/k/w/pr/full/R1C2"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/./cells/k/w/pr/full/R1C2/1pn567"/>
<gs:cell row="1" col="2" inputValue="Hours">Hours</gs:cell>
</entry>
<entry gd:etag='"ImB5CBYSRCp7"'>
<id>
http://spreadsheets.google.com/feeds/cells/k/w/private/full/R9C4
</id>
<updated>2006-11-17T18:27:32.543Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#cell"/>
<title type="text">D9</title>
<content type="text">5</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/k/w/pr/full/R9C4"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/./cells/k/w/pr/full/R9C4/srevc"/>
<gs:cell row="9" col="4"
inputValue="=FLOOR(R[0]C[-1]/(R[0]C[-2]*60),.0001)"
numericValue="5.0">5</gs:cell>
</entry>
</feed>"""
# Batch cell-update request feed with two update entries. Bug fix: the
# batch id elements were written as <batch:id">...</batch:id"> with stray
# quote characters, which made the fixture malformed XML.
BATCH_CELLS = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:batch="http://schemas.google.com/gdata/batch"
xmlns:gs="http://schemas.google.com/spreadsheets/2006">
<id>
http://spreadsheets.google.com/feeds/cells/key/worksheetId/private/full
</id>
<entry>
<batch:id>A1</batch:id>
<batch:operation type="update"/>
<id>
http://spreadsheets.google.com/feeds/cells/k/w/private/full/cellId
</id>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets/google.com/./cells/k/w/pr/full/R2C4/v"/>
<gs:cell row="2" col="4" inputValue="newData"/>
</entry>
<entry>
<batch:id>A2</batch:id>
<batch:operation type="update"/>
<title type="text">A2</title>
<id>
http://spreadsheets.google.com/feeds/cells/k/w/private/full/cellId
</id>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets/google.com/feeds/cells/k/w/pr/full/R2C5/v"/>
<gs:cell row="2" col="5" inputValue="moreInfo"/>
</entry>
</feed>"""
class SpreadsheetEntryTest(unittest.TestCase):
  """Tests parsing of a spreadsheet entry and spreadsheet-key extraction."""

  def setUp(self):
    # Parse the canned SPREADSHEET entry once for every test method.
    self.spreadsheet = atom.core.parse(
        SPREADSHEET, gdata.spreadsheets.data.Spreadsheet)

  def test_check_parsing(self):
    # Core Atom members and the typed link finders must reflect the XML.
    self.assertEqual(self.spreadsheet.etag, '"BxAUSQUJRCp7ImBq"')
    self.assertEqual(self.spreadsheet.id.text,
                     'http://spreadsheets.google.com/feeds/spreadsheets'
                     '/private/full/key')
    self.assertEqual(self.spreadsheet.updated.text,
                     '2006-11-17T18:24:18.231Z')
    self.assertEqual(self.spreadsheet.find_worksheets_feed(),
                     'http://spreadsheets.google.com/feeds/worksheets'
                     '/key/private/full')
    self.assertEqual(self.spreadsheet.find_self_link(),
                     'http://spreadsheets.google.com/feeds/spreadsheets'
                     '/private/full/key')

  def test_get_spreadsheet_key(self):
    # Both the snake_case and the legacy CamelCase accessor are exercised.
    self.assertEqual(self.spreadsheet.get_spreadsheet_key(), 'key')
    # Change the entry id so the key is recomputed from it.
    self.spreadsheet.id.text = '42'
    self.assertEqual(self.spreadsheet.GetSpreadsheetKey(), '42')
class WorksheetEntryTest(unittest.TestCase):
  """Tests parsing of the worksheets feed and worksheet-id extraction."""

  def setUp(self):
    self.worksheets = atom.core.parse(
        WORKSHEETS_FEED, gdata.spreadsheets.data.WorksheetsFeed)

  def test_check_parsing(self):
    self.assertEqual(len(self.worksheets.entry), 1)
    self.assertEqual(self.worksheets.entry[0].get_id(),
        'http://spreadsheets.google.com/feeds/worksheets/0/private/full/1')

  def test_get_worksheet_id(self):
    # The worksheet id is the last path segment of the entry id; both the
    # snake_case and the legacy CamelCase accessor are exercised.
    self.assertEqual(self.worksheets.entry[0].get_worksheet_id(), '1')
    self.worksheets.entry[0].id.text = '////spam'
    self.assertEqual(self.worksheets.entry[0].GetWorksheetId(), 'spam')
class ListEntryTest(unittest.TestCase):
  """Tests get_value/set_value round-trips on list-feed row entries."""

  def test_get_and_set_column_value(self):
    row = atom.core.parse(NEW_ROW, gdata.spreadsheets.data.ListEntry)
    row.set_value('hours', '3')
    row.set_value('name', 'Lizzy')
    self.assertEqual(row.get_value('hours'), '3')
    self.assertEqual(row.get_value('ipm'), '1')
    self.assertEqual(row.get_value('items'), '60')
    self.assertEqual(row.get_value('name'), 'Lizzy')
    # Unknown columns read as None until they are set.
    self.assertEqual(row.get_value('x'), None)
    row.set_value('x', 'Test')
    self.assertEqual(row.get_value('x'), 'Test')
    # The serialized XML must contain every gsx: column that was set.
    # (self.assert_ is a deprecated alias of assertTrue; modernized here.)
    row_xml = str(row)
    self.assertTrue(row_xml.find(':x') > -1)
    self.assertTrue(row_xml.find('>Test</') > -1)
    self.assertTrue(row_xml.find(':hours') > -1)
    self.assertTrue(row_xml.find('>3</') > -1)
    self.assertTrue(row_xml.find(':ipm') > -1)
    self.assertTrue(row_xml.find('>1</') > -1)
    self.assertTrue(row_xml.find(':items') > -1)
    self.assertTrue(row_xml.find('>60</') > -1)
    self.assertTrue(row_xml.find(':name') > -1)
    self.assertTrue(row_xml.find('>Lizzy</') > -1)
    # Columns that were never set must not appear in the output.
    self.assertEqual(row_xml.find(':zzz'), -1)
    self.assertEqual(row_xml.find('>foo</'), -1)

  def test_check_parsing(self):
    row = atom.core.parse(NEW_ROW, gdata.spreadsheets.data.ListEntry)
    self.assertEqual(row.get_value('hours'), '1')
    self.assertEqual(row.get_value('ipm'), '1')
    self.assertEqual(row.get_value('items'), '60')
    self.assertEqual(row.get_value('name'), 'Elizabeth Bennet')
    self.assertEqual(row.get_value('none'), None)
    row = atom.core.parse(UPDATED_ROW, gdata.spreadsheets.data.ListEntry)
    self.assertEqual(row.get_value('hours'), '20')
    self.assertEqual(row.get_value('ipm'), '0.0033')
    self.assertEqual(row.get_value('items'), '4')
    self.assertEqual(row.get_value('name'), 'Bingley')
    self.assertEqual(row.get_value('x'), None)
    self.assertEqual(
        row.id.text, 'http://spreadsheets.google.com/feeds/list'
        '/k/w/private/full/rowId')
    self.assertEqual(row.updated.text, '2006-11-17T18:23:45.173Z')
    self.assertEqual(row.content.text, 'Hours: 10, Items: 2, IPM: 0.0033')
class RecordEntryTest(unittest.TestCase):
  """Tests field lookup by column index and by column name on records."""

  def setUp(self):
    self.records = atom.core.parse(
        RECORDS_FEED, gdata.spreadsheets.data.RecordsFeed)

  def test_get_by_index(self):
    self.assertEqual(self.records.entry[0].field[0].index, 'B')
    self.assertEqual(self.records.entry[0].field[0].name, 'Birthday')
    self.assertEqual(self.records.entry[0].field[0].text, '2/10/1785')
    self.assertEqual(self.records.entry[0].value_for_index('B'), '2/10/1785')
    # A missing column index raises FieldMissing; the legacy CamelCase
    # alias is exercised as well.
    self.assertRaises(gdata.spreadsheets.data.FieldMissing,
                      self.records.entry[0].ValueForIndex, 'E')
    self.assertEqual(self.records.entry[1].value_for_index('D'), 'Yes')

  def test_get_by_name(self):
    self.assertEqual(self.records.entry[0].ValueForName('Birthday'),
                     '2/10/1785')
    # A missing column name raises FieldMissing.
    self.assertRaises(gdata.spreadsheets.data.FieldMissing,
                      self.records.entry[0].value_for_name, 'Foo')
    self.assertEqual(self.records.entry[1].value_for_name('Age'), '22')
class BatchRequestTest(unittest.TestCase):
  """Tests the builder for batch cell-update request feeds."""

  def setUp(self):
    self.feed = gdata.spreadsheets.data.build_batch_cells_update('skey', 'wid')

  def test_builder(self):
    # A fresh batch feed has an edit link to the batch endpoint, an id for
    # the cells feed, and no entries yet.
    self.assertEqual(len(self.feed.link), 1)
    self.assertEqual(self.feed.link[0].rel, 'edit')
    self.assertEqual(self.feed.link[0].href,
        'https://spreadsheets.google.com/feeds/cells/skey/wid/'
        'private/full/batch')
    self.assertEqual(self.feed.id.text,
        'https://spreadsheets.google.com/feeds/cells/skey/wid/'
        'private/full')
    self.assertEqual(len(self.feed.entry), 0)

  def test_set_cell(self):
    # Each add_set_cell call appends an update entry whose id encodes the
    # R<row>C<col> cell address and whose batch id counts up from '0'.
    self.feed.add_set_cell(1, 2, 'value')
    self.assertEqual(len(self.feed.entry), 1)
    self.assertEqual(self.feed.entry[0].id.text,
        'https://spreadsheets.google.com/feeds/cells/skey/wid/private/'
        'full/R1C2')
    self.assertEqual(self.feed.entry[0].cell.row, '1')
    self.assertEqual(self.feed.entry[0].cell.col, '2')
    self.assertEqual(self.feed.entry[0].cell.input_value, 'value')
    self.assertEqual(self.feed.entry[0].batch_operation.type, 'update')
    self.assertEqual(self.feed.entry[0].batch_id.text, '0')
    self.feed.add_set_cell(3, 1, 'spam')
    self.assertEqual(len(self.feed.entry), 2)
    self.assertEqual(self.feed.entry[1].id.text,
        'https://spreadsheets.google.com/feeds/cells/skey/wid/private/'
        'full/R3C1')
    self.assertEqual(self.feed.entry[1].cell.row, '3')
    self.assertEqual(self.feed.entry[1].cell.col, '1')
    self.assertEqual(self.feed.entry[1].cell.input_value, 'spam')
    self.assertEqual(self.feed.entry[1].batch_operation.type, 'update')
    self.assertEqual(self.feed.entry[1].batch_id.text, '1')
class DataClassSanityTest(unittest.TestCase):
  """Smoke-checks the declared XML members of every spreadsheets data class."""

  def test_basic_element_structure(self):
    # conf.check_data_classes verifies each class's element structure.
    conf.check_data_classes(self, [
        gdata.spreadsheets.data.Cell, gdata.spreadsheets.data.ColCount,
        gdata.spreadsheets.data.Field, gdata.spreadsheets.data.Column,
        gdata.spreadsheets.data.Data, gdata.spreadsheets.data.Header,
        gdata.spreadsheets.data.RowCount, gdata.spreadsheets.data.Worksheet,
        gdata.spreadsheets.data.Spreadsheet,
        gdata.spreadsheets.data.SpreadsheetsFeed,
        gdata.spreadsheets.data.WorksheetEntry,
        gdata.spreadsheets.data.WorksheetsFeed,
        gdata.spreadsheets.data.Table,
        gdata.spreadsheets.data.TablesFeed,
        gdata.spreadsheets.data.Record,
        gdata.spreadsheets.data.RecordsFeed,
        gdata.spreadsheets.data.ListRow,
        gdata.spreadsheets.data.ListEntry,
        gdata.spreadsheets.data.ListsFeed,
        gdata.spreadsheets.data.CellEntry,
        gdata.spreadsheets.data.CellsFeed])
def suite():
  """Build the suite of every test case defined in this module.

  Bug fix: WorksheetEntryTest and BatchRequestTest were missing from the
  suite, so they only ran under unittest.main() and were silently skipped
  by suite-based runners.
  """
  return conf.build_suite([SpreadsheetEntryTest, WorksheetEntryTest,
                           ListEntryTest, RecordEntryTest,
                           BatchRequestTest, DataClassSanityTest])
# Run the gdata spreadsheets data tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
thiedri/experiments | sorting/question.py | 4 | 2911 | import sys
#compatibility
try: input = raw_input
except NameError: pass
class bcolors:
    """ANSI escape sequences for colored/styled terminal output."""
    HEADER = '\033[95m'      # bright magenta
    OKBLUE = '\033[94m'      # bright blue
    OKGREEN = '\033[92m'     # bright green
    WARNING = '\033[93m'     # bright yellow
    FAIL = '\033[91m'        # bright red
    ENDC = '\033[0m'         # reset all colors/attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
# Map upper-case color names (as accepted by write()) to escape codes.
# MAGENTA and PURPLE are aliases; NONE resets the terminal.
colors = {
    'GREEN': bcolors.OKGREEN,
    'BLUE': bcolors.OKBLUE,
    'MAGENTA': bcolors.HEADER,
    'PURPLE': bcolors.HEADER,
    'YELLOW': bcolors.WARNING,
    'RED': bcolors.FAIL,
    'NONE': bcolors.ENDC
}

# Extra text attributes that can be combined with any color in write().
attribs = {
    'BOLD' : bcolors.BOLD,
    'UNDERLINE': bcolors.UNDERLINE,
}
def exit_cond(x):
    """Default exit predicate: True when *x* is one of the quit commands.

    Replaceable at runtime through set_exit_cond(). (Was a lambda bound to
    a name, which PEP 8 discourages; a def also gives a useful repr.)
    """
    return x in {'q', 'quit', 'leave', 'exit'}
def set_exit_cond(condition):
    """Replace the module-wide exit predicate used by the get_* helpers.

    *condition* must be a callable taking the raw input string and
    returning True when the prompt loop should abort.
    """
    global exit_cond
    exit_cond = condition
def get_char(s, char_list):
    """Prompt with *s* until the user types a string contained in *char_list*.

    Returns the accepted string, or None when the exit condition fires.
    Any other input silently re-prompts.
    """
    while( True ):
        string = input(s)
        if exit_cond(string):
            return None
        if string in char_list:
            return string
def get_number(s, max_val=None):
    """Prompt with *s* until the user enters an int (<= *max_val* if given).

    Returns the integer, or None when the exit condition fires. Values
    above *max_val* silently re-prompt.
    """
    while True:
        string = input(s)
        if exit_cond(string):
            return None
        try:
            val = int(string)
        except ValueError:
            # Bug fix: a bare ``except:`` here also swallowed
            # KeyboardInterrupt and EOFError, so Ctrl-C / Ctrl-D could not
            # break out of the prompt loop. Only bad numbers should retry.
            print ('Not a number. Try again')
            continue
        if max_val is None or val <= max_val:
            return val
def get_string(s):
    """Prompt once with *s*; return the reply, or None on an exit command."""
    reply = input(s)
    return None if exit_cond(reply) else reply
def get_word(s):
    """Prompt once with *s*; return False on an exit command, True otherwise.

    Note the boolean return: the text the user typed is discarded.
    """
    return not exit_cond(input(s))
def ask_addition_question(m, n):
    """Ask ``m + n = ?`` with up to three attempts.

    Returns 1 on a correct answer, -1 if the user typed an exit command,
    and 0 after three wrong answers.
    """
    prompt = '{0} + {1} = '.format(m, n)
    for _ in range(3):
        result = get_number(prompt)
        # PEP 8 (E711): identity test against the None singleton.
        if result is None:
            return -1
        if result == (m + n):
            print('Correct !')
            return 1
        print('Wrong. try again!')
    return 0
def ask_multiplication_question(m, n):
    """Ask ``m x n = ?`` with up to three attempts.

    Returns 1 on a correct answer, -1 if the user typed an exit command,
    and 0 after three wrong answers.
    """
    prompt = '{0} x {1} = '.format(m, n)
    for _ in range(3):
        result = get_number(prompt)
        # PEP 8 (E711): identity test against the None singleton.
        if result is None:
            return -1
        if result == (m * n):
            print('Correct !')
            return 1
        print('Wrong. try again!')
    return 0
def ask_subtraction_question(m, n):
    """Ask ``m - n = ?`` (always larger minus smaller) with up to three attempts.

    Returns 1 on a correct answer, -1 if the user typed an exit command,
    and 0 after three wrong answers.
    """
    # Ensure a non-negative answer. The original re-checked this on every
    # attempt, but the operands never change after the first swap, so the
    # loop-invariant swap is hoisted out.
    if m < n:
        m, n = n, m
    prompt = '{0} - {1} = '.format(m, n)
    for _ in range(3):
        result = get_number(prompt)
        # PEP 8 (E711): identity test against the None singleton.
        if result is None:
            return -1
        if result == (m - n):
            print('Correct !')
            return 1
        print('Wrong. try again!')
    return 0
def ask_word_question(word):
    """Show *word* (padded with spaces) for the user to read aloud.

    Returns False when the user typed an exit command, True otherwise.
    """
    padded = ' {0} '.format(word)
    return get_word(padded)
def write(text, color=None, *attrib):
    """Print *text*, optionally wrapped in ANSI color/attribute codes.

    *color* is a key of the module-level ``colors`` dict (case-insensitive);
    *attrib* are keys of ``attribs``. Whenever any styling is applied, the
    reset code is appended so the styling does not leak into later output.
    """
    prefix = ''
    suffix = ''  # fixed local-name typo ("sufix")
    # PEP 8 (E714): `is not None` instead of `not ... is None`.
    if color is not None:
        prefix += colors[color.upper()]
    for at in attrib:
        prefix += attribs[at.upper()]
    if prefix:
        suffix = colors['NONE']
    print(prefix + text + suffix)
| mit |
tomv564/LSP | tests/test_configurations.py | 1 | 3675 | from LSP.plugin.core.configurations import ConfigManager
from LSP.plugin.core.configurations import is_supported_syntax
from LSP.plugin.core.configurations import WindowConfigManager
from test_mocks import MockView
from test_mocks import MockWindow
from test_mocks import TEST_CONFIG, DISABLED_CONFIG
from test_mocks import TEST_LANGUAGE
import unittest
class GlobalConfigManagerTests(unittest.TestCase):
    """Behavior of the global ConfigManager and its per-window views."""

    def test_empty_configs(self):
        # With no global configs, the window-scoped manager exposes none.
        manager = ConfigManager([])
        window_mgr = manager.for_window(MockWindow())
        self.assertEqual(window_mgr.all, [])

    def test_global_config(self):
        # A globally registered config is visible through the window view.
        manager = ConfigManager([TEST_CONFIG])
        window_mgr = manager.for_window(MockWindow())
        self.assertEqual(window_mgr.all, [TEST_CONFIG])

    def test_override_config(self):
        # Project data can flip `enabled` off for a globally enabled config.
        manager = ConfigManager([TEST_CONFIG])
        self.assertTrue(TEST_CONFIG.enabled)
        win = MockWindow()
        win.set_project_data({'settings': {'LSP': {TEST_CONFIG.name: {"enabled": False}}}})
        window_mgr = manager.for_window(win)
        self.assertFalse(window_mgr.all[0].enabled)
class WindowConfigManagerTests(unittest.TestCase):
    """Per-window config lookup: syntax matching and project-data overrides."""

    def test_no_configs(self):
        # Without configs, no view can be supported.
        view = MockView(__file__)
        manager = WindowConfigManager(MockWindow(), [])
        self.assertFalse(manager.is_supported(view))
        self.assertFalse(manager.syntax_supported(view))

    def test_with_single_config(self):
        # A single matching config must surface through every lookup API.
        view = MockView(__file__)
        manager = WindowConfigManager(MockWindow(), [TEST_CONFIG])
        self.assertTrue(manager.is_supported(view))
        self.assertEqual(list(manager.scope_configs(view)), [TEST_CONFIG])
        self.assertTrue(manager.syntax_supported(view))
        self.assertEqual(manager.syntax_configs(view), [TEST_CONFIG])
        lang_configs = manager.syntax_config_languages(view)
        self.assertEqual(len(lang_configs), 1)
        self.assertEqual(lang_configs[TEST_CONFIG.name].id, TEST_CONFIG.languages[0].id)

    def test_applies_project_settings(self):
        # Project data can re-enable a config that is disabled globally.
        view = MockView(__file__)
        window = MockWindow()
        window.set_project_data({
            "settings": {
                "LSP": {
                    "test": {
                        "enabled": True
                    }
                }
            }
        })
        manager = WindowConfigManager(window, [DISABLED_CONFIG])
        manager.update()
        configs = manager.syntax_configs(view)
        self.assertEqual(len(configs), 1)
        config = configs[0]
        self.assertEqual(DISABLED_CONFIG.name, config.name)
        self.assertTrue(config.enabled)

    def test_disables_temporarily(self):
        # A temporary disable must survive subsequent update() calls.
        view = MockView(__file__)
        window = MockWindow()
        window.set_project_data({
            "settings": {
                "LSP": {
                    "test": {
                        "enabled": True
                    }
                }
            }
        })
        manager = WindowConfigManager(window, [DISABLED_CONFIG])
        manager.update()
        # crash handler disables config and shows popup
        manager.disable_temporarily(DISABLED_CONFIG.name)
        # view is activated after popup, we try to start a session again...
        manager.update()
        self.assertEqual([], manager.syntax_configs(view))
class IsSupportedSyntaxTests(unittest.TestCase):
    """The module-level is_supported_syntax() helper."""

    def test_no_configs(self):
        self.assertFalse(is_supported_syntax('asdf', []))

    def test_single_config(self):
        # Sanity-check that the fixtures agree before asserting the match.
        self.assertEqual(TEST_LANGUAGE.syntaxes[0], TEST_CONFIG.languages[0].syntaxes[0])
        self.assertTrue(is_supported_syntax(TEST_LANGUAGE.syntaxes[0], [TEST_CONFIG]))
| mit |
smspillaz/artificial-intelligence | tests/knapsack_test.py | 1 | 2053 | # /tests/knapsack_test.py
#
# Test cases for artificialintelligence.knapsack
#
# See LICENCE.md for Copyright information
"""Test cases for usage of polysquarecmakelinter.main()."""
from artificialintelligence.knapsack import (knapsack,
items_for_knapsack)
from nose_parameterized import parameterized, param
from testtools import TestCase
from collections import namedtuple
Item = namedtuple("Item", "value cost")
class TestKnapsack(TestCase):
    """Test cases for knapsack."""

    # Candidate items: (value, cost) pairs the solver may pick from.
    items = [
        Item(value=1, cost=2),
        Item(value=2, cost=3),
        Item(value=7, cost=9),
        Item(value=4, cost=4),
    ]

    # capacity -> list of item *values* expected in an optimal knapsack.
    # NOTE(review): only capacities 0-10 are exercised below (range(0, 11)),
    # so the entry for 11 is never reached — and its value 6 matches no item
    # above; confirm whether that row is stale.
    expected_value_of_knapsack = {
        0: [],
        1: [],
        2: [1],
        3: [2],
        4: [4],
        5: [4],
        6: [4, 1],
        7: [4, 2],
        8: [4, 2],
        9: [7],
        10: [7],
        11: [6, 1]
    }

    @parameterized.expand([param (i) for i in range(0, 11)])
    def test_knapsack_has_expected_items_for_capacity(self, capacity):
        """Test that the knapsack has the correct items for its capacity."""
        # knapsack() returns a pair whose index [1] is the achieved total value.
        self.assertEqual(sum(TestKnapsack.expected_value_of_knapsack[capacity]),
                         knapsack(TestKnapsack.items, capacity)[1])

    # Two equally priced value-5 items beat the single big item that a
    # greedy pick might prefer.
    unequal_items = [
        Item(value=5, cost=5),
        Item(value=5, cost=5),
        Item(value=6, cost=9)
    ]

    def test_knapsack_doesnt_use_greedy(self):
        """Test that the knapsack picks the two value five items."""
        self.assertEqual(10, knapsack(TestKnapsack.unequal_items, 10)[1])

    @parameterized.expand([param (i) for i in range(0, 11)])
    def test_can_pick_right_items_for_knapsack_capacity(self, capacity):
        """Test that we can pick the right items for the knapsack's capacity."""
        self.assertEqual(TestKnapsack.expected_value_of_knapsack[capacity],
                         list(reversed(sorted(items_for_knapsack(TestKnapsack.items,
                                                                 capacity)))))
| mit |
zamattiac/osf.io | framework/postcommit_tasks/handlers.py | 1 | 2700 | # -*- coding: utf-8 -*-
import functools
import hashlib
import logging
import threading
import binascii
from collections import OrderedDict
import os
from celery.local import PromiseProxy
from gevent.pool import Pool
from website import settings
_local = threading.local()
logger = logging.getLogger(__name__)
def postcommit_queue():
    """Return this thread's ordered queue of post-commit callables,
    creating an empty one on first access."""
    try:
        return _local.postcommit_queue
    except AttributeError:
        _local.postcommit_queue = OrderedDict()
        return _local.postcommit_queue
def postcommit_before_request():
    # Start each request with a fresh, empty task queue for this thread.
    _local.postcommit_queue = OrderedDict()
def postcommit_after_request(response, base_status_error_code=500):
    """Run the queued post-commit tasks unless the response is an error.

    For status codes >= base_status_error_code the queue is discarded
    instead of executed. Always returns *response* unchanged.
    """
    if response.status_code >= base_status_error_code:
        _local.postcommit_queue = OrderedDict()
        return response
    try:
        if postcommit_queue():
            number_of_threads = 30 # one db connection per greenlet, let's share
            pool = Pool(number_of_threads)
            # Tasks run concurrently on a gevent pool; order of completion
            # is not guaranteed even though the queue preserves insertion order.
            for func in postcommit_queue().values():
                pool.spawn(func)
            pool.join(timeout=5.0, raise_error=True)  # 5 second timeout and reraise exceptions
    except AttributeError as ex:
        # Raised when postcommit_before_request never ran for this thread.
        if not settings.DEBUG_MODE:
            logger.error('Post commit task queue not initialized: {}'.format(ex))
    return response
def enqueue_postcommit_task(fn, args, kwargs, once_per_request=True):
    """Register ``fn(*args, **kwargs)`` to run after the transaction commits.

    The queue key is an md5 digest of the function identity and its
    arguments, so with ``once_per_request=True`` repeated enqueues of the
    same call collapse into a single entry; otherwise a random suffix keeps
    every occurrence.
    """
    # make a hash of the pertinent data
    raw = [fn.__name__, fn.__module__, args, kwargs]
    m = hashlib.md5()
    # repr(x) is the idiomatic spelling of x.__repr__().
    # NOTE(review): md5.update() requires bytes on Python 3; this module
    # appears to target Python 2 — encode the joined string when porting.
    m.update('-'.join(repr(x) for x in raw))
    key = m.hexdigest()

    if not once_per_request:
        # we want to run it once for every occurrence, add a random string
        key = '{}:{}'.format(key, binascii.hexlify(os.urandom(8)))
    postcommit_queue().update({key: functools.partial(fn, *args, **kwargs)})
# Hook points consumed by the web framework's request lifecycle.
handlers = {
    'before_request': postcommit_before_request,
    'after_request': postcommit_after_request,
}
def run_postcommit(once_per_request=True, celery=False):
    '''
    Delays function execution until after the request's transaction has been committed.
    !!!Tasks enqueued using this decorator **WILL NOT** run if the return status code is >= 500!!!
    Unless celery is marked True, then they run any way
    :return:
    '''
    def wrapper(func):
        # if we're local dev or running unit tests, run without queueing
        if settings.DEBUG_MODE:
            return func

        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            # Celery tasks (PromiseProxy) are dispatched to the broker
            # immediately; everything else waits in the post-commit queue.
            if celery is True and isinstance(func, PromiseProxy):
                func.delay(*args, **kwargs)
            else:
                enqueue_postcommit_task(func, args, kwargs, once_per_request=once_per_request)
        return wrapped
    return wrapper
| apache-2.0 |
fortunave3gxx/android_kernel_samsung_fortuna-common-old | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """ This class provides a tree representation of the functions
    call stack. If a function has no parent in the kernel (interrupt,
    syscall, kernel thread...) then it is attached to a virtual parent
    called ROOT.
    """
    # Shared virtual root; assigned in main() before any parsing starts.
    ROOT = None

    def __init__(self, func, time = None, parent = None):
        self._func = func
        self._time = time
        if parent is None:
            self._parent = CallTree.ROOT
        else:
            self._parent = parent
        self._children = []

    def calls(self, func, calltime):
        """ If a function calls another one, call this method to insert it
        into the tree at the appropriate place.
        @return: A reference to the newly created child node.
        """
        child = CallTree(func, calltime, self)
        self._children.append(child)
        return child

    def getParent(self, func):
        """ Retrieve the last parent of the current node that
        has the name given by func. If this function is not
        on a parent, then create it as new child of root
        @return: A reference to the parent.
        """
        tree = self
        # Walk up the ancestry looking for a frame named func.
        while tree != CallTree.ROOT and tree._func != func:
            tree = tree._parent
        if tree == CallTree.ROOT:
            child = CallTree.ROOT.calls(func, None)
            return child
        return tree

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        # Render this node, then recurse into the children, drawing a " |"
        # rail segment; the rail under a last child is blanked out so the
        # tree's vertical lines terminate correctly.
        if self._time is not None:
            s = "%s----%s (%s)\n" % (branch, self._func, self._time)
        else:
            s = "%s----%s\n" % (branch, self._func)
        i = 0
        if lastChild:
            branch = branch[:-1] + " "
        while i < len(self._children):
            if i != len(self._children) - 1:
                s += "%s" % self._children[i].__toString(branch +\
                " |", False)
            else:
                s += "%s" % self._children[i].__toString(branch +\
                " |", True)
            i += 1
        return s
class BrokenLineException(Exception):
    """If the last line is not complete because of the pipe breakage,
    we want to stop the processing and ignore this line.
    """
    pass
class CommentLineException(Exception):
    """ If the line is a comment (as in the beginning of the trace file),
    just ignore it.
    """
    pass
def parseLine(line):
    """Split one function-tracer line into (timestamp, callee, caller).

    Comment lines (leading '#') raise CommentLineException; lines that do
    not match the tracer format (e.g. a truncated final line) raise
    BrokenLineException.
    """
    stripped = line.strip()
    if stripped.startswith("#"):
        raise CommentLineException
    match = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", stripped)
    if match is None:
        raise BrokenLineException
    return match.group(1), match.group(2), match.group(3)
def main():
    # Read a raw function trace from stdin and print it as a call tree.
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT
    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # Truncated final line (broken pipe): stop processing.
            break
        except CommentLineException:
            # Header/comment lines in the trace file are skipped.
            continue
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)
    # Python 2 print statement; __repr__ renders the whole tree.
    print CallTree.ROOT

if __name__ == "__main__":
    main()
| gpl-2.0 |
arun6582/django | django/core/cache/backends/dummy.py | 629 | 1213 | "Dummy cache backend"
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
class DummyCache(BaseCache):
    """Cache backend that stores nothing: writes are discarded and reads
    always miss, while keys are still built and validated exactly like a
    real backend would."""

    def __init__(self, host, *args, **kwargs):
        # The host/connection string is accepted for interface parity and
        # ignored.
        BaseCache.__init__(self, *args, **kwargs)

    def _validated_key(self, key, version):
        # Shared key handling: versioned key construction plus validation.
        key = self.make_key(key, version=version)
        self.validate_key(key)
        return key

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        self._validated_key(key, version)
        return True

    def get(self, key, default=None, version=None):
        self._validated_key(key, version)
        return default

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        self._validated_key(key, version)

    def delete(self, key, version=None):
        self._validated_key(key, version)

    def get_many(self, keys, version=None):
        return {}

    def has_key(self, key, version=None):
        self._validated_key(key, version)
        return False

    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        pass

    def delete_many(self, keys, version=None):
        pass

    def clear(self):
        pass
| bsd-3-clause |
ISA-tools/bii-webapp | bii_webapp/apps/browse/urls.py | 1 | 2197 | from django.conf.urls import patterns, url
from views import *
from django.views.generic.base import RedirectView
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# URL routes for the browse app. Accession segments are captured with
# `([^//]+)` (any run of non-slash characters — the doubled slash inside the
# character class is redundant) and numeric ids with `(\d+)`; captures are
# passed positionally to the views.
urlpatterns = patterns('',
    url(r'^$', browse,name='browse.browse'),
    url(r'^(\d+)/$', browse,name='browse.browse'),
    url(r'^$',RedirectView.as_view(url='/browse/1/')),
    url(r'^investigation$',investigation,name='browse.investigation'),
    url(r'^study$',study,name='browse.study'),
    url(r'^investigation/([^//]+)/$',investigation,name='browse.investigation'),
    url(r'^investigation/([^//]+)/study/([^//]+)/$',study,name='browse.investigation.study'),
    url(r'^investigation/([^//]+)/study/([^//]+)/assay/([^//]+)/([^//]+)/$',assay,name='browse.investigation.study.assay'),
    url(r'^investigation/([^//]+)/study/([^//]+)/sample/(\d+)/$',sample,name='browse.investigation.study.sample'),
    url(r'^study/([^//]+)/$',study,name='browse.study'),
    url(r'^study/([^//]+)/assay/([^//]+)/([^//]+)/$',assay,name='browse.study.assay'),
    url(r'^study/([^//]+)/sample/(\d+)/$',sample,name='browse.study.sample'),
    url(r'^updateInvestigation$',updateInvestigation,name='browse.updateInvestigation'),
    url(r'^updateStudy$',updateStudy,name='browse.updateStudy'),
    url(r'^deleteInvestigation$',deleteInvestigation,name='browse.deleteInvestigation'),
    url(r'^deleteStudy$',deleteStudy,name='browse.deleteStudy'),
    # url(r'^upload/',upload,name='upload'),
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # url('^accounts/profile/', 'main.views.private_profile'),
    # url('^profile/(\w+)', 'main.views.public_profile'),
    # url(r'^profiles/', include('profiles.urls')),
)
| mit |
mou4e/zirconium | tools/telemetry/telemetry/util/path.py | 13 | 1600 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
# TODO(dtu): Move these functions from core.util to here.
# Re-exported aliases so callers may import these helpers from this module
# while the implementations still live in telemetry.core.util.
GetBaseDir = util.GetBaseDir
GetTelemetryDir = util.GetTelemetryDir
GetUnittestDataDir = util.GetUnittestDataDir
GetChromiumSrcDir = util.GetChromiumSrcDir
AddDirToPythonPath = util.AddDirToPythonPath
GetBuildDirectories = util.GetBuildDirectories
def IsExecutable(path):
    """Return True iff *path* names a regular file this process may execute."""
    if not os.path.isfile(path):
        return False
    return os.access(path, os.X_OK)
def FindInstalledWindowsApplication(application_path):
    """Search common Windows installation directories for an application.

    Args:
      application_path: Path to application relative from installation location.

    Returns:
      A string representing the full path, or None if not found.
    """
    # Standard install roots first, then every PATH entry.
    candidate_roots = [os.getenv('PROGRAMFILES(X86)'),
                       os.getenv('PROGRAMFILES'),
                       os.getenv('LOCALAPPDATA')]
    candidate_roots.extend(os.getenv('PATH', '').split(os.pathsep))

    for root in candidate_roots:
        if not root:
            # Unset environment variables (None) and empty PATH entries.
            continue
        candidate = os.path.join(root, application_path)
        if IsExecutable(candidate):
            return candidate
    return None
def IsSubpath(subpath, superpath):
    """Returns True iff subpath is or is in superpath."""
    child = os.path.realpath(subpath)
    parent = os.path.realpath(superpath)

    # Strip one trailing component per iteration; once the candidate gets
    # shorter than the prospective ancestor it can no longer match.
    while len(child) >= len(parent):
        if child == parent:
            return True
        child = os.path.split(child)[0]
    return False
| bsd-3-clause |
Zhongqilong/mykbengineer | kbe/src/lib/python/Lib/test/test_weakset.py | 86 | 15546 | import unittest
from test import support
from weakref import proxy, ref, WeakSet
import operator
import copy
import string
import os
from random import randrange, shuffle
import sys
import warnings
import collections
from collections import UserString as ustr
import gc
import contextlib
class Foo:
    # Minimal weak-referenceable placeholder element used by the tests.
    pass
class RefCycle:
    # Participates in a reference cycle with itself, so instances are only
    # reclaimed by the cyclic garbage collector, not by refcounting.
    def __init__(self):
        self.cycle = self
class TestWeakSet(unittest.TestCase):
    """Exercises weakref.WeakSet: set algebra, mutation, comparison, and the
    interaction of iteration with garbage collection of elements."""

    def setUp(self):
        # need to keep references to them
        self.items = [ustr(c) for c in ('a', 'b', 'c')]
        self.items2 = [ustr(c) for c in ('x', 'y', 'z')]
        self.ab_items = [ustr(c) for c in 'ab']
        self.abcde_items = [ustr(c) for c in 'abcde']
        self.def_items = [ustr(c) for c in 'def']
        self.ab_weakset = WeakSet(self.ab_items)
        self.abcde_weakset = WeakSet(self.abcde_items)
        self.def_weakset = WeakSet(self.def_items)
        self.letters = [ustr(c) for c in string.ascii_letters]
        self.s = WeakSet(self.items)
        self.d = dict.fromkeys(self.items)
        self.obj = ustr('F')
        self.fs = WeakSet([self.obj])

    def test_methods(self):
        # WeakSet must expose every public method that set has.
        weaksetmethods = dir(WeakSet)
        for method in dir(set):
            if method == 'test_c_api' or method.startswith('_'):
                continue
            self.assertIn(method, weaksetmethods,
                          "WeakSet missing method " + method)

    def test_new_or_init(self):
        self.assertRaises(TypeError, WeakSet, [], 2)

    def test_len(self):
        self.assertEqual(len(self.s), len(self.d))
        self.assertEqual(len(self.fs), 1)
        # Dropping the only strong reference empties the weak set.
        del self.obj
        self.assertEqual(len(self.fs), 0)

    def test_contains(self):
        for c in self.letters:
            self.assertEqual(c in self.s, c in self.d)
        # 1 is not weakref'able, but that TypeError is caught by __contains__
        self.assertNotIn(1, self.s)
        self.assertIn(self.obj, self.fs)
        del self.obj
        self.assertNotIn(ustr('F'), self.fs)

    def test_union(self):
        u = self.s.union(self.items2)
        for c in self.letters:
            self.assertEqual(c in u, c in self.d or c in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(u), WeakSet)
        self.assertRaises(TypeError, self.s.union, [[]])
        for C in set, frozenset, dict.fromkeys, list, tuple:
            x = WeakSet(self.items + self.items2)
            c = C(self.items2)
            self.assertEqual(self.s.union(c), x)
            del c
        self.assertEqual(len(u), len(self.items) + len(self.items2))
        self.items2.pop()
        gc.collect()
        self.assertEqual(len(u), len(self.items) + len(self.items2))

    def test_or(self):
        i = self.s.union(self.items2)
        self.assertEqual(self.s | set(self.items2), i)
        self.assertEqual(self.s | frozenset(self.items2), i)

    def test_intersection(self):
        s = WeakSet(self.letters)
        i = s.intersection(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, c in self.items2 and c in self.letters)
        self.assertEqual(s, WeakSet(self.letters))
        self.assertEqual(type(i), WeakSet)
        for C in set, frozenset, dict.fromkeys, list, tuple:
            x = WeakSet([])
            self.assertEqual(i.intersection(C(self.items)), x)
        self.assertEqual(len(i), len(self.items2))
        self.items2.pop()
        gc.collect()
        self.assertEqual(len(i), len(self.items2))

    def test_isdisjoint(self):
        self.assertTrue(self.s.isdisjoint(WeakSet(self.items2)))
        self.assertTrue(not self.s.isdisjoint(WeakSet(self.letters)))

    def test_and(self):
        i = self.s.intersection(self.items2)
        self.assertEqual(self.s & set(self.items2), i)
        self.assertEqual(self.s & frozenset(self.items2), i)

    def test_difference(self):
        i = self.s.difference(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, c in self.d and c not in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        self.assertRaises(TypeError, self.s.difference, [[]])

    def test_sub(self):
        i = self.s.difference(self.items2)
        self.assertEqual(self.s - set(self.items2), i)
        self.assertEqual(self.s - frozenset(self.items2), i)

    def test_symmetric_difference(self):
        i = self.s.symmetric_difference(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, (c in self.d) ^ (c in self.items2))
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
        self.assertEqual(len(i), len(self.items) + len(self.items2))
        self.items2.pop()
        gc.collect()
        self.assertEqual(len(i), len(self.items) + len(self.items2))

    def test_xor(self):
        i = self.s.symmetric_difference(self.items2)
        self.assertEqual(self.s ^ set(self.items2), i)
        self.assertEqual(self.s ^ frozenset(self.items2), i)

    def test_sub_and_super(self):
        self.assertTrue(self.ab_weakset <= self.abcde_weakset)
        self.assertTrue(self.abcde_weakset <= self.abcde_weakset)
        self.assertTrue(self.abcde_weakset >= self.ab_weakset)
        self.assertFalse(self.abcde_weakset <= self.def_weakset)
        self.assertFalse(self.abcde_weakset >= self.def_weakset)
        self.assertTrue(set('a').issubset('abc'))
        self.assertTrue(set('abc').issuperset('a'))
        self.assertFalse(set('a').issubset('cbs'))
        self.assertFalse(set('cbs').issuperset('a'))

    def test_lt(self):
        self.assertTrue(self.ab_weakset < self.abcde_weakset)
        self.assertFalse(self.abcde_weakset < self.def_weakset)
        self.assertFalse(self.ab_weakset < self.ab_weakset)
        self.assertFalse(WeakSet() < WeakSet())

    def test_gt(self):
        self.assertTrue(self.abcde_weakset > self.ab_weakset)
        self.assertFalse(self.abcde_weakset > self.def_weakset)
        self.assertFalse(self.ab_weakset > self.ab_weakset)
        self.assertFalse(WeakSet() > WeakSet())

    def test_gc(self):
        # Create a nest of cycles to exercise overall ref count check
        s = WeakSet(Foo() for i in range(1000))
        for elem in s:
            elem.cycle = s
            elem.sub = elem
            elem.set = WeakSet([elem])

    def test_subclass_with_custom_hash(self):
        # Bug #1257731
        class H(WeakSet):
            def __hash__(self):
                return int(id(self) & 0x7fffffff)
        s=H()
        f=set()
        f.add(s)
        self.assertIn(s, f)
        f.remove(s)
        f.add(s)
        f.discard(s)

    def test_init(self):
        s = WeakSet()
        s.__init__(self.items)
        self.assertEqual(s, self.s)
        s.__init__(self.items2)
        self.assertEqual(s, WeakSet(self.items2))
        self.assertRaises(TypeError, s.__init__, s, 2);
        self.assertRaises(TypeError, s.__init__, 1);

    def test_constructor_identity(self):
        s = WeakSet(self.items)
        t = WeakSet(s)
        self.assertNotEqual(id(s), id(t))

    def test_hash(self):
        # WeakSets are mutable and therefore unhashable.
        self.assertRaises(TypeError, hash, self.s)

    def test_clear(self):
        self.s.clear()
        self.assertEqual(self.s, WeakSet([]))
        self.assertEqual(len(self.s), 0)

    def test_copy(self):
        dup = self.s.copy()
        self.assertEqual(self.s, dup)
        self.assertNotEqual(id(self.s), id(dup))

    def test_add(self):
        x = ustr('Q')
        self.s.add(x)
        self.assertIn(x, self.s)
        dup = self.s.copy()
        self.s.add(x)
        self.assertEqual(self.s, dup)
        self.assertRaises(TypeError, self.s.add, [])
        self.fs.add(Foo())
        self.assertTrue(len(self.fs) == 1)
        self.fs.add(self.obj)
        self.assertTrue(len(self.fs) == 1)

    def test_remove(self):
        x = ustr('a')
        self.s.remove(x)
        self.assertNotIn(x, self.s)
        self.assertRaises(KeyError, self.s.remove, x)
        self.assertRaises(TypeError, self.s.remove, [])

    def test_discard(self):
        a, q = ustr('a'), ustr('Q')
        self.s.discard(a)
        self.assertNotIn(a, self.s)
        self.s.discard(q)
        self.assertRaises(TypeError, self.s.discard, [])

    def test_pop(self):
        for i in range(len(self.s)):
            elem = self.s.pop()
            self.assertNotIn(elem, self.s)
        self.assertRaises(KeyError, self.s.pop)

    def test_update(self):
        retval = self.s.update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)
        self.assertRaises(TypeError, self.s.update, [[]])

    def test_update_set(self):
        self.s.update(set(self.items2))
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)

    def test_ior(self):
        self.s |= set(self.items2)
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)

    def test_intersection_update(self):
        retval = self.s.intersection_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if c in self.items2 and c in self.items:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        self.assertRaises(TypeError, self.s.intersection_update, [[]])

    def test_iand(self):
        self.s &= set(self.items2)
        for c in (self.items + self.items2):
            if c in self.items2 and c in self.items:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)

    def test_difference_update(self):
        retval = self.s.difference_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if c in self.items and c not in self.items2:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        self.assertRaises(TypeError, self.s.difference_update, [[]])
        self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])

    def test_isub(self):
        self.s -= set(self.items2)
        for c in (self.items + self.items2):
            if c in self.items and c not in self.items2:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)

    def test_symmetric_difference_update(self):
        retval = self.s.symmetric_difference_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if (c in self.items) ^ (c in self.items2):
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])

    def test_ixor(self):
        self.s ^= set(self.items2)
        for c in (self.items + self.items2):
            if (c in self.items) ^ (c in self.items2):
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)

    def test_inplace_on_self(self):
        # In-place operators where the operand is the set itself.
        t = self.s.copy()
        t |= t
        self.assertEqual(t, self.s)
        t &= t
        self.assertEqual(t, self.s)
        t -= t
        self.assertEqual(t, WeakSet())
        t = self.s.copy()
        t ^= t
        self.assertEqual(t, WeakSet())

    def test_eq(self):
        # issue 5964
        self.assertTrue(self.s == self.s)
        self.assertTrue(self.s == WeakSet(self.items))
        self.assertFalse(self.s == set(self.items))
        self.assertFalse(self.s == list(self.items))
        self.assertFalse(self.s == tuple(self.items))
        self.assertFalse(self.s == WeakSet([Foo]))
        self.assertFalse(self.s == 1)

    def test_ne(self):
        self.assertTrue(self.s != set(self.items))
        s1 = WeakSet()
        s2 = WeakSet()
        self.assertFalse(s1 != s2)

    def test_weak_destroy_while_iterating(self):
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        # Create new items to be sure no-one else holds a reference
        items = [ustr(c) for c in ('a', 'b', 'c')]
        s = WeakSet(items)
        it = iter(s)
        next(it)             # Trigger internal iteration
        # Destroy an item
        del items[-1]
        gc.collect()    # just in case
        # We have removed either the first consumed items, or another one
        self.assertIn(len(list(it)), [len(items), len(items) - 1])
        del it
        # The removal has been committed
        self.assertEqual(len(s), len(items))

    def test_weak_destroy_and_mutate_while_iterating(self):
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        items = [ustr(c) for c in string.ascii_letters]
        s = WeakSet(items)
        @contextlib.contextmanager
        def testcontext():
            try:
                it = iter(s)
                # Start iterator
                yielded = ustr(str(next(it)))
                # Schedule an item for removal and recreate it
                u = ustr(str(items.pop()))
                if yielded == u:
                    # The iterator still has a reference to the removed item,
                    # advance it (issue #20006).
                    next(it)
                gc.collect()      # just in case
                yield u
            finally:
                it = None           # should commit all removals
        with testcontext() as u:
            self.assertNotIn(u, s)
        with testcontext() as u:
            self.assertRaises(KeyError, s.remove, u)
            self.assertNotIn(u, s)
        with testcontext() as u:
            s.add(u)
            self.assertIn(u, s)
        t = s.copy()
        with testcontext() as u:
            s.update(t)
            self.assertEqual(len(s), len(t))
        with testcontext() as u:
            s.clear()
            self.assertEqual(len(s), 0)

    def test_len_cycles(self):
        # len() must stay consistent while cyclic-garbage elements die.
        N = 20
        items = [RefCycle() for i in range(N)]
        s = WeakSet(items)
        del items
        it = iter(s)
        try:
            next(it)
        except StopIteration:
            pass
        gc.collect()
        n1 = len(s)
        del it
        gc.collect()
        n2 = len(s)
        # one item may be kept alive inside the iterator
        self.assertIn(n1, (0, 1))
        self.assertEqual(n2, 0)

    def test_len_race(self):
        # Extended sanity checks for len() in the face of cyclic collection
        self.addCleanup(gc.set_threshold, *gc.get_threshold())
        for th in range(1, 100):
            N = 20
            gc.collect(0)
            gc.set_threshold(th, th, th)
            items = [RefCycle() for i in range(N)]
            s = WeakSet(items)
            del items
            # All items will be collected at next garbage collection pass
            it = iter(s)
            try:
                next(it)
            except StopIteration:
                pass
            n1 = len(s)
            del it
            n2 = len(s)
            self.assertGreaterEqual(n1, 0)
            self.assertLessEqual(n1, N)
            self.assertGreaterEqual(n2, 0)
            self.assertLessEqual(n2, n1)
def test_main(verbose=None):
    # Entry point used by the regression-test driver and by direct execution.
    support.run_unittest(TestWeakSet)

if __name__ == "__main__":
    test_main(verbose=True)
| lgpl-3.0 |
civisanalytics/ansible | lib/ansible/plugins/cache/memcached.py | 46 | 6133 | # (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import os
import time
from multiprocessing import Lock
from itertools import chain
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.cache.base import BaseCacheModule
try:
import memcache
except ImportError:
raise AnsibleError("python-memcached is required for the memcached fact cache")
class ProxyClientPool(object):
    """
    Memcached connection pooling for thread/fork safety. Inspired by py-redis
    connection pool.

    Available connections are maintained in a deque and released in a FIFO manner.
    """

    def __init__(self, *args, **kwargs):
        # Remaining args/kwargs are forwarded verbatim to memcache.Client.
        self.max_connections = kwargs.pop('max_connections', 1024)
        self.connection_args = args
        self.connection_kwargs = kwargs
        self.reset()

    def reset(self):
        # Remember the creating pid so _check_safe() can detect forks.
        self.pid = os.getpid()
        self._num_connections = 0
        self._available_connections = collections.deque(maxlen=self.max_connections)
        self._locked_connections = set()
        self._lock = Lock()

    def _check_safe(self):
        # After a fork the child would share the parent's sockets; on a pid
        # change, drop every connection and rebuild the pool from scratch.
        if self.pid != os.getpid():
            with self._lock:
                if self.pid == os.getpid():
                    # bail out - another thread already acquired the lock
                    return
                self.disconnect_all()
                self.reset()

    def get_connection(self):
        # Check a connection out of the pool, creating one when none are free.
        self._check_safe()
        try:
            connection = self._available_connections.popleft()
        except IndexError:
            connection = self.create_connection()
        self._locked_connections.add(connection)
        return connection

    def create_connection(self):
        if self._num_connections >= self.max_connections:
            raise RuntimeError("Too many memcached connections")
        self._num_connections += 1
        return memcache.Client(*self.connection_args, **self.connection_kwargs)

    def release_connection(self, connection):
        # Return a checked-out connection to the available deque (FIFO reuse).
        self._check_safe()
        self._locked_connections.remove(connection)
        self._available_connections.append(connection)

    def disconnect_all(self):
        for conn in chain(self._available_connections, self._locked_connections):
            conn.disconnect_all()

    def __getattr__(self, name):
        # Unknown attributes become proxied client calls executed on a pooled
        # connection; see _proxy_client.
        def wrapped(*args, **kwargs):
            return self._proxy_client(name, *args, **kwargs)
        return wrapped

    def _proxy_client(self, name, *args, **kwargs):
        # Borrow a connection for the duration of a single client method call.
        conn = self.get_connection()

        try:
            return getattr(conn, name)(*args, **kwargs)
        finally:
            self.release_connection(conn)
class CacheModuleKeys(collections.MutableSet):
    """
    A set subclass that keeps track of insertion time and persists
    the set in memcached.

    Backed by a dict mapping key -> insertion timestamp (seconds since
    epoch); every mutation re-persists the whole mapping under PREFIX.
    """

    PREFIX = 'ansible_cache_keys'

    def __init__(self, cache, *args, **kwargs):
        # *cache* is the memcached client (pool) used for persistence.
        self._cache = cache
        self._keyset = dict(*args, **kwargs)

    def __contains__(self, key):
        return key in self._keyset

    def __iter__(self):
        return iter(self._keyset)

    def __len__(self):
        return len(self._keyset)

    def add(self, key):
        """Record *key* with the current time and persist the keyset."""
        self._keyset[key] = time.time()
        self._cache.set(self.PREFIX, self._keyset)

    def discard(self, key):
        # NOTE(review): `del` raises KeyError for a missing key, unlike
        # set.discard — confirm callers only discard known-present keys.
        del self._keyset[key]
        self._cache.set(self.PREFIX, self._keyset)

    def remove_by_timerange(self, s_min, s_max):
        """Drop every key whose insertion time t satisfies s_min < t < s_max,
        then persist the pruned keyset once."""
        # Snapshot the keys before deleting: on Python 3, dict.keys() is a
        # live view and deleting while iterating it raises
        # "RuntimeError: dictionary changed size during iteration".
        for k in list(self._keyset.keys()):
            t = self._keyset[k]
            if s_min < t < s_max:
                del self._keyset[k]
        self._cache.set(self.PREFIX, self._keyset)
class CacheModule(BaseCacheModule):
    """Ansible fact cache backed by memcached.

    Keys are namespaced with the configured prefix.  A separately
    persisted keyset records insertion times so entries can also be
    expired client-side; memcached itself receives a TTL via ``time=``.
    """

    def __init__(self, *args, **kwargs):
        if C.CACHE_PLUGIN_CONNECTION:
            # "host:port[,host:port...]" from the Ansible configuration.
            connection = C.CACHE_PLUGIN_CONNECTION.split(',')
        else:
            connection = ['127.0.0.1:11211']

        self._timeout = C.CACHE_PLUGIN_TIMEOUT
        self._prefix = C.CACHE_PLUGIN_PREFIX
        self._cache = ProxyClientPool(connection, debug=0)
        # Restore the persisted keyset; fall back to an empty one.
        self._keys = CacheModuleKeys(self._cache, self._cache.get(CacheModuleKeys.PREFIX) or [])

    def _make_key(self, key):
        # Namespace the user-supplied key with the configured prefix.
        return "{0}{1}".format(self._prefix, key)

    def _expire_keys(self):
        # Client-side expiry: drop keyset entries older than the timeout.
        # A timeout of 0 (or less) disables expiry entirely.
        if self._timeout > 0:
            expiry_age = time.time() - self._timeout
            self._keys.remove_by_timerange(0, expiry_age)

    def get(self, key):
        value = self._cache.get(self._make_key(key))
        # guard against the key not being removed from the keyset;
        # this could happen in cases where the timeout value is changed
        # between invocations
        if value is None:
            self.delete(key)
            raise KeyError
        return value

    def set(self, key, value):
        # min_compress_len=1 compresses every value; time= also lets
        # memcached expire the entry server-side.
        self._cache.set(self._make_key(key), value, time=self._timeout, min_compress_len=1)
        self._keys.add(key)

    def keys(self):
        self._expire_keys()
        return list(iter(self._keys))

    def contains(self, key):
        self._expire_keys()
        return key in self._keys

    def delete(self, key):
        self._cache.delete(self._make_key(key))
        self._keys.discard(key)

    def flush(self):
        # Delete every tracked key; keys() expires stale entries first.
        for key in self.keys():
            self.delete(key)

    def copy(self):
        # NOTE(review): CacheModuleKeys defines no copy() method and
        # MutableSet does not provide one -- presumably this path is
        # unexercised; verify before relying on it.
        return self._keys.copy()

    def __getstate__(self):
        # Nothing picklable to carry across; rebuilt from config below.
        return dict()

    def __setstate__(self, data):
        self.__init__()
| gpl-3.0 |
klmitch/neutron | neutron/quota/__init__.py | 5 | 13086 | # Copyright (c) 2015 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Quotas for instances, volumes, and floating ips."""
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import importutils
import six
import webob
from neutron._i18n import _, _LI, _LW
from neutron.common import exceptions
from neutron.db.quota import api as quota_api
from neutron.quota import resource_registry
LOG = logging.getLogger(__name__)

# Import path used to detect whether DB quota support has been loaded;
# QuotaEngine.get_driver() falls back to the config driver otherwise.
QUOTA_DB_MODULE = 'neutron.db.quota.driver'
QUOTA_DB_DRIVER = '%s.DbQuotaDriver' % QUOTA_DB_MODULE
QUOTA_CONF_DRIVER = 'neutron.quota.ConfDriver'

# Core resources that are always registered with the quota engine.
default_quota_items = ['network', 'subnet', 'port']


# Note: for all the IntOpt limits below, a negative value means the
# resource is unlimited.
quota_opts = [
    cfg.ListOpt('quota_items',
                default=default_quota_items,
                deprecated_for_removal=True,
                help=_('Resource name(s) that are supported in quota '
                       'features. This option is now deprecated for '
                       'removal.')),
    cfg.IntOpt('default_quota',
               default=-1,
               help=_('Default number of resource allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_network',
               default=10,
               help=_('Number of networks allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_subnet',
               default=10,
               help=_('Number of subnets allowed per tenant, '
                      'A negative value means unlimited.')),
    cfg.IntOpt('quota_port',
               default=50,
               help=_('Number of ports allowed per tenant. '
                      'A negative value means unlimited.')),
    cfg.StrOpt('quota_driver',
               default=QUOTA_DB_DRIVER,
               help=_('Default driver to use for quota checks')),
    cfg.BoolOpt('track_quota_usage',
                default=True,
                help=_('Keep in track in the database of current resource'
                       'quota usage. Plugins which do not leverage the '
                       'neutron database should set this flag to False')),
]
# Register the configuration options
cfg.CONF.register_opts(quota_opts, 'QUOTAS')
class ConfDriver(object):
    """Configuration driver.

    Driver to perform necessary checks to enforce quotas and obtain
    quota information. The default driver utilizes the default values
    in neutron.conf.
    """

    def _get_quotas(self, context, resources):
        """Get quotas.

        A helper method which retrieves the quotas for the specific
        resources identified by keys, and which apply to the current
        context.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        """
        quotas = {}
        for resource in resources.values():
            # Config-based quotas are the same for every tenant: the
            # per-resource default from neutron.conf.
            quotas[resource.name] = resource.default
        return quotas

    def limit_check(self, context, tenant_id,
                    resources, values):
        """Check simple quota limits.

        For limits--those quotas for which there is no usage
        synchronization function--this method checks that a set of
        proposed values are permitted by the limit restriction.

        If any of the proposed values is over the defined quota, an
        OverQuota exception will be raised with the sorted list of the
        resources which are too high. Otherwise, the method returns
        nothing.

        :param context: The request context, for access checks.
        :param tenant_id: The tenant_id to check quota.
        :param resources: A dictionary of the registered resources.
        :param values: A dictionary of the values to check against the
                       quota.
        """
        # Ensure no value is less than zero
        unders = [key for key, val in values.items() if val < 0]
        if unders:
            raise exceptions.InvalidQuotaValue(unders=sorted(unders))

        # Get the applicable quotas
        quotas = self._get_quotas(context, resources)

        # Check the quotas and construct a list of the resources that
        # would be put over limit by the desired values.  A negative
        # quota means "unlimited" and never trips the check.
        overs = [key for key, val in values.items()
                 if quotas[key] >= 0 and quotas[key] < val]
        if overs:
            raise exceptions.OverQuota(overs=sorted(overs), quotas=quotas,
                                       usages={})

    @staticmethod
    def get_tenant_quotas(context, resources, tenant_id):
        # Per-tenant quotas are not supported by this driver; every
        # tenant receives the configured defaults.
        quotas = {}
        sub_resources = dict((k, v) for k, v in resources.items())
        for resource in sub_resources.values():
            quotas[resource.name] = resource.default
        return quotas

    @staticmethod
    def get_all_quotas(context, resources):
        # No per-tenant quota records exist when quotas come from config.
        return []

    @staticmethod
    def delete_tenant_quota(context, tenant_id):
        # Config-backed quotas cannot be modified through the API.
        msg = _('Access to this resource was denied.')
        raise webob.exc.HTTPForbidden(msg)

    @staticmethod
    def update_quota_limit(context, tenant_id, resource, limit):
        # Config-backed quotas cannot be modified through the API.
        msg = _('Access to this resource was denied.')
        raise webob.exc.HTTPForbidden(msg)

    def make_reservation(self, context, tenant_id, resources, deltas, plugin):
        """This driver does not support reservations.

        This routine is provided for backward compatibility purposes with
        the API controllers which have now been adapted to make reservations
        rather than counting resources and checking limits - as this
        routine ultimately does.
        """
        for resource in deltas.keys():
            count = QUOTAS.count(context, resource, plugin, tenant_id)
            total_use = deltas.get(resource, 0) + count
            # NOTE: this mutates the caller-supplied 'deltas' dict so
            # the subsequent limit check compares projected totals.
            deltas[resource] = total_use

        self.limit_check(
            context,
            tenant_id,
            resource_registry.get_all_resources(),
            deltas)
        # return a fake reservation - the REST controller expects it
        return quota_api.ReservationInfo('fake', None, None, None)

    def commit_reservation(self, context, reservation_id):
        """This is a noop as this driver does not support reservations."""

    def cancel_reservation(self, context, reservation_id):
        """This is a noop as this driver does not support reservations."""
class QuotaEngine(object):
    """Represent the set of recognized quotas."""

    # Lazily-created process-wide singleton; see get_instance().
    _instance = None

    @classmethod
    def get_instance(cls):
        """Return the shared QuotaEngine, creating it on first use."""
        if not cls._instance:
            cls._instance = cls()
        return cls._instance

    def __init__(self, quota_driver_class=None):
        """Initialize a Quota object."""
        self._driver = None
        self._driver_class = quota_driver_class

    def get_driver(self):
        """Return the quota driver, loading and caching it on first use.

        Falls back to the (deprecated) ConfDriver when the DB driver is
        configured but quotas-table support has not been loaded.
        """
        if self._driver is None:
            _driver_class = (self._driver_class or
                             cfg.CONF.QUOTAS.quota_driver)
            if (_driver_class == QUOTA_DB_DRIVER and
                    QUOTA_DB_MODULE not in sys.modules):
                # If quotas table is not loaded, force config quota driver.
                _driver_class = QUOTA_CONF_DRIVER
                LOG.info(_LI("ConfDriver is used as quota_driver because the "
                             "loaded plugin does not support 'quotas' table."))
            if isinstance(_driver_class, six.string_types):
                _driver_class = importutils.import_object(_driver_class)
            if isinstance(_driver_class, ConfDriver):
                versionutils.report_deprecated_feature(
                    LOG, _LW("The quota driver neutron.quota.ConfDriver is "
                             "deprecated as of Liberty. "
                             "neutron.db.quota.driver.DbQuotaDriver should "
                             "be used in its place"))
            self._driver = _driver_class
            LOG.info(_LI('Loaded quota_driver: %s.'), _driver_class)
        return self._driver

    def count(self, context, resource_name, *args, **kwargs):
        """Count a resource.

        For countable resources, invokes the count() function and
        returns its result.  Arguments following the context and
        resource are passed directly to the count function declared by
        the resource.

        :param context: The request context, for access checks.
        :param resource_name: The name of the resource, as a string.
        """
        # Get the resource
        res = resource_registry.get_resource(resource_name)
        if not res or not hasattr(res, 'count'):
            raise exceptions.QuotaResourceUnknown(unknown=[resource_name])

        return res.count(context, *args, **kwargs)

    def make_reservation(self, context, tenant_id, deltas, plugin):
        """Reserve quota for the requested deltas via the driver.

        Raises InvalidQuotaValue for negative deltas and
        QuotaResourceUnknown for resources not in the registry.
        """
        # Verify that resources are managed by the quota engine
        # Ensure no value is less than zero
        unders = [key for key, val in deltas.items() if val < 0]
        if unders:
            raise exceptions.InvalidQuotaValue(unders=sorted(unders))

        requested_resources = set(deltas.keys())
        all_resources = resource_registry.get_all_resources()
        managed_resources = set([res for res in all_resources.keys()
                                 if res in requested_resources])
        # Make sure we accounted for all of them...
        unknown_resources = requested_resources - managed_resources

        if unknown_resources:
            raise exceptions.QuotaResourceUnknown(
                unknown=sorted(unknown_resources))
        # FIXME(salv-orlando): There should be no reason for sending all the
        # resource in the registry to the quota driver, but as other driver
        # APIs request them, this will be sorted out with a different patch.
        return self.get_driver().make_reservation(
            context,
            tenant_id,
            all_resources,
            deltas,
            plugin)

    def commit_reservation(self, context, reservation_id):
        # Delegates entirely to the configured driver.
        self.get_driver().commit_reservation(context, reservation_id)

    def cancel_reservation(self, context, reservation_id):
        # Delegates entirely to the configured driver.
        self.get_driver().cancel_reservation(context, reservation_id)

    def limit_check(self, context, tenant_id, **values):
        """Check simple quota limits.

        For limits--those quotas for which there is no usage
        synchronization function--this method checks that a set of
        proposed values are permitted by the limit restriction.  The
        values to check are given as keyword arguments, where the key
        identifies the specific quota limit to check, and the value is
        the proposed value.

        This method will raise a QuotaResourceUnknown exception if a
        given resource is unknown or if it is not a countable resource.

        If any of the proposed values exceeds the respective quota defined
        for the tenant, an OverQuota exception will be raised.
        The exception will include a sorted list with the resources
        which exceed the quota limit. Otherwise, the method returns nothing.

        :param context: Request context
        :param tenant_id: Tenant for which the quota limit is being checked
        :param values: Dict specifying requested deltas for each resource
        """
        # TODO(salv-orlando): Deprecate calls to this API
        # Verify that resources are managed by the quota engine
        requested_resources = set(values.keys())
        managed_resources = set([res for res in
                                 resource_registry.get_all_resources()
                                 if res in requested_resources])

        # Make sure we accounted for all of them...
        unknown_resources = requested_resources - managed_resources
        if unknown_resources:
            raise exceptions.QuotaResourceUnknown(
                unknown=sorted(unknown_resources))

        return self.get_driver().limit_check(
            context, tenant_id, resource_registry.get_all_resources(), values)
# Global quota engine singleton shared by the API controllers.
QUOTAS = QuotaEngine.get_instance()


def register_resources_from_config():
    # This operation is now deprecated. All the neutron core and extended
    # resource for which quota limits are enforced explicitly register
    # themselves with the quota engine.
    for resource_item in (set(cfg.CONF.QUOTAS.quota_items) -
                          set(default_quota_items)):
        resource_registry.register_resource_by_name(resource_item)


# Register any extra config-declared resources at import time.
register_resources_from_config()
| apache-2.0 |
google/language-resources | festival_utils/festival_prompts_from_tsv.py | 2 | 1218 | #! /usr/bin/env python
# Copyright 2016, 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts a prompts file in TSV format to Festival's Scheme format.
"""
from __future__ import unicode_literals
import io
# Wrap the process's stdin/stdout file descriptors as UTF-8 text streams
# without closing the underlying fds; works identically on Python 2 and 3.
STDIN = io.open(0, mode='rt', encoding='utf-8', closefd=False)
STDOUT = io.open(1, mode='wt', encoding='utf-8', closefd=False)
def main(unused_args):
    """Read TSV prompts from STDIN and emit Festival Scheme to STDOUT."""
    for raw in STDIN:
        columns = raw.rstrip('\n').split('\t')
        assert len(columns) >= 2
        utterance_id, prompt_text = columns[0], columns[1]
        # The prompt text is embedded inside double quotes below, so it
        # must not contain any itself.
        assert '"' not in prompt_text
        STDOUT.write('( %s "%s" )\n' % (utterance_id, prompt_text))
    return


if __name__ == '__main__':
    main(None)
| apache-2.0 |
marcellodesales/svnedge-console | ext/solaris-sparc/pkg-toolkit/pkg/vendor-packages/pkg/client/image.py | 4 | 127776 | #!/usr/bin/python2.4
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
import datetime as dt
import errno
import os
import platform
import shutil
import tempfile
import time
import urllib
import pkg.Uuid25
import pkg.catalog as catalog
import pkg.client.api_errors as api_errors
import pkg.client.constraint as constraint
import pkg.client.history as history
import pkg.client.imageconfig as imageconfig
import pkg.client.imageplan as imageplan
import pkg.client.imagestate as imagestate
import pkg.client.pkgplan as pkgplan
import pkg.client.progress as progress
import pkg.client.publisher as publisher
import pkg.client.transport.transport as transport
import pkg.fmri
import pkg.manifest as manifest
import pkg.misc as misc
import pkg.portable as portable
import pkg.variant as variant
import pkg.version
from pkg.client.debugvalues import DebugValues
from pkg.client import global_settings
from pkg.client.imagetypes import IMG_USER, IMG_ENTIRE
from pkg.misc import CfgCacheError
from pkg.misc import EmptyI, EmptyDict
from pkg.misc import msg, emsg
# Name of the on-disk catalog cache file within the image metadata dir.
CATALOG_CACHE_FILE = "catalog_cache"

# Metadata directory prefixes: user images vs. entire/partial images.
img_user_prefix = ".org.opensolaris,pkg"
img_root_prefix = "var/pkg"

# Package state strings recorded in the image.
PKG_STATE_INSTALLED = "installed"
PKG_STATE_KNOWN = "known"
class Image(object):
"""An Image object is a directory tree containing the laid-down contents
of a self-consistent graph of Packages.
An Image has a root path.
An Image of type IMG_ENTIRE does not have a parent Image. Other Image
types must have a parent Image. The external state of the parent Image
must be accessible from the Image's context, or duplicated within the
Image (IMG_PARTIAL for zones, for instance).
The parent of a user Image can be a partial Image. The parent of a
partial Image must be an entire Image.
An Image of type IMG_USER stores its external state at self.root +
".org.opensolaris,pkg".
An Image of type IMG_ENTIRE or IMG_PARTIAL stores its external state at
self.root + "/var/pkg".
An Image needs to be able to have a different repository set than the
system's root Image.
Directory layout
$IROOT/catalog
Directory containing catalogs for URIs of interest. Filename is
the escaped URI of the catalog.
$IROOT/file
Directory containing file hashes of installed packages.
$IROOT/pkg
Directory containing manifests and states of installed packages.
$IROOT/index
Directory containing reverse-index databases.
$IROOT/cfg_cache
File containing image's cached configuration.
$IROOT/opaque
File containing image's opaque state.
$IROOT/state/installed
Directory containing files whose names identify the installed
packages.
All of these directories and files other than state are considered
essential for any image to be complete. To add a new essential file or
subdirectory, the following steps should be done.
If it's a directory, add it to the image_subdirs list below and it will
be created automatically. The programmer must fill the directory as
needed. If a file is added, the programmer is responsible for creating
that file during image creation at an appropriate place and time.
If a directory is required to be present in order for an image to be
identifiable as such, it should go into required_subdirs instead.
However, upgrade issues abound; this list should probably not change.
Once those steps have been carried out, the change should be added
to the test suite for image corruption (t_pkg_install_corrupt_image.py).
This will likely also involve a change to
SingleDepotTestCaseCorruptImage in testutils.py. Each of these files
outline what must be updated.
XXX Root path probably can't be absolute, so that we can combine or
reuse Image contents.
XXX Image file format? Image file manipulation API?"""
required_subdirs = [ "catalog", "file", "pkg" ]
image_subdirs = required_subdirs + [ "index", "state/installed" ]
def __init__(self):
    # True once a variant.arch change is in progress; see
    # image_change_variant().
    self.arch_change = False
    # Cached ImageConfig; populated by load_config()/set_attrs().
    self.cfg_cache = None
    self.type = None
    self.root = None
    self.history = history.History()
    self.imgdir = None
    self.pkgdir = None
    self.img_prefix = None
    self.index_dir = None
    self.repo_uris = []
    self.filter_tags = {}
    # Per-publisher catalog objects, keyed by publisher prefix.
    self.__catalogs = {}
    self._catalog = {}
    self.__pkg_states = None
    # Variants proposed by an in-progress change-variant operation.
    self.new_variants = {}
    self.dl_cache_dir = None
    self.dl_cache_incoming = None
    self.is_user_cache_dir = False
    self.state = imagestate.ImageState(self)
    self.attrs = {
        "Policy-Require-Optional": False,
        "Policy-Pursue-Latest": True
    }
    self.__catalog_cache_mod_time = None

    self.imageplan = None # valid after evaluation succeeds

    self.constraints = constraint.ConstraintSet()

    # Transport operations for this image
    self.transport = transport.Transport(self)

    # a place to keep info about saved_files; needed by file action
    self.saved_files = {}

    # right now we don't explicitly set dir/file modes everywhere;
    # set umask to proper value to prevent problems w/ overly
    # locked down umask.
    os.umask(0022)
def _check_subdirs(self, sub_d, prefix):
    """Return True only if every required image subdirectory exists
    under sub_d/prefix."""
    return all(
        os.path.isdir(os.path.join(sub_d, prefix, subdir))
        for subdir in self.required_subdirs)
def image_type(self, d):
    """Returns the type of image at directory: d; or None"""
    # A directory is a user image when the user prefix holds a config
    # file plus all required subdirs; likewise for an entire image
    # under the root prefix.
    user_cfg = os.path.join(d, img_user_prefix, imageconfig.CFG_FILE)
    if os.path.isdir(os.path.join(d, img_user_prefix)) and \
        os.path.isfile(user_cfg) and \
        self._check_subdirs(d, img_user_prefix):
        return IMG_USER

    root_cfg = os.path.join(d, img_root_prefix, imageconfig.CFG_FILE)
    if os.path.isdir(os.path.join(d, img_root_prefix)) and \
        os.path.isfile(root_cfg) and \
        self._check_subdirs(d, img_root_prefix):
        return IMG_ENTIRE

    return None
def find_root(self, d, exact_match=False):
    """Ascend from directory 'd' to the first enclosing image and
    configure this object's directories from it.

    Raises ImageNotFoundException when no image is found, or when
    'exact_match' is True and the image found is not 'd' itself.
    """
    # Ascend from the given directory d to find first
    # encountered image.  If exact_match is true, if the
    # image found doesn't match startd, raise an
    # ImageNotFoundException.
    startd = d
    # eliminate problem if relative path such as "." is passed in
    d = os.path.realpath(d)
    while True:
        imgtype = self.image_type(d)
        if imgtype == IMG_USER:
            # XXX Look at image file to determine filter
            # tags and repo URIs.
            if exact_match and \
                os.path.realpath(startd) != \
                os.path.realpath(d):
                raise api_errors.ImageNotFoundException(
                    exact_match, startd, d)
            self.__set_dirs(imgtype=imgtype, root=d)
            self.attrs["Build-Release"] = "5.11"
            return
        elif imgtype == IMG_ENTIRE:
            # XXX Look at image file to determine filter
            # tags and repo URIs.
            # XXX Look at image file to determine if this
            # image is a partial image.
            if exact_match and \
                os.path.realpath(startd) != \
                os.path.realpath(d):
                raise api_errors.ImageNotFoundException(
                    exact_match, startd, d)
            self.__set_dirs(imgtype=imgtype, root=d)
            self.attrs["Build-Release"] = "5.11"
            return

        # XXX follow symlinks or not?
        oldpath = d
        d = os.path.normpath(os.path.join(d, os.path.pardir))

        # Make sure we are making progress and aren't in an
        # infinite loop.
        #
        # (XXX - Need to deal with symlinks here too)
        if d == oldpath:
            raise api_errors.ImageNotFoundException(
                exact_match, startd, d)
def load_config(self):
"""Load this image's cached configuration from the default
location."""
# XXX Incomplete with respect to doc/image.txt description of
# configuration.
if self.root == None:
raise RuntimeError, "self.root must be set"
ic = imageconfig.ImageConfig(self.root)
ic.read(self.imgdir)
self.cfg_cache = ic
def save_config(self):
    """Persist the current image configuration to disk."""
    # First, create the image directories if they haven't been, so
    # the cfg_cache can be written.
    self.mkdirs()
    self.cfg_cache.write(self.imgdir)

# XXX mkdirs and set_attrs() need to be combined into a create
# operation.
def mkdirs(self):
    """Create any missing image metadata subdirectories."""
    for sd in self.image_subdirs:
        # Compute the path once instead of re-joining it for both
        # the existence check and the creation.
        sdpath = os.path.join(self.imgdir, sd)
        if not os.path.isdir(sdpath):
            os.makedirs(sdpath)
def __set_dirs(self, imgtype, root):
    """Record the image type/root and derive all dependent paths.

    NOTE(review): this chdir()s into the image root the first time it
    is called -- a process-wide side effect; confirm callers expect it.
    """
    self.type = imgtype
    self.root = root
    if self.type == IMG_USER:
        self.img_prefix = img_user_prefix
    else:
        self.img_prefix = img_root_prefix

    # Change directory to the root of the image so that we can
    # remove any directories beneath us.  If we're changing the
    # image, don't chdir, as we're likely changing to a new BE
    # and want to be able to unmount it later.
    if not self.imgdir and os.path.isdir(root):
        os.chdir(root)

        # The specified root may have been a relative path.
        self.root = os.getcwd()

    self.imgdir = os.path.join(self.root, self.img_prefix)
    self.pkgdir = os.path.join(self.imgdir, "pkg")
    self.history.root_dir = self.imgdir

    # Honor an explicit user download cache; otherwise cache inside
    # the image metadata directory.
    if "PKG_CACHEDIR" in os.environ:
        self.dl_cache_dir = os.path.normpath( \
            os.environ["PKG_CACHEDIR"])
        self.is_user_cache_dir = True
    else:
        self.dl_cache_dir = os.path.normpath( \
            os.path.join(self.imgdir, "download"))

    # Per-process incoming area so concurrent clients don't collide.
    self.dl_cache_incoming = os.path.normpath(os.path.join(
        self.dl_cache_dir, "incoming-%d" % os.getpid()))
def set_attrs(self, imgtype, root, is_zone, prefix, pub_url,
    ssl_key=None, ssl_cert=None, variants=EmptyDict,
    refresh_allowed=True, progtrack=None):
    """Initialize (or re-initialize) the image at 'root': derive its
    directories, set default variants, and add its first publisher.

    Raises whatever add_publisher() raises when the publisher's
    repository cannot be contacted and 'refresh_allowed' is True.
    """
    self.__set_dirs(imgtype=imgtype, root=root)

    # Create the publisher object before creating the image...
    repo = publisher.Repository()
    repo.add_origin(pub_url, ssl_cert=ssl_cert, ssl_key=ssl_key)
    newpub = publisher.Publisher(prefix,
        meta_root=self._get_publisher_meta_root(prefix),
        repositories=[repo])

    # Initialize and store the configuration object.
    self.cfg_cache = imageconfig.ImageConfig(self.root)

    # ...so that if creation of the Publisher object fails, an
    # empty, useless image won't be left behind.
    if not os.path.exists(os.path.join(self.imgdir,
        imageconfig.CFG_FILE)):
        self.history.log_operation_start("image-create")
    else:
        self.history.log_operation_start("image-set-attributes")

    # Determine and add the default variants for the image.
    if is_zone:
        self.cfg_cache.filters["opensolaris.zone"] = "nonglobal"
        self.cfg_cache.variants[
            "variant.opensolaris.zone"] = "nonglobal"
    else:
        self.cfg_cache.variants[
            "variant.opensolaris.zone"] = "global"

    self.cfg_cache.variants["variant.arch"] = \
        variants.get("variant.arch", platform.processor())

    # After setting up the default variants, add any overrides or
    # additional variants specified.
    self.cfg_cache.variants.update(variants)

    # Now everything is ready for publisher configuration.
    self.cfg_cache.preferred_publisher = newpub.prefix
    self.add_publisher(newpub, refresh_allowed=refresh_allowed,
        progtrack=progtrack)

    # No need to save configuration as add_publisher will do that
    # if successful.
    self.history.log_operation_end()
def is_liveroot(self):
    """True when this image is the running system's root image (or a
    debug-simulated live root)."""
    if self.root == "/":
        return True
    return bool(DebugValues.get_value("simulate_live_root"))
def is_zone(self):
    """True when the image was created for a non-global zone."""
    zone_variant = self.cfg_cache.variants["variant.opensolaris.zone"]
    return zone_variant == "nonglobal"
def get_arch(self):
    """Return the effective architecture variant, preferring any
    in-progress variant change over the stored configuration."""
    try:
        return self.new_variants["variant.arch"]
    except KeyError:
        return self.cfg_cache.variants["variant.arch"]
def get_root(self):
    """Return the image's root directory path."""
    return self.root
def gen_publishers(self, inc_disabled=False):
if not self.cfg_cache:
raise CfgCacheError, "empty ImageConfig"
for p in self.cfg_cache.publishers:
pub = self.cfg_cache.publishers[p]
if inc_disabled or not pub.disabled:
yield self.cfg_cache.publishers[p]
def check_cert_validity(self):
    """Validate the SSL certificate of every configured origin.

    Prints a message and exits with an error for an expired
    certificate, or warns when one is close to expiration (both via
    misc.validate_ssl_cert).  Returns True otherwise.
    """
    for pub in self.gen_publishers():
        for repo in pub.repositories:
            for origin in repo.origins:
                if not origin.ssl_cert:
                    continue
                misc.validate_ssl_cert(
                    origin.ssl_cert,
                    prefix=pub.prefix, uri=origin)
    return True
def has_publisher(self, prefix=None, alias=None):
    """Returns a boolean value indicating whether a publisher
    exists in the image configuration that matches the given
    prefix or alias."""
    return any(
        prefix == pub.prefix or (alias and alias == pub.alias)
        for pub in self.gen_publishers(inc_disabled=True))
def remove_publisher(self, prefix=None, alias=None, progtrack=None):
    """Remove the publisher matching 'prefix' or 'alias' from the
    image configuration and purge its cached metadata.

    Raises UnknownPublisher when no match exists, or
    RemovePreferredPublisher when asked to remove the preferred
    publisher.
    """
    if not progtrack:
        progtrack = progress.QuietProgressTracker()
    self.history.log_operation_start("remove-publisher")
    try:
        pub = self.get_publisher(prefix=prefix,
            alias=alias)
    except api_errors.ApiException, e:
        self.history.log_operation_end(e)
        raise

    # The preferred publisher can never be removed.
    if pub.prefix == self.cfg_cache.preferred_publisher:
        e = api_errors.RemovePreferredPublisher()
        self.history.log_operation_end(error=e)
        raise e

    self.cfg_cache.remove_publisher(prefix)
    self.save_config()
    self.remove_publisher_metadata(pub)
    # Reload catalogs so the removed publisher's packages disappear.
    self.load_catalogs(progtrack, force=True)
    self.history.log_operation_end()
def get_publishers(self):
    """Return the dict of configured publishers, keyed by prefix."""
    return self.cfg_cache.publishers
def get_publisher(self, prefix=None, alias=None, origin=None):
    """Return the first publisher matching the given prefix, alias,
    or origin URI.

    Raises UnknownPublisher when nothing matches.
    """
    # Iterate the values directly; the previous list-comprehension
    # copy of .values() served no purpose.
    for pub in self.get_publishers().values():
        if prefix and prefix == pub.prefix:
            return pub
        elif alias and alias == pub.alias:
            return pub
        elif origin and \
            pub.selected_repository.has_origin(origin):
            return pub
    # max() picks whichever selector was actually supplied (the
    # others are None, which compares smaller on Python 2).
    raise api_errors.UnknownPublisher(max(prefix, alias, origin))
def get_publisher_last_update_time(self, prefix, cached=True):
    """Returns a datetime object (or 'None') representing the last
    time the catalog for a publisher was updated.

    If the catalog has already been loaded, this reflects the
    in-memory state of the catalog.

    If the catalog has not already been loaded or 'cached' is False,
    then the catalog will be temporarily loaded and the most recent
    information returned."""

    # NOTE(review): the guard below consults the in-memory catalog
    # only when cached=False, which looks inverted relative to the
    # docstring -- confirm against upstream before changing.
    if not cached:
        try:
            cat = self.__catalogs[prefix]
        except KeyError:
            pass
        else:
            update_dt = cat.last_modified()
            if update_dt:
                update_dt = catalog.ts_to_datetime(
                    update_dt)
            return update_dt

    # Temporarily retrieve the catalog object, but don't
    # cache it as that would interfere with load_catalogs.
    try:
        croot = "%s/catalog/%s" % (self.imgdir, prefix)
        cat = catalog.Catalog(croot, publisher=prefix)
    except (EnvironmentError, catalog.CatalogException):
        # Treat an unreadable or invalid catalog as never updated.
        cat = None

    update_dt = None
    if cat:
        update_dt = cat.last_modified()
        if update_dt:
            update_dt = catalog.ts_to_datetime(update_dt)
    return update_dt
def get_preferred_publisher(self):
    """Returns the prefix of the preferred publisher."""
    # The preferred publisher is tracked in the image configuration.
    return self.cfg_cache.preferred_publisher
def set_preferred_publisher(self, prefix=None, alias=None, pub=None):
    """Sets the preferred publisher for packaging operations.

    'prefix' is an optional string value specifying the name of
    a publisher; ignored if 'pub' is provided.

    'alias' is an optional string value specifying the alias of
    a publisher; ignored if 'pub' is provided.

    'pub' is an optional Publisher object identifying the
    publisher to set as the preferred publisher.

    One of the above parameters must be provided.

    Raises UnknownPublisher if no publisher matches, or
    SetDisabledPublisherPreferred if the match is disabled.
    """
    self.history.log_operation_start("set-preferred-publisher")
    if not pub:
        try:
            pub = self.get_publisher(prefix=prefix,
                alias=alias)
        except api_errors.UnknownPublisher, e:
            self.history.log_operation_end(error=e)
            raise

    # A disabled publisher cannot be made preferred.
    if pub.disabled:
        e = api_errors.SetDisabledPublisherPreferred(pub)
        self.history.log_operation_end(error=e)
        raise e

    self.cfg_cache.preferred_publisher = pub.prefix
    self.save_config()
    self.history.log_operation_end()
def set_property(self, prop_name, prop_value):
    """Set an image property and persist the configuration.

    The preferred publisher must be changed via
    set_preferred_publisher(), never through the generic property
    interface.
    """
    assert prop_name != "preferred-publisher"
    self.cfg_cache.properties[prop_name] = prop_value
    self.save_config()
def get_property(self, prop_name):
    """Return the value of an image property (KeyError if unset)."""
    return self.cfg_cache.properties[prop_name]

def has_property(self, prop_name):
    """Return True when the named image property is set."""
    return prop_name in self.cfg_cache.properties

def delete_property(self, prop_name):
    """Remove an image property and persist the configuration.

    The preferred publisher cannot be deleted this way.
    """
    assert prop_name != "preferred-publisher"
    del self.cfg_cache.properties[prop_name]
    self.save_config()

def properties(self):
    """Generator yielding the names of all set image properties."""
    for p in self.cfg_cache.properties:
        yield p
def add_publisher(self, pub, refresh_allowed=True, progtrack=None):
    """Adds the provided publisher object to the image
    configuration.

    'refresh_allowed' is an optional, boolean value indicating
    whether the publisher's metadata should be retrieved when adding
    it to the image's configuration.

    'progtrack' is an optional ProgressTracker object.

    Raises DuplicatePublisher for an existing prefix/alias, and
    re-raises any retrieval failure after rolling the publisher
    back out of the configuration.
    """
    self.history.log_operation_start("add-publisher")
    for p in self.cfg_cache.publishers.values():
        if pub == p or (pub.alias and pub.alias == p.alias):
            error = api_errors.DuplicatePublisher(pub)
            self.history.log_operation_end(error=error)
            raise error

    # Must assign this first before performing any more operations.
    pub.meta_root = self._get_publisher_meta_root(pub.prefix)
    self.cfg_cache.publishers[pub.prefix] = pub

    # This ensures that if data is leftover from a publisher
    # with the same prefix as this one that it gets purged
    # first to prevent usage of stale data.
    self.remove_publisher_metadata(pub)

    if refresh_allowed:
        try:
            # First, verify that the publisher has a valid
            # pkg(5) repository.
            self.transport.valid_publisher_test(pub)

            self.__retrieve_catalogs(full_refresh=True,
                pubs=[pub], progtrack=progtrack)
        except Exception, e:
            # Remove the newly added publisher since the
            # retrieval failed.
            del self.cfg_cache.publishers[pub.prefix]
            self.history.log_operation_end(error=e)
            raise
        except:
            # Remove the newly added publisher since the
            # retrieval failed.
            del self.cfg_cache.publishers[pub.prefix]
            self.history.log_operation_end(
                result=history.RESULT_FAILED_UNKNOWN)
            raise

    # Only after success should the configuration be saved.
    self.save_config()
    self.history.log_operation_end()
def verify(self, fmri, progresstracker, **args):
    """generator that returns any errors in installed pkgs
    as tuple of action, list of errors"""

    for act in self.get_manifest(fmri).gen_actions(
        self.list_excludes()):
        errors = act.verify(self, pkg_fmri=fmri, **args)
        progresstracker.verify_add_progress(fmri)
        actname = act.distinguished_name()
        if errors:
            # Report through the tracker and yield only the
            # actions that actually failed verification.
            progresstracker.verify_yield_error(actname,
                errors)
            yield (act, errors)
def __call_imageplan_evaluate(self, ip, verbose=False):
    # Evaluate the given ImagePlan, recording operation start/end
    # state in image history when an operation is in progress, and
    # optionally displaying the plan before and after evaluation.
    if verbose:
        msg(_("Before evaluation:"))
        msg(ip)

    # A plan can be requested without actually performing an
    # operation on the image.
    if self.history.operation_name:
        self.history.operation_start_state = ip.get_plan()

    try:
        ip.evaluate()
    except constraint.ConstraintException, e:
        # Surface dependency-constraint failures as a planning
        # error, one violation per line.
        raise api_errors.PlanCreationException(
            constraint_violations=str(e).split("\n"))

    # Publish the evaluated plan on the image for later execution.
    self.imageplan = ip

    if self.history.operation_name:
        self.history.operation_end_state = \
            ip.get_plan(full=False)

    if verbose:
        msg(_("After evaluation:"))
        ip.display()
def image_change_variant(self, variants, progtrack, check_cancelation,
    noexecute, verbose=False):
    # Plan a change of the image's variants: installed packages
    # inconsistent with the new variant values are proposed for
    # removal and everything else is re-proposed, then the plan is
    # evaluated.
    # NOTE(review): 'check_cancelation' is accepted but unused here;
    # a constant "lambda: False" is passed to ImagePlan instead —
    # confirm this is intentional.
    ip = imageplan.ImagePlan(self, progtrack, lambda: False,
        noexecute=noexecute, variants=variants,
        recursive_removal=True)
    progtrack.evaluate_start()

    # make sure that some variants are actually changing
    variants = dict(set(variants.iteritems()) - \
        set(self.cfg_cache.variants.iteritems()))

    if not variants:
        # Still evaluate the (empty) plan so callers see a
        # consistent self.imageplan attribute.
        self.__call_imageplan_evaluate(ip, verbose)
        msg("No variant changes.")
        return

    #
    # only get manifests for all architectures if we're
    # changing the architecture variant
    #
    if "variant.arch" in variants:
        self.arch_change=True

    #
    # We can't set self.new_variants until after we instantiate
    # the image plan, since the image plan has to cache
    # information like the old excludes; once new_variants is
    # set, things like self.list_excludes() and self.get_arch()
    # report values based on the new variants we're moving to.
    #
    self.new_variants = variants

    for fmri in self.gen_installed_pkgs():
        m = self.get_manifest(fmri)
        m_arch = m.get_variants("variant.arch");
        if not m_arch:
            # keep packages that don't have an explicit arch
            ip.propose_fmri(fmri)
        elif self.get_arch() in m_arch:
            # keep packages that match the current arch
            ip.propose_fmri(fmri)
        else:
            # remove packages for different archs
            ip.propose_fmri_removal(fmri)

    self.__call_imageplan_evaluate(ip, verbose)
def image_config_update(self):
    """Persist any pending variant changes to the on-disk image
    configuration, then reload the configuration from disk."""
    if not self.new_variants:
        # Nothing pending; leave the configuration untouched.
        return
    cfg = self.cfg_cache
    cfg.variants.update(self.new_variants)
    cfg.write(self.imgdir)
    # Re-read the configuration so the in-memory copy reflects
    # exactly what was written.
    fresh = imageconfig.ImageConfig(self.root)
    fresh.read(self.imgdir)
    self.cfg_cache = fresh
def repair(self, repairs, progtrack):
    """Repair any actions in the fmri that failed a verify.

    'repairs' is a sequence of (fmri, actions) tuples naming the
    actions to repair for each package.

    'progtrack' is a ProgressTracker object.

    Returns True on success; raises
    api_errors.RebootNeededOnLiveImageException when the repair
    would require a reboot while running on the live root image."""
    # XXX: This (lambda x: False) is temporary until we move pkg fix
    # into the api and can actually use the
    # api::__check_cancelation() function.
    pps = []
    for fmri, actions in repairs:
        msg("Repairing: %-50s" % fmri.get_pkg_stem())
        m = self.get_manifest(fmri)
        pp = pkgplan.PkgPlan(self, progtrack, lambda: False)
        pp.propose_repair(fmri, m, actions)
        # Old and new excludes are the same for a repair.
        pp.evaluate(self.list_excludes(), self.list_excludes())
        pps.append(pp)

    # Wrap the per-package plans in an image plan and evaluate it.
    ip = imageplan.ImagePlan(self, progtrack, lambda: False)
    ip.update_index = False
    progtrack.evaluate_start()
    ip.pkg_plans = pps
    ip.evaluate()
    self.imageplan = ip

    # Refuse to execute a repair that needs a reboot on the live
    # root image.
    if ip.actuators.reboot_needed() and self.is_liveroot():
        raise api_errors.RebootNeededOnLiveImageException()
    ip.preexecute()
    ip.execute()
    return True
def has_manifest(self, fmri):
    """Return True if a local copy of the manifest for 'fmri'
    exists in the image's metadata directory."""
    mpath = fmri.get_dir_path()
    local_mpath = "%s/pkg/%s/manifest" % (self.imgdir, mpath)
    # Directly return the existence test instead of the redundant
    # "if ...: return True / return False" form.
    return os.path.exists(local_mpath)
def __fetch_manifest(self, fmri, excludes=EmptyI):
    """Fetch the manifest for 'fmri' through the transport layer,
    then record manifest-intent ("touched") state for it on the
    image."""
    mfst = self.transport.get_manifest(fmri, excludes,
        self.state.get_intent_str(fmri))

    # Scan everything the client is currently processing; the
    # reason of the last target whose publisher-stripped fmri
    # matches wins.
    intent = None
    for target, reason in self.state.get_targets():
        # Ignore the publisher for comparison.
        if target.get_fmri(anarchy=True) == \
            fmri.get_fmri(anarchy=True):
            intent = reason

    # If no intent could be found, assume INTENT_INFO.  (Under
    # Python 2, max(None, x) evaluates to x because None sorts
    # before other values.)
    self.state.set_touched_manifest(fmri,
        max(intent, imagestate.INTENT_INFO))
    return mfst
def __touch_manifest(self, fmri):
    """Perform steps necessary to 'touch' a manifest to provide
    intent information.  Ignores most exceptions as this operation
    is only for informational purposes."""
    # What is the client currently processing?
    target, intent = self.state.get_target()

    # Ignore dry-runs of operations or operations which do not have
    # a set target.
    if not target or intent == imagestate.INTENT_EVALUATE:
        return

    if not self.state.get_touched_manifest(fmri, intent):
        # If the manifest for this fmri hasn't been "seen"
        # before, determine if intent information needs to be
        # provided.

        # Ignore the publisher for comparison.
        np_target = target.get_fmri(anarchy=True)
        np_fmri = fmri.get_fmri(anarchy=True)
        if np_target == np_fmri:
            # If the client is currently processing
            # the given fmri (for an install, etc.)
            # then intent information is needed.
            try:
                self.transport.touch_manifest(fmri,
                    self.state.get_intent_str(fmri))
            except (api_errors.UnknownPublisher,
                api_errors.TransportError), e:
                # It's not fatal if we can't find
                # or reach the publisher.
                pass
            # Mark it seen so the touch is not repeated for
            # this fmri/intent combination.
            self.state.set_touched_manifest(fmri, intent)
def get_manifest_path(self, fmri):
    """Return the path to the on-disk manifest for 'fmri'."""
    return os.path.join(
        self.imgdir, "pkg", fmri.get_dir_path(), "manifest")
def __get_manifest(self, fmri, excludes=EmptyI):
    """Return an in-memory Manifest object for 'fmri', preferring
    the on-disk cached copy and falling back to a transport fetch
    when none is usable."""
    try:
        # NOTE(review): CachedManifest presumably raises KeyError
        # when no usable on-disk copy exists — confirm against
        # pkg.manifest.
        mfst = manifest.CachedManifest(fmri, self.pkgdir,
            self.cfg_cache.preferred_publisher, excludes)
    except KeyError:
        mfst = self.__fetch_manifest(fmri, excludes)
    return mfst
def get_manifest(self, fmri, all_arch=False):
    """Return the manifest for 'fmri'; uses the cached version if
    available.

    'all_arch' controls whether the manifest contains actions for
    all architectures rather than only the image's current one."""
    # Normally elide other arch variants; during an architecture
    # change (or when explicitly requested) keep every action.
    # (The original also set dead locals 'add_to_cache' and
    # 'all_arch = True' here; neither was used afterwards, so they
    # have been removed.)
    if self.arch_change or all_arch:
        v = EmptyI
    else:
        arch = {"variant.arch": self.get_arch()}
        v = [variant.Variants(arch).allow_action]
    m = self.__get_manifest(fmri, v)
    self.__touch_manifest(fmri)
    return m
def installed_file_publisher(self, filepath):
    """Find the pkg's installed file named by filepath.
    Return the publisher that installed this package."""
    f = file(filepath)
    try:
        flines = f.readlines()
        # Current format is two lines: a version marker and the
        # publisher; unpacking raises ValueError otherwise.
        version, pub = flines
        version = version.strip()
        pub = pub.strip()
        f.close()
    except ValueError:
        # If we get a ValueError, we've encountered an
        # installed file of a previous format.  If we want
        # upgrade to work in this situation, it's necessary
        # to assume that the package was installed from
        # the preferred publisher.  Here, we set up
        # the publisher to record that.
        if flines:
            pub = flines[0]
            pub = pub.strip()
            newpub = "%s_%s" % (pkg.fmri.PREF_PUB_PFX,
                pub)
        else:
            newpub = "%s_%s" % (pkg.fmri.PREF_PUB_PFX,
                self.get_preferred_publisher())

        pub = newpub

        # Rewrite the file in the current two-line format.
        # NOTE(review): the original read handle 'f' is rebound
        # below without an explicit close (relies on CPython
        # refcounting) — confirm acceptable.
        try:
            f = file(filepath, "w")
            f.writelines(["VERSION_1\n", newpub])
            f.close()
        except IOError, e:
            # Best effort: a read-only or unwritable image
            # is not fatal here.
            if e.errno not in (errno.EACCES, errno.EROFS):
                raise
    assert pub
    return pub
def _install_file(self, fmri):
    """Returns the path to the "installed" file for a given fmri."""
    return "/".join((self.imgdir, "pkg",
        fmri.get_dir_path(), "installed"))
def install_file_present(self, fmri):
    """Return True if the package named by 'fmri' is installed on
    the system, and False otherwise."""
    marker = self._install_file(fmri)
    return os.path.exists(marker)
def add_install_file(self, fmri):
    """Take an image and fmri.  Write a file to disk that
    indicates that the package named by the fmri has been
    installed."""
    # XXX This can be removed at some point in the future once we
    # think this link is available on all systems
    if not os.path.isdir("%s/state/installed" % self.imgdir):
        self.__update_installed_pkgs()

    try:
        f = file(self._install_file(fmri), "w")
    except EnvironmentError:
        # Parent directory probably missing: create it, then
        # retry the open once.
        try:
            os.makedirs(os.path.dirname(
                self._install_file(fmri)))
        except EnvironmentError, e:
            if e.errno == errno.EACCES:
                raise api_errors.PermissionsException(
                    e.filename)
            # EEXIST for an existing directory is fine; any
            # other failure is fatal.
            if e.errno != errno.EEXIST and \
                not os.path.isdir(e.filename):
                raise
        f = file(self._install_file(fmri), "w")
    f.writelines(["VERSION_1\n", fmri.get_publisher_str()])
    f.close()

    # Create the corresponding link in the state/installed
    # summary directory.
    fi = file("%s/state/installed/%s" % (self.imgdir,
        fmri.get_link_path()), "w")
    fi.close()

    # Keep the in-memory package-state map in sync with disk.
    self.__pkg_states[urllib.unquote(fmri.get_link_path())] = \
        (PKG_STATE_INSTALLED, fmri)
def remove_install_file(self, fmri):
    """Take an image and a fmri.  Remove the file from disk
    that indicates that the package named by the fmri has been
    installed."""
    # XXX This can be removed at some point in the future once we
    # think this link is available on all systems
    if not os.path.isdir("%s/state/installed" % self.imgdir):
        self.__update_installed_pkgs()

    os.unlink(self._install_file(fmri))
    try:
        os.unlink("%s/state/installed/%s" % (self.imgdir,
            fmri.get_link_path()))
    except EnvironmentError, e:
        # A missing summary link is tolerated; anything else is
        # fatal.
        if e.errno != errno.ENOENT:
            raise

    # Keep the in-memory package-state map in sync with disk.
    self.__pkg_states[urllib.unquote(fmri.get_link_path())] = \
        (PKG_STATE_KNOWN, fmri)
def __update_installed_pkgs(self):
    """Take the image's record of installed packages from the
    prototype layout, with an installed file in each
    $META/pkg/stem/version directory, to the $META/state/installed
    summary directory form."""
    # If the directory is empty or it doesn't exist, we should
    # populate it.  The easy test is to try to remove the directory,
    # which will fail if it's already got entries in it, or doesn't
    # exist.  Other errors are beyond our capability to handle.
    statedir = os.path.join(self.imgdir, "state", "installed")
    try:
        os.rmdir(statedir)
    except EnvironmentError, e:
        if e.errno in (errno.EEXIST, errno.ENOTEMPTY):
            # Non-empty: already converted; nothing to do.
            return
        elif e.errno == errno.EACCES:
            # The directory may exist and be non-empty
            # even though we got EACCES.  Try
            # to determine its emptiness another way.
            try:
                if os.path.isdir(statedir) and \
                    len(os.listdir(statedir)) > 0:
                    return
            except EnvironmentError:
                # ignore this error, pass on the
                # original access error
                pass
            raise api_errors.PermissionsException(
                e.filename)
        elif e.errno != errno.ENOENT:
            raise

    tmpdir = os.path.join(self.imgdir, "state", "installed.build")

    # Create the link forest in a temporary directory.  We should
    # only execute this method once (if ever) in the lifetime of an
    # image, but if the path already exists and makedirs() blows up,
    # just be quiet if it's already a directory.  If it's not a
    # directory or something else weird happens, re-raise.
    try:
        os.makedirs(tmpdir)
    except OSError, e:
        if e.errno == errno.EACCES:
            raise api_errors.PermissionsException(
                e.filename)
        if e.errno != errno.EEXIST or \
            not os.path.isdir(tmpdir):
            raise
        return

    # Walk every $META/pkg/stem/version directory, creating one
    # link per installed package in the temporary directory.
    proot = os.path.join(self.imgdir, "pkg")
    for pd, vd in (
        (p, v)
        for p in sorted(os.listdir(proot))
        for v in sorted(os.listdir(os.path.join(proot, p)))
    ):
        path = os.path.join(proot, pd, vd, "installed")
        if not os.path.exists(path):
            continue

        fmristr = urllib.unquote("%s@%s" % (pd, vd))
        pub = self.installed_file_publisher(path)
        f = pkg.fmri.PkgFmri(fmristr, publisher = pub)
        fi = file(os.path.join(tmpdir, f.get_link_path()), "w")
        fi.close()

    # Someone may have already created this directory.  Junk the
    # directory we just populated if that's the case.
    try:
        portable.rename(tmpdir, statedir)
    except EnvironmentError, e:
        if e.errno != errno.EEXIST:
            raise
        shutil.rmtree(tmpdir)
def get_version_installed(self, pfmri):
    """Return the fmri of the installed package whose stem matches
    that of the given fmri, or None when no match is found."""
    match = None
    for inst in self.gen_installed_pkgs():
        if inst.is_same_pkg(pfmri):
            match = inst
            break
    return match
def get_pkg_state_by_fmri(self, pfmri):
    """Given pfmri, determine the local state of the package."""
    # State-map keys omit the leading "pkg:/" scheme, hence the
    # [5:] slice on the publisher-stripped fmri string.
    key = pfmri.get_fmri(anarchy=True)[5:]
    state, _unused = self.__pkg_states.get(key,
        (PKG_STATE_KNOWN, None))
    return state
def get_pkg_pub_by_fmri(self, pfmri):
    """Return the publisher from which 'pfmri' was installed."""
    key = pfmri.get_fmri(anarchy=True)[5:]
    _state, inst = self.__pkg_states.get(key,
        (PKG_STATE_KNOWN, None))
    if not inst:
        return None
    # Return the non-preferred-prefixed name.
    return inst.get_publisher()
def fmri_set_default_publisher(self, fmri):
    """If the FMRI supplied as an argument does not have a
    publisher, stamp it with the image's preferred publisher,
    marked as the default."""
    if not fmri.has_publisher():
        fmri.set_publisher(self.get_preferred_publisher(), True)
def get_catalog(self, fmri, exception = False):
    """Return the catalog appropriate for 'fmri' based on its
    publisher.

    'exception' controls whether a missing-publisher lookup
    propagates the KeyError (True) or silently falls back to the
    preferred publisher's catalog (False)."""
    # No publisher, or the default publisher: use the preferred
    # publisher's catalog.
    if not fmri.has_publisher() or fmri.preferred_publisher():
        return self.__catalogs[self.get_preferred_publisher()]
    try:
        return self.__catalogs[fmri.get_publisher()]
    except KeyError:
        # The publisher that installed this package has
        # vanished; optionally fall back to the default.
        if exception:
            raise
        return self.__catalogs[self.get_preferred_publisher()]
def has_version_installed(self, fmri):
    """Return True when the version given in 'fmri', or a
    successor of it, is installed in the current image.  As a side
    effect, 'fmri' is stamped with a publisher if it lacks one:
    the installed package's publisher when available, otherwise
    the image's preferred publisher (marked default)."""
    inst = self.get_version_installed(fmri)
    if not fmri.has_publisher():
        if inst:
            fmri.set_publisher(inst.get_publisher_str())
        else:
            fmri.set_publisher(
                self.get_preferred_publisher(), True)
    return bool(inst and inst.is_successor(fmri))
def older_version_installed(self, fmri):
    """Used by the package plan to determine whether some version
    of the package named by the destination 'fmri' is already
    installed; returns that installed fmri, or None."""
    installed = self.get_version_installed(fmri)
    # Callers must always supply a publisher-qualified fmri.
    assert fmri.has_publisher()
    return installed or None
def is_installed(self, fmri):
    """Return True only when the exact version given in 'fmri' is
    installed in the current image."""
    # All FMRIs passed to is_installed shall have a publisher.
    assert fmri.has_publisher()
    inst = self.get_version_installed(fmri)
    if inst:
        return inst == fmri
    return False
def list_excludes(self, new_variants=None):
    """Generate a list of callables that each return True if an
    action is to be included in the image using the currently
    defined variants for the image, or an updated set if
    'new_variants' are specified.  The callables take a single
    action argument.  Variants, facets and filters will be handled
    in this fashion."""
    # XXX simple for now; facets and filters need impl.
    # Explicit argument wins over a pending image-wide change.
    pending = new_variants or self.new_variants
    if not pending:
        return [self.cfg_cache.variants.allow_action]
    merged = self.cfg_cache.variants.copy()
    merged.update(pending)
    return [merged.allow_action]
def __build_dependents(self, progtrack):
    """Build a dictionary mapping each package fmri to the list of
    installed packages that carry a require dependency on it."""
    self.__req_dependents = {}

    for fmri in self.gen_installed_pkgs():
        progtrack.evaluate_progress(fmri)
        mfst = self.get_manifest(fmri)

        for dep in mfst.gen_actions_by_type("depend",
            self.list_excludes()):
            # Only "require" dependencies are recorded.
            if dep.attrs["type"] != "require":
                continue
            dfmri = self.strtofmri(dep.attrs["fmri"])
            self.__req_dependents.setdefault(
                dfmri, []).append(fmri)
def get_dependents(self, pfmri, progtrack):
    """Return a list of the packages directly dependent on the given
    FMRI."""
    # Lazily build the dependency map; the mangled attribute name
    # is used because __req_dependents is class-private.
    if not hasattr(self, "_Image__req_dependents"):
        self.__build_dependents(progtrack)

    dependents = []
    # We run through all the keys, in case a package is depended
    # upon under multiple versions.  That is, if pkgA depends on
    # libc@1 and pkgB depends on libc@2, we need to return both pkgA
    # and pkgB.  If we used package names as keys, this would be
    # simpler, but it wouldn't handle catalog operations (such as
    # rename) that might have been applied to the fmri.
    for f in self.__req_dependents.iterkeys():
        if pfmri.is_successor(f):
            dependents.extend(self.__req_dependents[f])
    return dependents
def refresh_publishers(self, full_refresh=False, immediate=False,
    pubs=None, progtrack=None, validate=True):
    """Refreshes the metadata (e.g. catalog) for one or more
    publishers.

    'full_refresh' is an optional boolean value indicating whether
    a full retrieval of publisher metadata (e.g. catalogs) or only
    an update to the existing metadata should be performed.  When
    True, 'immediate' is also set to True.

    'immediate' is an optional boolean value indicating whether
    a refresh should occur now.  If False, a publisher's selected
    repository will only be checked for updates if the update
    interval period recorded in the image configuration has been
    exceeded; ignored when 'full_refresh' is True.

    'pubs' is a list of publisher prefixes or publisher objects
    to refresh.  Passing an empty list or using the default value
    implies all publishers.

    'progtrack' is an optional ProgressTracker object.

    'validate' is an optional, boolean value indicating whether a
    connectivity test should be performed before attempting to
    retrieve publisher metadata.

    Raises api_errors.DisabledPublisher when a disabled publisher
    is explicitly requested."""
    if full_refresh:
        immediate = True

    if not progtrack:
        progtrack = progress.QuietProgressTracker()

    self.history.log_operation_start("refresh-publishers")

    # Verify validity of certificates before attempting network
    # operations.
    try:
        self.check_cert_validity()
    except api_errors.ExpiringCertificate, e:
        # XXX need client messaging framework
        misc.emsg(e)

    pubs_to_refresh = []

    if not pubs:
        # Omit disabled publishers.
        pubs = [p for p in self.gen_publishers()]
    for pub in pubs:
        p = pub
        # Prefixes are resolved to publisher objects.
        if not isinstance(p, publisher.Publisher):
            p = self.get_publisher(prefix=p)
        if p.disabled:
            e = api_errors.DisabledPublisher(p)
            self.history.log_operation_end(error=e)
            raise e
        if immediate or p.needs_refresh:
            pubs_to_refresh.append(p)

    if not pubs_to_refresh:
        # Trigger a load of the catalogs if they haven't been
        # loaded yet for the sake of our caller.
        self.load_catalogs(progtrack)
        self.history.log_operation_end()
        return

    try:
        if validate:
            # Before an attempt is made to retrieve catalogs
            # from the publisher repositories, a check needs
            # to be done to ensure that the client isn't
            # stuck behind a captive portal.
            self.transport.captive_portal_test()
        self.__retrieve_catalogs(full_refresh=full_refresh,
            pubs=pubs_to_refresh, progtrack=progtrack)
    except (api_errors.ApiException, catalog.CatalogException), e:
        # Reload catalogs; this picks up any updates and
        # ensures the catalog is loaded for callers.
        self.load_catalogs(progtrack, force=True)
        self.history.log_operation_end(error=e)
        raise
    self.history.log_operation_end()
def __retrieve_catalogs(self, full_refresh=False, pubs=None,
    progtrack=None):
    """Retrieves the catalogs for the specified publishers
    performing full or incremental updates as needed or indicated.

    'full_refresh' is a boolean value indicating whether a full
    update should be forced for the specified publishers.

    'pubs' is an optional list of publisher objects to refresh the
    metadata for.  If not provided or 'None', all publishers will be
    refreshed.  Disabled publishers are always ignored regardless of
    whether this list is provided.

    'progtrack' is an optional ProgressTracker object.

    Returns True when at least one publisher's metadata changed;
    raises api_errors.CatalogRefreshException when any transport
    retrieval failed."""
    if not progtrack:
        progtrack = progress.QuietProgressTracker()

    failed = []
    total = 0
    if not pubs:
        pubs = list(self.gen_publishers())

    try:
        # Ensure Image directory structure is valid.
        self.mkdirs()
        # Load the catalogs, if they haven't been already, so
        # incremental updates can be performed.
        self.load_catalogs(progtrack)
    except EnvironmentError, e:
        self.history.log_operation_end(error=e)
        raise

    progtrack.refresh_start(len(pubs))

    def catalog_changed(prefix, old_ts, old_size):
        # Compare the freshly-written on-disk catalog against
        # the pre-refresh timestamp and size.
        if not old_ts or not old_size:
            # It didn't exist before.
            return True

        croot = "%s/catalog/%s" % (self.imgdir, prefix)
        c = catalog.Catalog(croot, publisher=prefix)
        if c.last_modified() != old_ts:
            return True

        if c.size() != old_size:
            return True
        return False

    updated = 0
    succeeded = 0
    for pub in pubs:
        if pub.disabled:
            continue

        total += 1
        progtrack.refresh_progress(pub.prefix)

        full_refresh_this_pub = False

        cat = None
        ts = None
        size = 0
        if pub.prefix in self.__catalogs:
            cat = self.__catalogs[pub.prefix]
            ts = cat.last_modified()
            size = cat.size()

            # Although we may have a catalog with a
            # timestamp, the user may have changed the
            # origin URL for the publisher.  If this has
            # occurred, we need to perform a full refresh.
            repo = pub.selected_repository
            if cat.origin() not in repo.origins:
                full_refresh_this_pub = True

        if full_refresh or full_refresh_this_pub:
            # Set timestamp to None in order
            # to perform full refresh.
            ts = None

        try:
            self.transport.get_catalog(pub, ts)
        except api_errors.TransportError, e:
            # Collect failures so every publisher gets a
            # chance before the exception is raised below.
            failed.append((pub, e))
        else:
            if catalog_changed(pub.prefix, ts, size):
                updated += 1
            pub.last_refreshed = dt.datetime.utcnow()
            succeeded += 1

    if updated > 0:
        # If any publisher metadata was changed, then destroy
        # the catalog cache, update the installed package list,
        # and force a reload of all catalog data.
        self.__destroy_catalog_cache()
        self.__update_installed_pkgs()
        self.load_catalogs(progtrack, force=True)

    progtrack.refresh_done()

    if failed:
        raise api_errors.CatalogRefreshException(failed, total,
            succeeded)
    return updated > 0
# On-disk catalog cache format version; __load_catalog_cache rejects
# cache files whose first line does not match this value.
CATALOG_CACHE_VERSION = 4
def __cache_catalogs(self, progtrack, pubs=None):
    """Read in all the catalogs and cache the data in memory, then
    best-effort write the compact cache file to disk.

    'progtrack' is a ProgressTracker object.

    'pubs' is a list of publisher objects to include when caching
    the image's configured publisher metadata.

    Write failures are deliberately non-fatal: the in-memory cache
    remains valid even when the on-disk copy cannot be written."""
    progtrack.cache_catalogs_start()
    cache = {}
    publist = []
    try:
        publist = dict(
            (p.prefix, p) for p in self.gen_publishers()
        )
    except CfgCacheError:
        # No publishers defined.  If the caller hasn't
        # supplied publishers to cache, raise the error
        if not pubs:
            raise

    if pubs:
        # If caller passed publishers, include this in
        # the list of publishers to cache.  These might
        # be publisher objects that haven't been added
        # to the image configuration yet.
        for p in pubs:
            publist[p.prefix] = p

    for pub in publist.itervalues():
        try:
            catalog.Catalog.read_catalog(cache,
                pub.meta_root, pub=pub.prefix)
        except EnvironmentError, e:
            # If a catalog file is just missing, ignore it.
            # If there's a worse error, make sure the user
            # knows about it.
            if e.errno == errno.ENOENT:
                pass
            else:
                raise

    self._catalog = cache

    # Use the current time until the actual file timestamp can be
    # retrieved at the end.  That way, if an exception is raised
    # or an early return occurs, it will still be set.
    self.__catalog_cache_mod_time = int(time.time())

    # Remove old catalog cache files.
    croot = os.path.join(self.imgdir, "catalog")
    for fname in ("pkg_names.pkl", "catalog.pkl"):
        fpath = os.path.join(croot, fname)
        try:
            portable.remove(fpath)
        except KeyboardInterrupt:
            raise
        except:
            # If for any reason, the file can't be removed,
            # it doesn't matter.
            pass

    # Write to a temp file first, then atomically rename into
    # place below.
    try:
        cfd, ctmp = tempfile.mkstemp(dir=croot)
        cf = os.fdopen(cfd, "wb")
    except EnvironmentError:
        # If the cache can't be written, it doesn't matter.
        progtrack.cache_catalogs_done()
        return

    def cleanup():
        # Close and discard the partially-written temp file.
        try:
            if cf:
                cf.close()
        except EnvironmentError:
            pass

        try:
            portable.remove(ctmp)
        except EnvironmentError:
            pass

    # First, the list of all publishers is built assigning each
    # one a sequentially incremented integer as they are discovered.
    # This number is used as a mapping code for publishers to reduce
    # the size of the catalog cache.
    pubs = {}
    for pkg_name in cache:
        vers = cache[pkg_name]
        for k, v in vers.iteritems():
            if k == "versions":
                continue
            for p in v[1]:
                if p not in pubs:
                    pubs[p] = str(len(pubs))

    # '|' is used to separate fields of information (such
    # as fmri name and each version).
    # '!' is used to separate items within a field (such as
    # information about a version).
    # '^' is used to separate item values (such as a publisher and
    # its index number).
    # First line of file is the version of the catalog cache.
    try:
        cf.write("%s\n" % self.CATALOG_CACHE_VERSION)
    except EnvironmentError:
        # If the cache can't be written, it doesn't matter.
        cleanup()
        progtrack.cache_catalogs_done()
        return
    except:
        cleanup()
        raise

    # Second line of the file is the list of publisher prefixes
    # and their index number used to decode the fmri entries.
    publine = "!".join([
        "^".join((p, pubs[p])) for p in pubs
    ])

    try:
        cf.write("%s\n" % publine)
    except EnvironmentError:
        # If the cache can't be written, it doesn't matter.
        cleanup()
        progtrack.cache_catalogs_done()
        return
    except:
        cleanup()
        raise

    # All lines after the first two are made up of a package's
    # version-specific fmri and the list of publishers that have
    # it in their catalog, or where it was installed from.
    for pkg_name in sorted(cache.keys()):
        vers = cache[pkg_name]

        # Iteration has to be performed over versions to retain
        # sort order.
        first = True
        release = None
        build_release = None
        branch = None
        for v in vers["versions"]:
            f, fpubs = vers[str(v)]

            known = "^".join(
                pubs[p] for p in fpubs
                if fpubs[p]
            )
            unknown = "^".join(
                pubs[p] for p in fpubs
                if not fpubs[p]
            )

            if first:
                # When writing the first entry for a
                # package, write its full fmri.
                first = False
                release = f.version.release
                build_release = f.version.build_release
                branch = f.version.branch
                sfmri = f.get_fmri(anarchy=True,
                    include_scheme=False)
            else:
                # For successive entries, write only
                # what is not shared by the previous
                # entry.
                rmatch = f.version.release == release
                brmatch = f.version.build_release == \
                    build_release
                bmatch = f.version.branch == branch
                sver = str(f.version)
                if rmatch and brmatch and bmatch:
                    # If release, build_release, and
                    # branch match the last entry,
                    # they can be omitted.
                    sfmri = ":" + sver.split(":")[1]
                elif rmatch and brmatch:
                    # If release and build_release
                    # match the last entry, they can
                    # be omitted.
                    sfmri = "-" + sver.split("-")[1]
                elif rmatch:
                    # If release matches the last
                    # entry, it can be omitted.
                    sfmri = "," + sver.split(",")[1]
                else:
                    # Nothing matched the previous
                    # entry except the name, so the
                    # full version must be written.
                    sfmri = "@" + sver

                release = f.version.release
                build_release = f.version.build_release
                branch = f.version.branch

            line = sfmri + "|" + known + "!" + unknown
            try:
                cf.write(line + "\n")
            except EnvironmentError:
                # If the cache can't be written, it
                # doesn't matter.
                progtrack.cache_catalogs_done()
                cleanup()
                return
            except:
                cleanup()
                raise

    cfpath = os.path.join(croot, CATALOG_CACHE_FILE)
    try:
        cf.close()
        cf = None
        os.chmod(ctmp, 0644)
        portable.rename(ctmp, cfpath)
    except EnvironmentError:
        # If the cache can't be written, it doesn't matter.
        progtrack.cache_catalogs_done()
        cleanup()
        return
    except:
        cleanup()
        raise

    # Update the mod time with the actual timestamp from the file.
    self.__catalog_cache_mod_time = \
        self.__get_catalog_cache_mod_time()

    progtrack.cache_catalogs_done()
def __get_catalog_cache_mod_time(self):
"""Internal helper function used to obtain last modification
time of the on-disk catalog cache."""
croot = os.path.join(self.imgdir, "catalog")
cache_file = os.path.join(croot, CATALOG_CACHE_FILE)
try:
mod_time = os.stat(cache_file).st_mtime
except EnvironmentError, e:
if e.errno == errno.EACCES:
raise api_errors.PermissionsException(
e.filename)
if e.errno != errno.ENOENT:
raise
mod_time = None
return mod_time
def __load_catalog_cache(self, progtrack):
    """Read in the cached catalog data.

    'progtrack' is a ProgressTracker object.

    Raises api_errors.CatalogCacheMissing, CatalogCacheBadVersion
    or CatalogCacheInvalid when the cache is absent, of the wrong
    version, or corrupt."""
    progtrack.load_catalog_cache_start()

    croot = os.path.join(self.imgdir, "catalog")
    cache_file = os.path.join(croot, CATALOG_CACHE_FILE)

    mod_time = self.__get_catalog_cache_mod_time()
    if self._catalog:
        if mod_time == self.__catalog_cache_mod_time:
            # Cache already loaded and up to date.
            progtrack.load_catalog_cache_done()
            return

    try:
        cf = file(cache_file, "rb")
    except EnvironmentError, e:
        # Reset in-memory state before propagating the error.
        self._catalog = {}
        self.__catalog_cache_mod_time = None

        if e.errno == errno.EACCES:
            raise api_errors.PermissionsException(
                e.filename)
        if e.errno == errno.ENOENT:
            raise api_errors.CatalogCacheMissing()
        raise

    # First line should be version.
    try:
        ver = cf.readline().strip()
        ver = int(ver)
    except ValueError:
        ver = None

    # If we don't recognize the version, complain.
    if ver != self.CATALOG_CACHE_VERSION:
        raise api_errors.CatalogCacheBadVersion(
            ver, expected=self.CATALOG_CACHE_VERSION)

    # Second line should be the list of publishers.
    publine = cf.readline().strip()
    if not publine:
        publine = ""

    # Decode the "prefix^index" mapping written by
    # __cache_catalogs.
    pubidx = {}
    for e in publine.split("!"):
        try:
            p, idx = e.split("^")
        except ValueError:
            raise api_errors.CatalogCacheInvalid(
                publine, line_number=2)
        pubidx[idx] = p

    if not pubidx:
        raise api_errors.CatalogCacheInvalid(
            publine, line_number=2)

    self._catalog = {}

    # Read until EOF.
    pkg_name = None
    sver = None
    for lnum, line in ((i + 3, l.strip())
        for i, l in enumerate(cf)):
        # The first of these line for each package is of
        # the format:
        #     fmri|pub1_known^pub2...!pub1_unknown^pub2...
        #
        # Successive versions of the same package are of
        # the format:
        #     @ver|pub1_known^pub2...!pub1_unknown^pub2...
        try:
            sfmri, spubs = line.split("|", 1)
            sfmri = sfmri.strip()
        except (AttributeError, ValueError):
            raise api_errors.CatalogCacheInvalid(
                line, line_number=lnum)

        if sfmri[0] in (":", "-", ",", "@") and \
            not pkg_name:
            # The previous line should have been a
            # full fmri or provided enough info
            # to construct one for this entry.
            raise api_errors.CatalogCacheInvalid(
                line, line_number=lnum)
        elif sfmri[0] == ":":
            # Everything but the timestamp is the
            # same as the previous entry.
            sfmri = "%s@%s%s" % (pkg_name,
                sver.split(":")[0], sfmri)
        elif sfmri[0] == "-":
            # Everything but the branch is the same
            # as the previous entry.
            sfmri = "%s@%s%s" % (pkg_name,
                sver.split("-")[0], sfmri)
        elif sfmri[0] == ",":
            # Everything but the release is the same
            # as the previous entry.
            sfmri = "%s@%s%s" % (pkg_name,
                sver.split(",")[0], sfmri)
        elif sfmri[0] == "@":
            # If the entry starts with this, then
            # only the package name is shared.
            sfmri = pkg_name + sfmri

        known, unknown = spubs.split("!")

        # Transform the publisher index numbers into
        # their equivalent prefixes.
        pubs = {}
        for k in known.split("^"):
            if k in pubidx:
                pubs[pubidx[k]] = True
        for u in unknown.split("^"):
            if u in pubidx:
                pubs[pubidx[u]] = False
        if not pubs:
            raise api_errors.CatalogCacheInvalid(
                line, line_number=lnum)

        # Build the FMRI from the provided string and
        # cache the result using the publisher info.
        try:
            pfmri = pkg.fmri.PkgFmri(sfmri)
            pkg_name = pfmri.pkg_name
            sver = sfmri.split("@", 1)[1]
        except (pkg.fmri.FmriError, IndexError), e:
            raise api_errors.CatalogCacheInvalid(
                line, line_number=lnum)

        catalog.Catalog.fast_cache_fmri(self._catalog,
            pfmri, sver, pubs)

    try:
        cf.close()
    except EnvironmentError:
        # All of the data was retrieved, so this error
        # doesn't matter.
        pass

    # Now that all of the data has been loaded, set the
    # modification time.
    self.__catalog_cache_mod_time = mod_time
    progtrack.load_catalog_cache_done()
def load_catalogs(self, progtrack, force=False):
    """Load publisher catalog data.

    'progtrack' should be a ProgressTracker object that will be used
    to provide progress information to clients.

    'force' is an optional, boolean value that, when 'True', will
    cause the publisher catalog data to be loaded again even if it
    has been already.  It defaults to 'False', which will cause the
    catalog data to only be loaded when not already loaded or when
    the catalog cache has been modified (which should only happen in
    the case of another process modifying it)."""
    if not force and self.__catalogs and \
        self.__pkg_states is not None:
        last_mod_time = self.__catalog_cache_mod_time
        if last_mod_time:
            mod_time = self.__get_catalog_cache_mod_time()
            if mod_time == last_mod_time:
                # Don't load the catalogs as they are
                # already loaded and state information
                # is up to date.
                return
            elif not mod_time:
                # Don't load the catalogs since no cache
                # exists on-disk but an in-memory one
                # does.  This can happen for
                # unprivileged users, or in a readonly
                # environment such as a Live CD where
                # the cache does not exist for space
                # or other reasons.
                return

    assert progtrack

    # Flush existing catalog data.
    self.__catalogs = {}

    for pub in self.gen_publishers():
        croot = "%s/catalog/%s" % (self.imgdir, pub.prefix)
        progtrack.catalog_start(pub.prefix)
        # The preferred publisher's catalog is loaded under its
        # prefixed name.
        if pub.prefix == self.cfg_cache.preferred_publisher:
            pubpfx = "%s_%s" % (pkg.fmri.PREF_PUB_PFX,
                pub.prefix)
            c = catalog.Catalog(croot,
                publisher=pubpfx)
        else:
            c = catalog.Catalog(croot,
                publisher=pub.prefix)
        self.__catalogs[pub.prefix] = c
        progtrack.catalog_done()

    # Load package state information as this will be used during
    # catalog cache generation.
    self.__load_pkg_states()

    # Try to load the catalog cache file.  If that fails, call
    # cache_catalogs so that the data from the canonical text copies
    # of the catalogs from each publisher will be loaded and the
    # data cached.
    #
    # XXX Given that this is a read operation, should we be writing?
    try:
        self.__load_catalog_cache(progtrack)
    except api_errors.CatalogCacheError:
        # If the load failed because of a bad version,
        # corruption, or because it was missing, just try to
        # rebuild it automatically.
        self.__cache_catalogs(progtrack)

    # Add the packages which are installed, but not in the catalog.
    # XXX Should we have a different state for these, so we can flag
    # them to the user?
    for state, f in self.__pkg_states.values():
        if state != PKG_STATE_INSTALLED:
            continue

        # cache_fmri will automatically determine whether the
        # fmri is in the catalog and then cache if needed.  The
        # fmri (or its version or publisher information) could
        # be missing for a number of reasons:
        #   * the package's publisher was removed, and no other
        #     publisher has a matching catalog entry
        #   * the fmri does not exist in the catalogs of any
        #     existing publisher, even though the publisher
        #     of the installed package has a catalog
        #   * the package's publisher was removed or does not
        #     exist in the installed package publisher's
        #     catalog, but another publisher has a matching
        #     catalog entry, so the fmri has been cached with
        #     the other publisher's information, and the
        #     installed publisher's information is missing
        #
        # The state of the package itself may be installed, but
        # the package is unknown to the publisher (not in its
        # catalog).
        catalog.Catalog.cache_fmri(self._catalog, f,
            f.get_publisher(), known=False)
def __destroy_catalog_cache(self):
croot = os.path.join(self.imgdir, "catalog")
# Remove catalog cache files (including old ones).
croot = os.path.join(self.imgdir, "catalog")
for fname in ("pkg_names.pkl", "catalog.pkl", "catalog_cache"):
fpath = os.path.join(croot, fname)
try:
portable.remove(fpath)
except KeyboardInterrupt:
raise
except:
# If for any reason, the file can't be removed,
# it doesn't matter as it will be overwritten.
pass
# Reset the in-memory cache.
self._catalog = {}
self.__catalog_cache_mod_time = None
def _get_publisher_meta_root(self, prefix):
return os.path.join(self.imgdir, "catalog", prefix)
def has_catalog(self, prefix):
return os.path.exists(os.path.join(
self._get_publisher_meta_root(prefix), "catalog"))
def remove_publisher_metadata(self, pub):
"""Removes the metadata for the specified publisher object."""
try:
del self.__catalogs[pub.prefix]
except KeyError:
# May not have been loaded yet.
pass
pub.remove_meta_root()
self.__destroy_catalog_cache()
        def gen_installed_pkg_names(self):
                """Generate the string representation of all installed
                packages. This is faster than going through gen_installed_pkgs
                when all that will be done is to extract the strings from
                the result.
                """
                if self.__pkg_states is not None:
                        # State already loaded; emit fmri strings without
                        # publisher information (anarchy=True).
                        for i in self.__pkg_states.values():
                                yield i[1].get_fmri(anarchy=True)
                else:
                        installed_state_dir = "%s/state/installed" % \
                            self.imgdir
                        if os.path.isdir(installed_state_dir):
                                # Fast path: state dir holds one url-encoded
                                # entry per installed package.
                                for pl in os.listdir(installed_state_dir):
                                        yield "pkg:/" + urllib.unquote(pl)
                        else:
                                # Legacy layout: walk <imgdir>/pkg/<name>/<ver>
                                # looking for "installed" marker files.
                                proot = "%s/pkg" % self.imgdir
                                for pd in sorted(os.listdir(proot)):
                                        for vd in \
                                            sorted(os.listdir("%s/%s" %
                                            (proot, pd))):
                                                path = "%s/%s/%s/installed" % \
                                                    (proot, pd, vd)
                                                if not os.path.exists(path):
                                                        continue
                                                yield urllib.unquote(
                                                    "pkg:/%s@%s" % (pd, vd))
# This could simply call self.inventory() (or be replaced by inventory),
# but it turns out to be about 20% slower.
def gen_installed_pkgs(self):
"""Return an iteration through the installed packages."""
self.__load_pkg_states()
return (i[1] for i in self.__pkg_states.values())
        def __load_pkg_states(self):
                """Build up the package state dictionary.
                This dictionary maps the full fmri string to a tuple of the
                state, the prefix of the publisher from which it's installed,
                and the fmri object.
                Note that this dictionary only maps installed packages. Use
                get_pkg_state_by_fmri() to retrieve the state for arbitrary
                packages.
                """
                # Already loaded; nothing to do.
                if self.__pkg_states is not None:
                        return
                installed_state_dir = "%s/state/installed" % self.imgdir
                self.__pkg_states = {}
                # If the state directory structure has already been created,
                # loading information from it is fast. The directory is
                # populated with symlinks, named by their (url-encoded) FMRI,
                # which point to the "installed" file in the corresponding
                # directory under /var/pkg.
                if os.path.isdir(installed_state_dir):
                        for pl in sorted(os.listdir(installed_state_dir)):
                                fmristr = urllib.unquote(pl)
                                f = pkg.fmri.PkgFmri(fmristr)
                                path = self._install_file(f)
                                pub = self.installed_file_publisher(path)
                                f.set_publisher(pub)
                                self.__pkg_states[fmristr] = \
                                    (PKG_STATE_INSTALLED, f)
                        return
                # Otherwise, we must iterate through the earlier installed
                # state. One day, this can be removed.
                proot = "%s/pkg" % self.imgdir
                for pd in sorted(os.listdir(proot)):
                        for vd in sorted(os.listdir("%s/%s" % (proot, pd))):
                                # Only entries with an "installed" marker
                                # file count as installed packages.
                                path = "%s/%s/%s/installed" % (proot, pd, vd)
                                if not os.path.exists(path):
                                        continue
                                fmristr = urllib.unquote("%s@%s" % (pd, vd))
                                pub = self.installed_file_publisher(path)
                                f = pkg.fmri.PkgFmri(fmristr, publisher = pub)
                                self.__pkg_states[fmristr] = \
                                    (PKG_STATE_INSTALLED, f)
def clear_pkg_state(self):
self.__pkg_states = None
def strtofmri(self, myfmri):
return pkg.fmri.PkgFmri(myfmri, self.attrs["Build-Release"])
def strtomatchingfmri(self, myfmri):
return pkg.fmri.MatchingPkgFmri(myfmri,
self.attrs["Build-Release"])
        def load_constraints(self, progtrack):
                """Load constraints for all install pkgs.
                Walks every installed package's manifest and feeds the
                version constraints declared by its "depend" actions into
                the image's constraint set.  'progtrack' is ticked once per
                dependency examined."""
                for fmri in self.gen_installed_pkgs():
                        # skip loading if already done
                        if self.constraints.start_loading(fmri):
                                mfst = self.get_manifest(fmri)
                                for dep in mfst.gen_actions_by_type("depend",
                                    self.list_excludes()):
                                        progtrack.evaluate_progress()
                                        f, con = dep.parse(self,
                                            fmri.get_name())
                                        self.constraints.update_constraints(con)
                                self.constraints.finish_loading(fmri)
def get_installed_unbound_inc_list(self):
"""Returns list of packages containing incorporation
dependencies on which no other pkgs depend."""
inc_tuples = []
dependents = set()
for fmri in self.gen_installed_pkgs():
fmri_name = fmri.get_pkg_stem()
mfst = self.get_manifest(fmri)
for dep in mfst.gen_actions_by_type("depend",
self.list_excludes()):
con_fmri = dep.get_constrained_fmri(self)
if con_fmri:
con_name = con_fmri.get_pkg_stem()
dependents.add(con_name)
inc_tuples.append((fmri_name, con_name))
# remove those incorporations which are depended on by other
# incorporations.
deletions = 0
for i, a in enumerate(inc_tuples[:]):
if a[0] in dependents:
del inc_tuples[i - deletions]
return list(set([ a[0] for a in inc_tuples ]))
def get_user_by_name(self, name):
return portable.get_user_by_name(name, self.root,
self.type != IMG_USER)
def get_name_by_uid(self, uid, returnuid = False):
# XXX What to do about IMG_PARTIAL?
try:
return portable.get_name_by_uid(uid, self.root,
self.type != IMG_USER)
except KeyError:
if returnuid:
return uid
else:
raise
def get_group_by_name(self, name):
return portable.get_group_by_name(name, self.root,
self.type != IMG_USER)
def get_name_by_gid(self, gid, returngid = False):
try:
return portable.get_name_by_gid(gid, self.root,
self.type != IMG_USER)
except KeyError:
if returngid:
return gid
else:
raise
@staticmethod
def __multimatch(name, patterns, matcher):
"""Applies a matcher to a name across a list of patterns.
Returns all tuples of patterns which match the name. Each tuple
contains the index into the original list, the pattern itself,
the package version, the publisher, and the raw publisher
string."""
return [
(i, pat, pat.tuple()[2],
pat.get_publisher(), pat.get_publisher_str())
for i, pat in enumerate(patterns)
if matcher(name, pat.tuple()[1])
]
        def __inventory(self, patterns=None, all_known=False, matcher=None,
            constraint=pkg.version.CONSTRAINT_AUTO, ordered=True):
                """Private method providing the back-end for inventory().
                Yields (fmri, state-dict) tuples for catalog entries
                matching 'patterns'; see inventory() for parameter
                semantics.  Raises InventoryException for illegal patterns
                (before iteration) or unmatched patterns (at the end)."""
                if not matcher:
                        matcher = pkg.fmri.fmri_match
                if not patterns:
                        patterns = []
                # Store the original patterns before we possibly turn them into
                # PkgFmri objects, so we can give them back to the user in error
                # messages.
                opatterns = patterns[:]
                illegals = []
                for i, pat in enumerate(patterns):
                        if not isinstance(pat, pkg.fmri.PkgFmri):
                                try:
                                        # Globbing in any pattern switches the
                                        # matcher for all patterns.
                                        if "*" in pat or "?" in pat:
                                                matcher = pkg.fmri.glob_match
                                                patterns[i] = \
                                                    pkg.fmri.MatchingPkgFmri(
                                                        pat, "5.11")
                                        else:
                                                patterns[i] = \
                                                    pkg.fmri.PkgFmri(pat,
                                                    "5.11")
                                except pkg.fmri.IllegalFmri, e:
                                        illegals.append(e)
                if illegals:
                        raise api_errors.InventoryException(illegal=illegals)
                ppub = self.cfg_cache.preferred_publisher
                # matchingpats is the set of all the patterns which matched a
                # package in the catalog. This allows us to return partial
                # failure if some patterns match and some don't.
                # XXX It would be nice to keep track of why some patterns failed
                # to match -- based on name, version, or publisher.
                matchingpats = set()
                if ordered:
                        entries = sorted(self._catalog.keys())
                else:
                        entries = self._catalog.keys()
                for name in entries:
                        # Eliminate all patterns not matching "name". If there
                        # are no patterns left, go on to the next name, but only
                        # if there were any to start with.
                        matches = self.__multimatch(name, patterns, matcher)
                        if patterns and not matches:
                                continue
                        newest = self._catalog[name]["versions"][-1]
                        for ver in reversed(self._catalog[name]["versions"]):
                                # If a pattern specified a version and that
                                # version isn't succeeded by "ver", then record
                                # the pattern for removal from consideration.
                                nomatch = []
                                for i, match in enumerate(matches):
                                        if match[2] and \
                                            not ver.is_successor(match[2],
                                            constraint):
                                                nomatch.append(i)
                                # Eliminate the name matches that didn't match
                                # on versions. We need to create a new list
                                # because we need to reuse the original
                                # "matches" for each new version.
                                vmatches = [
                                    matches[i]
                                    for i, match in enumerate(matches)
                                    if i not in nomatch
                                ]
                                # If we deleted all contenders (if we had any to
                                # begin with), go on to the next version.
                                if matches and not vmatches:
                                        continue
                                # Like the version skipping above, do the same
                                # for publishers.
                                pubstate = self._catalog[name][str(ver)][1]
                                nomatch = []
                                for i, match in enumerate(vmatches):
                                        if match[3] and \
                                            match[3] not in pubstate:
                                                nomatch.append(i)
                                pmatches = [
                                    vmatches[i]
                                    for i, match in enumerate(vmatches)
                                    if i not in nomatch
                                ]
                                if vmatches and not pmatches:
                                        continue
                                # If no patterns were specified or any still-
                                # matching pattern specified no publisher, we
                                # use the entire list of publishers for this
                                # version. Otherwise, we use the intersection
                                # of the list of publishers in pubstate, and
                                # the publishers in the patterns.
                                aset = set(i[3] for i in pmatches)
                                if aset and None not in aset:
                                        publist = set(
                                            m[3:5]
                                            for m in pmatches
                                            if m[3] in pubstate
                                        )
                                else:
                                        publist = zip(pubstate.keys(),
                                            pubstate.keys())
                                pfmri = self._catalog[name][str(ver)][0]
                                inst_state = self.get_pkg_state_by_fmri(pfmri)
                                inst_pub = self.get_pkg_pub_by_fmri(pfmri)
                                state = {
                                    "upgradable": ver != newest,
                                    "frozen": False,
                                    "incorporated": False,
                                    "excludes": False
                                }
                                # We yield copies of the fmri objects in the
                                # catalog because we add the publishers in, and
                                # don't want to mess up the canonical catalog.
                                # If a pattern had specified a publisher as
                                # preferred, be sure to emit an fmri that way,
                                # too.
                                yielded = False
                                if all_known:
                                        for pub, rpub in publist:
                                                nfmri = pfmri.copy()
                                                nfmri.set_publisher(rpub,
                                                    pub == ppub)
                                                st = state.copy()
                                                if pub == inst_pub:
                                                        st["state"] = \
                                                            PKG_STATE_INSTALLED
                                                else:
                                                        st["state"] = \
                                                            PKG_STATE_KNOWN
                                                st["in_catalog"] = pubstate[pub]
                                                yield nfmri, st
                                                yielded = True
                                elif inst_state == PKG_STATE_INSTALLED:
                                        nfmri = pfmri.copy()
                                        nfmri.set_publisher(inst_pub,
                                            inst_pub == ppub)
                                        state["state"] = inst_state
                                        state["in_catalog"] = pubstate[inst_pub]
                                        yield nfmri, state
                                        yielded = True
                                # Only patterns that produced output count as
                                # having matched.
                                if yielded:
                                        matchingpats |= set(
                                            i[:2] for i in pmatches)
                nonmatchingpats = [
                    opatterns[i]
                    for i, f in set(enumerate(patterns)) - matchingpats
                ]
                if nonmatchingpats:
                        raise api_errors.InventoryException(
                            notfound=nonmatchingpats)
def inventory(self, *args, **kwargs):
"""Enumerate the package FMRIs in the image's catalog, yielding
a list of tuples of the format (fmri, pkg state dict).
If "patterns" is None (the default) or an empty sequence, all
package names will match. Otherwise, it is a list of patterns
to match against FMRIs in the catalog.
If "all_known" is False (the default), only installed packages
will be enumerated. If True, all known packages will be
enumerated.
The "matcher" parameter should specify a function taking two
string arguments: a name and a pattern, returning True if the
pattern matches the name, and False otherwise. By default, the
matcher will be pkg.fmri.fmri_match().
The "constraint" parameter defines how a version specified in a
pattern matches a version in the catalog. By default, a natural
"subsetting" constraint is used.
The "ordered" parameter is a boolean value that indicates
whether the returned list should first be sorted by name before
being sorted by version (descending). By default, this is True.
"""
# "preferred" is a private argument that is currently only used
# in evaluate_fmri(), but could be made more generally useful.
# "preferred" ensures that all potential matches from the
# preferred publisher are generated before those from
# non-preferred publishers. In the current implementation, this
# consumes more memory.
preferred = kwargs.pop("preferred", False)
ppub = self.cfg_cache.preferred_publisher
if not preferred:
for f in self.__inventory(*args, **kwargs):
yield f
else:
nplist = []
firstnp = None
for f in self.__inventory(*args, **kwargs):
if f[0].get_publisher() == ppub:
yield f
else:
nplist.append(f)
for f in nplist:
yield f
def update_index_dir(self, postfix="index"):
"""Since the index directory will not reliably be updated when
the image root is, this should be called prior to using the
index directory.
"""
self.index_dir = os.path.join(self.imgdir, postfix)
def incoming_download_dir(self):
"""Return the directory path for incoming downloads
that have yet to be completed. Once a file has been
successfully downloaded, it is moved to the cached download
directory."""
return self.dl_cache_incoming
def cached_download_dir(self):
"""Return the directory path for cached content.
Files that have been successfully downloaded live here."""
return self.dl_cache_dir
def cleanup_downloads(self):
"""Clean up any downloads that were in progress but that
did not successfully finish."""
shutil.rmtree(self.dl_cache_incoming, True)
def cleanup_cached_content(self):
"""Delete the directory that stores all of our cached
downloaded content. This may take a while for a large
directory hierarchy. Don't clean up caches if the
user overrode the underlying setting using PKG_CACHEDIR. """
if not self.is_user_cache_dir and \
self.cfg_cache.get_policy(imageconfig.FLUSH_CONTENT_CACHE):
msg("Deleting content cache")
shutil.rmtree(self.dl_cache_dir, True)
def salvagedir(self, path):
"""Called when directory contains something and it's not
supposed to because it's being deleted. XXX Need to work out a
better error passback mechanism. Path is rooted in /...."""
salvagedir = os.path.normpath(
os.path.join(self.imgdir, "lost+found",
path + "-" + time.strftime("%Y%m%dT%H%M%SZ")))
parent = os.path.dirname(salvagedir)
if not os.path.exists(parent):
os.makedirs(parent)
shutil.move(os.path.normpath(os.path.join(self.root, path)),
salvagedir)
# XXX need a better way to do this.
emsg("\nWarning - directory %s not empty - contents preserved "
"in %s" % (path, salvagedir))
def temporary_file(self):
"""create a temp file under image directory for various
purposes"""
tempdir = os.path.normpath(os.path.join(self.imgdir, "tmp"))
if not os.path.exists(tempdir):
os.makedirs(tempdir)
fd, name = tempfile.mkstemp(dir=tempdir)
os.close(fd)
return name
        def __filter_install_matches(self, matches, names):
                """Attempts to eliminate redundant matches found during
                packaging operations:
                * First, stems of installed packages for publishers that
                are now unknown (no longer present in the image
                configuration) are dropped.
                * Second, if multiple matches are still present, stems of
                installed packages that are not presently in the
                corresponding publisher's catalog are dropped.
                * Finally, if multiple matches are still present, all
                stems except for those in state PKG_STATE_INSTALLED are
                dropped.
                Returns a list of the filtered matches, along with a dict of
                their unique names and a dict containing package state
                information."""
                olist = []
                onames = {}
                # First eliminate any duplicate matches that are for unknown
                # publishers (publishers which have been removed from the image
                # configuration).
                publist = [p.prefix for p in self.get_publishers().values()]
                for m in matches:
                        if m.get_publisher() in publist:
                                stem = m.get_pkg_stem()
                                onames[stem] = names[stem]
                                olist.append(m)
                # Next, if there are still multiple matches, eliminate fmris
                # belonging to publishers that no longer have the fmri in their
                # catalog.
                found_state = False
                if len(onames) > 1:
                        mlist = []
                        mnames = {}
                        for m in olist:
                                stem = m.get_pkg_stem()
                                st = onames[stem]
                                if st["in_catalog"]:
                                        # Remember whether any surviving match
                                        # is actually installed.
                                        if st["state"] == PKG_STATE_INSTALLED:
                                                found_state = True
                                        mnames[stem] = onames[stem]
                                        mlist.append(m)
                        olist = mlist
                        onames = mnames
                # Finally, if there are still multiple matches, and a known stem
                # has been found in the provided state, then eliminate any stems
                # that do not have the specified state.
                if found_state and len(onames) > 1:
                        mlist = []
                        mnames = {}
                        for m in olist:
                                stem = m.get_pkg_stem()
                                if onames[stem]["state"] == PKG_STATE_INSTALLED:
                                        mnames[stem] = onames[stem]
                                        mlist.append(m)
                        olist = mlist
                        onames = mnames
                return olist, onames
        def make_install_plan(self, pkg_list, progtrack, check_cancelation,
            noexecute, filters=None, verbose=False, multimatch_ignore=False):
                """Take a list of packages, specified in pkg_list, and attempt
                to assemble an appropriate image plan. This is a helper
                routine for some common operations in the client.
                This method checks all publishers for a package match;
                however, it defaults to choosing the preferred publisher
                when an ambiguous package name is specified. If the user
                wishes to install a package from a non-preferred publisher,
                the full FMRI that contains a publisher should be used
                to name the package.
                'multimatch_ignore' is an optional, boolean value that
                indicates whether packages that have multiple matches for
                only non-preferred publishers should be ignored when creating
                the install plan. This is intended to be used during an
                image-update.
                Raises PlanCreationException when any pattern is unmatched,
                ambiguous, illegal, or violates a constraint.
                """
                self.load_catalogs(progtrack)
                if filters is None:
                        filters = []
                error = 0
                ip = imageplan.ImagePlan(self, progtrack, check_cancelation,
                    filters=filters, noexecute=noexecute)
                progtrack.evaluate_start()
                self.load_constraints(progtrack)
                unmatched_fmris = []
                multiple_matches = []
                illegal_fmris = []
                constraint_violations = []
                # order package list so that any unbound incorporations are
                # done first
                inc_list = self.get_installed_unbound_inc_list()
                head = []
                tail = []
                for p in pkg_list:
                        if p in inc_list:
                                head.append(p)
                        else:
                                tail.append(p)
                pkg_list = head + tail
                # This approach works only for cases w/ simple
                # incorporations; the apply_constraints_to_fmri
                # call below binds the version too quickly. This
                # awaits a proper solver.
                for p in pkg_list:
                        progtrack.evaluate_progress()
                        try:
                                conp = pkg.fmri.PkgFmri(p,
                                    self.attrs["Build-Release"])
                        except pkg.fmri.IllegalFmri:
                                illegal_fmris.append(p)
                                error = 1
                                continue
                        try:
                                conp = \
                                    self.constraints.apply_constraints_to_fmri(
                                        conp, auto=True)
                        except constraint.ConstraintException, e:
                                error = 1
                                constraint_violations.extend(str(e).split("\n"))
                                continue
                        # If we were passed in an fmri object or a string that
                        # anchors the package stem with the scheme, match on the
                        # stem exactly as given. Otherwise we can let the
                        # default, looser matching mechanism be used.
                        # inventory() will override if globbing characters are
                        # used.
                        matcher = None
                        if isinstance(p, pkg.fmri.PkgFmri) or \
                            p.startswith("pkg:/"):
                                matcher = pkg.fmri.exact_name_match
                        try:
                                matches = list(self.inventory([conp],
                                    all_known=True, matcher=matcher,
                                    ordered=False))
                        except api_errors.InventoryException, e:
                                assert(not (e.notfound and e.illegal))
                                assert(e.notfound or e.illegal)
                                error = 1
                                if e.notfound:
                                        unmatched_fmris.append(p)
                                else:
                                        illegal_fmris.append(p)
                                continue
                        # Partition the matches by whether they come from
                        # the preferred publisher.
                        pnames = {}
                        pmatch = []
                        npnames = {}
                        npmatch = []
                        for m, st in matches:
                                if m.preferred_publisher():
                                        pnames[m.get_pkg_stem()] = st
                                        pmatch.append(m)
                                else:
                                        npnames[m.get_pkg_stem()] = st
                                        npmatch.append(m)
                        if len(pnames) > 1:
                                # There can only be one preferred publisher, so
                                # filtering is pointless and these are truly
                                # ambiguous matches.
                                multiple_matches.append((p, pnames.keys()))
                                error = 1
                                continue
                        elif not pnames and len(npnames) > 1:
                                npmatch, npnames = \
                                    self.__filter_install_matches(npmatch,
                                    npnames)
                                if len(npnames) > 1:
                                        if multimatch_ignore:
                                                # Caller has requested that this
                                                # package be skipped if multiple
                                                # matches are found.
                                                continue
                                        # If there are still multiple matches
                                        # after filtering, fail.
                                        multiple_matches.append((p,
                                            npnames.keys()))
                                        error = 1
                                        continue
                        # matches is a list reverse sorted by version, so take
                        # the first; i.e., the latest.
                        if pmatch:
                                ip.propose_fmri(pmatch[0])
                        else:
                                ip.propose_fmri(npmatch[0])
                if error != 0:
                        raise api_errors.PlanCreationException(unmatched_fmris,
                            multiple_matches, [], illegal_fmris,
                            constraint_violations=constraint_violations)
                self.__call_imageplan_evaluate(ip, verbose)
        def make_uninstall_plan(self, fmri_list, recursive_removal,
            progresstracker, check_cancelation, noexecute, verbose=False):
                """Take a list of fmri patterns in 'fmri_list' and build an
                image plan proposing their removal.  Raises
                PlanCreationException when a pattern is ambiguous, illegal,
                matches only uninstalled (but known) packages, or matches
                nothing at all."""
                ip = imageplan.ImagePlan(self, progresstracker,
                    check_cancelation, recursive_removal, noexecute=noexecute)
                self.load_catalogs(progresstracker)
                err = 0
                unmatched_fmris = []
                multiple_matches = []
                missing_matches = []
                illegal_fmris = []
                progresstracker.evaluate_start()
                for ppat in fmri_list:
                        progresstracker.evaluate_progress()
                        try:
                                matches = list(self.inventory([ppat],
                                    ordered=False))
                        except api_errors.InventoryException, e:
                                assert(not (e.notfound and e.illegal))
                                if e.notfound:
                                        # Not installed; if the pattern matches
                                        # a known package it is "missing",
                                        # otherwise entirely unmatched.
                                        try:
                                                list(self.inventory([ppat],
                                                    all_known=True,
                                                    ordered=False))
                                                missing_matches.append(ppat)
                                        except api_errors.InventoryException:
                                                unmatched_fmris.append(ppat)
                                elif e.illegal:
                                        illegal_fmris.append(ppat)
                                else:
                                        raise RuntimeError("Caught inventory "
                                            "exception without unmatched or "
                                            "illegal fmris set.")
                                err = 1
                                continue
                        if len(matches) > 1:
                                matchlist = [m for m, state in matches]
                                multiple_matches.append((ppat, matchlist))
                                err = 1
                                continue
                        # Propose the removal of the first (and only!) match.
                        ip.propose_fmri_removal(matches[0][0])
                if err == 1:
                        raise api_errors.PlanCreationException(unmatched_fmris,
                            multiple_matches, missing_matches, illegal_fmris)
                self.__call_imageplan_evaluate(ip, verbose)
        def ipkg_is_up_to_date(self, actual_cmd, check_cancelation, noexecute,
            refresh_allowed=True, progtrack=None):
                """ Test whether SUNWipkg is updated to the latest version
                known to be available for this image.  Returns True when the
                resulting install plan has nothing to do. """
                #
                # This routine makes the distinction between the "target image",
                # which will be altered, and the "running image", which is
                # to say whatever image appears to contain the version of the
                # pkg command we're running.
                #
                #
                # There are two relevant cases here:
                # 1) Packaging code and image we're updating are the same
                # image. (i.e. 'pkg image-update')
                #
                # 2) Packaging code's image and the image we're updating are
                # different (i.e. 'pkg image-update -R')
                #
                # In general, we care about getting the user to run the
                # most recent packaging code available for their build. So,
                # if we're not in the liveroot case, we create a new image
                # which represents "/" on the system.
                #
                if not progtrack:
                        progtrack = progress.QuietProgressTracker()
                img = self
                if not img.is_liveroot():
                        newimg = Image()
                        cmdpath = os.path.join(os.getcwd(), actual_cmd)
                        cmdpath = os.path.realpath(cmdpath)
                        cmddir = os.path.dirname(os.path.realpath(cmdpath))
                        #
                        # Find the path to ourselves, and use that
                        # as a way to locate the image we're in. It's
                        # not perfect-- we could be in a developer's
                        # workspace, for example.
                        #
                        newimg.find_root(cmddir)
                        newimg.load_config()
                        if refresh_allowed:
                                # If refreshing publisher metadata is allowed,
                                # then perform a refresh so that a new SUNWipkg
                                # can be discovered.
                                try:
                                        newimg.refresh_publishers(
                                            progtrack=progtrack)
                                except api_errors.CatalogRefreshException, cre:
                                        cre.message = \
                                            _("SUNWipkg update check failed.")
                                        raise
                        else:
                                # If refresh wasn't called, the catalogs have to
                                # be manually loaded.
                                newimg.load_catalogs(progtrack)
                        img = newimg
                # XXX call to progress tracker that SUNWipkg is being refreshed
                img.make_install_plan(["SUNWipkg"], progtrack,
                    check_cancelation, noexecute, filters = [])
                return img.imageplan.nothingtodo()
def installed_fmris_from_args(self, args):
"""Helper function to translate client command line arguments
into a list of installed fmris. Used by info, contents,
verify.
"""
found = []
notfound = []
illegals = []
try:
for m in self.inventory(args, ordered=False):
found.append(m[0])
except api_errors.InventoryException, e:
illegals = e.illegal
notfound = e.notfound
return found, notfound, illegals
def can_change_file_ownership(self):
"""Determine whether the current process has the authority to
change the ownership of a file to the given uid and gid within
this image. Either uid or gid can be -1.
"""
if not hasattr(self, "_change_file_ownership"):
t = None
try:
t = self.temporary_file()
portable.chown(t, 0, 0)
self._change_file_ownership = True
except OSError:
self._change_file_ownership = False
if t:
os.unlink(t)
return self._change_file_ownership
| agpl-3.0 |
DaniilLeksin/theblog | env/lib/python2.7/site-packages/django/contrib/gis/tests/gis_migrations/test_operations.py | 9 | 4776 | from __future__ import unicode_literals
from unittest import skipUnless
from django.contrib.gis.tests.utils import HAS_SPATIAL_DB, mysql
from django.db import connection, migrations, models
from django.db.migrations.migration import Migration
from django.db.migrations.state import ProjectState
from django.test import TransactionTestCase
if HAS_SPATIAL_DB:
from django.contrib.gis.db.models import fields
try:
from django.contrib.gis.models import GeometryColumns
HAS_GEOMETRY_COLUMNS = True
except ImportError:
HAS_GEOMETRY_COLUMNS = False
@skipUnless(HAS_SPATIAL_DB, "Spatial db is required.")
class OperationTests(TransactionTestCase):
    """Exercises schema-migration operations (CreateModel, AddField,
    RemoveField) on GIS-enabled columns against a live spatial database."""
    available_apps = ["django.contrib.gis.tests.gis_migrations"]
    def tearDown(self):
        # Delete table after testing
        self.apply_operations('gis', self.current_state, [migrations.DeleteModel("Neighborhood")])
        super(OperationTests, self).tearDown()
    def get_table_description(self, table):
        # Introspect the live database for the table's column layout.
        with connection.cursor() as cursor:
            return connection.introspection.get_table_description(cursor, table)
    def assertColumnExists(self, table, column):
        self.assertIn(column, [c.name for c in self.get_table_description(table)])
    def assertColumnNotExists(self, table, column):
        self.assertNotIn(column, [c.name for c in self.get_table_description(table)])
    def apply_operations(self, app_label, project_state, operations):
        # Wrap the given operations in an ad-hoc migration and apply it,
        # returning the resulting project state.
        migration = Migration('name', app_label)
        migration.operations = operations
        with connection.schema_editor() as editor:
            return migration.apply(project_state, editor)
    def set_up_test_model(self):
        # Create the Neighborhood model, including a geometry column.
        operations = [migrations.CreateModel(
            "Neighborhood",
            [
                ("id", models.AutoField(primary_key=True)),
                ('name', models.CharField(max_length=100, unique=True)),
                ('geom', fields.MultiPolygonField(srid=4326)),
            ],
        )]
        return self.apply_operations('gis', ProjectState(), operations)
    def assertGeometryColumnsCount(self, expected_count):
        table_name = "gis_neighborhood"
        if connection.features.uppercases_column_names:
            table_name = table_name.upper()
        self.assertEqual(
            GeometryColumns.objects.filter(**{
                GeometryColumns.table_name_col(): table_name,
            }).count(),
            expected_count
        )
    def test_add_gis_field(self):
        """
        Tests the AddField operation with a GIS-enabled column.
        """
        project_state = self.set_up_test_model()
        operation = migrations.AddField(
            "Neighborhood",
            "path",
            fields.LineStringField(srid=4326),
        )
        new_state = project_state.clone()
        operation.state_forwards("gis", new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards("gis", editor, project_state, new_state)
        self.current_state = new_state
        self.assertColumnExists("gis_neighborhood", "path")
        # Test GeometryColumns when available
        if HAS_GEOMETRY_COLUMNS:
            self.assertGeometryColumnsCount(2)
        if self.has_spatial_indexes:
            with connection.cursor() as cursor:
                indexes = connection.introspection.get_indexes(cursor, "gis_neighborhood")
            self.assertIn('path', indexes)
    def test_remove_gis_field(self):
        """
        Tests the RemoveField operation with a GIS-enabled column.
        """
        project_state = self.set_up_test_model()
        operation = migrations.RemoveField("Neighborhood", "geom")
        new_state = project_state.clone()
        operation.state_forwards("gis", new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards("gis", editor, project_state, new_state)
        self.current_state = new_state
        self.assertColumnNotExists("gis_neighborhood", "geom")
        # Test GeometryColumns when available
        if HAS_GEOMETRY_COLUMNS:
            self.assertGeometryColumnsCount(0)
    def test_create_model_spatial_index(self):
        self.current_state = self.set_up_test_model()
        if not self.has_spatial_indexes:
            self.skipTest("No support for Spatial indexes")
        with connection.cursor() as cursor:
            indexes = connection.introspection.get_indexes(cursor, "gis_neighborhood")
        self.assertIn('geom', indexes)
    @property
    def has_spatial_indexes(self):
        # MySQL's spatial-index support depends on the storage engine,
        # so ask the backend; other supported backends always have them.
        if mysql:
            with connection.cursor() as cursor:
                return connection.introspection.supports_spatial_index(cursor, "gis_neighborhood")
        return True
| gpl-2.0 |
kobejean/tensorflow | tensorflow/python/debug/cli/cli_config_test.py | 68 | 5541 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cli_config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import shutil
import tempfile
from tensorflow.python.debug.cli import cli_config
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
class CLIConfigTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tmp_dir = tempfile.mkdtemp()
self._tmp_config_path = os.path.join(self._tmp_dir, ".tfdbg_config")
self.assertFalse(gfile.Exists(self._tmp_config_path))
super(CLIConfigTest, self).setUp()
def tearDown(self):
shutil.rmtree(self._tmp_dir)
super(CLIConfigTest, self).tearDown()
def testConstructCLIConfigWithoutFile(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
self.assertEqual(20, config.get("graph_recursion_depth"))
self.assertEqual(True, config.get("mouse_mode"))
with self.assertRaises(KeyError):
config.get("property_that_should_not_exist")
self.assertTrue(gfile.Exists(self._tmp_config_path))
def testCLIConfigForwardCompatibilityTest(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
with open(self._tmp_config_path, "rt") as f:
config_json = json.load(f)
# Remove a field to simulate forward compatibility test.
del config_json["graph_recursion_depth"]
with open(self._tmp_config_path, "wt") as f:
json.dump(config_json, f)
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
self.assertEqual(20, config.get("graph_recursion_depth"))
def testModifyConfigValue(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
config.set("graph_recursion_depth", 9)
config.set("mouse_mode", False)
self.assertEqual(9, config.get("graph_recursion_depth"))
self.assertEqual(False, config.get("mouse_mode"))
def testModifyConfigValueWithTypeCasting(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
config.set("graph_recursion_depth", "18")
config.set("mouse_mode", "false")
self.assertEqual(18, config.get("graph_recursion_depth"))
self.assertEqual(False, config.get("mouse_mode"))
def testModifyConfigValueWithTypeCastingFailure(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
with self.assertRaises(ValueError):
config.set("mouse_mode", "maybe")
def testLoadFromModifiedConfigFile(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
config.set("graph_recursion_depth", 9)
config.set("mouse_mode", False)
config2 = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
self.assertEqual(9, config2.get("graph_recursion_depth"))
self.assertEqual(False, config2.get("mouse_mode"))
def testSummarizeFromConfig(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
output = config.summarize()
self.assertEqual(
["Command-line configuration:",
"",
" graph_recursion_depth: %d" % config.get("graph_recursion_depth"),
" mouse_mode: %s" % config.get("mouse_mode")], output.lines)
def testSummarizeFromConfigWithHighlight(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
output = config.summarize(highlight="mouse_mode")
self.assertEqual(
["Command-line configuration:",
"",
" graph_recursion_depth: %d" % config.get("graph_recursion_depth"),
" mouse_mode: %s" % config.get("mouse_mode")], output.lines)
self.assertEqual((2, 12, ["underline", "bold"]),
output.font_attr_segs[3][0])
self.assertEqual((14, 18, "bold"), output.font_attr_segs[3][1])
def testSetCallback(self):
    """A callback registered for a property fires on set() and observes
    the freshly stored value."""
    config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
    observed = {"graph_recursion_depth": -1}

    def record_depth(cfg):
        # Capture the value as seen through the config at callback time.
        observed["graph_recursion_depth"] = cfg.get("graph_recursion_depth")

    config.set_callback("graph_recursion_depth", record_depth)
    config.set("graph_recursion_depth",
               config.get("graph_recursion_depth") - 1)
    self.assertEqual(observed["graph_recursion_depth"],
                     config.get("graph_recursion_depth"))
def testSetCallbackInvalidPropertyName(self):
    """Registering a callback for an unknown property raises KeyError."""
    config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
    with self.assertRaises(KeyError):
        config.set_callback("nonexistent_property_name", print)
def testSetCallbackNotCallable(self):
    """Registering a non-callable callback raises TypeError."""
    config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
    with self.assertRaises(TypeError):
        config.set_callback("graph_recursion_depth", 1)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    googletest.main()
| apache-2.0 |
extremenetworks/xkit | EXOS/Python/mibview/mibview.py | 2 | 1758 | #!/usr/bin/env python2.7
# usage: mibview.py [-h] mibString
#
# Convert wildcarded OID MIB views into OID/mask format as used by EXOS.
#
# positional arguments:
# mibString Wildcard MIB string to be translated into OID/mask format
#
# optional arguments:
# -h, --help show this help message and exit
import argparse
import sys
class ArgParser(argparse.ArgumentParser):
    """ArgumentParser variant that prints the full help text (not just the
    usage line) when argument parsing fails."""

    def error(self, message):
        # Report the problem on stderr, show the complete help, then exit
        # with argparse's conventional error status.
        sys.stderr.write('error: %s\n' % message)
        self.print_help()
        sys.exit(2)
def main():
parser = ArgParser(prog='mibview.py',
description='Convert wildcarded OID MIB views into OID/mask format as used by EXOS.')
parser.add_argument('mibString',
help='Wildcard MIB string to be translated into OID/mask format',
type=str, nargs=1)
args = parser.parse_args()
mibView = args.mibString[0].split('.')
#Bitmask to be used in the final output.
mask = 0
newMibView = []
for branch in mibView:
#Left shift the bits in the mask.
mask = mask << 1
if branch is not '*':
#Insert a 1 in the low-order bits of the mask.
mask = mask + 1
#Also, add this value to the new MIB view we are building.
newMibView.append(branch)
else:
#Keep the zero in the low-order bit. Add a zero to the new MIB view we are building.
newMibView.append('0')
newMibView = ".".join(newMibView)
print newMibView + '/' + format(mask, 'X')
if __name__ == '__main__':
    try:
        main()
    except SystemExit:
        # Catch SystemExit to prevent the EXOS shell from exiting to the
        # login prompt when this script is run inside EXOS.
        pass
| bsd-2-clause |
mjfarmer/scada_py | env/lib/python2.7/site-packages/twisted/internet/error.py | 12 | 12677 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Exceptions and errors for use in twisted.internet modules.
"""
from __future__ import division, absolute_import
import socket
from twisted.python import deprecate
from twisted.python.versions import Version
class BindError(Exception):
    """An error occurred binding to an interface"""

    def __str__(self):
        # Render as "<docstring>[: <space-joined args>]." — the class
        # docstring doubles as the human-readable error description.
        parts = [self.__doc__]
        if self.args:
            parts.append(': ')
            parts.append(' '.join(self.args))
        parts.append('.')
        return ''.join(parts)
class CannotListenError(BindError):
    """
    This gets raised by a call to startListening, when the object cannotstart
    listening.

    @ivar interface: the interface I tried to listen on
    @ivar port: the port I tried to listen on
    @ivar socketError: the exception I got when I tried to listen
    @type socketError: L{socket.error}
    """

    def __init__(self, interface, port, socketError):
        BindError.__init__(self, interface, port, socketError)
        self.interface = interface
        self.port = port
        self.socketError = socketError

    def __str__(self):
        # Fall back to "any" when no specific interface was requested.
        return "Couldn't listen on %s:%s: %s." % (
            self.interface or 'any', self.port, self.socketError)
class MulticastJoinError(Exception):
    """
    An attempt to join a multicast group failed.
    """
class MessageLengthError(Exception):
    """Message is too long to send"""

    def __str__(self):
        # "<docstring>[: <joined args>]."
        text = self.__doc__
        if self.args:
            text += ': ' + ' '.join(self.args)
        return text + '.'
class DNSLookupError(IOError):
    """DNS lookup failed"""

    def __str__(self):
        # Describe using the docstring, appending any caller detail.
        if self.args:
            return '%s: %s.' % (self.__doc__, ' '.join(self.args))
        return self.__doc__ + '.'
class ConnectInProgressError(Exception):
    """A connect operation was started and isn't done yet."""


# connection errors
class ConnectError(Exception):
    """An error occurred while connecting"""

    def __init__(self, osError=None, string=""):
        # Keep the OS-level error separately; the detail string becomes
        # the exception args.
        self.osError = osError
        Exception.__init__(self, string)

    def __str__(self):
        # Docstring (or class name) + optional OS error + optional detail.
        text = self.__doc__ or self.__class__.__name__
        if self.osError:
            text = '%s: %s' % (text, self.osError)
        if self.args[0]:
            text = '%s: %s' % (text, self.args[0])
        return text + '.'
# NOTE: each subclass's docstring is user-visible — ConnectError.__str__
# renders self.__doc__ as the error description — so these strings are
# effectively runtime behavior and must not be reworded casually.
# NOTE(review): TimeoutError shadows the builtin of the same name on
# Python 3; a long-standing quirk of this module — confirm before renaming.
class ConnectBindError(ConnectError):
    """Couldn't bind"""


class UnknownHostError(ConnectError):
    """Hostname couldn't be looked up"""


class NoRouteError(ConnectError):
    """No route to host"""


class ConnectionRefusedError(ConnectError):
    """Connection was refused by other side"""


class TCPTimedOutError(ConnectError):
    """TCP connection timed out"""


class BadFileError(ConnectError):
    """File used for UNIX socket is no good"""


class ServiceNameUnknownError(ConnectError):
    """Service name given as port is unknown"""


class UserError(ConnectError):
    """User aborted connection"""


class TimeoutError(UserError):
    """User timeout caused connection failure"""


class SSLError(ConnectError):
    """An SSL error occurred"""
class VerifyError(Exception):
    """Could not verify something that was supposed to be signed.
    """


class PeerVerifyError(VerifyError):
    # NOTE(review): "rejected our verify error" reads oddly; likely means
    # "rejected our verification attempt" — confirm before rewording.
    """The peer rejected our verify error.
    """


class CertificateError(Exception):
    """
    We did not find a certificate where we expected to find one.
    """
# Map well-known errno values to specific ConnectError subclasses, so
# getConnectError() can return something more precise than ConnectError.
try:
    import errno
    errnoMapping = {
        errno.ENETUNREACH: NoRouteError,
        errno.ECONNREFUSED: ConnectionRefusedError,
        errno.ETIMEDOUT: TCPTimedOutError,
    }
    if hasattr(errno, "WSAECONNREFUSED"):
        # Windows (Winsock) reports its own errno namespace.
        errnoMapping[errno.WSAECONNREFUSED] = ConnectionRefusedError
        errnoMapping[errno.WSAENETUNREACH] = NoRouteError
except ImportError:
    # No errno module: fall back to plain ConnectError for everything.
    errnoMapping = {}
def getConnectError(e):
    """Given a socket exception, return connection error."""
    # Accept either an exception instance or a raw (number, string) pair.
    args = e.args if isinstance(e, Exception) else e
    try:
        number, string = args
    except ValueError:
        # Not a two-tuple: report the whole thing as the detail string.
        return ConnectError(string=e)

    if hasattr(socket, 'gaierror') and isinstance(e, socket.gaierror):
        # Only works in 2.2 in newer. Really that means always; #5978 covers
        # this and other wierdnesses in this function.
        klass = UnknownHostError
    else:
        klass = errnoMapping.get(number, ConnectError)
    return klass(number, string)
class ConnectionClosed(Exception):
    """
    Connection was closed, whether cleanly or non-cleanly.
    """
class ConnectionLost(ConnectionClosed):
    """Connection to the other side was lost in a non-clean fashion"""

    def __str__(self):
        # The first docstring line is the base description; subclasses
        # inherit this rendering, so their docstrings are user-visible.
        description = self.__doc__.strip().splitlines()[0]
        if self.args:
            description = '%s: %s' % (description, ' '.join(self.args))
        return description + '.'
class ConnectionAborted(ConnectionLost):
    """
    Connection was aborted locally, using
    L{twisted.internet.interfaces.ITCPTransport.abortConnection}.

    @since: 11.1
    """
class ConnectionDone(ConnectionClosed):
    """Connection was closed cleanly"""

    def __str__(self):
        # "<docstring>[: <joined args>]." — same scheme as the other
        # docstring-described exceptions in this module.
        text = self.__doc__
        if self.args:
            text += ': ' + ' '.join(self.args)
        return text + '.'
class FileDescriptorOverrun(ConnectionLost):
    """
    A mis-use of L{IUNIXTransport.sendFileDescriptor} caused the connection to
    be closed.

    Each file descriptor sent using C{sendFileDescriptor} must be associated
    with at least one byte sent using L{ITransport.write}. If at any point
    fewer bytes have been written than file descriptors have been sent, the
    connection is closed with this exception.
    """


class ConnectionFdescWentAway(ConnectionLost):
    # The docstring below is rendered by ConnectionLost.__str__, so even
    # this placeholder text is user-visible; do not change it without
    # accepting the message change.
    """Uh""" #TODO
class AlreadyCalled(ValueError):
    """Tried to cancel an already-called event"""

    def __str__(self):
        # Docstring is the description; args (if any) add detail.
        base = self.__doc__
        if not self.args:
            return base + '.'
        return '%s: %s.' % (base, ' '.join(self.args))
class AlreadyCancelled(ValueError):
    """Tried to cancel an already-cancelled event"""

    def __str__(self):
        # "<docstring>[: <joined args>]."
        pieces = [self.__doc__]
        if self.args:
            pieces.append(': %s' % (' '.join(self.args),))
        pieces.append('.')
        return ''.join(pieces)
class PotentialZombieWarning(Warning):
    """
    Emitted when L{IReactorProcess.spawnProcess} is called in a way which may
    result in termination of the created child process not being reported.

    Deprecated in Twisted 10.0.
    """
    # Canned warning text used by callers that emit this warning.
    MESSAGE = (
        "spawnProcess called, but the SIGCHLD handler is not "
        "installed. This probably means you have not yet "
        "called reactor.run, or called "
        "reactor.run(installSignalHandler=0). You will probably "
        "never see this process finish, and it may become a "
        "zombie process.")

# Mark the attribute as deprecated so importing it emits a warning.
deprecate.deprecatedModuleAttribute(
    Version("Twisted", 10, 0, 0),
    "There is no longer any potential for zombie process.",
    __name__,
    "PotentialZombieWarning")
class ProcessDone(ConnectionDone):
    """A process has ended without apparent errors"""

    def __init__(self, status):
        # "Done without errors" implies exit code 0 and no signal; only
        # the raw platform status varies.
        Exception.__init__(self, "process finished with exit code 0")
        self.exitCode = 0
        self.signal = None
        self.status = status
class ProcessTerminated(ConnectionLost):
    """
    A process has ended with a probable error condition

    @ivar exitCode: See L{__init__}
    @ivar signal: See L{__init__}
    @ivar status: See L{__init__}
    """

    def __init__(self, exitCode=None, signal=None, status=None):
        """
        @param exitCode: The exit status of the process.  This is roughly like
            the value you might pass to L{os.exit}.  This is L{None} if the
            process exited due to a signal.
        @type exitCode: L{int} or L{types.NoneType}

        @param signal: The exit signal of the process.  This is L{None} if the
            process did not exit due to a signal.
        @type signal: L{int} or L{types.NoneType}

        @param status: The exit code of the process.  This is a platform
            specific combination of the exit code and the exit signal.  See
            L{os.WIFEXITED} and related functions.
        @type status: L{int}
        """
        self.exitCode = exitCode
        self.signal = signal
        self.status = status
        # Build the human-readable summary from whichever pieces are known.
        message = "process ended"
        if exitCode is not None:
            message += " with exit code %s" % (exitCode,)
        if signal is not None:
            message += " by signal %s" % (signal,)
        Exception.__init__(self, message)
class ProcessExitedAlready(Exception):
    """
    The process has already exited and the operation requested can no longer
    be performed.
    """
class NotConnectingError(RuntimeError):
    """The Connector was not connecting when it was asked to stop connecting"""

    def __str__(self):
        # Append any caller-supplied detail to the docstring description.
        if self.args:
            return '%s: %s.' % (self.__doc__, ' '.join(self.args))
        return self.__doc__ + '.'
class NotListeningError(RuntimeError):
    """The Port was not listening when it was asked to stop listening"""

    def __str__(self):
        # Same rendering as the other docstring-described errors here.
        base = self.__doc__
        if self.args:
            base = '%s: %s' % (base, ' '.join(self.args))
        return base + '.'
# Reactor lifecycle errors: each corresponds to calling run/stop/install
# in an invalid reactor state.
class ReactorNotRunning(RuntimeError):
    """
    Error raised when trying to stop a reactor which is not running.
    """


class ReactorNotRestartable(RuntimeError):
    """
    Error raised when trying to run a reactor which was stopped.
    """


class ReactorAlreadyRunning(RuntimeError):
    """
    Error raised when trying to start the reactor multiple times.
    """


class ReactorAlreadyInstalledError(AssertionError):
    """
    Could not install reactor because one is already installed.
    """
class ConnectingCancelledError(Exception):
    """
    An C{Exception} that will be raised when an L{IStreamClientEndpoint} is
    cancelled before it connects.

    @ivar address: The L{IAddress} that is the destination of the
        cancelled L{IStreamClientEndpoint}.
    """

    def __init__(self, address):
        """
        @param address: The L{IAddress} that is the destination of the
            L{IStreamClientEndpoint} that was cancelled.
        """
        # Store the address in args (for the default Exception repr/str)
        # as well as on a named attribute for programmatic access.
        Exception.__init__(self, address)
        self.address = address
class NoProtocol(Exception):
    """
    An C{Exception} that will be raised when the factory given to a
    L{IStreamClientEndpoint} returns C{None} from C{buildProtocol}.
    """


class UnsupportedAddressFamily(Exception):
    """
    An attempt was made to use a socket with an address family (eg I{AF_INET},
    I{AF_INET6}, etc) which is not supported by the reactor.
    """


class UnsupportedSocketType(Exception):
    """
    An attempt was made to use a socket of a type (eg I{SOCK_STREAM},
    I{SOCK_DGRAM}, etc) which is not supported by the reactor.
    """


class AlreadyListened(Exception):
    """
    An attempt was made to listen on a file descriptor which can only be
    listened on once.
    """
class InvalidAddressError(ValueError):
    """
    An invalid address was specified (i.e. neither IPv4 or IPv6, or expected
    one and got the other).

    @ivar address: See L{__init__}
    @ivar message: See L{__init__}
    """

    def __init__(self, address, message):
        """
        @param address: The address that was provided.
        @type address: L{bytes}

        @param message: A native string of additional information provided by
            the calling context.
        @type message: L{str}
        """
        self.address = address
        self.message = message
# Public API of this module.  Fix: the original list omitted several
# names that are defined above (ConnectionAborted, FileDescriptorOverrun,
# ReactorNotRestartable, NoProtocol, AlreadyListened), so star-imports
# silently missed them; they are now included.
__all__ = [
    'BindError', 'CannotListenError', 'MulticastJoinError',
    'MessageLengthError', 'DNSLookupError', 'ConnectInProgressError',
    'ConnectError', 'ConnectBindError', 'UnknownHostError', 'NoRouteError',
    'ConnectionRefusedError', 'TCPTimedOutError', 'BadFileError',
    'ServiceNameUnknownError', 'UserError', 'TimeoutError', 'SSLError',
    'VerifyError', 'PeerVerifyError', 'CertificateError',
    'getConnectError', 'ConnectionClosed', 'ConnectionLost',
    'ConnectionAborted', 'ConnectionDone', 'FileDescriptorOverrun',
    'ConnectionFdescWentAway', 'AlreadyCalled',
    'AlreadyCancelled', 'PotentialZombieWarning', 'ProcessDone',
    'ProcessTerminated', 'ProcessExitedAlready', 'NotConnectingError',
    'NotListeningError', 'ReactorNotRunning', 'ReactorNotRestartable',
    'ReactorAlreadyRunning', 'ReactorAlreadyInstalledError',
    'ConnectingCancelledError', 'NoProtocol',
    'UnsupportedAddressFamily', 'UnsupportedSocketType', 'AlreadyListened',
    'InvalidAddressError']
| gpl-3.0 |
zeroSteiner/boltons | docs/conf.py | 1 | 9797 | # -*- coding: utf-8 -*-
#
# boltons documentation build configuration file, created by
# sphinx-quickstart on Sat Mar 21 00:34:18 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import sphinx
from pprint import pprint
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Make the project root and the boltons package importable so autodoc can
# find the modules being documented.
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_PATH = os.path.abspath(CUR_PATH + '/../')
PACKAGE_PATH = os.path.abspath(CUR_PATH + '/../boltons/')
sys.path.insert(0, PROJECT_PATH)
sys.path.insert(0, PACKAGE_PATH)

# Dump the environment into the build log; useful when debugging RTD builds.
pprint(os.environ)
def get_mod_stats():
    """Count the boltons ``*utils`` modules and their public callables.

    Returns a ``(module_count, type_count, function_count)`` tuple that is
    interpolated into ``rst_epilog`` substitutions below.
    """
    # TODO: docstring percentage.
    import pkgutil
    from boltons.funcutils import get_module_callables

    def _is_private(attr_name):
        # Underscore-prefixed attributes are non-public and not counted.
        return attr_name.startswith('_')

    mod_count = 0
    tot_type_count = 0
    tot_func_count = 0
    for _, mod_name, _ in pkgutil.iter_modules([PACKAGE_PATH]):
        if not mod_name.endswith('utils'):
            continue  # only the *utils modules are part of the doc set
        mod = __import__(mod_name)
        types, funcs = get_module_callables(mod, ignore=_is_private)
        if not types and not funcs:
            continue
        mod_count += 1
        tot_type_count += len(types)
        tot_func_count += len(funcs)
    ret = (mod_count, tot_type_count, tot_func_count)
    print('==== %s modules ==== %s types ==== %s funcs ====' % ret)
    return ret
# Compute the counts once at build time and expose them to every document
# through reST substitutions (|b_mod_count| etc.).
B_MOD_COUNT, B_TYPE_COUNT, B_FUNC_COUNT = get_mod_stats()

rst_epilog = """
.. |b_mod_count| replace:: {mod_count}
.. |b_type_count| replace:: {type_count}
.. |b_func_count| replace:: {func_count}
""".format(mod_count=B_MOD_COUNT,
           type_count=B_TYPE_COUNT,
           func_count=B_FUNC_COUNT)
# -- General configuration ------------------------------------------------

# Generate stub pages for autosummary directives automatically.
autosummary_generate = True

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
]

# Read the Docs is version 1.2 as of writing
# napoleon was merged into Sphinx core in 1.3; older Sphinx needs the
# sphinxcontrib backport instead.
if sphinx.version_info[:2] < (1, 3):
    extensions.append('sphinxcontrib.napoleon')
else:
    extensions.append('sphinx.ext.napoleon')

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'boltons'
copyright = u'2018, Mahmoud Hashemi'
author = u'Mahmoud Hashemi'
version = '18.0'
release = '18.0.0'

# NOTE(review): the date format is only set off Windows — presumably a
# strftime portability workaround; confirm original intent.
if os.name != 'nt':
    today_fmt = '%B %d, %Y'

exclude_patterns = ['_build']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/2.7', None)}

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.  Read the Docs injects its own theme, so the
# RTD theme is only imported for local builds.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    html_theme = 'default'
else:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = ['_themes', sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# TEMP: see https://github.com/rtfd/readthedocs.org/issues/1692
# Add RTD Theme Path.
#if 'html_theme_path' in globals():
# html_theme_path.append('/home/docs/checkouts/readthedocs.org/readthedocs/templates/sphinx')
#else:
# html_theme_path = ['_themes', '/home/docs/checkouts/readthedocs.org/readthedocs/templates/sphinx']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'boltonsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'boltons.tex', u'boltons Documentation',
u'Mahmoud Hashemi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'boltons', u'boltons Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'boltons', u'boltons Documentation',
author, 'boltons', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause |
sauloal/introgressionbrowser | vcfmerger/py-editdist-0.3/test.py | 5 | 1900 | #!/usr/bin/env python
# Copyright (c) 2006 Damien Miller <djm@mindrot.org>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# $Id: test.py,v 1.3 2007/05/03 23:36:36 djm Exp $
import editdist
import unittest
import random
# (string a, string b, expected edit distance) triples covering equal
# strings, insertions, deletions, substitutions, empty inputs and long
# inputs.
test_vectors = (
    ( 'abc', 'abc', 0 ),
    ( 'abc', 'ab', 1 ),
    ( 'abc', 'abcd', 1 ),
    ( 'abc', 'bc', 1 ),
    ( 'abc', 'a', 2 ),
    ( 'abc', '', 3 ),
    ( '', '', 0 ),
    ( 'abc', 'acx', 2 ),
    ( 'abc', 'acxx', 3 ),
    ( 'abc', 'bcd', 2 ),
    ( 'a' * 1000, 'a' * 1000, 0 ),
    ( 'a' * 1000, 'b' * 1000, 1000),
)
def randstring(l):
    """Return a random string of length *l* drawn from a-z."""
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    # random.choice picks uniformly, matching the original
    # randint(0, len(a) - 1) indexing; join avoids quadratic "+=" growth.
    return "".join(random.choice(alphabet) for _ in range(l))
class TestRadix(unittest.TestCase):
    """Exercise editdist.distance() against known vectors and random fuzz."""

    def test_00__test_vectors(self):
        for a, b, score in test_vectors:
            self.assertEqual(editdist.distance(a, b), score)

    def test_01__reversed_test_vectors(self):
        # Edit distance is symmetric, so swapped operands score the same.
        for b, a, score in test_vectors:
            self.assertEqual(editdist.distance(a, b), score)

    def test_02__fuzz(self):
        # list() around range() keeps the concatenation working on both
        # Python 2 and Python 3 (on 3.x, range + range raises TypeError).
        for i in list(range(0, 32)) + list(range(128, 1024, 128)):
            for j in range(0, 32):
                a = randstring(i)
                b = randstring(j)
                dist = editdist.distance(a, b)
                # assertTrue replaces the deprecated assert_ alias.
                self.assertTrue(dist >= 0)
def main():
    """Entry point: run the unit tests in this module."""
    unittest.main()


if __name__ == '__main__':
    main()
| mit |
cisco-openstack/neutron | neutron/tests/retargetable/client_fixtures.py | 24 | 3529 | # Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
This module defines client fixtures that can be used to target the
Neutron API via different methods.
"""
import abc
import fixtures
import six
from neutron.common import exceptions as n_exc
from neutron import context
from neutron import manager
from neutron.tests import base
from neutron.tests.unit import testlib_api
@six.add_metaclass(abc.ABCMeta)
class AbstractClientFixture(fixtures.Fixture):
    """
    Base class for a client that can interact the neutron api in some
    manner.
    """

    @abc.abstractproperty
    def NotFound(self):
        """The exception that indicates a resource could not be found.

        Tests can use this property to assert for a missing resource
        in a client-agnostic way.
        """

    @abc.abstractmethod
    def create_network(self, **kwargs):
        # Create a network from the given attributes.
        pass

    @abc.abstractmethod
    def update_network(self, id_, **kwargs):
        # Update the identified network with the given attributes.
        pass

    @abc.abstractmethod
    def get_network(self, id_, fields=None):
        # Fetch a single network, optionally restricted to *fields*.
        pass

    @abc.abstractmethod
    def get_networks(self, filters=None, fields=None,
                     sorts=None, limit=None, marker=None, page_reverse=False):
        # List networks with optional filtering/sorting/pagination.
        pass

    @abc.abstractmethod
    def delete_network(self, id_):
        # Remove the identified network.
        pass
class PluginClientFixture(AbstractClientFixture):
    """Targets the Neutron API via the plugin API"""

    def __init__(self, plugin_conf):
        super(PluginClientFixture, self).__init__()
        self.plugin_conf = plugin_conf

    def _setUp(self):
        super(PluginClientFixture, self)._setUp()
        # Fresh database and plugin configuration for every test run.
        self.useFixture(testlib_api.SqlFixture())
        self.useFixture(self.plugin_conf)
        self.useFixture(base.PluginFixture(self.plugin_conf.plugin_name))

    @property
    def ctx(self):
        # Lazily build and cache a test-tenant request context.
        if not hasattr(self, '_ctx'):
            self._ctx = context.Context('', 'test-tenant')
        return self._ctx

    @property
    def plugin(self):
        return manager.NeutronManager.get_plugin()

    @property
    def NotFound(self):
        # Exception the plugin raises for a missing network.
        return n_exc.NetworkNotFound

    def create_network(self, **kwargs):
        # Supply defaults that are expected to be set by the api
        # framework
        kwargs.setdefault('admin_state_up', True)
        kwargs.setdefault('shared', False)
        data = dict(network=kwargs)
        result = self.plugin.create_network(self.ctx, data)
        return base.AttributeDict(result)

    def update_network(self, id_, **kwargs):
        data = dict(network=kwargs)
        result = self.plugin.update_network(self.ctx, id_, data)
        return base.AttributeDict(result)

    def get_network(self, *args, **kwargs):
        result = self.plugin.get_network(self.ctx, *args, **kwargs)
        return base.AttributeDict(result)

    def get_networks(self, *args, **kwargs):
        result = self.plugin.get_networks(self.ctx, *args, **kwargs)
        return [base.AttributeDict(x) for x in result]

    def delete_network(self, id_):
        self.plugin.delete_network(self.ctx, id_)
| apache-2.0 |
tradej/pcs | pcs/cluster.py | 1 | 61319 | from __future__ import absolute_import
from __future__ import print_function
import os
import subprocess
import re
import usage
import utils
import sys
import status
import prop
import resource
import stonith
import constraint
import settings
import socket
import tempfile
import datetime
import json
import xml.dom.minidom
import threading
import corosync_conf as corosync_conf_utils
import pcsd
try: # Python 2
from commands import getstatusoutput
except ImportError: # Python 3
from subprocess import getstatusoutput
pcs_dir = os.path.dirname(os.path.realpath(__file__))
def cluster_cmd(argv):
    """Top-level dispatcher for ``pcs cluster <sub-command> ...``.

    Pops the sub-command off *argv* and routes to the matching handler;
    several sub-commands additionally branch on --all/--name options
    recorded in utils.pcs_options.  Unknown sub-commands print usage and
    exit non-zero.
    """
    if len(argv) == 0:
        usage.cluster()
        exit(1)

    sub_cmd = argv.pop(0)
    if (sub_cmd == "help"):
        usage.cluster(argv)
    elif (sub_cmd == "setup"):
        # A cluster name is mandatory for setup.
        if "--name" in utils.pcs_options:
            corosync_setup([utils.pcs_options["--name"]] + argv)
        else:
            utils.err("A cluster name (--name <name>) is required to setup a cluster")
    elif (sub_cmd == "sync"):
        sync_nodes(utils.getNodesFromCorosyncConf(), utils.getCorosyncConf())
    elif (sub_cmd == "status"):
        status.cluster_status(argv)
    elif (sub_cmd == "pcsd-status"):
        cluster_gui_status(argv)
    elif (sub_cmd == "certkey"):
        cluster_certkey(argv)
    elif (sub_cmd == "auth"):
        cluster_auth(argv)
    elif (sub_cmd == "token"):
        cluster_token(argv)
    elif (sub_cmd == "token-nodes"):
        cluster_token_nodes(argv)
    elif (sub_cmd == "start"):
        # --all operates on every configured node instead of a node list.
        if "--all" in utils.pcs_options:
            start_cluster_all()
        else:
            start_cluster(argv)
    elif (sub_cmd == "stop"):
        if "--all" in utils.pcs_options:
            stop_cluster_all()
        else:
            stop_cluster(argv)
    elif (sub_cmd == "kill"):
        kill_cluster(argv)
    elif (sub_cmd == "standby"):
        node_standby(argv)
    elif (sub_cmd == "unstandby"):
        # Same handler as standby, with the standby flag cleared.
        node_standby(argv, False)
    elif (sub_cmd == "enable"):
        if "--all" in utils.pcs_options:
            enable_cluster_all()
        else:
            enable_cluster(argv)
    elif (sub_cmd == "disable"):
        if "--all" in utils.pcs_options:
            disable_cluster_all()
        else:
            disable_cluster(argv)
    elif (sub_cmd == "remote-node"):
        cluster_remote_node(argv)
    elif (sub_cmd == "cib"):
        get_cib(argv)
    elif (sub_cmd == "cib-push"):
        cluster_push(argv)
    elif (sub_cmd == "cib-upgrade"):
        cluster_upgrade()
    elif (sub_cmd == "edit"):
        cluster_edit(argv)
    elif (sub_cmd == "node"):
        cluster_node(argv)
    elif (sub_cmd == "localnode"):
        cluster_localnode(argv)
    elif (sub_cmd == "uidgid"):
        cluster_uidgid(argv)
    elif (sub_cmd == "corosync"):
        cluster_get_corosync_conf(argv)
    elif (sub_cmd == "reload"):
        cluster_reload(argv)
    elif (sub_cmd == "destroy"):
        cluster_destroy(argv)
    elif (sub_cmd == "verify"):
        cluster_verify(argv)
    elif (sub_cmd == "report"):
        cluster_report(argv)
    elif (sub_cmd == "quorum"):
        # "unblock" is the only supported quorum sub-sub-command.
        if argv and argv[0] == "unblock":
            cluster_quorum_unblock(argv[1:])
        else:
            usage.cluster(["quorum"])
            sys.exit(1)
    else:
        usage.cluster()
        sys.exit(1)
# Create config and then send it to all of the nodes and start
# corosync & pacemaker on the nodes
# partial_argv is an array of args passed to corosync configure sync_start
def sync_start(partial_argv, nodes):
    """Distribute the cluster configuration to *nodes*, then start them."""
    sync(partial_argv, nodes)
    print("Starting cluster on nodes: " + ", ".join(nodes) + "...")
    start_cluster_nodes(nodes)
def sync(partial_argv, nodes):
    """Push local pcsd configs to *nodes*, build the corosync configuration
    from *partial_argv* and distribute it to every node.

    Problems pushing pcsd configs are reported as warnings only; the
    corosync sync still proceeds.
    """
    argv = partial_argv[:]

    # send local cluster pcsd configs to the new nodes
    # may be used for sending corosync config as well in future
    pcsd_data = {
        'nodes': nodes,
        'force': True,
    }
    # Bugfix: err_msgs was previously initialized only inside the success
    # branch, so the else branch below raised NameError on append.
    err_msgs = []
    output, retval = utils.run_pcsdcli('send_local_configs', pcsd_data)
    if retval == 0 and output['status'] == 'ok' and output['data']:
        try:
            for node in nodes:
                node_response = output['data'][node]
                if node_response['status'] == 'notauthorized':
                    err_msgs.append(
                        "Unable to authenticate to " + node
                        + ", try running 'pcs cluster auth'"
                    )
                if node_response['status'] not in ['ok', 'not_supported']:
                    err_msgs.append(
                        "Unable to set pcsd configs on {0}".format(node)
                    )
        except:
            # Best effort: a malformed response only produces a warning.
            err_msgs.append('Unable to communicate with pcsd')
    else:
        err_msgs.append("Unable to set pcsd configs")
    for err_msg in err_msgs:
        print("Warning: {0}".format(err_msg))

    config = corosync_setup(argv, True)
    sync_nodes(nodes, config)
def sync_nodes(nodes, config):
    """Write the given corosync config to every node in the list."""
    for target_node in nodes:
        utils.setCorosyncConfig(target_node, config)
def cluster_auth(argv):
    """Authenticate against the listed nodes, or against all nodes from
    the local corosync config when no nodes are given."""
    nodes = argv if argv else utils.getNodesFromCorosyncConf()
    auth_nodes(nodes)
def cluster_token(argv):
    """Print the locally stored authorization token of a single node."""
    if len(argv) > 1:
        utils.err("Must specify only one node")
    elif not argv:
        utils.err("Must specify a node to get authorization token from")
    node = argv[0]
    tokens = utils.readTokens()
    try:
        print(tokens[node])
    except KeyError:
        utils.err("No authorization token for: %s" % (node))
def cluster_token_nodes(argv):
    """Print all node names which have a stored authorization token."""
    token_nodes = sorted(utils.readTokens().keys())
    print("\n".join(token_nodes))
def auth_nodes(nodes):
    """Authenticate to the given nodes via pcsd.

    Credentials come from the -u / -p options when present, otherwise the
    user is prompted. When neither credentials nor --force are given and
    every node reports mutual authorization, no re-authentication is done.
    """
    if "-u" in utils.pcs_options:
        username = utils.pcs_options["-u"]
    else:
        username = None
    if "-p" in utils.pcs_options:
        password = utils.pcs_options["-p"]
    else:
        password = None
    set_nodes = set(nodes)
    # explicit credentials or --force always trigger re-authentication
    need_auth = "--force" in utils.pcs_options or (username or password)
    if not need_auth:
        for node in set_nodes:
            status = utils.checkAuthorization(node)
            # status[0] == 3 means the local node is not authenticated
            # against 'node'; stop probing, authentication is needed
            if status[0] == 3:
                need_auth = True
                break
            mutually_authorized = False
            if status[0] == 0:
                try:
                    # status[1] carries the node's auth report as JSON;
                    # it must list every node we are authenticating to
                    auth_status = json.loads(status[1])
                    if auth_status["success"]:
                        if set_nodes.issubset(set(auth_status["node_list"])):
                            mutually_authorized = True
                except (ValueError, KeyError):
                    pass
            if not mutually_authorized:
                need_auth = True
                break
    if need_auth:
        # prompt only for credentials not supplied on the command line
        if username == None:
            username = utils.get_terminal_input('Username: ')
        if password == None:
            password = utils.get_terminal_password()
        auth_nodes_do(
            set_nodes, username, password, '--force' in utils.pcs_options,
            '--local' in utils.pcs_options
        )
    else:
        for node in set_nodes:
            print(node + ": Already authorized")
def auth_nodes_do(nodes, username, password, force, local):
    """Authenticate to the given nodes via the local pcsd.

    nodes -- iterable of node names
    username, password -- credentials used for the authentication
    force -- re-authenticate even when tokens already exist
    local -- authenticate the local node only, do not sync tokens

    Exits with an error when pcsd cannot be contacted or any node fails
    to authenticate.
    """
    pcsd_data = {
        'nodes': list(set(nodes)),
        'username': username,
        'password': password,
        'force': force,
        'local': local,
    }
    output, retval = utils.run_pcsdcli('auth', pcsd_data)
    if retval == 0 and output['status'] == 'ok' and output['data']:
        failed = False
        try:
            if not output['data']['sync_successful']:
                utils.err(
                    "Some nodes had a newer tokens than the local node. "
                    + "Local node's tokens were updated. "
                    + "Please repeat the authentication if needed."
                )
            for node, result in output['data']['auth_responses'].items():
                if result['status'] == 'ok':
                    print("{0}: Authorized".format(node))
                elif result['status'] == 'already_authorized':
                    print("{0}: Already authorized".format(node))
                elif result['status'] == 'bad_password':
                    utils.err(
                        "{0}: Username and/or password is incorrect".format(node),
                        False
                    )
                    failed = True
                elif result['status'] == 'noresponse':
                    utils.err("Unable to communicate with {0}".format(node), False)
                    failed = True
                else:
                    utils.err("Unexpected response from {0}".format(node), False)
                    failed = True
            if output['data']['sync_nodes_err']:
                utils.err(
                    (
                        "Unable to synchronize and save tokens on nodes: {0}. "
                        + "Are they authorized?"
                    ).format(
                        ", ".join(output['data']['sync_nodes_err'])
                    ),
                    False
                )
                failed = True
        # Exception instead of a bare except: the bare form also caught
        # the SystemExit raised by utils.err above and misreported it as
        # a generic pcsd communication error
        except Exception:
            utils.err('Unable to communicate with pcsd')
        if failed:
            sys.exit(1)
        return
    utils.err('Unable to communicate with pcsd')
# If no arguments get current cluster node status, otherwise get listed
# nodes status
def cluster_gui_status(argv, dont_exit=False):
    """Check pcsd status on the listed nodes, or on all cluster nodes when
    none are given; exit with code 2 on any bad node unless dont_exit."""
    if argv:
        bad_nodes = check_nodes(argv, " ")
    else:
        nodes = utils.getNodesFromCorosyncConf()
        if not nodes:
            if utils.is_rhel6():
                utils.err("no nodes found in cluster.conf")
            else:
                utils.err("no nodes found in corosync.conf")
        bad_nodes = check_nodes(nodes, " ")
    if bad_nodes and not dont_exit:
        sys.exit(2)
def cluster_certkey(argv):
    """Delegate to pcsd to install the pcsd SSL certificate and key."""
    return pcsd.pcsd_certkey(argv)
# Check and see if pcsd is running on the nodes listed
def check_nodes(nodes, prefix = ""):
    """Print pcsd online/offline status for each given node.

    nodes -- node names to check
    prefix -- string printed before each status line (indentation)

    Returns True when at least one node is offline or not authenticated.
    """
    bad_nodes = False
    pm_nodes = utils.getPacemakerNodesID(True)
    cs_nodes = utils.getCorosyncNodesID(True)
    for node in nodes:
        # authorization is checked against the original name; 'node' may
        # be rebound below for display purposes only
        status = utils.checkAuthorization(node)
        if node not in list(pm_nodes.values()):
            # show the pacemaker name next to the corosync name when they
            # differ, e.g. "pcmk-name (corosync-name)"
            for n_id, n in cs_nodes.items():
                if node == n and n_id in pm_nodes:
                    real_node_name = pm_nodes[n_id]
                    if real_node_name == "(null)":
                        real_node_name = "*Unknown*"
                    node = real_node_name + " (" + node + ")"
                    break
        if status[0] == 0:
            print(prefix + node + ": Online")
        elif status[0] == 3:
            print(prefix + node + ": Unable to authenticate")
            bad_nodes = True
        else:
            print(prefix + node + ": Offline")
            bad_nodes = True
    return bad_nodes
def corosync_setup(argv,returnConfig=False):
    """Generate the cluster config: corosync.conf, or cluster.conf (via
    the ccs tool) on RHEL 6 / CMAN clusters.

    argv -- [cluster_name, node1, node2, ...]; a node entry may contain
        ',' to separate a ring 1 address and '/' to append an alternative
        pcsd hostname
    returnConfig -- when True, build and return the config as a string
        instead of installing it locally

    Unless --local is given, the work is delegated to sync()/sync_start()
    which distribute the config (and optionally enable/start the cluster)
    on all nodes; those helpers call back here with returnConfig=True.
    """
    fedora_config = not utils.is_rhel6()
    primary_nodes = []
    hostname_map = {}
    # If node contains a ',' we only care about the first address
    for node in argv[1:]:
        nodename = ""
        if "," in node:
            nodename = node.split(',')[0]
        if "/" in node:
            if nodename == "":
                nodename = node.split('/')[0]
            hostname_map[nodename] = node.split('/')[1]
        if nodename == "":
            nodename = node
        primary_nodes.append(nodename)
    if len(argv) < 2:
        usage.cluster()
        exit(1)
    # without --local, hand off to the distributing code paths
    if not returnConfig and "--start" in utils.pcs_options and not "--local" in utils.pcs_options:# and fedora_config:
        sync_start(argv, primary_nodes)
        if "--enable" in utils.pcs_options:
            enable_cluster(primary_nodes)
        pcsd.pcsd_sync_certs([])
        return
    elif not returnConfig and not "--local" in utils.pcs_options:# and fedora_config:
        sync(argv, primary_nodes)
        if "--enable" in utils.pcs_options:
            enable_cluster(primary_nodes)
        pcsd.pcsd_sync_certs([])
        return
    else:
        nodes = argv[1:]
        cluster_name = argv[0]
    # Verify that all nodes are resolvable otherwise problems may occur
        nodes_unresolvable = False
        udpu_rrp = False
        node_addr_list = []
        for node in nodes:
            addr_list = utils.parse_multiring_node(node)
            if addr_list[1]:
                udpu_rrp = True
            node_addr_list.extend(addr_list)
        for node_addr in node_addr_list:
            if node_addr:
                try:
                    socket.getaddrinfo(node_addr, None)
                except socket.error:
                    print("Warning: Unable to resolve hostname: %s" % node_addr)
                    nodes_unresolvable = True
        if udpu_rrp:
            for node in nodes:
                if "," not in node:
                    utils.err("if one node is configured for RRP, all nodes must configured for RRP")
        if nodes_unresolvable and "--force" not in utils.pcs_options:
            utils.err("Unable to resolve all hostnames (use --force to override).")
        transport = "udp" if utils.is_rhel6() else "udpu"
        if "--transport" in utils.pcs_options:
            transport = utils.pcs_options["--transport"]
            if (
                transport not in ("udp", "udpu")
                and
                "--force" not in utils.pcs_options
            ):
                utils.err(
                    "unknown transport '%s', use --force to override" % transport
                )
        if transport == "udpu" and utils.is_rhel6():
            print(("Warning: Using udpu transport on a CMAN cluster, "
                + "cluster restart is required after node add or remove"))
        if (
            transport == "udpu"
            and
            ("--addr0" in utils.pcs_options or "--addr1" in utils.pcs_options)
        ):
            utils.err("--addr0 and --addr1 can only be used with --transport=udp")
        rrpmode = None
        if "--rrpmode" in utils.pcs_options or udpu_rrp or "--addr0" in utils.pcs_options:
            rrpmode = "passive"
            if "--rrpmode" in utils.pcs_options:
                rrpmode = utils.pcs_options["--rrpmode"]
            if rrpmode == "active" and "--force" not in utils.pcs_options:
                utils.err("using a RRP mode of 'active' is not supported or tested, use --force to override")
            elif rrpmode != "passive" and "--force" not in utils.pcs_options:
                utils.err("%s is an unknown RRP mode, use --force to override" % rrpmode)
        if fedora_config == True:
            # corosync-based cluster: assemble corosync.conf in memory
            if os.path.exists(settings.corosync_conf_file) and not "--force" in utils.pcs_options:
                utils.err("%s already exists, use --force to overwrite" % settings.corosync_conf_file)
            if not ("--corosync_conf" in utils.pcs_options and "--local" in utils.pcs_options):
                cib_path = os.path.join(settings.cib_dir, "cib.xml")
                if os.path.exists(cib_path) and not "--force" in utils.pcs_options:
                    utils.err("%s already exists, use --force to overwrite" % cib_path)
            if "--corosync_conf" not in utils.pcs_options:
                cluster_destroy([])
            for opt in ["--wait_for_all", "--auto_tie_breaker", "--last_man_standing"]:
                if (
                    opt in utils.pcs_options
                    and
                    utils.pcs_options[opt] not in ["0", "1"]
                ):
                    utils.err(
                        "'%s' is not a valid value for %s, use 0 or 1"
                        % (utils.pcs_options[opt], opt)
                    )
            auto_tie_breaker = False
            # build the config section by section
            corosync_conf = corosync_conf_utils.Section("")
            totem_section = corosync_conf_utils.Section("totem")
            nodelist_section = corosync_conf_utils.Section("nodelist")
            quorum_section = corosync_conf_utils.Section("quorum")
            logging_section = corosync_conf_utils.Section("logging")
            corosync_conf.add_section(totem_section)
            corosync_conf.add_section(nodelist_section)
            corosync_conf.add_section(quorum_section)
            corosync_conf.add_section(logging_section)
            totem_section.add_attribute("version", "2")
            totem_section.add_attribute("secauth", "off")
            totem_section.add_attribute("cluster_name", cluster_name)
            totem_section.add_attribute("transport", transport)
            if "--token" in utils.pcs_options:
                totem_section.add_attribute("token", utils.pcs_options["--token"])
            if "--token_coefficient" in utils.pcs_options:
                totem_section.add_attribute(
                    "token_coefficient", utils.pcs_options["--token_coefficient"]
                )
            if "--join" in utils.pcs_options:
                totem_section.add_attribute("join", utils.pcs_options["--join"])
            if "--consensus" in utils.pcs_options:
                totem_section.add_attribute(
                    "consensus", utils.pcs_options["--consensus"]
                )
            if "--miss_count_const" in utils.pcs_options:
                totem_section.add_attribute(
                    "miss_count_const", utils.pcs_options["--miss_count_const"]
                )
            if "--fail_recv_const" in utils.pcs_options:
                totem_section.add_attribute(
                    "fail_recv_const", utils.pcs_options["--fail_recv_const"]
                )
            if rrpmode:
                totem_section.add_attribute("rrp_mode", rrpmode)
            if transport == "udp":
                # udp needs an explicit interface section per ring with
                # bind address and multicast/broadcast settings
                interface_ids = []
                if "--addr0" in utils.pcs_options:
                    interface_ids.append("0")
                if "--addr1" in utils.pcs_options:
                    interface_ids.append("1")
                for interface in interface_ids:
                    interface_section = corosync_conf_utils.Section("interface")
                    totem_section.add_section(interface_section)
                    interface_section.add_attribute("ringnumber", interface)
                    interface_section.add_attribute(
                        "bindnetaddr", utils.pcs_options["--addr" + interface]
                    )
                    if "--broadcast" + interface in utils.pcs_options:
                        interface_section.add_attribute("broadcast", "yes")
                    else:
                        if "--mcast" + interface in utils.pcs_options:
                            mcastaddr = utils.pcs_options["--mcast" + interface]
                        elif interface == "0":
                            mcastaddr = "239.255.1.1"
                        else:
                            mcastaddr = "239.255.2.1"
                        interface_section.add_attribute("mcastaddr", mcastaddr)
                        if "--mcastport" + interface in utils.pcs_options:
                            mcastport = utils.pcs_options["--mcastport" + interface]
                        else:
                            mcastport = "5405"
                        interface_section.add_attribute("mcastport", mcastport)
                        if "--ttl" + interface in utils.pcs_options:
                            interface_section.add_attribute(
                                "ttl", utils.pcs_options["--ttl" + interface]
                            )
            if "--ipv6" in utils.pcs_options:
                totem_section.add_attribute("ip_version", "ipv6")
            for node_id, node_addrs in enumerate(nodes, 1):
                node0, node1 = utils.parse_multiring_node(node_addrs)
                node_section = corosync_conf_utils.Section("node")
                nodelist_section.add_section(node_section)
                node_section.add_attribute("ring0_addr", node0)
                if udpu_rrp:
                    node_section.add_attribute("ring1_addr", node1)
                if node0 in hostname_map:
                    node_section.add_attribute("name", hostname_map[node0])
                node_section.add_attribute("nodeid", node_id)
            quorum_section.add_attribute("provider", "corosync_votequorum")
            if "--wait_for_all" in utils.pcs_options:
                quorum_section.add_attribute(
                    "wait_for_all", utils.pcs_options["--wait_for_all"]
                )
            if "--auto_tie_breaker" in utils.pcs_options:
                quorum_section.add_attribute(
                    "auto_tie_breaker", utils.pcs_options["--auto_tie_breaker"]
                )
                if utils.pcs_options["--auto_tie_breaker"] == "1":
                    auto_tie_breaker = True
            if "--last_man_standing" in utils.pcs_options:
                quorum_section.add_attribute(
                    "last_man_standing", utils.pcs_options["--last_man_standing"]
                )
            if "--last_man_standing_window" in utils.pcs_options:
                quorum_section.add_attribute(
                    "last_man_standing_window",
                    utils.pcs_options["--last_man_standing_window"]
                )
            # two_node mode is only set when auto_tie_breaker is off
            if len(nodes) == 2 and not auto_tie_breaker:
                quorum_section.add_attribute("two_node", "1")
            logging_section.add_attribute("to_logfile", "yes")
            logging_section.add_attribute("logfile", "/var/log/cluster/corosync.log")
            logging_section.add_attribute("to_syslog", "yes")
            if returnConfig:
                return str(corosync_conf)
            utils.setCorosyncConf(str(corosync_conf))
        else:
            # CMAN cluster: build cluster.conf by invoking the ccs tool
            broadcast = (
                ("--broadcast0" in utils.pcs_options)
                or
                ("--broadcast1" in utils.pcs_options)
            )
            if broadcast:
                # CMAN cannot mix broadcast and multicast rings
                transport = "udpb"
                if "--broadcast0" not in utils.pcs_options:
                    print(("Warning: Enabling broadcast for ring 0 "
                        + "as CMAN does not support broadcast in only one ring"))
                if "--broadcast1" not in utils.pcs_options:
                    print(("Warning: Enabling broadcast for ring 1 "
                        + "as CMAN does not support broadcast in only one ring"))
            cluster_conf_location = settings.cluster_conf_file
            if returnConfig:
                # build the config in a temp file instead of the live one
                cc_temp = tempfile.NamedTemporaryFile('w+b', -1, ".pcs")
                cluster_conf_location = cc_temp.name
            if os.path.exists(cluster_conf_location) and not "--force" in utils.pcs_options and not returnConfig:
                print("Error: %s already exists, use --force to overwrite" % cluster_conf_location)
                sys.exit(1)
            output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", cluster_conf_location, "--createcluster", cluster_name])
            if retval != 0:
                print(output)
                utils.err("error creating cluster: %s" % cluster_name)
            output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", cluster_conf_location, "--addfencedev", "pcmk-redirect", "agent=fence_pcmk"])
            if retval != 0:
                print(output)
                utils.err("error creating fence dev: %s" % cluster_name)
            cman_opts = []
            cman_opts.append("transport=" + transport)
            cman_opts.append("broadcast=" + ("yes" if broadcast else "no"))
            if len(nodes) == 2:
                cman_opts.append("two_node=1")
                cman_opts.append("expected_votes=1")
            output, retval = utils.run(
                ["/usr/sbin/ccs", "-f", cluster_conf_location, "--setcman"]
                + cman_opts
            )
            if retval != 0:
                print(output)
                utils.err("error setting cman options")
            for node in nodes:
                if udpu_rrp:
                    node0, node1 = node.split(",")
                elif "--addr1" in utils.pcs_options:
                    node0 = node
                    node1 = utils.pcs_options["--addr1"]
                else:
                    node0 = node
                    node1 = None
                output, retval = utils.run(["/usr/sbin/ccs", "-f", cluster_conf_location, "--addnode", node0])
                if retval != 0:
                    print(output)
                    utils.err("error adding node: %s" % node0)
                if node1:
                    output, retval = utils.run([
                        "/usr/sbin/ccs", "-f", cluster_conf_location,
                        "--addalt", node0, node1
                    ])
                    if retval != 0:
                        print(output)
                        utils.err(
                            "error adding alternative address for node: %s" % node0
                        )
                output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", cluster_conf_location, "--addmethod", "pcmk-method", node0])
                if retval != 0:
                    print(output)
                    utils.err("error adding fence method: %s" % node0)
                output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", cluster_conf_location, "--addfenceinst", "pcmk-redirect", node0, "pcmk-method", "port="+node0])
                if retval != 0:
                    print(output)
                    utils.err("error adding fence instance: %s" % node0)
            if not broadcast:
                for interface in ("0", "1"):
                    if "--addr" + interface not in utils.pcs_options:
                        continue
                    mcastaddr = "239.255.1.1" if interface == "0" else "239.255.2.1"
                    mcast_options = []
                    if "--mcast" + interface in utils.pcs_options:
                        mcastaddr = utils.pcs_options["--mcast" + interface]
                    mcast_options.append(mcastaddr)
                    if "--mcastport" + interface in utils.pcs_options:
                        mcast_options.append(
                            "port=" + utils.pcs_options["--mcastport" + interface]
                        )
                    if "--ttl" + interface in utils.pcs_options:
                        mcast_options.append(
                            "ttl=" + utils.pcs_options["--ttl" + interface]
                        )
                    output, retval = utils.run(
                        ["/usr/sbin/ccs", "-f", cluster_conf_location,
                        "--setmulticast" if interface == "0" else "--setaltmulticast"]
                        + mcast_options
                    )
                    if retval != 0:
                        print(output)
                        utils.err("error adding ring%s settings" % interface)
            totem_options = []
            if "--token" in utils.pcs_options:
                totem_options.append("token=" + utils.pcs_options["--token"])
            if "--join" in utils.pcs_options:
                totem_options.append("join=" + utils.pcs_options["--join"])
            if "--consensus" in utils.pcs_options:
                totem_options.append(
                    "consensus=" + utils.pcs_options["--consensus"]
                )
            if "--miss_count_const" in utils.pcs_options:
                totem_options.append(
                    "miss_count_const=" + utils.pcs_options["--miss_count_const"]
                )
            if "--fail_recv_const" in utils.pcs_options:
                totem_options.append(
                    "fail_recv_const=" + utils.pcs_options["--fail_recv_const"]
                )
            if rrpmode:
                totem_options.append("rrp_mode=" + rrpmode)
            if totem_options:
                output, retval = utils.run(
                    ["/usr/sbin/ccs", "-f", cluster_conf_location, "--settotem"]
                    + totem_options
                )
                if retval != 0:
                    print(output)
                    utils.err("error setting totem options")
            # the quorum options below are votequorum-only; warn and skip
            if "--wait_for_all" in utils.pcs_options:
                print("Warning: --wait_for_all"\
                    " ignored as it is not supported on CMAN clusters")
            if "--auto_tie_breaker" in utils.pcs_options:
                print("Warning: --auto_tie_breaker"\
                    " ignored as it is not supported on CMAN clusters")
            if "--last_man_standing" in utils.pcs_options:
                print("Warning: --last_man_standing"\
                    " ignored as it is not supported on CMAN clusters")
            if "--last_man_standing_window" in utils.pcs_options:
                print("Warning: --last_man_standing_window"\
                    " ignored as it is not supported on CMAN clusters")
            if "--token_coefficient" in utils.pcs_options:
                print("Warning: --token_coefficient"\
                    " ignored as it is not supported on CMAN clusters")
            if "--ipv6" in utils.pcs_options:
                print("Warning: --ipv6"\
                    " ignored as it is not supported on CMAN clusters")
            if returnConfig:
                cc_temp.seek(0)
                cluster_conf_data = cc_temp.read()
                cc_temp.close()
                return cluster_conf_data
    if "--start" in utils.pcs_options:
        start_cluster([])
    if "--enable" in utils.pcs_options:
        enable_cluster([])
def get_local_network():
    """Return the network address of the first local route.

    Parses the leading dotted-quad of '/sbin/ip route' output; exits with
    an error when no address can be found (e.g. no interface is up).
    """
    args = ["/sbin/ip", "route"]
    p = subprocess.Popen(args, stdout=subprocess.PIPE)
    # communicate() waits for the process and closes the pipe; the old
    # p.stdout.read() left the pipe open and a zombie process behind
    iproute_out, dummy_stderr = p.communicate()
    if not isinstance(iproute_out, str):
        # Python 3 pipes return bytes; decode so re.search gets a str
        iproute_out = iproute_out.decode("utf-8")
    network_addr = re.search(r"^([0-9\.]+)", iproute_out)
    if network_addr:
        return network_addr.group(1)
    else:
        utils.err("unable to determine network address, is interface up?")
def start_cluster(argv):
    """Start cluster daemons, locally or on the listed nodes.

    With node arguments this delegates to start_cluster_nodes; otherwise
    it starts cman (RHEL 6) or corosync, and then pacemaker, locally.
    """
    if len(argv) > 0:
        start_cluster_nodes(argv)
        return
    print("Starting Cluster...")
    if utils.is_rhel6():
        # Verify that CMAN_QUORUM_TIMEOUT is set, if not, then we set it to 0
        # so that 'service cman start' does not block waiting for quorum
        retval, output = getstatusoutput('source /etc/sysconfig/cman ; [ -z "$CMAN_QUORUM_TIMEOUT" ]')
        if retval == 0:
            with open("/etc/sysconfig/cman", "a") as cman_conf_file:
                cman_conf_file.write("\nCMAN_QUORUM_TIMEOUT=0\n")
        output, retval = utils.run(["service", "cman","start"])
        if retval != 0:
            print(output)
            utils.err("unable to start cman")
    else:
        output, retval = utils.run(["service", "corosync","start"])
        if retval != 0:
            print(output)
            utils.err("unable to start corosync")
    # pacemaker is started on both cman and corosync based clusters
    output, retval = utils.run(["service", "pacemaker", "start"])
    if retval != 0:
        print(output)
        utils.err("unable to start pacemaker")
def start_cluster_all():
    """Start cluster daemons on every node from the corosync config."""
    all_nodes = utils.getNodesFromCorosyncConf()
    start_cluster_nodes(all_nodes)
def start_cluster_nodes(nodes):
    """Start cluster daemons on the given nodes in parallel."""
    threads = dict([(node, NodeStartThread(node)) for node in nodes])
    error_list = utils.run_node_threads(threads)
    if error_list:
        utils.err("unable to start all nodes\n" + "\n".join(error_list))
def stop_cluster_all():
    """Stop cluster daemons on every node from the corosync config."""
    all_nodes = utils.getNodesFromCorosyncConf()
    stop_cluster_nodes(all_nodes)
def stop_cluster_nodes(nodes):
    """Stop pacemaker and then corosync on the given nodes in parallel.

    Refuses to proceed (unless --force is given) when stopping only a
    subset of the cluster would make the remaining nodes lose quorum.
    """
    all_nodes = utils.getNodesFromCorosyncConf()
    unknown_nodes = set(nodes) - set(all_nodes)
    if unknown_nodes:
        utils.err(
            "nodes '%s' do not appear to exist in configuration"
            % "', '".join(unknown_nodes)
        )
    # stopping the whole cluster cannot break quorum for anyone left,
    # so the check below is skipped in that case
    stopping_all = set(nodes) >= set(all_nodes)
    if not "--force" in utils.pcs_options and not stopping_all:
        error_list = []
        for node in nodes:
            # query nodes one by one; the first one with usable quorum
            # info decides and terminates the loop
            retval, data = utils.get_remote_quorumtool_output(node)
            if retval != 0:
                error_list.append(node + ": " + data)
                continue
            # we are sure whether we are on cman cluster or not because only
            # nodes from a local cluster can be stopped (see nodes validation
            # above)
            if utils.is_rhel6():
                quorum_info = utils.parse_cman_quorum_info(data)
            else:
                quorum_info = utils.parse_quorumtool_output(data)
            if quorum_info:
                if not quorum_info["quorate"]:
                    continue
                if utils.is_node_stop_cause_quorum_loss(
                    quorum_info, local=False, node_list=nodes
                ):
                    utils.err(
                        "Stopping the node(s) will cause a loss of the quorum"
                        + ", use --force to override"
                    )
                else:
                    # We have the info, no need to print errors
                    error_list = []
                    break
            if not utils.is_node_offline_by_quorumtool_output(data):
                error_list.append("Unable to get quorum status")
            # else the node seems to be stopped already
        if error_list:
            utils.err(
                "Unable to determine whether stopping the nodes will cause "
                + "a loss of the quorum, use --force to override\n"
                + "\n".join(error_list)
            )
    # stop pacemaker first so resources are shut down while corosync
    # (the membership layer) is still running
    threads = dict()
    for node in nodes:
        threads[node] = NodeStopPacemakerThread(node)
    error_list = utils.run_node_threads(threads)
    if error_list:
        utils.err("unable to stop all nodes\n" + "\n".join(error_list))
    threads = dict()
    for node in nodes:
        threads[node] = NodeStopCorosyncThread(node)
    error_list = utils.run_node_threads(threads)
    if error_list:
        utils.err("unable to stop all nodes\n" + "\n".join(error_list))
def node_standby(argv, standby=True):
    """Put a node into standby mode, or bring it back out of it.

    argv -- optional single node name; without it the local node is used,
        with --all every pacemaker node is affected
    standby -- True to set standby, False to clear it (unstandby)
    """
    if len(argv) > 1:
        usage.cluster(["standby" if standby else "unstandby"])
        sys.exit(1)
    nodes = utils.getNodesFromPacemaker()
    base_cmd = ["crm_standby", "-v", "on"] if standby else ["crm_standby", "-D"]
    if "--all" in utils.pcs_options:
        for node in nodes:
            utils.run(base_cmd + ["-N", node])
    else:
        options_node = []
        if argv:
            if argv[0] not in nodes:
                utils.err(
                    "node '%s' does not appear to exist in configuration"
                    % argv[0]
                )
            options_node = ["-N", argv[0]]
        utils.run(base_cmd + options_node)
def enable_cluster(argv):
    """Enable cluster services on the listed nodes, or locally when no
    nodes are given."""
    if argv:
        enable_cluster_nodes(argv)
    else:
        utils.enableServices()
def disable_cluster(argv):
    """Disable cluster services on the listed nodes, or locally when no
    nodes are given."""
    if argv:
        disable_cluster_nodes(argv)
    else:
        utils.disableServices()
def enable_cluster_all():
    """Enable cluster services on every node from the corosync config."""
    all_nodes = utils.getNodesFromCorosyncConf()
    enable_cluster_nodes(all_nodes)
def disable_cluster_all():
    """Disable cluster services on every node from the corosync config."""
    all_nodes = utils.getNodesFromCorosyncConf()
    disable_cluster_nodes(all_nodes)
def enable_cluster_nodes(nodes):
    """Enable cluster services on each given node; report all failures."""
    error_list = utils.map_for_error_list(utils.enableCluster, nodes)
    if error_list:
        utils.err("unable to enable all nodes\n" + "\n".join(error_list))
def disable_cluster_nodes(nodes):
    """Disable cluster services on each given node; report all failures."""
    error_list = utils.map_for_error_list(utils.disableCluster, nodes)
    if error_list:
        utils.err("unable to disable all nodes\n" + "\n".join(error_list))
def destroy_cluster(argv):
    """Completely remove the cluster from the given nodes.

    Pacemaker is stopped on all nodes first, while the cluster is still
    quorate, and then the cluster configuration is destroyed.
    """
    if not argv:
        return
    # stop pacemaker and resources while cluster is still quorate
    pacemaker_threads = dict()
    for node in argv:
        pacemaker_threads[node] = NodeStopPacemakerThread(node)
    utils.run_node_threads(pacemaker_threads)
    # proceed with destroy regardless of errors
    # destroy will stop any remaining cluster daemons
    destroy_threads = dict()
    for node in argv:
        destroy_threads[node] = NodeDestroyThread(node)
    error_list = utils.run_node_threads(destroy_threads)
    if error_list:
        utils.err("unable to destroy cluster\n" + "\n".join(error_list))
def stop_cluster(argv):
    """Stop cluster daemons, locally or on the listed nodes.

    Without --force a local quorum check is performed first so that
    stopping this node does not break quorum for the rest of the cluster.
    The --pacemaker / --corosync options limit which daemons are stopped.
    """
    if len(argv) > 0:
        stop_cluster_nodes(argv)
        return
    if not "--force" in utils.pcs_options:
        if utils.is_rhel6():
            # combine cman status and votes output for the quorum parser
            output_status, retval = utils.run(["cman_tool", "status"])
            output_nodes, retval = utils.run([
                "cman_tool", "nodes", "-F", "id,type,votes,name"
            ])
            if output_status == output_nodes:
                # when both commands return the same error
                output = output_status
            else:
                output = output_status + "\n---Votes---\n" + output_nodes
            quorum_info = utils.parse_cman_quorum_info(output)
        else:
            output, retval = utils.run(["corosync-quorumtool", "-p", "-s"])
            # retval is 0 on success if node is not in partition with quorum
            # retval is 1 on error OR on success if node has quorum
            quorum_info = utils.parse_quorumtool_output(output)
        if quorum_info:
            if utils.is_node_stop_cause_quorum_loss(quorum_info, local=True):
                utils.err(
                    "Stopping the node will cause a loss of the quorum"
                    + ", use --force to override"
                )
        elif not utils.is_node_offline_by_quorumtool_output(output):
            utils.err(
                "Unable to determine whether stopping the node will cause "
                + "a loss of the quorum, use --force to override"
            )
        # else the node seems to be stopped already, proceed to be sure
    # with no daemon selector given, stop both layers
    stop_all = (
        "--pacemaker" not in utils.pcs_options
        and
        "--corosync" not in utils.pcs_options
    )
    if stop_all or "--pacemaker" in utils.pcs_options:
        stop_cluster_pacemaker()
    if stop_all or "--corosync" in utils.pcs_options:
        stop_cluster_corosync()
def stop_cluster_pacemaker():
    """Stop the pacemaker service on the local node."""
    print("Stopping Cluster (pacemaker)...", end=' ')
    output, retval = utils.run(["service", "pacemaker", "stop"])
    if retval == 0:
        return
    print(output, end=' ')
    utils.err("unable to stop pacemaker")
def stop_cluster_corosync():
    """Stop the membership layer on the local node: cman on RHEL 6,
    corosync otherwise."""
    service_name = "cman" if utils.is_rhel6() else "corosync"
    print("Stopping Cluster (%s)..." % service_name, end=' ')
    output, retval = utils.run(["service", service_name, "stop"])
    if retval != 0:
        print(output, end=' ')
        utils.err("unable to stop %s" % service_name)
def kill_cluster(argv):
    """Forcibly kill every cluster daemon on the local node (SIGKILL)."""
    daemons = [
        "crmd", "pengine", "attrd", "lrmd", "stonithd", "cib",
        "pacemakerd", "corosync",
    ]
    # the return code is deliberately ignored: killall fails when some
    # of the daemons are not running, which is fine here
    utils.run(["killall", "-9"] + daemons)
def cluster_push(argv):
    """Replace the CIB (or one scope of it) with the contents of a file.

    argv -- a filename and optionally 'scope=<section>'; --config in
        utils.pcs_options forces scope=configuration

    The new CIB is parsed and the scope checked before it is pushed via
    'cibadmin --replace'.
    """
    if len(argv) > 2:
        usage.cluster(["cib-push"])
        sys.exit(1)
    filename = None
    scope = None
    for arg in argv:
        if "=" not in arg:
            filename = arg
        else:
            arg_name, arg_value = arg.split("=", 1)
            # an explicit scope= argument is rejected when --config is on
            if arg_name == "scope" and "--config" not in utils.pcs_options:
                if not utils.is_valid_cib_scope(arg_value):
                    utils.err("invalid CIB scope '%s'" % arg_value)
                else:
                    scope = arg_value
            else:
                usage.cluster(["cib-push"])
                sys.exit(1)
    if "--config" in utils.pcs_options:
        scope = "configuration"
    if not filename:
        usage.cluster(["cib-push"])
        sys.exit(1)
    try:
        # sanity check before pushing: valid XML containing the scope tag
        new_cib_dom = xml.dom.minidom.parse(filename)
        if scope and not new_cib_dom.getElementsByTagName(scope):
            utils.err(
                "unable to push cib, scope '%s' not present in new cib"
                % scope
            )
    except (EnvironmentError, xml.parsers.expat.ExpatError) as e:
        utils.err("unable to parse new cib: %s" % e)
    command = ["cibadmin", "--replace", "--xml-file", filename]
    if scope:
        command.append("--scope=%s" % scope)
    output, retval = utils.run(command)
    if retval != 0:
        utils.err("unable to push cib\n" + output)
    else:
        print("CIB updated")
def cluster_upgrade():
    """Upgrade the CIB schema to the newest version pacemaker supports."""
    output, retval = utils.run(["cibadmin", "--upgrade", "--force"])
    if retval:
        utils.err("unable to upgrade cluster: %s" % output)
    print("Cluster CIB has been upgraded to latest version")
def cluster_edit(argv):
    """Open the CIB (or one scope of it) in $EDITOR and push changes back.

    argv -- optionally 'scope=<section>'; --config in utils.pcs_options
        forces scope=configuration
    """
    if 'EDITOR' in os.environ:
        if len(argv) > 1:
            usage.cluster(["edit"])
            sys.exit(1)
        scope = None
        scope_arg = ""
        for arg in argv:
            if "=" not in arg:
                usage.cluster(["edit"])
                sys.exit(1)
            else:
                arg_name, arg_value = arg.split("=", 1)
                if arg_name == "scope" and "--config" not in utils.pcs_options:
                    if not utils.is_valid_cib_scope(arg_value):
                        utils.err("invalid CIB scope '%s'" % arg_value)
                    else:
                        scope_arg = arg
                        scope = arg_value
                else:
                    usage.cluster(["edit"])
                    sys.exit(1)
        if "--config" in utils.pcs_options:
            scope = "configuration"
            # Leave scope_arg empty as cluster_push will pick up a --config
            # option from utils.pcs_options
            scope_arg = ""
        editor = os.environ['EDITOR']
        # NOTE(review): the tempfile is opened in binary mode ('w+b') yet
        # written with the value returned by utils.get_cib -- presumably a
        # py2 str / bytes; confirm this still works on python 3
        tempcib = tempfile.NamedTemporaryFile('w+b',-1,".pcs")
        cib = utils.get_cib(scope)
        tempcib.write(cib)
        tempcib.flush()
        try:
            subprocess.call([editor, tempcib.name])
        except OSError:
            utils.err("unable to open file with $EDITOR: " + editor)
        tempcib.seek(0)
        newcib = "".join(tempcib.readlines())
        # push only when the editor actually changed something
        if newcib == cib:
            print("CIB not updated, no changes detected")
        else:
            cluster_push([_f for _f in [tempcib.name, scope_arg] if _f])
    else:
        utils.err("$EDITOR environment variable is not set")
def get_cib(argv):
    """Print the raw CIB (or one scope of it), or write it to a file.

    argv -- optionally a filename and/or 'scope=<section>'; --config in
        utils.pcs_options forces scope=configuration
    """
    if len(argv) > 2:
        usage.cluster(["cib"])
        sys.exit(1)
    filename = None
    scope = None
    for arg in argv:
        if "=" not in arg:
            filename = arg
        else:
            arg_name, arg_value = arg.split("=", 1)
            if arg_name == "scope" and "--config" not in utils.pcs_options:
                if not utils.is_valid_cib_scope(arg_value):
                    utils.err("invalid CIB scope '%s'" % arg_value)
                else:
                    scope = arg_value
            else:
                usage.cluster(["cib"])
                sys.exit(1)
    if "--config" in utils.pcs_options:
        scope = "configuration"
    if not filename:
        print(utils.get_cib(scope), end=' ')
    else:
        try:
            # fetch the CIB before touching the target file so an empty
            # CIB does not truncate an existing file; the file handle was
            # previously leaked -- a 'with' block closes and flushes it
            output = utils.get_cib(scope)
            if output != "":
                with open(filename, 'w') as f:
                    f.write(output)
            else:
                utils.err("No data in the CIB")
        except IOError as e:
            utils.err("Unable to write to file '%s', %s" % (filename, e.strerror))
def cluster_node(argv):
    """Add a node to or remove a node from the running cluster.

    argv -- ["add" | "remove" | "delete", node]; the node entry may
        contain ',' (ring 1 address) and '/' (alternative pcsd hostname)
    """
    if len(argv) != 2:
        usage.cluster();
        sys.exit(1)
    if argv[0] == "add":
        add_node = True
    elif argv[0] in ["remove","delete"]:
        add_node = False
    else:
        usage.cluster();
        sys.exit(1)
    node = argv[1]
    node0, node1 = utils.parse_multiring_node(node)
    if "/" in node:
        alt_hostname = node.split('/')[1]
    else:
        alt_hostname = None
    if not node0:
        utils.err("missing ring 0 address of the node")
    # the affected node must be reachable and authenticated via pcsd
    status,output = utils.checkAuthorization(node0)
    if status == 2:
        utils.err("pcsd is not running on %s" % node0)
    elif status == 3:
        utils.err(
            "%s is not yet authenticated (try pcs cluster auth %s)"
            % (node0, node0)
        )
    if add_node == True:
        # the number of ring addresses given must match the cluster's
        # RRP configuration
        need_ring1_address = utils.need_ring1_address(utils.getCorosyncConf())
        if not node1 and need_ring1_address:
            utils.err(
                "cluster is configured for RRP, "
                "you have to specify ring 1 address for the node"
            )
        elif node1 and not need_ring1_address:
            utils.err(
                "cluster is not configured for RRP, "
                "you must not specify ring 1 address for the node"
            )
        corosync_conf = None
        (canAdd, error) = utils.canAddNodeToCluster(node0)
        if not canAdd:
            utils.err("Unable to add '%s' to cluster: %s" % (node0, error))
        # update corosync.conf on every existing node; keep the last
        # successfully updated config for the new node
        for my_node in utils.getNodesFromCorosyncConf():
            retval, output = utils.addLocalNode(my_node, node0, node1, alt_hostname)
            if retval != 0:
                print("Error: unable to add %s on %s - %s" % (node0, my_node, output.strip()), file=sys.stderr)
            else:
                print("%s: Corosync updated" % my_node)
                corosync_conf = output
        if corosync_conf != None:
            # send local cluster pcsd configs to the new node
            # may be used for sending corosync config as well in future
            pcsd_data = {
                'nodes': [node0],
                'force': True,
            }
            output, retval = utils.run_pcsdcli('send_local_configs', pcsd_data)
            if retval != 0:
                utils.err("Unable to set pcsd configs")
            if output['status'] == 'notauthorized':
                utils.err(
                    "Unable to authenticate to " + node0
                    + ", try running 'pcs cluster auth'"
                )
            if output['status'] == 'ok' and output['data']:
                try:
                    node_response = output['data'][node0]
                    if node_response['status'] not in ['ok', 'not_supported']:
                        utils.err("Unable to set pcsd configs")
                # NOTE(review): this bare 'except' also catches the
                # SystemExit raised by utils.err just above, turning it
                # into the generic pcsd communication error -- confirm
                except:
                    utils.err('Unable to communicate with pcsd')
            utils.setCorosyncConfig(node0, corosync_conf)
            if "--enable" in utils.pcs_options:
                utils.enableCluster(node0)
            if "--start" in utils.pcs_options or utils.is_rhel6():
                # always start new node on cman cluster
                # otherwise it will get fenced
                utils.startCluster(node0)
            pcsd_data = {'nodes': [node0]}
            utils.run_pcsdcli('send_local_certs', pcsd_data)
            utils.run_pcsdcli('pcsd_restart_nodes', pcsd_data)
        else:
            utils.err("Unable to update any nodes")
        output, retval = utils.reloadCorosync()
        if utils.is_cman_with_udpu_transport():
            print(("Warning: Using udpu transport on a CMAN cluster, "
                + "cluster restart is required to apply node addition"))
    else:
        # node removal path
        if node0 not in utils.getNodesFromCorosyncConf():
            utils.err(
                "node '%s' does not appear to exist in configuration" % node0
            )
        if not "--force" in utils.pcs_options:
            # refuse to remove the node when that would make the rest of
            # the cluster lose quorum
            retval, data = utils.get_remote_quorumtool_output(node0)
            if retval != 0:
                utils.err(
                    "Unable to determine whether removing the node will cause "
                    + "a loss of the quorum, use --force to override\n"
                    + data
                )
            # we are sure whether we are on cman cluster or not because only
            # nodes from a local cluster can be stopped (see nodes validation
            # above)
            if utils.is_rhel6():
                quorum_info = utils.parse_cman_quorum_info(data)
            else:
                quorum_info = utils.parse_quorumtool_output(data)
            if quorum_info:
                if utils.is_node_stop_cause_quorum_loss(
                    quorum_info, local=False, node_list=[node0]
                ):
                    utils.err(
                        "Removing the node will cause a loss of the quorum"
                        + ", use --force to override"
                    )
            elif not utils.is_node_offline_by_quorumtool_output(data):
                utils.err(
                    "Unable to determine whether removing the node will cause "
                    + "a loss of the quorum, use --force to override\n"
                    + data
                )
            # else the node seems to be stopped already, we're ok to proceed
        nodesRemoved = False
        c_nodes = utils.getNodesFromCorosyncConf()
        # wipe the cluster from the removed node itself first
        destroy_cluster([node0])
        for my_node in c_nodes:
            if my_node == node0:
                continue
            retval, output = utils.removeLocalNode(my_node, node0)
            if retval != 0:
                print("Error: unable to remove %s on %s - %s" % (node0,my_node,output.strip()), file=sys.stderr)
            else:
                if output[0] == 0:
                    print("%s: Corosync updated" % my_node)
                    nodesRemoved = True
                else:
                    print("%s: Error executing command occured: %s" % (my_node, "".join(output[1])), file=sys.stderr)
        if nodesRemoved == False:
            utils.err("Unable to update any nodes")
        output, retval = utils.reloadCorosync()
        # also drop the node from pacemaker's membership
        output, retval = utils.run(["crm_node", "--force", "-R", node0])
        if utils.is_cman_with_udpu_transport():
            print(("Warning: Using udpu transport on a CMAN cluster, "
                + "cluster restart is required to apply node removal"))
def cluster_localnode(argv):
    """Add or remove a node in the local cluster configuration only.

    argv -- ["add" | "remove" | "delete", node_name]
    Edits cluster.conf on RHEL6/CMAN clusters and corosync.conf elsewhere.
    """
    if len(argv) != 2:
        usage.cluster()
        exit(1)
    command, node = argv
    if command == "add":
        # RHEL6 (CMAN) clusters keep membership in cluster.conf.
        if utils.is_rhel6():
            success = utils.addNodeToClusterConf(node)
        else:
            success = utils.addNodeToCorosync(node)
        if success:
            print("%s: successfully added!" % node)
        else:
            utils.err("unable to add %s" % node)
    elif command in ("remove", "delete"):
        if utils.is_rhel6():
            success = utils.removeNodeFromClusterConf(node)
        else:
            success = utils.removeNodeFromCorosync(node)
        if success:
            print("%s: successfully removed!" % node)
        else:
            utils.err("unable to remove %s" % node)
    else:
        usage.cluster()
        exit(1)
def cluster_uidgid_rhel6(argv, silent_list = False):
    """Manage uidgid entries in cluster.conf via ccs on RHEL6/CMAN clusters.

    With no arguments, list configured uidgids (unless silent_list is set).
    Otherwise argv is ["add" | "rm", "uid=<uid>", "gid=<gid>", ...].
    """
    if not os.path.isfile(settings.cluster_conf_file):
        # Bug fix: the original format string had no %s placeholder, so the
        # "%" formatting raised TypeError instead of reporting the error.
        utils.err(
            "the file %s doesn't exist on this machine, create a cluster before running this command"
            % settings.cluster_conf_file
        )
    if len(argv) == 0:
        found = False
        output, retval = utils.run(["/usr/sbin/ccs", "-f", settings.cluster_conf_file, "--lsmisc"])
        if retval != 0:
            utils.err("error running ccs\n" + output)
        lines = output.split('\n')
        for line in lines:
            if line.startswith('UID/GID: '):
                print(line)
                found = True
        if not found and not silent_list:
            print("No uidgids configured in cluster.conf")
        return
    command = argv.pop(0)
    uid = ""
    gid = ""
    if command in ("add", "rm") and len(argv) > 0:
        # Parse "uid=<uid>" / "gid=<gid>" style options.
        for arg in argv:
            if arg.find('=') == -1:
                utils.err("uidgid options must be of the form uid=<uid> gid=<gid>")
            (k, v) = arg.split('=', 1)
            if k != "uid" and k != "gid":
                utils.err("%s is not a valid key, you must use uid or gid" % k)
            if k == "uid":
                uid = v
            if k == "gid":
                gid = v
        if uid == "" and gid == "":
            utils.err("you must set either uid or gid")
        if command == "add":
            output, retval = utils.run(["/usr/sbin/ccs", "-f", settings.cluster_conf_file, "--setuidgid", "uid="+uid, "gid="+gid])
            if retval != 0:
                utils.err("unable to add uidgid\n" + output.rstrip())
        elif command == "rm":
            output, retval = utils.run(["/usr/sbin/ccs", "-f", settings.cluster_conf_file, "--rmuidgid", "uid="+uid, "gid="+gid])
            if retval != 0:
                utils.err("unable to remove uidgid\n" + output.rstrip())
        # If we make a change, we sync out the changes to all nodes unless we're using -f
        if not utils.usefile:
            sync_nodes(utils.getNodesFromCorosyncConf(), utils.getCorosyncConf())
    else:
        usage.cluster(["uidgid"])
        exit(1)
def cluster_uidgid(argv, silent_list = False):
    """Manage corosync uidgid files ("pcs cluster uidgid").

    On RHEL6/CMAN clusters this delegates to cluster_uidgid_rhel6.
    With no arguments, list configured uidgids (unless silent_list is set).
    Otherwise argv is ["add" | "rm", "uid=<uid>", "gid=<gid>", ...].
    """
    if utils.is_rhel6():
        cluster_uidgid_rhel6(argv, silent_list)
        return
    if len(argv) == 0:
        found = False
        uid_gid_files = os.listdir(settings.corosync_uidgid_dir)
        for ug_file in uid_gid_files:
            uid_gid_dict = utils.read_uid_gid_file(ug_file)
            if "uid" in uid_gid_dict or "gid" in uid_gid_dict:
                line = "UID/GID: uid="
                if "uid" in uid_gid_dict:
                    line += uid_gid_dict["uid"]
                line += " gid="
                if "gid" in uid_gid_dict:
                    line += uid_gid_dict["gid"]
                print(line)
                found = True
        if not found and not silent_list:
            # Bug fix: this code path reads corosync uidgid files, not
            # cluster.conf, so the old message naming cluster.conf was wrong.
            print("No uidgids configured")
        return
    command = argv.pop(0)
    uid = ""
    gid = ""
    if command in ("add", "rm") and len(argv) > 0:
        # Parse "uid=<uid>" / "gid=<gid>" style options.
        for arg in argv:
            if arg.find('=') == -1:
                utils.err("uidgid options must be of the form uid=<uid> gid=<gid>")
            (k, v) = arg.split('=', 1)
            if k != "uid" and k != "gid":
                utils.err("%s is not a valid key, you must use uid or gid" % k)
            if k == "uid":
                uid = v
            if k == "gid":
                gid = v
        if uid == "" and gid == "":
            utils.err("you must set either uid or gid")
        if command == "add":
            utils.write_uid_gid_file(uid, gid)
        elif command == "rm":
            retval = utils.remove_uid_gid_file(uid, gid)
            if retval == False:
                utils.err("no uidgid files with uid=%s and gid=%s found" % (uid, gid))
    else:
        usage.cluster(["uidgid"])
        exit(1)
def cluster_get_corosync_conf(argv):
    """Print corosync.conf, either the local copy or one fetched from a node."""
    if utils.is_rhel6():
        utils.err("corosync.conf is not supported on CMAN clusters")
    if len(argv) > 1:
        usage.cluster()
        exit(1)
    if not argv:
        # No node given: show the local configuration.
        print(utils.getCorosyncConf())
        return
    retval, output = utils.getCorosyncConfig(argv[0])
    if retval != 0:
        utils.err(output)
    else:
        print(output)
def cluster_reload(argv):
    """Reload the corosync configuration ("pcs cluster reload corosync")."""
    if argv != ["corosync"]:
        usage.cluster(["reload"])
        exit(1)
    output, retval = utils.reloadCorosync()
    # Older corosync prints "invalid option" with a zero exit code.
    reload_failed = retval != 0 or "invalid option" in output
    if reload_failed:
        utils.err(output.rstrip())
    print("Corosync reloaded")
# Completely tear down the cluster & remove config files
# Code taken from cluster-clean script in pacemaker
def cluster_destroy(argv):
    """Stop cluster services and wipe all cluster configuration and state.

    With --all, destroy the cluster on every node listed in corosync.conf;
    otherwise stop services, kill stragglers and delete config/state files
    on the local node only.
    """
    if "--all" in utils.pcs_options:
        destroy_cluster(utils.getNodesFromCorosyncConf())
    else:
        print("Shutting down pacemaker/corosync services...")
        os.system("service pacemaker stop")
        os.system("service corosync stop")
        print("Killing any remaining services...")
        os.system("killall -q -9 corosync aisexec heartbeat pacemakerd ccm stonithd ha_logd lrmd crmd pengine attrd pingd mgmtd cib fenced dlm_controld gfs_controld")
        utils.disableServices()
        print("Removing all cluster configuration files...")
        if utils.is_rhel6():
            os.system("rm -f /etc/cluster/cluster.conf")
        else:
            os.system("rm -f /etc/corosync/corosync.conf")
        state_files = ["cib.xml*", "cib-*", "core.*", "hostcache", "cts.*",
                       "pe*.bz2", "cib.*"]
        for name in state_files:
            # Bug fix: the original embedded "\{\} \;" in a plain string,
            # relying on invalid Python escape sequences (a Deprecation/
            # SyntaxWarning on modern Pythons).  The braces need no escaping
            # in the shell; only the semicolon does, and it is escaped
            # explicitly here.  find still receives "{}" and ";" as before.
            os.system("find /var/lib -name '" + name + "' -exec rm -f {} \\;")
def cluster_verify(argv):
    """Run crm_verify against the live cluster or a supplied CIB file.

    argv -- optional single filename; if omitted the live CIB is checked.
    Returns crm_verify's exit status.
    """
    nofilename = True
    if len(argv) == 1:
        filename = argv.pop(0)
        nofilename = False
    elif len(argv) > 1:
        # Bug fix: usage.cluster() expects a list (every other caller passes
        # e.g. ["reload"]), and the original fell through after printing
        # usage and verified the live cluster anyway.
        usage.cluster(["verify"])
        exit(1)
    options = []
    if "-V" in utils.pcs_options:
        options.append("-V")
    if nofilename:
        options.append("--live-check")
    else:
        options.append("--xml-file")
        options.append(filename)
    output, retval = utils.run([settings.crm_verify] + options)
    if output != "":
        print(output)
    # Also validate configured stonith levels alongside crm_verify.
    stonith.stonith_level_verify()
    return retval
def cluster_report(argv):
    """Create a crm_report tarball (<outfile>.tar.bz2) for troubleshooting.

    Honors the --from/--to pcs options; defaults to the last 24 hours.
    Refuses to overwrite an existing tarball unless --force is given.
    """
    if len(argv) != 1:
        usage.cluster(["report"])
        sys.exit(1)
    outfile = argv[0]
    dest_outfile = outfile + ".tar.bz2"
    if os.path.exists(dest_outfile):
        if "--force" not in utils.pcs_options:
            utils.err(dest_outfile + " already exists, use --force to overwrite")
        else:
            try:
                os.remove(dest_outfile)
            except OSError as e:
                utils.err("Unable to remove " + dest_outfile + ": " + e.strerror)
    crm_report_opts = []
    crm_report_opts.append("-f")
    if "--from" in utils.pcs_options:
        crm_report_opts.append(utils.pcs_options["--from"])
        if "--to" in utils.pcs_options:
            crm_report_opts.append("-t")
            crm_report_opts.append(utils.pcs_options["--to"])
    else:
        # No explicit range: report on the last 24 hours.
        yesterday = datetime.datetime.now() - datetime.timedelta(1)
        crm_report_opts.append(yesterday.strftime("%Y-%m-%d %H:%M"))
    crm_report_opts.append(outfile)
    output, retval = utils.run([settings.crm_report] + crm_report_opts)
    # Filter crm_report chatter that is not useful to pcs users.
    newoutput = ""
    for line in output.split("\n"):
        # Bug fix: the original tested startswith("grep") twice; the
        # duplicate is collapsed into a single tuple-based test.
        if line.startswith(("cat:", "grep", "tail")):
            continue
        if "We will attempt to remove" in line:
            continue
        if "-p option" in line:
            continue
        if "However, doing" in line:
            continue
        if "to diagnose" in line:
            continue
        newoutput = newoutput + line + "\n"
    if retval != 0:
        utils.err(newoutput)
    print(newoutput)
def cluster_remote_node(argv):
    """Configure a resource as a pacemaker_remote node ("pcs cluster remote-node").

    add <hostname> <resource> [opts...] -- set meta remote-node=<hostname>
                                           on <resource>
    remove|delete <hostname>            -- strip all remote-* meta attributes
                                           and related constraints for
                                           <hostname> from the CIB
    """
    if len(argv) < 1:
        usage.cluster(["remote-node"])
        sys.exit(1)
    command = argv.pop(0)
    if command == "add":
        if len(argv) < 2:
            usage.cluster(["remote-node"])
            sys.exit(1)
        hostname = argv.pop(0)
        rsc = argv.pop(0)
        if not utils.dom_get_resource(utils.get_cib_dom(), rsc):
            utils.err("unable to find resource '%s'" % rsc)
        # Remaining argv entries are passed through as extra meta options.
        resource.resource_update(rsc, ["meta", "remote-node="+hostname] + argv)
    elif command in ["remove","delete"]:
        if len(argv) < 1:
            usage.cluster(["remote-node"])
            sys.exit(1)
        hostname = argv.pop(0)
        dom = utils.get_cib_dom()
        nvpairs = dom.getElementsByTagName("nvpair")
        nvpairs_to_remove = []
        for nvpair in nvpairs:
            if nvpair.getAttribute("name") == "remote-node" and nvpair.getAttribute("value") == hostname:
                # Collect every remote-* nvpair in the same attribute set,
                # not just the remote-node entry itself.
                for np in nvpair.parentNode.getElementsByTagName("nvpair"):
                    if np.getAttribute("name").startswith("remote-"):
                        nvpairs_to_remove.append(np)
        if len(nvpairs_to_remove) == 0:
            utils.err("unable to remove: cannot find remote-node '%s'" % hostname)
        # Iterate over a copy since removeChild mutates the DOM.
        for nvpair in nvpairs_to_remove[:]:
            nvpair.parentNode.removeChild(nvpair)
        dom = constraint.remove_constraints_containing_node(dom, hostname)
        utils.replace_cib_configuration(dom)
    else:
        usage.cluster(["remote-node"])
        sys.exit(1)
def cluster_quorum_unblock(argv):
    """Cancel corosync's wait-for-all so a partial cluster can gain quorum.

    Confirms fencing for each unjoined node, then sets
    quorum.cancel_wait_for_all in the corosync cmap.
    """
    if len(argv) > 0:
        usage.cluster(["quorum", "unblock"])
        sys.exit(1)
    if utils.is_rhel6():
        utils.err("operation is not supported on CMAN clusters")

    # Only meaningful while corosync is actually waiting for all nodes.
    output, retval = utils.run(
        ["corosync-cmapctl", "-g", "runtime.votequorum.wait_for_all_status"]
    )
    if retval != 0:
        utils.err("unable to check quorum status")
    if output.split("=")[-1].strip() != "1":
        utils.err("cluster is not waiting for nodes to establish quorum")

    # Nodes configured in corosync.conf but not currently active.
    unjoined_nodes = (
        set(utils.getNodesFromCorosyncConf())
        -
        set(utils.getCorosyncActiveNodes())
    )
    if not unjoined_nodes:
        utils.err("no unjoined nodes found")
    for node in unjoined_nodes:
        # Acknowledge (confirm) fencing of each missing node.
        stonith.stonith_confirm([node])
    output, retval = utils.run(
        ["corosync-cmapctl", "-s", "quorum.cancel_wait_for_all", "u8", "1"]
    )
    if retval != 0:
        utils.err("unable to cancel waiting for nodes")
    print("Quorum unblocked")

    # NOTE(review): startup-fencing is flipped and then restored to its
    # original value -- presumably to force pacemaker to re-evaluate the
    # property; confirm the intent before changing this sequence.
    startup_fencing = prop.get_set_properties().get("startup-fencing", "")
    utils.set_cib_property(
        "startup-fencing",
        "false" if startup_fencing.lower() != "false" else "true"
    )
    utils.set_cib_property("startup-fencing", startup_fencing)
    print("Waiting for nodes cancelled")
class NodeActionThread(threading.Thread):
    """Base thread for running one cluster action against a single node.

    Subclasses implement run() and record the command outcome in
    self.retval / self.output.
    """

    def __init__(self, node):
        super(NodeActionThread, self).__init__()
        # Default to a successful, empty result until run() reports back.
        self.retval = 0
        self.output = ""
        self.node = node
class NodeStartThread(NodeActionThread):
    # Start cluster services on self.node; outcome goes to retval/output.
    def run(self):
        self.retval, self.output = utils.startCluster(self.node, quiet=True)
class NodeStopPacemakerThread(NodeActionThread):
    # Stop only pacemaker (not corosync) on self.node.
    def run(self):
        self.retval, self.output = utils.stopCluster(
            self.node, quiet=True, pacemaker=True, corosync=False
        )
class NodeStopCorosyncThread(NodeActionThread):
    # Stop only corosync (not pacemaker) on self.node.
    def run(self):
        self.retval, self.output = utils.stopCluster(
            self.node, quiet=True, pacemaker=False, corosync=True
        )
class NodeDestroyThread(NodeActionThread):
    # Completely destroy the cluster configuration on self.node.
    def run(self):
        self.retval, self.output = utils.destroyCluster(self.node, quiet=True)
| gpl-2.0 |
j2sol/ursula | library/sensu_process_check.py | 1 | 4037 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014, Blue Box Group, Inc.
# Copyright 2014, Craig Tracey <craigtracey@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import traceback
def main():
    """Ansible module entry point: create or remove a sensu process check.

    Writes (state=present) or deletes (state=absent) a standalone sensu
    check definition JSON file that runs check-procs.rb for the given
    service.  Reports changed/ok/created via module.exit_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            service=dict(default=None, required=True),
            short_service_name=dict(required=False),
            warn_over=dict(default=15, required=False),
            crit_over=dict(default=30, required=False),
            interval=dict(default=30, required=False),
            occurrences=dict(default=2, required=False),
            plugin_dir=dict(default='/etc/sensu/plugins', required=False),
            check_dir=dict(default='/etc/sensu/conf.d/checks', required=False),
            state=dict(default='present', required=False, choices=['present','absent'])
        )
    )

    # Bug fix: the original only assigned short_service_name when the
    # parameter was NOT supplied, so explicitly passing short_service_name
    # raised NameError on the next line.  Fall back to the basename of the
    # service path only when the parameter is absent.
    short_service_name = (module.params['short_service_name']
                          or os.path.basename(module.params['service']))
    check_path = "%s/%s-service.json" % (module.params['check_dir'], short_service_name)

    if module.params['state'] == 'present':
        try:
            command = "%s/check-procs.rb -p %s -w %s -c %s -W 1 -C 1" % (
                module.params['plugin_dir'], module.params['service'],
                module.params['warn_over'], module.params['crit_over'])
            notification = "unexpected number of %s processes" % (module.params['service'])
            check = dict({
                'checks': {
                    short_service_name: {
                        'command': command,
                        'standalone': True,
                        'handlers': ['default'],
                        'interval': int(module.params['interval']),
                        'notification': notification,
                        'occurrences': int(module.params['occurrences'])
                    }
                }
            })
            if os.path.isfile(check_path):
                with open(check_path) as fh:
                    if json.load(fh) == check:
                        # Existing check already matches: nothing to do.
                        module.exit_json(changed=False, result="ok")
                    else:
                        with open(check_path, "w") as fh:
                            fh.write(json.dumps(check, indent=4))
                        module.exit_json(changed=True, result="changed")
            else:
                with open(check_path, "w") as fh:
                    fh.write(json.dumps(check, indent=4))
                module.exit_json(changed=True, result="created")
        except Exception as e:
            formatted_lines = traceback.format_exc()
            module.fail_json(msg="creating the check failed: %s %s" % (e, formatted_lines))
    else:
        try:
            if os.path.isfile(check_path):
                os.remove(check_path)
                module.exit_json(changed=True, result="changed")
            else:
                module.exit_json(changed=False, result="ok")
        except Exception as e:
            formatted_lines = traceback.format_exc()
            module.fail_json(msg="removing the check failed: %s %s" % (e, formatted_lines))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
main()
| mit |
2014c2g14/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/unittest/case.py | 743 | 48873 | """Test case implementation"""
import sys
import functools
import difflib
import pprint
import re
import warnings
import collections
from . import result
from .util import (strclass, safe_repr, _count_diff_all_purpose,
_count_diff_hashable)
__unittest = True
DIFF_OMITTED = ('\nDiff is %s characters long. '
'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
    """
    Raise this exception in a test to skip it.

    Usually you can use TestCase.skipTest() or one of the skipping decorators
    instead of raising this directly.  The exception message becomes the
    reported skip reason.
    """
class _ExpectedFailure(Exception):
"""
Raise this when a test is expected to fail.
This is an implementation detail.
"""
def __init__(self, exc_info):
super(_ExpectedFailure, self).__init__()
self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
    """
    The test was supposed to fail, but it didn't!

    Internal implementation detail raised by the expectedFailure wrapper.
    """
class _Outcome(object):
def __init__(self):
self.success = True
self.skipped = None
self.unexpectedSuccess = None
self.expectedFailure = None
self.errors = []
self.failures = []
def _id(obj):
return obj
def skip(reason):
    """Decorator factory that unconditionally skips the decorated test.

    Plain callables are replaced by a wrapper that raises SkipTest(reason);
    classes are returned unchanged.  In both cases the skip marker
    attributes are set on the returned object.
    """
    def decorator(test_item):
        target = test_item
        if not isinstance(target, type):
            @functools.wraps(target)
            def skip_wrapper(*args, **kwargs):
                raise SkipTest(reason)
            target = skip_wrapper
        target.__unittest_skip__ = True
        target.__unittest_skip_why__ = reason
        return target
    return decorator
def skipIf(condition, reason):
    """
    Skip a test if the condition is true.
    """
    # True -> skip decorator; False -> identity (leave the test untouched).
    return skip(reason) if condition else _id
def skipUnless(condition, reason):
    """
    Skip a test unless the condition is true.
    """
    # True -> identity (run the test); False -> skip decorator.
    return _id if condition else skip(reason)
def expectedFailure(func):
    """Decorator marking *func* as expected to fail.

    Any exception from the call is converted to _ExpectedFailure (carrying
    the exc_info triple); a clean return raises _UnexpectedSuccess instead.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            func(*args, **kwargs)
        except Exception:
            raise _ExpectedFailure(sys.exc_info())
        raise _UnexpectedSuccess
    return wrapper
class _AssertRaisesBaseContext(object):
    """Shared plumbing for the assertRaises/assertWarns context managers.

    Stores the expected exception/warning class, the owning TestCase, the
    optional callable under test, and an optional regex that str() of the
    caught object must match.
    """

    def __init__(self, expected, test_case, callable_obj=None,
                 expected_regex=None):
        self.expected = expected
        self.test_case = test_case
        if callable_obj is not None:
            # Prefer the callable's __name__ for failure messages.
            try:
                self.obj_name = callable_obj.__name__
            except AttributeError:
                self.obj_name = str(callable_obj)
        else:
            self.obj_name = None
        # A plain string/bytes pattern is compiled up front.
        if isinstance(expected_regex, (bytes, str)):
            expected_regex = re.compile(expected_regex)
        self.expected_regex = expected_regex
        self.msg = None

    def _raiseFailure(self, standardMsg):
        # Raise the TestCase's failureException with a formatted message.
        msg = self.test_case._formatMessage(self.msg, standardMsg)
        raise self.test_case.failureException(msg)

    def handle(self, name, callable_obj, args, kwargs):
        """
        If callable_obj is None, assertRaises/Warns is being used as a
        context manager, so check for a 'msg' kwarg and return self.
        If callable_obj is not None, call it passing args and kwargs.
        """
        if callable_obj is None:
            self.msg = kwargs.pop('msg', None)
            return self
        with self:
            callable_obj(*args, **kwargs)
class _AssertRaisesContext(_AssertRaisesBaseContext):
    """A context manager used to implement TestCase.assertRaises* methods."""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        if exc_type is None:
            # Nothing was raised inside the with block -- that is a failure.
            try:
                exc_name = self.expected.__name__
            except AttributeError:
                exc_name = str(self.expected)
            if self.obj_name:
                self._raiseFailure("{} not raised by {}".format(exc_name,
                                                                self.obj_name))
            else:
                self._raiseFailure("{} not raised".format(exc_name))
        if not issubclass(exc_type, self.expected):
            # let unexpected exceptions pass through
            return False
        # store exception, without traceback, for later retrieval
        self.exception = exc_value.with_traceback(None)
        if self.expected_regex is None:
            # Returning True suppresses the (expected) exception.
            return True

        expected_regex = self.expected_regex
        if not expected_regex.search(str(exc_value)):
            self._raiseFailure('"{}" does not match "{}"'.format(
                expected_regex.pattern, str(exc_value)))
        return True
class _AssertWarnsContext(_AssertRaisesBaseContext):
    """A context manager used to implement TestCase.assertWarns* methods."""

    def __enter__(self):
        # The __warningregistry__'s need to be in a pristine state for tests
        # to work properly.
        for v in sys.modules.values():
            if getattr(v, '__warningregistry__', None):
                v.__warningregistry__ = {}
        self.warnings_manager = warnings.catch_warnings(record=True)
        self.warnings = self.warnings_manager.__enter__()
        # "always" ensures repeated warnings are recorded, not deduplicated.
        warnings.simplefilter("always", self.expected)
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.warnings_manager.__exit__(exc_type, exc_value, tb)
        if exc_type is not None:
            # let unexpected exceptions pass through
            return
        try:
            exc_name = self.expected.__name__
        except AttributeError:
            exc_name = str(self.expected)
        first_matching = None
        for m in self.warnings:
            w = m.message
            if not isinstance(w, self.expected):
                continue
            if first_matching is None:
                first_matching = w
            if (self.expected_regex is not None and
                not self.expected_regex.search(str(w))):
                continue
            # store warning for later retrieval
            self.warning = w
            self.filename = m.filename
            self.lineno = m.lineno
            return
        # Now we simply try to choose a helpful failure message
        if first_matching is not None:
            # A warning of the right class fired but its text did not match.
            self._raiseFailure('"{}" does not match "{}"'.format(
                self.expected_regex.pattern, str(first_matching)))
        if self.obj_name:
            self._raiseFailure("{} not triggered by {}".format(exc_name,
                                                               self.obj_name))
        else:
            self._raiseFailure("{} not triggered".format(exc_name))
class TestCase(object):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
When subclassing TestCase, you can set these attributes:
* failureException: determines which exception will be raised when
the instance's assertion methods fail; test methods raising this
exception will be deemed to have 'failed' rather than 'errored'.
* longMessage: determines whether long messages (including repr of
objects used in assert methods) will be printed on failure in *addition*
to any explicit message passed.
* maxDiff: sets the maximum length of a diff in failure messages
by assert methods using difflib. It is looked up as an instance
attribute so can be configured by individual tests if required.
"""
failureException = AssertionError
longMessage = True
maxDiff = 80*8
# If a string is longer than _diffThreshold, use normal comparison instead
# of difflib. See #11763.
_diffThreshold = 2**16
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
self._testMethodName = methodName
self._outcomeForDoCleanups = None
self._testMethodDoc = 'No test'
try:
testMethod = getattr(self, methodName)
except AttributeError:
if methodName != 'runTest':
# we allow instantiation with no explicit method name
# but not an *incorrect* or missing method name
raise ValueError("no such test method in %s: %s" %
(self.__class__, methodName))
else:
self._testMethodDoc = testMethod.__doc__
self._cleanups = []
# Map types to custom assertEqual functions that will compare
# instances of said type in more detail to generate a more useful
# error message.
self._type_equality_funcs = {}
self.addTypeEqualityFunc(dict, 'assertDictEqual')
self.addTypeEqualityFunc(list, 'assertListEqual')
self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
self.addTypeEqualityFunc(set, 'assertSetEqual')
self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
self.addTypeEqualityFunc(str, 'assertMultiLineEqual')
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
This method is for use by TestCase subclasses that need to register
their own type equality functions to provide nicer error messages.
Args:
typeobj: The data type to call this function on when both values
are of the same type in assertEqual().
function: The callable taking two arguments and an optional
msg= argument that raises self.failureException with a
useful error message when the two arguments are not equal.
"""
self._type_equality_funcs[typeobj] = function
def addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Cleanup items are called even if setUp fails (unlike tearDown)."""
self._cleanups.append((function, args, kwargs))
def setUp(self):
"Hook method for setting up the test fixture before exercising it."
pass
def tearDown(self):
"Hook method for deconstructing the test fixture after testing it."
pass
@classmethod
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
@classmethod
def tearDownClass(cls):
"Hook method for deconstructing the class fixture after running all tests in the class."
def countTestCases(self):
return 1
def defaultTestResult(self):
return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._testMethodName == other._testMethodName
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
    def _addSkip(self, result, reason):
        # Report a skip to the result object, falling back to addSuccess for
        # legacy TestResult implementations that lack an addSkip method.
        addSkip = getattr(result, 'addSkip', None)
        if addSkip is not None:
            addSkip(self, reason)
        else:
            warnings.warn("TestResult has no addSkip method, skips not reported",
                          RuntimeWarning, 2)
            result.addSuccess(self)
    def _executeTestPart(self, function, outcome, isTest=False):
        # Run one part of a test (setUp, the test method, or tearDown) and
        # record any exception in *outcome* rather than letting it escape.
        # KeyboardInterrupt is the only exception allowed to propagate.
        try:
            function()
        except KeyboardInterrupt:
            raise
        except SkipTest as e:
            outcome.success = False
            outcome.skipped = str(e)
        except _UnexpectedSuccess:
            exc_info = sys.exc_info()
            outcome.success = False
            if isTest:
                # Only the test method itself can "unexpectedly succeed".
                outcome.unexpectedSuccess = exc_info
            else:
                outcome.errors.append(exc_info)
        except _ExpectedFailure:
            outcome.success = False
            exc_info = sys.exc_info()
            if isTest:
                outcome.expectedFailure = exc_info
            else:
                outcome.errors.append(exc_info)
        except self.failureException:
            outcome.success = False
            outcome.failures.append(sys.exc_info())
            # NOTE(review): this assignment is dead -- exc_info is never read
            # after this point in this branch.
            exc_info = sys.exc_info()
        except:
            outcome.success = False
            outcome.errors.append(sys.exc_info())
    def run(self, result=None):
        """Run the test, recording the outcome into *result*.

        If no result is supplied, a default TestResult is created and its
        startTestRun/stopTestRun hooks are invoked around the run.  Returns
        the result object (or None for class/method-level skips).
        """
        orig_result = result
        if result is None:
            result = self.defaultTestResult()
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()

        result.startTest(self)

        testMethod = getattr(self, self._testMethodName)
        if (getattr(self.__class__, "__unittest_skip__", False) or
            getattr(testMethod, "__unittest_skip__", False)):
            # If the class or method was skipped.
            try:
                skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
                            or getattr(testMethod, '__unittest_skip_why__', ''))
                self._addSkip(result, skip_why)
            finally:
                result.stopTest(self)
            return
        try:
            outcome = _Outcome()
            self._outcomeForDoCleanups = outcome

            # setUp -> test method -> tearDown -> cleanups; each part records
            # its own failures/errors into the shared outcome.
            self._executeTestPart(self.setUp, outcome)
            if outcome.success:
                self._executeTestPart(testMethod, outcome, isTest=True)
                self._executeTestPart(self.tearDown, outcome)

            self.doCleanups()
            if outcome.success:
                result.addSuccess(self)
            else:
                if outcome.skipped is not None:
                    self._addSkip(result, outcome.skipped)
                for exc_info in outcome.errors:
                    result.addError(self, exc_info)
                for exc_info in outcome.failures:
                    result.addFailure(self, exc_info)
                if outcome.unexpectedSuccess is not None:
                    # Older TestResult objects may lack the richer hooks;
                    # degrade gracefully with a warning.
                    addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
                    if addUnexpectedSuccess is not None:
                        addUnexpectedSuccess(self)
                    else:
                        warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failures",
                                      RuntimeWarning)
                        result.addFailure(self, outcome.unexpectedSuccess)

                if outcome.expectedFailure is not None:
                    addExpectedFailure = getattr(result, 'addExpectedFailure', None)
                    if addExpectedFailure is not None:
                        addExpectedFailure(self, outcome.expectedFailure)
                    else:
                        warnings.warn("TestResult has no addExpectedFailure method, reporting as passes",
                                      RuntimeWarning)
                        result.addSuccess(self)
            return result
        finally:
            result.stopTest(self)
            if orig_result is None:
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()
def doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
outcome = self._outcomeForDoCleanups or _Outcome()
while self._cleanups:
function, args, kwargs = self._cleanups.pop()
part = lambda: function(*args, **kwargs)
self._executeTestPart(part, outcome)
# return this for backwards compatibility
# even though we no longer us it internally
return outcome.success
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
function(*args, **kwargs)
def skipTest(self, reason):
"""Skip this test."""
raise SkipTest(reason)
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException(msg)
def assertFalse(self, expr, msg=None):
"""Check that the expression is false."""
if expr:
msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr))
raise self.failureException(msg)
def assertTrue(self, expr, msg=None):
"""Check that the expression is true."""
if not expr:
msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr))
raise self.failureException(msg)
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
* If an explicit message is provided, plus ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
# don't switch to '{}' formatting in Python 2.X
# it changes the way unicode input is handled
return '%s : %s' % (standardMsg, msg)
except UnicodeDecodeError:
return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg))
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail unless an exception of class excClass is raised
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
raised, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertRaises(SomeException):
do_something()
An optional keyword argument 'msg' can be provided when assertRaises
is used as a context object.
The context manager keeps a reference to the exception as
the 'exception' attribute. This allows you to inspect the
exception after the assertion::
with self.assertRaises(SomeException) as cm:
do_something()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, 3)
"""
context = _AssertRaisesContext(excClass, self, callableObj)
return context.handle('assertRaises', callableObj, args, kwargs)
def assertWarns(self, expected_warning, callable_obj=None, *args, **kwargs):
"""Fail unless a warning of class warnClass is triggered
by callable_obj when invoked with arguments args and keyword
arguments kwargs. If a different type of warning is
triggered, it will not be handled: depending on the other
warning filtering rules in effect, it might be silenced, printed
out, or raised as an exception.
If called with callable_obj omitted or None, will return a
context object used like this::
with self.assertWarns(SomeWarning):
do_something()
An optional keyword argument 'msg' can be provided when assertWarns
is used as a context object.
The context manager keeps a reference to the first matching
warning as the 'warning' attribute; similarly, the 'filename'
and 'lineno' attributes give you information about the line
of Python code from which the warning was triggered.
This allows you to inspect the warning after the assertion::
with self.assertWarns(SomeWarning) as cm:
do_something()
the_warning = cm.warning
self.assertEqual(the_warning.some_attribute, 147)
"""
context = _AssertWarnsContext(expected_warning, self, callable_obj)
return context.handle('assertWarns', callable_obj, args, kwargs)
def _getAssertEqualityFunc(self, first, second):
"""Get a detailed comparison function for the types of the two args.
Returns: A callable accepting (first, second, msg=None) that will
raise a failure exception if first != second with a useful human
readable error message for those types.
"""
#
# NOTE(gregory.p.smith): I considered isinstance(first, type(second))
# and vice versa. I opted for the conservative approach in case
# subclasses are not intended to be compared in detail to their super
# class instances using a type equality func. This means testing
# subtypes won't automagically use the detailed comparison. Callers
# should use their type specific assertSpamEqual method to compare
# subclasses if the detailed comparison is desired and appropriate.
# See the discussion in http://bugs.python.org/issue2578.
#
if type(first) is type(second):
asserter = self._type_equality_funcs.get(type(first))
if asserter is not None:
if isinstance(asserter, str):
asserter = getattr(self, asserter)
return asserter
return self._baseAssertEqual
def _baseAssertEqual(self, first, second, msg=None):
"""The default assertEqual implementation, not type specific."""
if not first == second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
assertion_func = self._getAssertEqualityFunc(first, second)
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '!='
operator.
"""
if not first != second:
msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
safe_repr(second)))
raise self.failureException(msg)
    def assertAlmostEqual(self, first, second, places=None, msg=None,
                          delta=None):
        """Fail if the two objects are unequal as determined by their
        difference rounded to the given number of decimal places
        (default 7) and comparing to zero, or by comparing that the
        difference between the two objects is more than the given delta.
        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
        If the two objects compare equal then they will automatically
        compare almost equal.
        """
        if first == second:
            # shortcut
            return
        # 'places' and 'delta' are mutually exclusive ways to state the
        # tolerance.
        if delta is not None and places is not None:
            raise TypeError("specify delta or places not both")
        if delta is not None:
            if abs(first - second) <= delta:
                return
            standardMsg = '%s != %s within %s delta' % (safe_repr(first),
                                                        safe_repr(second),
                                                        safe_repr(delta))
        else:
            if places is None:
                places = 7
            # Rounding the difference (not each operand) keeps the check
            # symmetric in first/second.
            if round(abs(second-first), places) == 0:
                return
            standardMsg = '%s != %s within %r places' % (safe_repr(first),
                                                         safe_repr(second),
                                                         places)
        msg = self._formatMessage(msg, standardMsg)
        raise self.failureException(msg)
    def assertNotAlmostEqual(self, first, second, places=None, msg=None,
                             delta=None):
        """Fail if the two objects are equal as determined by their
        difference rounded to the given number of decimal places
        (default 7) and comparing to zero, or by comparing that the
        difference between the two objects is less than the given delta.
        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
        Objects that are equal automatically fail.
        """
        # 'places' and 'delta' are mutually exclusive tolerances.
        if delta is not None and places is not None:
            raise TypeError("specify delta or places not both")
        if delta is not None:
            # Succeeds only when the values differ by strictly more than
            # delta; exact equality always fails this assertion.
            if not (first == second) and abs(first - second) > delta:
                return
            standardMsg = '%s == %s within %s delta' % (safe_repr(first),
                                                        safe_repr(second),
                                                        safe_repr(delta))
        else:
            if places is None:
                places = 7
            if not (first == second) and round(abs(second-first), places) != 0:
                return
            standardMsg = '%s == %s within %r places' % (safe_repr(first),
                                                         safe_repr(second),
                                                         places)
        msg = self._formatMessage(msg, standardMsg)
        raise self.failureException(msg)
def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None):
"""An equality assertion for ordered sequences (like lists and tuples).
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
seq1: The first sequence to compare.
seq2: The second sequence to compare.
seq_type: The expected datatype of the sequences, or None if no
datatype should be enforced.
msg: Optional message to use on failure instead of a list of
differences.
"""
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
raise self.failureException('First sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq1)))
if not isinstance(seq2, seq_type):
raise self.failureException('Second sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq2)))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = safe_repr(seq1)
seq2_repr = safe_repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in range(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
    def assertListEqual(self, list1, list2, msg=None):
        """A list-specific equality assertion.
        Args:
            list1: The first list to compare.
            list2: The second list to compare.
            msg: Optional message to use on failure instead of a list of
                differences.
        """
        # seq_type=list also enforces that both arguments really are lists.
        self.assertSequenceEqual(list1, list2, msg, seq_type=list)
    def assertTupleEqual(self, tuple1, tuple2, msg=None):
        """A tuple-specific equality assertion.
        Args:
            tuple1: The first tuple to compare.
            tuple2: The second tuple to compare.
            msg: Optional message to use on failure instead of a list of
                differences.
        """
        # seq_type=tuple also enforces that both arguments really are tuples.
        self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
    def assertSetEqual(self, set1, set2, msg=None):
        """A set-specific equality assertion.
        Args:
            set1: The first set to compare.
            set2: The second set to compare.
            msg: Optional message to use on failure instead of a list of
                differences.
        assertSetEqual uses ducktyping to support different types of sets, and
        is optimized for sets specifically (parameters must support a
        difference method).
        """
        try:
            difference1 = set1.difference(set2)
        except TypeError as e:
            self.fail('invalid type when attempting set difference: %s' % e)
        except AttributeError as e:
            self.fail('first argument does not support set difference: %s' % e)
        try:
            difference2 = set2.difference(set1)
        except TypeError as e:
            self.fail('invalid type when attempting set difference: %s' % e)
        except AttributeError as e:
            self.fail('second argument does not support set difference: %s' % e)
        # Equal iff both halves of the symmetric difference are empty.
        if not (difference1 or difference2):
            return
        lines = []
        if difference1:
            lines.append('Items in the first set but not the second:')
            for item in difference1:
                lines.append(repr(item))
        if difference2:
            lines.append('Items in the second set but not the first:')
            for item in difference2:
                lines.append(repr(item))
        standardMsg = '\n'.join(lines)
        self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1),
safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
    def assertDictEqual(self, d1, d2, msg=None):
        """A dict-specific equality assertion that shows a readable diff of
        the pretty-printed dicts on failure."""
        self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
        self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
        if d1 != d2:
            standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
            diff = ('\n' + '\n'.join(difflib.ndiff(
                           pprint.pformat(d1).splitlines(),
                           pprint.pformat(d2).splitlines())))
            standardMsg = self._truncateMessage(standardMsg, diff)
            self.fail(self._formatMessage(msg, standardMsg))
    def assertDictContainsSubset(self, subset, dictionary, msg=None):
        """Checks whether dictionary is a superset of subset.
        Deprecated; emits a DeprecationWarning on every call."""
        warnings.warn('assertDictContainsSubset is deprecated',
                      DeprecationWarning)
        missing = []
        mismatched = []
        for key, value in subset.items():
            if key not in dictionary:
                missing.append(key)
            elif value != dictionary[key]:
                mismatched.append('%s, expected: %s, actual: %s' %
                                  (safe_repr(key), safe_repr(value),
                                   safe_repr(dictionary[key])))
        # Success iff every subset key exists with an equal value.
        if not (missing or mismatched):
            return
        standardMsg = ''
        if missing:
            standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
                                                   missing)
        if mismatched:
            if standardMsg:
                standardMsg += '; '
            standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
        self.fail(self._formatMessage(msg, standardMsg))
    def assertCountEqual(self, first, second, msg=None):
        """An unordered sequence comparison asserting that the same elements,
        regardless of order. If the same element occurs more than once,
        it verifies that the elements occur the same number of times.
            self.assertEqual(Counter(list(first)),
                             Counter(list(second)))
        Example:
            - [0, 1, 1] and [1, 0, 1] compare equal.
            - [0, 0, 1] and [0, 1] compare unequal.
        """
        first_seq, second_seq = list(first), list(second)
        try:
            # Fast path: count hashable elements with Counter.
            first = collections.Counter(first_seq)
            second = collections.Counter(second_seq)
        except TypeError:
            # Handle case with unhashable elements
            differences = _count_diff_all_purpose(first_seq, second_seq)
        else:
            if first == second:
                return
            differences = _count_diff_hashable(first_seq, second_seq)
        if differences:
            standardMsg = 'Element counts were not equal:\n'
            lines = ['First has %d, Second has %d: %r' % diff for diff in differences]
            diffMsg = '\n'.join(lines)
            standardMsg = self._truncateMessage(standardMsg, diffMsg)
            msg = self._formatMessage(msg, standardMsg)
            self.fail(msg)
    def assertMultiLineEqual(self, first, second, msg=None):
        """Assert that two multi-line strings are equal."""
        self.assertIsInstance(first, str, 'First argument is not a string')
        self.assertIsInstance(second, str, 'Second argument is not a string')
        if first != second:
            # don't use difflib if the strings are too long
            if (len(first) > self._diffThreshold or
                len(second) > self._diffThreshold):
                # _baseAssertEqual raises here, since first != second.
                self._baseAssertEqual(first, second, msg)
            firstlines = first.splitlines(keepends=True)
            secondlines = second.splitlines(keepends=True)
            if len(firstlines) == 1 and first.strip('\r\n') == first:
                # Single-line inputs: append newlines so the ndiff output
                # stays readable.
                firstlines = [first + '\n']
                secondlines = [second + '\n']
            standardMsg = '%s != %s' % (safe_repr(first, True),
                                        safe_repr(second, True))
            diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines))
            standardMsg = self._truncateMessage(standardMsg, diff)
            self.fail(self._formatMessage(msg, standardMsg))
def assertLess(self, a, b, msg=None):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertLessEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreater(self, a, b, msg=None):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreaterEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Included for symmetry with assertIsInstance."""
if isinstance(obj, cls):
standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
    def assertRaisesRegex(self, expected_exception, expected_regex,
                          callable_obj=None, *args, **kwargs):
        """Asserts that the message in a raised exception matches a regex.
        Args:
            expected_exception: Exception class expected to be raised.
            expected_regex: Regex (re pattern object or string) expected
                    to be found in error message.
            callable_obj: Function to be called.
            msg: Optional message used in case of failure. Can only be used
                    when assertRaisesRegex is used as a context manager.
            args: Extra args.
            kwargs: Extra kwargs.
        """
        # With callable_obj=None the context object doubles as a context
        # manager; otherwise handle() invokes the callable immediately.
        context = _AssertRaisesContext(expected_exception, self, callable_obj,
                                       expected_regex)
        return context.handle('assertRaisesRegex', callable_obj, args, kwargs)
    def assertWarnsRegex(self, expected_warning, expected_regex,
                         callable_obj=None, *args, **kwargs):
        """Asserts that the message in a triggered warning matches a regexp.
        Basic functioning is similar to assertWarns() with the addition
        that only warnings whose messages also match the regular expression
        are considered successful matches.
        Args:
            expected_warning: Warning class expected to be triggered.
            expected_regex: Regex (re pattern object or string) expected
                    to be found in error message.
            callable_obj: Function to be called.
            msg: Optional message used in case of failure. Can only be used
                    when assertWarnsRegex is used as a context manager.
            args: Extra args.
            kwargs: Extra kwargs.
        """
        # With callable_obj=None the context object doubles as a context
        # manager; otherwise handle() invokes the callable immediately.
        context = _AssertWarnsContext(expected_warning, self, callable_obj,
                                      expected_regex)
        return context.handle('assertWarnsRegex', callable_obj, args, kwargs)
    def assertRegex(self, text, expected_regex, msg=None):
        """Fail the test unless the text matches the regular expression."""
        if isinstance(expected_regex, (str, bytes)):
            # An empty pattern matches everything and would make this
            # assertion vacuous.
            assert expected_regex, "expected_regex must not be empty."
            expected_regex = re.compile(expected_regex)
        if not expected_regex.search(text):
            msg = msg or "Regex didn't match"
            msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text)
            raise self.failureException(msg)
    def assertNotRegex(self, text, unexpected_regex, msg=None):
        """Fail the test if the text matches the regular expression."""
        if isinstance(unexpected_regex, (str, bytes)):
            unexpected_regex = re.compile(unexpected_regex)
        match = unexpected_regex.search(text)
        if match:
            msg = msg or "Regex matched"
            # Show the exact substring the pattern matched for debugging.
            msg = '%s: %r matches %r in %r' % (msg,
                                               text[match.start():match.end()],
                                               unexpected_regex.pattern,
                                               text)
            raise self.failureException(msg)
    def _deprecate(original_func):
        """Wrap *original_func* so each call emits a DeprecationWarning
        naming the preferred replacement, then forwards to it."""
        def deprecated_func(*args, **kwargs):
            warnings.warn(
                'Please use {0} instead.'.format(original_func.__name__),
                DeprecationWarning, 2)
            return original_func(*args, **kwargs)
        return deprecated_func
    # see #9424
    # Legacy aliases kept for backward compatibility; each warns and
    # forwards to the modern assert* method above.
    failUnlessEqual = assertEquals = _deprecate(assertEqual)
    failIfEqual = assertNotEquals = _deprecate(assertNotEqual)
    failUnlessAlmostEqual = assertAlmostEquals = _deprecate(assertAlmostEqual)
    failIfAlmostEqual = assertNotAlmostEquals = _deprecate(assertNotAlmostEqual)
    failUnless = assert_ = _deprecate(assertTrue)
    failUnlessRaises = _deprecate(assertRaises)
    failIf = _deprecate(assertFalse)
    assertRaisesRegexp = _deprecate(assertRaisesRegex)
    assertRegexpMatches = _deprecate(assertRegex)
class FunctionTestCase(TestCase):
    """A test case that wraps a test function.
    This is useful for slipping pre-existing test functions into the
    unittest framework. Optionally, set-up and tidy-up functions can be
    supplied. As with TestCase, the tidy-up ('tearDown') function will
    always be called if the set-up ('setUp') function ran successfully.
    """
    def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
        super(FunctionTestCase, self).__init__()
        self._setUpFunc = setUp
        self._tearDownFunc = tearDown
        self._testFunc = testFunc
        self._description = description
    def setUp(self):
        # Optional fixture; a missing callable is simply skipped.
        if self._setUpFunc is not None:
            self._setUpFunc()
    def tearDown(self):
        if self._tearDownFunc is not None:
            self._tearDownFunc()
    def runTest(self):
        self._testFunc()
    def id(self):
        return self._testFunc.__name__
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self._setUpFunc == other._setUpFunc and \
               self._tearDownFunc == other._tearDownFunc and \
               self._testFunc == other._testFunc and \
               self._description == other._description
    def __ne__(self, other):
        # Bug fix: the previous ``not self == other`` turned the
        # NotImplemented returned by __eq__ for foreign types into False;
        # propagate NotImplemented so Python can try the reflected
        # comparison on the other operand.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
    def __hash__(self):
        return hash((type(self), self._setUpFunc, self._tearDownFunc,
                     self._testFunc, self._description))
    def __str__(self):
        return "%s (%s)" % (strclass(self.__class__),
                            self._testFunc.__name__)
    def __repr__(self):
        return "<%s tec=%s>" % (strclass(self.__class__),
                                self._testFunc)
    def shortDescription(self):
        # Prefer the explicit description, then the first line of the
        # wrapped function's docstring, else None.
        if self._description is not None:
            return self._description
        doc = self._testFunc.__doc__
        return doc and doc.split("\n")[0].strip() or None
| gpl-3.0 |
lamby/pkg-codespeak-lib | testing/test_iniconfig.py | 162 | 8194 | import py
import pytest
from py._iniconfig import IniConfig, ParseError, __all__ as ALL
from py._iniconfig import iscommentline
from textwrap import dedent
def pytest_generate_tests(metafunc):
    # Parametrization hook: tests with an 'input' funcarg get one call per
    # entry of the check_tokens table; tests carrying the 'multi' marker get
    # the cartesian product of the keyword lists given to the marker.
    if 'input' in metafunc.funcargnames:
        for name, (input, expected) in check_tokens.items():
            metafunc.addcall(id=name, funcargs={
                'input': input,
                'expected': expected,
            })
    elif hasattr(metafunc.function, 'multi'):
        kwargs = metafunc.function.multi.kwargs
        names, values = zip(*kwargs.items())
        values = cartesian_product(*values)
        for p in values:
            metafunc.addcall(funcargs=dict(zip(names, p)))
def cartesian_product(L, *lists):
    # copied from http://bit.ly/cyIXjn
    # Lazily yields tuples from the cartesian product of all input lists,
    # recursing on the remaining lists for each element of the first.
    if lists:
        for head in L:
            for tail in cartesian_product(lists[0], *lists[1:]):
                yield (head,) + tail
    else:
        for head in L:
            yield (head,)
check_tokens = {
'section': (
'[section]',
[(0, 'section', None, None)]
),
'value': (
'value = 1',
[(0, None, 'value', '1')]
),
'value in section': (
'[section]\nvalue=1',
[(0, 'section', None, None), (1, 'section', 'value', '1')]
),
'value with continuation': (
'names =\n Alice\n Bob',
[(0, None, 'names', 'Alice\nBob')]
),
'value with aligned continuation': (
'names = Alice\n'
' Bob',
[(0, None, 'names', 'Alice\nBob')]
),
'blank line':(
'[section]\n\nvalue=1',
[(0, 'section', None, None), (2, 'section', 'value', '1')]
),
'comment': (
'# comment',
[]
),
'comment on value': (
'value = 1',
[(0, None, 'value', '1')]
),
'comment on section': (
'[section] #comment',
[(0, 'section', None, None)]
),
'comment2': (
'; comment',
[]
),
'comment2 on section': (
'[section] ;comment',
[(0, 'section', None, None)]
),
'pseudo section syntax in value': (
'name = value []',
[(0, None, 'name', 'value []')]
),
'assignment in value': (
'value = x = 3',
[(0, None, 'value', 'x = 3')]
),
'use of colon for name-values': (
'name: y',
[(0, None, 'name', 'y')]
),
'use of colon without space': (
'value:y=5',
[(0, None, 'value', 'y=5')]
),
'equality gets precedence': (
'value=xyz:5',
[(0, None, 'value', 'xyz:5')]
),
}
def parse(input):
    # only for testing purposes - _parse() does not use state except path
    # Build an IniConfig shell without running __init__ so the tokenizer
    # can be exercised in isolation.
    ini = object.__new__(IniConfig)
    ini.path = "sample"
    return ini._parse(input.splitlines(True))
def parse_a_error(input):
    # Assert that parsing *input* raises ParseError; returns the ExceptionInfo
    # so callers can inspect lineno/msg.
    return py.test.raises(ParseError, parse, input)
def test_tokenize(input, expected):
    # Parametrized through pytest_generate_tests with the check_tokens table.
    parsed = parse(input)
    assert parsed == expected
def test_parse_empty():
    parsed = parse("")
    assert not parsed
    ini = IniConfig("sample", "")
    assert not ini.sections
def test_ParseError():
    # lineno is stored 0-based but rendered 1-based.
    e = ParseError("filename", 0, "hello")
    assert str(e) == "filename:1: hello"
def test_continuation_needs_perceeding_token():
    excinfo = parse_a_error(' Foo')
    assert excinfo.value.lineno == 0
def test_continuation_cant_be_after_section():
    excinfo = parse_a_error('[section]\n Foo')
    assert excinfo.value.lineno == 1
def test_section_cant_be_empty():
    excinfo = parse_a_error('[]')
@py.test.mark.multi(line=[
    '!!',
    ])
def test_error_on_weird_lines(line):
    # The 'multi' marker is expanded by pytest_generate_tests above.
    parse_a_error(line)
def test_iniconfig_from_file(tmpdir):
    path = tmpdir/'test.txt'
    path.write('[metadata]\nname=1')
    config = IniConfig(path=path)
    assert list(config.sections) == ['metadata']
    # Explicit data overrides the file contents.
    config = IniConfig(path, "[diff]")
    assert list(config.sections) == ['diff']
    # data must be passed positionally or with a path; keyword-only data fails.
    py.test.raises(TypeError, "IniConfig(data=path.read())")
def test_iniconfig_section_first(tmpdir):
    # A value before any [section] header is a parse error.
    excinfo = py.test.raises(ParseError, """
        IniConfig("x", data='name=1')
    """)
    assert excinfo.value.msg == "no section header defined"
def test_iniconig_section_duplicate_fails():
    excinfo = py.test.raises(ParseError, r"""
        IniConfig("x", data='[section]\n[section]')
    """)
    assert 'duplicate section' in str(excinfo.value)
def test_iniconfig_duplicate_key_fails():
    excinfo = py.test.raises(ParseError, r"""
        IniConfig("x", data='[section]\nname = Alice\nname = bob')
    """)
    assert 'duplicate name' in str(excinfo.value)
def test_iniconfig_lineof():
    config = IniConfig("x.ini", data=
        '[section]\n'
        'value = 1\n'
        '[section2]\n'
        '# comment\n'
        'value =2'
    )
    # lineof() returns 1-based line numbers, or None for unknown names.
    assert config.lineof('missing') is None
    assert config.lineof('section') == 1
    assert config.lineof('section2') == 3
    assert config.lineof('section', 'value') == 2
    assert config.lineof('section2','value') == 5
    assert config['section'].lineof('value') == 2
    assert config['section2'].lineof('value') == 5
def test_iniconfig_get_convert():
    # Values stay strings unless a convert callable is supplied.
    config= IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
    assert config.get('section', 'int') == '1'
    assert config.get('section', 'int', convert=int) == 1
def test_iniconfig_get_missing():
    config= IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
    assert config.get('section', 'missing', default=1) == 1
    assert config.get('section', 'missing') is None
def test_section_get():
    config = IniConfig("x", data='[section]\nvalue=1')
    section = config['section']
    assert section.get('value', convert=int) == 1
    assert section.get('value', 1) == "1"
    assert section.get('missing', 2) == 2
def test_missing_section():
    config = IniConfig("x", data='[section]\nvalue=1')
    py.test.raises(KeyError,'config["other"]')
def test_section_getitem():
    config = IniConfig("x", data='[section]\nvalue=1')
    # Repeated lookups are stable.
    assert config['section']['value'] == '1'
    assert config['section']['value'] == '1'
def test_section_iter():
    config = IniConfig("x", data='[section]\nvalue=1')
    names = list(config['section'])
    assert names == ['value']
    items = list(config['section'].items())
    assert items==[('value', '1')]
def test_config_iter():
    # Iterating a config yields section objects in file order.
    config = IniConfig("x.ini", data=dedent('''
        [section1]
        value=1
        [section2]
        value=2
    '''))
    l = list(config)
    assert len(l) == 2
    assert l[0].name == 'section1'
    assert l[0]['value'] == '1'
    assert l[1].name == 'section2'
    assert l[1]['value'] == '2'
def test_config_contains():
    config = IniConfig("x.ini", data=dedent('''
        [section1]
        value=1
        [section2]
        value=2
    '''))
    assert 'xyz' not in config
    assert 'section1' in config
    assert 'section2' in config
def test_iter_file_order():
    # Sections and names must keep source order, not dict/hash order.
    config = IniConfig("x.ini", data="""
[section2] #cpython dict ordered before section
value = 1
value2 = 2 # dict ordered before value
[section]
a = 1
b = 2
""")
    l = list(config)
    secnames = [x.name for x in l]
    assert secnames == ['section2', 'section']
    assert list(config['section2']) == ['value', 'value2']
    assert list(config['section']) == ['a', 'b']
def test_example_pypirc():
    # End-to-end sample mirroring a distutils ~/.pypirc file.
    config = IniConfig("pypirc", data=dedent('''
        [distutils]
        index-servers =
            pypi
            other
        [pypi]
        repository: <repository-url>
        username: <username>
        password: <password>
        [other]
        repository: http://example.com/pypi
        username: <username>
        password: <password>
    '''))
    distutils, pypi, other = list(config)
    assert distutils["index-servers"] == "pypi\nother"
    assert pypi['repository'] == '<repository-url>'
    assert pypi['username'] == '<username>'
    assert pypi['password'] == '<password>'
    assert ['repository', 'username', 'password'] == list(other)
def test_api_import():
    # The public API is exactly IniConfig and ParseError.
    assert ALL == ['IniConfig', 'ParseError']
@pytest.mark.parametrize("line", [
    "#qwe",
    " #qwe",
    ";qwe",
    " ;qwe",
])
def test_iscommentline_true(line):
    assert iscommentline(line)
| mit |
yuyichao/Scical.jl | cmake/cmake-python-helper.py | 3 | 2168 | # Copyright (C) 2012~2013 by Yichao Yu
# yyc1992@gmail.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# Redistribution and use is allowed according to the terms of the BSD
# license. For details see the accompanying COPYING-CMAKE-SCRIPTS file.
from __future__ import print_function
def get_sys_info():
    """Print interpreter/installation facts as 'key:value' lines for CMake
    to parse; always returns 0."""
    import sys
    import distutils.sysconfig
    import imp
    print("exec_prefix:%s" % sys.exec_prefix)
    print("short_version:%s" % sys.version[:3])
    print("long_version:%s" % sys.version.split()[0])
    print("py_inc_dir:%s" % distutils.sysconfig.get_python_inc())
    print("site_packages_dir:%s" %
          distutils.sysconfig.get_python_lib(plat_specific=1))
    try:
        # imp.get_tag() does not exist on Python 2 (no __pycache__ tags),
        # so fall back to an empty string there.
        magic_tag = imp.get_tag()
    except AttributeError:
        magic_tag = ''
    print("magic_tag:%s" % magic_tag)
    return 0
def compile_file(infile):
    """Byte-compile *infile*; print the error and return 1 on failure,
    0 on success."""
    import py_compile
    try:
        py_compile.compile(infile, doraise=True)
    except py_compile.PyCompileError as err:
        print(err.msg)
        return 1
    return 0
def main(argv):
    """Dispatch on the first command-line option; returns an exit status."""
    option = argv[1]
    if option == '--get-sys-info':
        return get_sys_info()
    if option == '--compile':
        return compile_file(argv[2])
    import sys
    print('Unknown options %s' % argv[1:], file=sys.stderr)
    return 1
# Script entry point: propagate main()'s return code as the exit status.
if '__main__' == __name__:
    import sys
    sys.exit(main(sys.argv))
| gpl-3.0 |
bollu/polymage | sandbox/tests/test_grouping.py | 1 | 1428 | from __future__ import absolute_import, division, print_function
from fractions import Fraction
import sys
import subprocess
sys.path.insert(0, '../')
from compiler import *
from constructs import *
def test_tree_graph():
    # Builds a complete binary tree of depth L (heap layout) where each
    # leaf scales the input image and each internal node alpha-blends its
    # two children, then runs the grouping pass and emits graph + C++ code.
    R = Parameter(Int, "R")
    C = Parameter(Int, "C")
    x = Variable(Int, "x")
    y = Variable(Int, "y")
    row = Interval(Int, 0, R-1)
    col = Interval(Int, 0, C-1)
    img = Image(Float, "img", [R, C])
    alpha = 0.6
    F = {}
    L = 3
    for l in range(0, (2**L)-1):
        F[l] = Function(([x, y], [row, col]), Float, "F"+str(l))
    # Internal nodes blend children at indices 2l+1 and 2l+2.
    for l in range(0, (2**(L-1))-1):
        F[l].defn = [ (alpha) * F[2*l+1](x, y) + (1-alpha) * F[2*l+2](x, y) ]
    # Leaves scale the input image by their node index.
    for l in range((2**(L-1))-1, (2**L)-1):
        F[l].defn = [ l * img(x, y) ]
    p_est = [ (R, 1024), (C, 1024) ]
    # build the pipeline
    pipeline = buildPipeline([F[0]],
                             param_estimates = p_est,
                             group_size = 100,
                             pipe_name = "tree")
    filename = "tree_graph"
    dot_file = filename+".dot"
    png_file = filename+".png"
    g = pipeline.pipeline_graph
    g.write(filename+".dot")
    # Render the grouping graph to PNG via the graphviz 'dot' tool.
    dotty_str = "dot -Tpng "+dot_file+" -o "+png_file
    subprocess.check_output(dotty_str, shell=True)
    filename = 'tree.cpp'
    c_file = open(filename, 'w')
    c_file.write(pipeline.generate_code().__str__())
    c_file.close()
    return
| apache-2.0 |
MinFu/youtube-dl | youtube_dl/extractor/canalc2.py | 145 | 1258 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class Canalc2IE(InfoExtractor):
    # youtube-dl extractor for canalc2.tv video pages.
    IE_NAME = 'canalc2.tv'
    _VALID_URL = r'http://.*?\.canalc2\.tv/video\.asp\?.*?idVideo=(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.canalc2.tv/video.asp?idVideo=12163&voir=oui',
        'md5': '060158428b650f896c542dfbb3d6487f',
        'info_dict': {
            'id': '12163',
            'ext': 'mp4',
            'title': 'Terrasses du Numérique'
        }
    }
    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')
        # We need to set the voir field for getting the file name
        url = 'http://www.canalc2.tv/video.asp?idVideo=%s&voir=oui' % video_id
        webpage = self._download_webpage(url, video_id)
        # The flash player's 'file' variable holds the path on the VOD host.
        file_name = self._search_regex(
            r"so\.addVariable\('file','(.*?)'\);",
            webpage, 'file name')
        video_url = 'http://vod-flash.u-strasbg.fr:8080/' + file_name
        title = self._html_search_regex(
            r'class="evenement8">(.*?)</a>', webpage, 'title')
        return {
            'id': video_id,
            'ext': 'mp4',
            'url': video_url,
            'title': title,
        }
| unlicense |
SCOAP3/invenio | invenio/legacy/websubmit/file_metadata_plugins/extractor_plugin.py | 13 | 2472 | # This file is part of Invenio.
# Copyright (C) 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebSubmit Metadata Plugin - This is the generic metadata extraction
plugin. Contains methods to extract metadata from many kinds of files.
Dependencies: extractor
"""
__plugin_version__ = "WebSubmit File Metadata Plugin API 1.0"
import extractor
from invenio.legacy.bibdocfile.api import decompose_file
def can_read_local(inputfile):
    """
    Checks if inputfile is among metadata-readable file types
    @param inputfile: path to the file
    @type inputfile: string
    @rtype: boolean
    @return: True if file can be processed
    """
    # Check file type (0 base, 1 name, 2 ext)
    ext = decompose_file(inputfile)[2]
    # Set literal gives O(1) membership instead of scanning a list on every
    # call; the original list also contained '.ps' twice - deduplicated.
    # NOTE(review): '.tar.gz' only matches if decompose_file() can return
    # multi-part extensions -- confirm against its implementation.
    return ext.lower() in {'.html', '.doc', '.ps', '.xls', '.ppt',
                           '.sxw', '.sdw', '.dvi', '.man', '.flac',
                           '.mp3', '.nsf', '.sid', '.ogg', '.wav', '.png',
                           '.deb', '.rpm', '.tar.gz', '.zip', '.elf',
                           '.s3m', '.xm', '.it', '.flv', '.real', '.avi',
                           '.mpeg', '.qt', '.asf'}
def read_metadata_local(inputfile, verbose):
    """
    Metadata extraction from many kind of files
    @param inputfile: path to the file
    @type inputfile: string
    @param verbose: verbosity
    @type verbose: int
    @rtype: dict
    @return: dictionary with metadata
    """
    # NOTE: 'verbose' is part of the plugin API signature but unused here.
    # Initialization dict
    meta_info = {}
    # Extraction
    xtract = extractor.Extractor()
    # Get the keywords
    keys = xtract.extract(inputfile)
    # Loop to dump data to the dict; both keys and values are stored as
    # iso-8859-1 encoded bytes.
    for keyword_type, keyword in keys:
        meta_info[keyword_type.encode('iso-8859-1')] = \
            keyword.encode('iso-8859-1')
    # Return the dictionary
    return meta_info
| gpl-2.0 |
achang97/YouTunes | lib/python2.7/site-packages/pip/_vendor/progress/counter.py | 510 | 1502 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from . import Infinite, Progress
from .helpers import WriteMixin
class Counter(WriteMixin, Infinite):
    """Unbounded progress indicator that prints the raw iteration count."""
    message = ''
    hide_cursor = True

    def update(self):
        # Render the current index as plain text.
        rendered = str(self.index)
        self.write(rendered)
class Countdown(WriteMixin, Progress):
    """Bounded progress indicator that prints how many steps remain."""
    hide_cursor = True

    def update(self):
        # Render the remaining step count as plain text.
        rendered = str(self.remaining)
        self.write(rendered)
class Stack(WriteMixin, Progress):
    """Progress indicator drawn as a vertically filling block character."""
    phases = (u' ', u'▁', u'▂', u'▃', u'▄', u'▅', u'▆', u'▇', u'█')
    hide_cursor = True

    def update(self):
        # Map progress in [0, 1] onto a phase index; clamp so that
        # progress == 1.0 still lands on the last glyph.
        total = len(self.phases)
        index = int(self.progress * total)
        if index > total - 1:
            index = total - 1
        self.write(self.phases[index])
class Pie(Stack):
    # Same fill logic as Stack, rendered with pie-chart glyphs instead of
    # vertical block characters.
    phases = (u'○', u'◔', u'◑', u'◕', u'●')
| mit |
leb2dg/osf.io | api/actions/serializers.py | 3 | 6414 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework import generics
from rest_framework import serializers as ser
from api.base import utils
from api.base.exceptions import Conflict
from api.base.exceptions import JSONAPIAttributeException
from api.base.serializers import JSONAPISerializer
from api.base.serializers import LinksField
from api.base.serializers import RelationshipField
from api.base.serializers import HideIfProviderCommentsAnonymous
from api.base.serializers import HideIfProviderCommentsPrivate
from osf.exceptions import InvalidTriggerError
from osf.models import PreprintService
from osf.utils.workflows import DefaultStates, DefaultTriggers
class ReviewableCountsRelationshipField(RelationshipField):
    """Relationship field that can embed reviewable state counts in its meta.

    Counts are only added when the request opts in via the
    ``related_counts`` query parameter, the view is a detail route, and the
    requesting user has the ``view_actions`` permission on the provider.
    """

    def __init__(self, *args, **kwargs):
        # Default to advertising state counts unless the caller explicitly
        # configured 'include_state_counts' in related_meta.
        kwargs['related_meta'] = kwargs.get('related_meta') or {}
        if 'include_state_counts' not in kwargs['related_meta']:
            kwargs['related_meta']['include_state_counts'] = True
        super(ReviewableCountsRelationshipField, self).__init__(*args, **kwargs)

    def get_meta_information(self, metadata, provider):
        # Clone metadata because its mutability is questionable
        metadata = dict(metadata or {})

        # Make counts opt-in
        show_counts = utils.is_truthy(self.context['request'].query_params.get('related_counts', False))
        # Only include counts on detail routes
        is_detail = self.context.get('view') and not isinstance(self.context['view'], generics.ListAPIView)
        # Weird hack to avoid being called twice
        # get_meta_information is called with both self.related_meta and self.self_meta.
        # `is` could probably be used here but this seems more comprehensive.
        is_related_meta = metadata.pop('include_state_counts', False)

        if show_counts and is_detail and is_related_meta:
            # Finally, require users to have view_actions permissions
            auth = utils.get_user_auth(self.context['request'])
            if auth and auth.logged_in and auth.user.has_perm('view_actions', provider):
                metadata.update(provider.get_reviewable_state_counts())

        return super(ReviewableCountsRelationshipField, self).get_meta_information(metadata, provider)
class TargetRelationshipField(RelationshipField):
    """Relationship field that resolves an incoming id to a target model.

    The concrete model class is supplied via the ``target_class`` kwarg
    (or by overriding ``TargetClass`` in a subclass).
    """
    _target_class = None

    def __init__(self, *args, **kwargs):
        # Pull our extra kwarg out before the base field sees it.
        target_class = kwargs.pop('target_class', None)
        self._target_class = target_class
        super(TargetRelationshipField, self).__init__(*args, **kwargs)

    @property
    def TargetClass(self):
        if not self._target_class:
            raise NotImplementedError()
        return self._target_class

    def get_object(self, object_id):
        # Delegate the lookup to the model's own loader.
        return self.TargetClass.load(object_id)

    def to_internal_value(self, data):
        # Downstream serializer code expects the resolved object under
        # the 'target' key.
        return {'target': self.get_object(data)}
class BaseActionSerializer(JSONAPISerializer):
    """Abstract serializer for review/moderation actions.

    Subclasses must override ``get_action_url`` with a regular method that
    accepts the action instance, and provide a concrete ``Meta``.
    """

    filterable_fields = frozenset([
        'id',
        'trigger',
        'from_state',
        'to_state',
        'date_created',
        'date_modified',
        'target',
    ])

    id = ser.CharField(source='_id', read_only=True)

    # Only 'trigger' and 'comment' are writable; the state fields are
    # derived from running the trigger on the target.
    trigger = ser.ChoiceField(choices=DefaultTriggers.choices())
    comment = HideIfProviderCommentsPrivate(ser.CharField(max_length=65535, required=False))
    from_state = ser.ChoiceField(choices=DefaultStates.choices(), read_only=True)
    to_state = ser.ChoiceField(choices=DefaultStates.choices(), read_only=True)

    date_created = ser.DateTimeField(source='created', read_only=True)
    date_modified = ser.DateTimeField(source='modified', read_only=True)

    creator = RelationshipField(
        read_only=True,
        related_view='users:user-detail',
        related_view_kwargs={'user_id': '<creator._id>'},
        filter_key='creator__guids___id',
        always_embed=True,
    )

    links = LinksField(
        {
            'self': 'get_action_url',
        }
    )

    @property
    def get_action_url(self):
        # Declared as a property so that merely accessing it on the
        # abstract base raises; concrete subclasses replace it with a
        # plain method taking the action instance.
        raise NotImplementedError()

    def get_absolute_url(self, obj):
        return self.get_action_url(obj)

    def create(self, validated_data):
        """Run the state-machine trigger named in the payload on the target.

        Returns the action produced by the target's run_* method, raises
        Conflict on an invalid transition, and raises a JSON-API attribute
        error for an unrecognized trigger value.
        """
        trigger = validated_data.pop('trigger')
        user = validated_data.pop('user')
        target = validated_data.pop('target')
        comment = validated_data.pop('comment', '')
        try:
            if trigger == DefaultTriggers.ACCEPT.value:
                return target.run_accept(user, comment)
            if trigger == DefaultTriggers.REJECT.value:
                return target.run_reject(user, comment)
            if trigger == DefaultTriggers.EDIT_COMMENT.value:
                return target.run_edit_comment(user, comment)
            if trigger == DefaultTriggers.SUBMIT.value:
                return target.run_submit(user)
        except InvalidTriggerError as e:
            # Invalid transition from the current state
            # NOTE(review): `e.message` is Python 2 only — confirm before
            # running this under Python 3.
            raise Conflict(e.message)
        else:
            # Reached only when no recognized trigger matched (every
            # recognized trigger returns from inside the try block).
            raise JSONAPIAttributeException(attribute='trigger', detail='Invalid trigger.')

    class Meta:
        type_ = 'actions'
        abstract = True
class ReviewActionSerializer(BaseActionSerializer):
    """Serializer for review actions performed on preprints."""

    class Meta:
        type_ = 'review-actions'

    filterable_fields = frozenset([
        'id',
        'trigger',
        'from_state',
        'to_state',
        'date_created',
        'date_modified',
        'provider',
        'target',
    ])

    # Provider is resolved through the target preprint, not stored on the
    # action itself.
    provider = RelationshipField(
        read_only=True,
        related_view='preprint_providers:preprint_provider-detail',
        related_view_kwargs={'provider_id': '<target.provider._id>'},
        filter_key='target__provider___id',
    )

    # Overrides the base field to hide the creator when provider comments
    # are anonymous.
    creator = HideIfProviderCommentsAnonymous(RelationshipField(
        read_only=True,
        related_view='users:user-detail',
        related_view_kwargs={'user_id': '<creator._id>'},
        filter_key='creator__guids___id',
        always_embed=True,
    ))

    target = TargetRelationshipField(
        target_class=PreprintService,
        read_only=False,
        required=True,
        related_view='preprints:preprint-detail',
        related_view_kwargs={'preprint_id': '<target._id>'},
        filter_key='target__guids___id',
    )

    def get_action_url(self, obj):
        # Replaces the base-class property with a concrete URL builder.
        return utils.absolute_reverse('actions:action-detail', kwargs={'action_id': obj._id, 'version': self.context['request'].parser_context['kwargs']['version']})
| apache-2.0 |
dwightgunning/django | tests/admin_scripts/tests.py | 22 | 93594 | # -*- coding: utf-8 -*-
"""
A series of tests to establish that the command-line management tools work as
advertised - especially with regards to the handling of the
DJANGO_SETTINGS_MODULE and default settings.py files.
"""
from __future__ import unicode_literals
import codecs
import os
import re
import shutil
import socket
import subprocess
import sys
import tempfile
import unittest
import django
from django import conf, get_version
from django.conf import settings
from django.core.management import (
BaseCommand, CommandError, call_command, color,
)
from django.db import ConnectionHandler
from django.db.migrations.exceptions import MigrationSchemaMissing
from django.db.migrations.recorder import MigrationRecorder
from django.test import (
LiveServerTestCase, SimpleTestCase, mock, override_settings,
)
from django.test.runner import DiscoverRunner
from django.utils._os import npath, upath
from django.utils.encoding import force_text
from django.utils.six import PY2, PY3, StringIO
custom_templates_dir = os.path.join(os.path.dirname(upath(__file__)), 'custom_templates')
SYSTEM_CHECK_MSG = 'System check identified no issues'
class AdminScriptTestCase(unittest.TestCase):
    """Base harness for the admin-script tests.

    Creates a throwaway ``test_project`` package in a temp directory, writes
    settings files into it on demand, and runs django-admin.py / manage.py
    as subprocesses with a controlled environment.
    """

    @classmethod
    def setUpClass(cls):
        super(AdminScriptTestCase, cls).setUpClass()
        # One temp project dir per test class, made importable as a package.
        cls.test_dir = os.path.realpath(os.path.join(
            tempfile.gettempdir(),
            cls.__name__,
            'test_project',
        ))
        if not os.path.exists(cls.test_dir):
            os.makedirs(cls.test_dir)
        with open(os.path.join(cls.test_dir, '__init__.py'), 'w'):
            pass

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.test_dir)
        super(AdminScriptTestCase, cls).tearDownClass()

    def write_settings(self, filename, apps=None, is_dir=False, sdict=None, extra=None):
        """Write a settings module (file or package) into the test project.

        Copies DATABASES/ROOT_URLCONF/SECRET_KEY from the running test
        settings, then appends INSTALLED_APPS and any extra values.
        """
        if is_dir:
            settings_dir = os.path.join(self.test_dir, filename)
            os.mkdir(settings_dir)
            settings_file_path = os.path.join(settings_dir, '__init__.py')
        else:
            settings_file_path = os.path.join(self.test_dir, filename)

        with open(settings_file_path, 'w') as settings_file:
            settings_file.write('# -*- coding: utf-8 -*\n')
            settings_file.write('# Settings file automatically generated by admin_scripts test case\n')
            if extra:
                settings_file.write("%s\n" % extra)
            exports = [
                'DATABASES',
                'ROOT_URLCONF',
                'SECRET_KEY',
            ]
            for s in exports:
                if hasattr(settings, s):
                    o = getattr(settings, s)
                    # Quote scalars so the written file is valid Python.
                    if not isinstance(o, (dict, tuple, list)):
                        o = "'%s'" % o
                    settings_file.write("%s = %s\n" % (s, o))

            if apps is None:
                apps = ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts']

            settings_file.write("INSTALLED_APPS = %s\n" % apps)

            if sdict:
                for k, v in sdict.items():
                    settings_file.write("%s = %s\n" % (k, v))

    def remove_settings(self, filename, is_dir=False):
        """Delete a previously written settings module and its bytecode."""
        full_name = os.path.join(self.test_dir, filename)
        if is_dir:
            shutil.rmtree(full_name)
        else:
            os.remove(full_name)

        # Also try to remove the compiled file; if it exists, it could
        # mess up later tests that depend upon the .py file not existing
        try:
            if sys.platform.startswith('java'):
                # Jython produces module$py.class files
                os.remove(re.sub(r'\.py$', '$py.class', full_name))
            else:
                # CPython produces module.pyc files
                os.remove(full_name + 'c')
        except OSError:
            pass
        # Also remove a __pycache__ directory, if it exists
        cache_name = os.path.join(self.test_dir, '__pycache__')
        if os.path.isdir(cache_name):
            shutil.rmtree(cache_name)

    def _ext_backend_paths(self):
        """
        Returns the paths for any external backend packages.
        """
        paths = []
        first_package_re = re.compile(r'(^[^\.]+)\.')
        for backend in settings.DATABASES.values():
            result = first_package_re.findall(backend['ENGINE'])
            if result and result != ['django']:
                backend_pkg = __import__(result[0])
                backend_dir = os.path.dirname(backend_pkg.__file__)
                paths.append(os.path.dirname(backend_dir))
        return paths

    def run_test(self, script, args, settings_file=None, apps=None):
        """Run `script args` in a subprocess and return (stdout, stderr).

        The subprocess gets a PYTHONPATH pointing at this Django checkout
        and a DJANGO_SETTINGS_MODULE taken from `settings_file` (or removed
        entirely when none is given).
        """
        base_dir = os.path.dirname(self.test_dir)
        # The base dir for Django's tests is one level up.
        tests_dir = os.path.dirname(os.path.dirname(upath(__file__)))
        # The base dir for Django is one level above the test dir. We don't use
        # `import django` to figure that out, so we don't pick up a Django
        # from site-packages or similar.
        django_dir = os.path.dirname(tests_dir)
        ext_backend_base_dirs = self._ext_backend_paths()

        # Define a temporary environment for the subprocess
        test_environ = os.environ.copy()
        if sys.platform.startswith('java'):
            python_path_var_name = 'JYTHONPATH'
        else:
            python_path_var_name = 'PYTHONPATH'

        old_cwd = os.getcwd()

        # Set the test environment
        if settings_file:
            test_environ['DJANGO_SETTINGS_MODULE'] = str(settings_file)
        elif 'DJANGO_SETTINGS_MODULE' in test_environ:
            del test_environ['DJANGO_SETTINGS_MODULE']
        python_path = [base_dir, django_dir, tests_dir]
        python_path.extend(ext_backend_base_dirs)
        # Use native strings for better compatibility
        test_environ[str(python_path_var_name)] = npath(os.pathsep.join(python_path))
        test_environ[str('PYTHONWARNINGS')] = str('')

        # Move to the test directory and run
        os.chdir(self.test_dir)
        out, err = subprocess.Popen([sys.executable, script] + args,
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                    env=test_environ, universal_newlines=True).communicate()
        # Move back to the old working directory
        os.chdir(old_cwd)

        return out, err

    def run_django_admin(self, args, settings_file=None):
        """Run django-admin.py (from the Django checkout) with `args`."""
        script_dir = os.path.abspath(os.path.join(os.path.dirname(upath(django.__file__)), 'bin'))
        return self.run_test(os.path.join(script_dir, 'django-admin.py'), args, settings_file)

    def run_manage(self, args, settings_file=None):
        """Render the project template's manage.py into the test dir and run it."""
        def safe_remove(path):
            try:
                os.remove(path)
            except OSError:
                pass

        conf_dir = os.path.dirname(upath(conf.__file__))
        template_manage_py = os.path.join(conf_dir, 'project_template', 'manage.py')

        test_manage_py = os.path.join(self.test_dir, 'manage.py')
        shutil.copyfile(template_manage_py, test_manage_py)

        with open(test_manage_py, 'r') as fp:
            manage_py_contents = fp.read()
        manage_py_contents = manage_py_contents.replace(
            "{{ project_name }}", "test_project")
        with open(test_manage_py, 'w') as fp:
            fp.write(manage_py_contents)
        self.addCleanup(safe_remove, test_manage_py)

        return self.run_test('./manage.py', args, settings_file)

    def assertNoOutput(self, stream):
        "Utility assertion: assert that the given stream is empty"
        self.assertEqual(len(stream), 0, "Stream should be empty: actually contains '%s'" % stream)

    def assertOutput(self, stream, msg, regex=False):
        "Utility assertion: assert that the given message exists in the output"
        stream = force_text(stream)
        if regex:
            self.assertIsNotNone(re.search(msg, stream),
                "'%s' does not match actual output text '%s'" % (msg, stream))
        else:
            self.assertIn(msg, stream, "'%s' does not match actual output text '%s'" % (msg, stream))

    def assertNotInOutput(self, stream, msg):
        "Utility assertion: assert that the given message doesn't exist in the output"
        stream = force_text(stream)
        self.assertNotIn(msg, stream, "'%s' matches actual output text '%s'" % (msg, stream))
##########################################################################
# DJANGO ADMIN TESTS
# This first series of test classes checks the environment processing
# of the django-admin.py script
##########################################################################
class DjangoAdminNoSettings(AdminScriptTestCase):
    "A series of tests for django-admin.py when there is no settings.py file."

    def test_builtin_command(self):
        "no settings: django-admin builtin commands fail with an error when no settings provided"
        stdout, stderr = self.run_django_admin(['check', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, 'settings are not configured')

    def test_builtin_with_bad_settings(self):
        "no settings: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        stdout, stderr = self.run_django_admin(
            ['check', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No module named '?bad_settings'?", regex=True)

    def test_builtin_with_bad_environment(self):
        "no settings: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        stdout, stderr = self.run_django_admin(['check', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No module named '?bad_settings'?", regex=True)
class DjangoAdminDefaultSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when using a settings.py file that
    contains the test application.
    """
    def setUp(self):
        self.write_settings('settings.py')

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_builtin_command(self):
        "default: django-admin builtin commands fail with an error when no settings provided"
        stdout, stderr = self.run_django_admin(['check', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, 'settings are not configured')

    def test_builtin_with_settings(self):
        "default: django-admin builtin commands succeed if settings are provided as argument"
        stdout, stderr = self.run_django_admin(
            ['check', '--settings=test_project.settings', 'admin_scripts'])
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, SYSTEM_CHECK_MSG)

    def test_builtin_with_environment(self):
        "default: django-admin builtin commands succeed if settings are provided in the environment"
        stdout, stderr = self.run_django_admin(['check', 'admin_scripts'], 'test_project.settings')
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, SYSTEM_CHECK_MSG)

    def test_builtin_with_bad_settings(self):
        "default: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        stdout, stderr = self.run_django_admin(
            ['check', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No module named '?bad_settings'?", regex=True)

    def test_builtin_with_bad_environment(self):
        "default: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        stdout, stderr = self.run_django_admin(['check', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No module named '?bad_settings'?", regex=True)

    def test_custom_command(self):
        "default: django-admin can't execute user commands if it isn't provided settings"
        stdout, stderr = self.run_django_admin(['noargs_command'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No Django settings specified")
        self.assertOutput(stderr, "Unknown command: 'noargs_command'")

    def test_custom_command_with_settings(self):
        "default: django-admin can execute user commands if settings are provided as argument"
        stdout, stderr = self.run_django_admin(
            ['noargs_command', '--settings=test_project.settings'])
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, "EXECUTE: noargs_command")

    def test_custom_command_with_environment(self):
        "default: django-admin can execute user commands if settings are provided in environment"
        stdout, stderr = self.run_django_admin(['noargs_command'], 'test_project.settings')
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, "EXECUTE: noargs_command")
class DjangoAdminFullPathDefaultSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when using a settings.py file that
    contains the test application specified using a full path.
    """
    def setUp(self):
        installed = [
            'django.contrib.auth',
            'django.contrib.contenttypes',
            'admin_scripts',
            'admin_scripts.complex_app',
        ]
        self.write_settings('settings.py', installed)

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_builtin_command(self):
        "fulldefault: django-admin builtin commands fail with an error when no settings provided"
        stdout, stderr = self.run_django_admin(['check', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, 'settings are not configured')

    def test_builtin_with_settings(self):
        "fulldefault: django-admin builtin commands succeed if a settings file is provided"
        stdout, stderr = self.run_django_admin(
            ['check', '--settings=test_project.settings', 'admin_scripts'])
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, SYSTEM_CHECK_MSG)

    def test_builtin_with_environment(self):
        "fulldefault: django-admin builtin commands succeed if the environment contains settings"
        stdout, stderr = self.run_django_admin(['check', 'admin_scripts'], 'test_project.settings')
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, SYSTEM_CHECK_MSG)

    def test_builtin_with_bad_settings(self):
        "fulldefault: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        stdout, stderr = self.run_django_admin(
            ['check', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No module named '?bad_settings'?", regex=True)

    def test_builtin_with_bad_environment(self):
        "fulldefault: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        stdout, stderr = self.run_django_admin(['check', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No module named '?bad_settings'?", regex=True)

    def test_custom_command(self):
        "fulldefault: django-admin can't execute user commands unless settings are provided"
        stdout, stderr = self.run_django_admin(['noargs_command'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No Django settings specified")
        self.assertOutput(stderr, "Unknown command: 'noargs_command'")

    def test_custom_command_with_settings(self):
        "fulldefault: django-admin can execute user commands if settings are provided as argument"
        stdout, stderr = self.run_django_admin(
            ['noargs_command', '--settings=test_project.settings'])
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, "EXECUTE: noargs_command")

    def test_custom_command_with_environment(self):
        "fulldefault: django-admin can execute user commands if settings are provided in environment"
        stdout, stderr = self.run_django_admin(['noargs_command'], 'test_project.settings')
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, "EXECUTE: noargs_command")
class DjangoAdminMinimalSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when using a settings.py file that
    doesn't contain the test application.
    """
    def setUp(self):
        self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_builtin_command(self):
        "minimal: django-admin builtin commands fail with an error when no settings provided"
        stdout, stderr = self.run_django_admin(['check', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, 'settings are not configured')

    def test_builtin_with_settings(self):
        "minimal: django-admin builtin commands fail if settings are provided as argument"
        stdout, stderr = self.run_django_admin(
            ['check', '--settings=test_project.settings', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No installed app with label 'admin_scripts'.")

    def test_builtin_with_environment(self):
        "minimal: django-admin builtin commands fail if settings are provided in the environment"
        stdout, stderr = self.run_django_admin(['check', 'admin_scripts'], 'test_project.settings')
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No installed app with label 'admin_scripts'.")

    def test_builtin_with_bad_settings(self):
        "minimal: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        stdout, stderr = self.run_django_admin(
            ['check', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No module named '?bad_settings'?", regex=True)

    def test_builtin_with_bad_environment(self):
        "minimal: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        stdout, stderr = self.run_django_admin(['check', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No module named '?bad_settings'?", regex=True)

    def test_custom_command(self):
        "minimal: django-admin can't execute user commands unless settings are provided"
        stdout, stderr = self.run_django_admin(['noargs_command'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No Django settings specified")
        self.assertOutput(stderr, "Unknown command: 'noargs_command'")

    def test_custom_command_with_settings(self):
        "minimal: django-admin can't execute user commands, even if settings are provided as argument"
        stdout, stderr = self.run_django_admin(
            ['noargs_command', '--settings=test_project.settings'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Unknown command: 'noargs_command'")

    def test_custom_command_with_environment(self):
        "minimal: django-admin can't execute user commands, even if settings are provided in environment"
        stdout, stderr = self.run_django_admin(['noargs_command'], 'test_project.settings')
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "Unknown command: 'noargs_command'")
class DjangoAdminAlternateSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when using a settings file
    with a name other than 'settings.py'.
    """
    def setUp(self):
        self.write_settings('alternate_settings.py')

    def tearDown(self):
        self.remove_settings('alternate_settings.py')

    def test_builtin_command(self):
        "alternate: django-admin builtin commands fail with an error when no settings provided"
        stdout, stderr = self.run_django_admin(['check', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, 'settings are not configured')

    def test_builtin_with_settings(self):
        "alternate: django-admin builtin commands succeed if settings are provided as argument"
        stdout, stderr = self.run_django_admin(
            ['check', '--settings=test_project.alternate_settings', 'admin_scripts'])
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, SYSTEM_CHECK_MSG)

    def test_builtin_with_environment(self):
        "alternate: django-admin builtin commands succeed if settings are provided in the environment"
        stdout, stderr = self.run_django_admin(
            ['check', 'admin_scripts'], 'test_project.alternate_settings')
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, SYSTEM_CHECK_MSG)

    def test_builtin_with_bad_settings(self):
        "alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        stdout, stderr = self.run_django_admin(
            ['check', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No module named '?bad_settings'?", regex=True)

    def test_builtin_with_bad_environment(self):
        "alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        stdout, stderr = self.run_django_admin(['check', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No module named '?bad_settings'?", regex=True)

    def test_custom_command(self):
        "alternate: django-admin can't execute user commands unless settings are provided"
        stdout, stderr = self.run_django_admin(['noargs_command'])
        self.assertNoOutput(stdout)
        self.assertOutput(stderr, "No Django settings specified")
        self.assertOutput(stderr, "Unknown command: 'noargs_command'")

    def test_custom_command_with_settings(self):
        "alternate: django-admin can execute user commands if settings are provided as argument"
        stdout, stderr = self.run_django_admin(
            ['noargs_command', '--settings=test_project.alternate_settings'])
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, "EXECUTE: noargs_command")

    def test_custom_command_with_environment(self):
        "alternate: django-admin can execute user commands if settings are provided in environment"
        stdout, stderr = self.run_django_admin(
            ['noargs_command'], 'test_project.alternate_settings')
        self.assertNoOutput(stderr)
        self.assertOutput(stdout, "EXECUTE: noargs_command")
class DjangoAdminMultipleSettings(AdminScriptTestCase):
    """A series of tests for django-admin.py when multiple settings files
    (including the default 'settings.py') are available. The default settings
    file is insufficient for performing the operations described, so the
    alternate settings must be used by the running script.
    """
    def setUp(self):
        # The default settings file deliberately lacks the test app.
        self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
        self.write_settings('alternate_settings.py')

    def tearDown(self):
        self.remove_settings('settings.py')
        self.remove_settings('alternate_settings.py')

    def test_builtin_command(self):
        "multiple: django-admin builtin commands fail with an error when no settings provided"
        args = ['check', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')

    def test_builtin_with_settings(self):
        "multiple: django-admin builtin commands succeed if settings are provided as argument"
        args = ['check', '--settings=test_project.alternate_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, SYSTEM_CHECK_MSG)

    def test_builtin_with_environment(self):
        "multiple: django-admin builtin commands succeed if settings are provided in the environment"
        args = ['check', 'admin_scripts']
        out, err = self.run_django_admin(args, 'test_project.alternate_settings')
        self.assertNoOutput(err)
        self.assertOutput(out, SYSTEM_CHECK_MSG)

    def test_builtin_with_bad_settings(self):
        "multiple: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        args = ['check', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        # Missing in the original: every sibling class asserts that stdout
        # is empty for a bad --settings argument.
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)

    def test_builtin_with_bad_environment(self):
        "multiple: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        args = ['check', 'admin_scripts']
        out, err = self.run_django_admin(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)

    def test_custom_command(self):
        "multiple: django-admin can't execute user commands unless settings are provided"
        args = ['noargs_command']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No Django settings specified")
        self.assertOutput(err, "Unknown command: 'noargs_command'")

    def test_custom_command_with_settings(self):
        "multiple: django-admin can execute user commands if settings are provided as argument"
        args = ['noargs_command', '--settings=test_project.alternate_settings']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE: noargs_command")

    def test_custom_command_with_environment(self):
        "multiple: django-admin can execute user commands if settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_django_admin(args, 'test_project.alternate_settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE: noargs_command")
class DjangoAdminSettingsDirectory(AdminScriptTestCase):
    """
    A series of tests for django-admin.py when the settings file is in a
    directory. (see #9751).
    """

    def setUp(self):
        self.write_settings('settings', is_dir=True)

    def tearDown(self):
        self.remove_settings('settings', is_dir=True)

    def test_setup_environ(self):
        "directory: startapp creates the correct directory"
        args = ['startapp', 'settings_test']
        app_path = os.path.join(self.test_dir, 'settings_test')
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.addCleanup(shutil.rmtree, app_path)
        self.assertNoOutput(err)
        self.assertTrue(os.path.exists(app_path))
        with open(os.path.join(app_path, 'apps.py'), 'r') as f:
            content = f.read()
            self.assertIn("class SettingsTestConfig(AppConfig)", content)
            self.assertIn("name = 'settings_test'", content)
        if not PY3:
            # On Python 2 the generated models.py must carry the
            # unicode_literals future import.
            with open(os.path.join(app_path, 'models.py'), 'r') as fp:
                content = fp.read()
            self.assertIn(
                "from __future__ import unicode_literals\n",
                content,
            )

    def test_setup_environ_custom_template(self):
        "directory: startapp creates the correct directory with a custom template"
        template_path = os.path.join(custom_templates_dir, 'app_template')
        args = ['startapp', '--template', template_path, 'custom_settings_test']
        app_path = os.path.join(self.test_dir, 'custom_settings_test')
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.addCleanup(shutil.rmtree, app_path)
        self.assertNoOutput(err)
        self.assertTrue(os.path.exists(app_path))
        self.assertTrue(os.path.exists(os.path.join(app_path, 'api.py')))

    @unittest.skipIf(PY2, "Python 2 doesn't support Unicode package names.")
    def test_startapp_unicode_name(self):
        "directory: startapp creates the correct directory with unicode characters"
        args = ['startapp', 'こんにちは']
        app_path = os.path.join(self.test_dir, 'こんにちは')
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.addCleanup(shutil.rmtree, app_path)
        self.assertNoOutput(err)
        self.assertTrue(os.path.exists(app_path))
        with open(os.path.join(app_path, 'apps.py'), 'r', encoding='utf8') as f:
            content = f.read()
            self.assertIn("class こんにちはConfig(AppConfig)", content)
            self.assertIn("name = 'こんにちは'", content)

    def test_builtin_command(self):
        "directory: django-admin builtin commands fail with an error when no settings provided"
        args = ['check', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'settings are not configured')

    def test_builtin_with_bad_settings(self):
        "directory: django-admin builtin commands fail if settings file (from argument) doesn't exist"
        args = ['check', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        # Missing in the original: every sibling class asserts that stdout
        # is empty for a bad --settings argument.
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)

    def test_builtin_with_bad_environment(self):
        "directory: django-admin builtin commands fail if settings file (from environment) doesn't exist"
        args = ['check', 'admin_scripts']
        out, err = self.run_django_admin(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)

    def test_custom_command(self):
        "directory: django-admin can't execute user commands unless settings are provided"
        args = ['noargs_command']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No Django settings specified")
        self.assertOutput(err, "Unknown command: 'noargs_command'")

    def test_builtin_with_settings(self):
        "directory: django-admin builtin commands succeed if settings are provided as argument"
        args = ['check', '--settings=test_project.settings', 'admin_scripts']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, SYSTEM_CHECK_MSG)

    def test_builtin_with_environment(self):
        "directory: django-admin builtin commands succeed if settings are provided in the environment"
        args = ['check', 'admin_scripts']
        out, err = self.run_django_admin(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, SYSTEM_CHECK_MSG)
##########################################################################
# MANAGE.PY TESTS
# This next series of test classes checks the environment processing
# of the generated manage.py script
##########################################################################
class ManageNoSettings(AdminScriptTestCase):
    "A series of tests for manage.py when there is no settings.py file."

    def test_builtin_command(self):
        "no settings: manage.py builtin commands fail with an error when no settings provided"
        args = ['check', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        # Raw string: "\." is a *regex* escape, not a Python string escape.
        # In a plain string literal it raises an invalid-escape warning.
        self.assertOutput(err, r"No module named '?(test_project\.)?settings'?", regex=True)

    def test_builtin_with_bad_settings(self):
        "no settings: manage.py builtin commands fail if settings file (from argument) doesn't exist"
        args = ['check', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)

    def test_builtin_with_bad_environment(self):
        "no settings: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        args = ['check', 'admin_scripts']
        out, err = self.run_manage(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
class ManageDefaultSettings(AdminScriptTestCase):
    """A series of tests for manage.py when using a settings.py file that
    contains the test application.
    """
    def setUp(self):
        self.write_settings('settings.py')

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_builtin_command(self):
        "default: manage.py builtin commands succeed when default settings are appropriate"
        out, err = self.run_manage(['check', 'admin_scripts'])
        self.assertNoOutput(err)
        self.assertOutput(out, SYSTEM_CHECK_MSG)

    def test_builtin_with_settings(self):
        "default: manage.py builtin commands succeed if settings are provided as argument"
        out, err = self.run_manage(['check', '--settings=test_project.settings', 'admin_scripts'])
        self.assertNoOutput(err)
        self.assertOutput(out, SYSTEM_CHECK_MSG)

    def test_builtin_with_environment(self):
        "default: manage.py builtin commands succeed if settings are provided in the environment"
        out, err = self.run_manage(['check', 'admin_scripts'], 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, SYSTEM_CHECK_MSG)

    def test_builtin_with_bad_settings(self):
        "default: manage.py builtin commands succeed if settings file (from argument) doesn't exist"
        out, err = self.run_manage(['check', '--settings=bad_settings', 'admin_scripts'])
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)

    def test_builtin_with_bad_environment(self):
        "default: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        out, err = self.run_manage(['check', 'admin_scripts'], 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)

    def test_custom_command(self):
        "default: manage.py can execute user commands when default settings are appropriate"
        out, err = self.run_manage(['noargs_command'])
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE: noargs_command")

    def test_custom_command_with_settings(self):
        "default: manage.py can execute user commands when settings are provided as argument"
        out, err = self.run_manage(['noargs_command', '--settings=test_project.settings'])
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE: noargs_command")

    def test_custom_command_with_environment(self):
        "default: manage.py can execute user commands when settings are provided in environment"
        out, err = self.run_manage(['noargs_command'], 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE: noargs_command")
class ManageFullPathDefaultSettings(AdminScriptTestCase):
    """A series of tests for manage.py when using a settings.py file that
    contains the test application specified using a full path.
    """
    def setUp(self):
        # INSTALLED_APPS names 'admin_scripts' explicitly (full importable
        # path) instead of relying on the default app list.
        self.write_settings('settings.py', ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts'])
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_builtin_command(self):
        "fulldefault: manage.py builtin commands succeed when default settings are appropriate"
        args = ['check', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, SYSTEM_CHECK_MSG)
    def test_builtin_with_settings(self):
        "fulldefault: manage.py builtin commands succeed if settings are provided as argument"
        args = ['check', '--settings=test_project.settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, SYSTEM_CHECK_MSG)
    def test_builtin_with_environment(self):
        "fulldefault: manage.py builtin commands succeed if settings are provided in the environment"
        args = ['check', 'admin_scripts']
        out, err = self.run_manage(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, SYSTEM_CHECK_MSG)
    def test_builtin_with_bad_settings(self):
        "fulldefault: manage.py builtin commands succeed if settings file (from argument) doesn't exist"
        args = ['check', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
    def test_builtin_with_bad_environment(self):
        "fulldefault: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        args = ['check', 'admin_scripts']
        out, err = self.run_manage(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
    def test_custom_command(self):
        "fulldefault: manage.py can execute user commands when default settings are appropriate"
        args = ['noargs_command']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE: noargs_command")
    def test_custom_command_with_settings(self):
        "fulldefault: manage.py can execute user commands when settings are provided as argument"
        args = ['noargs_command', '--settings=test_project.settings']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE: noargs_command")
    def test_custom_command_with_environment(self):
        "fulldefault: manage.py can execute user commands when settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_manage(args, 'test_project.settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE: noargs_command")
class ManageMinimalSettings(AdminScriptTestCase):
    """A series of tests for manage.py when using a settings.py file that
    doesn't contain the test application.
    """
    def setUp(self):
        # Deliberately omit 'admin_scripts' from INSTALLED_APPS so commands
        # targeting it must fail.
        self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_builtin_command(self):
        "minimal: manage.py builtin commands fail with an error when no settings provided"
        args = ['check', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No installed app with label 'admin_scripts'.")
    def test_builtin_with_settings(self):
        "minimal: manage.py builtin commands fail if settings are provided as argument"
        args = ['check', '--settings=test_project.settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No installed app with label 'admin_scripts'.")
    def test_builtin_with_environment(self):
        "minimal: manage.py builtin commands fail if settings are provided in the environment"
        args = ['check', 'admin_scripts']
        out, err = self.run_manage(args, 'test_project.settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "No installed app with label 'admin_scripts'.")
    def test_builtin_with_bad_settings(self):
        "minimal: manage.py builtin commands fail if settings file (from argument) doesn't exist"
        args = ['check', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
    def test_builtin_with_bad_environment(self):
        "minimal: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        args = ['check', 'admin_scripts']
        out, err = self.run_manage(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
    def test_custom_command(self):
        "minimal: manage.py can't execute user commands without appropriate settings"
        args = ['noargs_command']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")
    def test_custom_command_with_settings(self):
        "minimal: manage.py can't execute user commands, even if settings are provided as argument"
        args = ['noargs_command', '--settings=test_project.settings']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")
    def test_custom_command_with_environment(self):
        "minimal: manage.py can't execute user commands, even if settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_manage(args, 'test_project.settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")
class ManageAlternateSettings(AdminScriptTestCase):
    """A series of tests for manage.py when using a settings file
    with a name other than 'settings.py'.
    """
    def setUp(self):
        self.write_settings('alternate_settings.py')

    def tearDown(self):
        self.remove_settings('alternate_settings.py')

    def test_builtin_command(self):
        "alternate: manage.py builtin commands fail with an error when no default settings provided"
        args = ['check', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        # Raw string: "\." is a regex escape; a plain string literal would
        # trigger an invalid-escape warning.
        self.assertOutput(err, r"No module named '?(test_project\.)?settings'?", regex=True)

    def test_builtin_with_settings(self):
        "alternate: manage.py builtin commands work with settings provided as argument"
        args = ['check', '--settings=alternate_settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertOutput(out, SYSTEM_CHECK_MSG)
        self.assertNoOutput(err)

    def test_builtin_with_environment(self):
        "alternate: manage.py builtin commands work if settings are provided in the environment"
        args = ['check', 'admin_scripts']
        out, err = self.run_manage(args, 'alternate_settings')
        self.assertOutput(out, SYSTEM_CHECK_MSG)
        self.assertNoOutput(err)

    def test_builtin_with_bad_settings(self):
        "alternate: manage.py builtin commands fail if settings file (from argument) doesn't exist"
        args = ['check', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)

    def test_builtin_with_bad_environment(self):
        "alternate: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        args = ['check', 'admin_scripts']
        out, err = self.run_manage(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)

    def test_custom_command(self):
        "alternate: manage.py can't execute user commands without settings"
        args = ['noargs_command']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        # Raw string for the same reason as test_builtin_command above.
        self.assertOutput(err, r"No module named '?(test_project\.)?settings'?", regex=True)

    def test_custom_command_with_settings(self):
        "alternate: manage.py can execute user commands if settings are provided as argument"
        args = ['noargs_command', '--settings=alternate_settings']
        out, err = self.run_manage(args)
        self.assertOutput(
            out,
            "EXECUTE: noargs_command options=[('no_color', False), "
            "('pythonpath', None), ('settings', 'alternate_settings'), "
            "('traceback', False), ('verbosity', 1)]"
        )
        self.assertNoOutput(err)

    def test_custom_command_with_environment(self):
        "alternate: manage.py can execute user commands if settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_manage(args, 'alternate_settings')
        self.assertOutput(
            out,
            "EXECUTE: noargs_command options=[('no_color', False), "
            "('pythonpath', None), ('settings', None), ('traceback', False), "
            "('verbosity', 1)]"
        )
        self.assertNoOutput(err)

    def test_custom_command_output_color(self):
        "alternate: manage.py output syntax color can be deactivated with the `--no-color` option"
        args = ['noargs_command', '--no-color', '--settings=alternate_settings']
        out, err = self.run_manage(args)
        self.assertOutput(
            out,
            "EXECUTE: noargs_command options=[('no_color', True), "
            "('pythonpath', None), ('settings', 'alternate_settings'), "
            "('traceback', False), ('verbosity', 1)]"
        )
        self.assertNoOutput(err)
class ManageMultipleSettings(AdminScriptTestCase):
    """A series of tests for manage.py when multiple settings files
    (including the default 'settings.py') are available. The default settings
    file is insufficient for performing the operations described, so the
    alternate settings must be used by the running script.
    """
    def setUp(self):
        # Default settings omit 'admin_scripts'; alternate settings include it.
        self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
        self.write_settings('alternate_settings.py')
    def tearDown(self):
        self.remove_settings('settings.py')
        self.remove_settings('alternate_settings.py')
    def test_builtin_command(self):
        "multiple: manage.py builtin commands fail with an error when no settings provided"
        args = ['check', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No installed app with label 'admin_scripts'.")
    def test_builtin_with_settings(self):
        "multiple: manage.py builtin commands succeed if settings are provided as argument"
        args = ['check', '--settings=alternate_settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, SYSTEM_CHECK_MSG)
    def test_builtin_with_environment(self):
        "multiple: manage.py can execute builtin commands if settings are provided in the environment"
        args = ['check', 'admin_scripts']
        out, err = self.run_manage(args, 'alternate_settings')
        self.assertNoOutput(err)
        self.assertOutput(out, SYSTEM_CHECK_MSG)
    def test_builtin_with_bad_settings(self):
        "multiple: manage.py builtin commands fail if settings file (from argument) doesn't exist"
        args = ['check', '--settings=bad_settings', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
    def test_builtin_with_bad_environment(self):
        "multiple: manage.py builtin commands fail if settings file (from environment) doesn't exist"
        args = ['check', 'admin_scripts']
        out, err = self.run_manage(args, 'bad_settings')
        self.assertNoOutput(out)
        self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
    def test_custom_command(self):
        "multiple: manage.py can't execute user commands using default settings"
        args = ['noargs_command']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Unknown command: 'noargs_command'")
    def test_custom_command_with_settings(self):
        "multiple: manage.py can execute user commands if settings are provided as argument"
        args = ['noargs_command', '--settings=alternate_settings']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE: noargs_command")
    def test_custom_command_with_environment(self):
        "multiple: manage.py can execute user commands if settings are provided in environment"
        args = ['noargs_command']
        out, err = self.run_manage(args, 'alternate_settings')
        self.assertNoOutput(err)
        self.assertOutput(out, "EXECUTE: noargs_command")
class ManageSettingsWithSettingsErrors(AdminScriptTestCase):
    """
    Tests for manage.py when using the default settings.py file containing
    runtime errors.
    """
    # No setUp(): each test writes its own (broken) settings.py; tearDown()
    # cleans up whichever file the test created.
    def tearDown(self):
        self.remove_settings('settings.py')
    def write_settings_with_import_error(self, filename):
        # Write a settings module whose import fails at load time.
        settings_file_path = os.path.join(self.test_dir, filename)
        with open(settings_file_path, 'w') as settings_file:
            settings_file.write('# Settings file automatically generated by admin_scripts test case\n')
            settings_file.write('# The next line will cause an import error:\nimport foo42bar\n')
    def test_import_error(self):
        """
        import error: manage.py builtin commands shows useful diagnostic info
        when settings with import errors is provided (#14130).
        """
        self.write_settings_with_import_error('settings.py')
        args = ['check', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        # The traceback must name the missing module, not be swallowed.
        self.assertOutput(err, "No module named")
        self.assertOutput(err, "foo42bar")
    def test_attribute_error(self):
        """
        manage.py builtin commands does not swallow attribute error due to bad
        settings (#18845).
        """
        self.write_settings('settings.py', sdict={'BAD_VAR': 'INSTALLED_APPS.crash'})
        args = ['collectstatic', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "AttributeError: 'list' object has no attribute 'crash'")
    def test_key_error(self):
        # A KeyError raised while evaluating settings must surface verbatim.
        self.write_settings('settings.py', sdict={'BAD_VAR': 'DATABASES["blah"]'})
        args = ['collectstatic', 'admin_scripts']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "KeyError: 'blah'")
    def test_help(self):
        """
        Test listing available commands output note when only core commands are
        available.
        """
        # MEDIA_URL without a trailing slash makes the settings invalid, so
        # app-provided commands can't be loaded and only core ones are listed.
        self.write_settings('settings.py', sdict={'MEDIA_URL': '"/no_ending_slash"'})
        args = ['help']
        out, err = self.run_manage(args)
        self.assertOutput(out, 'only Django core commands are listed')
        self.assertNoOutput(err)
class ManageCheck(AdminScriptTestCase):
    """Tests for the behavior and output format of `manage.py check`."""
    # No setUp(): each test writes its own settings.py with the app list it
    # needs; tearDown() removes it.
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_nonexistent_app(self):
        """ manage.py check reports an error on a non-existent app in
        INSTALLED_APPS """
        self.write_settings('settings.py',
            apps=['admin_scriptz.broken_app'],
            sdict={'USE_I18N': False})
        args = ['check']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'ImportError')
        self.assertOutput(err, 'No module named')
        self.assertOutput(err, 'admin_scriptz')
    def test_broken_app(self):
        """ manage.py check reports an ImportError if an app's models.py
        raises one on import """
        self.write_settings('settings.py', apps=['admin_scripts.broken_app'])
        args = ['check']
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, 'ImportError')
    def test_complex_app(self):
        """ manage.py check does not raise an ImportError validating a
        complex app with nested calls to load_app """
        self.write_settings(
            'settings.py',
            apps=[
                'admin_scripts.complex_app',
                'admin_scripts.simple_app',
                'django.contrib.admin.apps.SimpleAdminConfig',
                'django.contrib.auth',
                'django.contrib.contenttypes',
            ],
            sdict={
                'DEBUG': True
            }
        )
        args = ['check']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertEqual(out, 'System check identified no issues (0 silenced).\n')
    def test_app_with_import(self):
        """ manage.py check does not raise errors when an app imports a base
        class that itself has an abstract base. """
        self.write_settings('settings.py',
            apps=['admin_scripts.app_with_import',
                  'django.contrib.auth',
                  'django.contrib.contenttypes',
                  'django.contrib.sites'],
            sdict={'DEBUG': True})
        args = ['check']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertEqual(out, 'System check identified no issues (0 silenced).\n')
    def test_output_format(self):
        """ All errors/warnings should be sorted by level and by message. """
        self.write_settings('settings.py',
            apps=['admin_scripts.app_raising_messages',
                  'django.contrib.auth',
                  'django.contrib.contenttypes'],
            sdict={'DEBUG': True})
        args = ['check']
        out, err = self.run_manage(args)
        # Exact expected stderr: sections sorted ERRORS before WARNINGS, and
        # messages sorted by object label within each section.
        expected_err = (
            "SystemCheckError: System check identified some issues:\n"
            "\n"
            "ERRORS:\n"
            "?: An error\n"
            "\tHINT: Error hint\n"
            "\n"
            "WARNINGS:\n"
            "a: Second warning\n"
            "obj: First warning\n"
            "\tHINT: Hint\n"
            "\n"
            "System check identified 3 issues (0 silenced).\n"
        )
        self.assertEqual(err, expected_err)
        self.assertNoOutput(out)
    def test_warning_does_not_halt(self):
        """
        When there are only warnings or less serious messages, then Django
        should not prevent user from launching their project, so `check`
        command should not raise `CommandError` exception.
        In this test we also test output format.
        """
        self.write_settings('settings.py',
            apps=['admin_scripts.app_raising_warning',
                  'django.contrib.auth',
                  'django.contrib.contenttypes'],
            sdict={'DEBUG': True})
        args = ['check']
        out, err = self.run_manage(args)
        expected_err = (
            "System check identified some issues:\n"  # No "CommandError: " part
            "\n"
            "WARNINGS:\n"
            "?: A warning\n"
            "\n"
            "System check identified 1 issue (0 silenced).\n"
        )
        self.assertEqual(err, expected_err)
        self.assertNoOutput(out)
class CustomTestRunner(DiscoverRunner):
    """Stub test runner used by ManageTestCommand; it runs no tests."""
    def __init__(self, *args, **kwargs):
        # The test command must not forward a 'liveserver' kwarg to the runner.
        assert 'liveserver' not in kwargs
        super(CustomTestRunner, self).__init__(*args, **kwargs)
    def run_tests(self, test_labels, extra_tests=None, **kwargs):
        # Intentionally a no-op: the surrounding tests only exercise option
        # handling, not actual test execution.
        pass
class ManageTestCommand(AdminScriptTestCase):
    """Tests for the option handling of the `test` management command."""
    def setUp(self):
        from django.core.management.commands.test import Command as TestCommand
        self.cmd = TestCommand()

    def test_liveserver(self):
        """
        Ensure that the --liveserver option sets the environment variable
        correctly.
        Refs #2879.
        """
        # Backup original state
        address_predefined = 'DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ
        old_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS')
        # Without --liveserver the env var must be left untouched.
        self.cmd.handle(verbosity=0, testrunner='admin_scripts.tests.CustomTestRunner')
        # Original state hasn't changed
        self.assertEqual('DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ, address_predefined)
        self.assertEqual(os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS'), old_address)
        self.cmd.handle(verbosity=0, testrunner='admin_scripts.tests.CustomTestRunner',
                        liveserver='blah')
        # Variable was correctly set
        self.assertEqual(os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'], 'blah')
        # Restore original state
        if address_predefined:
            os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = old_address
        else:
            del os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS']
class ManageRunserver(AdminScriptTestCase):
    """Tests for runserver's address/port parsing and migration checks."""
    def setUp(self):
        from django.core.management.commands.runserver import Command

        def monkey_run(*args, **options):
            # Replace Command.run so handle() parses options without actually
            # starting a server.
            return
        self.output = StringIO()
        self.cmd = Command(stdout=self.output)
        self.cmd.run = monkey_run

    def assertServerSettings(self, addr, port, ipv6=None, raw_ipv6=False):
        # Helper: check what the command parsed out of the addrport argument.
        self.assertEqual(self.cmd.addr, addr)
        self.assertEqual(self.cmd.port, port)
        self.assertEqual(self.cmd.use_ipv6, ipv6)
        self.assertEqual(self.cmd._raw_ipv6, raw_ipv6)

    def test_runserver_addrport(self):
        self.cmd.handle()
        self.assertServerSettings('127.0.0.1', '8000')

        self.cmd.handle(addrport="1.2.3.4:8000")
        self.assertServerSettings('1.2.3.4', '8000')

        self.cmd.handle(addrport="7000")
        self.assertServerSettings('127.0.0.1', '7000')

    @unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6")
    def test_runner_addrport_ipv6(self):
        self.cmd.handle(addrport="", use_ipv6=True)
        self.assertServerSettings('::1', '8000', ipv6=True, raw_ipv6=True)

        self.cmd.handle(addrport="7000", use_ipv6=True)
        self.assertServerSettings('::1', '7000', ipv6=True, raw_ipv6=True)

        # A bracketed literal IPv6 address implies use_ipv6.
        self.cmd.handle(addrport="[2001:0db8:1234:5678::9]:7000")
        self.assertServerSettings('2001:0db8:1234:5678::9', '7000', ipv6=True, raw_ipv6=True)

    def test_runner_hostname(self):
        self.cmd.handle(addrport="localhost:8000")
        self.assertServerSettings('localhost', '8000')

        self.cmd.handle(addrport="test.domain.local:7000")
        self.assertServerSettings('test.domain.local', '7000')

    @unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6")
    def test_runner_hostname_ipv6(self):
        self.cmd.handle(addrport="test.domain.local:7000", use_ipv6=True)
        self.assertServerSettings('test.domain.local', '7000', ipv6=True)

    def test_runner_ambiguous(self):
        # Only 4 characters, all of which could be in an ipv6 address
        self.cmd.handle(addrport="beef:7654")
        self.assertServerSettings('beef', '7654')

        # Uses only characters that could be in an ipv6 address
        self.cmd.handle(addrport="deadbeef:7654")
        self.assertServerSettings('deadbeef', '7654')

    def test_no_database(self):
        """
        Ensure runserver.check_migrations doesn't choke on empty DATABASES.
        """
        tested_connections = ConnectionHandler({})
        with mock.patch('django.core.management.commands.runserver.connections', new=tested_connections):
            self.cmd.check_migrations()

    def test_readonly_database(self):
        """
        Ensure runserver.check_migrations doesn't choke when a database is read-only
        (with possibly no django_migrations table).
        """
        with mock.patch.object(
                MigrationRecorder, 'ensure_schema',
                side_effect=MigrationSchemaMissing()):
            self.cmd.check_migrations()
        # Check a warning is emitted
        self.assertIn("Not checking migrations", self.output.getvalue())
class ManageRunserverEmptyAllowedHosts(AdminScriptTestCase):
    """runserver must refuse to start with DEBUG=False and empty ALLOWED_HOSTS."""
    def setUp(self):
        self.write_settings(
            'settings.py',
            sdict={'ALLOWED_HOSTS': [], 'DEBUG': False},
        )

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_empty_allowed_hosts_error(self):
        out, err = self.run_manage(['runserver'])
        self.assertNoOutput(out)
        self.assertOutput(err, 'CommandError: You must set settings.ALLOWED_HOSTS if DEBUG is False.')
class ManageTestserver(AdminScriptTestCase):
    # Imported at class level so the decorator below can reference the command
    # class as an attribute when patching.
    from django.core.management.commands.testserver import Command as TestserverCommand

    @mock.patch.object(TestserverCommand, 'handle')
    def test_testserver_handle_params(self, mock_handle):
        # call_command() must forward positional fixtures and the full set of
        # default options (with skip_checks=True) to Command.handle().
        out = StringIO()
        call_command('testserver', 'blah.json', stdout=out)
        mock_handle.assert_called_with(
            'blah.json',
            stdout=out, settings=None, pythonpath=None, verbosity=1,
            traceback=False, addrport='', no_color=False, use_ipv6=False,
            skip_checks=True, interactive=True,
        )
##########################################################################
# COMMAND PROCESSING TESTS
# Check that user-space commands are correctly handled - in particular,
# that arguments to the commands are correctly parsed and processed.
##########################################################################
class CommandTypes(AdminScriptTestCase):
"Tests for the various types of base command types that can be defined."
    def setUp(self):
        # Each test runs against a freshly generated default settings.py.
        self.write_settings('settings.py')
    def tearDown(self):
        # Remove the settings module created in setUp().
        self.remove_settings('settings.py')
def test_version(self):
"version is handled as a special case"
args = ['version']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, get_version())
def test_version_alternative(self):
"--version is equivalent to version"
args1, args2 = ['version'], ['--version']
# It's possible one outputs on stderr and the other on stdout, hence the set
self.assertEqual(set(self.run_manage(args1)), set(self.run_manage(args2)))
def test_help(self):
"help is handled as a special case"
args = ['help']
out, err = self.run_manage(args)
self.assertOutput(out, "Type 'manage.py help <subcommand>' for help on a specific subcommand.")
self.assertOutput(out, '[django]')
self.assertOutput(out, 'startapp')
self.assertOutput(out, 'startproject')
    def test_help_commands(self):
        "help --commands shows the list of all available commands"
        args = ['help', '--commands']
        out, err = self.run_manage(args)
        # The bare command list must omit usage text and section headers...
        self.assertNotInOutput(out, 'usage:')
        self.assertNotInOutput(out, 'Options:')
        self.assertNotInOutput(out, '[django]')
        # ...while still listing the builtin commands...
        self.assertOutput(out, 'startapp')
        self.assertOutput(out, 'startproject')
        # ...with no blank lines between entries.
        self.assertNotInOutput(out, '\n\n')
def test_help_alternative(self):
"--help is equivalent to help"
args1, args2 = ['help'], ['--help']
self.assertEqual(self.run_manage(args1), self.run_manage(args2))
    def test_help_short_altert(self):
        "-h is handled as a short form of --help"
        # NOTE(review): "altert" in the method name looks like a typo for
        # "alternative"; renaming would change the published test name, so it
        # is left as-is.
        args1, args2 = ['--help'], ['-h']
        self.assertEqual(self.run_manage(args1), self.run_manage(args2))
def test_specific_help(self):
"--help can be used on a specific command"
args = ['check', '--help']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "Checks the entire Django project for potential problems.")
def test_color_style(self):
style = color.no_style()
self.assertEqual(style.ERROR('Hello, world!'), 'Hello, world!')
style = color.make_style('nocolor')
self.assertEqual(style.ERROR('Hello, world!'), 'Hello, world!')
style = color.make_style('dark')
self.assertIn('Hello, world!', style.ERROR('Hello, world!'))
self.assertNotEqual(style.ERROR('Hello, world!'), 'Hello, world!')
# Default palette has color.
style = color.make_style('')
self.assertIn('Hello, world!', style.ERROR('Hello, world!'))
self.assertNotEqual(style.ERROR('Hello, world!'), 'Hello, world!')
    def test_command_color(self):
        # A command writing with an explicit style colorizes its output when
        # the terminal supports color, and writes plain text otherwise.
        class Command(BaseCommand):
            requires_system_checks = False

            def handle(self, *args, **options):
                self.stdout.write('Hello, world!', self.style.ERROR)
                self.stderr.write('Hello, world!', self.style.ERROR)
        out = StringIO()
        err = StringIO()
        command = Command(stdout=out, stderr=err)
        command.execute()
        if color.supports_color():
            # Colored: the text is wrapped in escape codes, so it is contained
            # in, but not equal to, the raw message.
            self.assertIn('Hello, world!\n', out.getvalue())
            self.assertIn('Hello, world!\n', err.getvalue())
            self.assertNotEqual(out.getvalue(), 'Hello, world!\n')
            self.assertNotEqual(err.getvalue(), 'Hello, world!\n')
        else:
            self.assertEqual(out.getvalue(), 'Hello, world!\n')
            self.assertEqual(err.getvalue(), 'Hello, world!\n')
    def test_command_no_color(self):
        "--no-color prevent colorization of the output"
        class Command(BaseCommand):
            requires_system_checks = False

            def handle(self, *args, **options):
                self.stdout.write('Hello, world!', self.style.ERROR)
                self.stderr.write('Hello, world!', self.style.ERROR)
        # no_color passed to the constructor disables colorization.
        out = StringIO()
        err = StringIO()
        command = Command(stdout=out, stderr=err, no_color=True)
        command.execute()
        self.assertEqual(out.getvalue(), 'Hello, world!\n')
        self.assertEqual(err.getvalue(), 'Hello, world!\n')
        # no_color passed as an execute() option disables it as well.
        out = StringIO()
        err = StringIO()
        command = Command(stdout=out, stderr=err)
        command.execute(no_color=True)
        self.assertEqual(out.getvalue(), 'Hello, world!\n')
        self.assertEqual(err.getvalue(), 'Hello, world!\n')
    def test_custom_stdout(self):
        # A stdout override passed to execute() must win over the one given
        # to the constructor.
        class Command(BaseCommand):
            requires_system_checks = False

            def handle(self, *args, **options):
                self.stdout.write("Hello, World!")
        out = StringIO()
        command = Command(stdout=out)
        command.execute()
        self.assertEqual(out.getvalue(), "Hello, World!\n")
        # Empty the original buffer so the next check can prove nothing new
        # was written to it.
        out.truncate(0)
        new_out = StringIO()
        command.execute(stdout=new_out)
        self.assertEqual(out.getvalue(), "")
        self.assertEqual(new_out.getvalue(), "Hello, World!\n")
def test_custom_stderr(self):
    """A stream passed as ``stderr`` receives error output, and a stream
    given to ``execute()`` overrides the constructor-time one."""
    class Command(BaseCommand):
        requires_system_checks = False

        def handle(self, *args, **options):
            self.stderr.write("Hello, World!")

    initial_stream = StringIO()
    command = Command(stderr=initial_stream)
    command.execute()
    self.assertEqual(initial_stream.getvalue(), "Hello, World!\n")

    # An execute()-time stream takes precedence over the constructor's.
    initial_stream.truncate(0)
    override_stream = StringIO()
    command.execute(stderr=override_stream)
    self.assertEqual(initial_stream.getvalue(), "")
    self.assertEqual(override_stream.getvalue(), "Hello, World!\n")
def test_base_command(self):
    "User BaseCommands can execute when a label is provided"
    self._test_base_command(['base_command', 'testlabel'], "('testlabel',)")
def test_base_command_no_label(self):
    "User BaseCommands can execute when no labels are provided"
    self._test_base_command(['base_command'], "()")
def test_base_command_multiple_label(self):
    # Docstring corrected: this test supplies multiple labels (the old text
    # was copy-pasted from the no-label test above).
    "User BaseCommands can execute when multiple labels are provided"
    args = ['base_command', 'testlabel', 'anotherlabel']
    expected_labels = "('testlabel', 'anotherlabel')"
    self._test_base_command(args, expected_labels)
def test_base_command_with_option(self):
    "User BaseCommands can execute with options when a label is provided"
    argv = ['base_command', 'testlabel', '--option_a=x']
    self._test_base_command(argv, "('testlabel',)", option_a="'x'")
def test_base_command_with_options(self):
    "User BaseCommands can execute with multiple options when a label is provided"
    argv = ['base_command', 'testlabel', '-a', 'x', '--option_b=y']
    self._test_base_command(argv, "('testlabel',)", option_a="'x'", option_b="'y'")
def test_base_command_with_wrong_option(self):
    "User BaseCommands outputs command usage when wrong option is specified"
    stdout_text, stderr_text = self.run_manage(['base_command', '--invalid'])
    self.assertNoOutput(stdout_text)
    for fragment in ("usage: manage.py base_command",
                     "error: unrecognized arguments: --invalid"):
        self.assertOutput(stderr_text, fragment)
def _test_base_command(self, args, labels, option_a="'1'", option_b="'2'"):
    """Run ``args`` through manage.py and assert base_command echoed the
    expected ``labels`` and options. The option defaults mirror the
    defaults declared by the base_command test command itself."""
    out, err = self.run_manage(args)
    expected_out = (
        "EXECUTE:BaseCommand labels=%s, "
        "options=[('no_color', False), ('option_a', %s), ('option_b', %s), "
        "('option_c', '3'), ('pythonpath', None), ('settings', None), "
        "('traceback', False), ('verbosity', 1)]") % (labels, option_a, option_b)
    self.assertNoOutput(err)
    self.assertOutput(out, expected_out)
def test_base_run_from_argv(self):
    """
    Test run_from_argv properly terminates even with custom execute() (#19665)
    Also test proper traceback display.
    """
    err = StringIO()
    command = BaseCommand(stderr=err)

    def raise_command_error(*args, **kwargs):
        raise CommandError("Custom error")

    # lambda takes one positional arg but run_from_argv passes none,
    # so calling it will trigger TypeError.
    command.execute = lambda args: args

    # If the Exception is not CommandError it should always
    # raise the original exception.
    with self.assertRaises(TypeError):
        command.run_from_argv(['', ''])

    # If the Exception is CommandError and --traceback is not present
    # this command should raise a SystemExit and don't print any
    # traceback to the stderr.
    command.execute = raise_command_error
    err.truncate(0)
    with self.assertRaises(SystemExit):
        command.run_from_argv(['', ''])
    err_message = err.getvalue()
    self.assertNotIn("Traceback", err_message)
    self.assertIn("CommandError", err_message)

    # If the Exception is CommandError and --traceback is present
    # this command should raise the original CommandError as if it
    # were not a CommandError.
    err.truncate(0)
    with self.assertRaises(CommandError):
        command.run_from_argv(['', '', '--traceback'])
def test_run_from_argv_non_ascii_error(self):
    """
    Test that non-ASCII message of CommandError does not raise any
    UnicodeDecodeError in run_from_argv.
    """
    def raise_command_error(*args, **kwargs):
        raise CommandError("Erreur personnalisée")

    command = BaseCommand(stderr=StringIO())
    command.execute = raise_command_error
    # A CommandError exits via SystemExit; writing the non-ASCII message
    # to stderr must not blow up on the way out.
    with self.assertRaises(SystemExit):
        command.run_from_argv(['', ''])
def test_run_from_argv_closes_connections(self):
    """
    A command called from the command line should close connections after
    being executed (#21255).
    """
    command = BaseCommand(stderr=StringIO())
    # Stub out checks and handle() so only the connection bookkeeping runs.
    command.check = lambda: []
    command.handle = lambda *args, **kwargs: args
    with mock.patch('django.core.management.base.connections') as mock_connections:
        command.run_from_argv(['', ''])
    # Test connections have been closed
    self.assertTrue(mock_connections.close_all.called)
def test_noargs(self):
    "NoArg Commands can be executed"
    args = ['noargs_command']
    out, err = self.run_manage(args)
    self.assertNoOutput(err)
    # The command echoes its parsed options; only the global defaults appear.
    self.assertOutput(
        out,
        "EXECUTE: noargs_command options=[('no_color', False), "
        "('pythonpath', None), ('settings', None), ('traceback', False), "
        "('verbosity', 1)]"
    )
def test_noargs_with_args(self):
    "NoArg Commands raise an error if an argument is provided"
    args = ['noargs_command', 'argument']
    out, err = self.run_manage(args)
    self.assertOutput(err, "error: unrecognized arguments: argument")
def test_app_command(self):
    "User AppCommands can execute when a single app name is provided"
    args = ['app_command', 'auth']
    out, err = self.run_manage(args)
    self.assertNoOutput(err)
    # The command echoes the resolved app config name plus parsed options.
    self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.auth, options=")
    self.assertOutput(
        out,
        ", options=[('no_color', False), ('pythonpath', None), "
        "('settings', None), ('traceback', False), ('verbosity', 1)]"
    )
def test_app_command_no_apps(self):
    "User AppCommands raise an error when no app name is provided"
    args = ['app_command']
    out, err = self.run_manage(args)
    self.assertOutput(err, 'error: Enter at least one application label.')
def test_app_command_multiple_apps(self):
    # Docstring corrected: the test asserts success (no stderr) for both
    # apps; the old text ("raise an error") contradicted the assertions.
    "User AppCommands can execute when multiple app names are provided"
    args = ['app_command', 'auth', 'contenttypes']
    out, err = self.run_manage(args)
    self.assertNoOutput(err)
    # handle_app_config() runs once per app, in the given order.
    self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.auth, options=")
    self.assertOutput(
        out,
        ", options=[('no_color', False), ('pythonpath', None), "
        "('settings', None), ('traceback', False), ('verbosity', 1)]"
    )
    self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.contenttypes, options=")
    self.assertOutput(
        out,
        ", options=[('no_color', False), ('pythonpath', None), "
        "('settings', None), ('traceback', False), ('verbosity', 1)]"
    )
def test_app_command_invalid_app_label(self):
    # Docstring corrected: this test asserts an error for an invalid label;
    # the old text was copy-pasted from test_app_command.
    "User AppCommands raise an error when an invalid app label is provided"
    args = ['app_command', 'NOT_AN_APP']
    out, err = self.run_manage(args)
    self.assertOutput(err, "No installed app with label 'NOT_AN_APP'.")
def test_app_command_some_invalid_app_labels(self):
    "User AppCommands raise an error when some of the provided app names are invalid"
    args = ['app_command', 'auth', 'NOT_AN_APP']
    out, err = self.run_manage(args)
    self.assertOutput(err, "No installed app with label 'NOT_AN_APP'.")
def test_label_command(self):
    "User LabelCommands can execute when a label is provided"
    args = ['label_command', 'testlabel']
    out, err = self.run_manage(args)
    self.assertNoOutput(err)
    self.assertOutput(
        out,
        "EXECUTE:LabelCommand label=testlabel, options=[('no_color', False), "
        "('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]"
    )
def test_label_command_no_label(self):
    "User LabelCommands raise an error if no label is provided"
    args = ['label_command']
    out, err = self.run_manage(args)
    self.assertOutput(err, 'Enter at least one label')
def test_label_command_multiple_label(self):
    "User LabelCommands are executed multiple times if multiple labels are provided"
    args = ['label_command', 'testlabel', 'anotherlabel']
    out, err = self.run_manage(args)
    self.assertNoOutput(err)
    # handle_label() runs once per label, each echoing the same options.
    self.assertOutput(
        out,
        "EXECUTE:LabelCommand label=testlabel, options=[('no_color', False), "
        "('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]"
    )
    self.assertOutput(
        out,
        "EXECUTE:LabelCommand label=anotherlabel, options=[('no_color', False), "
        "('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]"
    )
class Discovery(SimpleTestCase):

    def test_precedence(self):
        """
        Apps listed first in INSTALLED_APPS have precedence.
        """
        # The 'duplicate' command exists in both apps; whichever app is
        # listed first must win.
        cases = [
            ('complex_app',
             ['admin_scripts.complex_app', 'admin_scripts.simple_app']),
            ('simple_app',
             ['admin_scripts.simple_app', 'admin_scripts.complex_app']),
        ]
        for expected, app_order in cases:
            installed = app_order + ['django.contrib.auth',
                                     'django.contrib.contenttypes']
            with self.settings(INSTALLED_APPS=installed):
                captured = StringIO()
                call_command('duplicate', stdout=captured)
                self.assertEqual(captured.getvalue().strip(), expected)
class ArgumentOrder(AdminScriptTestCase):
    """Tests for 2-stage argument parsing scheme.

    django-admin command arguments are parsed in 2 parts; the core arguments
    (--settings, --traceback and --pythonpath) are parsed using a basic parser,
    ignoring any unknown options. Then the full settings are
    passed to the command parser, which extracts commands of interest to the
    individual command.
    """

    def setUp(self):
        self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
        self.write_settings('alternate_settings.py')

    def tearDown(self):
        self.remove_settings('settings.py')
        self.remove_settings('alternate_settings.py')

    def test_setting_then_option(self):
        """ Options passed after settings are correctly handled. """
        args = ['base_command', 'testlabel', '--settings=alternate_settings', '--option_a=x']
        self._test(args)

    def test_setting_then_short_option(self):
        """ Short options passed after settings are correctly handled. """
        args = ['base_command', 'testlabel', '--settings=alternate_settings', '-a', 'x']
        self._test(args)

    def test_option_then_setting(self):
        """ Options passed before settings are correctly handled. """
        args = ['base_command', 'testlabel', '--option_a=x', '--settings=alternate_settings']
        self._test(args)

    def test_short_option_then_setting(self):
        """ Short options passed before settings are correctly handled. """
        args = ['base_command', 'testlabel', '-a', 'x', '--settings=alternate_settings']
        self._test(args)

    def test_option_then_setting_then_option(self):
        """ Options are correctly handled when they are passed before and after
        a setting. """
        args = ['base_command', 'testlabel', '--option_a=x', '--settings=alternate_settings', '--option_b=y']
        self._test(args, option_b="'y'")

    def _test(self, args, option_b="'2'"):
        # Every ordering variant must produce the same parsed result,
        # regardless of where --settings appeared on the command line.
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(
            out,
            "EXECUTE:BaseCommand labels=('testlabel',), options=[('no_color', False), "
            "('option_a', 'x'), ('option_b', %s), ('option_c', '3'), "
            "('pythonpath', None), ('settings', 'alternate_settings'), "
            "('traceback', False), ('verbosity', 1)]" % option_b
        )
@override_settings(ROOT_URLCONF='admin_scripts.urls')
class StartProject(LiveServerTestCase, AdminScriptTestCase):
    # LiveServerTestCase is used so the URL-based tests below can fetch
    # project-template tarballs from self.live_server_url.
    available_apps = [
        'admin_scripts',
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
    ]

    def test_wrong_args(self):
        "Make sure passing the wrong kinds of arguments outputs an error and prints usage"
        out, err = self.run_django_admin(['startproject'])
        self.assertNoOutput(out)
        self.assertOutput(err, "usage:")
        self.assertOutput(err, "You must provide a project name.")

    def test_simple_project(self):
        "Make sure the startproject management command creates a project"
        args = ['startproject', 'testproject']
        testproject_dir = os.path.join(self.test_dir, 'testproject')
        # ignore_errors=True: the directory may not exist if the run failed.
        self.addCleanup(shutil.rmtree, testproject_dir, True)

        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))

        # running again..
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "already exists")

    def test_invalid_project_name(self):
        "Make sure the startproject management command validates a project name"
        for bad_name in ('7testproject', '../testproject'):
            args = ['startproject', bad_name]
            testproject_dir = os.path.join(self.test_dir, bad_name)
            self.addCleanup(shutil.rmtree, testproject_dir, True)

            out, err = self.run_django_admin(args)
            # The validation error wording differs between Python 2 and 3.
            if PY2:
                self.assertOutput(
                    err,
                    "Error: '%s' is not a valid project name. Please make "
                    "sure the name begins with a letter or underscore." % bad_name
                )
            else:
                self.assertOutput(
                    err,
                    "Error: '%s' is not a valid project name. Please make "
                    "sure the name is a valid identifier." % bad_name
                )
            self.assertFalse(os.path.exists(testproject_dir))

    def test_simple_project_different_directory(self):
        "Make sure the startproject management command creates a project in a specific directory"
        args = ['startproject', 'testproject', 'othertestproject']
        testproject_dir = os.path.join(self.test_dir, 'othertestproject')
        os.mkdir(testproject_dir)
        self.addCleanup(shutil.rmtree, testproject_dir)

        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'manage.py')))

        # running again..
        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "already exists")

    def test_custom_project_template(self):
        "Make sure the startproject management command is able to use a different project template"
        template_path = os.path.join(custom_templates_dir, 'project_template')
        args = ['startproject', '--template', template_path, 'customtestproject']
        testproject_dir = os.path.join(self.test_dir, 'customtestproject')
        self.addCleanup(shutil.rmtree, testproject_dir, True)

        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))

    def test_template_dir_with_trailing_slash(self):
        "Ticket 17475: Template dir passed has a trailing path separator"
        template_path = os.path.join(custom_templates_dir, 'project_template' + os.sep)
        args = ['startproject', '--template', template_path, 'customtestproject']
        testproject_dir = os.path.join(self.test_dir, 'customtestproject')
        self.addCleanup(shutil.rmtree, testproject_dir, True)

        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))

    def test_custom_project_template_from_tarball_by_path(self):
        "Make sure the startproject management command is able to use a different project template from a tarball"
        template_path = os.path.join(custom_templates_dir, 'project_template.tgz')
        args = ['startproject', '--template', template_path, 'tarballtestproject']
        testproject_dir = os.path.join(self.test_dir, 'tarballtestproject')
        self.addCleanup(shutil.rmtree, testproject_dir, True)

        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))

    def test_custom_project_template_from_tarball_to_alternative_location(self):
        "Startproject can use a project template from a tarball and create it in a specified location"
        template_path = os.path.join(custom_templates_dir, 'project_template.tgz')
        args = ['startproject', '--template', template_path, 'tarballtestproject', 'altlocation']
        testproject_dir = os.path.join(self.test_dir, 'altlocation')
        os.mkdir(testproject_dir)
        self.addCleanup(shutil.rmtree, testproject_dir)

        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))

    def test_custom_project_template_from_tarball_by_url(self):
        """
        The startproject management command is able to use a different project
        template from a tarball via a URL.
        """
        template_url = '%s/custom_templates/project_template.tgz' % self.live_server_url

        args = ['startproject', '--template', template_url, 'urltestproject']
        testproject_dir = os.path.join(self.test_dir, 'urltestproject')
        self.addCleanup(shutil.rmtree, testproject_dir, True)

        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))

    def test_project_template_tarball_url(self):
        "Startproject management command handles project template tar/zip balls from non-canonical urls"
        # Note the trailing slash: the URL does not end with the archive name.
        template_url = '%s/custom_templates/project_template.tgz/' % self.live_server_url

        args = ['startproject', '--template', template_url, 'urltestproject']
        testproject_dir = os.path.join(self.test_dir, 'urltestproject')
        self.addCleanup(shutil.rmtree, testproject_dir, True)

        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))

    def test_file_without_extension(self):
        "Make sure the startproject management command is able to render custom files"
        template_path = os.path.join(custom_templates_dir, 'project_template')
        args = ['startproject', '--template', template_path, 'customtestproject', '-e', 'txt', '-n', 'Procfile']
        testproject_dir = os.path.join(self.test_dir, 'customtestproject')
        self.addCleanup(shutil.rmtree, testproject_dir, True)

        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))
        base_path = os.path.join(testproject_dir, 'additional_dir')
        # Every rendered file must carry the project-name-substituted content.
        for f in ('Procfile', 'additional_file.py', 'requirements.txt'):
            self.assertTrue(os.path.exists(os.path.join(base_path, f)))
            with open(os.path.join(base_path, f)) as fh:
                self.assertEqual(fh.read().strip(),
                                 '# some file for customtestproject test project')

    def test_custom_project_template_context_variables(self):
        "Make sure template context variables are rendered with proper values"
        template_path = os.path.join(custom_templates_dir, 'project_template')
        args = ['startproject', '--template', template_path, 'another_project', 'project_dir']
        testproject_dir = os.path.join(self.test_dir, 'project_dir')
        os.mkdir(testproject_dir)
        self.addCleanup(shutil.rmtree, testproject_dir)

        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        test_manage_py = os.path.join(testproject_dir, 'manage.py')
        with open(test_manage_py, 'r') as fp:
            content = force_text(fp.read())
            self.assertIn("project_name = 'another_project'", content)
            self.assertIn("project_directory = '%s'" % testproject_dir, content)

    def test_no_escaping_of_project_variables(self):
        "Make sure template context variables are not html escaped"
        # We're using a custom command so we need the alternate settings
        self.write_settings('alternate_settings.py')
        self.addCleanup(self.remove_settings, 'alternate_settings.py')
        template_path = os.path.join(custom_templates_dir, 'project_template')
        args = [
            'custom_startproject', '--template', template_path,
            'another_project', 'project_dir', '--extra', '<&>',
            '--settings=alternate_settings',
        ]
        testproject_dir = os.path.join(self.test_dir, 'project_dir')
        os.mkdir(testproject_dir)
        self.addCleanup(shutil.rmtree, testproject_dir)

        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        test_manage_py = os.path.join(testproject_dir, 'additional_dir', 'extra.py')
        with open(test_manage_py, 'r') as fp:
            content = fp.read()
            self.assertIn("<&>", content)

    def test_custom_project_destination_missing(self):
        """
        Make sure an exception is raised when the provided
        destination directory doesn't exist
        """
        template_path = os.path.join(custom_templates_dir, 'project_template')
        args = ['startproject', '--template', template_path, 'yet_another_project', 'project_dir2']
        testproject_dir = os.path.join(self.test_dir, 'project_dir2')

        out, err = self.run_django_admin(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "Destination directory '%s' does not exist, please create it first." % testproject_dir)
        self.assertFalse(os.path.exists(testproject_dir))

    def test_custom_project_template_with_non_ascii_templates(self):
        """
        The startproject management command is able to render templates with
        non-ASCII content.
        """
        template_path = os.path.join(custom_templates_dir, 'project_template')
        args = ['startproject', '--template', template_path, '--extension=txt', 'customtestproject']
        testproject_dir = os.path.join(self.test_dir, 'customtestproject')
        self.addCleanup(shutil.rmtree, testproject_dir, True)

        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertTrue(os.path.isdir(testproject_dir))
        path = os.path.join(testproject_dir, 'ticket-18091-non-ascii-template.txt')
        with codecs.open(path, 'r', encoding='utf-8') as f:
            self.assertEqual(f.read().splitlines(False), [
                'Some non-ASCII text for testing ticket #18091:',
                'üäö €'])
class DiffSettings(AdminScriptTestCase):
    """Tests for diffsettings management command."""

    def _run_diffsettings(self, sdict, args):
        """Write a throwaway settings module, run diffsettings over it and
        return its stdout (asserting stderr is empty)."""
        self.write_settings('settings_to_diff.py', sdict=sdict)
        self.addCleanup(self.remove_settings, 'settings_to_diff.py')
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        return out

    def test_basic(self):
        """Runs without error and emits settings diff."""
        out = self._run_diffsettings(
            {'FOO': '"bar"'},
            ['diffsettings', '--settings=settings_to_diff'])
        self.assertOutput(out, "FOO = 'bar' ###")

    def test_all(self):
        """The all option also shows settings with the default value."""
        out = self._run_diffsettings(
            {'STATIC_URL': 'None'},
            ['diffsettings', '--settings=settings_to_diff', '--all'])
        self.assertOutput(out, "### STATIC_URL = None")
class Dumpdata(AdminScriptTestCase):
    """Tests for dumpdata management command."""

    def setUp(self):
        self.write_settings('settings.py')

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_pks_parsing(self):
        """Regression for #20509

        --pks with anything but exactly one model must print an error
        message rather than raise an exception.
        """
        stdout_text, stderr_text = self.run_manage(['dumpdata', '--pks=1'])
        self.assertOutput(stderr_text, "You can only use --pks option with one model")
        self.assertNoOutput(stdout_text)
class MainModule(AdminScriptTestCase):
    """python -m django works like django-admin."""

    def test_runs_django_admin(self):
        # Both entry points must report the identical version string.
        admin_out, _ = self.run_django_admin(['--version'])
        module_out, _ = self.run_test('-m', ['django', '--version'])
        self.assertEqual(module_out, admin_out)
| bsd-3-clause |
saydulk/django | django/utils/version.py | 344 | 2445 | from __future__ import unicode_literals
import datetime
import os
import subprocess
from django.utils.lru_cache import lru_cache
def get_version(version=None):
    "Returns a PEP 386-compliant version number from VERSION."
    version = get_complete_version(version)

    # The number is assembled as main + suffix where main is X.Y[.Z] and
    # the suffix depends on the release stage:
    #   .devN      pre-alpha development snapshot (timestamp of git HEAD)
    #   {a|b|c}N   alpha / beta / release-candidate
    #   (empty)    final release
    main = get_main_version(version)

    suffix = ''
    if version[3] == 'alpha' and version[4] == 0:
        git_changeset = get_git_changeset()
        if git_changeset:
            suffix = '.dev%s' % git_changeset
    elif version[3] != 'final':
        stage_letter = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}[version[3]]
        suffix = stage_letter + str(version[4])

    return str(main + suffix)
def get_main_version(version=None):
    "Returns main version (X.Y[.Z]) from VERSION."
    version = get_complete_version(version)
    # Drop the micro component when it is zero (1, 8, 0 -> "1.8").
    count = 3 if version[2] else 2
    return '.'.join(str(part) for part in version[:count])
def get_complete_version(version=None):
    """Returns a tuple of the django version. If version argument is non-empty,
    then checks for correctness of the tuple provided.
    """
    if version is not None:
        # Validate the caller-supplied 5-tuple: (major, minor, micro,
        # release stage, stage number).
        assert len(version) == 5
        assert version[3] in ('alpha', 'beta', 'rc', 'final')
        return version
    # Fall back to the version of the installed django itself.
    from django import VERSION
    return VERSION
def get_docs_version(version=None):
    # Docs for any pre-final release live under the "dev" version; final
    # releases get their own X.Y docs.
    version = get_complete_version(version)
    return 'dev' if version[3] != 'final' else '%d.%d' % version[:2]
@lru_cache()
def get_git_changeset():
    """Returns a numeric identifier of the latest git changeset.

    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
    This value isn't guaranteed to be unique, but collisions are very unlikely,
    so it's sufficient for generating the development version numbers.
    Returns None when the timestamp cannot be determined (not a git checkout,
    git missing, or unparsable output).
    """
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    try:
        # Pass the command as an argument list instead of shell=True: no shell
        # parsing is needed for a fixed command, and it avoids masking a
        # missing git binary behind empty output.
        git_log = subprocess.Popen(
            ['git', 'log', '--pretty=format:%ct', '--quiet', '-1', 'HEAD'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            cwd=repo_dir, universal_newlines=True)
        timestamp = git_log.communicate()[0]
    except OSError:
        # git is not installed; with shell=True this surfaced as empty output.
        return None
    try:
        timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
    except ValueError:
        return None
    return timestamp.strftime('%Y%m%d%H%M%S')
| bsd-3-clause |
tensorflow/tensorflow | tensorflow/python/keras/layers/preprocessing/hashing_test.py | 6 | 12401 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hashing layer."""
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers.preprocessing import hashing
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class HashingTest(keras_parameterized.TestCase):
def test_hash_single_bin(self):
    """With a single bin, every input must land in bucket 0."""
    layer = hashing.Hashing(num_bins=1)
    values = np.asarray([['A'], ['B'], ['C'], ['D'], ['E']])
    hashed = layer(values)
    self.assertAllClose([[0], [0], [0], [0], [0]], hashed)
def test_hash_dense_input_farmhash(self):
    """Dense string input is bucketed deterministically (unsalted hash)."""
    layer = hashing.Hashing(num_bins=2)
    inp = np.asarray([['omar'], ['stringer'], ['marlo'], ['wire'],
                      ['skywalker']])
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    self.assertAllClose([[0], [0], [1], [0], [0]], output)
def test_hash_dense_input_mask_value_farmhash(self):
    """mask_value reserves bin 0 and shifts all other buckets up by one."""
    empty_mask_layer = hashing.Hashing(num_bins=3, mask_value='')
    omar_mask_layer = hashing.Hashing(num_bins=3, mask_value='omar')
    inp = np.asarray([['omar'], ['stringer'], ['marlo'], ['wire'],
                      ['skywalker']])
    empty_mask_output = empty_mask_layer(inp)
    omar_mask_output = omar_mask_layer(inp)
    # Outputs should be one more than test_hash_dense_input_farmhash (the zeroth
    # bin is now reserved for masks).
    self.assertAllClose([[1], [1], [2], [1], [1]], empty_mask_output)
    # 'omar' should map to 0.
    self.assertAllClose([[0], [1], [2], [1], [1]], omar_mask_output)
def test_hash_dense_list_input_farmhash(self):
    """Plain Python lists (nested and flat) hash like ndarray input."""
    layer = hashing.Hashing(num_bins=2)
    inp = [['omar'], ['stringer'], ['marlo'], ['wire'], ['skywalker']]
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    self.assertAllClose([[0], [0], [1], [0], [0]], output)
    # A flat list yields a rank-1 result with the same bucket assignments.
    inp = ['omar', 'stringer', 'marlo', 'wire', 'skywalker']
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    self.assertAllClose([0, 0, 1, 0, 0], output)
def test_hash_dense_int_input_farmhash(self):
    """Integer input is supported and bucketed deterministically."""
    layer = hashing.Hashing(num_bins=3)
    inp = np.asarray([[0], [1], [2], [3], [4]])
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    self.assertAllClose([[1], [0], [1], [0], [2]], output)
def test_hash_dense_input_siphash(self):
    """Supplying a salt changes the hash function, and different salts
    produce different bucket assignments."""
    layer = hashing.Hashing(num_bins=2, salt=[133, 137])
    inp = np.asarray([['omar'], ['stringer'], ['marlo'], ['wire'],
                      ['skywalker']])
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    # Note the result is different from FarmHash.
    self.assertAllClose([[0], [1], [0], [1], [0]], output)

    layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])
    output_2 = layer_2(inp)
    # Note the result is different from (133, 137).
    self.assertAllClose([[1], [0], [1], [0], [1]], output_2)
def test_hash_dense_int_input_siphash(self):
    """Salted hashing works for integer input too."""
    layer = hashing.Hashing(num_bins=3, salt=[133, 137])
    inp = np.asarray([[0], [1], [2], [3], [4]])
    output = layer(inp)
    # Assert equal for hashed output that should be true on all platforms.
    self.assertAllClose([[1], [1], [2], [0], [1]], output)
def test_hash_sparse_input_farmhash(self):
    """SparseTensor values are hashed; the indices pass through untouched."""
    layer = hashing.Hashing(num_bins=2)
    indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
    inp = sparse_tensor.SparseTensor(
        indices=indices,
        values=['omar', 'stringer', 'marlo', 'wire', 'skywalker'],
        dense_shape=[3, 2])
    output = layer(inp)
    self.assertAllClose(indices, output.indices)
    self.assertAllClose([0, 0, 1, 0, 0], output.values)
def test_hash_sparse_input_mask_value_farmhash(self):
    """mask_value reserves bin 0 for sparse input as well."""
    empty_mask_layer = hashing.Hashing(num_bins=3, mask_value='')
    omar_mask_layer = hashing.Hashing(num_bins=3, mask_value='omar')
    indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
    inp = sparse_tensor.SparseTensor(
        indices=indices,
        values=['omar', 'stringer', 'marlo', 'wire', 'skywalker'],
        dense_shape=[3, 2])
    empty_mask_output = empty_mask_layer(inp)
    omar_mask_output = omar_mask_layer(inp)
    # Hashing must not disturb the sparsity structure.
    self.assertAllClose(indices, omar_mask_output.indices)
    self.assertAllClose(indices, empty_mask_output.indices)
    # Outputs should be one more than test_hash_sparse_input_farmhash (the
    # zeroth bin is now reserved for masks).
    self.assertAllClose([1, 1, 2, 1, 1], empty_mask_output.values)
    # 'omar' should map to 0.
    self.assertAllClose([0, 1, 2, 1, 1], omar_mask_output.values)
def test_hash_sparse_int_input_farmhash(self):
    """Sparse integer values hash like their dense counterparts."""
    layer = hashing.Hashing(num_bins=3)
    indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
    inp = sparse_tensor.SparseTensor(
        indices=indices, values=[0, 1, 2, 3, 4], dense_shape=[3, 2])
    output = layer(inp)
    self.assertAllClose(indices, output.indices)
    self.assertAllClose([1, 0, 1, 0, 2], output.values)
def test_hash_sparse_input_siphash(self):
    """Salted hashing of sparse string values matches the dense results."""
    layer = hashing.Hashing(num_bins=2, salt=[133, 137])
    indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
    inp = sparse_tensor.SparseTensor(
        indices=indices,
        values=['omar', 'stringer', 'marlo', 'wire', 'skywalker'],
        dense_shape=[3, 2])
    output = layer(inp)
    self.assertAllClose(output.indices, indices)
    # The result should be same with test_hash_dense_input_siphash.
    self.assertAllClose([0, 1, 0, 1, 0], output.values)

    layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])
    output = layer_2(inp)
    # The result should be same with test_hash_dense_input_siphash.
    self.assertAllClose([1, 0, 1, 0, 1], output.values)
def test_hash_sparse_int_input_siphash(self):
    """Salted hashing of sparse integer values."""
    layer = hashing.Hashing(num_bins=3, salt=[133, 137])
    indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]
    inp = sparse_tensor.SparseTensor(
        indices=indices, values=[0, 1, 2, 3, 4], dense_shape=[3, 2])
    output = layer(inp)
    self.assertAllClose(indices, output.indices)
    self.assertAllClose([1, 1, 2, 0, 1], output.values)
def test_hash_ragged_string_input_farmhash(self):
    """Ragged string input hashes per value and also works when the layer
    is used inside a functional Model."""
    layer = hashing.Hashing(num_bins=2)
    inp_data = ragged_factory_ops.constant(
        [['omar', 'stringer', 'marlo', 'wire'], ['marlo', 'skywalker', 'wire']],
        dtype=dtypes.string)
    out_data = layer(inp_data)
    # Same hashed output as test_hash_sparse_input_farmhash
    expected_output = [[0, 0, 1, 0], [1, 0, 0]]
    self.assertAllEqual(expected_output, out_data)

    inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=dtypes.string)
    out_t = layer(inp_t)
    model = training.Model(inputs=inp_t, outputs=out_t)
    self.assertAllClose(out_data, model.predict(inp_data))
  def test_hash_ragged_input_mask_value(self):
    # mask_value reserves bin 0 for the masked token and shifts all other
    # hashed values up by one, for ragged inputs.
    empty_mask_layer = hashing.Hashing(num_bins=3, mask_value='')
    omar_mask_layer = hashing.Hashing(num_bins=3, mask_value='omar')
    inp_data = ragged_factory_ops.constant(
        [['omar', 'stringer', 'marlo', 'wire'], ['marlo', 'skywalker', 'wire']],
        dtype=dtypes.string)
    empty_mask_output = empty_mask_layer(inp_data)
    omar_mask_output = omar_mask_layer(inp_data)
    # Outputs should be one more than test_hash_ragged_string_input_farmhash
    # (the zeroth bin is now reserved for masks).
    expected_output = [[1, 1, 2, 1], [2, 1, 1]]
    self.assertAllClose(expected_output, empty_mask_output)
    # 'omar' should map to 0.
    expected_output = [[0, 1, 2, 1], [2, 1, 1]]
    self.assertAllClose(expected_output, omar_mask_output)
  def test_hash_ragged_int_input_farmhash(self):
    layer = hashing.Hashing(num_bins=3)
    inp_data = ragged_factory_ops.constant([[0, 1, 3, 4], [2, 1, 0]],
                                           dtype=dtypes.int64)
    out_data = layer(inp_data)
    # Same per-value mapping as test_hash_sparse_int_input_farmhash
    # (0->1, 1->0, 2->1, 3->0, 4->2); the original comment referenced the
    # string test by mistake.
    expected_output = [[1, 0, 0, 2], [1, 0, 1]]
    self.assertAllEqual(expected_output, out_data)
    inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=dtypes.int64)
    out_t = layer(inp_t)
    model = training.Model(inputs=inp_t, outputs=out_t)
    self.assertAllClose(out_data, model.predict(inp_data))
  def test_hash_ragged_string_input_siphash(self):
    # Salted (siphash path) hashing of ragged strings, eager and inside a
    # functional model; a second salt must change the bucketing.
    layer = hashing.Hashing(num_bins=2, salt=[133, 137])
    inp_data = ragged_factory_ops.constant(
        [['omar', 'stringer', 'marlo', 'wire'], ['marlo', 'skywalker', 'wire']],
        dtype=dtypes.string)
    out_data = layer(inp_data)
    # Same hashed output as test_hash_dense_input_siphash
    expected_output = [[0, 1, 0, 1], [0, 0, 1]]
    self.assertAllEqual(expected_output, out_data)
    inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=dtypes.string)
    out_t = layer(inp_t)
    model = training.Model(inputs=inp_t, outputs=out_t)
    self.assertAllClose(out_data, model.predict(inp_data))
    layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])
    out_data = layer_2(inp_data)
    expected_output = [[1, 0, 1, 0], [1, 1, 0]]
    self.assertAllEqual(expected_output, out_data)
    out_t = layer_2(inp_t)
    model = training.Model(inputs=inp_t, outputs=out_t)
    self.assertAllClose(out_data, model.predict(inp_data))
  def test_hash_ragged_int_input_siphash(self):
    layer = hashing.Hashing(num_bins=3, salt=[133, 137])
    inp_data = ragged_factory_ops.constant([[0, 1, 3, 4], [2, 1, 0]],
                                           dtype=dtypes.int64)
    out_data = layer(inp_data)
    # Same per-value mapping as test_hash_sparse_int_input_siphash
    # (0->1, 1->1, 2->2, 3->0, 4->1); the original comment referenced the
    # farmhash test by mistake.
    expected_output = [[1, 1, 0, 1], [2, 1, 1]]
    self.assertAllEqual(expected_output, out_data)
    inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=dtypes.int64)
    out_t = layer(inp_t)
    model = training.Model(inputs=inp_t, outputs=out_t)
    self.assertAllClose(out_data, model.predict(inp_data))
  def test_invalid_inputs(self):
    # Constructor validation: num_bins must be a positive integer, and
    # salt, when given, must be a pair of Python ints (tensors rejected).
    with self.assertRaisesRegex(ValueError, 'cannot be `None`'):
      _ = hashing.Hashing(num_bins=None)
    with self.assertRaisesRegex(ValueError, 'cannot be `None`'):
      _ = hashing.Hashing(num_bins=-1)
    with self.assertRaisesRegex(ValueError, 'can only be a tuple of size 2'):
      _ = hashing.Hashing(num_bins=2, salt='string')
    with self.assertRaisesRegex(ValueError, 'can only be a tuple of size 2'):
      _ = hashing.Hashing(num_bins=2, salt=[1])
    with self.assertRaisesRegex(ValueError, 'can only be a tuple of size 2'):
      _ = hashing.Hashing(num_bins=1, salt=constant_op.constant([133, 137]))
  def test_hash_compute_output_signature(self):
    # compute_output_signature keeps the input shape and maps string
    # inputs to an int64 spec (hashed bin indices).
    input_shape = tensor_shape.TensorShape([2, 3])
    input_spec = tensor_spec.TensorSpec(input_shape, dtypes.string)
    layer = hashing.Hashing(num_bins=2)
    output_spec = layer.compute_output_signature(input_spec)
    self.assertEqual(output_spec.shape.dims, input_shape.dims)
    self.assertEqual(output_spec.dtype, dtypes.int64)
  @testing_utils.run_v2_only
  def test_config_with_custom_name(self):
    # get_config/from_config round-trip must preserve the layer name.
    layer = hashing.Hashing(num_bins=2, name='hashing')
    config = layer.get_config()
    layer_1 = hashing.Hashing.from_config(config)
    self.assertEqual(layer_1.name, layer.name)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
matthiascy/panda3d | direct/src/directtools/DirectGrid.py | 8 | 5373 |
from panda3d.core import *
from direct.showbase.DirectObject import DirectObject
from DirectUtil import *
from DirectGeometry import *
class DirectGrid(NodePath, DirectObject):
    # A 3-D editing grid for the direct tools: a translucent backing plane,
    # minor/major/center line geometry, and a small sphere marking the
    # current snap-to-grid point.
    def __init__(self,gridSize=100.0,gridSpacing=5.0,planeColor=(0.5,0.5,0.5,0.5),parent = None):
        """Build the grid geometry and enable it.

        gridSize    -- half-extent of the grid in world units
        gridSpacing -- distance between adjacent grid lines
        planeColor  -- RGBA color of the backing plane
        parent      -- node to attach to; defaults to base.direct.group
        """
        # Initialize superclass
        NodePath.__init__(self, 'DirectGrid')
        # Don't wireframe or light
        useDirectRenderStyle(self)
        # Load up grid parts to initialize grid object
        # Polygon used to mark grid plane
        self.gridBack = loader.loadModel('models/misc/gridBack')
        self.gridBack.reparentTo(self)
        self.gridBack.setColor(*planeColor)
        # Grid Lines
        self.lines = self.attachNewNode('gridLines')
        self.minorLines = LineNodePath(self.lines)
        self.minorLines.lineNode.setName('minorLines')
        self.minorLines.setColor(VBase4(0.3, 0.55, 1, 1))
        self.minorLines.setThickness(1)
        self.majorLines = LineNodePath(self.lines)
        self.majorLines.lineNode.setName('majorLines')
        self.majorLines.setColor(VBase4(0.3, 0.55, 1, 1))
        self.majorLines.setThickness(5)
        self.centerLines = LineNodePath(self.lines)
        self.centerLines.lineNode.setName('centerLines')
        # NOTE(review): alpha component is 0 here; if alpha blending applies
        # the center lines would be invisible -- confirm this is intended.
        self.centerLines.setColor(VBase4(1, 0, 0, 0))
        self.centerLines.setThickness(3)
        # Small marker to hilight snap-to-grid point
        self.snapMarker = loader.loadModel('models/misc/sphere')
        self.snapMarker.node().setName('gridSnapMarker')
        self.snapMarker.reparentTo(self)
        self.snapMarker.setColor(1, 0, 0, 1)
        self.snapMarker.setScale(0.3)
        self.snapPos = Point3(0)
        # Initialize Grid characteristics
        self.fXyzSnap = 1  # snap translations to grid intersections
        self.fHprSnap = 1  # snap rotations to snapAngle increments
        self.gridSize = gridSize
        self.gridSpacing = gridSpacing
        self.snapAngle = 15.0
        self.enable(parent = parent)
    def enable(self, parent = None):
        """Attach the grid to parent (or base.direct.group) and redraw it."""
        if parent:
            self.reparentTo(parent)
        else:
            self.reparentTo(base.direct.group)
        self.updateGrid()
        self.fEnabled = 1
    def disable(self):
        """Detach the grid from the scene graph."""
        self.detachNode()
        self.fEnabled = 0
    def toggleGrid(self, parent = None):
        """Enable the grid if disabled, disable it if enabled."""
        if self.fEnabled:
            self.disable()
        else:
            self.enable(parent = parent)
    def isEnabled(self):
        return self.fEnabled
    def updateGrid(self):
        """Regenerate all line geometry for the current size/spacing."""
        # Update grid lines based upon current grid spacing and grid size
        # First reset existing grid lines
        self.minorLines.reset()
        self.majorLines.reset()
        self.centerLines.reset()
        # Now redraw lines
        numLines = int(math.ceil(self.gridSize/self.gridSpacing))
        scaledSize = numLines * self.gridSpacing
        center = self.centerLines
        minor = self.minorLines
        major = self.majorLines
        for i in range(-numLines, numLines + 1):
            if i == 0:
                # The two axis lines through the origin.
                center.moveTo(i * self.gridSpacing, -scaledSize, 0)
                center.drawTo(i * self.gridSpacing, scaledSize, 0)
                center.moveTo(-scaledSize, i * self.gridSpacing, 0)
                center.drawTo(scaledSize, i * self.gridSpacing, 0)
            else:
                if (i % 5) == 0:
                    # Every fifth line is a thicker "major" line.
                    major.moveTo(i * self.gridSpacing, -scaledSize, 0)
                    major.drawTo(i * self.gridSpacing, scaledSize, 0)
                    major.moveTo(-scaledSize, i * self.gridSpacing, 0)
                    major.drawTo(scaledSize, i * self.gridSpacing, 0)
                else:
                    minor.moveTo(i * self.gridSpacing, -scaledSize, 0)
                    minor.drawTo(i * self.gridSpacing, scaledSize, 0)
                    minor.moveTo(-scaledSize, i * self.gridSpacing, 0)
                    minor.drawTo(scaledSize, i * self.gridSpacing, 0)
        center.create()
        minor.create()
        major.create()
        if (self.gridBack):
            self.gridBack.setScale(scaledSize)
    def setXyzSnap(self, fSnap):
        self.fXyzSnap = fSnap
    def getXyzSnap(self):
        return self.fXyzSnap
    def setHprSnap(self, fSnap):
        self.fHprSnap = fSnap
    def getHprSnap(self):
        return self.fHprSnap
    def computeSnapPoint(self, point):
        """Return point snapped to the grid (if XYZ snap is on) and move
        the snap marker there."""
        # Start of with current point
        self.snapPos.assign(point)
        # Snap if necessary
        if self.fXyzSnap:
            self.snapPos.set(
                ROUND_TO(self.snapPos[0], self.gridSpacing),
                ROUND_TO(self.snapPos[1], self.gridSpacing),
                ROUND_TO(self.snapPos[2], self.gridSpacing))
        # Move snap marker to this point
        self.snapMarker.setPos(self.snapPos)
        # Return the hit point
        return self.snapPos
    def computeSnapAngle(self, angle):
        """Return angle rounded to the nearest snapAngle increment."""
        return ROUND_TO(angle, self.snapAngle)
    def setSnapAngle(self, angle):
        self.snapAngle = angle
    def getSnapAngle(self):
        return self.snapAngle
    def setGridSpacing(self, spacing):
        self.gridSpacing = spacing
        self.updateGrid()
    def getGridSpacing(self):
        return self.gridSpacing
    def setGridSize(self, size):
        # Set size of grid back and redraw lines
        self.gridSize = size
        self.updateGrid()
    def getGridSize(self):
        return self.gridSize
| bsd-3-clause |
Balachan27/django | django/core/management/__init__.py | 35 | 13771 | from __future__ import unicode_literals
import collections
import os
import pkgutil
import sys
from importlib import import_module
import django
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import (
BaseCommand, CommandError, CommandParser, handle_default_options,
)
from django.core.management.color import color_style
from django.utils import autoreload, lru_cache, six
from django.utils._os import npath, upath
def find_commands(management_dir):
    """
    Given a path to a management directory, returns a list of all the command
    names that are available.
    Returns an empty list if no commands are defined.
    """
    commands_path = os.path.join(management_dir, 'commands')
    found = []
    for _, module_name, is_package in pkgutil.iter_modules([npath(commands_path)]):
        # Private modules and subpackages are not commands.
        if not is_package and not module_name.startswith('_'):
            found.append(module_name)
    return found
def load_command_class(app_name, name):
    """
    Given a command name and an application name, returns the Command
    class instance. All errors raised by the import process
    (ImportError, AttributeError) are allowed to propagate.
    """
    module_path = '%s.management.commands.%s' % (app_name, name)
    command_module = import_module(module_path)
    return command_module.Command()
@lru_cache.lru_cache(maxsize=None)
def get_commands():
    """
    Returns a dictionary mapping command names to their callback applications.
    This works by looking for a management.commands package in django.core, and
    in each installed application -- if a commands package exists, all commands
    in that package are registered.
    Core commands are always included. If a settings module has been
    specified, user-defined commands will also be included.
    The dictionary is in the format {command_name: app_name}. Key-value
    pairs from this dictionary can then be used in calls to
    load_command_class(app_name, command_name)
    If a specific version of a command must be loaded (e.g., with the
    startapp command), the instantiated module can be placed in the
    dictionary in place of the application name.
    The dictionary is cached on the first call and reused on subsequent
    calls.
    """
    commands = {name: 'django.core' for name in find_commands(upath(__path__[0]))}
    # Without configured settings, only the built-in core commands exist.
    if not settings.configured:
        return commands
    # Iterate in reverse so that, on a name clash, the app listed earlier
    # in INSTALLED_APPS wins (later dict updates overwrite earlier ones).
    for app_config in reversed(list(apps.get_app_configs())):
        path = os.path.join(app_config.path, 'management')
        commands.update({name: app_config.name for name in find_commands(path)})
    return commands
def call_command(name, *args, **options):
    """
    Calls the given command, with the given options and args/kwargs.
    This is the primary API you should use for calling specific commands.
    Some examples:
        call_command('migrate')
        call_command('shell', plain=True)
        call_command('sqlmigrate', 'myapp')

    Raises CommandError if the command name is unknown.  Returns whatever
    the command's execute() returns.
    """
    # Load the command object.
    try:
        app_name = get_commands()[name]
    except KeyError:
        raise CommandError("Unknown command: %r" % name)
    if isinstance(app_name, BaseCommand):
        # If the command is already loaded, use it directly.
        command = app_name
    else:
        command = load_command_class(app_name, name)
    # Simulate argument parsing to get the option defaults (see #10080 for details).
    parser = command.create_parser('', name)
    # Use the `dest` option name from the parser option
    # (maps e.g. the long flag 'no-color' to its dest 'no_color' so callers
    # can pass keyword arguments by either spelling).
    opt_mapping = {
        sorted(s_opt.option_strings)[0].lstrip('-').replace('-', '_'): s_opt.dest
        for s_opt in parser._actions if s_opt.option_strings
    }
    arg_options = {opt_mapping.get(key, key): value for key, value in options.items()}
    # Parse positional args to pick up parser defaults, then let explicit
    # keyword options override them.
    defaults = parser.parse_args(args=args)
    defaults = dict(defaults._get_kwargs(), **arg_options)
    # Move positional args out of options to mimic legacy optparse
    args = defaults.pop('args', ())
    # System checks are skipped by default when calling commands in code;
    # pass skip_checks=False to run them.
    if 'skip_checks' not in options:
        defaults['skip_checks'] = True
    return command.execute(*args, **defaults)
class ManagementUtility(object):
    """
    Encapsulates the logic of the django-admin and manage.py utilities.
    A ManagementUtility has a number of commands, which can be manipulated
    by editing the self.commands dictionary.
    """
    def __init__(self, argv=None):
        self.argv = argv or sys.argv[:]
        self.prog_name = os.path.basename(self.argv[0])
        # Exception raised while importing settings, if any; used by
        # main_help_text() to warn that only core commands are shown.
        self.settings_exception = None
    def main_help_text(self, commands_only=False):
        """
        Returns the script's main help text, as a string.
        """
        if commands_only:
            usage = sorted(get_commands().keys())
        else:
            usage = [
                "",
                "Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,
                "",
                "Available subcommands:",
            ]
            # Group command names by the app that provides them.
            commands_dict = collections.defaultdict(lambda: [])
            for name, app in six.iteritems(get_commands()):
                if app == 'django.core':
                    app = 'django'
                else:
                    app = app.rpartition('.')[-1]
                commands_dict[app].append(name)
            style = color_style()
            for app in sorted(commands_dict.keys()):
                usage.append("")
                usage.append(style.NOTICE("[%s]" % app))
                for name in sorted(commands_dict[app]):
                    usage.append(" %s" % name)
            # Output an extra note if settings are not properly configured
            if self.settings_exception is not None:
                usage.append(style.NOTICE(
                    "Note that only Django core commands are listed "
                    "as settings are not properly configured (error: %s)."
                    % self.settings_exception))
        return '\n'.join(usage)
    def fetch_command(self, subcommand):
        """
        Tries to fetch the given subcommand, printing a message with the
        appropriate command called from the command line (usually
        "django-admin" or "manage.py") if it can't be found.
        """
        # Get commands outside of try block to prevent swallowing exceptions
        commands = get_commands()
        try:
            app_name = commands[subcommand]
        except KeyError:
            if os.environ.get('DJANGO_SETTINGS_MODULE'):
                # If `subcommand` is missing due to misconfigured settings, the
                # following line will retrigger an ImproperlyConfigured exception
                # (get_commands() swallows the original one) so the user is
                # informed about it.
                settings.INSTALLED_APPS
            else:
                sys.stderr.write("No Django settings specified.\n")
            sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n" %
                (subcommand, self.prog_name))
            sys.exit(1)
        if isinstance(app_name, BaseCommand):
            # If the command is already loaded, use it directly.
            klass = app_name
        else:
            klass = load_command_class(app_name, subcommand)
        return klass
    def autocomplete(self):
        """
        Output completion suggestions for BASH.
        The output of this function is passed to BASH's `COMREPLY` variable and
        treated as completion suggestions. `COMREPLY` expects a space
        separated string as the result.
        The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
        to get information about the cli input. Please refer to the BASH
        man-page for more information about this variables.
        Subcommand options are saved as pairs. A pair consists of
        the long option string (e.g. '--exclude') and a boolean
        value indicating if the option requires arguments. When printing to
        stdout, an equal sign is appended to options which require arguments.
        Note: If debugging this function, it is recommended to write the debug
        output in a separate file. Otherwise the debug output will be treated
        and formatted as potential completion suggestions.
        """
        # Don't complete if user hasn't sourced bash_completion file.
        if 'DJANGO_AUTO_COMPLETE' not in os.environ:
            return
        cwords = os.environ['COMP_WORDS'].split()[1:]
        cword = int(os.environ['COMP_CWORD'])
        try:
            curr = cwords[cword - 1]
        except IndexError:
            curr = ''
        subcommands = list(get_commands()) + ['help']
        options = [('--help', False)]
        # subcommand
        if cword == 1:
            print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
        # subcommand options
        # special case: the 'help' subcommand has no options
        elif cwords[0] in subcommands and cwords[0] != 'help':
            subcommand_cls = self.fetch_command(cwords[0])
            # special case: add the names of installed apps to options
            if cwords[0] in ('dumpdata', 'sqlmigrate', 'sqlsequencereset', 'test'):
                try:
                    app_configs = apps.get_app_configs()
                    # Get the last part of the dotted path as the app name.
                    options.extend((app_config.label, 0) for app_config in app_configs)
                except ImportError:
                    # Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
                    # user will find out once they execute the command.
                    pass
            parser = subcommand_cls.create_parser('', cwords[0])
            options.extend(
                (sorted(s_opt.option_strings)[0], s_opt.nargs != 0)
                for s_opt in parser._actions if s_opt.option_strings
            )
            # filter out previously specified options from available options
            prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
            options = [opt for opt in options if opt[0] not in prev_opts]
            # filter options by current input
            options = sorted((k, v) for k, v in options if k.startswith(curr))
            for option in options:
                opt_label = option[0]
                # append '=' to options which require args
                if option[1]:
                    opt_label += '='
                print(opt_label)
        # Autocompletion is a terminal operation: never continue on to
        # actually execute the command.
        sys.exit(1)
    def execute(self):
        """
        Given the command-line arguments, this figures out which subcommand is
        being run, creates a parser appropriate to that command, and runs it.
        """
        try:
            subcommand = self.argv[1]
        except IndexError:
            subcommand = 'help' # Display help if no arguments were given.
        # Preprocess options to extract --settings and --pythonpath.
        # These options could affect the commands that are available, so they
        # must be processed early.
        parser = CommandParser(None, usage="%(prog)s subcommand [options] [args]", add_help=False)
        parser.add_argument('--settings')
        parser.add_argument('--pythonpath')
        parser.add_argument('args', nargs='*') # catch-all
        try:
            options, args = parser.parse_known_args(self.argv[2:])
            handle_default_options(options)
        except CommandError:
            pass # Ignore any option errors at this point.
        # Commands that are allowed to run without configured settings.
        no_settings_commands = [
            'help', 'version', '--help', '--version', '-h',
            'compilemessages', 'makemessages',
            'startapp', 'startproject',
        ]
        try:
            settings.INSTALLED_APPS
        except ImproperlyConfigured as exc:
            self.settings_exception = exc
            # A handful of built-in management commands work without settings.
            # Load the default settings -- where INSTALLED_APPS is empty.
            if subcommand in no_settings_commands:
                settings.configure()
        if settings.configured:
            # Start the auto-reloading dev server even if the code is broken.
            # The hardcoded condition is a code smell but we can't rely on a
            # flag on the command class because we haven't located it yet.
            if subcommand == 'runserver' and '--noreload' not in self.argv:
                try:
                    autoreload.check_errors(django.setup)()
                except Exception:
                    # The exception will be raised later in the child process
                    # started by the autoreloader.
                    pass
            # In all other cases, django.setup() is required to succeed.
            else:
                django.setup()
        self.autocomplete()
        if subcommand == 'help':
            if '--commands' in args:
                sys.stdout.write(self.main_help_text(commands_only=True) + '\n')
            elif len(options.args) < 1:
                sys.stdout.write(self.main_help_text() + '\n')
            else:
                self.fetch_command(options.args[0]).print_help(self.prog_name, options.args[0])
        # Special-cases: We want 'django-admin --version' and
        # 'django-admin --help' to work, for backwards compatibility.
        elif subcommand == 'version' or self.argv[1:] == ['--version']:
            sys.stdout.write(django.get_version() + '\n')
        elif self.argv[1:] in (['--help'], ['-h']):
            sys.stdout.write(self.main_help_text() + '\n')
        else:
            self.fetch_command(subcommand).run_from_argv(self.argv)
def execute_from_command_line(argv=None):
    """
    A simple method that runs a ManagementUtility.
    """
    ManagementUtility(argv).execute()
| bsd-3-clause |
GbalsaC/bitnamiP | django-wiki/wiki/editors/markitup.py | 2 | 2520 | from django import forms
from django.forms.util import flatatt
from django.utils.encoding import force_unicode
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from wiki.editors.base import BaseEditor
class MarkItUpAdminWidget(forms.Widget):
    """A simplified more fail-safe widget for the backend"""

    def __init__(self, attrs=None):
        # The 'rows' and 'cols' attributes are required for HTML correctness.
        merged_attrs = {'class': 'markItUp', 'rows': '10', 'cols': '40'}
        if attrs:
            merged_attrs.update(attrs)
        super(MarkItUpAdminWidget, self).__init__(merged_attrs)

    def render(self, name, value, attrs=None):
        if value is None:
            value = ''
        final_attrs = self.build_attrs(attrs, name=name)
        escaped_value = conditional_escape(force_unicode(value))
        return mark_safe(u'<textarea%s>%s</textarea>' % (flatatt(final_attrs),
                                                         escaped_value))
class MarkItUpWidget(forms.Widget):
    """Frontend textarea widget wrapped in a <div> for the MarkItUp editor."""

    def __init__(self, attrs=None):
        # The 'rows' and 'cols' attributes are required for HTML correctness.
        merged_attrs = {'class': 'markItUp', 'rows': '10', 'cols': '40'}
        if attrs:
            merged_attrs.update(attrs)
        super(MarkItUpWidget, self).__init__(merged_attrs)

    def render(self, name, value, attrs=None):
        if value is None:
            value = ''
        final_attrs = self.build_attrs(attrs, name=name)
        escaped_value = conditional_escape(force_unicode(value))
        return mark_safe(u'<div><textarea%s>%s</textarea></div>' % (flatatt(final_attrs),
                                                                    escaped_value))
class MarkItUp(BaseEditor):
    # Wiki editor backend that wires the MarkItUp jQuery editor into forms.
    editor_id = 'markitup'
    def get_admin_widget(self, instance=None):
        # Fail-safe plain textarea variant used in the Django admin.
        return MarkItUpAdminWidget()
    def get_widget(self, instance=None):
        return MarkItUpWidget()
    class AdminMedia:
        # Static assets injected on admin pages.
        css = {
            'all': ("wiki/markitup/skins/simple/style.css",
                    "wiki/markitup/sets/admin/style.css",)
        }
        js = ("wiki/markitup/admin.init.js",
              "wiki/markitup/jquery.markitup.js",
              "wiki/markitup/sets/admin/set.js",
              )
    class Media:
        # Static assets injected on frontend pages.
        css = {
            'all': ("wiki/markitup/skins/simple/style.css",
                    "wiki/markitup/sets/frontend/style.css",)
        }
        js = ("wiki/markitup/frontend.init.js",
              "wiki/markitup/jquery.markitup.js",
              "wiki/markitup/sets/frontend/set.js",
              )
| agpl-3.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/numpy/lib/shape_base.py | 22 | 25915 | from __future__ import division, absolute_import, print_function
import warnings
import numpy.core.numeric as _nx
from numpy.core.numeric import (
asarray, zeros, outer, concatenate, isscalar, array, asanyarray
)
from numpy.core.fromnumeric import product, reshape
from numpy.core import vstack, atleast_3d
__all__ = [
'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
'apply_along_axis', 'kron', 'tile', 'get_array_wrap'
]
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    Apply a function to 1-D slices along the given axis.
    Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`
    is a 1-D slice of `arr` along `axis`.
    Parameters
    ----------
    func1d : function
        This function should accept 1-D arrays. It is applied to 1-D
        slices of `arr` along the specified axis.
    axis : integer
        Axis along which `arr` is sliced.
    arr : ndarray
        Input array.
    args : any
        Additional arguments to `func1d`.
    kwargs : any
        Additional named arguments to `func1d`.
        .. versionadded:: 1.9.0
    Returns
    -------
    apply_along_axis : ndarray
        The output array. The shape of `outarr` is identical to the shape of
        `arr`, except along the `axis` dimension, where the length of `outarr`
        is equal to the size of the return value of `func1d`. If `func1d`
        returns a scalar `outarr` will have one fewer dimensions than `arr`.
    See Also
    --------
    apply_over_axes : Apply a function repeatedly over multiple axes.
    Examples
    --------
    >>> def my_func(a):
    ...     \"\"\"Average first and last element of a 1-D array\"\"\"
    ...     return (a[0] + a[-1]) * 0.5
    >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> np.apply_along_axis(my_func, 0, b)
    array([ 4., 5., 6.])
    >>> np.apply_along_axis(my_func, 1, b)
    array([ 2., 5., 8.])
    For a function that doesn't return a scalar, the number of dimensions in
    `outarr` is the same as `arr`.
    >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
    >>> np.apply_along_axis(sorted, 1, b)
    array([[1, 7, 8],
           [3, 4, 9],
           [2, 5, 6]])
    """
    arr = asanyarray(arr)
    nd = arr.ndim
    # Normalize a negative axis and validate the result.
    if axis < 0:
        axis += nd
    if (axis >= nd):
        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
            % (axis, nd))
    # `ind` is an odometer-style counter over every axis except `axis`;
    # `i` is an object array used as a fancy index where i[axis] stays a
    # full slice so arr[tuple(i)] is always a 1-D slice along `axis`.
    ind = [0]*(nd-1)
    i = zeros(nd, 'O')
    indlist = list(range(nd))
    indlist.remove(axis)
    i[axis] = slice(None, None)
    outshape = asarray(arr.shape).take(indlist)
    i.put(indlist, ind)
    # Evaluate once to learn the output's dtype and per-slice size.
    res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
    # if res is a number, then we have a smaller output array
    if isscalar(res):
        outarr = zeros(outshape, asarray(res).dtype)
        outarr[tuple(ind)] = res
        Ntot = product(outshape)
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            # Carry-propagate the odometer from the last axis backwards.
            while (ind[n] >= outshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(ind)] = res
            k += 1
        return outarr
    else:
        res = asanyarray(res)
        Ntot = product(outshape)
        holdshape = outshape
        # Output keeps arr's shape except `axis`, which becomes res.size.
        outshape = list(arr.shape)
        outshape[axis] = res.size
        outarr = zeros(outshape, res.dtype)
        # Preserve the subclass of the first result (e.g. matrix, masked).
        outarr = res.__array_wrap__(outarr)
        outarr[tuple(i.tolist())] = res
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            while (ind[n] >= holdshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = asanyarray(func1d(arr[tuple(i.tolist())], *args, **kwargs))
            outarr[tuple(i.tolist())] = res
            k += 1
        if res.shape == ():
            outarr = outarr.squeeze(axis)
        return outarr
def apply_over_axes(func, a, axes):
    """
    Apply a function repeatedly over multiple axes.

    `func` is called as ``res = func(a, axis)`` with the first element of
    `axes`, then repeatedly with each following axis, feeding each result
    back in as the next input.  If a call drops a dimension, it is
    re-inserted at `axis` so the rank stays constant; otherwise a
    ValueError is raised.

    Parameters
    ----------
    func : function
        Must accept two arguments: ``func(a, axis)``.
    a : array_like
        Input array.
    axes : array_like
        Axes over which `func` is applied; elements must be integers.

    Returns
    -------
    apply_over_axis : ndarray
        The output array, with the same number of dimensions as `a`.

    See Also
    --------
    apply_along_axis :
        Apply a function to 1-D slices of an array along the given axis.

    Examples
    --------
    >>> a = np.arange(24).reshape(2,3,4)
    >>> np.apply_over_axes(np.sum, a, [0,2])
    array([[[ 60],
            [ 92],
            [124]]])
    """
    result = asarray(a)
    ndim = a.ndim
    # A scalar axis argument is treated as a one-element sequence.
    if array(axes).ndim == 0:
        axes = (axes,)
    for axis in axes:
        if axis < 0:
            axis += ndim
        res = func(result, axis)
        if res.ndim != result.ndim:
            # func reduced away a dimension: restore it at `axis`.
            res = expand_dims(res, axis)
        if res.ndim != result.ndim:
            raise ValueError("function is not returning "
                             "an array of the correct shape")
        result = res
    return result
def expand_dims(a, axis):
    """
    Expand the shape of an array.

    Insert a new axis of length one at position `axis` in the array shape.
    Negative `axis` values count from the end of the *expanded* shape.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int
        Position (amongst axes) where new axis is to be inserted.

    Returns
    -------
    res : ndarray
        Output array with one more dimension than the input.

    Examples
    --------
    >>> x = np.array([1,2])
    >>> np.expand_dims(x, axis=0).shape
    (1, 2)
    >>> np.expand_dims(x, axis=1).shape
    (2, 1)
    """
    a = asarray(a)
    old_shape = a.shape
    if axis < 0:
        # Interpret relative to the length of the new, expanded shape.
        axis = axis + len(old_shape) + 1
    new_shape = old_shape[:axis] + (1,) + old_shape[axis:]
    return a.reshape(new_shape)
# `row_stack` is simply an alias for `vstack` (stack arrays row-wise).
row_stack = vstack
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
Take a sequence of 1-D arrays and stack them as columns
to make a single 2-D array. 2-D arrays are stacked as-is,
just like with `hstack`. 1-D arrays are turned into 2-D columns
first.
Parameters
----------
tup : sequence of 1-D or 2-D arrays.
Arrays to stack. All of them must have the same first dimension.
Returns
-------
stacked : 2-D array
The array formed by stacking the given arrays.
See Also
--------
hstack, vstack, concatenate
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.column_stack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
arrays = []
for v in tup:
arr = array(v, copy=False, subok=True)
if arr.ndim < 2:
arr = array(arr, copy=False, subok=True, ndmin=2).T
arrays.append(arr)
return _nx.concatenate(arrays, 1)
def dstack(tup):
"""
Stack arrays in sequence depth wise (along third axis).
Takes a sequence of arrays and stack them along the third axis
to make a single array. Rebuilds arrays divided by `dsplit`.
This is a simple way to stack 2D arrays (images) into a single
3D array for processing.
This function continues to be supported for backward compatibility, but
you should prefer ``np.concatenate`` or ``np.stack``. The ``np.stack``
function was added in NumPy 1.10.
Parameters
----------
tup : sequence of arrays
Arrays to stack. All of them must have the same shape along all
but the third axis.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
stack : Join a sequence of arrays along a new axis.
vstack : Stack along first axis.
hstack : Stack along second axis.
concatenate : Join a sequence of arrays along an existing axis.
dsplit : Split array along third axis.
Notes
-----
Equivalent to ``np.concatenate(tup, axis=2)``.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.dstack((a,b))
array([[[1, 2],
[2, 3],
[3, 4]]])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.dstack((a,b))
array([[[1, 2]],
[[2, 3]],
[[3, 4]]])
"""
return _nx.concatenate([atleast_3d(_m) for _m in tup], 2)
def _replace_zero_by_x_arrays(sub_arys):
for i in range(len(sub_arys)):
if len(_nx.shape(sub_arys[i])) == 0:
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
return sub_arys
def array_split(ary, indices_or_sections, axis=0):
"""
Split an array into multiple sub-arrays.
Please refer to the ``split`` documentation. The only difference
between these functions is that ``array_split`` allows
`indices_or_sections` to be an integer that does *not* equally
divide the axis.
See Also
--------
split : Split array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(8.0)
>>> np.array_split(x, 3)
[array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])]
"""
try:
Ntotal = ary.shape[axis]
except AttributeError:
Ntotal = len(ary)
try:
# handle scalar case.
Nsections = len(indices_or_sections) + 1
div_points = [0] + list(indices_or_sections) + [Ntotal]
except TypeError:
# indices_or_sections is a scalar, not an array.
Nsections = int(indices_or_sections)
if Nsections <= 0:
raise ValueError('number sections must be larger than 0.')
Neach_section, extras = divmod(Ntotal, Nsections)
section_sizes = ([0] +
extras * [Neach_section+1] +
(Nsections-extras) * [Neach_section])
div_points = _nx.array(section_sizes).cumsum()
sub_arys = []
sary = _nx.swapaxes(ary, axis, 0)
for i in range(Nsections):
st = div_points[i]
end = div_points[i + 1]
sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
return sub_arys
def split(ary, indices_or_sections, axis=0):
    """
    Split an array into multiple sub-arrays.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1-D array
        An integer N splits the array into N equal parts along `axis`
        (a ValueError is raised if the split is not exact).  A sorted
        1-D array of integers gives the explicit split points; indices
        beyond the axis length produce empty sub-arrays.
    axis : int, optional
        The axis along which to split, default is 0.

    Returns
    -------
    sub-arrays : list of ndarrays

    Raises
    ------
    ValueError
        If an integer `indices_or_sections` does not divide the axis
        length evenly.

    See Also
    --------
    array_split : Like `split`, but allows unequal integer divisions.
    """
    try:
        len(indices_or_sections)
    except TypeError:
        # Scalar section count: the division must be exact.
        if ary.shape[axis] % indices_or_sections:
            raise ValueError(
                'array split does not result in an equal division')
    return array_split(ary, indices_or_sections, axis)
def hsplit(ary, indices_or_sections):
    """
    Split an array into multiple sub-arrays horizontally (column-wise).

    Please refer to the `split` documentation.  For arrays of two or more
    dimensions this is ``split`` with ``axis=1``; 1-D arrays are split
    along their only axis.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.
    """
    if len(_nx.shape(ary)) == 0:
        raise ValueError('hsplit only works on arrays of 1 or more dimensions')
    # Split along the second axis when there is one, otherwise the first.
    axis = 1 if len(ary.shape) > 1 else 0
    return split(ary, indices_or_sections, axis)
def vsplit(ary, indices_or_sections):
    """
    Split an array into multiple sub-arrays vertically (row-wise).

    Please refer to the ``split`` documentation.  ``vsplit`` is ``split``
    with ``axis=0`` and requires the input to be at least 2-D.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.
    """
    ndim = len(_nx.shape(ary))
    if ndim < 2:
        raise ValueError('vsplit only works on arrays of 2 or more dimensions')
    return split(ary, indices_or_sections, 0)
def dsplit(ary, indices_or_sections):
    """
    Split array into multiple sub-arrays along the 3rd axis (depth).

    Please refer to the `split` documentation.  ``dsplit`` is ``split``
    with ``axis=2`` and requires the input to be at least 3-D.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.
    """
    ndim = len(_nx.shape(ary))
    if ndim < 3:
        raise ValueError('dsplit only works on arrays of 3 or more dimensions')
    return split(ary, indices_or_sections, 2)
def get_array_prepare(*args):
    """Find the __array_prepare__ wrapper with the highest priority.

    In case of ties, the leftmost argument wins.  Returns None when no
    argument defines __array_prepare__.
    """
    candidates = [(getattr(arg, '__array_priority__', 0), -pos,
                   arg.__array_prepare__)
                  for pos, arg in enumerate(args)
                  if hasattr(arg, '__array_prepare__')]
    if not candidates:
        return None
    # Highest priority wins; -pos breaks ties in favour of earlier args.
    return max(candidates)[-1]
def get_array_wrap(*args):
    """Find the __array_wrap__ wrapper with the highest priority.

    In case of ties, the leftmost argument wins.  Returns None when no
    argument defines __array_wrap__.
    """
    candidates = [(getattr(arg, '__array_priority__', 0), -pos,
                   arg.__array_wrap__)
                  for pos, arg in enumerate(args)
                  if hasattr(arg, '__array_wrap__')]
    if not candidates:
        return None
    # Highest priority wins; -pos breaks ties in favour of earlier args.
    return max(candidates)[-1]
def kron(a, b):
    """
    Kronecker product of two arrays.

    Computes the Kronecker product, a composite array made of blocks of the
    second array scaled by the first.

    Parameters
    ----------
    a, b : array_like

    Returns
    -------
    out : ndarray

    See Also
    --------
    outer : The outer product

    Notes
    -----
    The function assumes that the number of dimensions of `a` and `b`
    are the same, if necessary prepending the smallest with ones.
    If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,
    the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`.
    The elements are products of elements from `a` and `b`, organized
    explicitly by::

        kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]

    where::

        kt = it * st + jt,  t = 0,...,N

    In the common 2-D case (N=1), the block structure can be visualized::

        [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
         [  ...                              ... ],
         [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]

    Examples
    --------
    >>> np.kron([1,10,100], [5,6,7])
    array([  5,   6,   7,  50,  60,  70, 500, 600, 700])
    >>> np.kron([5,6,7], [1,10,100])
    array([  5,  50, 500,   6,  60, 600,   7,  70, 700])
    >>> np.kron(np.eye(2), np.ones((2,2)))
    array([[ 1.,  1.,  0.,  0.],
           [ 1.,  1.,  0.,  0.],
           [ 0.,  0.,  1.,  1.],
           [ 0.,  0.,  1.,  1.]])
    >>> a = np.arange(100).reshape((2,5,2,5))
    >>> b = np.arange(24).reshape((2,3,4))
    >>> c = np.kron(a,b)
    >>> c.shape
    (2, 10, 6, 20)
    >>> I = (1,3,0,2)
    >>> J = (0,2,1)
    >>> J1 = (0,) + J             # extend to ndim=4
    >>> S1 = (1,) + b.shape
    >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
    >>> c[K] == a[I]*b[J]
    True
    """
    b = asanyarray(b)
    # Promote `a` to at least b.ndim dimensions without copying.
    a = array(a, copy=False, subok=True, ndmin=b.ndim)
    ndb, nda = b.ndim, a.ndim
    if (nda == 0 or ndb == 0):
        # A scalar operand degenerates to an elementwise product.
        return _nx.multiply(a, b)
    as_ = a.shape
    bs = b.shape
    if not a.flags.contiguous:
        a = reshape(a, as_)
    if not b.flags.contiguous:
        b = reshape(b, bs)
    nd = ndb
    if (ndb != nda):
        # Pad the smaller shape with leading ones so both rank the same.
        if (ndb > nda):
            as_ = (1,)*(ndb-nda) + as_
        else:
            bs = (1,)*(nda-ndb) + bs
            nd = nda
    # The outer product has shape as_+bs; each concatenate below merges one
    # pair of axes (a-axis t with b-axis t), working from the last axis in.
    result = outer(a, b).reshape(as_+bs)
    axis = nd-1
    for _ in range(nd):
        result = concatenate(result, axis=axis)
    # Honour __array_prepare__/__array_wrap__ of the highest-priority input.
    wrapper = get_array_prepare(a, b)
    if wrapper is not None:
        result = wrapper(result)
    wrapper = get_array_wrap(a, b)
    if wrapper is not None:
        result = wrapper(result)
    return result
def tile(A, reps):
"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Note : Although tile may be used for broadcasting, it is strongly
recommended to use numpy's broadcasting operations and functions.
Parameters
----------
A : array_like
The input array.
reps : array_like
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The tiled output array.
See Also
--------
repeat : Repeat elements of an array.
broadcast_to : Broadcast an array to a new shape
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0, 1, 2, 0, 1, 2])
>>> np.tile(a, (2, 2))
array([[0, 1, 2, 0, 1, 2],
[0, 1, 2, 0, 1, 2]])
>>> np.tile(a, (2, 1, 2))
array([[[0, 1, 2, 0, 1, 2]],
[[0, 1, 2, 0, 1, 2]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> np.tile(b, (2, 1))
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
>>> c = np.array([1,2,3,4])
>>> np.tile(c,(4,1))
array([[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4]])
"""
try:
tup = tuple(reps)
except TypeError:
tup = (reps,)
d = len(tup)
if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):
# Fixes the problem that the function does not make a copy if A is a
# numpy array and the repetitions are 1 in all dimensions
return _nx.array(A, copy=True, subok=True, ndmin=d)
else:
# Note that no copy of zero-sized arrays is made. However since they
# have no data there is no risk of an inadvertent overwrite.
c = _nx.array(A, copy=False, subok=True, ndmin=d)
if (d < c.ndim):
tup = (1,)*(c.ndim-d) + tup
shape_out = tuple(s*t for s, t in zip(c.shape, tup))
n = c.size
if n > 0:
for dim_in, nrep in zip(c.shape, tup):
if nrep != 1:
c = c.reshape(-1, n).repeat(nrep, 0)
n //= dim_in
return c.reshape(shape_out)
| apache-2.0 |
jieshen-sjtu/caffe-for-DDNet | python/caffe/imagenet/wrapper.py | 5 | 3938 | #!/usr/bin/env python
"""wrapper.py implements an end-to-end wrapper that classifies an image read
from disk, using the imagenet classifier.
"""
import numpy as np
import os
from skimage import io
from skimage import transform
import caffe
# Side length (pixels) images are resized to before cropping.
IMAGE_DIM = 256
# Side length of the square crops actually fed to the network.
CROPPED_DIM = 227

# Load the imagenet mean file.
# Subtracted from every image during preprocessing; assumed to live next
# to this module.
IMAGENET_MEAN = np.load(
    os.path.join(os.path.dirname(__file__), 'ilsvrc_2012_mean.npy'))
def oversample(image, center_only=False):
    """
    Oversamples an image: the 4 corner crops, the center crop and their
    horizontal mirrors, 10 crops in total (indices are hard coded).

    Input:
        image: an image of size (256 x 256 x 3) and has data type uint8.
        center_only: if True, only return the center image.
    Output:
        images: the output of size (10 x 3 x 227 x 227)
    """
    # HWC -> CHW channel order.
    chw = image.swapaxes(1, 2).swapaxes(0, 1)
    offsets = [0, IMAGE_DIM - CROPPED_DIM]
    center = int(offsets[1] / 2)

    if center_only:
        crop = chw[np.newaxis, :, center:center + CROPPED_DIM,
                   center:center + CROPPED_DIM]
        return np.ascontiguousarray(crop, dtype=np.float32)

    images = np.empty((10, 3, CROPPED_DIM, CROPPED_DIM), dtype=np.float32)
    idx = 0
    # Crops 0-3: the four corners.
    for row in offsets:
        for col in offsets:
            images[idx] = chw[:, row:row + CROPPED_DIM, col:col + CROPPED_DIM]
            idx += 1
    # Crop 4: the center.
    images[4] = chw[:, center:center + CROPPED_DIM,
                    center:center + CROPPED_DIM]
    # Crops 5-9: horizontally flipped versions of crops 0-4.
    images[5:] = images[:5, :, :, ::-1]
    return images
def prepare_image(filename, center_only=False):
    """Read an image file and turn it into mean-subtracted network crops.

    Grayscale images are replicated to three channels and alpha channels
    are dropped before resizing; the result is passed to `oversample`.
    """
    img = io.imread(filename)
    if img.ndim == 2:
        # Grayscale: replicate the single channel into three.
        img = np.tile(img[:, :, np.newaxis], (1, 1, 3))
    elif img.shape[2] == 4:
        # RGBA: drop the alpha channel.
        img = img[:, :, :3]
    # Resize to IMAGE_DIM, rescale to 0-255 and reverse channels to BGR.
    img_reshape = (transform.resize(img, (IMAGE_DIM,IMAGE_DIM)) * 255)[:, :, ::-1]
    # Subtract the dataset mean image.
    img_reshape -= IMAGENET_MEAN
    return oversample(img_reshape, center_only)
class ImageNetClassifier(object):
    """
    The ImageNetClassifier is a wrapper class to perform easier deployment
    of models trained on imagenet.
    """

    def __init__(self, model_def_file, pretrained_model, center_only=False,
                 num_output=1000):
        # One network input per crop: 10 oversampled crops, or just the
        # center crop when center_only is set.
        if center_only:
            num = 1
        else:
            num = 10
        self.caffenet = caffe.CaffeNet(model_def_file, pretrained_model)
        # Pre-allocated output buffer, reused across predict() calls.
        self._output_blobs = [np.empty((num, num_output, 1, 1), dtype=np.float32)]
        self._center_only = center_only

    def predict(self, filename):
        """Classify one image file; returns scores averaged over crops."""
        input_blob = [prepare_image(filename, self._center_only)]
        self.caffenet.Forward(input_blob, self._output_blobs)
        # Mean over the crop axis, flattened to a 1-D score vector.
        return self._output_blobs[0].mean(0).flatten()
def main(argv):
    """
    The main function will carry out classification.

    Classifies every image under --root with the given extension and saves
    the resulting score matrix to --output as a .npy file.
    NOTE: this script uses Python 2 print statements.
    """
    # Lazy imports: only needed when run as a script.
    import gflags
    import glob
    import time
    gflags.DEFINE_string("root", "", "The folder that contains images.")
    gflags.DEFINE_string("ext", "JPEG", "The image extension.")
    gflags.DEFINE_string("model_def", "", "The model definition file.")
    gflags.DEFINE_string("pretrained_model", "", "The pretrained model.")
    gflags.DEFINE_string("output", "", "The output numpy file.")
    gflags.DEFINE_boolean("gpu", True, "use gpu for computation")
    FLAGS = gflags.FLAGS
    FLAGS(argv)

    net = ImageNetClassifier(FLAGS.model_def, FLAGS.pretrained_model)
    if FLAGS.gpu:
        print 'Use gpu.'
        net.caffenet.set_mode_gpu()

    # Deterministic processing order for reproducible output rows.
    files = glob.glob(os.path.join(FLAGS.root, "*." + FLAGS.ext))
    files.sort()
    print 'A total of %d files' % len(files)

    # One score row per file.
    output = np.empty((len(files), net._output_blobs[0].shape[1]),
                      dtype=np.float32)
    start = time.time()
    for i, f in enumerate(files):
        output[i] = net.predict(f)
        if i % 1000 == 0 and i > 0:
            print 'Processed %d files, elapsed %.2f s' % (i, time.time() - start)
    # Finally, write the results
    np.save(FLAGS.output, output)
    print 'Done. Saved to %s.' % FLAGS.output

if __name__ == "__main__":
    import sys
    main(sys.argv)
| bsd-2-clause |
turbomanage/training-data-analyst | courses/developingapps/v1.2/python/pubsub-languageapi-spanner/bonus/quiz/gcp/spanner.py | 4 | 3809 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
# TODO: Import the spanner module
from google.cloud import spanner
# END TODO
"""
Get spanner management objects
"""
# TODO: Create a spanner Client
spanner_client = spanner.Client()
# END TODO
# TODO: Get a reference to the Cloud Spanner quiz-instance
instance = spanner_client.instance('quiz-instance')
# END TODO
# TODO: Get a referent to the Cloud Spanner quiz-database
database = instance.database('quiz-database')
# END TODO
"""
Takes an email address and reverses it (to be used as primary key)
"""
def reverse_email(email):
return '_'.join(list(reversed(email.replace('@','_').
replace('.','_').
split('_'))))
"""
Persists feedback data into Spanner
- create primary key value
- do a batch insert (even though it's a single record)
"""
def save_feedback(data):
# TODO: Create a batch object for database operations
with database.batch() as batch:
# TODO: Create a key for the record
# from the email, quiz and timestamp
feedback_id = '{}_{}_{}'.format(reverse_email(data['email']),
data['quiz'],
data['timestamp'])
# END TODO
# TODO: Use the batch to insert a record
# into the feedback table
# This needs the columns and values
batch.insert(
table='feedback',
columns=(
'feedbackId',
'email',
'quiz',
'timestamp',
'rating',
'score',
'feedback'
),
values=[
(
feedback_id,
data['email'],
data['quiz'],
data['timestamp'],
data['rating'],
data['score'],
data['feedback']
)
]
)
# END TODO
# END TODO
"""
Bonus: Save answer
"""
def save_answer(data):
with database.batch() as batch:
answer_id = '{}_{}_{}'.format(reverse_email(data['email']),
data['quiz'],
data['timestamp'])
batch.insert(
table='answers',
columns=(
'answerId',
'id',
'email',
'quiz',
'answer',
'correct',
'timestamp'
),
values=[
(
answer_id,
data['id'],
data['email'],
data['quiz'],
data['answer'],
data['correct'],
data['timestamp']
)
]
)
"""
Bonus: Get the leaderboard data
"""
def get_leaderboard():
with database.snapshot() as snapshot:
results = snapshot.execute_sql(
"SELECT quiz, email, COUNT(*) AS score FROM Answers"
" WHERE correct = answer"
" GROUP BY quiz, email"
" ORDER BY quiz, score DESC")
return list(results) | apache-2.0 |
OpenSlides/OpenSlides | server/openslides/users/signals.py | 6 | 7902 | from django.apps import apps
from django.contrib.auth.models import Permission
from django.db.models import Q
from openslides.utils.auth import GROUP_ADMIN_PK, GROUP_DEFAULT_PK
from openslides.utils.postgres import restart_id_sequence
from .models import Group, User
def get_permission_change_data(sender, permissions=None, **kwargs):
    """
    Yields all necessary collections if 'users.can_see_name' permission changes.
    """
    users_app = apps.get_app_config(app_label="users")
    for perm in permissions:
        # Only permissions belonging to the users app are relevant.
        if perm.content_type.app_label != users_app.label:
            continue
        if perm.codename == "can_see_name":
            # There can only be one 'users.can_see_name'.
            yield from users_app.get_startup_elements()
        elif perm.codename == "can_see_extra_data":
            yield users_app.get_model("User")
def create_builtin_groups_and_admin(**kwargs):
    """
    Creates the builtin groups: Default, Delegates, Staff and Committees.
    Creates the builtin user: admin.
    """
    # Check whether there are groups in the database.
    if Group.objects.exists():
        # Do completely nothing if there are already some groups in the database.
        return

    # Every permission any builtin group may need, as "app_label.codename".
    permission_strings = (
        "agenda.can_be_speaker",
        "agenda.can_manage",
        "agenda.can_manage_list_of_speakers",
        "agenda.can_see",
        "agenda.can_see_internal_items",
        "agenda.can_see_list_of_speakers",
        "assignments.can_manage",
        "assignments.can_nominate_other",
        "assignments.can_nominate_self",
        "assignments.can_see",
        "core.can_manage_config",
        "core.can_manage_logos_and_fonts",
        "core.can_manage_projector",
        "core.can_manage_tags",
        "core.can_see_frontpage",
        "core.can_see_history",
        "core.can_see_projector",
        "core.can_see_autopilot",
        "mediafiles.can_manage",
        "mediafiles.can_see",
        "motions.can_create",
        "motions.can_create_amendments",
        "motions.can_manage",
        "motions.can_manage_metadata",
        "motions.can_see",
        "motions.can_see_internal",
        "motions.can_support",
        "users.can_change_password",
        "users.can_manage",
        "users.can_see_extra_data",
        "users.can_see_name",
        "chat.can_manage",
    )
    permission_query = Q()
    permission_dict = {}

    # Load all permissions with a single OR-ed query, then index them by
    # their "app_label.codename" string for the group definitions below.
    for permission_string in permission_strings:
        app_label, codename = permission_string.split(".")
        query_part = Q(content_type__app_label=app_label) & Q(codename=codename)
        permission_query = permission_query | query_part
    for permission in Permission.objects.select_related("content_type").filter(
        permission_query
    ):
        permission_string = ".".join(
            (permission.content_type.app_label, permission.codename)
        )
        permission_dict[permission_string] = permission

    # Default (pk 1 == GROUP_DEFAULT_PK)
    base_permissions = (
        permission_dict["agenda.can_see"],
        permission_dict["agenda.can_see_internal_items"],
        permission_dict["agenda.can_see_list_of_speakers"],
        permission_dict["assignments.can_see"],
        permission_dict["core.can_see_frontpage"],
        permission_dict["core.can_see_projector"],
        permission_dict["mediafiles.can_see"],
        permission_dict["motions.can_see"],
        permission_dict["users.can_see_name"],
        permission_dict["users.can_change_password"],
    )
    # skip_autoupdate: the cache is rebuilt on server start anyway.
    group_default = Group(pk=GROUP_DEFAULT_PK, name="Default")
    group_default.save(skip_autoupdate=True)
    group_default.permissions.add(*base_permissions)

    # Admin (pk 2 == GROUP_ADMIN_PK) — no explicit permissions: admin group
    # membership grants everything elsewhere in the auth layer.
    group_admin = Group(pk=GROUP_ADMIN_PK, name="Admin")
    group_admin.save(skip_autoupdate=True)

    # Delegates (pk 3)
    delegates_permissions = (
        permission_dict["agenda.can_see"],
        permission_dict["agenda.can_see_internal_items"],
        permission_dict["agenda.can_be_speaker"],
        permission_dict["agenda.can_see_list_of_speakers"],
        permission_dict["assignments.can_see"],
        permission_dict["assignments.can_nominate_other"],
        permission_dict["assignments.can_nominate_self"],
        permission_dict["core.can_see_frontpage"],
        permission_dict["core.can_see_projector"],
        permission_dict["core.can_see_autopilot"],
        permission_dict["mediafiles.can_see"],
        permission_dict["motions.can_see"],
        permission_dict["motions.can_create"],
        permission_dict["motions.can_create_amendments"],
        permission_dict["motions.can_support"],
        permission_dict["users.can_see_name"],
        permission_dict["users.can_change_password"],
    )
    group_delegates = Group(pk=3, name="Delegates")
    group_delegates.save(skip_autoupdate=True)
    group_delegates.permissions.add(*delegates_permissions)

    # Staff (pk 4)
    staff_permissions = (
        permission_dict["agenda.can_see"],
        permission_dict["agenda.can_see_internal_items"],
        permission_dict["agenda.can_be_speaker"],
        permission_dict["agenda.can_manage"],
        permission_dict["agenda.can_see_list_of_speakers"],
        permission_dict["agenda.can_manage_list_of_speakers"],
        permission_dict["assignments.can_see"],
        permission_dict["assignments.can_manage"],
        permission_dict["assignments.can_nominate_other"],
        permission_dict["assignments.can_nominate_self"],
        permission_dict["core.can_see_frontpage"],
        permission_dict["core.can_see_history"],
        permission_dict["core.can_see_projector"],
        permission_dict["core.can_manage_projector"],
        permission_dict["core.can_manage_tags"],
        permission_dict["mediafiles.can_see"],
        permission_dict["mediafiles.can_manage"],
        permission_dict["motions.can_see"],
        permission_dict["motions.can_see_internal"],
        permission_dict["motions.can_create"],
        permission_dict["motions.can_create_amendments"],
        permission_dict["motions.can_manage"],
        permission_dict["motions.can_manage_metadata"],
        permission_dict["users.can_see_name"],
        permission_dict["users.can_manage"],
        permission_dict["users.can_see_extra_data"],
        permission_dict["users.can_change_password"],
    )
    group_staff = Group(pk=4, name="Staff")
    group_staff.save(skip_autoupdate=True)
    group_staff.permissions.add(*staff_permissions)

    # Committees (pk 5)
    committees_permissions = (
        permission_dict["agenda.can_see"],
        permission_dict["agenda.can_see_internal_items"],
        permission_dict["agenda.can_see_list_of_speakers"],
        permission_dict["assignments.can_see"],
        permission_dict["core.can_see_frontpage"],
        permission_dict["core.can_see_projector"],
        permission_dict["mediafiles.can_see"],
        permission_dict["motions.can_see"],
        permission_dict["motions.can_create"],
        permission_dict["motions.can_create_amendments"],
        permission_dict["motions.can_support"],
        permission_dict["users.can_see_name"],
        permission_dict["users.can_change_password"],
    )
    group_committee = Group(pk=5, name="Committees")
    group_committee.save(skip_autoupdate=True)
    group_committee.permissions.add(*committees_permissions)

    # Create or reset admin user
    User.objects.create_or_reset_admin_user(skip_autoupdate=True)

    # After each group was created, the permissions (many to many fields) where
    # added to the group. But we do not have to update the cache by calling
    # inform_changed_data() because the cache is updated on server start.

    # For postgres: After inserting the groups by id, the id sequence needs to be restarted.
    restart_id_sequence("auth_group")
| mit |
brutasse/graphite-web | webapp/graphite/account/migrations/0001_initial.py | 9 | 2951 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-14 11:22
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the account app (auto-generated by Django 1.11)."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='MyGraph',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
                ('url', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): bytes default on a TextField is a Python 2
                # artifact; changing it would require a new migration.
                ('history', models.TextField(default=b'')),
                ('advancedUI', models.BooleanField(default=False)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Variable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
                ('value', models.CharField(max_length=64)),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.Profile')),
            ],
        ),
        migrations.CreateModel(
            name='View',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.Profile')),
            ],
        ),
        migrations.CreateModel(
            name='Window',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
                ('top', models.IntegerField()),
                ('left', models.IntegerField()),
                ('width', models.IntegerField()),
                ('height', models.IntegerField()),
                ('url', models.TextField()),
                ('interval', models.IntegerField(null=True)),
                ('view', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.View')),
            ],
        ),
        migrations.AddField(
            model_name='mygraph',
            name='profile',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.Profile'),
        ),
    ]
| apache-2.0 |
GeoffEvans/aol_model | aol_model/tests/test_aod.py | 1 | 3438 | from aol_model.aod import Aod
from aol_model.ray import Ray
from numpy import sqrt, allclose, cross, dot
from aol_model.vector_utils import normalise
aod = Aod([0,0,1], [1,0,0], 1, 1, 1)
def test_on_axis_ray_displacement():
    """An on-axis ray must leave the AOD on axis with direction intact."""
    rays = [Ray([0,0,0], [0,0,1], 800e-9, 1)]*5
    aod.move_ray_through_aod(rays)
    still_on_axis = allclose(cross([r.position for r in rays], [0,0,1]), [0,0,0])
    # Fixed: the original read `r` after the comprehension above, which only
    # works on Python 2 (comprehension variables leaked).  Check all rays,
    # consistent with the other tests in this module.
    direction_unchanged = allclose([r.wavevector_unit for r in rays], [0,0,1])
    assert still_on_axis and direction_unchanged
def test_off_axis_ray_displacement():
    """An off-axis ray is displaced off its wavevector but keeps direction."""
    k = [17./145, 0, 144./145]
    rays = [Ray([0,0,0], k, 800e-9, 1)]*5
    aod.move_ray_through_aod(rays)
    displaced = not allclose(cross([r.position for r in rays], k), [0,0,0])
    direction_unchanged = allclose([r.wavevector_unit for r in rays], k)
    assert displaced and direction_unchanged
def test_refractive_indices_match():
    """Vector-based and ray-based index calculations must agree."""
    wavelength = 800e-9
    k = [3./5, 0, 4./5]
    rays = [Ray([0,0,0], k, wavelength, 1)]*5
    vec_indices = aod.calc_refractive_indices_vectors(
        [r.wavevector_unit for r in rays], wavelength)
    ray_indices = aod.calc_refractive_indices_rays(rays)
    assert allclose(vec_indices, ray_indices)
def test_refracting_in_towards_normal():
    """Refracting into the crystal bends rays towards the surface normal."""
    k = [3./5, 0, 4./5]
    rays = [Ray([0,0,0], k, 800e-9, 1)]*5
    aod.refract_in(rays)
    cos_outside = dot(k, aod.normal)
    cos_inside = dot([r.wavevector_unit for r in rays], aod.normal)
    bent_towards_normal = abs(cos_outside) < abs(cos_inside)
    not_reflected = cos_outside * cos_inside >= 0
    assert bent_towards_normal.all() and not_reflected.all()
def test_walkoff_towards_axis():
    """Ordinary-beam walkoff pulls the ray direction towards the optic axis."""
    k = normalise([0.01, 0, 1])
    rays = [Ray([0,0,0], k, 800e-9, 1)]*2
    directions = aod.get_ray_direction_ord(rays)
    cos_wavevector = dot(k, aod.optic_axis)
    cos_direction = dot(directions[0], aod.optic_axis)
    assert cos_wavevector < cos_direction
def test_refracting_in_at_normal():
    """Normal incidence must pass into the crystal undeviated."""
    rays = [Ray([0,0,0], [0,0,1], 800e-9, 1)]*5
    aod.refract_in(rays)
    assert allclose([0,0,1], [r.wavevector_unit for r in rays])
def test_refracting_out_away_from_normal():
    """Refracting out of the crystal bends rays away from the normal."""
    k = [17./145, 0, 144./145]
    rays = [Ray([0,0,0], k, 800e-9, 1)]*5
    aod.refract_out(rays)
    cos_outside = dot([r.wavevector_unit for r in rays], aod.normal)
    cos_inside = dot(k, aod.normal)
    bent = abs(cos_outside) < abs(cos_inside)
    not_reflected = cos_outside * cos_inside >= 0
    assert bent.all() and not_reflected.all()
def test_refracting_out_at_normal():
    """Normal incidence must leave the crystal undeviated."""
    rays = [Ray([0,0,0], [0,0,1], 800e-9, 1)]*5
    aod.refract_out(rays)
    assert allclose([0,0,1], [r.wavevector_unit for r in rays])
def test_refraction_in_out_no_change():
    """Refracting in and back out should roughly restore the wavevector."""
    wavevec = [3./5,0,4./5]
    rays = [Ray([0,0,0],wavevec,800e-9,1)]*5
    aod.refract_in(rays)
    aod.refract_out(rays)
    # Loose rtol: entry uses the extraordinary index, exit the ordinary one.
    assert allclose([r.wavevector_unit for r in rays], [3./5,0,4./5], rtol=5e-3) # should be close but not the same since ext in, ord out
def test_acoustic_direction_trivial():
    """With the default orientation the sound travels along +x."""
    assert allclose(aod.acoustic_direction, [1,0,0])
def test_acoustic_sound_direction():
    """Tilting the AOD normal rotates the acoustic direction with it."""
    tilted = Aod([1,0,1]/sqrt(2), [1,0,0], 1, 1, 1)
    assert allclose(tilted.acoustic_direction, [1,0,-1]/sqrt(2))
if __name__ == "__main__":
test_walkoff_towards_axis()
| gpl-3.0 |
vinilios/synnefo | snf-admin-app/synnefo_admin/admin/resources/volumes/utils.py | 7 | 1622 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from django.core.exceptions import ObjectDoesNotExist
from synnefo.db.models import Volume
from astakos.im.models import AstakosUser, Project
from synnefo_admin.admin.exceptions import AdminHttp404
from synnefo_admin.admin.utils import create_details_href
def get_volume_or_404(query):
    """Return the Volume whose primary key matches `query` or raise AdminHttp404."""
    try:
        volume_id = int(query)
        return Volume.objects.get(pk=volume_id)
    except (ValueError, ObjectDoesNotExist):
        # Non-numeric input and missing rows are reported identically.
        raise AdminHttp404(
            "No Volume was found that matches this query: %s\n" % query)
def get_user_details_href(volume):
    """Build an admin details link for the volume's owning user."""
    owner = AstakosUser.objects.get(uuid=volume.userid)
    return create_details_href('user', owner.realname, owner.email)
def get_project_details_href(volume):
    """Build an admin details link for the volume's project."""
    proj = Project.objects.get(uuid=volume.project)
    return create_details_href('project', proj.realname, proj.id)
def get_vm_details_href(volume):
    """Build an admin details link for the VM the volume is attached to."""
    machine = volume.machine
    return create_details_href('vm', machine.name, machine.id)
| gpl-3.0 |
frosty308/webapps | susana/gallery.py | 2 | 11263 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017-2018 Alan Frost. All rights reserved.
Generate static image gallery HTML as masonary layout or carousel
> python gallery.py -t "Susan Frost's Gallery" -p index.html masonary > t
> python gallery.py -t "Susan Frost's Gallery" -p gallery.html gallery > t
> python gallery.py -t "Prints by Susan Frost" -p prints.html -m Print gallery > t
"""
from __future__ import print_function
from string import Template
import argparse
import csv
import json
import sys
def parse_options():
    """Parse the generator's command line arguments.

    Returns the argparse namespace with images/medium/page/site/title
    options plus the positional command.
    """
    argp = argparse.ArgumentParser(description='Image Gallery HTML Generator')
    argp.add_argument('-i', '--images', action='store', default='images.json', help='images.json')
    argp.add_argument('-m', '--medium', action='store', default='*')
    argp.add_argument('-p', '--page', action='store', default='gallery.html')
    argp.add_argument('-s', '--site', action='store', default='https://susanafrost.com')
    argp.add_argument('-t', '--title', action='store', default='Susan Frost')
    argp.add_argument('command', action='store', help='check, csv, gallery, masonary, search')
    return argp.parse_args()
def load_images(images_file):
    """Load the image metadata from a JSON file.

    Args:
        images_file: path of the JSON file to read.
    Returns:
        The decoded JSON content, or None when the file cannot be read
        or does not contain valid JSON.
    """
    images = None
    try:
        with open(images_file) as json_file:
            images = json.load(json_file)
    except (IOError, ValueError) as err:
        # BUG FIX: `err.message` was removed in Python 3 (and deprecated
        # since Python 2.6) and raised AttributeError here; str(err) is
        # portable across both interpreters this script targets.
        print('Load of images file failed:', str(err))
    return images
def read_csv(csv_file):
    """Read a CSV file with visual art work metadata.

    Args:
        csv filename
    Return:
        array of row objects (one dict per data row, keyed by header)
    """
    rows = []
    with open(csv_file) as handle:
        for record in csv.DictReader(handle):
            # DictReader already maps header fields to cell values.
            rows.append(dict(record))
    return rows
def write_csv(images):
    """Write visual art work metadata as CSV to stdout.

    Args:
        images: iterable of image metadata dicts.

    Only the canonical columns are emitted. ROBUSTNESS FIX: extra keys
    in an entry are now ignored (extrasaction='ignore') instead of
    making DictWriter raise ValueError; missing keys are emitted empty.
    """
    fieldnames = ['title', 'medium', 'size', 'created', 'image']
    writer = csv.DictWriter(sys.stdout, fieldnames, extrasaction='ignore')
    writer.writeheader()
    for image in images:
        writer.writerow(image)
def check_images(images):
    """Check an images dict for required visual art work metadata. See
    http://schema.org/VisualArtwork for all sorts of metadata that could be added.

    Prints one line per missing required field.

    Args:
        images dict
    """
    for image in images:
        if 'title' not in image:
            if 'image' in image:
                print('Missing "title" from: ' + image['image'])
            else:
                # ROBUSTNESS FIX: previously an entry missing both 'title'
                # and 'image' was skipped silently, hiding the broken entry.
                print('Missing "title" from unidentified image entry')
        else:
            title = image['title']
            if 'created' not in image:
                print('Missing "created" from: ' + title)
            if 'image' not in image:
                print('Missing "image" from: ' + title)
            if 'size' not in image:
                print('Missing "size" from: ' + title)
            if 'medium' not in image:
                print('Missing "medium" from: ' + title)
def generate_masonary_page(site, page, title, images, medium='*'):
    """ Generate HTML masonary layout
    Writes a complete page to stdout: header template, a modal image
    viewer, a grid of clickable thumbnails, then the footer template.
    Args:
        site name (currently unused in this function; kept for a
            uniform interface with generate_gallery_page)
        page name
        title page heading
        images dict
        medium filter (e.g. Oil, Print...)
    """
    # Emit the shared page header, substituting title and canonical path.
    with open('header.html') as html_file:
        template = Template(html_file.read())
        contents = template.substitute(title=title, page='/' + page)
        print(contents)
    html = '<!-- Modal Image -->\n'
    html += '<div id="imgModal" class="modal">\n'
    # NOTE(review): '×' looks like a UTF-8 '×' (close glyph) read with the
    # wrong encoding -- verify in the rendered page before changing.
    html += ' <span class="modal-close" onclick="closeGalleryImage()">×</span>\n'
    html += ' <img class="modal-content" id="modal-content">\n'
    html += ' <div id="modal-caption" class="modal-caption"></div>\n'
    html += '</div>\n'
    html += ' <div class="container">\n'
    html += ' <h1>' + title + '</h1>\n'
    html += ' <hr>\n'
    html += ' <div class="row">\n'
    html += ' <div class="col-md-12">\n'
    html += ' <div class="gal">'
    print(html)
    slide = 0
    for image in images:
        if medium == '*' or medium in image['medium']:
            # NOTE: rebinding `title` shadows the page-title parameter; the
            # <h1> above was already printed so this is safe, just confusing.
            title = image['title']
            caption = image['medium'] + ', ' + image['size'] + ', ' + image['created']
            img_small = image['image'].replace('.jpg', '_small.jpg')
            # NOTE(review): this anchor markup is built but never printed (the
            # print below is commented out) and is immediately overwritten --
            # dead code from an earlier layout variant.
            html = ' <a href="gallery.html?slide=' + str(slide) + '" title="' + title + ',' + caption + '">'
            #print(html)
            # Clicking the thumbnail opens the modal viewer at this slide.
            html = ' <img src="static/img/' + img_small + '" alt="' + title
            html += '" onclick="openGalleryImage(' + str(slide) + ')" class="hover-shadow">'
            print(html)
            slide += 1
    html = ' </div>\n'
    html += ' </div>\n'
    html += ' </div>\n'
    html += ' </div>\n'
    print(html)
    # Footer is static -- no template substitution needed.
    with open('footer.html') as html_file:
        contents = html_file.read()
        print(contents)
def generate_gallery_indicators(images, medium='*'):
    """Print the carousel indicator dots for the gallery slides.

    Args:
        images dict
        medium filter (e.g. Oil, Print...)
    """
    if medium != '*':
        # Keep only the works matching the requested medium.
        images = [entry for entry in images if medium in entry['medium']]
    markup = ' <!-- Indicators -->\n'
    # Indicator dots only make sense for a modest number of slides.
    if len(images) < 12:
        markup += ' <ol class="carousel-indicators">\n'
        markup += ' <li data-target="#artCarousel" data-slide-to="0" class="active"></li>\n'
        for position in range(1, len(images)):
            markup += ' <li data-target="#artCarousel" data-slide-to="' + str(position) + '"></li>\n'
        markup += ' </ol>\n'
    print(markup)
def generate_search_list(images):
    """Print an HTML <li> link list of every image title for the search box."""
    markup = ''
    for position, image in enumerate(images):
        markup += ('<li><a href="gallery.html?slide=' + str(position) + '">'
                   + image['title'] + '</a></li>\n')
    print(markup)
def generate_gallery(site, images, medium='*'):
    """ Generate HTML gallery layout (Bootstrap carousel) on stdout.
    Args:
        site name (only referenced by the commented-out absolute-URL
            variants below)
        images dict
        medium filter (e.g. Oil, Print...)
    """
    slide = 0
    #img_path = site + 'static/img/'
    img_path = 'static/img/'
    html = ' <section class="slide-wrapper">\n'
    html += ' <div class="container">\n'
    html += ' <div id="artCarousel" class="carousel slide carousel-fade" data-ride="carousel" data-interval="false">\n'
    print(html)
    # Indicator dots use the same medium filter so the counts match the slides.
    generate_gallery_indicators(images, medium)
    html = ' <!-- Wrapper for slides -->\n'
    html += ' <div class="carousel-inner" role="listbox">\n'
    for image in images:
        if medium != '*' and medium not in image['medium']:
            continue
        title = image['title']
        # Responsive variants are derived from the base .jpg file name.
        img_hd = image['image'].replace('.jpg', '_hd.jpg')
        img_med = image['image'].replace('.jpg', '_medium.jpg')
        img_small = image['image'].replace('.jpg', '_small.jpg')
        html += ' <div class="item item' + str(slide)
        if slide == 0:
            # Bootstrap requires exactly one slide marked active at load time.
            html += ' active'
        html += '">\n'
        html += ' <div class="fill" style=" background-color:#48c3af;">\n'
        html += ' <div class="inner-content">\n'
        html += ' <div class="carousel-img">\n'
        html += ' <a href="' + img_path + img_hd + '">\n'
        # <picture> picks the smallest adequate rendition per viewport width.
        html += ' <picture>\n'
        html += ' <source srcset="' + img_path + img_hd + '" media="(min-width: 1400px)">\n'
        html += ' <source srcset="' + img_path + img_med + '" media="(min-width: 768px)">\n'
        html += ' <source srcset="' + img_path + img_small + '" media="(min-width: 576px)">\n'
        html += ' <img srcset="' + img_path + img_small + '" alt="responsive image" class="img img-responsive">\n'
        html += ' </picture>\n'
        html += ' </a>\n'
        #html += ' <img src="' + site + 'static/img/' + img_hd + '" alt="' + title + '" class="img img-responsive" />\n'
        html += ' </div>\n'
        html += ' <div class="carousel-desc">\n'
        html += ' <h3>' + title + '</h3>\n'
        html += ' <p>' + image['medium'] + ', ' + image['size'] + ', ' + image['created'] + '</p>\n'
        html += ' </div>\n'
        html += ' </div>\n'
        html += ' </div>\n'
        html += ' </div>\n'
        slide += 1
    html += ' </div>\n'
    html += ' <!-- Left and right controls -->\n'
    html += ' <a class="left carousel-control" href="#artCarousel" data-slide="prev">\n'
    html += ' <span class="fa fa-chevron-left"></span>\n'
    html += ' <span class="sr-only">Previous</span>\n'
    html += ' </a>\n'
    html += ' <a class="right carousel-control" href="#artCarousel" data-slide="next">\n'
    html += ' <span class="fa fa-chevron-right"></span>\n'
    html += ' <span class="sr-only">Next</span>\n'
    html += ' </a>\n'
    # Optional play/pause control, currently disabled for the static gallery:
    #html += ' <div id="carouselButtons">\n'
    #html += ' <button id="playButton" type="button" class="btn btn-default btn-xs">\n'
    #html += ' <span class="fa fa-play"></span>\n'
    #html += ' </button>\n'
    #html += ' </div>\n'
    html += ' </div>\n'
    html += ' </div>\n'
    html += ' </section>\n'
    print(html)
def generate_gallery_page(site, page, title, images, medium='*'):
    """ Generate HTML gallery layout page on stdout: header template,
    carousel body (via generate_gallery), then footer template.
    Args:
        site name
        page name
        title
        images dict
        medium filter (e.g. Oil, Print...)
    """
    # Header template takes the page title and its canonical path.
    with open('header.html') as html_file:
        template = Template(html_file.read())
        contents = template.substitute(title=title, page='/' + page)
        print(contents)
    generate_gallery(site, images, medium)
    # Footer is static -- no template substitution needed.
    with open('footer.html') as html_file:
        contents = html_file.read()
        print(contents)
def main():
    """Entry point: load the image metadata and dispatch on the command."""
    options = parse_options()
    images = load_images(options.images)
    if images is None:
        print('No images loaded')
        return
    site = options.site
    if len(site) > 1:
        site += '/'
    command = options.command
    if command == 'check':
        # Reaching this point means the JSON loaded successfully.
        print('Images loaded')
        check_images(images)
    elif command == 'csv':
        write_csv(images)
    elif command == 'masonary':
        generate_masonary_page(site, options.page, options.title, images, options.medium)
    elif command == 'gallery':
        generate_gallery_page(site, options.page, options.title, images, options.medium)
    elif command == 'search':
        generate_search_list(images)
# Run the generator only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| gpl-3.0 |
resmo/ansible | test/units/modules/network/nxos/test_nxos_vpc_interface.py | 68 | 2764 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_vpc_interface
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosVpcModule(TestNxosModule):
    """Tests for the nxos_vpc_interface module (vPC config on a port-channel)."""

    module = nxos_vpc_interface

    def setUp(self):
        super(TestNxosVpcModule, self).setUp()
        # Patch the module's device I/O entry points so no real device
        # (or connection plugin) is ever contacted during the tests.
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_vpc_interface.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_vpc_interface.get_config')
        self.get_config = self.mock_get_config.start()
        self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_vpc_interface.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestNxosVpcModule, self).tearDown()
        self.mock_load_config.stop()
        self.mock_get_config.stop()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None, device=''):
        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()
            for command in commands:
                # Fixture files are named after the command with spaces
                # replaced by underscores and any "| ..." filter stripped,
                # e.g. "show vpc brief | json" -> show_vpc_brief.
                filename = str(command).split(' | ')[0].replace(' ', '_')
                output.append(load_fixture('nxos_vpc_interface', filename))
            return output

        self.run_commands.side_effect = load_from_file
        self.load_config.return_value = None

    def test_nxos_vpc_interface_absent(self):
        # Removing vPC 100 from port-channel10 emits "no vpc".
        set_module_args(dict(portchannel=10, vpc=100, state='absent'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['interface port-channel10', 'no vpc'])

    def test_nxos_vpc_interface_present(self):
        # Adding vPC 200 to port-channel20 emits "vpc 200".
        set_module_args(dict(portchannel=20, vpc=200, state='present'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['interface port-channel20', 'vpc 200'])
| gpl-3.0 |
pquentin/django | tests/generic_views/test_edit.py | 7 | 19270 | from __future__ import unicode_literals
import warnings
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.test import TestCase, ignore_warnings, override_settings
from django.test.client import RequestFactory
from django.utils.deprecation import RemovedInDjango20Warning
from django.views.generic.base import View
from django.views.generic.edit import CreateView, FormMixin, ModelFormMixin
from . import views
from .models import Artist, Author
from .test_forms import AuthorForm
class FormMixinTests(TestCase):
    """Unit tests for django.views.generic.edit.FormMixin."""

    def test_initial_data(self):
        """ Test instance independence of initial data dict (see #16138) """
        initial_1 = FormMixin().get_initial()
        initial_1['foo'] = 'bar'
        initial_2 = FormMixin().get_initial()
        # Mutating one instance's initial dict must not leak into another.
        self.assertNotEqual(initial_1, initial_2)

    def test_get_prefix(self):
        """ Test prefix can be set (see #18872) """
        test_string = 'test'
        rf = RequestFactory()
        get_request = rf.get('/')

        class TestFormMixin(FormMixin):
            request = get_request

        # Default: no prefix in the form kwargs.
        default_kwargs = TestFormMixin().get_form_kwargs()
        self.assertIsNone(default_kwargs.get('prefix'))
        # An explicitly set prefix is passed through to the form.
        set_mixin = TestFormMixin()
        set_mixin.prefix = test_string
        set_kwargs = set_mixin.get_form_kwargs()
        self.assertEqual(test_string, set_kwargs.get('prefix'))

    def test_get_form(self):
        class TestFormMixin(FormMixin):
            request = RequestFactory().get('/')

        self.assertIsInstance(
            TestFormMixin().get_form(forms.Form), forms.Form,
            'get_form() should use provided form class.'
        )

        class FormClassTestFormMixin(TestFormMixin):
            form_class = forms.Form

        self.assertIsInstance(
            FormClassTestFormMixin().get_form(), forms.Form,
            'get_form() should fallback to get_form_class() if none is provided.'
        )

    def test_get_form_missing_form_class_default_value(self):
        # Overriding get_form() without a default value for form_class is
        # deprecated; merely defining such a subclass must emit the warning.
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always')

            class MissingDefaultValue(FormMixin):
                request = RequestFactory().get('/')
                form_class = forms.Form

                def get_form(self, form_class):
                    return form_class(**self.get_form_kwargs())

            self.assertEqual(len(w), 1)
            self.assertEqual(w[0].category, RemovedInDjango20Warning)
            self.assertEqual(
                str(w[0].message),
                '`generic_views.test_edit.MissingDefaultValue.get_form` method '
                'must define a default value for its `form_class` argument.'
            )

        # The deprecated signature must keep working for now.
        self.assertIsInstance(
            MissingDefaultValue().get_form(), forms.Form,
        )
@override_settings(ROOT_URLCONF='generic_views.urls')
class BasicFormTests(TestCase):
    """End-to-end check of a simple FormView POST."""

    def test_post_data(self):
        res = self.client.post('/contact/', {'name': "Me", 'message': "Hello"})
        # A valid POST redirects to the view's success URL.
        self.assertRedirects(res, 'http://testserver/list/authors/')
class ModelFormMixinTests(TestCase):
    """Unit tests for django.views.generic.edit.ModelFormMixin."""

    def test_get_form(self):
        # The generated form class is derived from the view's queryset model.
        form_class = views.AuthorGetQuerySetFormView().get_form_class()
        self.assertEqual(form_class._meta.model, Author)

    def test_get_form_checks_for_object(self):
        mixin = ModelFormMixin()
        mixin.request = RequestFactory().get('/')
        # Without self.object set, no 'instance' kwarg is passed to the form.
        self.assertEqual({'initial': {}, 'prefix': None},
                         mixin.get_form_kwargs())
@override_settings(ROOT_URLCONF='generic_views.urls')
class CreateViewTests(TestCase):
    """Integration tests for the CreateView generic view."""

    def test_create(self):
        res = self.client.get('/edit/authors/create/')
        self.assertEqual(res.status_code, 200)
        self.assertIsInstance(res.context['form'], forms.ModelForm)
        self.assertIsInstance(res.context['view'], View)
        # No object exists yet, so no object context variables are set.
        self.assertNotIn('object', res.context)
        self.assertNotIn('author', res.context)
        self.assertTemplateUsed(res, 'generic_views/author_form.html')
        res = self.client.post('/edit/authors/create/',
                               {'name': 'Randall Munroe', 'slug': 'randall-munroe'})
        self.assertEqual(res.status_code, 302)
        self.assertRedirects(res, 'http://testserver/list/authors/')
        self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])

    def test_create_invalid(self):
        # Name exceeds the 100-char field limit -> form is redisplayed.
        res = self.client.post('/edit/authors/create/',
                               {'name': 'A' * 101, 'slug': 'randall-munroe'})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_form.html')
        self.assertEqual(len(res.context['form'].errors), 1)
        self.assertEqual(Author.objects.count(), 0)

    def test_create_with_object_url(self):
        res = self.client.post('/edit/artists/create/',
                               {'name': 'Rene Magritte'})
        self.assertEqual(res.status_code, 302)
        artist = Artist.objects.get(name='Rene Magritte')
        # Without success_url the view falls back to get_absolute_url().
        self.assertRedirects(res, 'http://testserver/detail/artist/%d/' % artist.pk)
        self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>'])

    def test_create_with_redirect(self):
        res = self.client.post('/edit/authors/create/redirect/',
                               {'name': 'Randall Munroe', 'slug': 'randall-munroe'})
        self.assertEqual(res.status_code, 302)
        self.assertRedirects(res, 'http://testserver/edit/authors/create/')
        self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_create_with_interpolated_redirect(self):
        # success_url with %(field)s interpolation is deprecated but supported.
        res = self.client.post(
            '/edit/authors/create/interpolate_redirect/',
            {'name': 'Randall Munroe', 'slug': 'randall-munroe'}
        )
        self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
        self.assertEqual(res.status_code, 302)
        pk = Author.objects.first().pk
        self.assertRedirects(res, 'http://testserver/edit/author/%d/update/' % pk)
        # Also test with escaped chars in URL
        res = self.client.post(
            '/edit/authors/create/interpolate_redirect_nonascii/',
            {'name': 'John Doe', 'slug': 'john-doe'}
        )
        self.assertEqual(res.status_code, 302)
        pk = Author.objects.get(name='John Doe').pk
        self.assertRedirects(res, 'http://testserver/%C3%A9dit/author/{}/update/'.format(pk))

    def test_create_with_special_properties(self):
        res = self.client.get('/edit/authors/create/special/')
        self.assertEqual(res.status_code, 200)
        self.assertIsInstance(res.context['form'], views.AuthorForm)
        self.assertNotIn('object', res.context)
        self.assertNotIn('author', res.context)
        self.assertTemplateUsed(res, 'generic_views/form.html')
        res = self.client.post('/edit/authors/create/special/',
                               {'name': 'Randall Munroe', 'slug': 'randall-munroe'})
        self.assertEqual(res.status_code, 302)
        obj = Author.objects.get(slug='randall-munroe')
        self.assertRedirects(res, reverse('author_detail', kwargs={'pk': obj.pk}))
        self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])

    def test_create_without_redirect(self):
        # No success_url and no get_absolute_url() -> misconfiguration.
        try:
            self.client.post('/edit/authors/create/naive/',
                             {'name': 'Randall Munroe', 'slug': 'randall-munroe'})
            self.fail('Should raise exception -- No redirect URL provided, and no get_absolute_url provided')
        except ImproperlyConfigured:
            pass

    def test_create_restricted(self):
        # Login-protected view: anonymous POST redirects to the login page.
        res = self.client.post('/edit/authors/create/restricted/',
                               {'name': 'Randall Munroe', 'slug': 'randall-munroe'})
        self.assertEqual(res.status_code, 302)
        self.assertRedirects(res, 'http://testserver/accounts/login/?next=/edit/authors/create/restricted/')

    def test_create_view_with_restricted_fields(self):
        class MyCreateView(CreateView):
            model = Author
            fields = ['name']

        self.assertEqual(list(MyCreateView().get_form_class().base_fields),
                         ['name'])

    def test_create_view_all_fields(self):
        class MyCreateView(CreateView):
            model = Author
            fields = '__all__'

        self.assertEqual(list(MyCreateView().get_form_class().base_fields),
                         ['name', 'slug'])

    def test_create_view_without_explicit_fields(self):
        class MyCreateView(CreateView):
            model = Author

        message = (
            "Using ModelFormMixin (base class of MyCreateView) without the "
            "'fields' attribute is prohibited."
        )
        with self.assertRaisesMessage(ImproperlyConfigured, message):
            MyCreateView().get_form_class()

    def test_define_both_fields_and_form_class(self):
        class MyCreateView(CreateView):
            model = Author
            form_class = AuthorForm
            fields = ['name']

        message = "Specifying both 'fields' and 'form_class' is not permitted."
        with self.assertRaisesMessage(ImproperlyConfigured, message):
            MyCreateView().get_form_class()
@override_settings(ROOT_URLCONF='generic_views.urls')
class UpdateViewTests(TestCase):
    """Integration tests for the UpdateView generic view."""

    def test_update_post(self):
        a = Author.objects.create(
            name='Randall Munroe',
            slug='randall-munroe',
        )
        res = self.client.get('/edit/author/%d/update/' % a.pk)
        self.assertEqual(res.status_code, 200)
        self.assertIsInstance(res.context['form'], forms.ModelForm)
        self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
        self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
        self.assertTemplateUsed(res, 'generic_views/author_form.html')
        # Modification with both POST and PUT (browser compatible)
        res = self.client.post('/edit/author/%d/update/' % a.pk,
                               {'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'})
        self.assertEqual(res.status_code, 302)
        self.assertRedirects(res, 'http://testserver/list/authors/')
        self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>'])

    def test_update_invalid(self):
        a = Author.objects.create(
            name='Randall Munroe',
            slug='randall-munroe',
        )
        # Name exceeds the field limit -> form redisplayed, object unchanged.
        res = self.client.post('/edit/author/%d/update/' % a.pk,
                               {'name': 'A' * 101, 'slug': 'randall-munroe'})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_form.html')
        self.assertEqual(len(res.context['form'].errors), 1)
        self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])

    def test_update_with_object_url(self):
        a = Artist.objects.create(name='Rene Magritte')
        res = self.client.post('/edit/artists/%d/update/' % a.pk,
                               {'name': 'Rene Magritte'})
        self.assertEqual(res.status_code, 302)
        # Without success_url the view falls back to get_absolute_url().
        self.assertRedirects(res, 'http://testserver/detail/artist/%d/' % a.pk)
        self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>'])

    def test_update_with_redirect(self):
        a = Author.objects.create(
            name='Randall Munroe',
            slug='randall-munroe',
        )
        res = self.client.post('/edit/author/%d/update/redirect/' % a.pk,
                               {'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
        self.assertEqual(res.status_code, 302)
        self.assertRedirects(res, 'http://testserver/edit/authors/create/')
        self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_update_with_interpolated_redirect(self):
        # success_url with %(field)s interpolation is deprecated but supported.
        a = Author.objects.create(
            name='Randall Munroe',
            slug='randall-munroe',
        )
        res = self.client.post(
            '/edit/author/%d/update/interpolate_redirect/' % a.pk,
            {'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'}
        )
        self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
        self.assertEqual(res.status_code, 302)
        pk = Author.objects.first().pk
        self.assertRedirects(res, 'http://testserver/edit/author/%d/update/' % pk)
        # Also test with escaped chars in URL
        res = self.client.post(
            '/edit/author/%d/update/interpolate_redirect_nonascii/' % a.pk,
            {'name': 'John Doe', 'slug': 'john-doe'}
        )
        self.assertEqual(res.status_code, 302)
        pk = Author.objects.get(name='John Doe').pk
        self.assertRedirects(res, 'http://testserver/%C3%A9dit/author/{}/update/'.format(pk))

    def test_update_with_special_properties(self):
        a = Author.objects.create(
            name='Randall Munroe',
            slug='randall-munroe',
        )
        res = self.client.get('/edit/author/%d/update/special/' % a.pk)
        self.assertEqual(res.status_code, 200)
        self.assertIsInstance(res.context['form'], views.AuthorForm)
        self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
        # The view renames the context variable to 'thingy'.
        self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk))
        self.assertNotIn('author', res.context)
        self.assertTemplateUsed(res, 'generic_views/form.html')
        res = self.client.post('/edit/author/%d/update/special/' % a.pk,
                               {'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
        self.assertEqual(res.status_code, 302)
        self.assertRedirects(res, 'http://testserver/detail/author/%d/' % a.pk)
        self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])

    def test_update_without_redirect(self):
        a = Author.objects.create(
            name='Randall Munroe',
            slug='randall-munroe',
        )
        # Should raise exception -- No redirect URL provided, and no
        # get_absolute_url provided
        with self.assertRaises(ImproperlyConfigured):
            self.client.post('/edit/author/%d/update/naive/' % a.pk,
                             {'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})

    def test_update_get_object(self):
        # View resolves its object via get_object() instead of a pk in the URL.
        a = Author.objects.create(
            pk=1,
            name='Randall Munroe',
            slug='randall-munroe',
        )
        res = self.client.get('/edit/author/update/')
        self.assertEqual(res.status_code, 200)
        self.assertIsInstance(res.context['form'], forms.ModelForm)
        self.assertIsInstance(res.context['view'], View)
        self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
        self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
        self.assertTemplateUsed(res, 'generic_views/author_form.html')
        # Modification with both POST and PUT (browser compatible)
        res = self.client.post('/edit/author/update/',
                               {'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'})
        self.assertEqual(res.status_code, 302)
        self.assertRedirects(res, 'http://testserver/list/authors/')
        self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>'])
@override_settings(ROOT_URLCONF='generic_views.urls')
class DeleteViewTests(TestCase):
    """Integration tests for the DeleteView generic view."""

    def test_delete_by_post(self):
        a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
        # GET shows the confirmation page without deleting anything.
        res = self.client.get('/edit/author/%d/delete/' % a.pk)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
        self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
        self.assertTemplateUsed(res, 'generic_views/author_confirm_delete.html')
        # Deletion with POST
        res = self.client.post('/edit/author/%d/delete/' % a.pk)
        self.assertEqual(res.status_code, 302)
        self.assertRedirects(res, 'http://testserver/list/authors/')
        self.assertQuerysetEqual(Author.objects.all(), [])

    def test_delete_by_delete(self):
        # Deletion with browser compatible DELETE method
        a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
        res = self.client.delete('/edit/author/%d/delete/' % a.pk)
        self.assertEqual(res.status_code, 302)
        self.assertRedirects(res, 'http://testserver/list/authors/')
        self.assertQuerysetEqual(Author.objects.all(), [])

    def test_delete_with_redirect(self):
        a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
        res = self.client.post('/edit/author/%d/delete/redirect/' % a.pk)
        self.assertEqual(res.status_code, 302)
        self.assertRedirects(res, 'http://testserver/edit/authors/create/')
        self.assertQuerysetEqual(Author.objects.all(), [])

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_delete_with_interpolated_redirect(self):
        # success_url with %(field)s interpolation is deprecated but supported.
        a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
        res = self.client.post('/edit/author/%d/delete/interpolate_redirect/' % a.pk)
        self.assertEqual(res.status_code, 302)
        self.assertRedirects(res, 'http://testserver/edit/authors/create/?deleted=%d' % a.pk)
        self.assertQuerysetEqual(Author.objects.all(), [])
        # Also test with escaped chars in URL
        a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
        res = self.client.post('/edit/author/{}/delete/interpolate_redirect_nonascii/'.format(a.pk))
        self.assertEqual(res.status_code, 302)
        self.assertRedirects(res, 'http://testserver/%C3%A9dit/authors/create/?deleted={}'.format(a.pk))

    def test_delete_with_special_properties(self):
        a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
        res = self.client.get('/edit/author/%d/delete/special/' % a.pk)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
        # The view renames the context variable to 'thingy'.
        self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk))
        self.assertNotIn('author', res.context)
        self.assertTemplateUsed(res, 'generic_views/confirm_delete.html')
        res = self.client.post('/edit/author/%d/delete/special/' % a.pk)
        self.assertEqual(res.status_code, 302)
        self.assertRedirects(res, 'http://testserver/list/authors/')
        self.assertQuerysetEqual(Author.objects.all(), [])

    def test_delete_without_redirect(self):
        a = Author.objects.create(
            name='Randall Munroe',
            slug='randall-munroe',
        )
        # Should raise exception -- No redirect URL provided, and no
        # get_absolute_url provided
        with self.assertRaises(ImproperlyConfigured):
            self.client.post('/edit/author/%d/delete/naive/' % a.pk)
| bsd-3-clause |
alphafoobar/intellij-community | python/lib/Lib/encodings/cp863.py | 593 | 34508 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP863.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP863 charmap codec (file generated by gencodec.py)."""

    def encode(self,input,errors='strict'):
        # encoding_map is the module-level unicode->byte table for CP863.
        return codecs.charmap_encode(input,errors,encoding_map)

    def decode(self,input,errors='strict'):
        # NOTE(review): decoding_table is expected to be defined further down
        # in this generated module (not visible in this chunk) -- confirm.
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so each chunk is encoded independently.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap decoding is stateless, so each chunk is decoded independently.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec; no stream-specific state is needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode() from Codec; no stream-specific state is needed.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry registered for the 'cp863' encoding."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp863',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
# Start from the identity mapping for all 256 byte values, then override
# the high half (0x80-0xFF) with the CP863 (French-Canadian DOS) code points.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
    0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
    0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
    0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
    0x0084: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
    0x0086: 0x00b6, # PILCROW SIGN
    0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
    0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
    0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
    0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
    0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
    0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
    0x008d: 0x2017, # DOUBLE LOW LINE
    0x008e: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
    0x008f: 0x00a7, # SECTION SIGN
    0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
    0x0091: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
    0x0092: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
    0x0094: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
    0x0095: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
    0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
    0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
    0x0098: 0x00a4, # CURRENCY SIGN
    0x0099: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x009b: 0x00a2, # CENT SIGN
    0x009c: 0x00a3, # POUND SIGN
    0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
    0x009e: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
    0x00a0: 0x00a6, # BROKEN BAR
    0x00a1: 0x00b4, # ACUTE ACCENT
    0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
    0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
    0x00a4: 0x00a8, # DIAERESIS
    0x00a5: 0x00b8, # CEDILLA
    0x00a6: 0x00b3, # SUPERSCRIPT THREE
    0x00a7: 0x00af, # MACRON
    0x00a8: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    0x00a9: 0x2310, # REVERSED NOT SIGN
    0x00aa: 0x00ac, # NOT SIGN
    0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
    0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
    0x00ad: 0x00be, # VULGAR FRACTION THREE QUARTERS
    0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00b0: 0x2591, # LIGHT SHADE
    0x00b1: 0x2592, # MEDIUM SHADE
    0x00b2: 0x2593, # DARK SHADE
    0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588, # FULL BLOCK
    0x00dc: 0x2584, # LOWER HALF BLOCK
    0x00dd: 0x258c, # LEFT HALF BLOCK
    0x00de: 0x2590, # RIGHT HALF BLOCK
    0x00df: 0x2580, # UPPER HALF BLOCK
    0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
    0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
    0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
    0x00e3: 0x03c0, # GREEK SMALL LETTER PI
    0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
    0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
    0x00e6: 0x00b5, # MICRO SIGN
    0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
    0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
    0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
    0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
    0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
    0x00ec: 0x221e, # INFINITY
    0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
    0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
    0x00ef: 0x2229, # INTERSECTION
    0x00f0: 0x2261, # IDENTICAL TO
    0x00f1: 0x00b1, # PLUS-MINUS SIGN
    0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
    0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
    0x00f4: 0x2320, # TOP HALF INTEGRAL
    0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
    0x00f6: 0x00f7, # DIVISION SIGN
    0x00f7: 0x2248, # ALMOST EQUAL TO
    0x00f8: 0x00b0, # DEGREE SIGN
    0x00f9: 0x2219, # BULLET OPERATOR
    0x00fa: 0x00b7, # MIDDLE DOT
    0x00fb: 0x221a, # SQUARE ROOT
    0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
    0x00fd: 0x00b2, # SUPERSCRIPT TWO
    0x00fe: 0x25a0, # BLACK SQUARE
    0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
# Full 256-entry decode table: index = CP863 byte value, value = the
# corresponding Unicode character.  This is the table actually consumed by
# codecs.charmap_decode above; decoding_map is the legacy dict equivalent.
decoding_table = (
    u'\x00'     # 0x0000 -> NULL
    u'\x01'     # 0x0001 -> START OF HEADING
    u'\x02'     # 0x0002 -> START OF TEXT
    u'\x03'     # 0x0003 -> END OF TEXT
    u'\x04'     # 0x0004 -> END OF TRANSMISSION
    u'\x05'     # 0x0005 -> ENQUIRY
    u'\x06'     # 0x0006 -> ACKNOWLEDGE
    u'\x07'     # 0x0007 -> BELL
    u'\x08'     # 0x0008 -> BACKSPACE
    u'\t'       # 0x0009 -> HORIZONTAL TABULATION
    u'\n'       # 0x000a -> LINE FEED
    u'\x0b'     # 0x000b -> VERTICAL TABULATION
    u'\x0c'     # 0x000c -> FORM FEED
    u'\r'       # 0x000d -> CARRIAGE RETURN
    u'\x0e'     # 0x000e -> SHIFT OUT
    u'\x0f'     # 0x000f -> SHIFT IN
    u'\x10'     # 0x0010 -> DATA LINK ESCAPE
    u'\x11'     # 0x0011 -> DEVICE CONTROL ONE
    u'\x12'     # 0x0012 -> DEVICE CONTROL TWO
    u'\x13'     # 0x0013 -> DEVICE CONTROL THREE
    u'\x14'     # 0x0014 -> DEVICE CONTROL FOUR
    u'\x15'     # 0x0015 -> NEGATIVE ACKNOWLEDGE
    u'\x16'     # 0x0016 -> SYNCHRONOUS IDLE
    u'\x17'     # 0x0017 -> END OF TRANSMISSION BLOCK
    u'\x18'     # 0x0018 -> CANCEL
    u'\x19'     # 0x0019 -> END OF MEDIUM
    u'\x1a'     # 0x001a -> SUBSTITUTE
    u'\x1b'     # 0x001b -> ESCAPE
    u'\x1c'     # 0x001c -> FILE SEPARATOR
    u'\x1d'     # 0x001d -> GROUP SEPARATOR
    u'\x1e'     # 0x001e -> RECORD SEPARATOR
    u'\x1f'     # 0x001f -> UNIT SEPARATOR
    u' '        # 0x0020 -> SPACE
    u'!'        # 0x0021 -> EXCLAMATION MARK
    u'"'        # 0x0022 -> QUOTATION MARK
    u'#'        # 0x0023 -> NUMBER SIGN
    u'$'        # 0x0024 -> DOLLAR SIGN
    u'%'        # 0x0025 -> PERCENT SIGN
    u'&'        # 0x0026 -> AMPERSAND
    u"'"        # 0x0027 -> APOSTROPHE
    u'('        # 0x0028 -> LEFT PARENTHESIS
    u')'        # 0x0029 -> RIGHT PARENTHESIS
    u'*'        # 0x002a -> ASTERISK
    u'+'        # 0x002b -> PLUS SIGN
    u','        # 0x002c -> COMMA
    u'-'        # 0x002d -> HYPHEN-MINUS
    u'.'        # 0x002e -> FULL STOP
    u'/'        # 0x002f -> SOLIDUS
    u'0'        # 0x0030 -> DIGIT ZERO
    u'1'        # 0x0031 -> DIGIT ONE
    u'2'        # 0x0032 -> DIGIT TWO
    u'3'        # 0x0033 -> DIGIT THREE
    u'4'        # 0x0034 -> DIGIT FOUR
    u'5'        # 0x0035 -> DIGIT FIVE
    u'6'        # 0x0036 -> DIGIT SIX
    u'7'        # 0x0037 -> DIGIT SEVEN
    u'8'        # 0x0038 -> DIGIT EIGHT
    u'9'        # 0x0039 -> DIGIT NINE
    u':'        # 0x003a -> COLON
    u';'        # 0x003b -> SEMICOLON
    u'<'        # 0x003c -> LESS-THAN SIGN
    u'='        # 0x003d -> EQUALS SIGN
    u'>'        # 0x003e -> GREATER-THAN SIGN
    u'?'        # 0x003f -> QUESTION MARK
    u'@'        # 0x0040 -> COMMERCIAL AT
    u'A'        # 0x0041 -> LATIN CAPITAL LETTER A
    u'B'        # 0x0042 -> LATIN CAPITAL LETTER B
    u'C'        # 0x0043 -> LATIN CAPITAL LETTER C
    u'D'        # 0x0044 -> LATIN CAPITAL LETTER D
    u'E'        # 0x0045 -> LATIN CAPITAL LETTER E
    u'F'        # 0x0046 -> LATIN CAPITAL LETTER F
    u'G'        # 0x0047 -> LATIN CAPITAL LETTER G
    u'H'        # 0x0048 -> LATIN CAPITAL LETTER H
    u'I'        # 0x0049 -> LATIN CAPITAL LETTER I
    u'J'        # 0x004a -> LATIN CAPITAL LETTER J
    u'K'        # 0x004b -> LATIN CAPITAL LETTER K
    u'L'        # 0x004c -> LATIN CAPITAL LETTER L
    u'M'        # 0x004d -> LATIN CAPITAL LETTER M
    u'N'        # 0x004e -> LATIN CAPITAL LETTER N
    u'O'        # 0x004f -> LATIN CAPITAL LETTER O
    u'P'        # 0x0050 -> LATIN CAPITAL LETTER P
    u'Q'        # 0x0051 -> LATIN CAPITAL LETTER Q
    u'R'        # 0x0052 -> LATIN CAPITAL LETTER R
    u'S'        # 0x0053 -> LATIN CAPITAL LETTER S
    u'T'        # 0x0054 -> LATIN CAPITAL LETTER T
    u'U'        # 0x0055 -> LATIN CAPITAL LETTER U
    u'V'        # 0x0056 -> LATIN CAPITAL LETTER V
    u'W'        # 0x0057 -> LATIN CAPITAL LETTER W
    u'X'        # 0x0058 -> LATIN CAPITAL LETTER X
    u'Y'        # 0x0059 -> LATIN CAPITAL LETTER Y
    u'Z'        # 0x005a -> LATIN CAPITAL LETTER Z
    u'['        # 0x005b -> LEFT SQUARE BRACKET
    u'\\'       # 0x005c -> REVERSE SOLIDUS
    u']'        # 0x005d -> RIGHT SQUARE BRACKET
    u'^'        # 0x005e -> CIRCUMFLEX ACCENT
    u'_'        # 0x005f -> LOW LINE
    u'`'        # 0x0060 -> GRAVE ACCENT
    u'a'        # 0x0061 -> LATIN SMALL LETTER A
    u'b'        # 0x0062 -> LATIN SMALL LETTER B
    u'c'        # 0x0063 -> LATIN SMALL LETTER C
    u'd'        # 0x0064 -> LATIN SMALL LETTER D
    u'e'        # 0x0065 -> LATIN SMALL LETTER E
    u'f'        # 0x0066 -> LATIN SMALL LETTER F
    u'g'        # 0x0067 -> LATIN SMALL LETTER G
    u'h'        # 0x0068 -> LATIN SMALL LETTER H
    u'i'        # 0x0069 -> LATIN SMALL LETTER I
    u'j'        # 0x006a -> LATIN SMALL LETTER J
    u'k'        # 0x006b -> LATIN SMALL LETTER K
    u'l'        # 0x006c -> LATIN SMALL LETTER L
    u'm'        # 0x006d -> LATIN SMALL LETTER M
    u'n'        # 0x006e -> LATIN SMALL LETTER N
    u'o'        # 0x006f -> LATIN SMALL LETTER O
    u'p'        # 0x0070 -> LATIN SMALL LETTER P
    u'q'        # 0x0071 -> LATIN SMALL LETTER Q
    u'r'        # 0x0072 -> LATIN SMALL LETTER R
    u's'        # 0x0073 -> LATIN SMALL LETTER S
    u't'        # 0x0074 -> LATIN SMALL LETTER T
    u'u'        # 0x0075 -> LATIN SMALL LETTER U
    u'v'        # 0x0076 -> LATIN SMALL LETTER V
    u'w'        # 0x0077 -> LATIN SMALL LETTER W
    u'x'        # 0x0078 -> LATIN SMALL LETTER X
    u'y'        # 0x0079 -> LATIN SMALL LETTER Y
    u'z'        # 0x007a -> LATIN SMALL LETTER Z
    u'{'        # 0x007b -> LEFT CURLY BRACKET
    u'|'        # 0x007c -> VERTICAL LINE
    u'}'        # 0x007d -> RIGHT CURLY BRACKET
    u'~'        # 0x007e -> TILDE
    u'\x7f'     # 0x007f -> DELETE
    u'\xc7'     # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
    u'\xfc'     # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
    u'\xe9'     # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
    u'\xe2'     # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    u'\xc2'     # 0x0084 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    u'\xe0'     # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
    u'\xb6'     # 0x0086 -> PILCROW SIGN
    u'\xe7'     # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
    u'\xea'     # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    u'\xeb'     # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
    u'\xe8'     # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
    u'\xef'     # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
    u'\xee'     # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    u'\u2017'   # 0x008d -> DOUBLE LOW LINE
    u'\xc0'     # 0x008e -> LATIN CAPITAL LETTER A WITH GRAVE
    u'\xa7'     # 0x008f -> SECTION SIGN
    u'\xc9'     # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
    u'\xc8'     # 0x0091 -> LATIN CAPITAL LETTER E WITH GRAVE
    u'\xca'     # 0x0092 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    u'\xf4'     # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    u'\xcb'     # 0x0094 -> LATIN CAPITAL LETTER E WITH DIAERESIS
    u'\xcf'     # 0x0095 -> LATIN CAPITAL LETTER I WITH DIAERESIS
    u'\xfb'     # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    u'\xf9'     # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
    u'\xa4'     # 0x0098 -> CURRENCY SIGN
    u'\xd4'     # 0x0099 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    u'\xdc'     # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
    u'\xa2'     # 0x009b -> CENT SIGN
    u'\xa3'     # 0x009c -> POUND SIGN
    u'\xd9'     # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE
    u'\xdb'     # 0x009e -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    u'\u0192'   # 0x009f -> LATIN SMALL LETTER F WITH HOOK
    u'\xa6'     # 0x00a0 -> BROKEN BAR
    u'\xb4'     # 0x00a1 -> ACUTE ACCENT
    u'\xf3'     # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
    u'\xfa'     # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
    u'\xa8'     # 0x00a4 -> DIAERESIS
    u'\xb8'     # 0x00a5 -> CEDILLA
    u'\xb3'     # 0x00a6 -> SUPERSCRIPT THREE
    u'\xaf'     # 0x00a7 -> MACRON
    u'\xce'     # 0x00a8 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    u'\u2310'   # 0x00a9 -> REVERSED NOT SIGN
    u'\xac'     # 0x00aa -> NOT SIGN
    u'\xbd'     # 0x00ab -> VULGAR FRACTION ONE HALF
    u'\xbc'     # 0x00ac -> VULGAR FRACTION ONE QUARTER
    u'\xbe'     # 0x00ad -> VULGAR FRACTION THREE QUARTERS
    u'\xab'     # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xbb'     # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\u2591'   # 0x00b0 -> LIGHT SHADE
    u'\u2592'   # 0x00b1 -> MEDIUM SHADE
    u'\u2593'   # 0x00b2 -> DARK SHADE
    u'\u2502'   # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
    u'\u2524'   # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
    u'\u2561'   # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    u'\u2562'   # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    u'\u2556'   # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    u'\u2555'   # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    u'\u2563'   # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    u'\u2551'   # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
    u'\u2557'   # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
    u'\u255d'   # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
    u'\u255c'   # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    u'\u255b'   # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    u'\u2510'   # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
    u'\u2514'   # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
    u'\u2534'   # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
    u'\u252c'   # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    u'\u251c'   # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    u'\u2500'   # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
    u'\u253c'   # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    u'\u255e'   # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    u'\u255f'   # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    u'\u255a'   # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
    u'\u2554'   # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
    u'\u2569'   # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    u'\u2566'   # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    u'\u2560'   # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    u'\u2550'   # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
    u'\u256c'   # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    u'\u2567'   # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    u'\u2568'   # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    u'\u2564'   # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    u'\u2565'   # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    u'\u2559'   # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    u'\u2558'   # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    u'\u2552'   # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    u'\u2553'   # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    u'\u256b'   # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    u'\u256a'   # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    u'\u2518'   # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
    u'\u250c'   # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
    u'\u2588'   # 0x00db -> FULL BLOCK
    u'\u2584'   # 0x00dc -> LOWER HALF BLOCK
    u'\u258c'   # 0x00dd -> LEFT HALF BLOCK
    u'\u2590'   # 0x00de -> RIGHT HALF BLOCK
    u'\u2580'   # 0x00df -> UPPER HALF BLOCK
    u'\u03b1'   # 0x00e0 -> GREEK SMALL LETTER ALPHA
    u'\xdf'     # 0x00e1 -> LATIN SMALL LETTER SHARP S
    u'\u0393'   # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
    u'\u03c0'   # 0x00e3 -> GREEK SMALL LETTER PI
    u'\u03a3'   # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
    u'\u03c3'   # 0x00e5 -> GREEK SMALL LETTER SIGMA
    u'\xb5'     # 0x00e6 -> MICRO SIGN
    u'\u03c4'   # 0x00e7 -> GREEK SMALL LETTER TAU
    u'\u03a6'   # 0x00e8 -> GREEK CAPITAL LETTER PHI
    u'\u0398'   # 0x00e9 -> GREEK CAPITAL LETTER THETA
    u'\u03a9'   # 0x00ea -> GREEK CAPITAL LETTER OMEGA
    u'\u03b4'   # 0x00eb -> GREEK SMALL LETTER DELTA
    u'\u221e'   # 0x00ec -> INFINITY
    u'\u03c6'   # 0x00ed -> GREEK SMALL LETTER PHI
    u'\u03b5'   # 0x00ee -> GREEK SMALL LETTER EPSILON
    u'\u2229'   # 0x00ef -> INTERSECTION
    u'\u2261'   # 0x00f0 -> IDENTICAL TO
    u'\xb1'     # 0x00f1 -> PLUS-MINUS SIGN
    u'\u2265'   # 0x00f2 -> GREATER-THAN OR EQUAL TO
    u'\u2264'   # 0x00f3 -> LESS-THAN OR EQUAL TO
    u'\u2320'   # 0x00f4 -> TOP HALF INTEGRAL
    u'\u2321'   # 0x00f5 -> BOTTOM HALF INTEGRAL
    u'\xf7'     # 0x00f6 -> DIVISION SIGN
    u'\u2248'   # 0x00f7 -> ALMOST EQUAL TO
    u'\xb0'     # 0x00f8 -> DEGREE SIGN
    u'\u2219'   # 0x00f9 -> BULLET OPERATOR
    u'\xb7'     # 0x00fa -> MIDDLE DOT
    u'\u221a'   # 0x00fb -> SQUARE ROOT
    u'\u207f'   # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
    u'\xb2'     # 0x00fd -> SUPERSCRIPT TWO
    u'\u25a0'   # 0x00fe -> BLACK SQUARE
    u'\xa0'     # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
# Build the encoding map by inverting the decoding map, exactly as gencodec
# does.  CP863 maps every byte to a distinct Unicode code point, so the
# inversion is lossless and produces the same dict as the previously
# hand-maintained 250+-line literal — which could silently drift out of
# sync with decoding_map whenever one of the two tables was edited.
# (codecs.make_encoding_map would map any duplicated values to None, but
# this charmap has no duplicates.)
encoding_map = codecs.make_encoding_map(decoding_map)
| apache-2.0 |
hectorip/PolymerBoilerplate | bp_includes/external/requests/packages/chardet/euctwfreq.py | 3133 | 34872 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http:#www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ration = 512/(5401-512)=0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
# About 25% of the ideal distribution ratio computed in the header comment
# above (0.74851 / (1 - 0.74851) ~= 2.98), per that comment's own note.
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Size of the EUCTWCharToFreqOrder table below; presumably used as the
# valid-index bound by the distribution analyser — confirm against caller.
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
#Everything below is of no interest for detection purpose
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
| gpl-2.0 |
mjudsp/Tsallis | sklearn/gaussian_process/tests/test_gpr.py | 23 | 11915 | """Testing for Gaussian process regression """
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.utils.testing \
import (assert_true, assert_greater, assert_array_less,
assert_almost_equal, assert_equal)
def f(x):
    """Target function used to generate the regression data: x * sin(x)."""
    return np.sin(x) * x
# Training inputs as a (6, 1) column vector, as expected by the GP API.
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Held-out query points interleaved with the training locations.
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
# Noise-free training targets y = x * sin(x), flattened to shape (6,).
y = f(X).ravel()
# RBF kernel whose length scale is not optimized; tests that exercise
# hyperparameter tuning skip this one.
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
# Kernels of increasing complexity: plain RBF, fixed RBF, bounded RBF,
# scaled RBF, and scaled RBF plus a small constant offset term.
kernels = [RBF(length_scale=1.0), fixed_kernel,
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
           C(1.0, (1e-2, 1e2)) *
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
           C(1.0, (1e-2, 1e2)) *
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
           C(1e-5, (1e-5, 1e2)),
           C(0.1, (1e-2, 1e2)) *
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
           C(1e-5, (1e-5, 1e2))]
def test_gpr_interpolation():
    """A noise-free GP must interpolate its training data exactly."""
    for kernel in kernels:
        model = GaussianProcessRegressor(kernel=kernel)
        model.fit(X, y)
        pred_mean, pred_cov = model.predict(X, return_cov=True)
        # The posterior mean passes through every training target ...
        assert_true(np.allclose(pred_mean, y))
        # ... and the posterior variance vanishes at the training points.
        assert_true(np.allclose(np.diag(pred_cov), 0.))
def test_lml_improving():
    """Hyperparameter tuning must increase the log-marginal likelihood."""
    for k in kernels:
        if k == fixed_kernel:
            continue  # nothing to optimize for a fixed kernel
        model = GaussianProcessRegressor(kernel=k).fit(X, y)
        lml_opt = model.log_marginal_likelihood(model.kernel_.theta)
        lml_init = model.log_marginal_likelihood(k.theta)
        assert_greater(lml_opt, lml_init)
def test_lml_precomputed():
    """Calling log_marginal_likelihood() with no theta returns the stored optimum."""
    for k in kernels:
        model = GaussianProcessRegressor(kernel=k).fit(X, y)
        explicit = model.log_marginal_likelihood(model.kernel_.theta)
        assert_equal(explicit, model.log_marginal_likelihood())
def test_converged_to_local_maximum():
    """After optimization, each theta has ~zero gradient or sits on a bound."""
    for k in kernels:
        if k == fixed_kernel:
            continue  # fixed kernels are not optimized
        model = GaussianProcessRegressor(kernel=k).fit(X, y)
        lml, grad = model.log_marginal_likelihood(model.kernel_.theta, True)
        at_lower = model.kernel_.theta == model.kernel_.bounds[:, 0]
        at_upper = model.kernel_.theta == model.kernel_.bounds[:, 1]
        assert_true(np.all((np.abs(grad) < 1e-4) | at_lower | at_upper))
def test_solution_inside_bounds():
    """Optimized hyperparameters must remain within the kernel bounds."""
    for k in kernels:
        if k == fixed_kernel:
            continue  # fixed kernels are not optimized
        model = GaussianProcessRegressor(kernel=k).fit(X, y)
        bounds = model.kernel_.bounds
        # Replace infinite upper bounds so the strict comparison is defined.
        finite_max = np.finfo(model.kernel_.theta.dtype).max
        bounds[~np.isfinite(bounds[:, 1]), 1] = finite_max
        eps = 1e-10  # tolerance for theta landing exactly on a bound
        assert_array_less(bounds[:, 0], model.kernel_.theta + eps)
        assert_array_less(model.kernel_.theta, bounds[:, 1] + eps)
def test_lml_gradient():
    """The analytic LML gradient must match a finite-difference estimate."""
    for k in kernels:
        model = GaussianProcessRegressor(kernel=k).fit(X, y)
        lml, grad = model.log_marginal_likelihood(k.theta, True)
        grad_fd = approx_fprime(
            k.theta,
            lambda theta: model.log_marginal_likelihood(theta, False),
            1e-10)
        assert_almost_equal(grad, grad_fd, 3)
def test_prior():
    """The GP prior has zero mean and a constant predictive variance."""
    for k in kernels:
        model = GaussianProcessRegressor(kernel=k)  # unfitted => prior
        mean, cov = model.predict(X, return_cov=True)
        assert_almost_equal(mean, 0, 5)
        if len(model.kernel.theta) > 1:
            # XXX: quite hacky, works only for current kernels
            assert_almost_equal(np.diag(cov), np.exp(k.theta[0]), 5)
        else:
            assert_almost_equal(np.diag(cov), 1, 5)
def test_sample_statistics():
    """Sample mean/variance of GP draws must match the predictive moments."""
    for k in kernels:
        model = GaussianProcessRegressor(kernel=k).fit(X, y)
        mean, cov = model.predict(X2, return_cov=True)
        draws = model.sample_y(X2, 300000)
        # More digits accuracy would require many more samples
        assert_almost_equal(mean, np.mean(draws, 1), 1)
        scale = np.diag(cov).max()
        assert_almost_equal(np.diag(cov) / scale,
                            np.var(draws, 1) / scale, 1)
def test_no_optimizer():
    """With optimizer=None the kernel parameters stay at their initial values."""
    gpr = GaussianProcessRegressor(kernel=RBF(1.0), optimizer=None).fit(X, y)
    assert_equal(np.exp(gpr.kernel_.theta), 1.0)
def test_predict_cov_vs_std():
    """Predicted std-dev must equal the sqrt of the covariance diagonal."""
    for k in kernels:
        model = GaussianProcessRegressor(kernel=k).fit(X, y)
        _, cov = model.predict(X2, return_cov=True)
        _, std = model.predict(X2, return_std=True)
        assert_almost_equal(np.sqrt(np.diag(cov)), std)
def test_anisotropic_kernel():
    """GPR should recover meaningful anisotropic length-scales."""
    # The target varies ten times more slowly along the second dimension, so
    # the learned length-scales should differ by at least a factor of 5.
    rng = np.random.RandomState(0)
    X_train = rng.uniform(-1, 1, (50, 2))
    y_train = X_train[:, 0] + 0.1 * X_train[:, 1]
    gpr = GaussianProcessRegressor(kernel=RBF([1.0, 1.0])).fit(X_train, y_train)
    assert_greater(np.exp(gpr.kernel_.theta[1]),
                   np.exp(gpr.kernel_.theta[0]) * 5)
def test_random_starts():
    """
    Test that an increasing number of random-starts of GP fitting only
    increases the log marginal likelihood of the chosen theta.
    """
    n_samples, n_features = 25, 2
    # NOTE(review): the global seed looks redundant -- all randomness below
    # goes through `rng` or `random_state=0`; confirm before removing.
    np.random.seed(0)
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
        + rng.normal(scale=0.1, size=n_samples)

    # Amplitude * anisotropic RBF + additive white noise.
    kernel = C(1.0, (1e-2, 1e2)) \
        * RBF(length_scale=[1.0] * n_features,
              length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
        + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
    last_lml = -np.inf
    for n_restarts_optimizer in range(5):
        gp = GaussianProcessRegressor(
            kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
            random_state=0,).fit(X, y)
        lml = gp.log_marginal_likelihood(gp.kernel_.theta)
        # More restarts may only improve (or tie) the selected optimum.
        assert_greater(lml, last_lml - np.finfo(np.float32).eps)
        last_lml = lml
def test_y_normalization():
    """ Test normalization of the target values in GP

    Fitting non-normalizing GP on normalized y and fitting normalizing GP
    on unnormalized y should yield identical results
    """
    y_mean = y.mean(0)
    y_norm = y - y_mean
    for kernel in kernels:
        # Fit non-normalizing GP on normalized y
        gpr = GaussianProcessRegressor(kernel=kernel)
        gpr.fit(X, y_norm)
        # Fit normalizing GP on unnormalized y
        gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr_norm.fit(X, y)

        # Compare predicted mean, std-devs and covariances
        y_pred, y_pred_std = gpr.predict(X2, return_std=True)
        # Undo the manual centering before comparing the two models.
        y_pred = y_mean + y_pred
        y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)

        assert_almost_equal(y_pred, y_pred_norm)
        assert_almost_equal(y_pred_std, y_pred_std_norm)

        _, y_cov = gpr.predict(X2, return_cov=True)
        _, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
        assert_almost_equal(y_cov, y_cov_norm)
def test_y_multioutput():
    """ Test that GPR can deal with multi-dimensional target values"""
    y_2d = np.vstack((y, y * 2)).T

    # Test for fixed kernel that first dimension of 2d GP equals the output
    # of 1d GP and that second dimension is twice as large
    kernel = RBF(length_scale=1.0)

    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
                                   normalize_y=False)
    gpr.fit(X, y)

    gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
                                      normalize_y=False)
    gpr_2d.fit(X, y_2d)

    y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
    y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
    _, y_cov_1d = gpr.predict(X2, return_cov=True)
    _, y_cov_2d = gpr_2d.predict(X2, return_cov=True)

    assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
    assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)

    # Standard deviation and covariance do not depend on output
    assert_almost_equal(y_std_1d, y_std_2d)
    assert_almost_equal(y_cov_1d, y_cov_2d)

    y_sample_1d = gpr.sample_y(X2, n_samples=10)
    y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
    assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])

    # Test hyperparameter optimization
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr.fit(X, y)

        gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr_2d.fit(X, np.vstack((y, y)).T)

        assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
def test_custom_optimizer():
    """ Test that GPR can use externally defined optimizers. """
    # Define a dummy optimizer that simply tests 50 random hyperparameters
    def optimizer(obj_func, initial_theta, bounds):
        rng = np.random.RandomState(0)
        theta_opt, func_min = \
            initial_theta, obj_func(initial_theta, eval_gradient=False)
        for _ in range(50):
            # Draw uniformly inside (clipped) bounds and keep the best theta.
            theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
                                              np.minimum(1, bounds[:, 1])))
            f = obj_func(theta, eval_gradient=False)
            if f < func_min:
                theta_opt, func_min = theta, f
        return theta_opt, func_min

    for kernel in kernels:
        if kernel == fixed_kernel:
            continue
        gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
        gpr.fit(X, y)
        # Checks that optimizer improved marginal likelihood
        assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
                       gpr.log_marginal_likelihood(gpr.kernel.theta))
def test_duplicate_input():
    """ Test GPR can handle two different output-values for the same input. """
    for kernel in kernels:
        # alpha adds observation noise, making duplicate inputs well-posed.
        gpr_equal_inputs = \
            GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
        gpr_similar_inputs = \
            GaussianProcessRegressor(kernel=kernel, alpha=1e-2)

        X_ = np.vstack((X, X[0]))
        y_ = np.hstack((y, y[0] + 1))
        gpr_equal_inputs.fit(X_, y_)

        # Shift the duplicate by a numerically negligible amount instead.
        X_ = np.vstack((X, X[0] + 1e-15))
        y_ = np.hstack((y, y[0] + 1))
        gpr_similar_inputs.fit(X_, y_)

        X_test = np.linspace(0, 10, 100)[:, None]
        y_pred_equal, y_std_equal = \
            gpr_equal_inputs.predict(X_test, return_std=True)
        y_pred_similar, y_std_similar = \
            gpr_similar_inputs.predict(X_test, return_std=True)

        assert_almost_equal(y_pred_equal, y_pred_similar)
        assert_almost_equal(y_std_equal, y_std_similar)
| bsd-3-clause |
tanglei528/nova | nova/virt/baremetal/db/api.py | 29 | 4985 | # Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
The underlying driver is loaded as a :class:`LazyPluggable`.
Functions in this module are imported into the nova.virt.baremetal.db
namespace. Call these functions from nova.virt.baremetal.db namespace, not
the nova.virt.baremetal.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:baremetal_db_backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:[BAREMETAL] sql_connection: string specifying the sqlalchemy connection to
use, like: `sqlite:///var/lib/nova/nova.sqlite`.
"""
from oslo.config import cfg
from nova import utils
# NOTE(deva): we can't move baremetal_db_backend into an OptGroup yet
# because utils.LazyPluggable doesn't support reading from
# option groups. See bug #1093043.
# Option declaring which bare-metal DB backend implementation to load.
db_opts = [
    cfg.StrOpt('db_backend',
               default='sqlalchemy',
               help='The backend to use for bare-metal database'),
]

baremetal_group = cfg.OptGroup(name='baremetal',
                               title='Baremetal Options')

CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(db_opts, baremetal_group)

# Lazily resolves to the concrete backend module on first attribute access.
IMPL = utils.LazyPluggable(
    'db_backend',
    config_group='baremetal',
    sqlalchemy='nova.virt.baremetal.db.sqlalchemy.api')
# Thin pass-through wrappers: every function below delegates verbatim to the
# lazily-loaded backend module referenced by ``IMPL``.


def bm_node_get_all(context, service_host=None):
    return IMPL.bm_node_get_all(context,
                                service_host=service_host)


def bm_node_get_associated(context, service_host=None):
    return IMPL.bm_node_get_associated(context,
                                       service_host=service_host)


def bm_node_get_unassociated(context, service_host=None):
    return IMPL.bm_node_get_unassociated(context,
                                         service_host=service_host)


def bm_node_find_free(context, service_host=None,
                      memory_mb=None, cpus=None, local_gb=None):
    # Find an unprovisioned node satisfying the given minimum resources.
    return IMPL.bm_node_find_free(context,
                                  service_host=service_host,
                                  memory_mb=memory_mb,
                                  cpus=cpus,
                                  local_gb=local_gb)


def bm_node_get(context, bm_node_id):
    return IMPL.bm_node_get(context, bm_node_id)


def bm_node_get_by_instance_uuid(context, instance_uuid):
    return IMPL.bm_node_get_by_instance_uuid(context,
                                             instance_uuid)


def bm_node_get_by_node_uuid(context, node_uuid):
    return IMPL.bm_node_get_by_node_uuid(context, node_uuid)


def bm_node_create(context, values):
    return IMPL.bm_node_create(context, values)


def bm_node_destroy(context, bm_node_id):
    return IMPL.bm_node_destroy(context, bm_node_id)


def bm_node_update(context, bm_node_id, values):
    return IMPL.bm_node_update(context, bm_node_id, values)


def bm_node_associate_and_update(context, node_uuid, values):
    # Claim a node for an instance and update its fields in one backend call.
    return IMPL.bm_node_associate_and_update(context, node_uuid, values)


def bm_interface_get(context, if_id):
    return IMPL.bm_interface_get(context, if_id)


def bm_interface_get_all(context):
    return IMPL.bm_interface_get_all(context)


def bm_interface_destroy(context, if_id):
    return IMPL.bm_interface_destroy(context, if_id)


def bm_interface_create(context, bm_node_id, address, datapath_id, port_no):
    return IMPL.bm_interface_create(context, bm_node_id, address,
                                    datapath_id, port_no)


def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
    return IMPL.bm_interface_set_vif_uuid(context, if_id, vif_uuid)


def bm_interface_get_by_vif_uuid(context, vif_uuid):
    return IMPL.bm_interface_get_by_vif_uuid(context, vif_uuid)


def bm_interface_get_all_by_bm_node_id(context, bm_node_id):
    return IMPL.bm_interface_get_all_by_bm_node_id(context, bm_node_id)
| apache-2.0 |
navrasio/mxnet | example/image-classification/symbols/mobilenet.py | 29 | 8457 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding:utf-8 -*-
'''
mobilenet
Suittable for image with around resolution x resolution, resolution is multiple of 32.
Reference:
MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications
https://arxiv.org/abs/1704.04861
'''
__author__ = 'qingzhouzhen'
__date__ = '17/8/5'
__modify__ = 'dwSun'
__modified_date__ = '17/11/30'
import mxnet as mx
alpha_values = [0.25, 0.50, 0.75, 1.0]
def Conv(data, num_filter=1, kernel=(1, 1), stride=(1, 1), pad=(0, 0), num_group=1, name='', suffix=''):
    """Convolution -> BatchNorm -> ReLU block used throughout MobileNet."""
    prefix = '%s%s' % (name, suffix)
    conv = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=kernel,
                              num_group=num_group, stride=stride, pad=pad,
                              no_bias=True, name=prefix + '_conv2d')
    bn = mx.sym.BatchNorm(data=conv, name=prefix + '_batchnorm', fix_gamma=True)
    return mx.sym.Activation(data=bn, act_type='relu', name=prefix + '_relu')
def Conv_DPW(data, depth=1, stride=(1, 1), name='', idx=0, suffix=''):
    """Depthwise 3x3 convolution followed by a 1x1 pointwise convolution."""
    dw = Conv(data, num_group=depth, num_filter=depth, kernel=(3, 3),
              pad=(1, 1), stride=stride, name="conv_%d_dw" % (idx),
              suffix=suffix)
    # The pointwise stage doubles the channel count exactly when stride is 2.
    return Conv(dw, num_filter=depth * stride[0], kernel=(1, 1), pad=(0, 0),
                stride=(1, 1), name="conv_%d" % (idx), suffix=suffix)
def get_symbol_compact(num_classes, alpha=1, resolution=224, **kwargs):
    """Build the MobileNet symbol using the Conv_DPW helper (compact form).

    num_classes -- number of outputs of the final softmax layer.
    alpha       -- width multiplier; must be one of ``alpha_values``.
    resolution  -- input image side length; must be a multiple of 32.
    """
    assert alpha in alpha_values, 'Invalid alpha={0}, must be one of {1}'.format(alpha, alpha_values)
    assert resolution % 32 == 0, 'resolution must be multiple of 32'

    base = int(32 * alpha)

    data = mx.symbol.Variable(name="data")  # 224
    conv_1 = Conv(data, num_filter=base, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_1")  # 32*alpha, 224/112
    conv_2_dw = Conv(conv_1, num_group=base, num_filter=base, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_2_dw")  # 112/112
    conv_2 = Conv(conv_2_dw, num_filter=base * 2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_2")  # 32*alpha, 112/112

    conv_3_dpw = Conv_DPW(conv_2, depth=base * 2, stride=(2, 2), idx=3)  # 64*alpha, 112/56 => 56/56
    conv_4_dpw = Conv_DPW(conv_3_dpw, depth=base * 4, stride=(1, 1), idx=4)  # 128*alpha, 56/56 =>56/56
    conv_5_dpw = Conv_DPW(conv_4_dpw, depth=base * 4, stride=(2, 2), idx=5)  # 128*alpha, 56/28 => 28/28
    conv_6_dpw = Conv_DPW(conv_5_dpw, depth=base * 8, stride=(1, 1), idx=6)  # 256*alpha, 28/28 => 28/28
    conv_7_dpw = Conv_DPW(conv_6_dpw, depth=base * 8, stride=(2, 2), idx=7)  # 256*alpha, 28/14 => 14/14

    # Five identical stride-1 depthwise-separable stages at 14x14.
    conv_dpw = conv_7_dpw
    for idx in range(8, 13):
        conv_dpw = Conv_DPW(conv_dpw, depth=base * 16, stride=(1, 1), idx=idx)  # 512*alpha, 14/14

    conv_12_dpw = conv_dpw
    conv_13_dpw = Conv_DPW(conv_12_dpw, depth=base * 16, stride=(2, 2), idx=13)  # 512*alpha, 14/7 => 7/7
    conv_14_dpw = Conv_DPW(conv_13_dpw, depth=base * 32, stride=(1, 1), idx=14)  # 1024*alpha, 7/7 => 7/7

    # Global average pooling collapses the remaining spatial dimensions.
    pool_size = int(resolution / 32)
    pool = mx.sym.Pooling(data=conv_14_dpw, kernel=(pool_size, pool_size), stride=(1, 1), pool_type="avg", name="global_pool")
    flatten = mx.sym.Flatten(data=pool, name="flatten")
    fc = mx.symbol.FullyConnected(data=flatten, num_hidden=num_classes, name='fc')
    softmax = mx.symbol.SoftmaxOutput(data=fc, name='softmax')
    return softmax
def get_symbol(num_classes, alpha=1, resolution=224, **kwargs):
    """Build the MobileNet symbol layer by layer (explicit, non-compact form).

    num_classes -- number of outputs of the final softmax layer.
    alpha       -- width multiplier; must be one of ``alpha_values``.
    resolution  -- input image side length; must be a multiple of 32.
    """
    assert alpha in alpha_values, 'Invalid alpha=[{0}], must be one of [{1}]'.format(alpha, alpha_values)
    assert resolution % 32 == 0, 'resolution must be multpile of 32'

    base = int(32 * alpha)

    data = mx.symbol.Variable(name="data")  # 224

    depth = base  # 32*alpha
    conv_1 = Conv(data, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_1")  # 224/112

    depth = base  # 32*alpha
    conv_2_dw = Conv(conv_1, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_2_dw")  # 112/112
    conv_2 = Conv(conv_2_dw, num_filter=depth * 2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_2")  # 112/112

    depth = base * 2  # 64*alpha
    conv_3_dw = Conv(conv_2, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_3_dw")  # 112/56
    conv_3 = Conv(conv_3_dw, num_filter=depth * 2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_3")  # 56/56

    depth = base * 4  # 128*alpha
    conv_4_dw = Conv(conv_3, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_4_dw")  # 56/56
    conv_4 = Conv(conv_4_dw, num_filter=depth, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_4")  # 56/56

    depth = base * 4  # 128*alpha
    conv_5_dw = Conv(conv_4, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_5_dw")  # 56/28
    conv_5 = Conv(conv_5_dw, num_filter=depth * 2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_5")  # 28/28

    depth = base * 8  # 256*alpha
    conv_6_dw = Conv(conv_5, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_6_dw")  # 28/28
    conv_6 = Conv(conv_6_dw, num_filter=depth, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_6")  # 28/28

    depth = base * 8  # 256*alpha
    conv_7_dw = Conv(conv_6, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_7_dw")  # 28/14
    conv_7 = Conv(conv_7_dw, num_filter=depth * 2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_7")  # 14/14

    # Five stride-1 depthwise-separable stages at 14x14 resolution.
    depth = base * 16  # 512*alpha
    conv_8_dw = Conv(conv_7, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_8_dw")  # 14/14
    conv_8 = Conv(conv_8_dw, num_filter=depth, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_8")  # 14/14
    conv_9_dw = Conv(conv_8, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_9_dw")  # 14/14
    conv_9 = Conv(conv_9_dw, num_filter=depth, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_9")  # 14/14
    conv_10_dw = Conv(conv_9, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_10_dw")  # 14/14
    conv_10 = Conv(conv_10_dw, num_filter=depth, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_10")  # 14/14
    conv_11_dw = Conv(conv_10, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_11_dw")  # 14/14
    conv_11 = Conv(conv_11_dw, num_filter=depth, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_11")  # 14/14
    conv_12_dw = Conv(conv_11, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_12_dw")  # 14/14
    conv_12 = Conv(conv_12_dw, num_filter=depth, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_12")  # 14/14

    depth = base * 16  # 512*alpha
    conv_13_dw = Conv(conv_12, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_13_dw")  # 14/7
    conv_13 = Conv(conv_13_dw, num_filter=depth * 2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_13")  # 7/7

    depth = base * 32  # 1024*alpha
    conv_14_dw = Conv(conv_13, num_group=depth, num_filter=depth, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_14_dw")  # 7/7
    conv_14 = Conv(conv_14_dw, num_filter=depth, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_14")  # 7/7

    # Global average pooling collapses the remaining spatial dimensions.
    pool_size = int(resolution / 32)
    pool = mx.sym.Pooling(data=conv_14, kernel=(pool_size, pool_size), stride=(1, 1), pool_type="avg", name="global_pool")
    flatten = mx.sym.Flatten(data=pool, name="flatten")
    fc = mx.symbol.FullyConnected(data=flatten, num_hidden=num_classes, name='fc')
    softmax = mx.symbol.SoftmaxOutput(data=fc, name='softmax')
    return softmax
| apache-2.0 |
blampe/M2M | basic/profiles/views.py | 3 | 2564 | from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import Http404, HttpResponseRedirect
from django.views.generic import list_detail
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from basic.profiles.models import *
from basic.profiles.forms import *
def profile_list(request):
    # Paginated listing of every Profile, via Django's generic list view.
    return list_detail.object_list(
        request,
        queryset = Profile.objects.all(),
        paginate_by = 20,
    )
# Re-use the generic view's docstring for introspection/help().
profile_list.__doc__ = list_detail.object_list.__doc__
def profile_detail(request, username):
    """Display the profile page of *username* (matched case-insensitively).

    Raises Http404 when either the user or their profile does not exist.
    """
    try:
        user = User.objects.get(username__iexact=username)
    except User.DoesNotExist:
        raise Http404
    try:
        profile = Profile.objects.get(user=user)
    except Profile.DoesNotExist:
        # BUG FIX: a user without an attached Profile previously raised an
        # unhandled DoesNotExist (HTTP 500); treat it as a 404 instead.
        raise Http404
    context = { 'object':profile }
    return render_to_response('profiles/profile_detail.html', context, context_instance=RequestContext(request))
@login_required
def profile_edit(request, template_name='profiles/profile_form.html'):
    """Edit profile."""
    if request.POST:
        # Bound forms/formsets populated from the submitted data.
        profile = Profile.objects.get(user=request.user)
        profile_form = ProfileForm(request.POST, request.FILES, instance=profile)
        user_form = UserForm(request.POST, instance=request.user)
        service_formset = ServiceFormSet(request.POST, instance=profile)
        link_formset = LinkFormSet(request.POST, instance=profile)
        if profile_form.is_valid() and user_form.is_valid() and service_formset.is_valid() and link_formset.is_valid():
            profile_form.save()
            user_form.save()
            service_formset.save()
            link_formset.save()
            return HttpResponseRedirect(reverse('profile_detail', kwargs={'username': request.user.username}))
        else:
            # Re-render with the bound (invalid) forms so errors are shown.
            context = {
                'profile_form': profile_form,
                'user_form': user_form,
                'service_formset': service_formset,
                'link_formset': link_formset
            }
    else:
        # Initial GET: unbound forms pre-filled from the current profile.
        profile = Profile.objects.get(user=request.user)
        service_formset = ServiceFormSet(instance=profile)
        link_formset = LinkFormSet(instance=profile)
        context = {
            'profile_form': ProfileForm(instance=profile),
            'user_form': UserForm(instance=request.user),
            'service_formset': service_formset,
            'link_formset': link_formset
        }
return render_to_response(template_name, context, context_instance=RequestContext(request)) | mit |
hubertta/Wget | testenv/Test-504.py | 10 | 2069 | #!/usr/bin/env python3
from sys import exit
from test.http_test import HTTPTest
from misc.wget_file import WgetFile
"""
This test ensures that Wget handles a 504 Gateway Timeout response
correctly.
Since, we do not have a direct mechanism for conditionally sending responses
via the HTTP Server, I've used a workaround.
The server will always respond to a request for File1 with a 504 Gateway
Timeout. Using the --tries=2 option, we ensure that Wget attempts the file
only twice and then move on to the next file. Finally, check the exact
requests that the Server received and compare them, in order, to the
expected sequence of requests.
In this case, we expect Wget to attempt File1 twice and File2 once. If Wget
considered 504 as a general Server Error, it would be a fatal failure and
Wget would request File1 only once.
"""
TEST_NAME = "504 Gateway Timeouts"

############# File Definitions ###############################################
File1 = """All happy families are alike;
Each unhappy family is unhappy in its own way"""
File2 = "Anyone for chocochip cookies?"

# The server unconditionally answers requests for File1 with a 504, which
# Wget must treat as a transient (retryable) error.
File1_rules = {
    "Response" : 504
}
A_File = WgetFile ("File1", File1, rules=File1_rules)
B_File = WgetFile ("File2", File2)

# Exact request sequence expected at the server: File1 tried twice
# (because of --tries=2), then File2.
Request_List = [
    [
        "GET /File1",
        "GET /File1",
        "GET /File2",
    ]
]

WGET_OPTIONS = "--tries=2"
WGET_URLS = [["File1", "File2"]]

Files = [[A_File, B_File]]

# Exit status expected from Wget after File1 ultimately fails.
ExpectedReturnCode = 4
# Only File2 should end up on disk.
ExpectedDownloadedFiles = [B_File]

################ Pre and Post Test Hooks #####################################
pre_test = {
    "ServerFiles" : Files
}
test_options = {
    "WgetCommands" : WGET_OPTIONS,
    "Urls" : WGET_URLS
}
post_test = {
    "ExpectedFiles" : ExpectedDownloadedFiles,
    "ExpectedRetcode" : ExpectedReturnCode,
    "FilesCrawled" : Request_List
}

err = HTTPTest (
    name=TEST_NAME,
    pre_hook=pre_test,
    test_params=test_options,
    post_hook=post_test
).begin ()

exit (err)
| gpl-3.0 |
DmitryYurov/BornAgain | Tests/Functional/Python/PyCore/shape2d.py | 2 | 1377 | import numpy, os, sys, unittest
sys.path.append("@CMAKE_LIBRARY_OUTPUT_DIRECTORY@")
import bornagain as ba
class Shape2DTest(unittest.TestCase):
    """Tests constructing ba.Polygon from Python lists and NumPy arrays."""

    def test_constructPolygonFromList(self):
        """
        Testing construction of polygon from Python lists of coordinates
        """
        # initializing from list
        x = [-1.0, 2.0, 2.0, -1.0]
        y = [-0.5, -0.5, 1.5, 1.5]
        p = ba.Polygon(x, y)
        # One point well inside, one near the upper-right corner.
        self.assertTrue(p.contains(-0.75, -0.25))
        self.assertTrue(p.contains(1.5, 1.0))

        # initializing from list inline
        p2 = ba.Polygon([-1.0, 2.0, 2.0, -1.0], [-0.5, -0.5, 1.5, 1.5])
        self.assertTrue(p2.contains(-0.75, -0.25))
        self.assertTrue(p2.contains(1.5, 1.0))

        # initialization from 2D list inline (one [x, y] pair per vertex)
        p3 = ba.Polygon([[-1.0, -0.5], [2.0, -0.5], [2.0, 1.5], [-1.0, 1.5]])
        self.assertTrue(p3.contains(-0.75, -0.25))
        self.assertTrue(p3.contains(1.5, 1.0))

    def test_constructPolygonFromNumpy(self):
        """
        Testing construction of polygon from a Numpy array of vertices
        """
        # initialization from numpy array
        points = numpy.array([[-1.0, -0.5], [2.0, -0.5], [2.0, 1.5], [-1.0, 1.5]])
        p = ba.Polygon(points)
        self.assertTrue(p.contains(-0.75, -0.25))
        self.assertTrue(p.contains(1.5, 1.0))


if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
jptomo/rpython-lang-scheme | rpython/translator/backendopt/tailrecursion.py | 2 | 1271 | from rpython.flowspace.model import mkentrymap, checkgraph
# this transformation is very academical -- I had too much time
def _remove_tail_call(translator, graph, block):
    # Rewrite ``return f(args)`` at the end of *block* into a jump back to
    # the start block of *graph*, turning the tail call into a loop edge.
    print "removing tail call"
    assert len(block.exits) == 1
    assert block.exits[0].target is graph.returnblock
    # The call's result must be exactly the value being returned.
    assert block.operations[-1].result == block.exits[0].args[0]
    op = block.operations[-1]
    block.operations = block.operations[:-1]
    # Feed the call arguments (minus the callee, args[0]) into the entry block.
    block.exits[0].args = op.args[1:]
    block.exits[0].target = graph.startblock
def remove_tail_calls_to_self(translator, graph):
    """Replace tail calls of *graph* to itself with jumps to its start block.

    Scans every block that exits straight into the return block; when such a
    block ends in a ``direct_call`` whose result is the returned value and
    whose callee is *graph* itself, the call is rewritten into a loop edge
    by ``_remove_tail_call``.
    """
    from rpython.translator.simplify import get_graph
    entrymap = mkentrymap(graph)
    changed = False
    for link in entrymap[graph.returnblock]:
        block = link.prevblock
        if (len(block.exits) == 1 and
            len(block.operations) > 0 and
            block.operations[-1].opname == 'direct_call' and
            block.operations[-1].result == link.args[0]):
            # BUG FIX: the original compared ``graph is graph`` (always
            # true), which would have rewritten tail calls to *any*
            # function.  Resolve the actual callee graph and only rewrite
            # genuine self-calls.
            called_graph = get_graph(block.operations[-1].args[0], translator)
            if called_graph is graph:
                _remove_tail_call(translator, graph, block)
                changed = True
    if changed:
        from rpython.translator import simplify
        checkgraph(graph)
        simplify.remove_identical_vars(graph)
        simplify.eliminate_empty_blocks(graph)
        simplify.join_blocks(graph)
| mit |
wschenck/nest-simulator | pynest/nest/tests/test_rate_copy_model.py | 20 | 3362 | # -*- coding: utf-8 -*-
#
# test_rate_copy_model.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import unittest
import numpy as np
@nest.ll_api.check_stack
class RateCopyModelTestCase(unittest.TestCase):
    '''
    Test whether a rate connection created by copy model behaves
    identical to the original version
    '''

    def test_rate_copy_model(self):
        # neuron parameters
        neuron_params = {'tau': 5., 'sigma': 0.}
        drive = 1.5
        weight = 0.5

        # simulation parameters
        simtime = 100.
        dt = 0.001

        nest.set_verbosity('M_WARNING')
        nest.ResetKernel()
        nest.SetKernelStatus(
            {'resolution': dt, 'use_wfr': True, 'print_time': False})

        # set up rate neuron network: one constant-drive neuron feeding two
        # identical receivers through the original and the copied synapse.
        rate_neuron_drive = nest.Create(
            'lin_rate_ipn', params={'mu': drive, 'sigma': 0.})
        rate_neuron_1 = nest.Create(
            'lin_rate_ipn', params=neuron_params)
        rate_neuron_2 = nest.Create(
            'lin_rate_ipn', params=neuron_params)
        multimeter = nest.Create(
            'multimeter', params={
                'record_from': ['rate'],
                'interval': dt})

        # create new connection model as a copy of the built-in one
        nest.CopyModel('rate_connection_instantaneous', 'rate_connection_new')

        # record rates and connect neurons
        neurons = rate_neuron_1 + rate_neuron_2
        nest.Connect(
            multimeter, neurons, 'all_to_all', {'delay': 10.})
        nest.Connect(rate_neuron_drive, rate_neuron_1,
                     'all_to_all',
                     {'synapse_model': 'rate_connection_instantaneous',
                      'weight': weight})
        nest.Connect(rate_neuron_drive, rate_neuron_2,
                     'all_to_all', {'synapse_model': 'rate_connection_new',
                                    'weight': weight})

        # simulate
        nest.Simulate(simtime)

        # make sure rates are identical: both receivers get the same drive
        # through equal-weight synapses, so their traces must agree.
        events = nest.GetStatus(multimeter)[0]['events']
        senders = events['senders']
        rate_1 = np.array(events['rate'][
            np.where(senders == rate_neuron_1.get('global_id'))])
        rate_2 = np.array(events['rate'][
            np.where(senders == rate_neuron_2.get('global_id'))])
        assert(np.sum(np.abs(rate_2 - rate_1)) < 1e-12)
def suite():
    """Collect the test case into a TestSuite."""
    # makeSuite is sort of obsolete http://bugs.python.org/issue2721
    # using loadTestsFromTestCase instead.
    loader = unittest.TestLoader()
    return unittest.TestSuite(
        [loader.loadTestsFromTestCase(RateCopyModelTestCase)])
def run():
    """Execute the suite with a verbose text runner."""
    unittest.TextTestRunner(verbosity=2).run(suite())
if __name__ == '__main__':
run()
| gpl-2.0 |
jcftang/ansible | lib/ansible/modules/network/panos/panos_admpwd.py | 32 | 5961 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_admpwd
short_description: change admin password of PAN-OS device using SSH with SSH key
description:
- Change the admin password of PAN-OS via SSH using a SSH key for authentication.
- Useful for AWS instances where the first login should be done via SSH.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- paramiko
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
username:
description:
- username for initial authentication
required: false
default: "admin"
key_filename:
description:
- filename of the SSH Key to use for authentication
required: true
newpassword:
description:
- password to configure for admin on the PAN-OS device
required: true
'''
EXAMPLES = '''
# Tries for 10 times to set the admin password of 192.168.1.1 to "badpassword"
# via SSH, authenticating using key /tmp/ssh.key
- name: set admin password
panos_admpwd:
ip_address: "192.168.1.1"
username: "admin"
key_filename: "/tmp/ssh.key"
newpassword: "badpassword"
register: result
until: not result|failed
retries: 10
delay: 30
'''
RETURN = '''
status:
description: success status
returned: success
type: string
sample: "Last login: Fri Sep 16 11:09:20 2016 from 10.35.34.56.....Configuration committed successfully"
'''
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
from ansible.module_utils.basic import AnsibleModule
import time
import sys

# paramiko is an optional third-party dependency; record its availability so
# main() can fail with a clear message instead of an ImportError traceback.
try:
    import paramiko
    HAS_LIB=True
except ImportError:
    HAS_LIB=False
# Maximum number of bytes pulled off the channel per recv() call.
_PROMPTBUFF = 4096


def wait_with_timeout(module, shell, prompt, timeout=60):
    """Read from an interactive shell until its output ends with *prompt*.

    :param module: AnsibleModule used to report a fatal timeout.
    :param shell: paramiko channel-like object exposing recv_ready()/recv().
    :param prompt: character expected as the last non-whitespace character.
    :param timeout: seconds to wait before failing the module.
    :return: everything read from the shell, as text.
    """
    now = time.time()
    result = ""
    while True:
        if shell.recv_ready():
            data = shell.recv(_PROMPTBUFF)
            # paramiko returns bytes; on Python 3 bytes is not str, so decode
            # before concatenating (on Python 2, bytes IS str and is skipped).
            if not isinstance(data, str):
                data = data.decode('utf-8', errors='replace')
            result += data
            endresult = result.strip()
            if len(endresult) != 0 and endresult[-1] == prompt:
                break
        else:
            # nothing pending: sleep briefly instead of busy-spinning the CPU
            time.sleep(0.1)
        if time.time() - now > timeout:
            module.fail_json(msg="Timeout waiting for prompt")
    return result
def set_panwfw_password(module, ip_address, key_filename, newpassword, username):
    """Drive an interactive SSH session on the PAN-OS CLI to change the
    admin password and commit the change.

    Returns (changed, stdout) where stdout is the accumulated CLI output.
    In check mode only the connection is tested and (False, message) is
    returned without touching the device configuration.
    """
    stdout = ""
    ssh = paramiko.SSHClient()
    # add policy to accept all host keys, I haven't found
    # a way to retrieve the instance SSH key fingerprint from AWS
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(ip_address, username=username, key_filename=key_filename)
    shell = ssh.invoke_shell()

    # wait for the shell to start (operational-mode prompt ends with '>')
    buff = wait_with_timeout(module, shell, ">")
    stdout += buff

    # step into config mode
    shell.send('configure\n')
    # wait for the config prompt (config-mode prompt ends with '#')
    buff = wait_with_timeout(module, shell, "#")
    stdout += buff

    if module.check_mode:
        # exit and close connection: the successful prompt exchange above is
        # the whole "check"; the password is left intact
        shell.send('exit\n')
        ssh.close()
        return False, 'Connection test successful. Password left intact.'

    # set admin password
    shell.send('set mgt-config users ' + username + ' password\n')
    # wait for the password prompt
    buff = wait_with_timeout(module, shell, ":")
    stdout += buff
    # enter password for the first time
    shell.send(newpassword+'\n')
    # wait for the password prompt (confirmation)
    buff = wait_with_timeout(module, shell, ":")
    stdout += buff
    # enter password for the second time
    shell.send(newpassword+'\n')
    # wait for the config mode prompt
    buff = wait_with_timeout(module, shell, "#")
    stdout += buff
    # commit !
    shell.send('commit\n')
    # wait for the prompt; commits can take a while, hence the longer timeout
    buff = wait_with_timeout(module, shell, "#", 120)
    stdout += buff
    # the CLI prints "...committed successfully" on success; anything else
    # (validation error, another pending commit, ...) is treated as failure
    if 'success' not in buff:
        module.fail_json(msg="Error setting " + username + " password: " + stdout)
    # exit
    shell.send('exit\n')
    ssh.close()
    return True, stdout
def main():
    """Module entry point: validate parameters and drive the password change."""
    argument_spec = dict(
        ip_address=dict(required=True),
        username=dict(default='admin'),
        key_filename=dict(required=True),
        newpassword=dict(no_log=True, required=True)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    if not HAS_LIB:
        module.fail_json(msg='paramiko is required for this module')

    # argument_spec already enforces presence; these checks additionally
    # reject values that are present but empty.
    ip_address = module.params["ip_address"]
    if not ip_address:
        module.fail_json(msg="ip_address should be specified")
    key_filename = module.params["key_filename"]
    if not key_filename:
        module.fail_json(msg="key_filename should be specified")
    newpassword = module.params["newpassword"]
    if not newpassword:
        module.fail_json(msg="newpassword is required")
    username = module.params['username']

    try:
        changed, stdout = set_panwfw_password(module, ip_address, key_filename, newpassword, username)
        module.exit_json(changed=changed, stdout=stdout)
    except Exception:
        x = sys.exc_info()[1]
        # Fix: fail_json needs a serializable string message, not the raw
        # exception object.
        module.fail_json(msg=str(x))

if __name__ == '__main__':
    main()
| gpl-3.0 |
asnir/airflow | airflow/example_dags/example_latest_only_with_trigger.py | 44 | 1446 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example LatestOnlyOperator and TriggerRule interactions
"""
import datetime as dt
import airflow
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.latest_only_operator import LatestOnlyOperator
from airflow.utils.trigger_rule import TriggerRule
# A DAG scheduled every four hours, used to demonstrate how LatestOnlyOperator
# interacts with trigger rules.
dag = DAG(
    dag_id='latest_only_with_trigger',
    schedule_interval=dt.timedelta(hours=4),
    start_date=airflow.utils.dates.days_ago(2),
)

# Gate task: on all but the most recent scheduled run it skips everything
# downstream of it.
latest_only = LatestOnlyOperator(task_id='latest_only', dag=dag)

# Declare all the worker tasks up front, then wire the graph below.
task1 = DummyOperator(task_id='task1', dag=dag)
task2 = DummyOperator(task_id='task2', dag=dag)
task3 = DummyOperator(task_id='task3', dag=dag)
# ALL_DONE fires once upstreams finish in any state, so task4 still runs even
# when task1 was skipped by the latest-only gate.
task4 = DummyOperator(task_id='task4', dag=dag,
                      trigger_rule=TriggerRule.ALL_DONE)

# task1 sits behind the gate; task2 does not.  task3 (default trigger rule)
# and task4 (ALL_DONE) both depend on the pair.
latest_only.set_downstream(task1)
task3.set_upstream([task1, task2])
task4.set_upstream([task1, task2])
| apache-2.0 |
jk1/intellij-community | python/lib/Lib/pstats.py | 94 | 25940 | """Class for printing reports on profiled python code."""
# Class for printing reports on profiled python code. rev 1.0 4/1/94
#
# Based on prior profile module by Sjoerd Mullender...
# which was hacked somewhat by: Guido van Rossum
#
# see profile.doc and profile.py for more info.
# Copyright 1994, by InfoSeek Corporation, all rights reserved.
# Written by James Roskind
#
# Permission to use, copy, modify, and distribute this Python software
# and its associated documentation for any purpose (subject to the
# restriction in the following sentence) without fee is hereby granted,
# provided that the above copyright notice appears in all copies, and
# that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of InfoSeek not be used in
# advertising or publicity pertaining to distribution of the software
# without specific, written prior permission. This permission is
# explicitly restricted to the copying and modification of the software
# to remain in Python, compiled Python, or other languages (such as C)
# wherein the modified or derived code is exclusively imported into a
# Python module.
#
# INFOSEEK CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL INFOSEEK CORPORATION BE LIABLE FOR ANY
# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import sys
import os
import time
import marshal
import re

# Public API of this module: only the Stats class is exported.
__all__ = ["Stats"]
class Stats:
    """This class is used for creating reports from data generated by the
    Profile class.  It is a "friend" of that class, and imports data either
    by direct access to members of Profile class, or by reading in a dictionary
    that was emitted (via marshal) from the Profile class.

    The big change from the previous Profiler (in terms of raw functionality)
    is that an "add()" method has been provided to combine Stats from
    several distinct profile runs.  Both the constructor and the add()
    method now take arbitrarily many file names as arguments.

    All the print methods now take an argument that indicates how many lines
    to print.  If the arg is a floating point number between 0 and 1.0, then
    it is taken as a decimal percentage of the available lines to be printed
    (e.g., .1 means print 10% of all available lines).  If it is an integer,
    it is taken to mean the number of lines of data that you wish to have
    printed.

    The sort_stats() method now processes some additional options (i.e., in
    addition to the old -1, 0, 1, or 2).  It takes an arbitrary number of
    quoted strings to select the sort order.  For example sort_stats('time',
    'name') sorts on the major key of 'internal function time', and on the
    minor key of 'the name of the function'.  Look at the two tables in
    sort_stats() and get_sort_arg_defs(self) for more examples.

    All methods return self, so you can string together commands like:
        Stats('foo', 'goo').strip_dirs().sort_stats('calls').\
                            print_stats(5).print_callers(5)
    """

    def __init__(self, *args, **kwds):
        # I can't figure out how to explicitly specify a stream keyword arg
        # with *args:
        #     def __init__(self, *args, stream=sys.stdout): ...
        # so I use **kwds and squawk if something unexpected is passed in.
        self.stream = sys.stdout
        if "stream" in kwds:
            self.stream = kwds["stream"]
            del kwds["stream"]
        if kwds:
            keys = kwds.keys()
            keys.sort()
            extras = ", ".join(["%s=%s" % (k, kwds[k]) for k in keys])
            raise ValueError, "unrecognized keyword args: %s" % extras
        if not len(args):
            arg = None
        else:
            arg = args[0]
            args = args[1:]
        self.init(arg)
        self.add(*args)

    def init(self, arg):
        """(Re)initialize all statistics state, then load *arg* if given."""
        self.all_callees = None  # calc only if needed
        self.files = []
        self.fcn_list = None
        self.total_tt = 0
        self.total_calls = 0
        self.prim_calls = 0
        self.max_name_len = 0
        self.top_level = {}
        self.stats = {}
        self.sort_arg_dict = {}
        self.load_stats(arg)
        trouble = 1
        try:
            self.get_top_level_stats()
            trouble = 0
        finally:
            # report (but don't mask) failures caused by malformed stats data
            if trouble:
                print >> self.stream, "Invalid timing data",
                if self.files: print >> self.stream, self.files[-1],
                print >> self.stream

    def load_stats(self, arg):
        """Load statistics from a filename, a Profile-like object, or None."""
        if not arg: self.stats = {}
        elif isinstance(arg, basestring):
            f = open(arg, 'rb')
            self.stats = marshal.load(f)
            f.close()
            try:
                # prefix the recorded filename with its modification time
                file_stats = os.stat(arg)
                arg = time.ctime(file_stats.st_mtime) + " " + arg
            except:  # in case this is not unix
                pass
            self.files = [ arg ]
        elif hasattr(arg, 'create_stats'):
            # a live Profile object: snapshot its stats and take ownership
            arg.create_stats()
            self.stats = arg.stats
            arg.stats = {}
        if not self.stats:
            raise TypeError, "Cannot create or construct a %r object from '%r''" % (
                self.__class__, arg)
        return

    def get_top_level_stats(self):
        """Accumulate whole-run totals and find top-level (root) entries."""
        for func, (cc, nc, tt, ct, callers) in self.stats.items():
            self.total_calls += nc
            self.prim_calls  += cc
            self.total_tt    += tt
            # entries called by the synthetic profiler root are "top level"
            if callers.has_key(("jprofile", 0, "profiler")):
                self.top_level[func] = None
            if len(func_std_string(func)) > self.max_name_len:
                self.max_name_len = len(func_std_string(func))

    def add(self, *arg_list):
        """Merge statistics from more files or Stats objects into self."""
        if not arg_list: return self
        if len(arg_list) > 1: self.add(*arg_list[1:])
        other = arg_list[0]
        if type(self) != type(other) or self.__class__ != other.__class__:
            # anything that isn't already a Stats gets loaded as one
            other = Stats(other)
        self.files += other.files
        self.total_calls += other.total_calls
        self.prim_calls += other.prim_calls
        self.total_tt += other.total_tt
        for func in other.top_level:
            self.top_level[func] = None

        if self.max_name_len < other.max_name_len:
            self.max_name_len = other.max_name_len

        self.fcn_list = None  # any previous sort order is now stale

        for func, stat in other.stats.iteritems():
            if func in self.stats:
                old_func_stat = self.stats[func]
            else:
                old_func_stat = (0, 0, 0, 0, {},)
            self.stats[func] = add_func_stats(old_func_stat, stat)
        return self

    def dump_stats(self, filename):
        """Write the profile data to a file we know how to load back."""
        f = file(filename, 'wb')
        try:
            marshal.dump(self.stats, f)
        finally:
            f.close()

    # list the tuple indices and directions for sorting,
    # along with some printable description
    sort_arg_dict_default = {
        "calls"     : (((1,-1),              ), "call count"),
        "cumulative": (((3,-1),              ), "cumulative time"),
        "file"      : (((4, 1),              ), "file name"),
        "line"      : (((5, 1),              ), "line number"),
        "module"    : (((4, 1),              ), "file name"),
        "name"      : (((6, 1),              ), "function name"),
        "nfl"       : (((6, 1),(4, 1),(5, 1),), "name/file/line"),
        "pcalls"    : (((0,-1),              ), "call count"),
        "stdname"   : (((7, 1),              ), "standard name"),
        "time"      : (((2,-1),              ), "internal time"),
        }

    def get_sort_arg_defs(self):
        """Expand all abbreviations that are unique."""
        if not self.sort_arg_dict:
            self.sort_arg_dict = dict = {}
            bad_list = {}
            # register every prefix of every key; prefixes claimed by more
            # than one key are ambiguous and get removed afterwards
            for word, tup in self.sort_arg_dict_default.iteritems():
                fragment = word
                while fragment:
                    if not fragment:
                        break
                    if fragment in dict:
                        bad_list[fragment] = 0
                        break
                    dict[fragment] = tup
                    fragment = fragment[:-1]
            for word in bad_list:
                del dict[word]
        return self.sort_arg_dict

    def sort_stats(self, *field):
        """Order the function list by one or more sort keys (see class doc)."""
        if not field:
            self.fcn_list = 0
            return self
        if len(field) == 1 and type(field[0]) == type(1):
            # Be compatible with old profiler
            field = [ {-1: "stdname",
                       0:"calls",
                       1:"time",
                       2: "cumulative" } [ field[0] ] ]

        sort_arg_defs = self.get_sort_arg_defs()
        sort_tuple = ()
        self.sort_type = ""
        connector = ""
        for word in field:
            sort_tuple = sort_tuple + sort_arg_defs[word][0]
            self.sort_type += connector + sort_arg_defs[word][1]
            connector = ", "

        stats_list = []
        for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
            # flatten each entry into a sortable tuple; the func triple is
            # appended last so it can be recovered after sorting
            stats_list.append((cc, nc, tt, ct) + func +
                              (func_std_string(func), func))

        stats_list.sort(TupleComp(sort_tuple).compare)

        self.fcn_list = fcn_list = []
        for tuple in stats_list:
            fcn_list.append(tuple[-1])
        return self

    def reverse_order(self):
        """Reverse the current ordering of the function list."""
        if self.fcn_list:
            self.fcn_list.reverse()
        return self

    def strip_dirs(self):
        """Strip leading path information from all file names in the stats."""
        oldstats = self.stats
        self.stats = newstats = {}
        max_name_len = 0
        for func, (cc, nc, tt, ct, callers) in oldstats.iteritems():
            newfunc = func_strip_path(func)
            if len(func_std_string(newfunc)) > max_name_len:
                max_name_len = len(func_std_string(newfunc))
            newcallers = {}
            for func2, caller in callers.iteritems():
                newcallers[func_strip_path(func2)] = caller

            if newfunc in newstats:
                # stripping collapsed two distinct paths onto one name: merge
                newstats[newfunc] = add_func_stats(
                                        newstats[newfunc],
                                        (cc, nc, tt, ct, newcallers))
            else:
                newstats[newfunc] = (cc, nc, tt, ct, newcallers)
        old_top = self.top_level
        self.top_level = new_top = {}
        for func in old_top:
            new_top[func_strip_path(func)] = None

        self.max_name_len = max_name_len

        # cached derived data is now stale
        self.fcn_list = None
        self.all_callees = None
        return self

    def calc_callees(self):
        """Build the inverse (caller -> callees) map; cached after first call."""
        if self.all_callees: return
        self.all_callees = all_callees = {}
        for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
            if not func in all_callees:
                all_callees[func] = {}
            for func2, caller in callers.iteritems():
                if not func2 in all_callees:
                    all_callees[func2] = {}
                all_callees[func2][func]  = caller
        return

    #******************************************************************
    # The following functions support actual printing of reports
    #******************************************************************

    # Optional "amount" is either a line count, or a percentage of lines.

    def eval_print_amount(self, sel, list, msg):
        """Narrow *list* per one selector: regex string, fraction, or count."""
        new_list = list
        if type(sel) == type(""):
            # keep only functions whose standard name matches the regex
            new_list = []
            for func in list:
                if re.search(sel, func_std_string(func)):
                    new_list.append(func)
        else:
            count = len(list)
            if type(sel) == type(1.0) and 0.0 <= sel < 1.0:
                count = int(count * sel + .5)
                new_list = list[:count]
            elif type(sel) == type(1) and 0 <= sel < count:
                count = sel
                new_list = list[:count]
        if len(list) != len(new_list):
            msg = msg + "   List reduced from %r to %r due to restriction <%r>\n" % (
                len(list), len(new_list), sel)

        return new_list, msg

    def get_print_list(self, sel_list):
        """Apply every selector and return (column width, chosen functions)."""
        width = self.max_name_len
        if self.fcn_list:
            list = self.fcn_list[:]
            msg = "   Ordered by: " + self.sort_type + '\n'
        else:
            list = self.stats.keys()
            msg = "   Random listing order was used\n"

        for selection in sel_list:
            list, msg = self.eval_print_amount(selection, list, msg)

        count = len(list)

        if not list:
            return 0, list
        print >> self.stream, msg
        if count < len(self.stats):
            # a subset is shown: shrink the name column to fit just it
            width = 0
            for func in list:
                if  len(func_std_string(func)) > width:
                    width = len(func_std_string(func))
        return width+2, list

    def print_stats(self, *amount):
        """Print the profile report, restricted by the *amount* selectors."""
        for filename in self.files:
            print >> self.stream, filename
        if self.files: print >> self.stream
        indent = ' ' * 8
        for func in self.top_level:
            print >> self.stream, indent, func_get_function_name(func)

        print >> self.stream, indent, self.total_calls, "function calls",
        if self.total_calls != self.prim_calls:
            print >> self.stream, "(%d primitive calls)" % self.prim_calls,
        print >> self.stream, "in %.3f CPU seconds" % self.total_tt
        print >> self.stream
        width, list = self.get_print_list(amount)
        if list:
            self.print_title()
            for func in list:
                self.print_line(func)
            print >> self.stream
            print >> self.stream
        return self

    def print_callees(self, *amount):
        """For each selected function, print the functions it called."""
        width, list = self.get_print_list(amount)
        if list:
            self.calc_callees()

            self.print_call_heading(width, "called...")
            for func in list:
                if func in self.all_callees:
                    self.print_call_line(width, func, self.all_callees[func])
                else:
                    self.print_call_line(width, func, {})
            print >> self.stream
            print >> self.stream
        return self

    def print_callers(self, *amount):
        """For each selected function, print the functions that called it."""
        width, list = self.get_print_list(amount)
        if list:
            self.print_call_heading(width, "was called by...")
            for func in list:
                cc, nc, tt, ct, callers = self.stats[func]
                self.print_call_line(width, func, callers, "<-")
            print >> self.stream
            print >> self.stream
        return self

    def print_call_heading(self, name_size, column_title):
        """Print the header row for a caller/callee listing."""
        print >> self.stream, "Function ".ljust(name_size) + column_title
        # print sub-header only if we have new-style callers
        subheader = False
        for cc, nc, tt, ct, callers in self.stats.itervalues():
            if callers:
                value = callers.itervalues().next()
                subheader = isinstance(value, tuple)
                break
        if subheader:
            print >> self.stream, " "*name_size + "    ncalls  tottime  cumtime"

    def print_call_line(self, name_size, source, call_dict, arrow="->"):
        """Print one function plus its related (caller or callee) entries."""
        print >> self.stream, func_std_string(source).ljust(name_size) + arrow,
        if not call_dict:
            print >> self.stream
            return
        clist = call_dict.keys()
        clist.sort()
        indent = ""
        for func in clist:
            name = func_std_string(func)
            value = call_dict[func]
            if isinstance(value, tuple):
                # new-style (cProfile) per-edge stats
                nc, cc, tt, ct = value
                if nc != cc:
                    substats = '%d/%d' % (nc, cc)
                else:
                    substats = '%d' % (nc,)
                substats = '%s %s %s  %s' % (substats.rjust(7+2*len(indent)),
                                             f8(tt), f8(ct), name)
                left_width = name_size + 1
            else:
                # old-style (profile): value is a bare call count
                substats = '%s(%r) %s' % (name, value, f8(self.stats[func][3]))
                left_width = name_size + 3
            print >> self.stream, indent*left_width + substats
            indent = " "

    def print_title(self):
        print >> self.stream, '   ncalls  tottime  percall  cumtime  percall',
        print >> self.stream, 'filename:lineno(function)'

    def print_line(self, func):  # hack : should print percentages
        cc, nc, tt, ct, callers = self.stats[func]
        c = str(nc)
        if nc != cc:
            # total/primitive call counts differ: show both
            c = c + '/' + str(cc)
        print >> self.stream, c.rjust(9),
        print >> self.stream, f8(tt),
        if nc == 0:
            print >> self.stream, ' '*8,
        else:
            print >> self.stream, f8(tt/nc),
        print >> self.stream, f8(ct),
        if cc == 0:
            print >> self.stream, ' '*8,
        else:
            print >> self.stream, f8(ct/cc),
        print >> self.stream, func_std_string(func)
class TupleComp:
    """Old-style (cmp-like) comparator for sorting tuples on several indices.

    Each recorded (index, direction) pair names a tuple position and a sort
    direction (+1 ascending, -1 descending), ordered from most significant
    to least significant.  ``compare`` is suitable as the comparison
    function argument to a cmp-based sort().
    """

    def __init__(self, comp_select_list):
        self.comp_select_list = comp_select_list

    def compare(self, left, right):
        """Return -direction, +direction, or 0 for the first differing index."""
        for idx, sign in self.comp_select_list:
            a = left[idx]
            b = right[idx]
            if a < b:
                return -sign
            if a > b:
                return sign
        # every selected index compared equal
        return 0
#**************************************************************************
# func_name is a triple (file:string, line:int, name:string)
def func_strip_path(func_name):
    """Return a (file, line, name) triple with the file reduced to its basename."""
    path, lineno, funcname = func_name
    return os.path.basename(path), lineno, funcname
def func_get_function_name(func):
    """Return the function-name component of a (file, line, name) triple."""
    _, _, name = func
    return name
def func_std_string(func_name):
    """Format a (file, line, name) triple the way the old profiler did.

    Built-in functions are stored under the pseudo-file ('~', 0); their
    angle-bracketed names are rendered in curly braces, e.g. '{len}'.
    """
    if func_name[:2] != ('~', 0):
        return "%s:%d(%s)" % func_name
    # special case for built-in functions
    name = func_name[2]
    if name.startswith('<') and name.endswith('>'):
        return '{%s}' % name[1:-1]
    return name
#**************************************************************************
# The following functions combine statists for pairs functions.
# The bulk of the processing involves correctly handling "call" lists,
# such as callers and callees.
#**************************************************************************
def add_func_stats(target, source):
    """Add together all the stats for two profile entries."""
    s_cc, s_nc, s_tt, s_ct, s_callers = source
    t_cc, t_nc, t_tt, t_ct, t_callers = target
    totals = (s_cc + t_cc, s_nc + t_nc, s_tt + t_tt, s_ct + t_ct)
    # the caller dictionaries are merged key-by-key by add_callers()
    return totals + (add_callers(t_callers, s_callers),)
def add_callers(target, source):
    """Combine two caller dicts into a new dict, merging shared entries.

    Values are either plain call counts (old profile format) or 4-tuples of
    (ncalls, primcalls, tottime, cumtime) (cProfile format).  Shared keys
    are summed element-wise for tuples and added for counts.  The previous
    ``caller + new_callers[func]`` applied tuple concatenation to the
    cProfile format, corrupting merged entries into 8-tuples; this is the
    fix later adopted upstream in CPython's pstats.
    """
    new_callers = {}
    # .items() (rather than the Python-2-only iteritems()) keeps this portable
    for func, caller in target.items():
        new_callers[func] = caller
    for func, caller in source.items():
        if func in new_callers:
            if isinstance(caller, tuple):
                # format used by cProfile: add the stat tuples element-wise
                new_callers[func] = tuple(
                    i + j for i, j in zip(caller, new_callers[func]))
            else:
                # format used by profile: a bare call count
                new_callers[func] = caller + new_callers[func]
        else:
            new_callers[func] = caller
    return new_callers
def count_calls(callers):
    """Sum the caller statistics to get total number of calls received.

    Only meaningful for old-style caller dicts whose values are plain call
    counts.  sum() over values() replaces the manual accumulation loop and
    avoids the Python-2-only itervalues().
    """
    return sum(callers.values())
#**************************************************************************
# The following functions support printing of reports
#**************************************************************************
def f8(x):
    """Render a time value right-justified in an 8-wide, 3-decimal column."""
    return format(x, "8.3f")
#**************************************************************************
# Statistics browser added by ESR, April 2001
#**************************************************************************
if __name__ == '__main__':
    import cmd
    try:
        # readline, when available, gives the browser line editing / history
        import readline
    except ImportError:
        pass

    class ProfileBrowser(cmd.Cmd):
        """Interactive command-line browser over a profile statistics file."""

        def __init__(self, profile=None):
            cmd.Cmd.__init__(self)
            self.prompt = "% "
            if profile is not None:
                self.stats = Stats(profile)
                self.stream = self.stats.stream
            else:
                self.stats = None
                self.stream = sys.stdout

        def generic(self, fn, line):
            # Parse the argument line into ints, fractions in [0, 1], and
            # regex strings, then forward them to the named Stats method.
            args = line.split()
            processed = []
            for term in args:
                try:
                    processed.append(int(term))
                    continue
                except ValueError:
                    pass
                try:
                    frac = float(term)
                    if frac > 1 or frac < 0:
                        print >> self.stream, "Fraction argument must be in [0, 1]"
                        continue
                    processed.append(frac)
                    continue
                except ValueError:
                    pass
                # neither int nor float: treat as a regex selector
                processed.append(term)
            if self.stats:
                getattr(self.stats, fn)(*processed)
            else:
                print >> self.stream, "No statistics object is loaded."
            return 0

        def generic_help(self):
            print >> self.stream, "Arguments may be:"
            print >> self.stream, "* An integer maximum number of entries to print."
            print >> self.stream, "* A decimal fractional number between 0 and 1, controlling"
            print >> self.stream, "  what fraction of selected entries to print."
            print >> self.stream, "* A regular expression; only entries with function names"
            print >> self.stream, "  that match it are printed."

        def do_add(self, line):
            self.stats.add(line)
            return 0
        def help_add(self):
            print >> self.stream, "Add profile info from given file to current statistics object."

        def do_callees(self, line):
            return self.generic('print_callees', line)
        def help_callees(self):
            print >> self.stream, "Print callees statistics from the current stat object."
            self.generic_help()

        def do_callers(self, line):
            return self.generic('print_callers', line)
        def help_callers(self):
            print >> self.stream, "Print callers statistics from the current stat object."
            self.generic_help()

        def do_EOF(self, line):
            print >> self.stream, ""
            return 1
        def help_EOF(self):
            print >> self.stream, "Leave the profile brower."

        def do_quit(self, line):
            return 1
        def help_quit(self):
            print >> self.stream, "Leave the profile brower."

        def do_read(self, line):
            if line:
                try:
                    self.stats = Stats(line)
                except IOError, args:
                    print >> self.stream, args[1]
                    return
                self.prompt = line + "% "
            elif len(self.prompt) > 2:
                # no argument: re-read the file named in the current prompt
                line = self.prompt[-2:]
            else:
                print >> self.stream, "No statistics object is current -- cannot reload."
            return 0
        def help_read(self):
            print >> self.stream, "Read in profile data from a specified file."

        def do_reverse(self, line):
            self.stats.reverse_order()
            return 0
        def help_reverse(self):
            print >> self.stream, "Reverse the sort order of the profiling report."

        def do_sort(self, line):
            abbrevs = self.stats.get_sort_arg_defs()
            # only sort if every word on the line is a recognized key/prefix
            if line and not filter(lambda x,a=abbrevs: x not in a,line.split()):
                self.stats.sort_stats(*line.split())
            else:
                print >> self.stream, "Valid sort keys (unique prefixes are accepted):"
                for (key, value) in Stats.sort_arg_dict_default.iteritems():
                    print >> self.stream, "%s -- %s" % (key, value[1])
            return 0
        def help_sort(self):
            print >> self.stream, "Sort profile data according to specified keys."
            print >> self.stream, "(Typing `sort' without arguments lists valid keys.)"
        def complete_sort(self, text, *args):
            return [a for a in Stats.sort_arg_dict_default if a.startswith(text)]

        def do_stats(self, line):
            return self.generic('print_stats', line)
        def help_stats(self):
            print >> self.stream, "Print statistics from the current stat object."
            self.generic_help()

        def do_strip(self, line):
            self.stats.strip_dirs()
            return 0
        def help_strip(self):
            print >> self.stream, "Strip leading path information from filenames in the report."

        def postcmd(self, stop, line):
            if stop:
                return stop
            return None

    import sys
    if len(sys.argv) > 1:
        initprofile = sys.argv[1]
    else:
        initprofile = None
    try:
        browser = ProfileBrowser(initprofile)
        print >> browser.stream, "Welcome to the profile statistics browser."
        browser.cmdloop()
        print >> browser.stream, "Goodbye."
    except KeyboardInterrupt:
        pass

# That's all, folks.
| apache-2.0 |
himleyb85/django | django/test/client.py | 8 | 26705 | from __future__ import unicode_literals
import json
import mimetypes
import os
import re
import sys
from copy import copy
from importlib import import_module
from io import BytesIO
from django.apps import apps
from django.conf import settings
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import ISO_8859_1, UTF_8, WSGIRequest
from django.core.signals import (
got_request_exception, request_finished, request_started,
)
from django.db import close_old_connections
from django.http import HttpRequest, QueryDict, SimpleCookie
from django.template import TemplateDoesNotExist
from django.test import signals
from django.test.utils import ContextList
from django.urls import resolve
from django.utils import six
from django.utils.encoding import force_bytes, force_str, uri_to_iri
from django.utils.functional import SimpleLazyObject, curry
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils.six.moves.urllib.parse import urlparse, urlsplit
__all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart')
# Fixed boundary string for multipart/form-data bodies built by the test client.
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
# Extracts the charset token from a Content-Type header.  Raw string literal
# avoids the invalid-escape-sequence DeprecationWarning for \w and \d on
# Python 3.6+; the compiled pattern is identical.
CONTENT_TYPE_RE = re.compile(r'.*; charset=([\w\d-]+);?')
class RedirectCycleError(Exception):
    """
    The test client has been asked to follow a redirect loop.
    """

    def __init__(self, message, last_response):
        # Keep the final response and its accumulated redirect chain around
        # so the test that tripped the loop can inspect how it got there.
        self.last_response = last_response
        self.redirect_chain = last_response.redirect_chain
        super(RedirectCycleError, self).__init__(message)
class FakePayload(object):
    """
    A wrapper around BytesIO that restricts what can be read since data from
    the network can't be seeked and cannot be read outside of its content
    length. This makes sure that views can't do anything under the test client
    that wouldn't work in Real Life.
    """

    def __init__(self, content=None):
        self.__content = BytesIO()
        self.__len = 0
        self.read_started = False
        if content is not None:
            self.write(content)

    def __len__(self):
        # the number of bytes still available to read
        return self.__len

    def read(self, num_bytes=None):
        """Read up to *num_bytes* (or the full remaining payload)."""
        if not self.read_started:
            # first read: rewind to the start of the buffered content
            self.__content.seek(0)
            self.read_started = True
        if num_bytes is None:
            num_bytes = self.__len or 0
        assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
        content = self.__content.read(num_bytes)
        self.__len -= num_bytes
        return content

    def write(self, content):
        """Append *content* to the payload; disallowed once reading started."""
        if self.read_started:
            # fixed wording: message previously read "after he's been read"
            raise ValueError("Unable to write a payload after it's been read")
        content = force_bytes(content)
        self.__content.write(content)
        self.__len += len(content)
def closing_iterator_wrapper(iterable, close):
    """
    Yield every item of *iterable*, guaranteeing that close() runs once the
    iterable is exhausted (or abandoned).
    """
    try:
        for item in iterable:
            yield item
    finally:
        # NOTE(review): close() fires request_finished itself; the receiver is
        # detached around the call — presumably so close_old_connections does
        # not tear down connections the test still relies on. Confirm.
        request_finished.disconnect(close_old_connections)
        close()  # will fire request_finished
        request_finished.connect(close_old_connections)
class ClientHandler(BaseHandler):
    """
    A HTTP Handler that can be used for testing purposes. Uses the WSGI
    interface to compose requests, but returns the raw HttpResponse object with
    the originating WSGIRequest attached to its ``wsgi_request`` attribute.
    """

    def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
        # When False, views behind CsrfViewMiddleware can be exercised without
        # supplying a CSRF token (see the hack in __call__).
        self.enforce_csrf_checks = enforce_csrf_checks
        super(ClientHandler, self).__init__(*args, **kwargs)

    def __call__(self, environ):
        """Run one WSGI-style request/response cycle and return the response."""
        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            self.load_middleware()

        # NOTE(review): the disconnect/send/connect dance appears to keep
        # close_old_connections from running on this signal during tests —
        # confirm against the signals module before relying on it.
        request_started.disconnect(close_old_connections)
        request_started.send(sender=self.__class__, environ=environ)
        request_started.connect(close_old_connections)
        request = WSGIRequest(environ)
        # sneaky little hack so that we can easily get round
        # CsrfViewMiddleware.  This makes life easier, and is probably
        # required for backwards compatibility with external tests against
        # admin views.
        request._dont_enforce_csrf_checks = not self.enforce_csrf_checks

        # Request goes through middleware.
        response = self.get_response(request)

        # Attach the originating request to the response so that it could be
        # later retrieved.
        response.wsgi_request = request

        # We're emulating a WSGI server; we must call the close method
        # on completion.
        if response.streaming:
            # defer close() until the streamed content is consumed
            response.streaming_content = closing_iterator_wrapper(
                response.streaming_content, response.close)
        else:
            request_finished.disconnect(close_old_connections)
            response.close()  # will fire request_finished
            request_finished.connect(close_old_connections)

        return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
    """
    Signal listener that records every rendered template plus a snapshot of
    the context used to render it.

    The context is copied so the record reflects its state at rendering time
    even if the caller mutates it afterwards.
    """
    templates = store.setdefault('templates', [])
    templates.append(template)
    try:
        contexts = store['context']
    except KeyError:
        contexts = store['context'] = ContextList()
    contexts.append(copy(context))
def encode_multipart(boundary, data):
    """
    Encodes multipart POST data from a dictionary of form values.

    The key will be used as the form data name; the value will be transmitted
    as content. If the value is a file, the contents of the file will be sent
    as an application/octet-stream; otherwise, str(value) will be sent.
    """
    lines = []

    def to_bytes(s):
        # encode text pieces with the project-wide default charset
        return force_bytes(s, settings.DEFAULT_CHARSET)

    # Not by any means perfect, but good enough for our purposes.
    def is_file(thing):
        return hasattr(thing, "read") and callable(thing.read)

    # Each bit of the multipart form data could be either a form value or a
    # file, or a *list* of form values and/or files. Remember that HTTP field
    # names can be duplicated!
    for (key, value) in data.items():
        if is_file(value):
            lines.extend(encode_file(boundary, key, value))
        elif not isinstance(value, six.string_types) and is_iterable(value):
            # a non-string iterable: emit one part per item under the same key
            for item in value:
                if is_file(item):
                    lines.extend(encode_file(boundary, key, item))
                else:
                    lines.extend(to_bytes(val) for val in [
                        '--%s' % boundary,
                        'Content-Disposition: form-data; name="%s"' % key,
                        '',
                        item
                    ])
        else:
            lines.extend(to_bytes(val) for val in [
                '--%s' % boundary,
                'Content-Disposition: form-data; name="%s"' % key,
                '',
                value
            ])

    # closing boundary marker plus trailing CRLF
    lines.extend([
        to_bytes('--%s--' % boundary),
        b'',
    ])
    return b'\r\n'.join(lines)
def encode_file(boundary, key, file):
    """Return the multipart lines (as a list of bytes) for one uploaded file."""
    def to_bytes(s):
        return force_bytes(s, settings.DEFAULT_CHARSET)

    # take the filename from the file object if it has one; guess the content
    # type from that name unless the object declares its own
    filename = os.path.basename(file.name) if hasattr(file, 'name') else ''
    if hasattr(file, 'content_type'):
        content_type = file.content_type
    elif filename:
        content_type = mimetypes.guess_type(filename)[0]
    else:
        content_type = None

    if content_type is None:
        content_type = 'application/octet-stream'
    if not filename:
        # no usable filename: fall back to the form field name
        filename = key
    return [
        to_bytes('--%s' % boundary),
        to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
                 % (key, filename)),
        to_bytes('Content-Type: %s' % content_type),
        b'',
        to_bytes(file.read())
    ]
class RequestFactory(object):
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
    def __init__(self, **defaults):
        # extra WSGI environ entries applied to every request built here
        self.defaults = defaults
        self.cookies = SimpleCookie()
        self.errors = BytesIO()

    def _base_environ(self, **request):
        """
        The base environment for a request.
        """
        # This is a minimal valid WSGI environ dictionary, plus:
        # - HTTP_COOKIE: for cookie support,
        # - REMOTE_ADDR: often useful, see #8551.
        # See http://www.python.org/dev/peps/pep-3333/#environ-variables
        environ = {
            'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
            'PATH_INFO': str('/'),
            'REMOTE_ADDR': str('127.0.0.1'),
            'REQUEST_METHOD': str('GET'),
            'SCRIPT_NAME': str(''),
            'SERVER_NAME': str('testserver'),
            'SERVER_PORT': str('80'),
            'SERVER_PROTOCOL': str('HTTP/1.1'),
            'wsgi.version': (1, 0),
            'wsgi.url_scheme': str('http'),
            'wsgi.input': FakePayload(b''),
            'wsgi.errors': self.errors,
            'wsgi.multiprocess': True,
            'wsgi.multithread': False,
            'wsgi.run_once': False,
        }
        # instance-wide defaults are applied first; per-request entries win
        environ.update(self.defaults)
        environ.update(request)
        return environ

    def request(self, **request):
        "Construct a generic request object."
        return WSGIRequest(self._base_environ(**request))

    def _encode_data(self, data, content_type):
        """Serialize *data* to bytes according to *content_type*."""
        if content_type is MULTIPART_CONTENT:
            # NOTE(review): identity check — only the module's own constant
            # takes the multipart path; an equal but distinct string won't.
            return encode_multipart(BOUNDARY, data)
        else:
            # Encode the content so that the byte representation is correct.
            match = CONTENT_TYPE_RE.match(content_type)
            if match:
                charset = match.group(1)
            else:
                charset = settings.DEFAULT_CHARSET
            return force_bytes(data, encoding=charset)

    def _get_path(self, parsed):
        """Return the PATH_INFO value for a urlparse()/urlsplit() result."""
        path = force_str(parsed[2])
        # If there are parameters, add them
        if parsed[3]:
            path += str(";") + force_str(parsed[3])
        path = uri_to_iri(path).encode(UTF_8)
        # Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
        # decoded with ISO-8859-1. We replicate this behavior here.
        # Refs comment in `get_bytes_from_wsgi()`.
        return path.decode(ISO_8859_1) if six.PY3 else path

    def get(self, path, data=None, secure=False, **extra):
        "Construct a GET request."
        data = {} if data is None else data
        r = {
            'QUERY_STRING': urlencode(data, doseq=True),
        }
        # caller-supplied environ entries may override QUERY_STRING
        r.update(extra)
        return self.generic('GET', path, secure=secure, **r)
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
secure=False, **extra):
"Construct a POST request."
data = {} if data is None else data
post_data = self._encode_data(data, content_type)
return self.generic('POST', path, post_data, content_type,
secure=secure, **extra)
def head(self, path, data=None, secure=False, **extra):
"Construct a HEAD request."
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('HEAD', path, secure=secure, **r)
def trace(self, path, secure=False, **extra):
"Construct a TRACE request."
return self.generic('TRACE', path, secure=secure, **extra)
def options(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct an OPTIONS request."
return self.generic('OPTIONS', path, data, content_type,
secure=secure, **extra)
def put(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a PUT request."
return self.generic('PUT', path, data, content_type,
secure=secure, **extra)
def patch(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a PATCH request."
return self.generic('PATCH', path, data, content_type,
secure=secure, **extra)
def delete(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a DELETE request."
return self.generic('DELETE', path, data, content_type,
secure=secure, **extra)
def generic(self, method, path, data='',
content_type='application/octet-stream', secure=False,
**extra):
"""Constructs an arbitrary HTTP request."""
parsed = urlparse(force_str(path))
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'REQUEST_METHOD': str(method),
'SERVER_PORT': str('443') if secure else str('80'),
'wsgi.url_scheme': str('https') if secure else str('http'),
}
if data:
r.update({
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': str(content_type),
'wsgi.input': FakePayload(data),
})
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get('QUERY_STRING'):
query_string = force_bytes(parsed[4])
# WSGI requires latin-1 encoded strings. See get_path_info().
if six.PY3:
query_string = query_string.decode('iso-8859-1')
r['QUERY_STRING'] = query_string
return self.request(**r)
class Client(RequestFactory):
    """
    A class that can act as a client for testing purposes.
    It allows the user to compose GET and POST requests, and
    obtain the response that the server gave to those requests.
    The server Response objects are annotated with the details
    of the contexts and templates that were rendered during the
    process of serving the request.
    Client objects are stateful - they will retain cookie (and
    thus session) details for the lifetime of the Client instance.
    This is not intended as a replacement for Twill/Selenium or
    the like - it is here to allow testing against the
    contexts and templates produced by a view, rather than the
    HTML rendered to the end-user.
    """
    def __init__(self, enforce_csrf_checks=False, **defaults):
        # ``defaults`` are passed through to RequestFactory (extra environ
        # entries); the handler replaces a real WSGI server.
        super(Client, self).__init__(**defaults)
        self.handler = ClientHandler(enforce_csrf_checks)
        # Holds sys.exc_info() of the last view exception (see request()).
        self.exc_info = None
    def store_exc_info(self, **kwargs):
        """
        Stores exceptions when they are generated by a view.
        """
        # Connected to the got_request_exception signal in request().
        self.exc_info = sys.exc_info()
    def _session(self):
        """
        Obtains the current session variables.
        """
        if apps.is_installed('django.contrib.sessions'):
            engine = import_module(settings.SESSION_ENGINE)
            cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
            if cookie:
                return engine.SessionStore(cookie.value)
            else:
                # No session cookie yet: create and persist a fresh session
                # so the client immediately has a usable session key.
                s = engine.SessionStore()
                s.save()
                self.cookies[settings.SESSION_COOKIE_NAME] = s.session_key
                return s
        # Sessions framework not installed: behave like an empty session.
        return {}
    session = property(_session)
    def request(self, **request):
        """
        The master request method. Composes the environment dictionary
        and passes to the handler, returning the result of the handler.
        Assumes defaults for the query environment, which can be overridden
        using the arguments to the request.
        """
        environ = self._base_environ(**request)
        # Curry a data dictionary into an instance of the template renderer
        # callback function.
        data = {}
        on_template_render = curry(store_rendered_templates, data)
        # id(request) makes the dispatch_uid unique per call so nested or
        # concurrent requests do not disconnect each other's receivers.
        signal_uid = "template-render-%s" % id(request)
        signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
        # Capture exceptions created by the handler.
        exception_uid = "request-exception-%s" % id(request)
        got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
        try:
            try:
                response = self.handler(environ)
            except TemplateDoesNotExist as e:
                # If the view raises an exception, Django will attempt to show
                # the 500.html template. If that template is not available,
                # we should ignore the error in favor of re-raising the
                # underlying exception that caused the 500 error. Any other
                # template found to be missing during view error handling
                # should be reported as-is.
                if e.args != ('500.html',):
                    raise
            # Look for a signalled exception, clear the current context
            # exception data, then re-raise the signalled exception.
            # Also make sure that the signalled exception is cleared from
            # the local cache!
            if self.exc_info:
                exc_info = self.exc_info
                self.exc_info = None
                six.reraise(*exc_info)
            # Save the client and request that stimulated the response.
            response.client = self
            response.request = request
            # Add any rendered template detail to the response.
            response.templates = data.get("templates", [])
            response.context = data.get("context")
            # Lazy: response.json() parses the body on demand.
            response.json = curry(self._parse_json, response)
            # Attach the ResolverMatch instance to the response
            response.resolver_match = SimpleLazyObject(lambda: resolve(request['PATH_INFO']))
            # Flatten a single context. Not really necessary anymore thanks to
            # the __getattr__ flattening in ContextList, but has some edge-case
            # backwards-compatibility implications.
            if response.context and len(response.context) == 1:
                response.context = response.context[0]
            # Update persistent cookie data.
            if response.cookies:
                self.cookies.update(response.cookies)
            return response
        finally:
            signals.template_rendered.disconnect(dispatch_uid=signal_uid)
            got_request_exception.disconnect(dispatch_uid=exception_uid)
    def get(self, path, data=None, follow=False, secure=False, **extra):
        """
        Requests a response from the server using GET.
        """
        response = super(Client, self).get(path, data=data, secure=secure,
                                           **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response
    def post(self, path, data=None, content_type=MULTIPART_CONTENT,
             follow=False, secure=False, **extra):
        """
        Requests a response from the server using POST.
        """
        response = super(Client, self).post(path, data=data,
                                            content_type=content_type,
                                            secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response
    def head(self, path, data=None, follow=False, secure=False, **extra):
        """
        Request a response from the server using HEAD.
        """
        response = super(Client, self).head(path, data=data, secure=secure,
                                            **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response
    def options(self, path, data='', content_type='application/octet-stream',
                follow=False, secure=False, **extra):
        """
        Request a response from the server using OPTIONS.
        """
        response = super(Client, self).options(path, data=data,
                                               content_type=content_type,
                                               secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response
    def put(self, path, data='', content_type='application/octet-stream',
            follow=False, secure=False, **extra):
        """
        Send a resource to the server using PUT.
        """
        response = super(Client, self).put(path, data=data,
                                           content_type=content_type,
                                           secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response
    def patch(self, path, data='', content_type='application/octet-stream',
              follow=False, secure=False, **extra):
        """
        Send a resource to the server using PATCH.
        """
        response = super(Client, self).patch(path, data=data,
                                             content_type=content_type,
                                             secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response
    def delete(self, path, data='', content_type='application/octet-stream',
               follow=False, secure=False, **extra):
        """
        Send a DELETE request to the server.
        """
        response = super(Client, self).delete(path, data=data,
                                              content_type=content_type,
                                              secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response
    def trace(self, path, data='', follow=False, secure=False, **extra):
        """
        Send a TRACE request to the server.
        """
        response = super(Client, self).trace(path, data=data, secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response
    def login(self, **credentials):
        """
        Sets the Factory to appear as if it has successfully logged into a site.
        Returns True if login is possible; False if the provided credentials
        are incorrect, or the user is inactive, or if the sessions framework is
        not available.
        """
        from django.contrib.auth import authenticate
        user = authenticate(**credentials)
        if (user and user.is_active and
                apps.is_installed('django.contrib.sessions')):
            self._login(user)
            return True
        else:
            return False
    def force_login(self, user, backend=None):
        # Log ``user`` in without checking credentials or is_active.
        self._login(user, backend)
    def _login(self, user, backend=None):
        # Shared implementation for login()/force_login(): attach the user to
        # a session and store the session cookie on this client.
        from django.contrib.auth import login
        engine = import_module(settings.SESSION_ENGINE)
        # Create a fake request to store login details.
        request = HttpRequest()
        if self.session:
            request.session = self.session
        else:
            request.session = engine.SessionStore()
        login(request, user, backend)
        # Save the session values.
        request.session.save()
        # Set the cookie to represent the session.
        session_cookie = settings.SESSION_COOKIE_NAME
        self.cookies[session_cookie] = request.session.session_key
        cookie_data = {
            'max-age': None,
            'path': '/',
            'domain': settings.SESSION_COOKIE_DOMAIN,
            'secure': settings.SESSION_COOKIE_SECURE or None,
            'expires': None,
        }
        self.cookies[session_cookie].update(cookie_data)
    def logout(self):
        """
        Removes the authenticated user's cookies and session object.
        Causes the authenticated user to be logged out.
        """
        from django.contrib.auth import get_user, logout
        request = HttpRequest()
        engine = import_module(settings.SESSION_ENGINE)
        if self.session:
            request.session = self.session
            request.user = get_user(request)
        else:
            request.session = engine.SessionStore()
        logout(request)
        # Drop all client-side state, not just the session cookie.
        self.cookies = SimpleCookie()
    def _parse_json(self, response, **extra):
        # Backs response.json(); ``extra`` is forwarded to json.loads().
        if 'application/json' not in response.get('Content-Type'):
            raise ValueError(
                'Content-Type header is "{0}", not "application/json"'
                .format(response.get('Content-Type'))
            )
        return json.loads(response.content.decode(), **extra)
    def _handle_redirects(self, response, **extra):
        "Follows any redirects by requesting responses from the server using GET."
        response.redirect_chain = []
        while response.status_code in (301, 302, 303, 307):
            response_url = response.url
            redirect_chain = response.redirect_chain
            redirect_chain.append((response_url, response.status_code))
            url = urlsplit(response_url)
            # Carry scheme/host/port of the redirect target into the next
            # request's environ so absolute redirects are honored.
            if url.scheme:
                extra['wsgi.url_scheme'] = url.scheme
            if url.hostname:
                extra['SERVER_NAME'] = url.hostname
            if url.port:
                extra['SERVER_PORT'] = str(url.port)
            response = self.get(url.path, QueryDict(url.query), follow=False, **extra)
            response.redirect_chain = redirect_chain
            if redirect_chain[-1] in redirect_chain[:-1]:
                # Check that we're not redirecting to somewhere we've already
                # been to, to prevent loops.
                raise RedirectCycleError("Redirect loop detected.", last_response=response)
            if len(redirect_chain) > 20:
                # Such a lengthy chain likely also means a loop, but one with
                # a growing path, changing view, or changing query argument;
                # 20 is the value of "network.http.redirection-limit" from Firefox.
                raise RedirectCycleError("Too many redirects.", last_response=response)
        return response
| bsd-3-clause |
yongshengwang/hue | desktop/core/ext-py/Django-1.6.10/django/conf/locale/hu/formats.py | 118 | 1118 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y. F j.'
TIME_FORMAT = 'G.i.s'
DATETIME_FORMAT = 'Y. F j. G.i.s'
YEAR_MONTH_FORMAT = 'Y. F'
MONTH_DAY_FORMAT = 'F j.'
SHORT_DATE_FORMAT = 'Y.m.d.'
SHORT_DATETIME_FORMAT = 'Y.m.d. G.i.s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y.%m.%d.', # '2006.10.25.'
)
TIME_INPUT_FORMATS = (
'%H.%M.%S', # '14.30.59'
'%H.%M', # '14.30'
)
DATETIME_INPUT_FORMATS = (
'%Y.%m.%d. %H.%M.%S', # '2006.10.25. 14.30.59'
'%Y.%m.%d. %H.%M.%S.%f', # '2006.10.25. 14.30.59.000200'
'%Y.%m.%d. %H.%M', # '2006.10.25. 14.30'
'%Y.%m.%d.', # '2006.10.25.'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' ' # Non-breaking space
NUMBER_GROUPING = 3
| apache-2.0 |
tovrstra/horton | horton/io/molekel.py | 4 | 8305 | # -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
'''Molekel wavefunction input file format'''
import numpy as np
from horton.units import angstrom
from horton.io.molden import _fix_molden_from_buggy_codes
from horton.gbasis.iobas import str_to_shell_types
from horton.gbasis.gobasis import GOBasis
from horton.meanfield.orbitals import Orbitals
__all__ = ['load_mkl']
def load_mkl(filename):
    '''Load data from a Molekel file.
    Parameters
    ----------
    filename : str
        The filename of the mkl file.
    Returns
    -------
    results : dict
        Data loaded from file, with keys: ``coordinates``, ``numbers``, ``obasis``,
        ``orb_alpha``. It may also contain: ``orb_beta``, ``signs``.
    '''
    # The file is a sequence of $SECTION ... $END blocks; each helper below
    # consumes the body of one section, leaving the file positioned after it.
    def helper_char_mult(f):
        # $CHAR_MULT: a single line holding charge and spin multiplicity.
        return [int(word) for word in f.readline().split()]
    def helper_coordinates(f):
        # $COORD: one "atomic_number x y z" line per atom, until $END/EOF.
        numbers = []
        coordinates = []
        while True:
            line = f.readline()
            if len(line) == 0 or line.strip() == '$END':
                break
            words = line.split()
            numbers.append(int(words[0]))
            coordinates.append([float(words[1]), float(words[2]), float(words[3])])
        numbers = np.array(numbers, int)
        # Molekel stores coordinates in Angstrom; convert to atomic units.
        coordinates = np.array(coordinates)*angstrom
        return numbers, coordinates
    def helper_obasis(f, coordinates):
        # $BASIS: shells grouped per atom, atoms separated by '$$' markers.
        # Two-word lines are primitive (exponent, contraction) pairs; other
        # non-empty lines start a new shell.
        shell_types = []
        shell_map = []
        nprims = []
        alphas = []
        con_coeffs = []
        center_counter = 0
        in_shell = False
        nprim = None
        while True:
            line = f.readline()
            lstrip = line.strip()
            if len(line) == 0 or lstrip == '$END':
                break
            if len(lstrip) == 0:
                continue
            if lstrip == '$$':
                center_counter += 1
                in_shell = False
            else:
                words = line.split()
                if len(words) == 2:
                    assert in_shell
                    alpha = float(words[0])
                    alphas.append(alpha)
                    con_coeffs.append(float(words[1]))
                    nprim += 1
                else:
                    # New shell header: flush the primitive count of the
                    # previous shell (if any) before starting this one.
                    if nprim is not None:
                        nprims.append(nprim)
                    shell_map.append(center_counter)
                    # always assume pure basis functions
                    shell_type = str_to_shell_types(words[1], pure=True)[0]
                    shell_types.append(shell_type)
                    in_shell = True
                    nprim = 0
        # Flush the last shell, which has no following header to trigger it.
        if nprim is not None:
            nprims.append(nprim)
        shell_map = np.array(shell_map)
        nprims = np.array(nprims)
        shell_types = np.array(shell_types)
        alphas = np.array(alphas)
        con_coeffs = np.array(con_coeffs)
        return GOBasis(coordinates, shell_map, nprims, shell_types, alphas, con_coeffs)
    def helper_coeffs(f, nbasis):
        # $COEFF_*: repeating groups of (symmetry labels, energies,
        # nbasis coefficient rows), ncol orbitals per group.  ``in_orb``
        # is the state of this small parser: 0=labels, 1=energies, 2=rows.
        coeffs = []
        energies = []
        in_orb = 0
        while True:
            line = f.readline()
            lstrip = line.strip()
            if len(line) == 0 or lstrip == '$END':
                break
            if in_orb == 0:
                # read a1g line
                words = lstrip.split()
                ncol = len(words)
                assert ncol > 0
                for word in words:
                    assert word == 'a1g'
                cols = [np.zeros((nbasis,1), float) for icol in xrange(ncol)]
                in_orb = 1
            elif in_orb == 1:
                # read energies
                words = lstrip.split()
                assert len(words) == ncol
                for word in words:
                    energies.append(float(word))
                in_orb = 2
                ibasis = 0
            elif in_orb == 2:
                # read expansion coefficients
                words = lstrip.split()
                assert len(words) == ncol
                for icol in xrange(ncol):
                    cols[icol][ibasis] = float(words[icol])
                ibasis += 1
                if ibasis == nbasis:
                    in_orb = 0
                    coeffs.extend(cols)
        # Result: (nbasis, norb) coefficient matrix and per-orbital energies.
        return np.hstack(coeffs), np.array(energies)
    def helper_occ(f):
        # $OCC_*: whitespace-separated occupation numbers until $END/EOF.
        occs = []
        while True:
            line = f.readline()
            lstrip = line.strip()
            if len(line) == 0 or lstrip == '$END':
                break
            for word in lstrip.split():
                occs.append(float(word))
        return np.array(occs)
    charge = None
    spinmult = None
    numbers = None
    coordinates = None
    obasis = None
    coeff_alpha = None
    ener_alpha = None
    occ_alpha = None
    coeff_beta = None
    ener_beta = None
    occ_beta = None
    # Dispatch on section headers.  NOTE(review): this assumes $COORD comes
    # before $BASIS and $BASIS before $COEFF_* in the file, since each later
    # section uses results of the earlier ones — confirm against the format.
    with open(filename) as f:
        while True:
            line = f.readline()
            if len(line) == 0:
                break
            line = line.strip()
            if line == '$CHAR_MULT':
                charge, spinmult = helper_char_mult(f)
            elif line == '$COORD':
                numbers, coordinates = helper_coordinates(f)
            elif line == '$BASIS':
                obasis = helper_obasis(f, coordinates)
            elif line == '$COEFF_ALPHA':
                coeff_alpha, ener_alpha = helper_coeffs(f, obasis.nbasis)
            elif line == '$OCC_ALPHA':
                occ_alpha = helper_occ(f)
            elif line == '$COEFF_BETA':
                coeff_beta, ener_beta = helper_coeffs(f, obasis.nbasis)
            elif line == '$OCC_BETA':
                occ_beta = helper_occ(f)
    if charge is None:
        raise IOError('Charge and multiplicity not found in mkl file.')
    if coordinates is None:
        raise IOError('Coordinates not found in mkl file.')
    if obasis is None:
        raise IOError('Orbital basis not found in mkl file.')
    if coeff_alpha is None:
        raise IOError('Alpha orbitals not found in mkl file.')
    if occ_alpha is None:
        raise IOError('Alpha occupation numbers not found in mkl file.')
    nelec = numbers.sum() - charge
    if coeff_beta is None:
        # Restricted case: alpha orbitals are doubly occupied, so the
        # electron count must be even and occupations are halved.
        assert nelec % 2 == 0
        assert abs(occ_alpha.sum() - nelec) < 1e-7
        orb_alpha = Orbitals(obasis.nbasis, coeff_alpha.shape[1])
        orb_alpha.coeffs[:] = coeff_alpha
        orb_alpha.energies[:] = ener_alpha
        orb_alpha.occupations[:] = occ_alpha/2
        orb_beta = None
    else:
        # Unrestricted case: separate alpha and beta orbitals.
        if occ_beta is None:
            raise IOError('Beta occupation numbers not found in mkl file while beta orbitals were present.')
        nalpha = int(np.round(occ_alpha.sum()))
        nbeta = int(np.round(occ_beta.sum()))
        assert nelec == nalpha+nbeta
        assert coeff_alpha.shape == coeff_beta.shape
        assert ener_alpha.shape == ener_beta.shape
        assert occ_alpha.shape == occ_beta.shape
        orb_alpha = Orbitals(obasis.nbasis, coeff_alpha.shape[1])
        orb_alpha.coeffs[:] = coeff_alpha
        orb_alpha.energies[:] = ener_alpha
        orb_alpha.occupations[:] = occ_alpha
        orb_beta = Orbitals(obasis.nbasis, coeff_beta.shape[1])
        orb_beta.coeffs[:] = coeff_beta
        orb_beta.energies[:] = ener_beta
        orb_beta.occupations[:] = occ_beta
    result = {
        'coordinates': coordinates,
        'orb_alpha': orb_alpha,
        'numbers': numbers,
        'obasis': obasis,
    }
    if orb_beta is not None:
        result['orb_beta'] = orb_beta
    # Work around sign-convention bugs of some programs that write this format.
    _fix_molden_from_buggy_codes(result, filename)
    return result
| gpl-3.0 |
BorisJeremic/Real-ESSI-Examples | education_examples/_Chapter_Modeling_and_Simulation_Examples_Static_Examples/Contact_Normal_Interface_Behaviour_HardContact_Nonlinear_Hardening_Softening_Shear_Model/plot.py | 8 | 1187 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import sys
import numpy as np;
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and the element outputs (rows 4-9 hold the strain and
# stress components for the contact element).
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
normal_strain = finput["/Model/Elements/Element_Outputs"][6,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
# Sign flipped so compression plots as positive stress.
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
# Configure the figure filename, according to the input filename.
outfig = thefile.replace("_", "-")
outfigname = outfig.replace("h5.feioutput", "pdf")
# Plot the figure. Add labels and titles.
plt.figure()
# Fixed: matplotlib keyword arguments are case-sensitive; the original
# "Linewidth=4" is not a valid Line2D property and is rejected.
plt.plot(normal_strain, normal_stress, '-k', linewidth=4)
plt.xlabel(r"Normal Strain $\epsilon$")
plt.ylabel(r"Normal Stress $\sigma$")
plt.savefig("Contact_Normal_Interface_Behavour.pdf", bbox_inches='tight')
plt.show()
# ##################################################################### | cc0-1.0 |
diagramsoftware/account-financial-tools | account_default_draft_move/res_config.py | 17 | 2334 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2014 ACSONE SA/NV (http://acsone.eu).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class AccountConfigSettings(orm.TransientModel):
    # Extends the accounting settings wizard with a flag controlling whether
    # journal settings drive posting of entries on validation.  The flag is
    # persisted as an ir.config_parameter rather than a wizard field.
    _inherit = 'account.config.settings'
    _columns = {
        'use_journal_setting': fields.boolean(
            'Use journal setting to post journal entries '
            'on invoice and bank statement validation',),
    }
    def set_parameters(self, cr, uid, ids, context=None):
        # Persist the checkbox: set the parameter when enabled, delete the
        # parameter record entirely (rather than storing False) when not.
        config = self.browse(cr, uid, ids[0], context)
        config_pool = self.pool['ir.config_parameter']
        if config.use_journal_setting:
            config_pool.set_param(cr, uid, 'use_journal_setting',
                                  config.use_journal_setting)
        else:
            # remove the key from parameter
            # NOTE(review): ``ids`` (the wizard ids argument) is shadowed
            # here with parameter record ids; harmless but easy to misread.
            ids = config_pool.search(cr, uid,
                                     [('key', '=', 'use_journal_setting')],
                                     context=context)
            if ids:
                config_pool.unlink(cr, uid, ids)
    def default_get(self, cr, uid, fields, context=None):
        # Pre-populate the wizard from the stored parameter (False if absent).
        res = super(AccountConfigSettings, self).default_get(cr, uid, fields,
                                                             context=context)
        config_pool = self.pool['ir.config_parameter']
        res['use_journal_setting'] = config_pool.get_param(
            cr, uid, 'use_journal_setting', False)
        return res
| agpl-3.0 |
Icenowy/MissionPlanner | Lib/site-packages/numpy/lib/benchmarks/bench_arraysetops.py | 65 | 1615 | import numpy as np
import time
from numpy.lib.arraysetops import *
def bench_unique1d( plot_results = False ):
    # Python 2 benchmark: compare np.unique against the (deprecated)
    # np.lib.arraysetops.unique1d over 9 sizes from 10**2 to 10**7 items,
    # optionally plotting the timings with pylab.
    exponents = np.linspace( 2, 7, 9 )
    ratios = []
    nItems = []
    dt1s = []
    dt2s = []
    for ii in exponents:
        nItem = 10 ** ii
        print 'using %d items:' % nItem
        # Random integers with roughly 10x duplication so unique() has work.
        a = np.fix( nItem / 10 * np.random.random( nItem ) )
        print 'unique:'
        tt = time.clock()
        b = np.unique( a )
        dt1 = time.clock() - tt
        print dt1
        print 'unique1d:'
        tt = time.clock()
        c = unique1d( a )
        dt2 = time.clock() - tt
        print dt2
        if dt1 < 1e-8:
            # Timer resolution reached; a ratio would be meaningless.
            ratio = 'ND'
        else:
            ratio = dt2 / dt1
        print 'ratio:', ratio
        print 'nUnique: %d == %d\n' % (len( b ), len( c ))
        nItems.append( nItem )
        ratios.append( ratio )
        dt1s.append( dt1 )
        dt2s.append( dt2 )
        # Sanity check: both implementations must agree element-wise.
        assert np.alltrue( b == c )
    print nItems
    print dt1s
    print dt2s
    print ratios
    if plot_results:
        # pylab imported lazily so the benchmark runs without a display.
        import pylab
        def plotMe( fig, fun, nItems, dt1s, dt2s ):
            pylab.figure( fig )
            fun( nItems, dt1s, 'g-o', linewidth = 2, markersize = 8 )
            fun( nItems, dt2s, 'b-x', linewidth = 2, markersize = 8 )
            pylab.legend( ('unique', 'unique1d' ) )
            pylab.xlabel( 'nItem' )
            pylab.ylabel( 'time [s]' )
        plotMe( 1, pylab.loglog, nItems, dt1s, dt2s )
        plotMe( 2, pylab.plot, nItems, dt1s, dt2s )
        pylab.show()
if __name__ == '__main__':
bench_unique1d( plot_results = True )
| gpl-3.0 |
coursemdetw/2014c2 | wsgi/static/reeborg/src/libraries/brython/Lib/unittest/result.py | 15 | 6282 | """Test result object"""
import io
import sys
import traceback
from . import util
from functools import wraps
__unittest = True
def failfast(method):
    """Decorator for result hooks: when ``self.failfast`` is set, ask the
    test run to stop before delegating to the wrapped method."""
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        stop_requested = getattr(self, 'failfast', False)
        if stop_requested:
            self.stop()
        return method(self, *args, **kwargs)
    return wrapper
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(object):
    """Holder for test result information.
    Test results are automatically managed by the TestCase and TestSuite
    classes, and do not need to be explicitly manipulated by writers of tests.
    Each instance holds the total number of tests run, and collections of
    failures and errors that occurred among those test runs. The collections
    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
    formatted traceback of the error that occurred.
    """
    # Bookkeeping used by TestSuite for class/module fixture handling.
    _previousTestClass = None
    _testRunEntered = False
    _moduleSetUpFailed = False
    def __init__(self, stream=None, descriptions=None, verbosity=None):
        # stream/descriptions/verbosity are accepted (and ignored) for
        # signature compatibility with TextTestResult.
        self.failfast = False
        self.failures = []
        self.errors = []
        self.testsRun = 0
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.shouldStop = False
        # When True, stdout/stderr are captured per test and only echoed
        # back for tests that fail or error.
        self.buffer = False
        self._stdout_buffer = None
        self._stderr_buffer = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        # Set on failure/error so captured output is mirrored at stopTest.
        self._mirrorOutput = False
    def printErrors(self):
        "Called by TestRunner after test run"
    def startTest(self, test):
        "Called when the given test is about to be run"
        self.testsRun += 1
        self._mirrorOutput = False
        self._setupStdout()
    def _setupStdout(self):
        # Redirect stdout/stderr into StringIO buffers when buffering is on.
        # Buffers are created lazily once and reused across tests.
        if self.buffer:
            if self._stderr_buffer is None:
                self._stderr_buffer = io.StringIO()
                self._stdout_buffer = io.StringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer
    def startTestRun(self):
        """Called once before any tests are executed.
        See startTest for a method called before each test.
        """
    def stopTest(self, test):
        """Called when the given test has been run"""
        self._restoreStdout()
        self._mirrorOutput = False
    def _restoreStdout(self):
        # Undo _setupStdout: echo captured output for failed tests, restore
        # the real streams, and reset the buffers for the next test.
        if self.buffer:
            if self._mirrorOutput:
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith('\n'):
                        output += '\n'
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith('\n'):
                        error += '\n'
                    self._original_stderr.write(STDERR_LINE % error)
            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()
    def stopTestRun(self):
        """Called once after all tests are executed.
        See stopTest for a method called after each test.
        """
    @failfast
    def addError(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True
    @failfast
    def addFailure(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info()."""
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True
    def addSuccess(self, test):
        "Called when a test has completed successfully"
        pass
    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        self.skipped.append((test, reason))
    def addExpectedFailure(self, test, err):
        """Called when an expected failure/error occured."""
        self.expectedFailures.append(
            (test, self._exc_info_to_string(err, test)))
    @failfast
    def addUnexpectedSuccess(self, test):
        """Called when a test was expected to fail, but succeed."""
        self.unexpectedSuccesses.append(test)
    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        return len(self.failures) == len(self.errors) == 0
    def stop(self):
        "Indicates that the tests should be aborted"
        self.shouldStop = True
    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next
        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            msgLines = traceback.format_exception(exctype, value, tb, length)
        else:
            msgLines = traceback.format_exception(exctype, value, tb)
        # With buffering on, append the captured output to the traceback text.
        if self.buffer:
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                msgLines.append(STDERR_LINE % error)
        return ''.join(msgLines)
    def _is_relevant_tb_level(self, tb):
        # Frames from unittest itself are marked via the __unittest global.
        return '__unittest' in tb.tb_frame.f_globals
    def _count_relevant_tb_levels(self, tb):
        # Number of frames up to (excluding) the first unittest-internal one.
        length = 0
        while tb and not self._is_relevant_tb_level(tb):
            length += 1
            tb = tb.tb_next
        return length
    def __repr__(self):
        return ("<%s run=%i errors=%i failures=%i>" %
               (util.strclass(self.__class__), self.testsRun, len(self.errors),
                len(self.failures)))
| gpl-2.0 |
nkgilley/home-assistant | homeassistant/components/cert_expiry/__init__.py | 5 | 2696 | """The cert_expiry component."""
from datetime import timedelta
import logging
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DEFAULT_PORT, DOMAIN
from .errors import TemporaryFailure, ValidationFailure
from .helper import get_cert_expiry_timestamp
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(hours=12)
async def async_setup(hass, config):
    """Platform setup, do nothing."""
    # Configuration happens exclusively through config entries (config flow).
    return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
    """Set up a cert-expiry config entry and forward it to the sensor platform."""
    host, port = entry.data[CONF_HOST], entry.data[CONF_PORT]

    coordinator = CertExpiryDataUpdateCoordinator(hass, host, port)
    await coordinator.async_refresh()
    if not coordinator.last_update_success:
        # First fetch failed; ask Home Assistant to retry the setup later.
        raise ConfigEntryNotReady

    hass.data.setdefault(DOMAIN, {})[entry.entry_id] = coordinator

    if entry.unique_id is None:
        # Older entries lack a unique id; derive one from host and port.
        hass.config_entries.async_update_entry(entry, unique_id=f"{host}:{port}")

    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(entry, "sensor")
    )
    return True
async def async_unload_entry(hass, entry):
    """Unload a config entry by tearing down its sensor platform."""
    unloaded = await hass.config_entries.async_forward_entry_unload(entry, "sensor")
    return unloaded
class CertExpiryDataUpdateCoordinator(DataUpdateCoordinator):
    """Class to manage fetching Cert Expiry data from single endpoint."""

    def __init__(self, hass, host, port):
        """Initialize global Cert Expiry data updater.

        :param hass: Home Assistant instance.
        :param host: hostname whose TLS certificate is checked.
        :param port: TCP port to connect to.
        """
        self.host = host
        self.port = port
        self.cert_error = None  # last ValidationFailure seen, if any
        self.is_cert_valid = False

        # Only show the port in the coordinator name when it is non-standard.
        display_port = f":{port}" if port != DEFAULT_PORT else ""
        name = f"{self.host}{display_port}"

        super().__init__(
            hass, _LOGGER, name=name, update_interval=SCAN_INTERVAL,
        )

    async def _async_update_data(self):
        """Fetch the certificate expiry timestamp.

        Returns ``None`` (after recording the error) on validation failures so
        the sensor can report an invalid certificate instead of going
        unavailable; temporary failures are surfaced as ``UpdateFailed``.
        """
        try:
            timestamp = await get_cert_expiry_timestamp(self.hass, self.host, self.port)
        except TemporaryFailure as err:
            # Chain the original exception so the root cause is preserved
            # in logs (the original raise dropped it).
            raise UpdateFailed(err.args[0]) from err
        except ValidationFailure as err:
            self.cert_error = err
            self.is_cert_valid = False
            _LOGGER.error("Certificate validation error: %s [%s]", self.host, err)
            return None
        self.cert_error = None
        self.is_cert_valid = True
        return timestamp
| apache-2.0 |
Pelagicore/qface | tests/test_validation.py | 2 | 1722 | import logging
import logging.config
from path import Path
from qface.generator import FileSystem
# logging.config.fileConfig('logging.ini')
logging.basicConfig()
log = logging.getLogger(__name__)
inputPath = Path('tests/in')
log.debug('input path folder: {0}'.format(inputPath.abspath()))
def load_one():
    """Parse the com.pelagicore.one interface file into a qface System."""
    document = inputPath / 'com.pelagicore.one.qface'
    return FileSystem.parse_document(document)
def test_resolve():
    """Symbols and type references resolve within a single module."""
    system = load_one()
    module = system.lookup('com.pelagicore.one')
    assert module
    service = module.lookup('OneService')
    assert service
    echo_op = service._operationMap['echo']
    assert echo_op
    string_struct = module.lookup('StringStruct')
    assert string_struct
    assert echo_op.type.reference is string_struct
    message_param = echo_op._parameterMap['message']
    assert message_param
    assert message_param.type.reference is string_struct
    message_prop = service._propertyMap['message']
    assert message_prop
    assert message_prop.type.reference is string_struct
def test_resolve_nested():
    """Nested struct fields and container value types resolve correctly."""
    system = load_one()
    module = system.lookup('com.pelagicore.one')
    assert module
    outer = module.lookup('com.pelagicore.one.StringStruct')
    inner = module.lookup('com.pelagicore.one.NestedStruct')
    assert outer and inner
    nested_field = outer._fieldMap['nested']
    assert nested_field
    assert nested_field.type.reference is inner
    service = module.lookup('com.pelagicore.one.OneService')
    assert service
    list_prop = service._propertyMap['messageList']
    assert list_prop
    assert list_prop.type.nested.reference is outer
    model_prop = service._propertyMap['messageModel']
    assert model_prop
    assert model_prop.type.nested.reference is outer
| mit |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/insuranceplan.py | 1 | 19453 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/InsurancePlan) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class InsurancePlan(domainresource.DomainResource):
    """ Details of a Health Insurance product/plan provided by an organization.
    """

    # Canonical FHIR resource type name, used by the (de)serialization machinery.
    resource_type = "InsurancePlan"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.administeredBy = None
        """ Product administrator.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.alias = None
        """ Alternate names.
        List of `str` items. """

        self.contact = None
        """ Contact for the product.
        List of `InsurancePlanContact` items (represented as `dict` in JSON). """

        self.coverage = None
        """ Coverage details.
        List of `InsurancePlanCoverage` items (represented as `dict` in JSON). """

        self.coverageArea = None
        """ Where product applies.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.endpoint = None
        """ Technical endpoint.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.identifier = None
        """ Business Identifier for Product.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.name = None
        """ Official name.
        Type `str`. """

        self.network = None
        """ What networks are Included.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.ownedBy = None
        """ Plan issuer.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.period = None
        """ When the product is available.
        Type `Period` (represented as `dict` in JSON). """

        self.plan = None
        """ Plan details.
        List of `InsurancePlanPlan` items (represented as `dict` in JSON). """

        self.status = None
        """ draft | active | retired | unknown.
        Type `str`. """

        self.type = None
        """ Kind of product.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        super(InsurancePlan, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """ Describe this resource's elements for the base-class machinery.

        Each tuple appears to be (attribute name, JSON name, type, is_list,
        of_many, required) — fhirclient's element-property convention;
        verify against FHIRAbstractBase.elementProperties.
        """
        js = super(InsurancePlan, self).elementProperties()
        js.extend([
            ("administeredBy", "administeredBy", fhirreference.FHIRReference, False, None, False),
            ("alias", "alias", str, True, None, False),
            ("contact", "contact", InsurancePlanContact, True, None, False),
            ("coverage", "coverage", InsurancePlanCoverage, True, None, False),
            ("coverageArea", "coverageArea", fhirreference.FHIRReference, True, None, False),
            ("endpoint", "endpoint", fhirreference.FHIRReference, True, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("name", "name", str, False, None, False),
            ("network", "network", fhirreference.FHIRReference, True, None, False),
            ("ownedBy", "ownedBy", fhirreference.FHIRReference, False, None, False),
            ("period", "period", period.Period, False, None, False),
            ("plan", "plan", InsurancePlanPlan, True, None, False),
            ("status", "status", str, False, None, False),
            ("type", "type", codeableconcept.CodeableConcept, True, None, False),
        ])
        return js
from . import backboneelement
class InsurancePlanContact(backboneelement.BackboneElement):
    """ Contact for the product.

    The contact for the health insurance product for a certain purpose.
    """

    resource_type = "InsurancePlanContact"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.address = None
        """ Visiting or postal addresses for the contact.
        Type `Address` (represented as `dict` in JSON). """

        self.name = None
        """ A name associated with the contact.
        Type `HumanName` (represented as `dict` in JSON). """

        self.purpose = None
        """ The type of contact.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.telecom = None
        """ Contact details (telephone, email, etc.) for a contact.
        List of `ContactPoint` items (represented as `dict` in JSON). """

        super(InsurancePlanContact, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """ Describe this element's properties for the base-class machinery.

        Each tuple appears to be (attribute name, JSON name, type, is_list,
        of_many, required) — fhirclient's element-property convention;
        verify against FHIRAbstractBase.elementProperties.
        """
        js = super(InsurancePlanContact, self).elementProperties()
        js.extend([
            ("address", "address", address.Address, False, None, False),
            ("name", "name", humanname.HumanName, False, None, False),
            ("purpose", "purpose", codeableconcept.CodeableConcept, False, None, False),
            ("telecom", "telecom", contactpoint.ContactPoint, True, None, False),
        ])
        return js
class InsurancePlanCoverage(backboneelement.BackboneElement):
    """ Coverage details.

    Details about the coverage offered by the insurance product.
    """

    resource_type = "InsurancePlanCoverage"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.benefit = None
        """ List of benefits.
        List of `InsurancePlanCoverageBenefit` items (represented as `dict` in JSON). """

        self.network = None
        """ What networks provide coverage.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.type = None
        """ Type of coverage.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(InsurancePlanCoverage, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """ Describe this element's properties for the base-class machinery.

        Each tuple appears to be (attribute name, JSON name, type, is_list,
        of_many, required) — fhirclient's element-property convention; note
        `benefit` and `type` are required (last flag True).
        """
        js = super(InsurancePlanCoverage, self).elementProperties()
        js.extend([
            ("benefit", "benefit", InsurancePlanCoverageBenefit, True, None, True),
            ("network", "network", fhirreference.FHIRReference, True, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, True),
        ])
        return js
class InsurancePlanCoverageBenefit(backboneelement.BackboneElement):
    """ List of benefits.

    Specific benefits under this type of coverage.
    """

    resource_type = "InsurancePlanCoverageBenefit"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.limit = None
        """ Benefit limits.
        List of `InsurancePlanCoverageBenefitLimit` items (represented as `dict` in JSON). """

        self.requirement = None
        """ Referral requirements.
        Type `str`. """

        self.type = None
        """ Type of benefit.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(InsurancePlanCoverageBenefit, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """ Describe this element's properties for the base-class machinery.

        Each tuple appears to be (attribute name, JSON name, type, is_list,
        of_many, required) — fhirclient's element-property convention; note
        `type` is required (last flag True).
        """
        js = super(InsurancePlanCoverageBenefit, self).elementProperties()
        js.extend([
            ("limit", "limit", InsurancePlanCoverageBenefitLimit, True, None, False),
            ("requirement", "requirement", str, False, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, True),
        ])
        return js
class InsurancePlanCoverageBenefitLimit(backboneelement.BackboneElement):
    """ Benefit limits.

    The specific limits on the benefit.
    """

    resource_type = "InsurancePlanCoverageBenefitLimit"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.code = None
        """ Benefit limit details.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.value = None
        """ Maximum value allowed.
        Type `Quantity` (represented as `dict` in JSON). """

        super(InsurancePlanCoverageBenefitLimit, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """ Describe this element's properties for the base-class machinery.

        Each tuple appears to be (attribute name, JSON name, type, is_list,
        of_many, required) — fhirclient's element-property convention.
        """
        js = super(InsurancePlanCoverageBenefitLimit, self).elementProperties()
        js.extend([
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("value", "value", quantity.Quantity, False, None, False),
        ])
        return js
class InsurancePlanPlan(backboneelement.BackboneElement):
    """ Plan details.

    Details about an insurance plan.
    """

    resource_type = "InsurancePlanPlan"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.coverageArea = None
        """ Where product applies.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.generalCost = None
        """ Overall costs.
        List of `InsurancePlanPlanGeneralCost` items (represented as `dict` in JSON). """

        self.identifier = None
        """ Business Identifier for Product.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.network = None
        """ What networks provide coverage.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.specificCost = None
        """ Specific costs.
        List of `InsurancePlanPlanSpecificCost` items (represented as `dict` in JSON). """

        self.type = None
        """ Type of plan.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(InsurancePlanPlan, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """ Describe this element's properties for the base-class machinery.

        Each tuple appears to be (attribute name, JSON name, type, is_list,
        of_many, required) — fhirclient's element-property convention.
        """
        js = super(InsurancePlanPlan, self).elementProperties()
        js.extend([
            ("coverageArea", "coverageArea", fhirreference.FHIRReference, True, None, False),
            ("generalCost", "generalCost", InsurancePlanPlanGeneralCost, True, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("network", "network", fhirreference.FHIRReference, True, None, False),
            ("specificCost", "specificCost", InsurancePlanPlanSpecificCost, True, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, False),
        ])
        return js
class InsurancePlanPlanGeneralCost(backboneelement.BackboneElement):
    """ Overall costs.

    Overall costs associated with the plan.
    """

    resource_type = "InsurancePlanPlanGeneralCost"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.comment = None
        """ Additional cost information.
        Type `str`. """

        self.cost = None
        """ Cost value.
        Type `Money` (represented as `dict` in JSON). """

        self.groupSize = None
        """ Number of enrollees.
        Type `int`. """

        self.type = None
        """ Type of cost.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(InsurancePlanPlanGeneralCost, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """ Describe this element's properties for the base-class machinery.

        Each tuple appears to be (attribute name, JSON name, type, is_list,
        of_many, required) — fhirclient's element-property convention.
        """
        js = super(InsurancePlanPlanGeneralCost, self).elementProperties()
        js.extend([
            ("comment", "comment", str, False, None, False),
            ("cost", "cost", money.Money, False, None, False),
            ("groupSize", "groupSize", int, False, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, False),
        ])
        return js
class InsurancePlanPlanSpecificCost(backboneelement.BackboneElement):
    """ Specific costs.

    Costs associated with the coverage provided by the product.
    """

    resource_type = "InsurancePlanPlanSpecificCost"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.benefit = None
        """ Benefits list.
        List of `InsurancePlanPlanSpecificCostBenefit` items (represented as `dict` in JSON). """

        self.category = None
        """ General category of benefit.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(InsurancePlanPlanSpecificCost, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """ Describe this element's properties for the base-class machinery.

        Each tuple appears to be (attribute name, JSON name, type, is_list,
        of_many, required) — fhirclient's element-property convention; note
        `category` is required (last flag True).
        """
        js = super(InsurancePlanPlanSpecificCost, self).elementProperties()
        js.extend([
            ("benefit", "benefit", InsurancePlanPlanSpecificCostBenefit, True, None, False),
            ("category", "category", codeableconcept.CodeableConcept, False, None, True),
        ])
        return js
class InsurancePlanPlanSpecificCostBenefit(backboneelement.BackboneElement):
    """ Benefits list.

    List of the specific benefits under this category of benefit.
    """

    resource_type = "InsurancePlanPlanSpecificCostBenefit"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.cost = None
        """ List of the costs.
        List of `InsurancePlanPlanSpecificCostBenefitCost` items (represented as `dict` in JSON). """

        self.type = None
        """ Type of specific benefit.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(InsurancePlanPlanSpecificCostBenefit, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """ Describe this element's properties for the base-class machinery.

        Each tuple appears to be (attribute name, JSON name, type, is_list,
        of_many, required) — fhirclient's element-property convention; note
        `type` is required (last flag True).
        """
        js = super(InsurancePlanPlanSpecificCostBenefit, self).elementProperties()
        js.extend([
            ("cost", "cost", InsurancePlanPlanSpecificCostBenefitCost, True, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, True),
        ])
        return js
class InsurancePlanPlanSpecificCostBenefitCost(backboneelement.BackboneElement):
    """ List of the costs.

    List of the costs associated with a specific benefit.
    """

    resource_type = "InsurancePlanPlanSpecificCostBenefitCost"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.applicability = None
        """ in-network | out-of-network | other.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.qualifiers = None
        """ Additional information about the cost.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.type = None
        """ Type of cost.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.value = None
        """ The actual cost value.
        Type `Quantity` (represented as `dict` in JSON). """

        super(InsurancePlanPlanSpecificCostBenefitCost, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """ Describe this element's properties for the base-class machinery.

        Each tuple appears to be (attribute name, JSON name, type, is_list,
        of_many, required) — fhirclient's element-property convention; note
        `type` is required (last flag True).
        """
        js = super(InsurancePlanPlanSpecificCostBenefitCost, self).elementProperties()
        js.extend([
            ("applicability", "applicability", codeableconcept.CodeableConcept, False, None, False),
            ("qualifiers", "qualifiers", codeableconcept.CodeableConcept, True, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, True),
            ("value", "value", quantity.Quantity, False, None, False),
        ])
        return js
import sys
try:
from . import address
except ImportError:
address = sys.modules[__package__ + '.address']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import contactpoint
except ImportError:
contactpoint = sys.modules[__package__ + '.contactpoint']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import humanname
except ImportError:
humanname = sys.modules[__package__ + '.humanname']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import money
except ImportError:
money = sys.modules[__package__ + '.money']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
| bsd-3-clause |
damoti/pyjx-gwt | gwt/pyjamas/ui/TextBox.py | 7 | 3097 | # Copyright 2006 James Tauber and contributors
# Copyright (C) 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
# Copyright (C) 2012 Vsevolod Fedorov <vsevolod.fedorov@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Factory
from pyjamas.ui.TextBoxBase import TextBoxBase
class TextBox(TextBoxBase):
    '''
    Use Kind to set a HTML5 type
    Attributes supported:
    * Kind
    * MaxLength
    * Min
    * Max
    * Placeholder
    * Required
    * Step
    * VisibleLength
    '''

    # Property descriptors consumed by the Pyjamas designer/Factory layer.
    # Each tuple appears to be (attribute name, human-readable label,
    # property name, default value) — verify against TextBoxBase._getProps.
    _props = [("kind", "Kind", "Kind", None),
              ("maxLength", "Max Length", "MaxLength", None),
              ("min", "Min", "Min", None),
              ("max", "Max", "Max", None),
              ("placeholder", "Place Holder", "PlaceHolder", None),
              ("step", "Step", "Step", None),
              ("visibleLength", "Visible Length", "VisibleLength", None),
              ]

    def __init__(self, **ka):
        # Apply the default widget style unless the caller supplied one.
        ka['StyleName'] = ka.get('StyleName', "gwt-TextBox")
        # Reuse a provided DOM element (e.g. when wrapping an existing node)
        # or create a fresh <input type="text">.
        element = ka.pop('Element', None) or DOM.createInputText()
        TextBoxBase.__init__(self, element, **ka)

    @classmethod
    def _getProps(self):
        # Merge the base widget's descriptors with this class's additions.
        return TextBoxBase._getProps() + self._props

    # --- getters: each reads the matching attribute off the DOM element ---

    def getMaxLength(self):
        return DOM.getIntAttribute(self.getElement(), "maxLength")

    def getKind(self):
        # "Kind" maps to the HTML input "type" attribute (text, number, ...).
        return DOM.getAttribute(self.getElement(), "type")

    def getMin(self):
        return DOM.getAttribute(self.getElement(), "min")

    def getMax(self):
        return DOM.getAttribute(self.getElement(), "max")

    def getPlaceholder(self):
        return DOM.getAttribute(self.getElement(), "placeholder")

    def getStep(self):
        return DOM.getAttribute(self.getElement(), "step")

    def getVisibleLength(self):
        # Visible length maps to the HTML "size" attribute.
        return DOM.getIntAttribute(self.getElement(), "size")

    # --- setters: each writes the matching attribute on the DOM element ---

    def setMaxLength(self, length):
        DOM.setIntAttribute(self.getElement(), "maxLength", length)

    def setKind(self, kind):
        DOM.setAttribute(self.getElement(), "type", kind)

    def setMin(self, min):
        DOM.setAttribute(self.getElement(), "min", min)

    def setMax(self, max):
        DOM.setAttribute(self.getElement(), "max", max)

    def setPlaceholder(self, placeholder):
        DOM.setAttribute(self.getElement(), "placeholder", placeholder)

    def setStep(self, step):
        DOM.setAttribute(self.getElement(), "step", step)

    def setVisibleLength(self, length):
        DOM.setIntAttribute(self.getElement(), "size", length)
| apache-2.0 |
vipul-sharma20/oh-mainline | vendor/packages/django-celery/contrib/release/sphinx-to-rst.py | 31 | 1854 | #!/usr/bin/env python
import os
import re
import sys
dirname = ""
RE_CODE_BLOCK = re.compile(r'.. code-block:: (.+?)\s*$')
RE_INCLUDE = re.compile(r'.. include:: (.+?)\s*$')
RE_REFERENCE = re.compile(r':(.+?):`(.+?)`')
def include_file(lines, pos, match):
    """Replace the ``.. include::`` directive at ``lines[pos]`` with the
    converted contents of the referenced file.

    The module-level ``dirname`` global tracks the directory of the file
    currently being processed so relative include paths resolve; it is saved
    and restored around the recursive ``sphinx_to_rst`` call.
    """
    global dirname
    orig_filename = match.groups()[0]
    filename = os.path.join(dirname, orig_filename)
    fh = open(filename)
    try:
        old_dirname = dirname
        # NOTE(review): this sets dirname to the include's parent relative to
        # the *original* directive text rather than joining it with the
        # current dirname — deeply nested includes may resolve from the
        # wrong base directory. Confirm intended behavior.
        dirname = os.path.dirname(orig_filename)
        try:
            lines[pos] = sphinx_to_rst(fh)
        finally:
            dirname = old_dirname
    finally:
        fh.close()
def replace_code_block(lines, pos, match):
    """Convert a Sphinx ``.. code-block::`` directive to plain reST.

    Clears the directive line and turns the nearest preceding non-blank line
    into a literal-block introduction by appending ``"::"`` (or just ``":"``
    when the line already ends with a colon).

    :param lines: mutable list of source lines, modified in place
    :param pos: index of the ``.. code-block::`` directive in *lines*
    :param match: regex match for the directive (the language name is unused)
    """
    lines[pos] = ""
    curpos = pos - 1
    # Find the first previous line with text to append "::" to it.
    while curpos >= 0:
        prev_line = lines[curpos]
        if not prev_line.isspace():
            prev_line_with_text = curpos
            break
        curpos -= 1
    else:
        # No preceding text line (directive at the top of the file): nothing
        # to annotate. The original code would have wrapped around the list
        # via negative indexing and mutated the wrong line.
        return
    if lines[prev_line_with_text].endswith(":"):
        lines[prev_line_with_text] += ":"
    else:
        lines[prev_line_with_text] += "::"
TO_RST_MAP = {RE_CODE_BLOCK: replace_code_block,
RE_REFERENCE: r'``\2``',
RE_INCLUDE: include_file}
def _process(lines):
    """Return a new list of lines with all Sphinx constructs rewritten."""
    out = list(lines)  # work on a copy; the input iterable is left untouched
    for idx in range(len(out)):
        for pattern, action in TO_RST_MAP.items():
            if callable(action):
                matched = pattern.match(out[idx])
                if matched:
                    action(out, idx, matched)
            else:
                out[idx] = pattern.sub(action, out[idx])
    return out
def sphinx_to_rst(fh):
    """Convert an open Sphinx-flavoured reST file to plain reST text."""
    converted = _process(fh)
    return "".join(converted)
if __name__ == "__main__":
dirname = os.path.dirname(sys.argv[1])
fh = open(sys.argv[1])
try:
print(sphinx_to_rst(fh))
finally:
fh.close()
| agpl-3.0 |
tscohen/chainer | chainer/functions/array/reshape.py | 12 | 1546 | from chainer import function
from chainer.utils import type_check
def _count_unknown_dims(shape):
cnt = 0
for dim in shape:
cnt += dim < 0
return cnt
class Reshape(function.Function):

    """Reshapes an input array without copy."""

    def __init__(self, shape):
        # At most one dimension may be unknown (negative) so that the total
        # element count determines it unambiguously.
        cnt = _count_unknown_dims(shape)
        assert cnt == 0 or cnt == 1
        self.shape = shape

    def check_type_forward(self, in_types):
        """Validate that the input's total size is compatible with the
        target shape (exactly divisible when one dimension is inferred)."""
        type_check.expect(
            in_types.size() == 1,
        )
        x_type, = in_types

        cnt = _count_unknown_dims(self.shape)
        if cnt == 0:
            # Fully specified target: element counts must match exactly.
            type_check.expect(
                type_check.prod(x_type.shape) == type_check.prod(self.shape))
        else:
            # One inferred dimension: the product of the known dimensions
            # must evenly divide the input's total size.
            known_size = 1
            for s in self.shape:
                if s > 0:
                    known_size *= s
            size_var = type_check.Variable(known_size,
                                           'known_size(=%d)' % known_size)
            type_check.expect(
                type_check.prod(x_type.shape) % size_var == 0)

    def forward(self, x):
        # ndarray.reshape returns a view — no copy is made.
        return x[0].reshape(self.shape),

    def backward(self, x, gy):
        # The gradient is simply reshaped back to the input's shape.
        return gy[0].reshape(x[0].shape),
def reshape(x, shape):
    """Reshapes an input variable without copy.

    Args:
        x (~chainer.Variable): Input variable.
        shape (tuple of ints): Target shape.

    Returns:
        ~chainer.Variable: Variable that holds a reshaped version of the input
            variable.

    """
    op = Reshape(shape)
    return op(x)
| mit |
sonelu/pypot | pypot/sensor/kinect/sensor.py | 2 | 3763 | """
This code has been developed by Baptiste Busch: https://github.com/buschbapti
This module allows you to retrieve Skeleton information from a Kinect device.
It is only the client side of a zmq client/server application.
The server part can be found at: https://bitbucket.org/buschbapti/kinectserver/src
It used the Microsoft Kinect SDK and thus only work on Windows.
Of course, the client side can be used on any platform.
"""
import zmq
import numpy
import threading
from collections import namedtuple
from ...utils import Point3D, Point2D, Quaternion
torso_joints = ('hip_center', 'spine', 'shoulder_center', 'head')
left_arm_joints = ('shoulder_left', 'elbow_left', 'wrist_left', 'hand_left')
right_arm_joints = ('shoulder_right', 'elbow_right', 'wrist_right', 'hand_right')
left_leg_joints = ('hip_left', 'knee_left', 'ankle_left', 'foot_left')
right_leg_joints = ('hip_right', 'knee_right', 'ankle_right', 'foot_right')
skeleton_joints = torso_joints + left_arm_joints + right_arm_joints + left_leg_joints + right_leg_joints
class Skeleton(namedtuple('Skeleton', ('timestamp', 'user_id') + skeleton_joints)):
    # Convenience list of the joint field names only (excludes the
    # timestamp and user_id fields), used when iterating over joints.
    joints = skeleton_joints
Joint = namedtuple('Joint', ('position', 'orientation', 'pixel_coordinate'))
class KinectSensor(object):
    """Client for a remote Kinect skeleton-tracking server.

    Subscribes to a zmq PUB socket published by the Windows-side Kinect
    server; a background daemon thread continuously updates the latest
    Skeleton per tracked user.
    """

    def __init__(self, addr, port):
        """Connect to the server at tcp://addr:port and start receiving."""
        self._lock = threading.Lock()
        self._skeleton = {}  # user_id -> Skeleton, guarded by _lock

        self.context = zmq.Context()
        self.sub_skel = self.context.socket(zmq.SUB)
        self.sub_skel.connect('tcp://{}:{}'.format(addr, port))
        self.sub_skel.setsockopt(zmq.SUBSCRIBE, '')

        t = threading.Thread(target=self.get_skeleton)
        t.daemon = True
        t.start()

    def remove_user(self, user_index):
        """Forget the tracked skeleton of a single user."""
        with self._lock:
            del self._skeleton[user_index]

    def remove_all_users(self):
        """Forget all tracked skeletons."""
        with self._lock:
            self._skeleton = {}

    @property
    def tracked_skeleton(self):
        """dict: latest Skeleton per user id (access is lock-protected)."""
        with self._lock:
            return self._skeleton

    @tracked_skeleton.setter
    def tracked_skeleton(self, skeleton):
        with self._lock:
            self._skeleton[skeleton.user_id] = skeleton

    def get_skeleton(self):
        """Receive loop (runs in a daemon thread): decode each multipart
        message into a Skeleton and store it as the latest for its user."""
        while True:
            md = self.sub_skel.recv_json()
            msg = self.sub_skel.recv()
            skel_array = numpy.fromstring(msg, dtype=float, sep=",")
            skel_array = skel_array.reshape(md['shape'])
            nb_joints = md['shape'][0]
            joints = []
            for i in range(nb_joints):
                # Columns 0-3: homogeneous position (x, y, z, w);
                # columns 4-5: pixel coordinate; columns 6-9: quaternion.
                x, y, z, w = skel_array[i][0:4]
                position = Point3D(x / w, y / w, z / w)
                pixel_coord = Point2D(*skel_array[i][4:6])
                orientation = Quaternion(*skel_array[i][6:10])
                joints.append(Joint(position, orientation, pixel_coord))
            self.tracked_skeleton = Skeleton(md['timestamp'], md['user_index'], *joints)

    def run(self):
        """Visualization loop: draw each tracked joint with OpenCV.

        NOTE(review): ``cv2`` is only imported under ``__main__`` below, so
        calling run() on an imported instance raises NameError — confirm
        whether the import should move to module level.
        """
        cv2.startWindowThread()
        while True:
            img = numpy.zeros((480, 640, 3))
            # BUG FIX: the original referenced the module-global ``kinect``
            # created in __main__; use self so run() works on any instance.
            skeleton = self.tracked_skeleton
            if skeleton:
                # items() instead of iteritems(): identical here in Python 2
                # and required in Python 3.
                for user, skel in skeleton.items():
                    for joint_name in skel.joints:
                        x, y = getattr(skel, joint_name).pixel_coordinate
                        pt = (int(x), int(y))
                        cv2.circle(img, pt, 5, (255, 255, 255), thickness=-1)
            self.remove_all_users()
            cv2.imshow('Skeleton', img)
            cv2.waitKey(50)
        # Unreachable after the infinite loop; kept from the original so the
        # intended shutdown sequence is documented.
        self.sub_skel.close()
        self.context.term()
if __name__ == '__main__':
import cv2
kinect = KinectSensor('193.50.110.177', 9999)
kinect.run()
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.