| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
mingrammer/pyreportcard | tests/test_vcs.py | 1 | 3355 | import os
import shutil
import unittest

from config import Config
from db.collection import get_repo_collection
from vcs.repository import cache, clear, clone, create_repository, is_cached, parse_url


class RepositoryTest(unittest.TestCase):
    def setUp(self):
        self.repositories = get_repo_collection()
        self.repo = create_repository('github.com/mingrammer/awesome-finder')

    def tearDown(self):
        self.repositories.delete_one({'url': self.repo.url})
        if os.path.isdir(Config.CLONE_TMP_DIR):
            cloned_path = os.path.join(Config.CLONE_TMP_DIR, self.repo.name)
            if os.path.isdir(cloned_path):
                shutil.rmtree(cloned_path)
            else:
                shutil.rmtree(Config.CLONE_TMP_DIR)

    def test_parse_url(self):
        testcases = [
            {
                'url': 'github.com/mingrammer/awesome-finder',
                'name': 'awesome-finder',
                'username': 'mingrammer'
            },
            {
                'url': 'github.com/django/django',
                'name': 'django',
                'username': 'django'
            }
        ]
        for tc in testcases:
            username, name = parse_url(tc['url'])
            self.assertEqual(name, tc['name'])
            self.assertEqual(username, tc['username'])

    def test_parse_url_fail(self):
        testcases = [
            'github.com/mingrammer/awesome-finder/useless',
            'github.com/django/'
        ]
        for tc in testcases:
            with self.assertRaises(Exception):
                parse_url(tc)

    def test_create_repository(self):
        testcases = [
            {
                'url': 'github.com/mingrammer/awesome-finder',
                'name': 'awesome-finder',
                'username': 'mingrammer',
            },
            {
                'url': 'github.com/django/django',
                'name': 'django',
                'username': 'django',
            }
        ]
        for tc in testcases:
            repo = create_repository(tc['url'])
            self.assertEqual(repo.url, tc['url'])
            self.assertEqual(repo.name, tc['name'])
            self.assertEqual(repo.username, tc['username'])

    def test_cache(self):
        cache(self.repo)
        repo_doc = self.repositories.find_one({'url': self.repo.url})
        self.assertEqual(repo_doc['url'], self.repo.url)
        self.assertEqual(repo_doc['name'], self.repo.name)
        self.assertEqual(repo_doc['username'], self.repo.username)
        self.assertNotEqual(repo_doc['last_latest_hash'], None)

    def test_is_cache(self):
        self.assertEqual(is_cached(self.repo), False)
        cache(self.repo)
        self.assertEqual(is_cached(self.repo), True)

    def test_clone(self):
        cloned_path = clone(self.repo)
        self.assertEqual(cloned_path, os.path.join(Config.CLONE_TMP_DIR, self.repo.name))

    def test_clone_fail(self):
        self.repo.url = 'github.com/mingrammer/null'
        with self.assertRaises(Exception):
            clone(self.repo.url)

    def test_clear(self):
        parent_dir = '11de2e29fe888b68'
        child_dir = '54fc55942cb4abb6'
        os.mkdir(parent_dir)
        os.mkdir(os.path.join(parent_dir, child_dir))
        clear(parent_dir)
        self.assertEqual(os.path.isdir(parent_dir), False)
| mit |
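The tests above pin down the contract of parse_url: a URL of the exact form github.com/<username>/<name> yields the (username, name) pair, and any URL with extra or missing path segments raises. A minimal sketch consistent with those tests; the real implementation lives in vcs/repository.py and may differ:

# Hypothetical sketch, not the actual vcs.repository code.
def parse_url(url):
    parts = url.split('/')
    # Exactly three non-empty segments: host, username, repository name.
    if len(parts) != 3 or not all(parts):
        raise ValueError('expected a URL like github.com/<username>/<name>')
    _host, username, name = parts
    return username, name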
dancingdan/tensorflow | tensorflow/python/data/experimental/kernel_tests/tf_record_writer_test.py | 8 | 4406 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.TFRecordWriter`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from tensorflow.python.data.experimental.ops import writers
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.io import python_io
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat


class TFRecordWriterTest(test_base.DatasetTestBase):

  def setUp(self):
    super(TFRecordWriterTest, self).setUp()
    self._num_records = 7
    self.filename = array_ops.placeholder(dtypes.string, shape=[])
    self.compression_type = array_ops.placeholder_with_default("", shape=[])

    input_dataset = readers.TFRecordDataset([self.filename],
                                            self.compression_type)
    self.writer = writers.TFRecordWriter(
        self._outputFilename(), self.compression_type).write(input_dataset)

  def _record(self, i):
    return compat.as_bytes("Record %d" % (i))

  def _createFile(self, options=None):
    filename = self._inputFilename()
    writer = python_io.TFRecordWriter(filename, options)
    for i in range(self._num_records):
      writer.write(self._record(i))
    writer.close()
    return filename

  def _inputFilename(self):
    return os.path.join(self.get_temp_dir(), "tf_record.in.txt")

  def _outputFilename(self):
    return os.path.join(self.get_temp_dir(), "tf_record.out.txt")

  def testWrite(self):
    with self.cached_session() as sess:
      sess.run(
          self.writer, feed_dict={
              self.filename: self._createFile(),
          })
    for i, r in enumerate(tf_record.tf_record_iterator(self._outputFilename())):
      self.assertAllEqual(self._record(i), r)

  def testWriteZLIB(self):
    options = tf_record.TFRecordOptions(tf_record.TFRecordCompressionType.ZLIB)
    with self.cached_session() as sess:
      sess.run(
          self.writer,
          feed_dict={
              self.filename: self._createFile(options),
              self.compression_type: "ZLIB",
          })
    for i, r in enumerate(
        tf_record.tf_record_iterator(self._outputFilename(), options=options)):
      self.assertAllEqual(self._record(i), r)

  def testWriteGZIP(self):
    options = tf_record.TFRecordOptions(tf_record.TFRecordCompressionType.GZIP)
    with self.cached_session() as sess:
      sess.run(
          self.writer,
          feed_dict={
              self.filename: self._createFile(options),
              self.compression_type: "GZIP",
          })
    for i, r in enumerate(
        tf_record.tf_record_iterator(self._outputFilename(), options=options)):
      self.assertAllEqual(self._record(i), r)

  def testFailDataset(self):
    with self.assertRaises(TypeError):
      writers.TFRecordWriter(self._outputFilename(),
                             self.compression_type).write("whoops")

  def testFailDType(self):
    input_dataset = dataset_ops.Dataset.from_tensors(10)
    with self.assertRaises(TypeError):
      writers.TFRecordWriter(self._outputFilename(),
                             self.compression_type).write(input_dataset)

  def testFailShape(self):
    input_dataset = dataset_ops.Dataset.from_tensors([["hello"], ["world"]])
    with self.assertRaises(TypeError):
      writers.TFRecordWriter(self._outputFilename(),
                             self.compression_type).write(input_dataset)


if __name__ == "__main__":
  test.main()
| apache-2.0 |
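Outside the test harness, the writer under test is exposed publicly as tf.data.experimental.TFRecordWriter. A hedged, self-contained usage sketch in TF 1.x graph mode, to match the placeholders the tests use; the output path is illustrative:

import tensorflow as tf

# A dataset of seven scalar byte strings, like the test's _record() helper.
dataset = tf.data.Dataset.from_tensor_slices(
    [("Record %d" % i).encode() for i in range(7)])
# In graph mode, write() returns an op; running it serializes the dataset.
write_op = tf.data.experimental.TFRecordWriter("/tmp/example.tfrecord").write(dataset)
with tf.Session() as sess:
    sess.run(write_op)
# Read the records back with the low-level iterator the tests also use.
for record in tf.python_io.tf_record_iterator("/tmp/example.tfrecord"):
    print(record)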
darkleons/lama | addons/account_chart/__openerp__.py | 313 | 1451 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Template of Charts of Accounts',
    'version': '1.1',
    'category': 'Hidden/Dependency',
    'description': """
Remove minimal account chart.
=============================

Deactivates minimal chart of accounts.
    """,
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/accounting',
    'depends': ['account'],
    'data': [],
    'demo': [],
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wenwei202/terngrad | terngrad/inception/slim/inception_utils.py | 66 | 2630 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains common code shared by all inception models.
Usage of arg scope:
with slim.arg_scope(inception_arg_scope()):
logits, end_points = inception.inception_v3(images, num_classes,
is_training=is_training)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
def inception_arg_scope(weight_decay=0.00004,
                        use_batch_norm=True,
                        batch_norm_decay=0.9997,
                        batch_norm_epsilon=0.001):
  """Defines the default arg scope for inception models.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    use_batch_norm: If `True`, batch_norm is applied after each convolution.
    batch_norm_decay: Decay for batch norm moving average.
    batch_norm_epsilon: Small float added to variance to avoid dividing by zero
      in batch norm.

  Returns:
    An `arg_scope` to use for the inception models.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      # collection containing update_ops.
      'updates_collections': tf.GraphKeys.UPDATE_OPS,
  }
  if use_batch_norm:
    normalizer_fn = slim.batch_norm
    normalizer_params = batch_norm_params
  else:
    normalizer_fn = None
    normalizer_params = {}
  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      weights_regularizer=slim.l2_regularizer(weight_decay)):
    with slim.arg_scope(
        [slim.conv2d],
        weights_initializer=slim.variance_scaling_initializer(),
        activation_fn=tf.nn.relu,
        normalizer_fn=normalizer_fn,
        normalizer_params=normalizer_params) as sc:
      return sc
| apache-2.0 |
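A hedged usage sketch of the helper above, mirroring the module docstring; it assumes an inception model definition such as slim's inception_v3 is importable, and the tensor names are illustrative:

# images: a batch of input tensors, e.g. shape [batch, 299, 299, 3].
with slim.arg_scope(inception_arg_scope(weight_decay=4e-5,
                                        use_batch_norm=True)):
  logits, end_points = inception.inception_v3(images, num_classes=1001,
                                              is_training=True)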
syaiful6/django | tests/generic_views/models.py | 382 | 1631 | from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import QuerySet
from django.db.models.manager import BaseManager
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Artist(models.Model):
    name = models.CharField(max_length=100)

    class Meta:
        ordering = ['name']
        verbose_name = 'professional artist'
        verbose_name_plural = 'professional artists'

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse('artist_detail', kwargs={'pk': self.id})


@python_2_unicode_compatible
class Author(models.Model):
    name = models.CharField(max_length=100)
    slug = models.SlugField()

    class Meta:
        ordering = ['name']

    def __str__(self):
        return self.name


class DoesNotExistQuerySet(QuerySet):
    def get(self, *args, **kwargs):
        raise Author.DoesNotExist


DoesNotExistBookManager = BaseManager.from_queryset(DoesNotExistQuerySet)


@python_2_unicode_compatible
class Book(models.Model):
    name = models.CharField(max_length=300)
    slug = models.SlugField()
    pages = models.IntegerField()
    authors = models.ManyToManyField(Author)
    pubdate = models.DateField()
    objects = models.Manager()
    does_not_exist = DoesNotExistBookManager()

    class Meta:
        ordering = ['-pubdate']

    def __str__(self):
        return self.name


class Page(models.Model):
    content = models.TextField()
    template = models.CharField(max_length=300)


class BookSigning(models.Model):
    event_date = models.DateTimeField()
| bsd-3-clause |
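The BaseManager.from_queryset(...) line above is the standard way to turn a custom QuerySet into a manager class; attaching it as a second manager keeps the default manager intact. A hedged illustration of how tests can then force "missing object" code paths (the lookup arguments are illustrative):

# Book.objects.get(...) behaves normally, while the custom manager
# always raises, regardless of what is stored:
try:
    Book.does_not_exist.get(pk=1)
except Author.DoesNotExist:
    pass  # every get() through DoesNotExistQuerySet raises this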
affo/nova | nova/tests/unit/api/openstack/compute/contrib/test_multiple_create.py | 4 | 22862 | # Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo_config import cfg
import webob
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import block_device_mapping as \
block_device_mapping_v21
from nova.api.openstack.compute.plugins.v3 import multiple_create as \
multiple_create_v21
from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
from nova.api.openstack.compute import servers as servers_v20
from nova.api.openstack import extensions as extensions_v20
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import db
from nova import exception
from nova.network import manager
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake
CONF = cfg.CONF
FAKE_UUID = fakes.FAKE_UUID
def fake_gen_uuid():
return FAKE_UUID
def return_security_group(context, instance_id, security_group_id):
pass
class MultiCreateExtensionTestV21(test.TestCase):
validation_error = exception.ValidationError
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(MultiCreateExtensionTestV21, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers_v21.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist', 'os-multiple-create',
'osapi_v3')
self.no_mult_create_controller = servers_v21.ServersController(
extension_info=ext_info)
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"security_groups": inst['security_groups'],
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
def instance_update(context, uuid, values):
instance = self.instance_cache_by_uuid[uuid]
instance.update(values)
return instance
def server_update(context, instance_uuid, params, update_cells=True,
columns_to_join=None):
inst = self.instance_cache_by_uuid[instance_uuid]
inst.update(params)
return (inst, inst)
def fake_method(*args, **kwargs):
pass
def project_get_networks(context, user_id):
return dict(id='1', host='localhost')
def queue_get_for(context, *args):
return 'network_topic'
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'instance_add_security_group',
return_security_group)
self.stubs.Set(db, 'project_get_networks',
project_get_networks)
self.stubs.Set(db, 'instance_create', instance_create)
self.stubs.Set(db, 'instance_system_metadata_update',
fake_method)
self.stubs.Set(db, 'instance_get', instance_get)
self.stubs.Set(db, 'instance_update', instance_update)
self.stubs.Set(db, 'instance_update_and_get_original',
server_update)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
self.req = fakes.HTTPRequest.blank('')
def _test_create_extra(self, params, no_image=False,
override_controller=None):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
if no_image:
server.pop('imageRef', None)
server.update(params)
body = dict(server=server)
if override_controller:
server = override_controller.create(self.req,
body=body).obj['server']
else:
server = self.controller.create(self.req,
body=body).obj['server']
def _check_multiple_create_extension_disabled(self, **kwargs):
# NOTE: on v2.1 API, "create a server" API doesn't add the following
# attributes into kwargs when non-loading multiple_create extension.
# However, v2.0 API adds them as values "1" instead. So we need to
# define checking methods for each API here.
self.assertNotIn('min_count', kwargs)
self.assertNotIn('max_count', kwargs)
def test_create_instance_with_multiple_create_disabled(self):
min_count = 2
max_count = 3
params = {
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count,
multiple_create_v21.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self._check_multiple_create_extension_disabled(**kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(
params,
override_controller=self.no_mult_create_controller)
def test_multiple_create_with_string_type_min_and_max(self):
min_count = '2'
max_count = '3'
params = {
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count,
multiple_create_v21.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIsInstance(kwargs['min_count'], int)
self.assertIsInstance(kwargs['max_count'], int)
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['max_count'], 3)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_with_multiple_create_enabled(self):
min_count = 2
max_count = 3
params = {
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count,
multiple_create_v21.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['max_count'], 3)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_invalid_negative_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: -1,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_instance_invalid_negative_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MAX_ATTRIBUTE_NAME: -1,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_instance_with_blank_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: '',
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_instance_with_blank_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MAX_ATTRIBUTE_NAME: '',
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_instance_invalid_min_greater_than_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 4,
multiple_create_v21.MAX_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req,
body=body)
def test_create_instance_invalid_alpha_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 'abcd',
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_instance_invalid_alpha_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MAX_ATTRIBUTE_NAME: 'abcd',
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_multiple_instances(self):
"""Test creating multiple instances but not asking for
reservation_id
"""
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
res = self.controller.create(self.req, body=body).obj
self.assertEqual(FAKE_UUID, res["server"]["id"])
self._check_admin_password_len(res["server"])
def test_create_multiple_instances_pass_disabled(self):
"""Test creating multiple instances but not asking for
reservation_id
"""
self.flags(enable_instance_password=False)
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
res = self.controller.create(self.req, body=body).obj
self.assertEqual(FAKE_UUID, res["server"]["id"])
self._check_admin_password_missing(res["server"])
def _check_admin_password_len(self, server_dict):
"""utility function - check server_dict for admin_password length."""
self.assertEqual(CONF.password_length,
len(server_dict["adminPass"]))
def _check_admin_password_missing(self, server_dict):
"""utility function - check server_dict for admin_password absence."""
self.assertNotIn("admin_password", server_dict)
def _create_multiple_instances_resv_id_return(self, resv_id_return):
"""Test creating multiple instances with asking for
reservation_id
"""
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
multiple_create_v21.RRID_ATTRIBUTE_NAME: resv_id_return
}
}
res = self.controller.create(self.req, body=body)
reservation_id = res.obj['reservation_id']
self.assertNotEqual(reservation_id, "")
self.assertIsNotNone(reservation_id)
self.assertTrue(len(reservation_id) > 1)
def test_create_multiple_instances_with_resv_id_return(self):
self._create_multiple_instances_resv_id_return(True)
def test_create_multiple_instances_with_string_resv_id_return(self):
self._create_multiple_instances_resv_id_return("True")
def test_create_multiple_instances_with_multiple_volume_bdm(self):
"""Test that a BadRequest is raised if multiple instances
are requested with a list of block device mappings for volumes.
"""
min_count = 2
bdm = [{'source_type': 'volume', 'uuid': 'vol-xxxx'},
{'source_type': 'volume', 'uuid': 'vol-yyyy'}
]
params = {
block_device_mapping_v21.ATTRIBUTE_NAME: bdm,
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(len(kwargs['block_device_mapping']), 2)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
exc = self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params, no_image=True)
self.assertEqual("Cannot attach one or more volumes to multiple "
"instances", exc.explanation)
def test_create_multiple_instances_with_single_volume_bdm(self):
"""Test that a BadRequest is raised if multiple instances
are requested to boot from a single volume.
"""
min_count = 2
bdm = [{'source_type': 'volume', 'uuid': 'vol-xxxx'}]
params = {
block_device_mapping_v21.ATTRIBUTE_NAME: bdm,
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['block_device_mapping'][0]['volume_id'],
'vol-xxxx')
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
exc = self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params, no_image=True)
self.assertEqual("Cannot attach one or more volumes to multiple "
"instances", exc.explanation)
def test_create_multiple_instance_with_non_integer_max_count(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MAX_ATTRIBUTE_NAME: 2.5,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
def test_create_multiple_instance_with_non_integer_min_count(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 2.5,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
class MultiCreateExtensionTestV2(MultiCreateExtensionTestV21):
validation_error = webob.exc.HTTPBadRequest
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(MultiCreateExtensionTestV2, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
fakes.stub_out_nw_api(self.stubs)
self.ext_mgr = extensions_v20.ExtensionManager()
self.ext_mgr.extensions = {
'os-volumes': 'fake',
'os-multiple-create': 'fake',
'os-block-device-mapping-v2-boot': 'fake'
}
self.controller = servers_v20.Controller(self.ext_mgr)
no_mult_ext_mgr = extensions_v20.ExtensionManager()
no_mult_ext_mgr.extensions = {
'os-volumes': 'fake',
'os-block-device-mapping-v2-boot': 'fake'
}
self.no_mult_create_controller = servers_v20.Controller(
no_mult_ext_mgr)
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"config_drive": None,
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"root_device_name": inst.get('root_device_name', 'vda'),
"security_groups": inst['security_groups'],
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'instance_create', instance_create)
self.stubs.Set(db, 'instance_get', instance_get)
def _check_multiple_create_extension_disabled(self, **kwargs):
self.assertEqual(kwargs['min_count'], 1)
self.assertEqual(kwargs['max_count'], 1)
| apache-2.0 |
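A recurring idiom in the tests above is to capture the real compute_api.API.create, wrap it with assertions on the keyword arguments the API layer passed down, and stub the wrapper back in. Distilled as a hypothetical helper; the names are illustrative and not part of the original test module:

def intercept_create(testcase, **expected_kwargs):
    """Stub compute_api.API.create to assert selected kwargs, then delegate."""
    old_create = compute_api.API.create

    def create(*args, **kwargs):
        for key, value in expected_kwargs.items():
            testcase.assertEqual(value, kwargs[key])
        return old_create(*args, **kwargs)

    testcase.stubs.Set(compute_api.API, 'create', create)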
memtoko/django | tests/basic/tests.py | 3 | 29074 | from __future__ import unicode_literals
import threading
import warnings
from datetime import datetime, timedelta
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections
from django.db.models.fields import Field
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.manager import BaseManager
from django.db.models.query import EmptyQuerySet, QuerySet
from django.test import (
TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import Article, ArticleSelectOnSave, SelfRef
class ModelInstanceCreationTests(TestCase):
def test_object_is_not_written_to_database_until_save_was_called(self):
a = Article(
id=None,
headline='Area man programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertIsNone(a.id)
self.assertEqual(Article.objects.all().count(), 0)
# Save it into the database. You have to call save() explicitly.
a.save()
self.assertIsNotNone(a.id)
self.assertEqual(Article.objects.all().count(), 1)
def test_can_initialize_model_instance_using_positional_arguments(self):
"""
You can initialize a model instance using positional arguments,
which should match the field order as defined in the model.
"""
a = Article(None, 'Second article', datetime(2005, 7, 29))
a.save()
self.assertEqual(a.headline, 'Second article')
self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))
def test_can_create_instance_using_kwargs(self):
a = Article(
id=None,
headline='Third article',
pub_date=datetime(2005, 7, 30),
)
a.save()
self.assertEqual(a.headline, 'Third article')
self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))
def test_autofields_generate_different_values_for_each_instance(self):
a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
self.assertNotEqual(a3.id, a1.id)
self.assertNotEqual(a3.id, a2.id)
def test_can_mix_and_match_position_and_kwargs(self):
# You can also mix and match position and keyword arguments, but
# be sure not to duplicate field information.
a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Fourth article')
def test_cannot_create_instance_with_invalid_kwargs(self):
six.assertRaisesRegex(
self,
TypeError,
"'foo' is an invalid keyword argument for this function",
Article,
id=None,
headline='Some headline',
pub_date=datetime(2005, 7, 31),
foo='bar',
)
def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
"""
You can leave off the value for an AutoField when creating an
object, because it'll get filled in automatically when you save().
"""
a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Article 5')
self.assertNotEqual(a.id, None)
def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
a = Article(pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Default headline')
def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
"""as much precision in *seconds*"""
a1 = Article(
headline='Article 7',
pub_date=datetime(2005, 7, 31, 12, 30),
)
a1.save()
self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date,
datetime(2005, 7, 31, 12, 30))
a2 = Article(
headline='Article 8',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a2.save()
self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45))
def test_saving_an_object_again_does_not_create_a_new_object(self):
a = Article(headline='original', pub_date=datetime(2014, 5, 16))
a.save()
current_id = a.id
a.save()
self.assertEqual(a.id, current_id)
a.headline = 'Updated headline'
a.save()
self.assertEqual(a.id, current_id)
def test_querysets_checking_for_membership(self):
headlines = [
'Area man programs in Python', 'Second article', 'Third article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
a = Article(headline='Some headline', pub_date=some_pub_date)
a.save()
# You can use 'in' to test for membership...
self.assertIn(a, Article.objects.all())
# ... but there will often be more efficient ways if that is all you need:
self.assertTrue(Article.objects.filter(id=a.id).exists())
class ModelTest(TestCase):
def test_objects_attribute_is_only_available_on_the_class_itself(self):
six.assertRaisesRegex(
self,
AttributeError,
"Manager isn't accessible via Article instances",
getattr,
Article(),
"objects",
)
self.assertFalse(hasattr(Article(), 'objects'))
self.assertTrue(hasattr(Article, 'objects'))
def test_queryset_delete_removes_all_items_in_that_queryset(self):
headlines = [
'An article', 'Article One', 'Amazing article', 'Boring article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
["<Article: Amazing article>",
"<Article: An article>",
"<Article: Article One>",
"<Article: Boring article>"])
Article.objects.filter(headline__startswith='A').delete()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
["<Article: Boring article>"])
def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
some_pub_date = datetime(2014, 5, 16, 12, 1)
a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
self.assertNotEqual(a1, a2)
self.assertEqual(a1, Article.objects.get(id__exact=a1.id))
self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))
@skipUnlessDBFeature('supports_microsecond_precision')
def test_microsecond_precision(self):
# In PostgreSQL, microsecond-level precision is available.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(pk=a9.pk).pub_date,
datetime(2005, 7, 31, 12, 30, 45, 180))
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported(self):
# In MySQL, microsecond-level precision isn't available. You'll lose
# microsecond-level precision once the data is saved.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(id__exact=a9.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45))
def test_manually_specify_primary_key(self):
# You can manually specify the primary key when creating a new object.
a101 = Article(
id=101,
headline='Article 101',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a101.save()
a101 = Article.objects.get(pk=101)
self.assertEqual(a101.headline, 'Article 101')
def test_create_method(self):
# You can create saved objects in a single step
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
self.assertEqual(Article.objects.get(headline="Article 10"), a10)
def test_year_lookup_edge_case(self):
# Edge-case test: A year lookup should retrieve all objects in
# the given year, including Jan. 1 and Dec. 31.
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertQuerysetEqual(Article.objects.filter(pub_date__year=2008),
["<Article: Article 11>", "<Article: Article 12>"])
def test_unicode_data(self):
# Unicode data works, too.
a = Article(
headline='\u6797\u539f \u3081\u3050\u307f',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.get(pk=a.id).headline,
'\u6797\u539f \u3081\u3050\u307f')
def test_hash_function(self):
# Model instances have a hash function, so they can be used in sets
# or as dictionary keys. Two models compare as equal if their primary
# keys are equal.
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a11 = Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
s = {a10, a11, a12}
self.assertIn(Article.objects.get(headline='Article 11'), s)
def test_field_ordering(self):
"""
Field instances have a `__lt__` comparison function to define an
ordering based on their creation. Prior to #17851 this ordering
comparison relied on the now unsupported `__cmp__` and was assuming
compared objects were both Field instances raising `AttributeError`
when it should have returned `NotImplemented`.
"""
f1 = Field()
f2 = Field(auto_created=True)
f3 = Field()
self.assertLess(f2, f1)
self.assertGreater(f3, f1)
self.assertIsNotNone(f1)
self.assertNotIn(f2, (None, 1, ''))
def test_extra_method_select_argument_with_dashes_and_values(self):
# The 'select' argument to extra() supports names with dashes in
# them, as long as you use values().
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
dicts = Article.objects.filter(
pub_date__year=2008).extra(
select={'dashed-value': '1'}).values('headline', 'dashed-value')
self.assertEqual([sorted(d.items()) for d in dicts],
[[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]])
def test_extra_method_select_argument_with_dashes(self):
# If you use 'select' with extra() and names containing dashes on a
# query that's *not* a values() query, those extra 'select' values
# will silently be ignored.
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
articles = Article.objects.filter(
pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
self.assertEqual(articles[0].undashedvalue, 2)
def test_create_relation_with_ugettext_lazy(self):
"""
Test that ugettext_lazy objects work when saving model instances
through various methods. Refs #10498.
"""
notlazy = 'test'
lazy = ugettext_lazy(notlazy)
Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# test that assign + save works with Promise objects
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
# test .update()
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# still test bulk_create()
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
def test_emptyqs(self):
# Can't be instantiated
with self.assertRaises(TypeError):
EmptyQuerySet()
self.assertIsInstance(Article.objects.none(), EmptyQuerySet)
def test_emptyqs_values(self):
# test for #15959
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
qs = Article.objects.none().values_list('pk')
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(len(qs), 0)
def test_emptyqs_customqs(self):
# A hacky test for custom QuerySet subclass - refs #17271
Article.objects.create(headline='foo', pub_date=datetime.now())
class CustomQuerySet(QuerySet):
def do_something(self):
return 'did something'
qs = Article.objects.all()
qs.__class__ = CustomQuerySet
qs = qs.none()
with self.assertNumQueries(0):
self.assertEqual(len(qs), 0)
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(qs.do_something(), 'did something')
def test_emptyqs_values_order(self):
# Tests for ticket #17712
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().filter(
id__in=Article.objects.values_list('id', flat=True))), 0)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_emptyqs_distinct(self):
# Tests for #19426
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)
def test_ticket_20278(self):
sr = SelfRef.objects.create()
with self.assertRaises(ObjectDoesNotExist):
SelfRef.objects.get(selfref=sr)
def test_eq(self):
self.assertEqual(Article(id=1), Article(id=1))
self.assertNotEqual(Article(id=1), object())
self.assertNotEqual(object(), Article(id=1))
a = Article()
self.assertEqual(a, a)
self.assertNotEqual(Article(), a)
def test_hash(self):
# Value based on PK
self.assertEqual(hash(Article(id=1)), hash(1))
with self.assertRaises(TypeError):
# No PK value -> unhashable (because save() would then change
# hash)
hash(Article())
class ModelLookupTest(TestCase):
def setUp(self):
# Create an Article.
self.a = Article(
id=None,
headline='Area woman programs in Python',
pub_date=datetime(2005, 7, 28),
)
# Save it into the database. You have to call save() explicitly.
self.a.save()
def test_all_lookup(self):
# Change values by changing the attributes, then calling save().
self.a.headline = 'Area man programs in Python'
self.a.save()
# Article.objects.all() returns all the articles in the database.
self.assertQuerysetEqual(Article.objects.all(),
['<Article: Area man programs in Python>'])
def test_rich_lookup(self):
# Django provides a rich database lookup API.
self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline__startswith='Area woman'), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)
def test_equal_lookup(self):
# The "__exact" lookup type can be omitted, as a shortcut.
self.assertEqual(Article.objects.get(id=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline='Area woman programs in Python'), self.a)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005),
['<Article: Area woman programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2004),
[],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005, pub_date__month=7),
['<Article: Area woman programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=5),
['<Article: Area woman programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=6),
[],
)
def test_does_not_exist(self):
# Django raises an Article.DoesNotExist exception for get() if the
# parameters don't match any object.
six.assertRaisesRegex(
self,
ObjectDoesNotExist,
"Article matching query does not exist.",
Article.objects.get,
id__exact=2000,
)
# To avoid dict-ordering related errors check only one lookup
# in single assert.
self.assertRaises(
ObjectDoesNotExist,
Article.objects.get,
pub_date__year=2005,
pub_date__month=8,
)
six.assertRaisesRegex(
self,
ObjectDoesNotExist,
"Article matching query does not exist.",
Article.objects.get,
pub_date__week_day=6,
)
def test_lookup_by_primary_key(self):
# Lookup by a primary key is the most common case, so Django
# provides a shortcut for primary-key exact lookups.
# The following is identical to articles.get(id=a.id).
self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
# pk can be used as a shortcut for the primary key name in any query.
self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]),
["<Article: Area woman programs in Python>"])
# Model instances of the same type and same ID are considered equal.
a = Article.objects.get(pk=self.a.id)
b = Article.objects.get(pk=self.a.id)
self.assertEqual(a, b)
def test_too_many(self):
# Create a very similar object
a = Article(
id=None,
headline='Area man programs in Python',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.count(), 2)
# Django raises an Article.MultipleObjectsReturned exception if the
# lookup matches more than one object
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
headline__startswith='Area',
)
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
pub_date__year=2005,
)
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned 2!",
Article.objects.get,
pub_date__year=2005,
pub_date__month=7,
)
class ConcurrentSaveTests(TransactionTestCase):
available_apps = ['basic']
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_concurrent_delete_with_save(self):
"""
Test fetching, deleting and finally saving an object - we should get
an insert in this case.
"""
a = Article.objects.create(headline='foo', pub_date=datetime.now())
exceptions = []
def deleter():
try:
# Do not delete a directly - doing so alters its state.
Article.objects.filter(pk=a.pk).delete()
except Exception as e:
exceptions.append(e)
finally:
connections[DEFAULT_DB_ALIAS].close()
self.assertEqual(len(exceptions), 0)
t = threading.Thread(target=deleter)
t.start()
t.join()
a.save()
self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(TestCase):
QUERYSET_PROXY_METHODS = [
'none',
'count',
'dates',
'datetimes',
'distinct',
'extra',
'get',
'get_or_create',
'update_or_create',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'earliest',
'latest',
'first',
'last',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
'_insert',
'_update',
'raw',
]
def test_manager_methods(self):
"""
This test ensures that the correct set of methods from `QuerySet`
are copied onto `Manager`.
It's particularly useful to prevent accidentally leaking new methods
into `Manager`. New `QuerySet` methods that should also be copied onto
`Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
"""
self.assertEqual(
sorted(BaseManager._get_queryset_methods(QuerySet).keys()),
sorted(self.QUERYSET_PROXY_METHODS),
)
class SelectOnSaveTests(TestCase):
def test_select_on_save(self):
a1 = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(1):
a1.save()
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(2):
asos.save()
with self.assertNumQueries(1):
asos.save(force_update=True)
Article.objects.all().delete()
with self.assertRaises(DatabaseError):
with self.assertNumQueries(1):
asos.save(force_update=True)
def test_select_on_save_lying_update(self):
"""
Test that select_on_save works correctly if the database
doesn't return correct information about matched rows from
UPDATE.
"""
# Change the manager to not return "row matched" for update().
# We are going to change the Article's _base_manager class
# dynamically. This is a bit of a hack, but it seems hard to
# test this properly otherwise. We change Article's manager, rather
# than ArticleSelectOnSave's, because proxy models use their parent
# model's _base_manager.
orig_class = Article._base_manager.__class__
class FakeQuerySet(QuerySet):
# Make sure the _update method below is in fact called.
called = False
def _update(self, *args, **kwargs):
FakeQuerySet.called = True
super(FakeQuerySet, self)._update(*args, **kwargs)
return 0
class FakeManager(orig_class):
def get_queryset(self):
return FakeQuerySet(self.model)
try:
Article._base_manager.__class__ = FakeManager
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(3):
asos.save()
self.assertTrue(FakeQuerySet.called)
# This is not wanted behavior, but this is how Django has always
# behaved for databases that do not return correct information
# about matched rows for UPDATE.
with self.assertRaises(DatabaseError):
asos.save(force_update=True)
with self.assertRaises(DatabaseError):
asos.save(update_fields=['pub_date'])
finally:
Article._base_manager.__class__ = orig_class
class ModelRefreshTests(TestCase):
def _truncate_ms(self, val):
# MySQL < 5.6.4 removes microseconds from the datetimes which can cause
# problems when comparing the original value to that loaded from DB
return val - timedelta(microseconds=val.microsecond)
def test_refresh(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.filter(pk=a.pk).update(headline='new headline')
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.headline, 'new headline')
orig_pub_date = a.pub_date
new_pub_date = a.pub_date + timedelta(10)
Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db(fields=['headline'])
self.assertEqual(a.headline, 'new headline 2')
self.assertEqual(a.pub_date, orig_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.pub_date, new_pub_date)
def test_refresh_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create()
s3 = SelfRef.objects.create(selfref=s1)
s3_copy = SelfRef.objects.get(pk=s3.pk)
s3_copy.selfref.touched = True
s3.selfref = s2
s3.save()
with self.assertNumQueries(1):
s3_copy.refresh_from_db()
with self.assertNumQueries(1):
# The old related instance was thrown away (the selfref_id has
# changed). It needs to be reloaded on access, so one query
# executed.
self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
self.assertEqual(s3_copy.selfref, s2)
def test_refresh_unsaved(self):
pub_date = self._truncate_ms(datetime.now())
a = Article.objects.create(pub_date=pub_date)
a2 = Article(id=a.pk)
with self.assertNumQueries(1):
a2.refresh_from_db()
self.assertEqual(a2.pub_date, pub_date)
self.assertEqual(a2._state.db, "default")
def test_refresh_no_fields(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
with self.assertNumQueries(0):
a.refresh_from_db(fields=[])
class TestRelatedObjectDeprecation(TestCase):
def test_field_related_deprecation(self):
field = SelfRef._meta.get_field('selfref')
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
self.assertIsInstance(field.related, ForeignObjectRel)
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns.pop().message),
'Usage of field.related has been deprecated. Use field.rel instead.'
)
| bsd-3-clause |
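The SelectOnSaveTests above rely on a model that opts in through a Meta flag; a hedged sketch of what the accompanying .models module presumably declares for it (the comment in test_select_on_save_lying_update about proxy models suggests a proxy of Article):

class ArticleSelectOnSave(Article):
    class Meta:
        proxy = True
        select_on_save = True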
nlamirault/python-freeboxclient | doc/source/conf.py | 1 | 8617 | #
# Copyright 2013 Nicolas Lamirault <nicolas.lamirault@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.doctest',
              'sphinx.ext.intersphinx',
              'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'FreeboxOS Command Line Client'
copyright = u'2013 Nicolas Lamirault'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
version_info = '0.1.0'
#
# The short X.Y version.
#version = version_info.version_string()
version = version_info
# The full version, including alpha/beta/rc tags.
#release = version_info.release_string()
release = version_info
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FreeboxOSCommandLineClientdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'FreeboxOSCommandLineClient.tex',
u'FreeboxOS Command Line Client Documentation',
u'Nicolas Lamirault', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
'man/freeboxos',
'freeboxos',
u'FreeboxOS Command Line Client',
[u'Nicolas Lamirault'],
1,
),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'FreeboxOSCommandLineClient',
u'FreeboxOS Command Line Client Documentation',
u'FreeboxOS', 'FreeboxOSCommandLineClient',
'Command line client for FreeboxOS.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
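# With this mapping, Sphinx can resolve cross-references into the Python
# standard library documentation. An illustrative reST usage (the target
# below is only an example): :py:func:`os.path.join` would link to the
# os.path docs in the mapped inventory.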
| apache-2.0 |
cindyyu/kuma | kuma/search/models.py | 22 | 7642 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.dispatch import receiver
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.text import slugify
from elasticsearch.exceptions import NotFoundError
from taggit.managers import TaggableManager
from kuma.core.urlresolvers import reverse
from kuma.wiki.search import WikiDocumentType
from .jobs import AvailableFiltersJob
from .managers import IndexManager, FilterManager
class Index(models.Model):
"""
Model that stores metadata about search indexes, including a flag to
promote one index to be picked up as the "current" one.
"""
created_at = models.DateTimeField(default=timezone.now)
name = models.CharField(max_length=30, blank=True, null=True,
help_text='The search index name, set to '
'the created date when left empty')
promoted = models.BooleanField(default=False)
populated = models.BooleanField(default=False)
objects = IndexManager()
class Meta:
verbose_name = 'Index'
verbose_name_plural = 'Indexes'
ordering = ['-created_at']
def save(self, *args, **kwargs):
if not self.name:
self.name = self.created_at.strftime('%Y-%m-%d-%H-%M-%S')
super(Index, self).save(*args, **kwargs)
def delete_if_exists(self):
es = WikiDocumentType.get_connection()
try:
es.indices.delete(self.prefixed_name)
except NotFoundError:
# Can ignore this since it indicates the index doesn't exist
# and therefore there's nothing to delete.
pass
def __unicode__(self):
return self.name
@cached_property
def successor(self):
try:
return self.get_next_by_created_at()
except (Index.DoesNotExist, ValueError):
return None
@cached_property
def prefixed_name(self):
"The name to use for the search index in ES"
return '%s-%s' % (settings.ES_INDEX_PREFIX, self.name)
def populate(self):
return WikiDocumentType.reindex_all(index=self, chunk_size=500)
def record_outdated(self, instance):
if self.successor:
return OutdatedObject.objects.create(index=self.successor,
content_object=instance)
def promote(self):
from kuma.wiki.tasks import index_documents
# Index all outdated documents to this index.
outdated_ids = []
for outdated_object in self.outdated_objects.all():
instance = outdated_object.content_object
outdated_ids.append(instance.id)
if outdated_ids:
index_documents.delay(outdated_ids, self.pk)
# Clear outdated.
self.outdated_objects.all().delete()
# Promote this index.
self.promoted = True
self.save()
# Allow only a single index to be promoted.
Index.objects.exclude(pk=self.pk).update(promoted=False)
def demote(self):
self.promoted = False
self.save()
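# A minimal usage sketch of the promotion flow (illustrative, not part of
# the original module; assumes a configured Elasticsearch connection and a
# populated database):
#
#   new_index = Index.objects.create()  # name defaults to the creation time
#   new_index.populate()                # bulk-index all wiki documents
#   new_index.promote()                 # reindex outdated docs, then switch
#   # promote() also demotes every other index, so only one is "current".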
@receiver(models.signals.post_delete,
sender=Index, dispatch_uid='search.index.delete')
def delete_index(**kwargs):
index = kwargs.get('instance', None)
if index is not None:
index.delete_if_exists()
class OutdatedObject(models.Model):
index = models.ForeignKey(Index, related_name='outdated_objects')
created_at = models.DateTimeField(default=timezone.now)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class FilterGroup(models.Model):
"""
A way to group different kinds of filters (e.g. topics vs. skill levels).
"""
name = models.CharField(max_length=255)
slug = models.CharField(max_length=255, blank=True, null=True,
help_text='the slug to be used as the name of the '
'query parameter in the search URL')
order = models.IntegerField(default=1,
help_text='An integer defining the order '
'in which the filter group shows '
'up in the sidebar')
class Meta:
ordering = ('-order', 'name')
unique_together = (
('name', 'slug'),
)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
super(FilterGroup, self).save(*args, **kwargs)
def __unicode__(self):
return self.name
class Filter(models.Model):
"""
The model to store custom search filters in the database. This is
used to dynamically tweak the search filters available to users.
"""
OPERATOR_AND = 'AND'
OPERATOR_OR = 'OR'
OPERATOR_CHOICES = (
(OPERATOR_OR, OPERATOR_OR),
(OPERATOR_AND, OPERATOR_AND),
)
OPERATORS = {
OPERATOR_OR: 'or',
OPERATOR_AND: 'and',
}
name = models.CharField(max_length=255, db_index=True,
help_text='the English name of the filter '
'to be shown in the frontend UI')
slug = models.CharField(max_length=255, db_index=True,
help_text='the slug to be used as a query '
'parameter in the search URL')
shortcut = models.CharField(max_length=255, db_index=True,
null=True, blank=True,
help_text='the name of the shortcut to '
'show in the command and query UI. '
'e.g. fxos')
group = models.ForeignKey(FilterGroup, related_name='filters',
help_text='E.g. "Topic", "Skill level" etc')
tags = TaggableManager(help_text='A comma-separated list of tags. '
'If more than one tag is given, an '
'OR query is executed')
operator = models.CharField(max_length=3, choices=OPERATOR_CHOICES,
default=OPERATOR_OR,
help_text='The logical operator to use '
'if more than one tag is given')
enabled = models.BooleanField(default=True,
help_text='Whether this filter is shown '
'to users or not.')
visible = models.BooleanField(default=True,
help_text='Whether this filter is shown '
'at public places, e.g. the '
'command and query UI')
objects = FilterManager()
class Meta(object):
unique_together = (
('name', 'slug'),
)
def __unicode__(self):
return self.name
def get_absolute_url(self):
path = reverse('search', locale=settings.LANGUAGE_CODE)
return '%s%s?%s=%s' % (settings.SITE_URL, path,
self.group.slug, self.slug)
@receiver(models.signals.post_save, sender=Filter)
@receiver(models.signals.pre_delete, sender=Filter)
def invalidate_filter_cache(sender, instance, **kwargs):
AvailableFiltersJob().invalidate()
| mpl-2.0 |
jamestwebber/scipy | scipy/spatial/transform/tests/test_rotation_groups.py | 3 | 5626 | from __future__ import division, print_function, absolute_import
import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.spatial.transform import Rotation
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
from scipy.constants import golden as phi
from scipy.spatial import cKDTree
TOL = 1E-12
NS = range(1, 13)
NAMES = ["I", "O", "T"] + ["C%d" % n for n in NS] + ["D%d" % n for n in NS]
SIZES = [60, 24, 12] + list(NS) + [2 * n for n in NS]
def _calculate_rmsd(P, Q):
"""Calculates the root-mean-square distance between the points of P and Q.
The distance is taken as the minimum over all possible matchings. It is
zero if P and Q are identical and non-zero if not.
"""
distance_matrix = cdist(P, Q, metric='sqeuclidean')
matching = linear_sum_assignment(distance_matrix)
return np.sqrt(distance_matrix[matching].sum())
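# Illustrative behaviour (not part of the original test suite): with
# P = [[0, 0], [1, 0]] and Q = [[1, 0], [0, 0]] the optimal matching pairs
# up identical points, so _calculate_rmsd(P, Q) == 0, whereas translating
# Q by one unit along x yields a non-zero distance.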
def _generate_pyramid(n, axis):
thetas = np.linspace(0, 2 * np.pi, n + 1)[:-1]
P = np.vstack([np.zeros(n), np.cos(thetas), np.sin(thetas)]).T
P = np.concatenate((P, [[1, 0, 0]]))
return np.roll(P, axis, axis=1)
def _generate_prism(n, axis):
thetas = np.linspace(0, 2 * np.pi, n + 1)[:-1]
bottom = np.vstack([-np.ones(n), np.cos(thetas), np.sin(thetas)]).T
top = np.vstack([+np.ones(n), np.cos(thetas), np.sin(thetas)]).T
P = np.concatenate((bottom, top))
return np.roll(P, axis, axis=1)
def _generate_icosahedron():
x = np.array([[0, -1, -phi],
[0, -1, +phi],
[0, +1, -phi],
[0, +1, +phi]])
return np.concatenate([np.roll(x, i, axis=1) for i in range(3)])
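# The 12 vertices above are the cyclic coordinate permutations of
# (0, +/-1, +/-phi), where phi is the golden ratio imported from
# scipy.constants.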
def _generate_octahedron():
return np.array([[-1, 0, 0], [+1, 0, 0], [0, -1, 0],
[0, +1, 0], [0, 0, -1], [0, 0, +1]])
def _generate_tetrahedron():
return np.array([[1, 1, 1], [1, -1, -1], [-1, 1, -1], [-1, -1, 1]])
@pytest.mark.parametrize("name", [-1, None, True, np.array(['C3'])])
def test_group_type(name):
with pytest.raises(ValueError,
match="must be a string"):
Rotation.create_group(name)
@pytest.mark.parametrize("name", ["Q", " ", "CA", "C ", "DA", "D ", "I2", ""])
def test_group_name(name):
with pytest.raises(ValueError,
match="must be one of 'I', 'O', 'T', 'Dn', 'Cn'"):
Rotation.create_group(name)
@pytest.mark.parametrize("name", ["C0", "D0"])
def test_group_order_positive(name):
with pytest.raises(ValueError,
match="Group order must be positive"):
Rotation.create_group(name)
@pytest.mark.parametrize("axis", ['A', 'b', 0, 1, 2, 4, False, None])
def test_axis_valid(axis):
with pytest.raises(ValueError,
match="`axis` must be one of"):
Rotation.create_group("C1", axis)
def test_icosahedral():
"""The icosahedral group fixes the rotations of an icosahedron. Here we
test that the icosahedron is invariant after application of the elements
of the rotation group."""
P = _generate_icosahedron()
for g in Rotation.create_group("I"):
g = Rotation.from_quat(g.as_quat())
assert _calculate_rmsd(P, g.apply(P)) < TOL
def test_octahedral():
"""Test that the octahedral group correctly fixes the rotations of an
octahedron."""
P = _generate_octahedron()
for g in Rotation.create_group("O"):
assert _calculate_rmsd(P, g.apply(P)) < TOL
def test_tetrahedral():
"""Test that the tetrahedral group correctly fixes the rotations of a
tetrahedron."""
P = _generate_tetrahedron()
for g in Rotation.create_group("T"):
assert _calculate_rmsd(P, g.apply(P)) < TOL
@pytest.mark.parametrize("n", NS)
@pytest.mark.parametrize("axis", 'XYZ')
def test_dicyclic(n, axis):
"""Test that the dicyclic group correctly fixes the rotations of a
prism."""
P = _generate_prism(n, axis='XYZ'.index(axis))
for g in Rotation.create_group("D%d" % n, axis=axis):
assert _calculate_rmsd(P, g.apply(P)) < TOL
@pytest.mark.parametrize("n", NS)
@pytest.mark.parametrize("axis", 'XYZ')
def test_cyclic(n, axis):
"""Test that the cyclic group correctly fixes the rotations of a
pyramid."""
P = _generate_pyramid(n, axis='XYZ'.index(axis))
for g in Rotation.create_group("C%d" % n, axis=axis):
assert _calculate_rmsd(P, g.apply(P)) < TOL
@pytest.mark.parametrize("name, size", zip(NAMES, SIZES))
def test_group_sizes(name, size):
assert len(Rotation.create_group(name)) == size
@pytest.mark.parametrize("name, size", zip(NAMES, SIZES))
def test_group_no_duplicates(name, size):
g = Rotation.create_group(name)
kdtree = cKDTree(g.as_quat())
assert len(kdtree.query_pairs(1E-3)) == 0
@pytest.mark.parametrize("name, size", zip(NAMES, SIZES))
def test_group_symmetry(name, size):
g = Rotation.create_group(name)
q = np.concatenate((-g.as_quat(), g.as_quat()))
distance = np.sort(cdist(q, q))
deltas = np.max(distance, axis=0) - np.min(distance, axis=0)
assert (deltas < TOL).all()
@pytest.mark.parametrize("name", NAMES)
def test_reduction(name):
"""Test that the elements of the rotation group are correctly
mapped onto the identity rotation."""
g = Rotation.create_group(name)
f = g.reduce(g)
assert_array_almost_equal(f.magnitude(), np.zeros(len(g)))
@pytest.mark.parametrize("name", NAMES)
def test_single_reduction(name):
g = Rotation.create_group(name)
f = g[-1].reduce(g)
assert_array_almost_equal(f.magnitude(), 0)
assert f.as_quat().shape == (4,)
| bsd-3-clause |
StuartJSquires/markov_slackbot | markov_slackbot/message_interpreter.py | 1 | 5241 | # -*- coding: utf-8 -*-
from itertools import chain
import logging
import re
class MessageInterpreter(object):
def __init__(self, user_id, username, command_names, external_texts):
self.logger = logging.getLogger(__name__)
self.user_id = user_id
self.username = username
self.external_texts = external_texts
self.command_regexes = self.compile_command_regexes(command_names)
self.master_regex = re.compile('master|slack')
self.channel_regex = re.compile(r'<#(C[A-Z0-9]{8})>')
self.user_regex = re.compile(r'<@(U[A-Z0-9]{8})>')
self.external_text_regexes = self.compile_external_text_regexes(
external_texts)
def compile_command_regexes(self, command_names):
command_regexes = [re.compile('({0})'.format(command))
for command in command_names]
return command_regexes
def compile_external_text_regexes(self, external_texts):
external_text_regexes = [re.compile('\\$({0})'.format(external_text))
for external_text in external_texts]
return external_text_regexes
def find_commands(self, message_text):
"""Finds all commands in message_text.
:param message_text: the message to search.
:returns command_list: all commands that were found.
"""
self.logger.debug('Finding commands...')
commands = [self.find_command(command_regex, message_text)
for command_regex in self.command_regexes]
command_list = [command for command in commands if command is not None]
self.logger.debug('Found commands: {0}'.format(command_list))
return command_list
def find_command(self, command_regex, message_text):
"""Finds a specific command in message_text.
:param command_regex: the regex for the command.
:param message_text: the message to search.
:returns command_match: the matching command, or None if not found.
"""
self.logger.debug(
'Finding command with regex: {0}'.format(command_regex))
command_matches = command_regex.findall(message_text)
if command_matches:
command_match = command_matches[0]
self.logger.debug('Found command: {0}'.format(command_match))
else:
command_match = None
self.logger.debug('Did not find command.')
return command_match
def find_master(self, message_text):
"""Finds all master mentions.
:param message_text: the message to search.
:returns masters: a list of master mentions.
"""
self.logger.debug('Finding master mentions.')
masters = self.master_regex.findall(message_text)
self.logger.debug('Found master mentions: {0}'.format(masters))
return masters
def find_channels(self, message_text):
"""Finds all channel mentions.
:param message_text: the message to search.
:returns channels: a list of channel mentions.
"""
self.logger.debug('Finding channel mentions.')
channels = self.channel_regex.findall(message_text)
self.logger.debug('Found channel mentions: {0}'.format(channels))
return channels
def find_users(self, message_text):
"""Finds all user mentions.
:param message_text: the message to search.
:returns users: a list of user mentions.
"""
self.logger.debug('Finding user mentions.')
users = self.user_regex.findall(message_text)
self.logger.debug('Found user mentions: {0}'.format(users))
return users
def find_external_texts(self, message_text):
"""Find all external text mentions.
:param message_text: the message to search.
:returns external_texts: a list of external text mentions.
"""
self.logger.debug('Finding external text mentions.')
external_texts = []
spaceless_message = message_text.replace(' ', '')
matches = [regex.findall(spaceless_message)
for regex in self.external_text_regexes]
external_texts = list(chain.from_iterable(matches))
self.logger.debug(
'Found external text mentions: {0}'.format(external_texts))
return external_texts
def is_respondable(self, message):
"""Determines if the bot should reply to the message
:param message: A message to qualify for a response.
:returns respondable: True or False
"""
if 'type' not in message:
return False
if 'text' not in message:
return False
if message['type'] != 'message':
return False
# Prevents loops from multiple instances.
if 'user' in message and message['user'] == self.user_id:
return False
# Did the message mention the bot?
if '<@{0}>'.format(self.user_id) in message['text']:
return True
if self.username.lower() in message['text'].lower():
return True
# Direct message
if message['channel'].startswith("D"):
return True
return False
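# A minimal usage sketch (the ids, username, command names and external
# text keys below are invented for illustration):
#
#   interpreter = MessageInterpreter('U12345678', 'markov',
#                                    ['help', 'learn'], ['shakespeare'])
#   interpreter.find_users('hi <@U87654321>')            # -> ['U87654321']
#   interpreter.find_external_texts('$shakespeare now')  # -> ['shakespeare']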
| mit |
alexus37/AugmentedRealityChess | pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/GL/EXT/stencil_two_side.py | 9 | 1147 | '''OpenGL extension EXT.stencil_two_side
This module customises the behaviour of the
OpenGL.raw.GL.EXT.stencil_two_side to provide a more
Python-friendly API
Overview (from the spec)
This extension provides two-sided stencil testing where the
stencil-related state (stencil operations, reference value, compare
mask, and write mask) may be different for front- and back-facing
polygons. Two-sided stencil testing may improve the performance
of stenciled shadow volume and Constructive Solid Geometry (CSG)
rendering algorithms.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/stencil_two_side.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.stencil_two_side import *
from OpenGL.raw.GL.EXT.stencil_two_side import _EXTENSION_NAME
def glInitStencilTwoSideEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | mit |
jorgefernandes/lighthouseads | node_modules/node-gyp/gyp/tools/pretty_sln.py | 1831 | 5099 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each projects in alphabetical order with their
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
# If all dependencies are already built, we can build this project;
# otherwise we build the missing dependencies first.
# Note: this is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print project
built.append(project)
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
# Regular expressions that matches the SLN format.
# The first line of a project definition.
begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
r'}"\) = "(.*)", "(.*)", "(.*)"$')
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile(
r'ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack: skip icu because its diff is too noisy to be useful.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
# Replace each dependency's clsid with the corresponding project name.
for project in dependencies:
# For each dependency of this project
new_dep_array = []
for dep in dependencies[project]:
# Look for the project name matching this clsid
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
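# Illustrative shape of the return value (project names and GUIDs are
# invented): projects maps name -> [path, clsid, original path], e.g.
#   projects = {'base': ['base.vcproj', '{GUID-1}', 'base_gyp.vcproj']}
#   dependencies = {'base': [], 'net': ['base']}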
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, _) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
for project in projects:
print "-------------------------------------"
print "-------------------------------------"
print project
print project
print project
print "-------------------------------------"
print "-------------------------------------"
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
# Check that we were given at least one parameter (the .sln path).
if len(sys.argv) < 2:
print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
return 1
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
return 0
if __name__ == '__main__':
sys.exit(main())
| mit |
neavouli/yournextrepresentative | candidates/management/commands/candidates_import_statements_of_persons_nominated.py | 3 | 7018 | from __future__ import print_function, unicode_literals
import errno
import hashlib
import magic
import mimetypes
import os
from os.path import dirname, join, exists
import requests
from django.core.management.base import BaseCommand, CommandError
from django.core.files.storage import FileSystemStorage
from official_documents.models import OfficialDocument
from elections.models import Election
from popolo.models import Post, Area
from compat import BufferDictReader
allowed_mime_types = set([
b'application/pdf',
b'application/msword',
b'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
])
PDF_COLUMN_HEADERS_TO_TRY = (
'Statement of Persons Nominated (SOPN) URL',
'Link to PDF',
)
POST_OR_AREA_COLUMN_HEADERS_TO_TRY = (
'Region',
'Constituency',
'Ward',
)
def download_file_cached(url):
url_hash = hashlib.md5(url.encode('utf-8')).hexdigest() # md5 needs bytes, not unicode
directory = join(dirname(__file__), '.noms-cache')
try:
os.mkdir(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
filename = join(directory, url_hash)
if exists(filename):
return filename
r = requests.get(url)
with open(filename, 'wb') as f: # r.content is bytes, so write in binary mode
f.write(r.content)
return filename
def get_column_header(possible_column_headers, row):
return [
ch for ch in possible_column_headers
if ch in row
][0]
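# For example, get_column_header(PDF_COLUMN_HEADERS_TO_TRY, row) returns
# the first of those candidate headers that is present in the CSV row;
# note that it raises IndexError if none of them match.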
class Command(BaseCommand):
help = "Import official documents for posts from a URL to a CSV file"
args = "<CSV_URL>"
def add_arguments(self, parser):
parser.add_argument('--delete-existing', action='store_true')
parser.add_argument('--election')
def handle(self, *args, **options):
csv_url, = args
override_election = None
override_election_slug = options['election']
if override_election_slug:
try:
override_election = Election.objects.get(
slug=override_election_slug
)
except Election.DoesNotExist:
msg = 'No election with slug {0} found'
raise CommandError(msg.format(override_election_slug))
election_name_to_election = {}
mime_type_magic = magic.Magic(mime=True)
storage = FileSystemStorage()
r = requests.get(csv_url)
r.encoding = 'utf-8'
reader = BufferDictReader(r.text)
for row in reader:
post_or_area_header = get_column_header(
POST_OR_AREA_COLUMN_HEADERS_TO_TRY, row
)
name = row[post_or_area_header]
if not name:
continue
name = name.strip()
# If there was no election specified, try to find it from
# the 'Election' column (which has the election name):
if override_election_slug:
election = override_election
else:
if 'Election' not in row:
raise CommandError("There is no election name in the 'Election' column, so you must supply an election slug with --election")
election_name = row['Election']
election = election_name_to_election.get(election_name)
if election is None:
election = Election.objects.get(name=election_name)
election_name_to_election[election_name] = election
try:
post = Post.objects.get(
label=name,
extra__elections=election,
)
except Post.DoesNotExist:
msg = "Failed to find the post {0}, guessing it might be the area name instead"
print(msg.format(name))
# If the post name isn't there, try getting it from
# the area:
try:
area = Area.objects.get(name=name)
except Area.DoesNotExist:
print("Failed to find area for {0}".format(name))
continue
try:
post = Post.objects.get(area=area)
except Post.DoesNotExist:
print("Failed to find post with for {0}".format(name))
continue
# Check that the post is actually valid for this election:
if election not in post.extra.elections.all():
msg = "The post {post} wasn't in the election {election}"
raise CommandError(msg.format(post=post.label, election=election.name))
document_url_column = get_column_header(PDF_COLUMN_HEADERS_TO_TRY, row)
document_url = row[document_url_column]
if not document_url:
print("No URL for {0}".format(name))
continue
existing_documents = OfficialDocument.objects.filter(
document_type=OfficialDocument.NOMINATION_PAPER,
post_id=post,
)
if existing_documents.count() > 0:
if options['delete_existing']:
print("Removing existing documents")
existing_documents.delete()
else:
print("Skipping {0} since it already had documents".format(name))
continue
try:
downloaded_filename = download_file_cached(document_url)
except requests.exceptions.ConnectionError:
print("Connection failed for {0}".format(name))
print("The URL was:", document_url)
continue
except requests.exceptions.MissingSchema:
# This is probably someone putting notes in the URL
# column, so ignore:
print("Probably not a document URL for {0}: {1}".format(
name, document_url
))
continue
mime_type = mime_type_magic.from_file(downloaded_filename)
extension = mimetypes.guess_extension(mime_type)
if mime_type not in allowed_mime_types:
print("Ignoring unknown MIME type {0} for {1}".format(
mime_type,
name,
))
continue
filename = "official_documents/{post_id}/statement-of-persons-nominated{extension}".format(
post_id=post.extra.slug,
extension=extension,
)
with open(downloaded_filename, 'rb') as f:
storage_filename = storage.save(filename, f)
OfficialDocument.objects.create(
document_type=OfficialDocument.NOMINATION_PAPER,
uploaded_file=storage_filename,
election=election,
post=post,
source_url=document_url
)
message = "Successfully added the Statement of Persons Nominated for {0}"
print(message.format(name))
| agpl-3.0 |
jedie/django-cms-tools | django_cms_tools/cms_apps_helper.py | 1 | 2388 |
"""
:create: 2018 by Jens Diemer
:copyleft: 2018 by the django-cms-tools team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
import logging
from django.utils.translation import ugettext_lazy as _
from cms.models import Page
from cms.utils.i18n import force_language
try:
from django.urls import reverse, NoReverseMatch
except ImportError: # Django < 1.10 pragma: no cover
from django.core.urlresolvers import reverse, NoReverseMatch
log = logging.getLogger(__name__)
class CMSAppHelperMixin:
"""
e.g.:
@apphook_pool.register
class FoobarApp(CMSAppHelperMixin, CMSConfigApp):
app_name = "foobar"
name = "Foobar"
urls = ["foobar.urls"]
app_config = FoobarConfig
def get_foobar_app():
app_name = FoobarApp.__name__ # FoobarApp
apphook = apphook_pool.get_apphook(app_name=app_name)
return apphook
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.app_hook_name = "%sApp" % self.name # e.g.: "FooApp"
def get_app_page_url(self, language):
app_page = self.get_app_page()
return app_page.get_absolute_url(language=language)
def get_absolute_url(self, view_path, reverse_kwargs, language):
app_page = self.get_app_page()
if not app_page:
return "#"
namespace = app_page.application_namespace
with force_language(language):
viewname = "%s:%s" % (namespace, view_path)
try:
return reverse(viewname, kwargs=reverse_kwargs)
except NoReverseMatch as err:
log.error("Can't reverse %r with '%s': %s", viewname, repr(reverse_kwargs), err)
return "#"
def get_app_page(self):
page_qs = Page.objects.public()
try:
app_page = page_qs.get(application_urls=self.app_hook_name)
except Page.DoesNotExist as err:
log.error("Can't get page with application_urls=%r: %s", self.app_hook_name, err)
app_page = None
return app_page
def add_view_on_page_toolbar_links(self, menu, current_lang):
menu.add_link_item(
name=_("View on Site"),
url=self.get_app_page_url(language=current_lang),
)
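# A minimal usage sketch (the view name and kwargs are hypothetical; it
# builds on the FoobarApp / get_foobar_app() example from the class
# docstring, attached to a published CMS page):
#
#   app = get_foobar_app()
#   url = app.get_absolute_url('foobar-detail', {'pk': 1}, 'en')
#   page_url = app.get_app_page_url(language='en')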
| gpl-3.0 |
ryfeus/lambda-packs | Keras_tensorflow/source/numpy/ma/tests/test_core.py | 10 | 167619 | # pylint: disable-msg=W0401,W0511,W0611,W0612,W0614,R0201,E1102
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
from __future__ import division, absolute_import, print_function
__author__ = "Pierre GF Gerard-Marchant"
import warnings
import pickle
import operator
import itertools
from functools import reduce
import numpy as np
import numpy.ma.core
import numpy.core.fromnumeric as fromnumeric
import numpy.core.umath as umath
from numpy.testing import TestCase, run_module_suite, assert_raises
from numpy import ndarray
from numpy.compat import asbytes, asbytes_nested
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal,
assert_equal_records, fail_if_equal, assert_not_equal,
assert_mask_equal,
)
from numpy.ma.core import (
MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all,
allclose, allequal, alltrue, angle, anom, arange, arccos, arctan2,
arcsin, arctan, argsort, array, asarray, choose, concatenate,
conjugate, cos, cosh, count, default_fill_value, diag, divide, empty,
empty_like, equal, exp, flatten_mask, filled, fix_invalid,
flatten_structured_array, fromflex, getmask, getmaskarray, greater,
greater_equal, identity, inner, isMaskedArray, less, less_equal, log,
log10, make_mask, make_mask_descr, mask_or, masked, masked_array,
masked_equal, masked_greater, masked_greater_equal, masked_inside,
masked_less, masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_where, max, maximum,
maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply,
mvoid, nomask, not_equal, ones, outer, power, product, put, putmask,
ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt,
subtract, sum, take, tan, tanh, transpose, where, zeros,
)
pi = np.pi
class TestMaskedArray(TestCase):
# Base test class for MaskedArrays.
def setUp(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
def test_basicattributes(self):
# Tests some basic array attributes.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a.ndim, 1)
assert_equal(b.ndim, 1)
assert_equal(a.size, 3)
assert_equal(b.size, 3)
assert_equal(a.shape, (3,))
assert_equal(b.shape, (3,))
def test_basic0d(self):
# Checks masking a scalar
x = masked_array(0)
assert_equal(str(x), '0')
x = masked_array(0, mask=True)
assert_equal(str(x), str(masked_print_option))
x = masked_array(0, mask=False)
assert_equal(str(x), '0')
x = array(0, mask=1)
self.assertTrue(x.filled().dtype is x._data.dtype)
def test_basic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertTrue((xm - ym).filled(0).any())
fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))
s = x.shape
assert_equal(np.shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.dtype, x.dtype)
assert_equal(zm.dtype, z.dtype)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_array_equal(xm, xf)
assert_array_equal(filled(xm, 1.e20), xf)
assert_array_equal(x, xm)
def test_basic2d(self):
# Test of basic array creation and properties in 2 dimensions.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
for s in [(4, 3), (6, 2)]:
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
assert_equal(shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_equal(xm, xf)
assert_equal(filled(xm, 1.e20), xf)
assert_equal(x, xm)
def test_concatenate_basic(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# basic concatenation
assert_equal(np.concatenate((x, y)), concatenate((xm, ym)))
assert_equal(np.concatenate((x, y)), concatenate((x, y)))
assert_equal(np.concatenate((x, y)), concatenate((xm, y)))
assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x)))
def test_concatenate_alongaxis(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# Concatenation along an axis
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
assert_equal(xm.mask, np.reshape(m1, s))
assert_equal(ym.mask, np.reshape(m2, s))
xmym = concatenate((xm, ym), 1)
assert_equal(np.concatenate((x, y), 1), xmym)
assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask)
x = zeros(2)
y = array(ones(2), mask=[False, True])
z = concatenate((x, y))
assert_array_equal(z, [0, 0, 1, 1])
assert_array_equal(z.mask, [False, False, False, True])
z = concatenate((y, x))
assert_array_equal(z, [1, 1, 0, 0])
assert_array_equal(z.mask, [False, True, False, False])
def test_concatenate_flexible(self):
# Tests the concatenation on flexible arrays.
data = masked_array(list(zip(np.random.rand(10),
np.arange(10))),
dtype=[('a', float), ('b', int)])
test = concatenate([data[:5], data[5:]])
assert_equal_records(test, data)
def test_creation_ndmin(self):
# Check the use of ndmin
x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)
assert_equal(x.shape, (1, 3))
assert_equal(x._data, [[1, 2, 3]])
assert_equal(x._mask, [[1, 0, 0]])
def test_creation_ndmin_from_maskedarray(self):
# Make sure we're not losing the original mask w/ ndmin
x = array([1, 2, 3])
x[-1] = masked
xx = array(x, ndmin=2, dtype=float)
assert_equal(x.shape, x._mask.shape)
assert_equal(xx.shape, xx._mask.shape)
def test_creation_maskcreation(self):
# Tests how masks are initialized at the creation of Maskedarrays.
data = arange(24, dtype=float)
data[[3, 6, 15]] = masked
dma_1 = MaskedArray(data)
assert_equal(dma_1.mask, data.mask)
dma_2 = MaskedArray(dma_1)
assert_equal(dma_2.mask, dma_1.mask)
dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
fail_if_equal(dma_3.mask, dma_1.mask)
x = array([1, 2, 3], mask=True)
assert_equal(x._mask, [True, True, True])
x = array([1, 2, 3], mask=False)
assert_equal(x._mask, [False, False, False])
y = array([1, 2, 3], mask=x._mask, copy=False)
assert_(np.may_share_memory(x.mask, y.mask))
y = array([1, 2, 3], mask=x._mask, copy=True)
assert_(not np.may_share_memory(x.mask, y.mask))
def test_creation_with_list_of_maskedarrays(self):
# Tests creating a masked array from a list of masked arrays.
x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])
x.mask = nomask
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
self.assertTrue(data.mask is nomask)
def test_creation_from_ndarray_with_padding(self):
x = np.array([('A', 0)], dtype={'names':['f0','f1'],
'formats':['S4','i8'],
'offsets':[0,8]})
data = array(x) # used to fail due to 'V' padding field in x.dtype.descr
def test_asarray(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
xm.fill_value = -9999
xm._hardmask = True
xmm = asarray(xm)
assert_equal(xmm._data, xm._data)
assert_equal(xmm._mask, xm._mask)
assert_equal(xmm.fill_value, xm.fill_value)
assert_equal(xmm._hardmask, xm._hardmask)
def test_asarray_default_order(self):
# See Issue #6646
m = np.eye(3).T
self.assertFalse(m.flags.c_contiguous)
new_m = asarray(m)
self.assertTrue(new_m.flags.c_contiguous)
def test_asarray_enforce_order(self):
# See Issue #6646
m = np.eye(3).T
self.assertFalse(m.flags.c_contiguous)
new_m = asarray(m, order='C')
self.assertTrue(new_m.flags.c_contiguous)
def test_fix_invalid(self):
# Checks fix_invalid.
with np.errstate(invalid='ignore'):
data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])
data_fixed = fix_invalid(data)
assert_equal(data_fixed._data, [data.fill_value, 0., 1.])
assert_equal(data_fixed._mask, [1., 0., 1.])
def test_maskedelement(self):
# Test of masked element
x = arange(6)
x[1] = masked
self.assertTrue(str(masked) == '--')
self.assertTrue(x[1] is masked)
assert_equal(filled(x[1], 0), 0)
def test_set_element_as_object(self):
# Tests setting elements with object
a = empty(1, dtype=object)
x = (1, 2, 3, 4, 5)
a[0] = x
assert_equal(a[0], x)
self.assertTrue(a[0] is x)
import datetime
dt = datetime.datetime.now()
a[0] = dt
self.assertTrue(a[0] is dt)
def test_indexing(self):
# Tests conversions and indexing
x1 = np.array([1, 2, 4, 3])
x2 = array(x1, mask=[1, 0, 0, 0])
x3 = array(x1, mask=[0, 1, 0, 1])
x4 = array(x1)
# test conversion to strings
str(x2) # must not raise
repr(x2) # must not raise
assert_equal(np.sort(x1), sort(x2, endwith=False))
# tests of indexing
assert_(type(x2[1]) is type(x1[1]))
assert_(x1[1] == x2[1])
assert_(x2[0] is masked)
assert_equal(x1[2], x2[2])
assert_equal(x1[2:5], x2[2:5])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[2] = 9
x2[2] = 9
assert_equal(x1, x2)
x1[1:3] = 99
x2[1:3] = 99
assert_equal(x1, x2)
x2[1] = masked
assert_equal(x1, x2)
x2[1:3] = masked
assert_equal(x1, x2)
x2[:] = x1
x2[1] = masked
assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
assert_(allequal(x4, array([1, 2, 3, 4])))
x1 = np.arange(5) * 1.0
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
x1 = array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
assert_equal(type(s2), str)
assert_equal(type(s1), str)
assert_equal(s1, s2)
assert_(x1[1:1].shape == (0,))
def test_matrix_indexing(self):
# Tests conversions and indexing
x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
x2 = array(x1, mask=[[1, 0, 0], [0, 1, 0]])
x3 = array(x1, mask=[[0, 1, 0], [1, 0, 0]])
x4 = array(x1)
# test conversion to strings
str(x2) # must not raise
repr(x2) # must not raise
# tests of indexing
assert_(type(x2[1, 0]) is type(x1[1, 0]))
assert_(x1[1, 0] == x2[1, 0])
assert_(x2[1, 1] is masked)
assert_equal(x1[0, 2], x2[0, 2])
assert_equal(x1[0, 1:], x2[0, 1:])
assert_equal(x1[:, 2], x2[:, 2])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[0, 2] = 9
x2[0, 2] = 9
assert_equal(x1, x2)
x1[0, 1:] = 99
x2[0, 1:] = 99
assert_equal(x1, x2)
x2[0, 1] = masked
assert_equal(x1, x2)
x2[0, 1:] = masked
assert_equal(x1, x2)
x2[0, :] = x1[0, :]
x2[0, 1] = masked
assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))
x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])
assert_(allequal(getmask(x3)[1], array([1, 1, 0])))
assert_(allequal(getmask(x3[1]), array([1, 1, 0])))
x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])
assert_(allequal(getmask(x4[1]), array([1, 1, 0])))
assert_(allequal(x4[1], array([1, 2, 3])))
x1 = np.matrix(np.arange(5) * 1.0)
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
def test_copy(self):
# Tests of some subtle points of copying and sizing.
n = [0, 0, 1, 0, 0]
m = make_mask(n)
m2 = make_mask(m)
self.assertTrue(m is m2)
m3 = make_mask(m, copy=1)
self.assertTrue(m is not m3)
x1 = np.arange(5)
y1 = array(x1, mask=m)
assert_equal(y1._data.__array_interface__, x1.__array_interface__)
self.assertTrue(allequal(x1, y1.data))
assert_equal(y1._mask.__array_interface__, m.__array_interface__)
y1a = array(y1)
self.assertTrue(y1a._data.__array_interface__ ==
y1._data.__array_interface__)
self.assertTrue(y1a.mask is y1.mask)
y2 = array(x1, mask=m)
self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__)
self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__)
self.assertTrue(y2[2] is masked)
y2[2] = 9
self.assertTrue(y2[2] is not masked)
self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__)
self.assertTrue(allequal(y2.mask, 0))
y3 = array(x1 * 1.0, mask=m)
self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)
x4 = arange(4)
x4[2] = masked
y4 = resize(x4, (8,))
assert_equal(concatenate([x4, x4]), y4)
assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
y5 = repeat(x4, (2, 2, 2, 2), axis=0)
assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
y6 = repeat(x4, 2, axis=0)
assert_equal(y5, y6)
y7 = x4.repeat((2, 2, 2, 2), axis=0)
assert_equal(y5, y7)
y8 = x4.repeat(2, 0)
assert_equal(y5, y8)
y9 = x4.copy()
assert_equal(y9._data, x4._data)
assert_equal(y9._mask, x4._mask)
x = masked_array([1, 2, 3], mask=[0, 1, 0])
# Copy is False by default
y = masked_array(x)
assert_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_equal(y._mask.ctypes.data, x._mask.ctypes.data)
y = masked_array(x, copy=True)
assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)
def test_copy_immutable(self):
# Tests that the copy method is immutable, GitHub issue #5247
a = np.ma.array([1, 2, 3])
b = np.ma.array([4, 5, 6])
a_copy_method = a.copy
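# Merely accessing b.copy must not affect the bound method captured above
# (this was the regression in gh-5247).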
b.copy
assert_equal(a_copy_method(), [1, 2, 3])
def test_deepcopy(self):
from copy import deepcopy
a = array([0, 1, 2], mask=[False, True, False])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
assert_not_equal(id(a._mask), id(copied._mask))
copied[1] = 1
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
copied.mask[1] = False
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
def test_str_repr(self):
a = array([0, 1, 2], mask=[False, True, False])
assert_equal(str(a), '[0 -- 2]')
assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n'
' mask = [False True False],\n'
' fill_value = 999999)\n')
a = np.ma.arange(2000)
a[1:50] = np.ma.masked
assert_equal(
repr(a),
'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n'
' mask = [False True True ..., False False False],\n'
' fill_value = 999999)\n'
)
def test_pickling(self):
# Tests pickling
a = arange(10)
a[::3] = masked
a.fill_value = 999
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled._data, a._data)
assert_equal(a_pickled.fill_value, 999)
def test_pickling_subbaseclass(self):
# Test pickling w/ a subclass of ndarray
a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
self.assertTrue(isinstance(a_pickled._data, np.matrix))
def test_pickling_maskedconstant(self):
# Test pickling MaskedConstant
mc = np.ma.masked
mc_pickled = pickle.loads(mc.dumps())
assert_equal(mc_pickled._baseclass, mc._baseclass)
assert_equal(mc_pickled._mask, mc._mask)
assert_equal(mc_pickled._data, mc._data)
def test_pickling_wstructured(self):
# Tests pickling w/ structured array
a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', float)])
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
def test_pickling_keepalignment(self):
# Tests pickling w/ F_CONTIGUOUS arrays
a = arange(10)
a.shape = (-1, 2)
b = a.T
test = pickle.loads(pickle.dumps(b))
assert_equal(test, b)
def test_single_element_subscript(self):
# Tests single element subscripts of Maskedarrays.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a[0].shape, ())
assert_equal(b[0].shape, ())
assert_equal(b[1].shape, ())
def test_topython(self):
# Tests some communication issues with Python.
assert_equal(1, int(array(1)))
assert_equal(1.0, float(array(1)))
assert_equal(1, int(array([[[1]]])))
assert_equal(1.0, float(array([[1]])))
self.assertRaises(TypeError, float, array([1, 1]))
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
assert_(np.isnan(float(array([1], mask=[1]))))
a = array([1, 2, 3], mask=[1, 0, 0])
self.assertRaises(TypeError, lambda:float(a))
assert_equal(float(a[-1]), 3.)
self.assertTrue(np.isnan(float(a[0])))
self.assertRaises(TypeError, int, a)
assert_equal(int(a[-1]), 3)
self.assertRaises(MAError, lambda:int(a[0]))
def test_oddfeatures_1(self):
# Test of other odd features
x = arange(20)
x = x.reshape(4, 5)
x.flat[5] = 12
assert_(x[1, 0] == 12)
z = x + 10j * x
assert_equal(z.real, x)
assert_equal(z.imag, 10 * x)
assert_equal((z * conjugate(z)).real, 101 * x * x)
z.imag[...] = 0.0
x = arange(10)
x[3] = masked
assert_(str(x[3]) == str(masked))
c = x >= 8
assert_(count(where(c, masked, masked)) == 0)
assert_(shape(where(c, masked, masked)) == c.shape)
z = masked_where(c, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
assert_equal(x, z)
def test_oddfeatures_2(self):
# Tests some more features.
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
def test_oddfeatures_3(self):
# Tests some generic features
atest = array([10], mask=True)
btest = array([20])
idx = atest.mask
atest[idx] = btest[idx]
assert_equal(atest, [20])
def test_filled_w_object_dtype(self):
a = np.ma.masked_all(1, dtype='O')
assert_equal(a.filled('x')[0], 'x')
def test_filled_w_flexible_dtype(self):
# Test filled w/ flexible dtype
flexi = array([(1, 1, 1)],
dtype=[('i', int), ('s', '|S8'), ('f', float)])
flexi[0] = masked
assert_equal(flexi.filled(),
np.array([(default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),)], dtype=flexi.dtype))
flexi[0] = masked
assert_equal(flexi.filled(1),
np.array([(1, '1', 1.)], dtype=flexi.dtype))
def test_filled_w_mvoid(self):
# Test filled w/ mvoid
ndtype = [('a', int), ('b', float)]
a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype)
# Filled using default
test = a.filled()
assert_equal(tuple(test), (1, default_fill_value(1.)))
# Explicit fill_value
test = a.filled((-1, -1))
assert_equal(tuple(test), (1, -1))
# Using predefined filling values
a.fill_value = (-999, -999)
assert_equal(tuple(a.filled()), (1, -999))
def test_filled_w_nested_dtype(self):
# Test filled w/ nested dtype
ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
a = array([(1, (1, 1)), (2, (2, 2))],
mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype)
test = a.filled(0)
control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype)
assert_equal(test, control)
test = a['B'].filled(0)
control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype)
assert_equal(test, control)
# test if mask gets set correctly (see #6760)
Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2,2))]))
assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)),
('f1', 'i1', (2, 2))], (2, 2))]))
assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)),
('f1', '?', (2, 2))], (2, 2))]))
def test_filled_w_f_order(self):
# Test filled w/ F-contiguous array
a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),
mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'),
order='F') # this is currently ignored
self.assertTrue(a.flags['F_CONTIGUOUS'])
self.assertTrue(a.filled(0).flags['F_CONTIGUOUS'])
def test_optinfo_propagation(self):
# Checks that _optinfo dictionary isn't back-propagated
x = array([1, 2, 3, ], dtype=float)
x._optinfo['info'] = '???'
y = x.copy()
assert_equal(y._optinfo['info'], '???')
y._optinfo['info'] = '!!!'
assert_equal(x._optinfo['info'], '???')
def test_fancy_printoptions(self):
# Test printing a masked array w/ fancy dtype.
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = array([(1, (2, 3.0)), (4, (5, 6.0))],
mask=[(1, (0, 1)), (0, (1, 0))],
dtype=fancydtype)
control = "[(--, (2, --)) (4, (--, 6.0))]"
assert_equal(str(test), control)
# Test 0-d array with multi-dimensional dtype
t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
0.0),
mask = (False, [[True, False, True],
[False, False, True]],
False),
dtype = "int, (2,3)float, float")
control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)"
assert_equal(str(t_2d0), control)
def test_flatten_structured_array(self):
# Test flatten_structured_array on arrays
# On ndarray
ndtype = [('a', int), ('b', float)]
a = np.array([(1, 1), (2, 2)], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[1., 1.], [2., 2.]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
# On masked_array
a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1.], [2., 2.]],
mask=[[0, 1], [1, 0]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# On masked array with nested structure
ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])]
a = array([(1, (1, 1.1)), (2, (2, 2.2))],
mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1., 1.1], [2., 2., 2.2]],
mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# Keeping the initial shape
ndtype = [('a', int), ('b', float)]
a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=np.float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
def test_void0d(self):
# Test creating a mvoid object
ndtype = [('a', int), ('b', int)]
a = np.array([(1, 2,)], dtype=ndtype)[0]
f = mvoid(a)
assert_(isinstance(f, mvoid))
a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0]
assert_(isinstance(a, mvoid))
a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
f = mvoid(a._data[0], a._mask[0])
assert_(isinstance(f, mvoid))
def test_mvoid_getitem(self):
# Test mvoid.__getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
f = a[0]
self.assertTrue(isinstance(f, mvoid))
assert_equal((f[0], f['a']), (1, 1))
assert_equal(f['b'], 2)
# w/ mask
f = a[1]
self.assertTrue(isinstance(f, mvoid))
self.assertTrue(f[0] is masked)
self.assertTrue(f['a'] is masked)
assert_equal(f[1], 4)
# exotic dtype
A = masked_array(data=[([0,1],)],
mask=[([True, False],)],
dtype=[("A", ">i2", (2,))])
assert_equal(A[0]["A"], A["A"][0])
assert_equal(A[0]["A"], masked_array(data=[0, 1],
mask=[True, False], dtype=">i2"))
def test_mvoid_iter(self):
# Test iteration on __getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
assert_equal(list(a[0]), [1, 2])
# w/ mask
assert_equal(list(a[1]), [masked, 4])
def test_mvoid_print(self):
# Test printing a mvoid
mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])
assert_equal(str(mx[0]), "(1, 1)")
mx['b'][0] = masked
ini_display = masked_print_option._display
masked_print_option.set_display("-X-")
try:
assert_equal(str(mx[0]), "(1, -X-)")
assert_equal(repr(mx[0]), "(1, -X-)")
finally:
masked_print_option.set_display(ini_display)
# also check if there are object datatypes (see gh-7493)
mx = array([(1,), (2,)], dtype=[('a', 'O')])
assert_equal(str(mx[0]), "(1,)")
def test_mvoid_multidim_print(self):
# regression test for gh-6019
t_ma = masked_array(data = [([1, 2, 3],)],
mask = [([False, True, False],)],
fill_value = ([999999, 999999, 999999],),
dtype = [('a', '<i4', (3,))])
assert_(str(t_ma[0]) == "([1, --, 3],)")
assert_(repr(t_ma[0]) == "([1, --, 3],)")
# additional tests with structured arrays
t_2d = masked_array(data = [([[1, 2], [3,4]],)],
mask = [([[False, True], [True, False]],)],
dtype = [('a', '<i4', (2,2))])
assert_(str(t_2d[0]) == "([[1, --], [--, 4]],)")
assert_(repr(t_2d[0]) == "([[1, --], [--, 4]],)")
t_0d = masked_array(data = [(1,2)],
mask = [(True,False)],
dtype = [('a', '<i4'), ('b', '<i4')])
assert_(str(t_0d[0]) == "(--, 2)")
assert_(repr(t_0d[0]) == "(--, 2)")
t_2d = masked_array(data = [([[1, 2], [3,4]], 1)],
mask = [([[False, True], [True, False]], False)],
dtype = [('a', '<i4', (2,2)), ('b', float)])
assert_(str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)")
assert_(repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)")
t_ne = masked_array(data=[(1, (1, 1))],
mask=[(True, (True, False))],
dtype = [('a', '<i4'), ('b', 'i4,i4')])
assert_(str(t_ne[0]) == "(--, (--, 1))")
assert_(repr(t_ne[0]) == "(--, (--, 1))")
def test_object_with_array(self):
mx1 = masked_array([1.], mask=[True])
mx2 = masked_array([1., 2.])
mx = masked_array([mx1, mx2], mask=[False, True])
assert_(mx[0] is mx1)
assert_(mx[1] is not mx2)
assert_(np.all(mx[1].data == mx2.data))
assert_(np.all(mx[1].mask))
# check that we return a view.
mx[1].data[0] = 0.
assert_(mx2[0] == 0.)
class TestMaskedArrayArithmetic(TestCase):
# Base test class for MaskedArrays.
def setUp(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
np.seterr(**self.err_status)
def test_basic_arithmetic(self):
# Test of basic arithmetic.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
a2d = array([[1, 2], [0, 4]])
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
assert_equal(a2d * a2d, a2d * a2dm)
assert_equal(a2d + a2d, a2d + a2dm)
assert_equal(a2d - a2d, a2d - a2dm)
for s in [(12,), (4, 3), (2, 6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
assert_equal(-x, -xm)
assert_equal(x + y, xm + ym)
assert_equal(x - y, xm - ym)
assert_equal(x * y, xm * ym)
assert_equal(x / y, xm / ym)
assert_equal(a10 + y, a10 + ym)
assert_equal(a10 - y, a10 - ym)
assert_equal(a10 * y, a10 * ym)
assert_equal(a10 / y, a10 / ym)
assert_equal(x + a10, xm + a10)
assert_equal(x - a10, xm - a10)
assert_equal(x * a10, xm * a10)
assert_equal(x / a10, xm / a10)
assert_equal(x ** 2, xm ** 2)
assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5)
assert_equal(x ** y, xm ** ym)
assert_equal(np.add(x, y), add(xm, ym))
assert_equal(np.subtract(x, y), subtract(xm, ym))
assert_equal(np.multiply(x, y), multiply(xm, ym))
assert_equal(np.divide(x, y), divide(xm, ym))
def test_divide_on_different_shapes(self):
x = arange(6, dtype=float)
x.shape = (2, 3)
y = arange(3, dtype=float)
z = x / y
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
z = x / y[None,:]
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
y = arange(2, dtype=float)
z = x / y[:, None]
assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]])
assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]])
def test_mixed_arithmetic(self):
        # Tests mixed arithmetic.
na = np.array([1])
ma = array([1])
self.assertTrue(isinstance(na + ma, MaskedArray))
self.assertTrue(isinstance(ma + na, MaskedArray))
def test_limits_arithmetic(self):
tiny = np.finfo(float).tiny
a = array([tiny, 1. / tiny, 0.])
assert_equal(getmaskarray(a / 2), [0, 0, 0])
assert_equal(getmaskarray(2 / a), [1, 0, 1])
def test_masked_singleton_arithmetic(self):
        # Tests some scalar arithmetic on MaskedArrays.
# Masked singleton should remain masked no matter what
xm = array(0, mask=1)
self.assertTrue((1 / array(0)).mask)
self.assertTrue((1 + xm).mask)
self.assertTrue((-xm).mask)
self.assertTrue(maximum(xm, xm).mask)
self.assertTrue(minimum(xm, xm).mask)
def test_masked_singleton_equality(self):
        # Tests (in)equality on the masked singleton
a = array([1, 2, 3], mask=[1, 1, 0])
assert_((a[0] == 0) is masked)
assert_((a[0] != 0) is masked)
assert_equal((a[-1] == 0), False)
assert_equal((a[-1] != 0), True)
def test_arithmetic_with_masked_singleton(self):
# Checks that there's no collapsing to masked
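        # (an operation with the masked singleton should yield a fully
        # masked array of the array operand's shape, not the 0-d masked
        # constant itself)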
x = masked_array([1, 2])
y = x * masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
y = x[0] * masked
assert_(y is masked)
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
def test_arithmetic_with_masked_singleton_on_1d_singleton(self):
# Check that we're not losing the shape of a singleton
x = masked_array([1, ])
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y.mask, [True, ])
def test_scalar_arithmetic(self):
x = array(0, mask=0)
assert_equal(x.filled().ctypes.data, x.ctypes.data)
# Make sure we don't lose the shape in some circumstances
xm = array((0, 0)) / 0.
assert_equal(xm.shape, (2,))
assert_equal(xm.mask, [1, 1])
def test_basic_ufuncs(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.cos(x), cos(xm))
assert_equal(np.cosh(x), cosh(xm))
assert_equal(np.sin(x), sin(xm))
assert_equal(np.sinh(x), sinh(xm))
assert_equal(np.tan(x), tan(xm))
assert_equal(np.tanh(x), tanh(xm))
assert_equal(np.sqrt(abs(x)), sqrt(xm))
assert_equal(np.log(abs(x)), log(xm))
assert_equal(np.log10(abs(x)), log10(xm))
assert_equal(np.exp(x), exp(xm))
assert_equal(np.arcsin(z), arcsin(zm))
assert_equal(np.arccos(z), arccos(zm))
assert_equal(np.arctan(z), arctan(zm))
assert_equal(np.arctan2(x, y), arctan2(xm, ym))
assert_equal(np.absolute(x), absolute(xm))
assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))
assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))
assert_equal(np.equal(x, y), equal(xm, ym))
assert_equal(np.not_equal(x, y), not_equal(xm, ym))
assert_equal(np.less(x, y), less(xm, ym))
assert_equal(np.greater(x, y), greater(xm, ym))
assert_equal(np.less_equal(x, y), less_equal(xm, ym))
assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
assert_equal(np.conjugate(x), conjugate(xm))
def test_count_func(self):
# Tests count
assert_equal(1, count(1))
        assert_equal(0, count(array(1, mask=[1])))
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
res = count(ott)
self.assertTrue(res.dtype.type is np.intp)
assert_equal(3, res)
ott = ott.reshape((2, 2))
res = count(ott)
assert_(res.dtype.type is np.intp)
assert_equal(3, res)
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_equal([1, 2], res)
assert_(getmask(res) is nomask)
ott = array([0., 1., 2., 3.])
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_(res.dtype.type is np.intp)
assert_raises(ValueError, ott.count, axis=1)
def test_minmax_func(self):
# Tests minimum and maximum.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        # max doesn't work on shaped arrays, so compare raveled versions
xr = np.ravel(x)
xmr = ravel(xm)
        # the following are true because of the careful selection of data
assert_equal(max(xr), maximum(xmr))
assert_equal(min(xr), minimum(xmr))
assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])
assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])
x = arange(5)
y = arange(5) - 2
x[3] = masked
y[0] = masked
assert_equal(minimum(x, y), where(less(x, y), x, y))
assert_equal(maximum(x, y), where(greater(x, y), x, y))
assert_(minimum(x) == 0)
assert_(maximum(x) == 4)
x = arange(4).reshape(2, 2)
x[-1, -1] = masked
assert_equal(maximum(x), 2)
def test_minimummaximum_func(self):
a = np.ones((2, 2))
aminimum = minimum(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum(a, a))
aminimum = minimum.outer(a, a)
self.assertTrue(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum.outer(a, a))
amaximum = maximum(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum(a, a))
amaximum = maximum.outer(a, a)
self.assertTrue(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum.outer(a, a))
def test_minmax_reduce(self):
        # Test np.maximum.reduce on array w/ full False mask
a = array([1, 2, 3], mask=[False, False, False])
b = np.maximum.reduce(a)
assert_equal(b, 3)
def test_minmax_funcs_with_output(self):
# Tests the min/max functions with explicit outputs
mask = np.random.rand(12).round()
xm = array(np.random.uniform(0, 10, 12), mask=mask)
xm.shape = (3, 4)
for funcname in ('min', 'max'):
# Initialize
npfunc = getattr(np, funcname)
mafunc = getattr(numpy.ma.core, funcname)
# Use the np version
nout = np.empty((4,), dtype=int)
try:
result = npfunc(xm, axis=0, out=nout)
except MaskError:
pass
nout = np.empty((4,), dtype=float)
result = npfunc(xm, axis=0, out=nout)
self.assertTrue(result is nout)
# Use the ma version
nout.fill(-999)
result = mafunc(xm, axis=0, out=nout)
self.assertTrue(result is nout)
def test_minmax_methods(self):
# Additional tests on max/min
(_, _, _, _, _, xm, _, _, _, _) = self.d
xm.shape = (xm.size,)
assert_equal(xm.max(), 10)
self.assertTrue(xm[0].max() is masked)
self.assertTrue(xm[0].max(0) is masked)
self.assertTrue(xm[0].max(-1) is masked)
assert_equal(xm.min(), -10.)
self.assertTrue(xm[0].min() is masked)
self.assertTrue(xm[0].min(0) is masked)
self.assertTrue(xm[0].min(-1) is masked)
assert_equal(xm.ptp(), 20.)
self.assertTrue(xm[0].ptp() is masked)
self.assertTrue(xm[0].ptp(0) is masked)
self.assertTrue(xm[0].ptp(-1) is masked)
x = array([1, 2, 3], mask=True)
self.assertTrue(x.min() is masked)
self.assertTrue(x.max() is masked)
self.assertTrue(x.ptp() is masked)
def test_addsumprod(self):
# Tests add, sum, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.add.reduce(x), add.reduce(x))
assert_equal(np.add.accumulate(x), add.accumulate(x))
assert_equal(4, sum(array(4), axis=0))
assert_equal(4, sum(array(4), axis=0))
assert_equal(np.sum(x, axis=0), sum(x, axis=0))
assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
assert_equal(np.sum(x, 0), sum(x, 0))
assert_equal(np.product(x, axis=0), product(x, axis=0))
assert_equal(np.product(x, 0), product(x, 0))
assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
if len(s) > 1:
assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
assert_equal(np.sum(x, 1), sum(x, 1))
assert_equal(np.product(x, 1), product(x, 1))
def test_binops_d2D(self):
# Test binary operations on 2D data
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a * b
control = array([[2., 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a * b
control = array([[2, 3], [8, 10], [18, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2, 3], [8, 10], [18, 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_domained_binops_d2D(self):
# Test domained binary operations on 2D data
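        # ("domained" operations such as division have a restricted input
        # domain; results whose inputs fall outside it, e.g. zero divisors,
        # are masked automatically on top of the operands' own masks)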
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a / b
control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a / b
control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_noshrinking(self):
# Check that we don't shrink a mask when not wanted
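        # (with shrink=False, an all-False mask is kept as a full boolean
        # array instead of being collapsed to nomask)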
# Binary operations
a = masked_array([1., 2., 3.], mask=[False, False, False],
shrink=False)
b = a + 1
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a += 1
assert_equal(a.mask, [0, 0, 0])
# Domained binary operation
b = a / 1.
assert_equal(b.mask, [0, 0, 0])
        # In-place domained binary operation
a /= 1.
assert_equal(a.mask, [0, 0, 0])
    def test_noshrink_on_creation(self):
# Check that the mask is not shrunk on array creation when not wanted
a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False)
assert_equal(a.mask, [0, 0, 0])
def test_mod(self):
# Tests mod
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(mod(x, y), mod(xm, ym))
test = mod(ym, xm)
assert_equal(test, np.mod(ym, xm))
assert_equal(test.mask, mask_or(xm.mask, ym.mask))
test = mod(xm, ym)
assert_equal(test, np.mod(xm, ym))
assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))
def test_TakeTransposeInnerOuter(self):
# Test of take, transpose, inner, outer products
x = arange(24)
y = np.arange(24)
x[5:6] = masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
assert_equal(np.inner(filled(x, 0), filled(y, 0)),
inner(x, y))
assert_equal(np.outer(filled(x, 0), filled(y, 0)),
outer(x, y))
y = array(['abc', 1, 'def', 2, 3], object)
y[2] = masked
t = take(y, [0, 3, 4])
assert_(t[0] == 'abc')
assert_(t[1] == 2)
assert_(t[2] == 3)
def test_imag_real(self):
# Check complex
xx = array([1 + 10j, 20 + 2j], mask=[1, 0])
assert_equal(xx.imag, [10, 2])
assert_equal(xx.imag.filled(), [1e+20, 2])
assert_equal(xx.imag.dtype, xx._data.imag.dtype)
assert_equal(xx.real, [1, 20])
assert_equal(xx.real.filled(), [1e+20, 20])
assert_equal(xx.real.dtype, xx._data.real.dtype)
def test_methods_with_output(self):
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)
for funcname in funclist:
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty(4, dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
assert_(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty(4, dtype=int)
result = xmmeth(axis=0, out=output)
assert_(result is output)
assert_(output[0] is masked)
def test_count_mean_with_matrix(self):
m = np.ma.array(np.matrix([[1,2],[3,4]]), mask=np.zeros((2,2)))
assert_equal(m.count(axis=0).shape, (1,2))
assert_equal(m.count(axis=1).shape, (2,1))
        # make sure broadcasting inside mean and var works
assert_equal(m.mean(axis=0), [[2., 3.]])
assert_equal(m.mean(axis=1), [[1.5], [3.5]])
def test_eq_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a == a)
assert_equal(test, [True, True])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test, [False, True])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test, [True, False])
assert_equal(test.mask, [False, False])
def test_ne_on_structured(self):
        # Test the inequality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a != a)
assert_equal(test, [False, False])
assert_equal(test.mask, [False, False])
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test, [True, False])
assert_equal(test.mask, [True, False])
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test, [False, True])
assert_equal(test.mask, [False, False])
def test_eq_w_None(self):
# Really, comparisons with None should not be done, but check them
# anyway. Note that pep8 will flag these tests.
# With partial mask
a = array([1, 2], mask=[0, 1])
assert_equal(a == None, False)
assert_equal(a.data == None, False)
assert_equal(a.mask == None, False)
assert_equal(a != None, True)
# With nomask
a = array([1, 2], mask=False)
assert_equal(a == None, False)
assert_equal(a != None, True)
# With complete mask
a = array([1, 2], mask=True)
assert_equal(a == None, False)
assert_equal(a != None, True)
# Fully masked, even comparison to None should return "masked"
a = masked
assert_equal(a == None, masked)
def test_eq_w_scalar(self):
a = array(1)
assert_equal(a == 1, True)
assert_equal(a == 0, False)
assert_equal(a != 1, False)
assert_equal(a != 0, True)
def test_numpyarithmetics(self):
# Check that the mask is not back-propagated when using numpy functions
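        # (np.log masks the invalid results in its output, but the input
        # array's own mask must be left untouched)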
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
mask=[1, 1, 0, 0, 1])
test = log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
test = np.log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
class TestMaskedArrayAttributes(TestCase):
def test_keepmask(self):
# Tests the keep mask flag
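        # (keep_mask=True ORs the new mask with the existing one;
        # keep_mask=False replaces the existing mask entirely)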
x = masked_array([1, 2, 3], mask=[1, 0, 0])
mx = masked_array(x)
assert_equal(mx.mask, x.mask)
mx = masked_array(x, mask=[0, 1, 0], keep_mask=False)
assert_equal(mx.mask, [0, 1, 0])
mx = masked_array(x, mask=[0, 1, 0], keep_mask=True)
assert_equal(mx.mask, [1, 1, 0])
# We default to true
mx = masked_array(x, mask=[0, 1, 0])
assert_equal(mx.mask, [1, 1, 0])
def test_hardmask(self):
# Test hard_mask
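        # (with a hard mask, assignments to masked entries are silently
        # ignored; soften_mask() makes them writable again)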
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
        # We need to copy, to avoid updating d in xh!
xs = array(d, mask=m, hard_mask=False, copy=True)
xh[[1, 4]] = [10, 40]
xs[[1, 4]] = [10, 40]
assert_equal(xh._data, [0, 10, 2, 3, 4])
assert_equal(xs._data, [0, 10, 2, 3, 40])
assert_equal(xs.mask, [0, 0, 0, 1, 0])
self.assertTrue(xh._hardmask)
self.assertTrue(not xs._hardmask)
xh[1:4] = [10, 20, 30]
xs[1:4] = [10, 20, 30]
assert_equal(xh._data, [0, 10, 20, 3, 4])
assert_equal(xs._data, [0, 10, 20, 30, 40])
assert_equal(xs.mask, nomask)
xh[0] = masked
xs[0] = masked
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, [1, 0, 0, 0, 0])
xh[:] = 1
xs[:] = 1
assert_equal(xh._data, [0, 1, 1, 3, 4])
assert_equal(xs._data, [1, 1, 1, 1, 1])
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, nomask)
# Switch to soft mask
xh.soften_mask()
xh[:] = arange(5)
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh.mask, nomask)
# Switch back to hard mask
xh.harden_mask()
xh[xh < 3] = masked
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh[filled(xh > 1, False)] = 5
assert_equal(xh._data, [0, 1, 2, 5, 5])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True)
xh[0] = 0
assert_equal(xh._data, [[1, 0], [3, 4]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[-1, -1] = 5
assert_equal(xh._data, [[1, 0], [3, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[filled(xh < 5, False)] = 2
assert_equal(xh._data, [[1, 2], [2, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
def test_hardmask_again(self):
# Another test of hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
xh[4:5] = 999
xh[0:1] = 999
assert_equal(xh._data, [999, 1, 2, 3, 4])
def test_hardmask_oncemore_yay(self):
# OK, yet another test of hardmask
        # Make sure that harden_mask/soften_mask/unshare_mask returns self
a = array([1, 2, 3], mask=[1, 0, 0])
b = a.harden_mask()
assert_equal(a, b)
b[0] = 0
assert_equal(a, b)
assert_equal(b, array([1, 2, 3], mask=[1, 0, 0]))
a = b.soften_mask()
a[0] = 0
assert_equal(a, b)
assert_equal(b, array([0, 2, 3], mask=[0, 0, 0]))
def test_smallmask(self):
# Checks the behaviour of _smallmask
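        # (_smallmask controls shrinking: when True, a mask that becomes
        # all-False collapses to nomask; when False, the full boolean
        # mask array is kept)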
a = arange(10)
a[1] = masked
a[1] = 1
assert_equal(a._mask, nomask)
a = arange(10)
a._smallmask = False
a[1] = masked
a[1] = 1
assert_equal(a._mask, zeros(10))
def test_shrink_mask(self):
# Tests .shrink_mask()
a = array([1, 2, 3], mask=[0, 0, 0])
b = a.shrink_mask()
assert_equal(a, b)
assert_equal(a.mask, nomask)
def test_flat(self):
# Test that flat can return all types of items [#4585, #4615]
# test simple access
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
assert_equal(test.flat[1], 2)
assert_equal(test.flat[2], masked)
self.assertTrue(np.all(test.flat[0:2] == test[0, 0:2]))
# Test flat on masked_matrices
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
assert_equal(test, control)
# Test setting
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
testflat = test.flat
testflat[:] = testflat[[2, 1, 0]]
assert_equal(test, control)
testflat[0] = 9
assert_equal(test[0, 0], 9)
# test 2-D record array
# ... on structured array w/ masked records
x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')],
[(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]],
dtype=[('a', int), ('b', float), ('c', '|S8')])
x['a'][0, 1] = masked
x['b'][1, 0] = masked
x['c'][0, 2] = masked
x[-1, -1] = masked
xflat = x.flat
assert_equal(xflat[0], x[0, 0])
assert_equal(xflat[1], x[0, 1])
assert_equal(xflat[2], x[0, 2])
assert_equal(xflat[:3], x[0])
assert_equal(xflat[3], x[1, 0])
assert_equal(xflat[4], x[1, 1])
assert_equal(xflat[5], x[1, 2])
assert_equal(xflat[3:], x[1])
assert_equal(xflat[-1], x[-1, -1])
i = 0
j = 0
for xf in xflat:
assert_equal(xf, x[j, i])
i += 1
if i >= x.shape[-1]:
i = 0
j += 1
# test that matrices keep the correct shape (#4615)
a = masked_array(np.matrix(np.eye(2)), mask=0)
b = a.flat
b01 = b[:2]
assert_equal(b01.data, array([[1., 0.]]))
assert_equal(b01.mask, array([[False, False]]))
def test_assign_dtype(self):
# check that the mask's dtype is updated when dtype is changed
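        # (the mask of a structured array holds one boolean per field, so
        # reassigning dtype must retype the mask to match)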
a = np.zeros(4, dtype='f4,i4')
m = np.ma.array(a)
m.dtype = np.dtype('f4')
repr(m) # raises?
assert_equal(m.dtype, np.dtype('f4'))
# check that dtype changes that change shape of mask too much
# are not allowed
def assign():
m = np.ma.array(a)
m.dtype = np.dtype('f8')
assert_raises(ValueError, assign)
b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises?
assert_equal(b.dtype, np.dtype('f4'))
# check that nomask is preserved
a = np.zeros(4, dtype='f4')
m = np.ma.array(a)
m.dtype = np.dtype('f4,i4')
assert_equal(m.dtype, np.dtype('f4,i4'))
assert_equal(m._mask, np.ma.nomask)
class TestFillingValues(TestCase):
def test_check_on_scalar(self):
# Test _check_fill_value set to valid and invalid values
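        # (_check_fill_value converts a candidate fill value to the given
        # dtype, substituting the dtype's default when passed None)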
_check_fill_value = np.ma.core._check_fill_value
fval = _check_fill_value(0, int)
assert_equal(fval, 0)
fval = _check_fill_value(None, int)
assert_equal(fval, default_fill_value(0))
fval = _check_fill_value(0, "|S3")
assert_equal(fval, asbytes("0"))
fval = _check_fill_value(None, "|S3")
assert_equal(fval, default_fill_value(b"camelot!"))
self.assertRaises(TypeError, _check_fill_value, 1e+20, int)
self.assertRaises(TypeError, _check_fill_value, 'stuff', int)
def test_check_on_fields(self):
# Tests _check_fill_value with records
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('a', int), ('b', float), ('c', "|S3")]
# A check on a list should return a single record
fval = _check_fill_value([-999, -12345678.9, "???"], ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
# A check on None should output the defaults
fval = _check_fill_value(None, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [default_fill_value(0),
default_fill_value(0.),
asbytes(default_fill_value("0"))])
#.....Using a structured type as fill_value should work
fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype)
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....Using a flexible type w/ a different type shouldn't matter
# BEHAVIOR in 1.5 and earlier: match structured types by position
#fill_val = np.array((-999, -12345678.9, "???"),
# dtype=[("A", int), ("B", float), ("C", "|S3")])
# BEHAVIOR in 1.6 and later: match structured types by name
fill_val = np.array(("???", -999, -12345678.9),
dtype=[("c", "|S3"), ("a", int), ("b", float), ])
fval = _check_fill_value(fill_val, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....Using an object-array shouldn't matter either
fill_val = np.ndarray(shape=(1,), dtype=object)
fill_val[0] = (-999, -12345678.9, asbytes("???"))
fval = _check_fill_value(fill_val, object)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
# NOTE: This test was never run properly as "fill_value" rather than
# "fill_val" was assigned. Written properly, it fails.
#fill_val = np.array((-999, -12345678.9, "???"))
#fval = _check_fill_value(fill_val, ndtype)
#self.assertTrue(isinstance(fval, ndarray))
#assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")])
#.....One-field-only flexible type should work as well
ndtype = [("a", int)]
fval = _check_fill_value(-999999999, ndtype)
self.assertTrue(isinstance(fval, ndarray))
assert_equal(fval.item(), (-999999999,))
def test_fillvalue_conversion(self):
# Tests the behavior of fill_value during conversion
        # Use a tailored _optinfo comment to make sure special attributes
        # are properly carried over during conversion
a = array(asbytes_nested(['3', '4', '5']))
a._optinfo.update({'comment':"updated!"})
b = array(a, dtype=int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
b = array(a, dtype=float)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0.))
b = a.astype(int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
assert_equal(b._optinfo['comment'], "updated!")
b = a.astype([('a', '|S3')])
assert_equal(b['a']._data, a._data)
assert_equal(b['a'].fill_value, a.fill_value)
def test_fillvalue(self):
# Yet more fun with the fill_value
data = masked_array([1, 2, 3], fill_value=-999)
series = data[[0, 2, 1]]
assert_equal(series._fill_value, data._fill_value)
mtype = [('f', float), ('s', '|S3')]
x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype)
x.fill_value = 999
assert_equal(x.fill_value.item(), [999., asbytes('999')])
assert_equal(x['f'].fill_value, 999)
assert_equal(x['s'].fill_value, asbytes('999'))
x.fill_value = (9, '???')
assert_equal(x.fill_value.item(), (9, asbytes('???')))
assert_equal(x['f'].fill_value, 9)
assert_equal(x['s'].fill_value, asbytes('???'))
x = array([1, 2, 3.1])
x.fill_value = 999
assert_equal(np.asarray(x.fill_value).dtype, float)
assert_equal(x.fill_value, 999.)
assert_equal(x._fill_value, np.array(999.))
def test_fillvalue_exotic_dtype(self):
# Tests yet more exotic flexible dtypes
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('i', int), ('s', '|S8'), ('f', float)]
control = np.array((default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),),
dtype=ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
# The shape shouldn't matter
ndtype = [('f0', float, (2, 2))]
control = np.array((default_fill_value(0.),),
dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
ndtype = np.dtype("int, (2,3)float, float")
control = np.array((default_fill_value(0),
default_fill_value(0.),
default_fill_value(0.),),
dtype="int, float, float").astype(ndtype)
test = _check_fill_value(None, ndtype)
assert_equal(test, control)
control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
# but when indexing, fill value should become scalar not tuple
# See issue #6723
M = masked_array(control)
assert_equal(M["f1"].fill_value.ndim, 0)
def test_fillvalue_datetime_timedelta(self):
# Test default fillvalue for datetime64 and timedelta64 types.
# See issue #4476, this would return '?' which would cause errors
# elsewhere
for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m",
"h", "D", "W", "M", "Y"):
control = numpy.datetime64("NaT", timecode)
test = default_fill_value(numpy.dtype("<M8[" + timecode + "]"))
assert_equal(test, control)
control = numpy.timedelta64("NaT", timecode)
test = default_fill_value(numpy.dtype("<m8[" + timecode + "]"))
assert_equal(test, control)
def test_extremum_fill_value(self):
# Tests extremum fill values for flexible type.
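        # (minimum_fill_value returns the largest representable value per
        # field, so masked entries never win a minimum; maximum_fill_value
        # returns the smallest, symmetrically)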
a = array([(1, (2, 3)), (4, (5, 6))],
dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])
test = a.fill_value
assert_equal(test['A'], default_fill_value(a['A']))
assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))
assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))
test = minimum_fill_value(a)
assert_equal(test[0], minimum_fill_value(a['A']))
assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))
assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))
assert_equal(test[1], minimum_fill_value(a['B']))
test = maximum_fill_value(a)
assert_equal(test[0], maximum_fill_value(a['A']))
assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
assert_equal(test[1], maximum_fill_value(a['B']))
def test_fillvalue_individual_fields(self):
# Test setting fill_value on individual fields
ndtype = [('a', int), ('b', int)]
# Explicit fill_value
a = array(list(zip([1, 2, 3], [4, 5, 6])),
fill_value=(-999, -999), dtype=ndtype)
aa = a['a']
aa.set_fill_value(10)
assert_equal(aa._fill_value, np.array(10))
assert_equal(tuple(a.fill_value), (10, -999))
a.fill_value['b'] = -10
assert_equal(tuple(a.fill_value), (10, -10))
# Implicit fill_value
t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=ndtype)
tt = t['a']
tt.set_fill_value(10)
assert_equal(tt._fill_value, np.array(10))
assert_equal(tuple(t.fill_value), (10, default_fill_value(0)))
def test_fillvalue_implicit_structured_array(self):
# Check that fill_value is always defined for structured arrays
ndtype = ('b', float)
adtype = ('a', float)
a = array([(1.,), (2.,)], mask=[(False,), (False,)],
fill_value=(np.nan,), dtype=np.dtype([adtype]))
b = empty(a.shape, dtype=[adtype, ndtype])
b['a'] = a['a']
b['a'].set_fill_value(a['a'].fill_value)
f = b._fill_value[()]
assert_(np.isnan(f[0]))
assert_equal(f[-1], default_fill_value(1.))
def test_fillvalue_as_arguments(self):
# Test adding a fill_value parameter to empty/ones/zeros
a = empty(3, fill_value=999.)
assert_equal(a.fill_value, 999.)
a = ones(3, fill_value=999., dtype=float)
assert_equal(a.fill_value, 999.)
a = zeros(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
a = identity(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
def test_shape_argument(self):
        # Test that shape can be provided as an argument
# GH issue 6106
a = empty(shape=(3, ))
assert_equal(a.shape, (3, ))
a = ones(shape=(3, ), dtype=float)
assert_equal(a.shape, (3, ))
a = zeros(shape=(3, ), dtype=complex)
assert_equal(a.shape, (3, ))
def test_fillvalue_in_view(self):
# Test the behavior of fill_value in view
# Create initial masked array
x = array([1, 2, 3], fill_value=1, dtype=np.int64)
# Check that fill_value is preserved by default
y = x.view()
assert_(y.fill_value == 1)
# Check that fill_value is preserved if dtype is specified and the
# dtype is an ndarray sub-class and has a _fill_value attribute
y = x.view(MaskedArray)
assert_(y.fill_value == 1)
# Check that fill_value is preserved if type is specified and the
# dtype is an ndarray sub-class and has a _fill_value attribute (by
# default, the first argument is dtype, not type)
y = x.view(type=MaskedArray)
assert_(y.fill_value == 1)
# Check that code does not crash if passed an ndarray sub-class that
# does not have a _fill_value attribute
y = x.view(np.ndarray)
y = x.view(type=np.ndarray)
        # Check that fill_value can be overridden with view
y = x.view(MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
        # Check that fill_value can be overridden with view (using type=)
y = x.view(type=MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
# Check that fill_value gets reset if passed a dtype but not a
# fill_value. This is because even though in some cases one can safely
# cast the fill_value, e.g. if taking an int64 view of an int32 array,
# in other cases, this cannot be done (e.g. int32 view of an int64
# array with a large fill_value).
y = x.view(dtype=np.int32)
assert_(y.fill_value == 999999)
class TestUfuncs(TestCase):
# Test class for the application of ufuncs on MaskedArrays.
def setUp(self):
# Base data definition.
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
np.seterr(**self.err_status)
def test_testUfuncRegression(self):
# Tests new ufuncs on MaskedArrays.
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh',
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
'floor', 'ceil',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
'remainder', 'fmod', 'hypot', 'arctan2',
'equal', 'not_equal', 'less_equal', 'greater_equal',
'less', 'greater',
'logical_and', 'logical_or', 'logical_xor',
]:
try:
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(numpy.ma.core, f)
args = self.d[:uf.nin]
ur = uf(*args)
mr = mf(*args)
assert_equal(ur.filled(0), mr.filled(0), f)
assert_mask_equal(ur.mask, mr.mask, err_msg=f)
def test_reduce(self):
# Tests reduce on MaskedArrays.
a = self.d[0]
self.assertTrue(not alltrue(a, axis=0))
self.assertTrue(sometrue(a, axis=0))
assert_equal(sum(a[:3], axis=0), 0)
assert_equal(product(a, axis=0), 0)
assert_equal(add.reduce(a), pi)
def test_minmax(self):
# Tests extrema on MaskedArrays.
a = arange(1, 13).reshape(3, 4)
amask = masked_where(a < 5, a)
assert_equal(amask.max(), a.max())
assert_equal(amask.min(), 5)
assert_equal(amask.max(0), a.max(0))
assert_equal(amask.min(0), [5, 6, 7, 8])
self.assertTrue(amask.max(1)[0].mask)
self.assertTrue(amask.min(1)[0].mask)
def test_ndarray_mask(self):
        # Check that the mask of the result is an ndarray (not a MaskedArray...)
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
test = np.sqrt(a)
control = masked_array([-1, 0, 1, np.sqrt(2), -1],
mask=[1, 0, 0, 0, 1])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
self.assertTrue(not isinstance(test.mask, MaskedArray))
def test_treatment_of_NotImplemented(self):
# Check that NotImplemented is returned at appropriate places
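        # (a higher __array_priority__ on the other operand makes the
        # MaskedArray return NotImplemented, deferring to that operand's
        # reflected method)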
a = masked_array([1., 2.], mask=[1, 0])
self.assertRaises(TypeError, operator.mul, a, "abc")
self.assertRaises(TypeError, operator.truediv, a, "abc")
class MyClass(object):
__array_priority__ = a.__array_priority__ + 1
def __mul__(self, other):
return "My mul"
def __rmul__(self, other):
return "My rmul"
me = MyClass()
assert_(me * a == "My mul")
assert_(a * me == "My rmul")
# and that __array_priority__ is respected
class MyClass2(object):
__array_priority__ = 100
def __mul__(self, other):
return "Me2mul"
def __rmul__(self, other):
return "Me2rmul"
def __rdiv__(self, other):
return "Me2rdiv"
__rtruediv__ = __rdiv__
me_too = MyClass2()
assert_(a.__mul__(me_too) is NotImplemented)
assert_(all(multiply.outer(a, me_too) == "Me2rmul"))
assert_(a.__truediv__(me_too) is NotImplemented)
assert_(me_too * a == "Me2mul")
assert_(a * me_too == "Me2rmul")
assert_(a / me_too == "Me2rdiv")
class TestMaskedArrayInPlaceArithmetics(TestCase):
    # Test MaskedArray in-place arithmetic
def setUp(self):
x = arange(10)
y = arange(10)
xm = arange(10)
xm[2] = masked
self.intdata = (x, y, xm)
self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
self.othertypes = [np.dtype(_).type for _ in self.othertypes]
self.uint8data = (
x.astype(np.uint8),
y.astype(np.uint8),
xm.astype(np.uint8)
)
def test_inplace_addition_scalar(self):
# Test of inplace additions
(x, y, xm) = self.intdata
xm[2] = masked
x += 1
assert_equal(x, y + 1)
xm += 1
assert_equal(xm, y + 1)
(x, _, xm) = self.floatdata
id1 = x.data.ctypes._data
x += 1.
assert_(id1 == x.data.ctypes._data)
assert_equal(x, y + 1.)
def test_inplace_addition_array(self):
# Test of inplace additions
(x, y, xm) = self.intdata
m = xm.mask
a = arange(10, dtype=np.int16)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_subtraction_scalar(self):
# Test of inplace subtractions
(x, y, xm) = self.intdata
x -= 1
assert_equal(x, y - 1)
xm -= 1
assert_equal(xm, y - 1)
def test_inplace_subtraction_array(self):
# Test of inplace subtractions
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_multiplication_scalar(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
x *= 2.0
assert_equal(x, y * 2)
xm *= 2.0
assert_equal(xm, y * 2)
def test_inplace_multiplication_array(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_division_scalar_int(self):
# Test of inplace division
(x, y, xm) = self.intdata
x = arange(10) * 2
xm = arange(10) * 2
xm[2] = masked
x //= 2
assert_equal(x, y)
xm //= 2
assert_equal(xm, y)
def test_inplace_division_scalar_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
x /= 2.0
assert_equal(x, y / 2.0)
xm /= arange(10)
assert_equal(xm, ones((10,)))
def test_inplace_division_array_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x /= a
xm /= a
assert_equal(x, y / a)
assert_equal(xm, y / a)
assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))
def test_inplace_division_misc(self):
x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]
y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = xm / ym
assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
xm = xm.copy()
xm /= ym
assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
        assert_equal(xm._data,
                     [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
def test_datafriendly_add(self):
# Test keeping data w/ (inplace) addition
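        # ("data-friendly" means the data under a masked entry keeps its
        # original value instead of being overwritten by the operation)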
x = array([1, 2, 3], mask=[0, 0, 1])
# Test add w/ scalar
xx = x + 1
assert_equal(xx.data, [2, 3, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test iadd w/ scalar
x += 1
assert_equal(x.data, [2, 3, 3])
assert_equal(x.mask, [0, 0, 1])
# Test add w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x + array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 4, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test iadd w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x += array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 4, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_sub(self):
# Test keeping data w/ (inplace) subtraction
# Test sub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - 1
assert_equal(xx.data, [0, 1, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test isub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x -= 1
assert_equal(x.data, [0, 1, 3])
assert_equal(x.mask, [0, 0, 1])
# Test sub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 0, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test isub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x -= array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 0, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_mul(self):
# Test keeping data w/ (inplace) multiplication
# Test mul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * 2
assert_equal(xx.data, [2, 4, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test imul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x *= 2
assert_equal(x.data, [2, 4, 3])
assert_equal(x.mask, [0, 0, 1])
# Test mul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * array([10, 20, 30], mask=[1, 0, 0])
assert_equal(xx.data, [1, 40, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test imul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x *= array([10, 20, 30], mask=[1, 0, 0])
assert_equal(x.data, [1, 40, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_div(self):
# Test keeping data w/ (inplace) division
# Test div on scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x / 2.
assert_equal(xx.data, [1 / 2., 2 / 2., 3])
assert_equal(xx.mask, [0, 0, 1])
# Test idiv on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= 2.
assert_equal(x.data, [1 / 2., 2 / 2., 3])
assert_equal(x.mask, [0, 0, 1])
# Test div on array
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x / array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(xx.data, [1., 2. / 20., 3.])
assert_equal(xx.mask, [1, 0, 1])
# Test idiv on array
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(x.data, [1., 2 / 20., 3.])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_pow(self):
# Test keeping data w/ (inplace) power
# Test pow on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x ** 2.5
assert_equal(xx.data, [1., 2. ** 2.5, 3.])
assert_equal(xx.mask, [0, 0, 1])
# Test ipow on scalar
x **= 2.5
assert_equal(x.data, [1., 2. ** 2.5, 3])
assert_equal(x.mask, [0, 0, 1])
def test_datafriendly_add_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a += b
assert_equal(a, [[2, 2], [4, 4]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a += b
assert_equal(a, [[2, 2], [4, 4]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_sub_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_mul_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_inplace_addition_scalar_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
xm[2] = masked
x += t(1)
assert_equal(x, y + t(1))
xm += t(1)
assert_equal(xm, y + t(1))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_addition_array_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_subtraction_scalar_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x -= t(1)
assert_equal(x, y - t(1))
xm -= t(1)
assert_equal(xm, y - t(1))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_subtraction_array_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_multiplication_scalar_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x *= t(2)
assert_equal(x, y * t(2))
xm *= t(2)
assert_equal(xm, y * t(2))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_multiplication_array_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_floor_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
x //= t(2)
xm //= t(2)
assert_equal(x, y)
assert_equal(xm, y)
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_floor_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x //= a
xm //= a
assert_equal(x, y // a)
assert_equal(xm, y // a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
# Will handle the same way.
try:
x /= t(2)
assert_equal(x, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
try:
xm /= t(2)
assert_equal(xm, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
if issubclass(t, np.integer):
assert_equal(len(w), 2, "Failed on type=%s." % t)
else:
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
# Will handle the same way.
try:
x /= a
assert_equal(x, y / a)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
try:
xm /= a
assert_equal(xm, y / a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e))
if issubclass(t, np.integer):
assert_equal(len(w), 2, "Failed on type=%s." % t)
else:
assert_equal(len(w), 0, "Failed on type=%s." % t)
def test_inplace_pow_type(self):
# Test keeping data w/ (inplace) power
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
# Test pow on scalar
x = array([1, 2, 3], mask=[0, 0, 1], dtype=t)
xx = x ** t(2)
xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t)
assert_equal(xx.data, xx_r.data)
assert_equal(xx.mask, xx_r.mask)
# Test ipow on scalar
x **= t(2)
assert_equal(x.data, xx_r.data)
assert_equal(x.mask, xx_r.mask)
assert_equal(len(w), 0, "Failed on type=%s." % t)
class TestMaskedArrayMethods(TestCase):
    # Test class for miscellaneous MaskedArray methods.
def setUp(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_generic_methods(self):
# Tests some MaskedArray methods.
a = array([1, 3, 2])
assert_equal(a.any(), a._data.any())
assert_equal(a.all(), a._data.all())
assert_equal(a.argmax(), a._data.argmax())
assert_equal(a.argmin(), a._data.argmin())
assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))
assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
assert_equal(a.conj(), a._data.conj())
assert_equal(a.conjugate(), a._data.conjugate())
m = array([[1, 2], [3, 4]])
assert_equal(m.diagonal(), m._data.diagonal())
assert_equal(a.sum(), a._data.sum())
assert_equal(a.take([1, 2]), a._data.take([1, 2]))
assert_equal(m.transpose(), m._data.transpose())
def test_allclose(self):
# Tests allclose on arrays
a = np.random.rand(10)
b = a + np.random.rand(10) * 1e-8
self.assertTrue(allclose(a, b))
# Test allclose w/ infs
a[0] = np.inf
self.assertTrue(not allclose(a, b))
b[0] = np.inf
self.assertTrue(allclose(a, b))
# Test allclose w/ masked
a = masked_array(a)
a[-1] = masked
self.assertTrue(allclose(a, b, masked_equal=True))
self.assertTrue(not allclose(a, b, masked_equal=False))
# Test comparison w/ scalar
a *= 1e-8
a[0] = 0
self.assertTrue(allclose(a, 0, masked_equal=True))
# Test that the function works for MIN_INT integer typed arrays
a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
self.assertTrue(allclose(a, a))
def test_allany(self):
# Checks the any/all methods/functions.
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mx = masked_array(x, mask=m)
mxbig = (mx > 0.5)
mxsmall = (mx < 0.5)
self.assertFalse(mxbig.all())
self.assertTrue(mxbig.any())
assert_equal(mxbig.all(0), [False, False, True])
assert_equal(mxbig.all(1), [False, False, True])
assert_equal(mxbig.any(0), [False, False, True])
assert_equal(mxbig.any(1), [True, True, True])
self.assertFalse(mxsmall.all())
self.assertTrue(mxsmall.any())
assert_equal(mxsmall.all(0), [True, True, False])
assert_equal(mxsmall.all(1), [False, False, False])
assert_equal(mxsmall.any(0), [True, True, False])
assert_equal(mxsmall.any(1), [True, True, False])
def test_allany_onmatrices(self):
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
X = np.matrix(x)
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mX = masked_array(X, mask=m)
mXbig = (mX > 0.5)
mXsmall = (mX < 0.5)
self.assertFalse(mXbig.all())
self.assertTrue(mXbig.any())
assert_equal(mXbig.all(0), np.matrix([False, False, True]))
assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
assert_equal(mXbig.any(0), np.matrix([False, False, True]))
assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
self.assertFalse(mXsmall.all())
self.assertTrue(mXsmall.any())
assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
def test_allany_oddities(self):
# Some fun with all and any
store = empty((), dtype=bool)
full = array([1, 2, 3], mask=True)
self.assertTrue(full.all() is masked)
full.all(out=store)
self.assertTrue(store)
        assert_equal(store._mask, True)
self.assertTrue(store is not masked)
store = empty((), dtype=bool)
self.assertTrue(full.any() is masked)
full.any(out=store)
self.assertTrue(not store)
        assert_equal(store._mask, True)
self.assertTrue(store is not masked)
def test_argmax_argmin(self):
# Tests argmin & argmax on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_equal(mx.argmin(), 35)
assert_equal(mX.argmin(), 35)
assert_equal(m2x.argmin(), 4)
assert_equal(m2X.argmin(), 4)
assert_equal(mx.argmax(), 28)
assert_equal(mX.argmax(), 28)
assert_equal(m2x.argmax(), 31)
assert_equal(m2X.argmax(), 31)
assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])
assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])
assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])
assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])
assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ])
assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])
assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])
assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])
def test_clip(self):
# Tests clip on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0])
mx = array(x, mask=m)
clipped = mx.clip(2, 8)
assert_equal(clipped.mask, mx.mask)
assert_equal(clipped._data, x.clip(2, 8))
assert_equal(clipped._data, mx._data.clip(2, 8))
def test_compress(self):
# test compress
a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
condition = (a > 1.5) & (a < 3.5)
assert_equal(a.compress(condition), [2., 3.])
a[[2, 3]] = masked
b = a.compress(condition)
assert_equal(b._data, [2., 3.])
assert_equal(b._mask, [0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
condition = (a < 4.)
b = a.compress(condition)
assert_equal(b._data, [1., 2., 3.])
assert_equal(b._mask, [0, 0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
a = masked_array([[10, 20, 30], [40, 50, 60]],
mask=[[0, 0, 1], [1, 0, 0]])
b = a.compress(a.ravel() >= 22)
assert_equal(b._data, [30, 40, 50, 60])
assert_equal(b._mask, [1, 1, 0, 0])
x = np.array([3, 1, 2])
b = a.compress(x >= 2, axis=1)
assert_equal(b._data, [[10, 30], [40, 60]])
assert_equal(b._mask, [[0, 1], [1, 0]])
def test_compressed(self):
# Tests compressed
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
a[0] = masked
b = a.compressed()
assert_equal(b, [2, 3, 4])
a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
self.assertTrue(isinstance(b, np.matrix))
a[0, 0] = masked
b = a.compressed()
assert_equal(b, [[2, 3, 4]])
def test_empty(self):
# Tests empty/like
datatype = [('a', int), ('b', float), ('c', '|S8')]
a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
dtype=datatype)
assert_equal(len(a.fill_value.item()), len(datatype))
b = empty_like(a)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
b = empty(len(a), dtype=datatype)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
# check empty_like mask handling
a = masked_array([1, 2, 3], mask=[False, True, False])
b = empty_like(a)
assert_(not np.may_share_memory(a.mask, b.mask))
b = a.view(masked_array)
assert_(np.may_share_memory(a.mask, b.mask))
def test_put(self):
# Tests put.
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
x = array(d, mask=m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is masked)
x[[1, 4]] = [10, 40]
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is not masked)
assert_equal(x, [0, 10, 2, -1, 40])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
i = [0, 2, 4, 6]
x.put(i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
put(x, i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
def test_put_nomask(self):
# GitHub issue 6425
x = zeros(10)
z = array([3., -1.], mask=[False, True])
x.put([1, 2], z)
self.assertTrue(x[0] is not masked)
assert_equal(x[0], 0)
self.assertTrue(x[1] is not masked)
assert_equal(x[1], 3)
self.assertTrue(x[2] is masked)
self.assertTrue(x[3] is not masked)
assert_equal(x[3], 0)
def test_put_hardmask(self):
# Tests put on hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d + 1, mask=m, hard_mask=True, copy=True)
xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])
assert_equal(xh._data, [3, 4, 2, 4, 5])
def test_putmask(self):
x = arange(6) + 1
mx = array(x, mask=[0, 0, 0, 1, 1, 1])
mask = [0, 0, 1, 0, 0, 1]
# w/o mask, w/o masked values
xx = x.copy()
putmask(xx, mask, 99)
assert_equal(xx, [1, 2, 99, 4, 5, 99])
# w/ mask, w/o masked values
mxx = mx.copy()
putmask(mxx, mask, 99)
assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])
assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])
# w/o mask, w/ masked values
values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])
xx = x.copy()
putmask(xx, mask, values)
assert_equal(xx._data, [1, 2, 30, 4, 5, 60])
assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])
# w/ mask, w/ masked values
mxx = mx.copy()
putmask(mxx, mask, values)
assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])
assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])
# w/ mask, w/ masked values + hardmask
mxx = mx.copy()
mxx.harden_mask()
putmask(mxx, mask, values)
assert_equal(mxx, [1, 2, 30, 4, 5, 60])
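# Illustrative note (not part of the original suite): with a soft mask,
# putmask lets masked entries of `values` mask the target and lets data
# overwrite previously masked slots; harden_mask() freezes the existing
# mask, so the hardmask case above is only checked through its data.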
def test_ravel(self):
# Tests ravel
a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel._mask.shape, aravel.shape)
a = array([0, 0], mask=[1, 1])
aravel = a.ravel()
assert_equal(aravel._mask.shape, a.shape)
a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel.shape, (1, 5))
assert_equal(aravel._mask.shape, a.shape)
# Checks that small_mask is preserved
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
assert_equal(a.ravel()._mask, [0, 0, 0, 0])
# Test that the fill_value is preserved
a.fill_value = -99
a.shape = (2, 2)
ar = a.ravel()
assert_equal(ar._mask, [0, 0, 0, 0])
assert_equal(ar._data, [1, 2, 3, 4])
assert_equal(ar.fill_value, -99)
# Test index ordering
assert_equal(a.ravel(order='C'), [1, 2, 3, 4])
assert_equal(a.ravel(order='F'), [1, 3, 2, 4])
def test_reshape(self):
# Tests reshape
x = arange(4)
x[0] = masked
y = x.reshape(2, 2)
assert_equal(y.shape, (2, 2,))
assert_equal(y._mask.shape, (2, 2,))
assert_equal(x.shape, (4,))
assert_equal(x._mask.shape, (4,))
def test_sort(self):
# Test sort
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
sortedx = sort(x)
assert_equal(sortedx._data, [1, 2, 3, 4])
assert_equal(sortedx._mask, [0, 0, 0, 1])
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [4, 1, 2, 3])
assert_equal(sortedx._mask, [1, 0, 0, 0])
x.sort()
assert_equal(x._data, [1, 2, 3, 4])
assert_equal(x._mask, [0, 0, 0, 1])
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
x.sort(endwith=False)
assert_equal(x._data, [4, 1, 2, 3])
assert_equal(x._mask, [1, 0, 0, 0])
x = [1, 4, 2, 3]
sortedx = sort(x)
self.assertTrue(not isinstance(sortedx, MaskedArray))
x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [1, 2, -2, -1, 0])
assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
def test_sort_2d(self):
# Check sort of 2D array.
# 2D array w/o mask
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
# 2D array w/mask
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])
# 3D
a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],
[[1, 2, 3], [7, 8, 9], [4, 5, 6]],
[[7, 8, 9], [1, 2, 3], [4, 5, 6]],
[[4, 5, 6], [1, 2, 3], [7, 8, 9]]])
a[a % 4 == 0] = masked
am = a.copy()
an = a.filled(99)
am.sort(0)
an.sort(0)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(1)
an.sort(1)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(2)
an.sort(2)
assert_equal(am, an)
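# Rationale for the filled(99) comparison above (illustrative): with the
# default endwith=True, masked entries sort to the end of each axis, and
# since every unmasked value here is below 99, sorting the array filled
# with 99 yields the same element order as the masked sort.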
def test_sort_flexible(self):
# Test sort on flexible dtype.
a = array(
data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
dtype=[('A', int), ('B', int)])
test = sort(a)
b = array(
data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
dtype=[('A', int), ('B', int)])
assert_equal(test, b)
assert_equal(test.mask, b.mask)
test = sort(a, endwith=False)
b = array(
data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ],
mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ],
dtype=[('A', int), ('B', int)])
assert_equal(test, b)
assert_equal(test.mask, b.mask)
def test_argsort(self):
# Test argsort
a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])
assert_equal(np.argsort(a), argsort(a))
def test_squeeze(self):
# Check squeeze
data = masked_array([[1, 2, 3]])
assert_equal(data.squeeze(), [1, 2, 3])
data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
assert_equal(data.squeeze(), [1, 2, 3])
assert_equal(data.squeeze()._mask, [1, 1, 1])
data = masked_array([[1]], mask=True)
self.assertTrue(data.squeeze() is masked)
def test_swapaxes(self):
# Tests swapaxes on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mX = array(x, mask=m).reshape(6, 6)
mXX = mX.reshape(3, 2, 2, 3)
mXswapped = mX.swapaxes(0, 1)
assert_equal(mXswapped[-1], mX[:, -1])
mXXswapped = mXX.swapaxes(0, 2)
assert_equal(mXXswapped.shape, (2, 2, 3, 3))
def test_take(self):
# Tests take
x = masked_array([10, 20, 30, 40], [0, 1, 0, 1])
assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1]))
assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]])
assert_equal(x.take([[0, 1], [0, 1]]),
masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))
# assert_equal crashes when passed np.ma.masked
self.assertTrue(x[1] is np.ma.masked)
self.assertTrue(x.take(1) is np.ma.masked)
x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])
assert_equal(x.take([0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
assert_equal(take(x, [0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
def test_take_masked_indices(self):
# Test take w/ masked indices
a = np.array((40, 18, 37, 9, 22))
indices = np.arange(3)[None,:] + np.arange(5)[:, None]
mindices = array(indices, mask=(indices >= len(a)))
# No mask
test = take(a, mindices, mode='clip')
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 22],
[22, 22, 22]])
assert_equal(test, ctrl)
# Masked indices
test = take(a, mindices)
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 40],
[22, 40, 40]])
ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# Masked input + masked indices
a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0))
test = take(a, mindices)
ctrl[0, 1] = ctrl[1, 0] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
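# Minimal sketch of the behavior exercised above (illustrative, assumes
# this numpy.ma version): a masked index never raises, it just masks the
# corresponding output slot, e.g.
#     idx = array([0, 9], mask=[0, 1])
#     take(np.array([10, 20, 30]), idx)   # -> [10, --], no IndexError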
def test_tolist(self):
# Tests to list
# ... on 1D
x = array(np.arange(12))
x[[1, -2]] = masked
xlist = x.tolist()
self.assertTrue(xlist[1] is None)
self.assertTrue(xlist[-2] is None)
# ... on 2D
x.shape = (3, 4)
xlist = x.tolist()
ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]]
assert_equal(xlist[0], [0, None, 2, 3])
assert_equal(xlist[1], [4, 5, 6, 7])
assert_equal(xlist[2], [8, 9, None, 11])
assert_equal(xlist, ctrl)
# ... on structured array w/ masked records
x = array(list(zip([1, 2, 3],
[1.1, 2.2, 3.3],
['one', 'two', 'thr'])),
dtype=[('a', int), ('b', float), ('c', '|S8')])
x[-1] = masked
assert_equal(x.tolist(),
[(1, 1.1, asbytes('one')),
(2, 2.2, asbytes('two')),
(None, None, None)])
# ... on structured array w/ masked fields
a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)],
dtype=[('a', int), ('b', int)])
test = a.tolist()
assert_equal(test, [[1, None], [3, 4]])
# ... on mvoid
a = a[0]
test = a.tolist()
assert_equal(test, [1, None])
def test_tolist_specialcase(self):
# Test mvoid.tolist: make sure we return a standard Python object
a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])
# w/o mask: each entry is a np.void whose elements are standard Python
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
# w/ mask: each entry is a ma.void whose elements should be
# standard Python
a.mask[0] = (0, 1)
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
def test_toflex(self):
# Test the conversion to records
data = arange(10)
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = [('i', int), ('s', '|S3'), ('f', float)]
data = array([(i, s, f) for (i, s, f) in zip(np.arange(10),
'ABCDEFGHIJKLM',
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = np.dtype("int, (2,3)float, float")
data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10),
np.random.rand(10),
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal_records(record['_data'], data._data)
assert_equal_records(record['_mask'], data._mask)
def test_fromflex(self):
# Test the reconstruction of a masked_array from a record
a = array([1, 2, 3])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([1, 2, 3], mask=[0, 0, 1])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)],
dtype=[('A', int), ('B', float)])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.data, a.data)
def test_arraymethod(self):
# Test a _arraymethod w/ n argument
marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0])
control = masked_array([[1], [2], [3], [4], [5]],
mask=[0, 0, 1, 0, 0])
assert_equal(marray.T, control)
assert_equal(marray.transpose(), control)
assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))
class TestMaskedArrayMathMethods(TestCase):
def setUp(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_cumsumprod(self):
# Tests cumsum & cumprod on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXcp = mX.cumsum(0)
assert_equal(mXcp._data, mX.filled(0).cumsum(0))
mXcp = mX.cumsum(1)
assert_equal(mXcp._data, mX.filled(0).cumsum(1))
mXcp = mX.cumprod(0)
assert_equal(mXcp._data, mX.filled(1).cumprod(0))
mXcp = mX.cumprod(1)
assert_equal(mXcp._data, mX.filled(1).cumprod(1))
def test_cumsumprod_with_output(self):
# Tests cumsum/cumprod w/ output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
for funcname in ('cumsum', 'cumprod'):
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty((3, 4), dtype=int)
result = xmmeth(axis=0, out=output)
self.assertTrue(result is output)
def test_ptp(self):
# Tests ptp on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
(n, m) = X.shape
assert_equal(mx.ptp(), mx.compressed().ptp())
rows = np.zeros(n, np.float)
cols = np.zeros(m, np.float)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
rows[k] = mX[k].compressed().ptp()
assert_equal(mX.ptp(0), cols)
assert_equal(mX.ptp(1), rows)
def test_add_object(self):
x = masked_array(['a', 'b'], mask=[1, 0], dtype=object)
y = x + 'x'
assert_equal(y[1], 'bx')
assert_(y.mask[0])
def test_sum_object(self):
# Test sum on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
assert_equal(a.sum(), 5)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.sum(axis=0), [5, 7, 9])
def test_prod_object(self):
# Test prod on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
assert_equal(a.prod(), 2 * 3)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.prod(axis=0), [4, 10, 18])
def test_meananom_object(self):
# Test mean/anom on object dtype
a = masked_array([1, 2, 3], dtype=np.object)
assert_equal(a.mean(), 2)
assert_equal(a.anom(), [-1, 0, 1])
def test_trace(self):
# Tests trace on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXdiag = mX.diagonal()
assert_equal(mX.trace(), mX.diagonal().compressed().sum())
assert_almost_equal(mX.trace(),
X.trace() - sum(mXdiag.mask * X.diagonal(),
axis=0))
assert_equal(np.trace(mX), mX.trace())
def test_dot(self):
# Tests dot on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
fx = mx.filled(0)
r = mx.dot(mx)
assert_almost_equal(r.filled(0), fx.dot(fx))
assert_(r.mask is nomask)
fX = mX.filled(0)
r = mX.dot(mX)
assert_almost_equal(r.filled(0), fX.dot(fX))
assert_(r.mask[1,3])
r1 = empty_like(r)
mX.dot(mX, out=r1)
assert_almost_equal(r, r1)
mYY = mXX.swapaxes(-1, -2)
fXX, fYY = mXX.filled(0), mYY.filled(0)
r = mXX.dot(mYY)
assert_almost_equal(r.filled(0), fXX.dot(fYY))
r1 = empty_like(r)
mXX.dot(mYY, out=r1)
assert_almost_equal(r, r1)
def test_dot_shape_mismatch(self):
# regression test
x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
z = masked_array([[0,1],[3,3]])
x.dot(y, out=z)
assert_almost_equal(z.filled(0), [[1, 0], [15, 16]])
assert_almost_equal(z.mask, [[0, 1], [0, 0]])
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_almost_equal(mX.std(axis=None, ddof=1),
mX.compressed().std(ddof=1))
assert_almost_equal(mX.var(axis=None, ddof=1),
mX.compressed().var(ddof=1))
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
def test_varstd_specialcases(self):
# Test a special case for var
nout = np.array(-1, dtype=float)
mout = array(-1, dtype=float)
x = array(arange(10), mask=True)
for methodname in ('var', 'std'):
method = getattr(x, methodname)
self.assertTrue(method() is masked)
self.assertTrue(method(0) is masked)
self.assertTrue(method(-1) is masked)
# Using a masked array as explicit output
with warnings.catch_warnings():
warnings.simplefilter('ignore')
method(out=mout)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
with warnings.catch_warnings():
warnings.simplefilter('ignore')
method(out=nout)
self.assertTrue(np.isnan(nout))
x = array(arange(10), mask=True)
x[-1] = 9
for methodname in ('var', 'std'):
method = getattr(x, methodname)
self.assertTrue(method(ddof=1) is masked)
self.assertTrue(method(0, ddof=1) is masked)
self.assertTrue(method(-1, ddof=1) is masked)
# Using a masked array as explicit output
method(out=mout, ddof=1)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
method(out=nout, ddof=1)
self.assertTrue(np.isnan(nout))
def test_varstd_ddof(self):
a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])
test = a.std(axis=0, ddof=0)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=1)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=2)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [1, 1, 1])
def test_diag(self):
# Test diag
x = arange(9).reshape((3, 3))
x[1, 1] = masked
out = np.diag(x)
assert_equal(out, [0, 4, 8])
out = diag(x)
assert_equal(out, [0, 4, 8])
assert_equal(out.mask, [0, 1, 0])
out = diag(out)
control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],
mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
assert_equal(out, control)
def test_axis_methods_nomask(self):
# Test the combination nomask & methods w/ axis
a = array([[1, 2, 3], [4, 5, 6]])
assert_equal(a.sum(0), [5, 7, 9])
assert_equal(a.sum(-1), [6, 15])
assert_equal(a.sum(1), [6, 15])
assert_equal(a.prod(0), [4, 10, 18])
assert_equal(a.prod(-1), [6, 120])
assert_equal(a.prod(1), [6, 120])
assert_equal(a.min(0), [1, 2, 3])
assert_equal(a.min(-1), [1, 4])
assert_equal(a.min(1), [1, 4])
assert_equal(a.max(0), [4, 5, 6])
assert_equal(a.max(-1), [3, 6])
assert_equal(a.max(1), [3, 6])
class TestMaskedArrayMathMethodsComplex(TestCase):
# Test class for miscellaneous MaskedArrays methods.
def setUp(self):
# Base data definition.
x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479j,
7.189j, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993j])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
class TestMaskedArrayFunctions(TestCase):
# Test class for miscellaneous functions.
def setUp(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
self.info = (xm, ym)
def test_masked_where_bool(self):
x = [1, 2]
y = masked_where(False, x)
assert_equal(y, [1, 2])
assert_equal(y[1], 2)
def test_masked_equal_wlist(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [0, 0, 1])
mx = masked_not_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [1, 1, 0])
def test_masked_equal_fill_value(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx._mask, [0, 0, 1])
assert_equal(mx.fill_value, 3)
def test_masked_where_condition(self):
# Tests masking functions.
x = array([1., 2., 3., 4., 5.])
x[2] = masked
assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2))
assert_equal(masked_where(greater_equal(x, 2), x),
masked_greater_equal(x, 2))
assert_equal(masked_where(less(x, 2), x), masked_less(x, 2))
assert_equal(masked_where(less_equal(x, 2), x),
masked_less_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
[99, 99, 3, 4, 5])
def test_masked_where_oddities(self):
# Tests some generic features.
atest = ones((10, 10, 10), dtype=float)
btest = zeros(atest.shape, MaskType)
ctest = masked_where(btest, atest)
assert_equal(atest, ctest)
def test_masked_where_shape_constraint(self):
a = arange(10)
try:
test = masked_equal(1, a)
except IndexError:
pass
else:
raise AssertionError("Should have failed...")
test = masked_equal(a, 1)
assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
def test_masked_where_structured(self):
# test that masked_where on a structured array sets a structured
# mask (see issue #2972)
a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
am = np.ma.masked_where(a["A"] < 5, a)
assert_equal(am.mask.dtype.names, am.dtype.names)
assert_equal(am["A"],
np.ma.masked_array(np.zeros(10), np.ones(10)))
def test_masked_otherfunctions(self):
assert_equal(masked_inside(list(range(5)), 1, 3),
[0, 199, 199, 199, 4])
assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
assert_equal(masked_inside(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 1, 3).mask,
[1, 1, 1, 1, 0])
assert_equal(masked_outside(array(list(range(5)),
mask=[0, 1, 0, 0, 0]), 1, 3).mask,
[1, 1, 0, 0, 1])
assert_equal(masked_equal(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 0])
assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 1])
def test_round(self):
a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
mask=[0, 1, 0, 0, 0])
assert_equal(a.round(), [1., 2., 3., 5., 6.])
assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
b = empty_like(a)
a.round(out=b)
assert_equal(b, [1., 2., 3., 5., 6.])
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
def test_round_with_output(self):
# Testing round with an explicit output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = np.round(xm, decimals=2, out=output)
# ... the result should be the given output
self.assertTrue(result is output)
assert_equal(result, xm.round(decimals=2, out=output))
output = empty((3, 4), dtype=float)
result = xm.round(decimals=2, out=output)
self.assertTrue(result is output)
def test_round_with_scalar(self):
# Testing round with scalar/zero dimension input
# GH issue 2244
a = array(1.1, mask=[False])
assert_equal(a.round(), 1)
a = array(1.1, mask=[True])
assert_(a.round() is masked)
a = array(1.1, mask=[False])
output = np.empty(1, dtype=float)
output.fill(-9999)
a.round(out=output)
assert_equal(output, 1)
a = array(1.1, mask=[False])
output = array(-9999., mask=[True])
a.round(out=output)
assert_equal(output[()], 1)
a = array(1.1, mask=[True])
output = array(-9999., mask=[False])
a.round(out=output)
assert_(output[()] is masked)
def test_identity(self):
a = identity(5)
self.assertTrue(isinstance(a, MaskedArray))
assert_equal(a, np.identity(5))
def test_power(self):
x = -1.1
assert_almost_equal(power(x, 2.), 1.21)
self.assertTrue(power(x, masked) is masked)
x = array([-1.1, -1.1, 1.1, 1.1, 0.])
b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1])
y = power(x, b)
assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.])
assert_equal(y._mask, [1, 0, 0, 0, 1])
b.mask = nomask
y = power(x, b)
assert_equal(y._mask, [1, 0, 0, 0, 1])
z = x ** b
assert_equal(z._mask, y._mask)
assert_almost_equal(z, y)
assert_almost_equal(z._data, y._data)
x **= b
assert_equal(x._mask, y._mask)
assert_almost_equal(x, y)
assert_almost_equal(x._data, y._data)
def test_power_w_broadcasting(self):
# Test power w/ broadcasting
a2 = np.array([[1., 2., 3.], [4., 5., 6.]])
a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]])
b1 = np.array([2, 4, 3])
b2 = np.array([b1, b1])
b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]])
ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]],
mask=[[1, 1, 0], [0, 1, 1]])
# No broadcasting, base & exp w/ mask
test = a2m ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# No broadcasting, base w/ mask, exp w/o mask
test = a2m ** b2
assert_equal(test, ctrl)
assert_equal(test.mask, a2m.mask)
# No broadcasting, base w/o mask, exp w/ mask
test = a2 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, b2m.mask)
ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]],
mask=[[0, 1, 0], [0, 1, 0]])
test = b1 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
test = b2m ** b1
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_where(self):
# Test the where function
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
d = where(xm > 2, xm, -9)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
assert_equal(d._mask, xm._mask)
d = where(xm > 2, -9, ym)
assert_equal(d, [5., 0., 3., 2., -1., -9.,
-9., -10., -9., 1., 0., -9.])
assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0])
d = where(xm > 2, xm, masked)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
tmp = xm._mask.copy()
tmp[(xm <= 2).filled(True)] = True
assert_equal(d._mask, tmp)
ixm = xm.astype(int)
d = where(ixm > 2, ixm, masked)
assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3])
assert_equal(d.dtype, ixm.dtype)
def test_where_object(self):
a = np.array(None)
b = masked_array(None)
r = b.copy()
assert_equal(np.ma.where(True, a, a), r)
assert_equal(np.ma.where(True, b, b), r)
def test_where_with_masked_choice(self):
x = arange(10)
x[3] = masked
c = x >= 8
# Set False to masked
z = where(c, x, masked)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is masked)
assert_(z[7] is masked)
assert_(z[8] is not masked)
assert_(z[9] is not masked)
assert_equal(x, z)
# Set True to masked
z = where(c, masked, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
def test_where_with_masked_condition(self):
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
x = arange(1, 6)
x[-1] = masked
y = arange(1, 6) * 10
y[2] = masked
c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0])
cm = c.filled(1)
z = where(c, x, y)
zm = where(cm, x, y)
assert_equal(z, zm)
assert_(getmask(zm) is nomask)
assert_equal(zm, [1, 2, 3, 40, 50])
z = where(c, masked, 1)
assert_equal(z, [99, 99, 99, 1, 1])
z = where(c, 1, masked)
assert_equal(z, [99, 1, 1, 99, 99])
def test_where_type(self):
# Test the type conservation with where
x = np.arange(4, dtype=np.int32)
y = np.arange(4, dtype=np.float32) * 2.2
test = where(x > 1.5, y, x).dtype
control = np.find_common_type([np.int32, np.float32], [])
assert_equal(test, control)
def test_choose(self):
# Test choose
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
chosen = choose([2, 3, 1, 0], choices)
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='clip')
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='wrap')
assert_equal(chosen, array([20, 1, 12, 3]))
# Check with some masked indices
indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([99, 1, 12, 99]))
assert_equal(chosen.mask, [1, 0, 0, 1])
# Check with some masked choices
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([20, 31, 12, 3]))
assert_equal(chosen.mask, [1, 0, 0, 1])
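# Illustrative note: choose() treats out-of-range indices per `mode`:
# 'clip' pins them to the valid range [0, len(choices) - 1], 'wrap'
# reduces them modulo len(choices); a masked index or a masked chosen
# entry simply masks that slot of the result, as verified above.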
def test_choose_with_out(self):
# Test choose with an explicit out keyword
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
store = empty(4, dtype=int)
chosen = choose([2, 3, 1, 0], choices, out=store)
assert_equal(store, array([20, 31, 12, 3]))
self.assertTrue(store is chosen)
# Check with some masked indices + out
store = empty(4, dtype=int)
indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([99, 31, 12, 99]))
assert_equal(store.mask, [1, 0, 0, 1])
# Check with some masked choices + out in an ndarray !
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
store = empty(4, dtype=int).view(ndarray)
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([999999, 31, 12, 999999]))
def test_reshape(self):
a = arange(10)
a[0] = masked
# Try the default
b = a.reshape((5, 2))
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['C'])
# Try w/ arguments as list instead of tuple
b = a.reshape(5, 2)
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['C'])
# Try w/ order
b = a.reshape((5, 2), order='F')
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['F'])
# Try w/ order
b = a.reshape(5, 2, order='F')
assert_equal(b.shape, (5, 2))
self.assertTrue(b.flags['F'])
c = np.reshape(a, (2, 5))
self.assertTrue(isinstance(c, MaskedArray))
assert_equal(c.shape, (2, 5))
self.assertTrue(c[0, 0] is masked)
self.assertTrue(c.flags['C'])
def test_make_mask_descr(self):
# Test make_mask_descr
# Flexible
ntype = [('a', np.float), ('b', np.float)]
test = make_mask_descr(ntype)
assert_equal(test, [('a', np.bool), ('b', np.bool)])
# Standard w/ shape
ntype = (np.float, 2)
test = make_mask_descr(ntype)
assert_equal(test, (np.bool, 2))
# Standard standard
ntype = np.float
test = make_mask_descr(ntype)
assert_equal(test, np.dtype(np.bool))
# Nested
ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])]
test = make_mask_descr(ntype)
control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])])
assert_equal(test, control)
# Named+ shape
ntype = [('a', (np.float, 2))]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([('a', (np.bool, 2))]))
# 2 names
ntype = [(('A', 'a'), float)]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([(('A', 'a'), bool)]))
def test_make_mask(self):
# Test make_mask
# w/ a list as an input
mask = [0, 1]
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a ndarray as an input
mask = np.array([0, 1], dtype=np.bool)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a flexible-type ndarray as an input - use default
mdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [1, 1])
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, mdtype)
assert_equal(test, mask)
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', np.float), ('b', np.float)]
bdtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, bdtype)
assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype))
# test that nomask is returned when m is nomask.
bools = [True, False]
dtypes = [MaskType, np.float]
msgformat = 'copy=%s, shrink=%s, dtype=%s'
for cpy, shr, dt in itertools.product(bools, bools, dtypes):
res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt)
assert_(res is nomask, msgformat % (cpy, shr, dt))
def test_mask_or(self):
# Initialize
mtype = [('a', np.bool), ('b', np.bool)]
mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype)
# Test using nomask as input
test = mask_or(mask, nomask)
assert_equal(test, mask)
test = mask_or(nomask, mask)
assert_equal(test, mask)
# Using False as input
test = mask_or(mask, False)
assert_equal(test, mask)
# Using another array w / the same dtype
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype)
test = mask_or(mask, other)
control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype)
assert_equal(test, control)
# Using another array w / a different dtype
othertype = [('A', np.bool), ('B', np.bool)]
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype)
assert_raises(ValueError, mask_or, mask, other)
# Using nested arrays
dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])]
amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype)
bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype)
cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype)
assert_equal(mask_or(amask, bmask), cntrl)
def test_flatten_mask(self):
# Tests flatten mask
# Standard dtype
mask = np.array([0, 0, 1], dtype=np.bool)
assert_equal(flatten_mask(mask), mask)
# Flexible dtype
mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
test = flatten_mask(mask)
control = np.array([0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
data = [(0, (0, 0)), (0, (0, 1))]
mask = np.array(data, dtype=mdtype)
test = flatten_mask(mask)
control = np.array([0, 0, 0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
def test_on_ndarray(self):
# Test functions on ndarrays
a = np.array([1, 2, 3, 4])
m = array(a, mask=False)
test = anom(a)
assert_equal(test, m.anom())
test = reshape(a, (2, 2))
assert_equal(test, m.reshape(2, 2))
def test_compress(self):
# Test compress function on ndarray and masked array
# Address Github #2495.
arr = np.arange(8)
arr.shape = 4, 2
cond = np.array([True, False, True, True])
control = arr[[0, 2, 3]]
test = np.ma.compress(cond, arr, axis=0)
assert_equal(test, control)
marr = np.ma.array(arr)
test = np.ma.compress(cond, marr, axis=0)
assert_equal(test, control)
def test_compressed(self):
# Test ma.compressed function.
# Address gh-4026
a = np.ma.array([1, 2])
test = np.ma.compressed(a)
assert_(type(test) is np.ndarray)
# Test case when input data is ndarray subclass
class A(np.ndarray):
pass
a = np.ma.array(A(shape=0))
test = np.ma.compressed(a)
assert_(type(test) is A)
# Test that compressed flattens
test = np.ma.compressed([[1],[2]])
assert_equal(test.ndim, 1)
test = np.ma.compressed([[[[[1]]]]])
assert_equal(test.ndim, 1)
# Test case when input is MaskedArray subclass
class M(MaskedArray):
pass
test = np.ma.compressed(M(shape=(0,1,2)))
assert_equal(test.ndim, 1)
# with .compressed() overridden
class M(MaskedArray):
def compressed(self):
return 42
test = np.ma.compressed(M(shape=(0,1,2)))
assert_equal(test, 42)
class TestMaskedFields(TestCase):
def setUp(self):
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = ['one', 'two', 'three', 'four', 'five']
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mdtype = [('a', bool), ('b', bool), ('c', bool)]
mask = [0, 1, 0, 0, 1]
base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)
self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype)
def test_set_records_masks(self):
base = self.data['base']
mdtype = self.data['mdtype']
# Set w/ nomask or masked
base.mask = nomask
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = masked
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ simple boolean
base.mask = False
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = True
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ list
base.mask = [0, 0, 0, 1, 1]
assert_equal_records(base._mask,
np.array([(x, x, x) for x in [0, 0, 0, 1, 1]],
dtype=mdtype))
def test_set_record_element(self):
# Check setting an element of a record
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[0] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 2, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
asbytes_nested(['pi', 'two', 'three', 'four', 'five']))
def test_set_record_slice(self):
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[:3] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 3, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
asbytes_nested(['pi', 'pi', 'pi', 'four', 'five']))
def test_mask_element(self):
"Check record access"
base = self.data['base']
base[0] = masked
for n in ('a', 'b', 'c'):
assert_equal(base[n].mask, [1, 1, 0, 0, 1])
assert_equal(base[n]._data, base._data[n])
def test_getmaskarray(self):
# Test getmaskarray on flexible dtype
ndtype = [('a', int), ('b', float)]
test = empty(3, dtype=ndtype)
assert_equal(getmaskarray(test),
np.array([(0, 0), (0, 0), (0, 0)],
dtype=[('a', '|b1'), ('b', '|b1')]))
test[:] = masked
assert_equal(getmaskarray(test),
np.array([(1, 1), (1, 1), (1, 1)],
dtype=[('a', '|b1'), ('b', '|b1')]))
def test_view(self):
# Test view w/ flexible dtype
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
# Transform globally to simple dtype
test = a.view(float)
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
# Transform globally to a compound dtype (float, 2)
test = a.view((float, 2))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)
a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 0])),
dtype=[('a', bool), ('b', bool)])
# No mask
self.assertTrue(isinstance(a[1], MaskedArray))
# One element masked
self.assertTrue(isinstance(a[0], MaskedArray))
assert_equal_records(a[0]._data, a._data[0])
assert_equal_records(a[0]._mask, a._mask[0])
# All element masked
self.assertTrue(isinstance(a[-2], MaskedArray))
assert_equal_records(a[-2]._data, a._data[-2])
assert_equal_records(a[-2]._mask, a._mask[-2])
def test_setitem(self):
# Issue 4866: check that one can set individual items in [record][col]
# and [col][record] order
ndtype = np.dtype([('a', float), ('b', int)])
ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype)
ma['a'][1] = 3.0
assert_equal(ma['a'], np.array([1.0, 3.0]))
ma[1]['a'] = 4.0
assert_equal(ma['a'], np.array([1.0, 4.0]))
# Issue 2403
mdtype = np.dtype([('a', bool), ('b', bool)])
# soft mask
control = np.array([(False, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a[0]['a'] = 2
assert_equal(a.mask, control)
# hard mask
control = np.array([(True, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a[0]['a'] = 2
assert_equal(a.mask, control)
def test_element_len(self):
# check that len() works for mvoid (Github issue #576)
for rec in self.data['base']:
assert_equal(len(rec), len(self.data['ddtype']))
class TestMaskedView(TestCase):
def setUp(self):
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
self.data = (data, a, controlmask)
def test_view_to_nothing(self):
(data, a, controlmask) = self.data
test = a.view()
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test._data, a._data)
assert_equal(test._mask, a._mask)
def test_view_to_type(self):
(data, a, controlmask) = self.data
test = a.view(np.ndarray)
self.assertTrue(not isinstance(test, MaskedArray))
assert_equal(test, a._data)
assert_equal_records(test, data.view(a.dtype).squeeze())
def test_view_to_simple_dtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view(float)
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
def test_view_to_flexible_dtype(self):
(data, a, controlmask) = self.data
test = a.view([('A', float), ('B', float)])
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'])
assert_equal(test['B'], a['b'])
test = a[0].view([('A', float), ('B', float)])
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][0])
assert_equal(test['B'], a['b'][0])
test = a[-1].view([('A', float), ('B', float)])
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][-1])
assert_equal(test['B'], a['b'][-1])
def test_view_to_subdtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
# View on 1 masked element
test = a[0].view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data[0])
assert_equal(test.mask, (1, 0))
# View on 1 unmasked element
test = a[-1].view((float, 2))
self.assertTrue(isinstance(test, MaskedArray))
assert_equal(test, data[-1])
def test_view_to_dtype_and_type(self):
(data, a, controlmask) = self.data
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
self.assertTrue(not isinstance(test, MaskedArray))
class TestOptionalArgs(TestCase):
def test_ndarrayfuncs(self):
# test axis arg behaves the same as ndarray (including multiple axes)
d = np.arange(24.0).reshape((2,3,4))
m = np.zeros(24, dtype=bool).reshape((2,3,4))
# mask out last element of last dimension
m[:,:,-1] = True
a = np.ma.array(d, mask=m)
def testaxis(f, a, d):
numpy_f = numpy.__getattribute__(f)
ma_f = np.ma.__getattribute__(f)
# test axis arg
assert_equal(ma_f(a, axis=1)[...,:-1], numpy_f(d[...,:-1], axis=1))
assert_equal(ma_f(a, axis=(0,1))[...,:-1],
numpy_f(d[...,:-1], axis=(0,1)))
def testkeepdims(f, a, d):
numpy_f = numpy.__getattribute__(f)
ma_f = np.ma.__getattribute__(f)
# test keepdims arg
assert_equal(ma_f(a, keepdims=True).shape,
numpy_f(d, keepdims=True).shape)
assert_equal(ma_f(a, keepdims=False).shape,
numpy_f(d, keepdims=False).shape)
# test both at once
assert_equal(ma_f(a, axis=1, keepdims=True)[...,:-1],
numpy_f(d[...,:-1], axis=1, keepdims=True))
assert_equal(ma_f(a, axis=(0,1), keepdims=True)[...,:-1],
numpy_f(d[...,:-1], axis=(0,1), keepdims=True))
for f in ['sum', 'prod', 'mean', 'var', 'std']:
testaxis(f, a, d)
testkeepdims(f, a, d)
for f in ['min', 'max']:
testaxis(f, a, d)
d = (np.arange(24).reshape((2,3,4))%2 == 0)
a = np.ma.array(d, mask=m)
for f in ['all', 'any']:
testaxis(f, a, d)
testkeepdims(f, a, d)
def test_count(self):
# test np.ma.count specially
d = np.arange(24.0).reshape((2,3,4))
m = np.zeros(24, dtype=bool).reshape((2,3,4))
m[:,0,:] = True
a = np.ma.array(d, mask=m)
assert_equal(count(a), 16)
assert_equal(count(a, axis=1), 2*ones((2,4)))
assert_equal(count(a, axis=(0,1)), 4*ones((4,)))
assert_equal(count(a, keepdims=True), 16*ones((1,1,1)))
assert_equal(count(a, axis=1, keepdims=True), 2*ones((2,1,4)))
assert_equal(count(a, axis=(0,1), keepdims=True), 4*ones((1,1,4)))
assert_equal(count(a, axis=-2), 2*ones((2,4)))
assert_raises(ValueError, count, a, axis=(1,1))
assert_raises(ValueError, count, a, axis=3)
# check the 'nomask' path
a = np.ma.array(d, mask=nomask)
assert_equal(count(a), 24)
assert_equal(count(a, axis=1), 3*ones((2,4)))
assert_equal(count(a, axis=(0,1)), 6*ones((4,)))
assert_equal(count(a, keepdims=True), 24*ones((1,1,1)))
assert_equal(np.ndim(count(a, keepdims=True)), 3)
assert_equal(count(a, axis=1, keepdims=True), 3*ones((2,1,4)))
assert_equal(count(a, axis=(0,1), keepdims=True), 6*ones((1,1,4)))
assert_equal(count(a, axis=-2), 3*ones((2,4)))
assert_raises(ValueError, count, a, axis=(1,1))
assert_raises(ValueError, count, a, axis=3)
# check the 'masked' singleton
assert_equal(count(np.ma.masked), 0)
# check 0-d arrays do not allow axis > 0
assert_raises(ValueError, count, np.ma.array(1), axis=1)
def test_masked_array():
a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
assert_equal(np.argwhere(a), [[1], [3]])
def test_append_masked_array():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_equal([4,3,2], value=2)
result = np.ma.append(a, b)
expected_data = [1, 2, 3, 4, 3, 2]
expected_mask = [False, True, False, False, False, True]
assert_array_equal(result.data, expected_data)
assert_array_equal(result.mask, expected_mask)
a = np.ma.masked_all((2,2))
b = np.ma.ones((3,1))
result = np.ma.append(a, b)
expected_data = [1] * 3
expected_mask = [True] * 4 + [False] * 3
assert_array_equal(result.data[-3:], expected_data)
assert_array_equal(result.mask, expected_mask)
result = np.ma.append(a, b, axis=None)
assert_array_equal(result.data[-3:], expected_data)
assert_array_equal(result.mask, expected_mask)
def test_append_masked_array_along_axis():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
# When `axis` is specified, `values` must have the correct shape.
assert_raises(ValueError, np.ma.append, a, b, axis=0)
result = np.ma.append(a[np.newaxis,:], b, axis=0)
expected = np.ma.arange(1, 10)
expected[[1, 6]] = np.ma.masked
expected = expected.reshape((3,3))
assert_array_equal(result.data, expected.data)
assert_array_equal(result.mask, expected.mask)
def test_default_fill_value_complex():
# regression test for Python 3, where 'unicode' was not defined
assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j)
###############################################################################
if __name__ == "__main__":
run_module_suite()
| mit |
whip112/Whip112 | vendor/packages/pygments/formatters/svg.py | 76 | 5840 | # -*- coding: utf-8 -*-
"""
pygments.formatters.svg
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for SVG output.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt
__all__ = ['SvgFormatter']
def escape_html(text):
"""Escape &, <, > as well as single and double quotes for HTML."""
return text.replace('&', '&amp;'). \
replace('<', '&lt;'). \
replace('>', '&gt;'). \
replace('"', '&quot;'). \
replace("'", '&#39;')
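# Doctest-style sketch (not part of the original module):
#     >>> escape_html('<&">')
#     '&lt;&amp;&quot;&gt;'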
class2style = {}
class SvgFormatter(Formatter):
"""
Format tokens as an SVG graphics file. This formatter is still experimental.
Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
coordinates containing ``<tspan>`` elements with the individual token styles.
By default, this formatter outputs a full SVG document including doctype
declaration and the ``<svg>`` root element.
.. versionadded:: 0.9
Additional options accepted:
`nowrap`
Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
don't add an XML declaration and a doctype. If true, the `fontfamily`
and `fontsize` options are ignored. Defaults to ``False``.
`fontfamily`
The value to give the wrapping ``<g>`` element's ``font-family``
attribute, defaults to ``"monospace"``.
`fontsize`
The value to give the wrapping ``<g>`` element's ``font-size``
attribute, defaults to ``"14px"``.
`xoffset`
Starting offset in X direction, defaults to ``0``.
`yoffset`
Starting offset in Y direction, defaults to the font size if it is given
in pixels, or ``20`` else. (This is necessary since text coordinates
refer to the text baseline, not the top edge.)
`ystep`
Offset to add to the Y coordinate for each subsequent line. This should
roughly be the text size plus 5. It defaults to that value if the text
size is given in pixels, or ``25`` else.
`spacehack`
Convert spaces in the source to ``&#160;``, which are non-breaking
spaces. SVG provides the ``xml:space`` attribute to control how
whitespace inside tags is handled, in theory, the ``preserve`` value
could be used to keep all whitespace as-is. However, many current SVG
viewers don't obey that rule, so this option is provided as a workaround
and defaults to ``True``.
"""
name = 'SVG'
aliases = ['svg']
filenames = ['*.svg']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.fontfamily = options.get('fontfamily', 'monospace')
self.fontsize = options.get('fontsize', '14px')
self.xoffset = get_int_opt(options, 'xoffset', 0)
fs = self.fontsize.strip()
if fs.endswith('px'): fs = fs[:-2].strip()
try:
int_fs = int(fs)
except ValueError:
int_fs = 20
self.yoffset = get_int_opt(options, 'yoffset', int_fs)
self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
self.spacehack = get_bool_opt(options, 'spacehack', True)
self._stylecache = {}
def format_unencoded(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
For our implementation we put all lines in their own 'line group'.
"""
x = self.xoffset
y = self.yoffset
if not self.nowrap:
if self.encoding:
outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
self.encoding)
else:
outfile.write('<?xml version="1.0"?>\n')
outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
'"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
'svg10.dtd">\n')
outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
outfile.write('<g font-family="%s" font-size="%s">\n' %
(self.fontfamily, self.fontsize))
outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (x, y))
for ttype, value in tokensource:
style = self._get_style(ttype)
tspan = style and '<tspan' + style + '>' or ''
tspanend = tspan and '</tspan>' or ''
value = escape_html(value)
if self.spacehack:
value = value.expandtabs().replace(' ', '&#160;')
parts = value.split('\n')
for part in parts[:-1]:
outfile.write(tspan + part + tspanend)
y += self.ystep
outfile.write('</text>\n<text x="%s" y="%s" '
'xml:space="preserve">' % (x, y))
outfile.write(tspan + parts[-1] + tspanend)
outfile.write('</text>')
if not self.nowrap:
outfile.write('</g></svg>\n')
def _get_style(self, tokentype):
if tokentype in self._stylecache:
return self._stylecache[tokentype]
otokentype = tokentype
while not self.style.styles_token(tokentype):
tokentype = tokentype.parent
value = self.style.style_for_token(tokentype)
result = ''
if value['color']:
result = ' fill="#' + value['color'] + '"'
if value['bold']:
result += ' font-weight="bold"'
if value['italic']:
result += ' font-style="italic"'
self._stylecache[otokentype] = result
return result
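# Hedged usage sketch (not part of the original module); the snippet and
# source string are illustrative only:
#
# from pygments import highlight
# from pygments.lexers import PythonLexer
# svg_doc = highlight('print("hello")', PythonLexer(), SvgFormatter())
# # svg_doc is a complete SVG document; pass nowrap=True to emit only the
# # <text> elements for embedding in an existing SVG.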
| mpl-2.0 |
ameihm0912/MozDef | mq/esworker.papertrail.py | 1 | 22026 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2015 Mozilla Corporation
#
# Contributors:
# Aaron Meihm ameihm@mozilla.com
# Reads from papertrail using the API and inserts log data into ES in
# the same manner as esworker.py
import json
import math
import os
import kombu
import pynsive
import sys
import socket
import time
from configlib import getConfig, OptionParser
from datetime import datetime, timedelta
import calendar
from operator import itemgetter
import requests
sys.path.append(os.path.join(os.path.dirname(__file__), "../lib"))
from elasticsearch_client import ElasticsearchClient, ElasticsearchBadServer, ElasticsearchInvalidIndex, ElasticsearchException
from utilities.toUTC import toUTC
# running under uwsgi?
try:
import uwsgi
hasUWSGI = True
except ImportError as e:
hasUWSGI = False
class PTRequestor(object):
def __init__(self, apikey, evmax=2000):
self._papertrail_api = 'https://papertrailapp.com/api/v1/events/search.json'
self._apikey = apikey
self._events = {}
self._evmax = evmax
self._evidcache = []
def parse_events(self, resp):
for x in resp['events']:
if x['id'] in self._evidcache:
# saw this event last time, just ignore it
continue
self._events[x['id']] = x
if 'reached_record_limit' in resp.keys() and resp['reached_record_limit']:
return resp['min_id']
return None
def makerequest(self, query, stime, etime, maxid):
payload = {
'min_time': calendar.timegm(stime.utctimetuple()),
'max_time': calendar.timegm(etime.utctimetuple()),
'q': query
}
if maxid is not None:
payload['max_id'] = maxid
hdrs = {'X-Papertrail-Token': self._apikey}
resp = requests.get(self._papertrail_api, headers=hdrs, params=payload)
return self.parse_events(resp.json())
def request(self, query, stime, etime):
self._events = {}
maxid = None
while True:
maxid = self.makerequest(query, stime, etime, maxid)
            if maxid is None:
break
if len(self._events.keys()) > self._evmax:
sys.stderr.write('WARNING: papertrail esworker hitting event request limit\n')
break
# cache event ids we return to allow for some duplicate filtering checks
# during next run
self._evidcache = self._events.keys()
return self._events
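# Illustrative usage sketch (not part of the worker, never called here): how
# PTRequestor can be driven by hand. The API key and query string below are
# placeholders, and the one-hour window is an arbitrary choice.
def _example_ptrequestor():
    requestor = PTRequestor('hypothetical-api-key', evmax=500)
    etime = datetime.utcnow()
    stime = etime - timedelta(hours=1)
    # returns a dict of events keyed by papertrail event id
    events = requestor.request('sshd', stime, etime)
    for evid in events:
        print(events[evid].get('message'))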
def removeAt(astring):
'''remove the leading @ from a string'''
return astring.replace('@', '')
def isCEF(aDict):
# determine if this is a CEF event
# could be an event posted to the /cef http endpoint
if 'endpoint' in aDict.keys() and aDict['endpoint'] == 'cef':
return True
# maybe it snuck in some other way
# check some key CEF indicators (the header fields)
if 'fields' in aDict.keys() and isinstance(aDict['fields'], dict):
lowerKeys = [s.lower() for s in aDict['fields'].keys()]
if 'devicevendor' in lowerKeys and 'deviceproduct' in lowerKeys and 'deviceversion' in lowerKeys:
return True
if 'details' in aDict.keys() and isinstance(aDict['details'], dict):
lowerKeys = [s.lower() for s in aDict['details'].keys()]
if 'devicevendor' in lowerKeys and 'deviceproduct' in lowerKeys and 'deviceversion' in lowerKeys:
return True
return False
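# Minimal sketch of what isCEF() accepts: either an explicit 'cef' endpoint
# marker, or the three CEF header fields under 'fields'/'details'.
def _example_iscef():
    cef_like = {'details': {'deviceVendor': 'acme',
                            'deviceProduct': 'fw',
                            'deviceVersion': '1.0'}}
    assert isCEF(cef_like)
    assert isCEF({'endpoint': 'cef'})
    assert not isCEF({'summary': 'hello'})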
def toUnicode(obj, encoding='utf-8'):
if type(obj) in [int, long, float, complex]:
# likely a number, convert it to string to get to unicode
obj = str(obj)
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding)
return obj
def keyMapping(aDict):
'''map common key/fields to a normalized structure,
    explicitly typed when possible to avoid schema changes for upstream consumers
    Special accommodations made for logstash, nxlog, beaver, heka and CEF
Some shippers attempt to conform to logstash-style @fieldname convention.
This strips the leading at symbol since it breaks some elastic search
libraries like elasticutils.
'''
returndict = dict()
# uncomment to save the source event for debugging, or chain of custody/forensics
# returndict['original']=aDict
# set the timestamp when we received it, i.e. now
returndict['receivedtimestamp'] = toUTC(datetime.now()).isoformat()
returndict['mozdefhostname'] = options.mozdefhostname
try:
for k, v in aDict.iteritems():
k = removeAt(k).lower()
if k in ('message', 'summary'):
returndict[u'summary'] = toUnicode(v)
            if k in ('payload',) and 'summary' not in aDict.keys():
# special case for heka if it sends payload as well as a summary, keep both but move payload to the details section.
returndict[u'summary'] = toUnicode(v)
            elif k in ('payload',):
if 'details' not in returndict.keys():
returndict[u'details'] = dict()
returndict[u'details']['payload'] = toUnicode(v)
if k in ('eventtime', 'timestamp', 'utctimestamp'):
returndict[u'utctimestamp'] = toUTC(v).isoformat()
returndict[u'timestamp'] = toUTC(v).isoformat()
if k in ('hostname', 'source_host', 'host'):
returndict[u'hostname'] = toUnicode(v)
            if k in ('tags',):
if len(v) > 0:
returndict[u'tags'] = v
            # nxlog keeps the severity name in syslogseverity; everyone else should use severity or level.
if k in ('syslogseverity', 'severity', 'severityvalue', 'level'):
returndict[u'severity'] = toUnicode(v).upper()
if k in ('facility', 'syslogfacility'):
returndict[u'facility'] = toUnicode(v)
if k in ('pid', 'processid'):
returndict[u'processid'] = toUnicode(v)
# nxlog sets sourcename to the processname (i.e. sshd), everyone else should call it process name or pname
if k in ('pname', 'processname', 'sourcename'):
returndict[u'processname'] = toUnicode(v)
# the file, or source
if k in ('path', 'logger', 'file'):
returndict[u'eventsource'] = toUnicode(v)
if k in ('type', 'eventtype', 'category'):
returndict[u'category'] = toUnicode(v)
# custom fields as a list/array
if k in ('fields', 'details'):
if len(v) > 0:
returndict[u'details'] = v
# custom fields/details as a one off, not in an array
# i.e. fields.something=value or details.something=value
# move them to a dict for consistency in querying
if k.startswith('fields.') or k.startswith('details.'):
newName = k.replace('fields.', '')
newName = newName.lower().replace('details.', '')
# add a dict to hold the details if it doesn't exist
if 'details' not in returndict.keys():
returndict[u'details'] = dict()
# add field with a special case for shippers that
# don't send details
# in an array as int/floats/strings
# we let them dictate the data type with field_datatype
# convention
if newName.endswith('_int'):
returndict[u'details'][unicode(newName)] = int(v)
elif newName.endswith('_float'):
returndict[u'details'][unicode(newName)] = float(v)
else:
returndict[u'details'][unicode(newName)] = toUnicode(v)
#nxlog windows log handling
if 'Domain' in aDict.keys() and 'SourceModuleType' in aDict.keys():
# add a dict to hold the details if it doesn't exist
if 'details' not in returndict.keys():
returndict[u'details'] = dict()
# nxlog parses all windows event fields very well
# copy all fields to details
                returndict[u'details'][k] = v
if 'utctimestamp' not in returndict.keys():
# default in case we don't find a reasonable timestamp
returndict['utctimestamp'] = toUTC(datetime.now()).isoformat()
except Exception as e:
sys.stderr.write('esworker exception normalizing the message %r\n' % e)
return None
return returndict
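# Hedged example of the normalization keyMapping() performs; assumes the
# module-level `options` has been initialized (normally done in __main__).
def _example_keymapping():
    raw = {'message': 'user login',
           'hostname': 'host1',
           'severity': 'info',
           'details.count_int': '42'}
    normalized = keyMapping(raw)
    assert normalized['summary'] == u'user login'
    assert normalized['severity'] == u'INFO'
    # details.* keys are folded into a details dict; the _int suffix
    # convention coerces the value to an integer
    assert normalized['details']['count_int'] == 42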
def esConnect():
'''open or re-open a connection to elastic search'''
return ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)), options.esbulksize)
class taskConsumer(object):
def __init__(self, ptRequestor, esConnection):
self.ptrequestor = ptRequestor
self.esConnection = esConnection
# calculate our initial request window
self.lastRequestTime = toUTC(datetime.now()) - timedelta(seconds=options.ptinterval) - \
timedelta(seconds=options.ptbackoff)
if options.esbulksize != 0:
# if we are bulk posting enable a timer to occasionally flush the bulker even if it's not full
# to prevent events from sticking around an idle worker
self.esConnection.start_bulk_timer()
def run(self):
while True:
try:
curRequestTime = toUTC(datetime.now()) - timedelta(seconds=options.ptbackoff)
records = self.ptrequestor.request(options.ptquery, self.lastRequestTime, curRequestTime)
# update last request time for the next request
self.lastRequestTime = curRequestTime
for msgid in records:
msgdict = records[msgid]
# strip any line feeds from the message itself, we just convert them
# into spaces
msgdict['message'] = msgdict['message'].replace('\n', ' ').replace('\r', '')
event = dict()
event['tags'] = ['papertrail', options.ptacctname]
event['details'] = msgdict
                    if 'generated_at' in event['details']:
                        event['utctimestamp'] = toUTC(event['details']['generated_at']).isoformat()
                    if 'hostname' in event['details']:
                        event['hostname'] = event['details']['hostname']
                    if 'message' in event['details']:
                        event['summary'] = event['details']['message']
                    if 'severity' in event['details']:
                        event['severity'] = event['details']['severity']
                    else:
                        event['severity'] = 'INFO'
event['category'] = 'syslog'
#process message
self.on_message(event, msgdict)
time.sleep(options.ptinterval)
except KeyboardInterrupt:
sys.exit(1)
except ValueError as e:
                sys.stdout.write('Exception while handling message: %r\n' % e)
sys.exit(1)
def on_message(self, body, message):
#print("RECEIVED MESSAGE: %r" % (body, ))
try:
# default elastic search metadata for an event
metadata = {
'index': 'events',
'doc_type': 'event',
'id': None
}
# just to be safe..check what we were sent.
if isinstance(body, dict):
bodyDict = body
elif isinstance(body, str) or isinstance(body, unicode):
try:
bodyDict = json.loads(body) # lets assume it's json
except ValueError as e:
# not json..ack but log the message
sys.stderr.write("esworker exception: unknown body type received %r\n" % body)
#message.ack()
return
else:
sys.stderr.write("esworker exception: unknown body type received %r\n" % body)
#message.ack()
return
if 'customendpoint' in bodyDict.keys() and bodyDict['customendpoint']:
# custom document
# send to plugins to allow them to modify it if needed
(normalizedDict, metadata) = sendEventToPlugins(bodyDict, metadata, pluginList)
else:
# normalize the dict
# to the mozdef events standard
normalizedDict = keyMapping(bodyDict)
# send to plugins to allow them to modify it if needed
if normalizedDict is not None and isinstance(normalizedDict, dict) and normalizedDict.keys():
(normalizedDict, metadata) = sendEventToPlugins(normalizedDict, metadata, pluginList)
# drop the message if a plug in set it to None
# signaling a discard
if normalizedDict is None:
#message.ack()
return
# make a json version for posting to elastic search
jbody = json.JSONEncoder().encode(normalizedDict)
if isCEF(normalizedDict):
# cef records are set to the 'deviceproduct' field value.
metadata['doc_type'] = 'cef'
if 'details' in normalizedDict.keys() and 'deviceproduct' in normalizedDict['details'].keys():
# don't create strange doc types..
if ' ' not in normalizedDict['details']['deviceproduct'] and '.' not in normalizedDict['details']['deviceproduct']:
metadata['doc_type'] = normalizedDict['details']['deviceproduct']
try:
bulk = False
if options.esbulksize != 0:
bulk = True
res = self.esConnection.save_object(
index=metadata['index'],
doc_id=metadata['id'],
doc_type=metadata['doc_type'],
body=jbody,
bulk=bulk
)
except (ElasticsearchBadServer, ElasticsearchInvalidIndex) as e:
# handle loss of server or race condition with index rotation/creation/aliasing
try:
self.esConnection = esConnect()
#message.requeue()
return
except kombu.exceptions.MessageStateError:
# state may be already set.
return
except ElasticsearchException as e:
# exception target for queue capacity issues reported by elastic search so catch the error, report it and retry the message
try:
sys.stderr.write('ElasticSearchException: {0} reported while indexing event'.format(e))
#message.requeue()
return
except kombu.exceptions.MessageStateError:
# state may be already set.
return
#message.ack()
except ValueError as e:
sys.stderr.write("esworker exception in events queue %r\n" % e)
def registerPlugins():
pluginList = list() # tuple of module,registration dict,priority
plugin_manager = pynsive.PluginManager()
if os.path.exists('plugins'):
modules = pynsive.list_modules('plugins')
for mname in modules:
module = pynsive.import_module(mname)
reload(module)
if not module:
raise ImportError('Unable to load module {}'.format(mname))
else:
if 'message' in dir(module):
mclass = module.message()
mreg = mclass.registration
if 'priority' in dir(mclass):
mpriority = mclass.priority
else:
mpriority = 100
if isinstance(mreg, list):
print('[*] plugin {0} registered to receive messages with {1}'.format(mname, mreg))
pluginList.append((mclass, mreg, mpriority))
return pluginList
def checkPlugins(pluginList, lastPluginCheck):
if abs(datetime.now() - lastPluginCheck).seconds > options.plugincheckfrequency:
# print('[*] checking plugins')
lastPluginCheck = datetime.now()
pluginList = registerPlugins()
return pluginList, lastPluginCheck
else:
return pluginList, lastPluginCheck
def dict2List(inObj):
'''given a dictionary, potentially with multiple sub dictionaries
return a list of the dict keys and values
'''
if isinstance(inObj, dict):
for key, value in inObj.iteritems():
if isinstance(value, dict):
for d in dict2List(value):
yield d
elif isinstance(value, list):
yield key.encode('ascii', 'ignore').lower()
for l in dict2List(value):
yield l
else:
yield key.encode('ascii', 'ignore').lower()
if isinstance(value, str):
yield value.lower()
elif isinstance(value, unicode):
yield value.encode('ascii', 'ignore').lower()
else:
yield value
elif isinstance(inObj, list):
for v in inObj:
if isinstance(v, str):
yield v.lower()
elif isinstance(v, unicode):
yield v.encode('ascii', 'ignore').lower()
elif isinstance(v, list):
for l in dict2List(v):
yield l
elif isinstance(v,dict):
for d in dict2List(v):
yield d
else:
yield v
else:
yield ''
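# Illustrative sketch of dict2List() output, to make the plugin matching in
# sendEventToPlugins() concrete. Keys whose value is a dict are not yielded
# themselves; only their nested keys and values are.
def _example_dict2list():
    event = {'category': 'syslog', 'details': {'Program': 'SSHD'}}
    flattened = list(dict2List(event))
    # yields (dict ordering aside): 'category', 'syslog', 'program', 'sshd'
    assert 'sshd' in flattened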
def sendEventToPlugins(anevent, metadata, pluginList):
'''compare the event to the plugin registrations.
    plugins register with a list of keys or values they want to match on
this function compares that registration list
to the current event and sends the event to plugins
in order
'''
if not isinstance(anevent, dict):
raise TypeError('event is type {0}, should be a dict'.format(type(anevent)))
# expecting tuple of module,criteria,priority in pluginList
# sort the plugin list by priority
for plugin in sorted(pluginList, key=itemgetter(2), reverse=False):
# assume we don't run this event through the plugin
send = False
if isinstance(plugin[1], list):
try:
if (set(plugin[1]).intersection([e for e in dict2List(anevent)])):
send = True
except TypeError:
sys.stderr.write('TypeError on set intersection for dict {0}'.format(anevent))
return (anevent, metadata)
if send:
(anevent, metadata) = plugin[0].onMessage(anevent, metadata)
if anevent is None:
# plug-in is signalling to drop this message
# early exit
return (anevent, metadata)
return (anevent, metadata)
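# Sketch of the registration contract: a plugin runs when its registration
# list intersects the flattened event, in ascending priority order. The
# _EchoPlugin below is a stand-in for illustration, not a real MozDef plugin.
class _EchoPlugin(object):
    registration = ['syslog']
    priority = 10
    def onMessage(self, message, metadata):
        return (message, metadata)
def _example_send_to_plugins():
    plugins = [(_EchoPlugin(), _EchoPlugin.registration, _EchoPlugin.priority)]
    event, metadata = sendEventToPlugins({'category': 'syslog'},
                                         {'index': 'events'}, plugins)
    assert event is not None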
def main():
if hasUWSGI:
sys.stdout.write("started as uwsgi mule {0}\n".format(uwsgi.mule_id()))
else:
sys.stdout.write('started without uwsgi\n')
# establish api interface with papertrail
ptRequestor = PTRequestor(options.ptapikey, evmax=options.ptquerymax)
# consume our queue
taskConsumer(ptRequestor, es).run()
def initConfig():
#capture the hostname
options.mozdefhostname = getConfig('mozdefhostname', socket.gethostname(), options.configfile)
# elastic search options. set esbulksize to a non-zero value to enable bulk posting, set timeout to post no matter how many events after X seconds.
options.esservers = list(getConfig('esservers', 'http://localhost:9200', options.configfile).split(','))
options.esbulksize = getConfig('esbulksize', 0, options.configfile)
options.esbulktimeout = getConfig('esbulktimeout', 30, options.configfile)
# papertrail configuration
options.ptapikey = getConfig('papertrailapikey', 'none', options.configfile)
options.ptquery = getConfig('papertrailquery', '', options.configfile)
options.ptinterval = getConfig('papertrailinterval', 60, options.configfile)
options.ptbackoff = getConfig('papertrailbackoff', 300, options.configfile)
options.ptacctname = getConfig('papertrailaccount', 'unset', options.configfile)
options.ptquerymax = getConfig('papertrailmaxevents', 2000, options.configfile)
# plugin options
# secs to pass before checking for new/updated plugins
# seems to cause memory leaks..
# regular updates are disabled for now,
# though we set the frequency anyway.
options.plugincheckfrequency = getConfig('plugincheckfrequency', 120, options.configfile)
if __name__ == '__main__':
# configure ourselves
parser = OptionParser()
parser.add_option("-c", dest='configfile', default=sys.argv[0].replace('.py', '.conf'), help="configuration file to use")
(options, args) = parser.parse_args()
initConfig()
# open ES connection globally so we don't waste time opening it per message
es = esConnect()
# force a check for plugins and establish the plugin list
pluginList = list()
lastPluginCheck = datetime.now()-timedelta(minutes=60)
pluginList, lastPluginCheck = checkPlugins(pluginList, lastPluginCheck)
main()
| mpl-2.0 |
jlspyaozhongkai/Uter | third_party_build/Python-2.7.9/lib/python2.7/idlelib/run.py | 30 | 12387 | import sys
import io
import linecache
import time
import socket
import traceback
import thread
import threading
import Queue
from idlelib import CallTips
from idlelib import AutoComplete
from idlelib import RemoteDebugger
from idlelib import RemoteObjectBrowser
from idlelib import StackViewer
from idlelib import rpc
from idlelib import PyShell
from idlelib import IOBinding
import __main__
LOCALHOST = '127.0.0.1'
import warnings
def idle_showwarning_subproc(
message, category, filename, lineno, file=None, line=None):
"""Show Idle-format warning after replacing warnings.showwarning.
The only difference is the formatter called.
"""
if file is None:
file = sys.stderr
try:
file.write(PyShell.idle_formatwarning(
message, category, filename, lineno, line))
except IOError:
pass # the file (probably stderr) is invalid - this warning gets lost.
_warnings_showwarning = None
def capture_warnings(capture):
"Replace warning.showwarning with idle_showwarning_subproc, or reverse."
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = idle_showwarning_subproc
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
capture_warnings(True)
# Thread shared globals: Establish a queue between a subthread (which handles
# the socket) and the main thread (which runs user code), plus global
# completion, exit and interruptable (the main thread) flags:
exit_now = False
quitting = False
interruptable = False
def main(del_exitfunc=False):
"""Start the Python execution server in a subprocess
In the Python subprocess, RPCServer is instantiated with handlerclass
MyHandler, which inherits register/unregister methods from RPCHandler via
the mix-in class SocketIO.
When the RPCServer 'server' is instantiated, the TCPServer initialization
creates an instance of run.MyHandler and calls its handle() method.
handle() instantiates a run.Executive object, passing it a reference to the
MyHandler object. That reference is saved as attribute rpchandler of the
Executive instance. The Executive methods have access to the reference and
can pass it on to entities that they command
(e.g. RemoteDebugger.Debugger.start_debugger()). The latter, in turn, can
call MyHandler(SocketIO) register/unregister methods via the reference to
register and unregister themselves.
"""
global exit_now
global quitting
global no_exitfunc
no_exitfunc = del_exitfunc
#time.sleep(15) # test subprocess not responding
try:
assert(len(sys.argv) > 1)
port = int(sys.argv[-1])
except:
print>>sys.stderr, "IDLE Subprocess: no IP port passed in sys.argv."
return
capture_warnings(True)
sys.argv[:] = [""]
sockthread = threading.Thread(target=manage_socket,
name='SockThread',
args=((LOCALHOST, port),))
sockthread.setDaemon(True)
sockthread.start()
while 1:
try:
if exit_now:
try:
exit()
except KeyboardInterrupt:
# exiting but got an extra KBI? Try again!
continue
try:
seq, request = rpc.request_queue.get(block=True, timeout=0.05)
except Queue.Empty:
continue
method, args, kwargs = request
ret = method(*args, **kwargs)
rpc.response_queue.put((seq, ret))
except KeyboardInterrupt:
if quitting:
exit_now = True
continue
except SystemExit:
capture_warnings(False)
raise
except:
type, value, tb = sys.exc_info()
try:
print_exception()
rpc.response_queue.put((seq, None))
except:
# Link didn't work, print same exception to __stderr__
traceback.print_exception(type, value, tb, file=sys.__stderr__)
exit()
else:
continue
def manage_socket(address):
for i in range(3):
time.sleep(i)
try:
server = MyRPCServer(address, MyHandler)
break
except socket.error as err:
print>>sys.__stderr__,"IDLE Subprocess: socket error: "\
+ err.args[1] + ", retrying...."
else:
print>>sys.__stderr__, "IDLE Subprocess: Connection to "\
"IDLE GUI failed, exiting."
show_socket_error(err, address)
global exit_now
exit_now = True
return
server.handle_request() # A single request only
def show_socket_error(err, address):
import Tkinter
import tkMessageBox
root = Tkinter.Tk()
root.withdraw()
if err.args[0] == 61: # connection refused
msg = "IDLE's subprocess can't connect to %s:%d. This may be due "\
"to your personal firewall configuration. It is safe to "\
"allow this internal connection because no data is visible on "\
"external ports." % address
tkMessageBox.showerror("IDLE Subprocess Error", msg, parent=root)
else:
tkMessageBox.showerror("IDLE Subprocess Error",
"Socket Error: %s" % err.args[1])
root.destroy()
def print_exception():
import linecache
linecache.checkcache()
flush_stdout()
efile = sys.stderr
typ, val, tb = excinfo = sys.exc_info()
sys.last_type, sys.last_value, sys.last_traceback = excinfo
tbe = traceback.extract_tb(tb)
print>>efile, '\nTraceback (most recent call last):'
exclude = ("run.py", "rpc.py", "threading.py", "Queue.py",
"RemoteDebugger.py", "bdb.py")
cleanup_traceback(tbe, exclude)
traceback.print_list(tbe, file=efile)
lines = traceback.format_exception_only(typ, val)
for line in lines:
print>>efile, line,
def cleanup_traceback(tb, exclude):
"Remove excluded traces from beginning/end of tb; get cached lines"
orig_tb = tb[:]
while tb:
for rpcfile in exclude:
if tb[0][0].count(rpcfile):
break # found an exclude, break for: and delete tb[0]
else:
break # no excludes, have left RPC code, break while:
del tb[0]
while tb:
for rpcfile in exclude:
if tb[-1][0].count(rpcfile):
break
else:
break
del tb[-1]
if len(tb) == 0:
# exception was in IDLE internals, don't prune!
tb[:] = orig_tb[:]
print>>sys.stderr, "** IDLE Internal Exception: "
rpchandler = rpc.objecttable['exec'].rpchandler
for i in range(len(tb)):
fn, ln, nm, line = tb[i]
if nm == '?':
nm = "-toplevel-"
if not line and fn.startswith("<pyshell#"):
line = rpchandler.remotecall('linecache', 'getline',
(fn, ln), {})
tb[i] = fn, ln, nm, line
def flush_stdout():
try:
if sys.stdout.softspace:
sys.stdout.softspace = 0
sys.stdout.write("\n")
except (AttributeError, EOFError):
pass
def exit():
"""Exit subprocess, possibly after first deleting sys.exitfunc
If config-main.cfg/.def 'General' 'delete-exitfunc' is True, then any
sys.exitfunc will be removed before exiting. (VPython support)
"""
if no_exitfunc:
try:
del sys.exitfunc
except AttributeError:
pass
capture_warnings(False)
sys.exit(0)
class MyRPCServer(rpc.RPCServer):
def handle_error(self, request, client_address):
"""Override RPCServer method for IDLE
Interrupt the MainThread and exit server if link is dropped.
"""
global quitting
try:
raise
except SystemExit:
raise
except EOFError:
global exit_now
exit_now = True
thread.interrupt_main()
except:
erf = sys.__stderr__
print>>erf, '\n' + '-'*40
print>>erf, 'Unhandled server exception!'
print>>erf, 'Thread: %s' % threading.currentThread().getName()
print>>erf, 'Client Address: ', client_address
print>>erf, 'Request: ', repr(request)
traceback.print_exc(file=erf)
print>>erf, '\n*** Unrecoverable, server exiting!'
print>>erf, '-'*40
quitting = True
thread.interrupt_main()
class MyHandler(rpc.RPCHandler):
def handle(self):
"""Override base method"""
executive = Executive(self)
self.register("exec", executive)
self.console = self.get_remote_proxy("console")
sys.stdin = PyShell.PseudoInputFile(self.console, "stdin",
IOBinding.encoding)
sys.stdout = PyShell.PseudoOutputFile(self.console, "stdout",
IOBinding.encoding)
sys.stderr = PyShell.PseudoOutputFile(self.console, "stderr",
IOBinding.encoding)
# Keep a reference to stdin so that it won't try to exit IDLE if
# sys.stdin gets changed from within IDLE's shell. See issue17838.
self._keep_stdin = sys.stdin
self.interp = self.get_remote_proxy("interp")
rpc.RPCHandler.getresponse(self, myseq=None, wait=0.05)
def exithook(self):
"override SocketIO method - wait for MainThread to shut us down"
time.sleep(10)
def EOFhook(self):
"Override SocketIO method - terminate wait on callback and exit thread"
global quitting
quitting = True
thread.interrupt_main()
def decode_interrupthook(self):
"interrupt awakened thread"
global quitting
quitting = True
thread.interrupt_main()
class Executive(object):
def __init__(self, rpchandler):
self.rpchandler = rpchandler
self.locals = __main__.__dict__
self.calltip = CallTips.CallTips()
self.autocomplete = AutoComplete.AutoComplete()
def runcode(self, code):
global interruptable
try:
self.usr_exc_info = None
interruptable = True
try:
exec code in self.locals
finally:
interruptable = False
except SystemExit:
# Scripts that raise SystemExit should just
# return to the interactive prompt
pass
except:
self.usr_exc_info = sys.exc_info()
if quitting:
exit()
print_exception()
jit = self.rpchandler.console.getvar("<<toggle-jit-stack-viewer>>")
if jit:
self.rpchandler.interp.open_remote_stack_viewer()
else:
flush_stdout()
def interrupt_the_server(self):
if interruptable:
thread.interrupt_main()
def start_the_debugger(self, gui_adap_oid):
return RemoteDebugger.start_debugger(self.rpchandler, gui_adap_oid)
def stop_the_debugger(self, idb_adap_oid):
"Unregister the Idb Adapter. Link objects and Idb then subject to GC"
self.rpchandler.unregister(idb_adap_oid)
def get_the_calltip(self, name):
return self.calltip.fetch_tip(name)
def get_the_completion_list(self, what, mode):
return self.autocomplete.fetch_completions(what, mode)
def stackviewer(self, flist_oid=None):
if self.usr_exc_info:
typ, val, tb = self.usr_exc_info
else:
return None
flist = None
if flist_oid is not None:
flist = self.rpchandler.get_remote_proxy(flist_oid)
while tb and tb.tb_frame.f_globals["__name__"] in ["rpc", "run"]:
tb = tb.tb_next
sys.last_type = typ
sys.last_value = val
item = StackViewer.StackTreeItem(flist, tb)
return RemoteObjectBrowser.remote_object_tree_item(item)
capture_warnings(False) # Make sure turned off; see issue 18081
| gpl-3.0 |
evansd/django | tests/template_tests/filter_tests/test_stringformat.py | 73 | 1191 | from django.template.defaultfilters import stringformat
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class StringformatTests(SimpleTestCase):
"""
Notice that escaping is applied *after* any filters, so the string
formatting here only needs to deal with pre-escaped characters.
"""
@setup({
'stringformat01':
'{% autoescape off %}.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.{% endautoescape %}'
})
def test_stringformat01(self):
output = self.engine.render_to_string('stringformat01', {'a': 'a<b', 'b': mark_safe('a<b')})
self.assertEqual(output, '. a<b. . a<b.')
@setup({'stringformat02': '.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.'})
def test_stringformat02(self):
output = self.engine.render_to_string('stringformat02', {'a': 'a<b', 'b': mark_safe('a<b')})
self.assertEqual(output, '. a<b. . a<b.')
class FunctionTests(SimpleTestCase):
def test_format(self):
self.assertEqual(stringformat(1, '03d'), '001')
def test_invalid(self):
self.assertEqual(stringformat(1, 'z'), '')
| bsd-3-clause |
JioCloud/tempest | tempest/services/compute/json/volumes_extensions_client.py | 6 | 4245 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
from six.moves.urllib import parse as urllib
from tempest_lib import exceptions as lib_exc
from tempest.api_schema.response.compute.v2_1 import volumes as schema
from tempest.common import service_client
from tempest import exceptions
class VolumesExtensionsClient(service_client.ServiceClient):
def __init__(self, auth_provider, service, region,
default_volume_size=1, **kwargs):
super(VolumesExtensionsClient, self).__init__(
auth_provider, service, region, **kwargs)
self.default_volume_size = default_volume_size
def list_volumes(self, detail=False, **params):
"""List all the volumes created."""
url = 'os-volumes'
if detail:
url += '/detail'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(schema.list_volumes, resp, body)
return service_client.ResponseBodyList(resp, body['volumes'])
def show_volume(self, volume_id):
"""Returns the details of a single volume."""
url = "os-volumes/%s" % volume_id
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(schema.create_get_volume, resp, body)
return service_client.ResponseBody(resp, body['volume'])
def create_volume(self, size=None, **kwargs):
"""
Creates a new Volume.
size(Required): Size of volume in GB.
Following optional keyword arguments are accepted:
display_name: Optional Volume Name.
metadata: A dictionary of values to be used as metadata.
"""
if size is None:
size = self.default_volume_size
post_body = {
'size': size
}
post_body.update(kwargs)
post_body = json.dumps({'volume': post_body})
resp, body = self.post('os-volumes', post_body)
body = json.loads(body)
self.validate_response(schema.create_get_volume, resp, body)
return service_client.ResponseBody(resp, body['volume'])
def delete_volume(self, volume_id):
"""Deletes the Specified Volume."""
resp, body = self.delete("os-volumes/%s" % volume_id)
self.validate_response(schema.delete_volume, resp, body)
return service_client.ResponseBody(resp, body)
def wait_for_volume_status(self, volume_id, status):
"""Waits for a Volume to reach a given status."""
body = self.show_volume(volume_id)
volume_status = body['status']
start = int(time.time())
while volume_status != status:
time.sleep(self.build_interval)
body = self.show_volume(volume_id)
volume_status = body['status']
if volume_status == 'error':
raise exceptions.VolumeBuildErrorException(volume_id=volume_id)
if int(time.time()) - start >= self.build_timeout:
message = ('Volume %s failed to reach %s status (current %s) '
'within the required time (%s s).' %
(volume_id, status, volume_status,
self.build_timeout))
raise exceptions.TimeoutException(message)
def is_resource_deleted(self, id):
try:
self.show_volume(id)
except lib_exc.NotFound:
return True
return False
@property
def resource_type(self):
"""Returns the primary type of resource this client works with."""
return 'volume'
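# Illustrative sketch (not part of tempest): a typical create/wait/delete
# round trip with this client. The 'id' key and 'available' status follow the
# usual Compute volume API shape and are assumptions here, as is the
# pre-built `client` instance passed in.
def _example_volume_round_trip(client):
    volume = client.create_volume(size=1, display_name='demo')
    client.wait_for_volume_status(volume['id'], 'available')
    client.delete_volume(volume['id'])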
| apache-2.0 |
fchauvel/MAD | mad/simulation/tasks.py | 1 | 8578 | #!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
from enum import Enum
from mad.evaluation import Symbols
from mad.simulation.commons import SimulatedEntity
class TaskPool:
def put(self, task):
raise NotImplementedError("TaskPool::put is abstract")
def take(self):
raise NotImplementedError("TaskPool::take is abstract")
@property
def size(self):
raise NotImplementedError("TaskPool::size is abstract")
@property
def blocked_count(self):
raise NotImplementedError("TaskPool::size is abstract")
@property
def are_pending(self):
raise NotImplementedError("TaskPool::are_pending is abstract")
def activate(self, task):
raise NotImplementedError("TaskPool::activate is abstract")
def pause(self, task):
raise NotImplementedError("TaskPool::pause is abstract")
def intercept(self, task):
raise NotImplementedError("TaskPool::intercept is abstract")
class TaskPoolDecorator(TaskPool):
def __init__(self, delegate):
assert isinstance(delegate, TaskPool), "Delegate should be a TaskPool (found '{!s}')".format(type(delegate))
self.delegate = delegate
def put(self, task):
self.delegate.put(task)
def take(self):
return self.delegate.take()
@TaskPool.size.getter
def size(self):
return self.delegate.size
@TaskPool.blocked_count.getter
def blocked_count(self):
return self.delegate.blocked_count
@TaskPool.are_pending.getter
def are_pending(self):
return self.delegate.are_pending
def activate(self, task):
self.delegate.activate(task)
def pause(self, task):
self.delegate.pause(task)
def intercept(self, task):
self.delegate.intercept(task)
class TaskPoolWrapper(TaskPoolDecorator, SimulatedEntity):
"""
Wrap a task pool into a simulation entity that that properly log events
"""
def __init__(self, environment, delegate):
SimulatedEntity.__init__(self, Symbols.QUEUE, environment)
TaskPoolDecorator.__init__(self, delegate)
def put(self, task):
super().put(task)
def take(self):
task = super().take()
return task
def activate(self, task):
super().activate(task)
class AbstractTaskPool(TaskPool):
def __init__(self):
super().__init__()
self.tasks = []
self.interrupted = []
self.paused = []
def pause(self, task):
self.paused.append(task)
def intercept(self, task):
self.paused.remove(task)
def put(self, task):
task.accept()
task.activate()
self.tasks.append(task)
def take(self):
if len(self.interrupted) > 0:
return self._pick_from(self.interrupted)
else:
if len(self.tasks) > 0:
return self._pick_from(self.tasks)
raise ValueError("Unable to take from an empty task pool!")
def _pick_from(self, candidates):
high_priority = self._highest_priority(candidates)
        return self._next(high_priority)
@staticmethod
def _highest_priority(candidates):
highest = max(candidates, key=lambda task: task.priority)
return [any_task for any_task in candidates if any_task.priority == highest.priority]
def _next(self, candidates):
raise NotImplementedError("TaskPool::_next is abstract!")
def _remove(self, task):
if task in self.tasks: self.tasks.remove(task)
if task in self.interrupted: self.interrupted.remove(task)
@property
def is_empty(self):
return self.size == 0
@TaskPool.are_pending.getter
def are_pending(self):
return not self.is_empty
@TaskPool.size.getter
def size(self):
return len(self.tasks) + len(self.interrupted)
@TaskPool.blocked_count.getter
def blocked_count(self):
return len(self.paused)
def activate(self, task):
assert task in self.paused, "Error: Req. {:d} should have been paused!".format(task.request.identifier)
self.paused.remove(task)
self.interrupted.append(task)
class FIFOTaskPool(AbstractTaskPool):
def __init__(self):
super().__init__()
def _next(self, candidates):
selected = candidates[0]
self._remove(selected)
return selected
class LIFOTaskPool(AbstractTaskPool):
def __init__(self):
super().__init__()
def _next(self, candidates):
selected = candidates[-1]
self._remove(selected)
return selected
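# Illustrative sketch (not part of MAD): the pool hands out the highest
# priority task first, FIFO among equal priorities. _StubTask fakes only the
# Task surface the pool touches (accept/activate/priority).
class _StubTask:
    def __init__(self, priority):
        self.priority = priority
    def accept(self):
        pass
    def activate(self):
        pass
def _example_fifo_pool():
    pool = FIFOTaskPool()
    low, high = _StubTask(priority=1), _StubTask(priority=5)
    pool.put(low)
    pool.put(high)
    assert pool.take() is high
    assert pool.take() is low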
class TaskStatus(Enum):
CREATED, RUNNING, BLOCKED, READY, REJECTED, FAILED, SUCCESSFUL = range(7)
class Task:
def __init__(self, service, request=None):
self.service = service
self.worker = None
self.request = request
self.status = TaskStatus.CREATED
@property
def priority(self):
return self.request.priority
@property
def identifier(self):
return self.request.identifier
@property
def is_cancelled(self):
return not self.request.is_pending
@property
def operation(self):
return self.request.operation
def accept(self):
self._assert_status_is(TaskStatus.CREATED)
self.service.listener.task_accepted(self)
self.request.accept()
def reject(self):
self._assert_status_is(TaskStatus.CREATED)
self.service.listener.task_rejected(self)
        self.status = TaskStatus.REJECTED
self.request.reject()
def activate(self):
self._assert_status_is(TaskStatus.CREATED, TaskStatus.BLOCKED)
self.service.listener.task_activated(self)
self.status = TaskStatus.READY
def assign_to(self, worker):
assert worker, "task assigned to None!"
self._assert_status_is(TaskStatus.CREATED, TaskStatus.READY)
self.worker = worker
if self.is_cancelled:
self.discard()
else:
self.service.listener.task_assigned_to(self, worker)
self.status = TaskStatus.RUNNING
self._execute(worker)
def _execute(self, worker):
"""
This method is ASSIGNED during the evaluation to control how to resume it once it has been paused
"""
self._assert_status_is(TaskStatus.RUNNING)
operation = worker.look_up(self.operation)
operation.invoke(self, [], worker=worker)
def pause(self):
self._assert_status_is(TaskStatus.RUNNING)
self.service.listener.task_paused(self)
self.status = TaskStatus.BLOCKED
self.service.pause(self)
self.service.release(self.worker)
def resume_with(self, on_resume):
self._assert_status_is(TaskStatus.BLOCKED)
self._execute = on_resume
self.service.activate(self)
def compute(self, duration, continuation):
assert self.worker is not None, "Cannot compute, no worker attached!"
self.worker.compute(duration, continuation)
def finalise(self, status):
self.request.finalise(self, status)
def succeed(self):
self._assert_status_is(TaskStatus.RUNNING)
self.service.listener.task_successful(self)
self.status = TaskStatus.SUCCESSFUL
self.service.release(self.worker)
def discard(self):
self._assert_status_is(TaskStatus.CREATED, TaskStatus.RUNNING, TaskStatus.READY)
self.worker.listener.task_cancelled(self)
self.status = TaskStatus.FAILED
self.worker.release()
def fail(self):
self._assert_status_is(TaskStatus.RUNNING)
self.service.listener.task_failed(self)
        self.status = TaskStatus.FAILED
self.service.release(self.worker)
def _assert_status_is(self, *legal_states):
assert self.status in legal_states, \
"Found status == {0.name} (expecting {1!s})".format(self.status, [ s.name for s in legal_states ])
| gpl-3.0 |
mikebentley15/cs6350_project_ml | indexing/src/TrainingExample.py | 1 | 1448 | '''
Provides a TrainingExample named tuple for convenience.
Also, factory functions exist such as trainingExamplesFromSvm().
'''
from collections import namedtuple
import numpy
TrainingExample = namedtuple('TrainingExample', ('label', 'features'))
# '''
# TrainingExample: NamedTuple to contain a single training example
#
# Attributes:
# label: Label attached to the label, can be of any type you need, usually str
# features: A numpy array of features and values. Note: not a sparse array.
# '''
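# Hedged usage sketch (not in the original module): constructing an example
# by hand; fromSvm() below produces int labels with dense numpy feature rows.
def _example_training_example():
    ex = TrainingExample(label=1, features=numpy.array([0.0, 2.5, 0.0]))
    assert ex.label == 1
    assert ex.features[1] == 2.5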
def fromSvm(filepath):
'''
Loads the training examples from the given file in libSVM format.
Returns a list of training examples, one for each row.
'''
labels = []
sparseFeatureList = []
with open(filepath, 'r') as trainFile:
for line in trainFile:
split = line.split()
labels.append(int(split[0]))
sparseFeatureList.append(sorted([
(int(x.split(':')[0]), float(x.split(':')[1]))
for x in split[1:]
]))
maximumIndex = max(x[-1][0] for x in sparseFeatureList)
trainingExamples = []
for trainingIndex, sparseFeatures in enumerate(sparseFeatureList):
features = numpy.zeros(maximumIndex + 1)
for featureIndex, value in sparseFeatures:
features[featureIndex] = value
trainingExamples.append(TrainingExample(labels[trainingIndex], features))
    return trainingExamples
| mit |
fenginx/django | tests/urlpatterns_reverse/urls.py | 61 | 4778 | from django.urls import include, path, re_path
from .views import (
absolute_kwargs_view, defaults_view, empty_view, empty_view_nested_partial,
empty_view_partial, empty_view_wrapped, nested_view,
)
other_patterns = [
path('non_path_include/', empty_view, name='non_path_include'),
path('nested_path/', nested_view),
]
urlpatterns = [
re_path(r'^places/([0-9]+)/$', empty_view, name='places'),
re_path(r'^places?/$', empty_view, name='places?'),
re_path(r'^places+/$', empty_view, name='places+'),
re_path(r'^places*/$', empty_view, name='places*'),
re_path(r'^(?:places/)?$', empty_view, name='places2?'),
re_path(r'^(?:places/)+$', empty_view, name='places2+'),
re_path(r'^(?:places/)*$', empty_view, name='places2*'),
re_path(r'^places/([0-9]+|[a-z_]+)/', empty_view, name='places3'),
re_path(r'^places/(?P<id>[0-9]+)/$', empty_view, name='places4'),
re_path(r'^people/(?P<name>\w+)/$', empty_view, name='people'),
re_path(r'^people/(?:name/)$', empty_view, name='people2'),
re_path(r'^people/(?:name/(\w+)/)?$', empty_view, name='people2a'),
re_path(r'^people/(?P<name>\w+)-(?P=name)/$', empty_view, name='people_backref'),
re_path(r'^optional/(?P<name>.*)/(?:.+/)?', empty_view, name='optional'),
re_path(r'^optional/(?P<arg1>\d+)/(?:(?P<arg2>\d+)/)?', absolute_kwargs_view, name='named_optional'),
re_path(r'^optional/(?P<arg1>\d+)/(?:(?P<arg2>\d+)/)?$', absolute_kwargs_view, name='named_optional_terminated'),
re_path(r'^nested/noncapture/(?:(?P<p>\w+))$', empty_view, name='nested-noncapture'),
re_path(r'^nested/capture/((\w+)/)?$', empty_view, name='nested-capture'),
re_path(r'^nested/capture/mixed/((?P<p>\w+))$', empty_view, name='nested-mixedcapture'),
re_path(r'^nested/capture/named/(?P<outer>(?P<inner>\w+)/)?$', empty_view, name='nested-namedcapture'),
re_path(r'^hardcoded/$', empty_view, name='hardcoded'),
re_path(r'^hardcoded/doc\.pdf$', empty_view, name='hardcoded2'),
re_path(r'^people/(?P<state>\w\w)/(?P<name>\w+)/$', empty_view, name='people3'),
re_path(r'^people/(?P<state>\w\w)/(?P<name>[0-9])/$', empty_view, name='people4'),
re_path(r'^people/((?P<state>\w\w)/test)?/(\w+)/$', empty_view, name='people6'),
re_path(r'^character_set/[abcdef0-9]/$', empty_view, name='range'),
re_path(r'^character_set/[\w]/$', empty_view, name='range2'),
re_path(r'^price/\$([0-9]+)/$', empty_view, name='price'),
re_path(r'^price/[$]([0-9]+)/$', empty_view, name='price2'),
re_path(r'^price/[\$]([0-9]+)/$', empty_view, name='price3'),
re_path(r'^product/(?P<product>\w+)\+\(\$(?P<price>[0-9]+(\.[0-9]+)?)\)/$', empty_view, name='product'),
re_path(r'^headlines/(?P<year>[0-9]+)\.(?P<month>[0-9]+)\.(?P<day>[0-9]+)/$', empty_view, name='headlines'),
re_path(r'^windows_path/(?P<drive_name>[A-Z]):\\(?P<path>.+)/$', empty_view, name='windows'),
re_path(r'^special_chars/(?P<chars>.+)/$', empty_view, name='special'),
re_path(r'^(?P<name>.+)/[0-9]+/$', empty_view, name='mixed'),
re_path(r'^repeats/a{1,2}/$', empty_view, name='repeats'),
re_path(r'^repeats/a{2,4}/$', empty_view, name='repeats2'),
re_path(r'^repeats/a{2}/$', empty_view, name='repeats3'),
re_path(r'^test/1/?', empty_view, name='test'),
re_path(r'^outer/(?P<outer>[0-9]+)/', include('urlpatterns_reverse.included_urls')),
re_path(r'^outer-no-kwargs/([0-9]+)/', include('urlpatterns_reverse.included_no_kwargs_urls')),
re_path('', include('urlpatterns_reverse.extra_urls')),
re_path(r'^lookahead-/(?!not-a-city)(?P<city>[^/]+)/$', empty_view, name='lookahead-negative'),
re_path(r'^lookahead\+/(?=a-city)(?P<city>[^/]+)/$', empty_view, name='lookahead-positive'),
re_path(r'^lookbehind-/(?P<city>[^/]+)(?<!not-a-city)/$', empty_view, name='lookbehind-negative'),
re_path(r'^lookbehind\+/(?P<city>[^/]+)(?<=a-city)/$', empty_view, name='lookbehind-positive'),
# Partials should be fine.
path('partial/', empty_view_partial, name='partial'),
path('partial_nested/', empty_view_nested_partial, name='partial_nested'),
path('partial_wrapped/', empty_view_wrapped, name='partial_wrapped'),
# This is non-reversible, but we shouldn't blow up when parsing it.
re_path(r'^(?:foo|bar)(\w+)/$', empty_view, name='disjunction'),
path('absolute_arg_view/', absolute_kwargs_view),
# Tests for #13154. Mixed syntax to test both ways of defining URLs.
re_path(r'^defaults_view1/(?P<arg1>[0-9]+)/$', defaults_view, {'arg2': 1}, name='defaults'),
re_path(r'^defaults_view2/(?P<arg1>[0-9]+)/$', defaults_view, {'arg2': 2}, 'defaults'),
path('includes/', include(other_patterns)),
# Security tests
re_path('(.+)/security/$', empty_view, name='security'),
]
| bsd-3-clause |
HydrelioxGitHub/home-assistant | tests/components/nuheat/test_init.py | 12 | 1431 | """NuHeat component tests."""
import unittest
from unittest.mock import patch
from tests.common import get_test_home_assistant, MockDependency
from homeassistant.components import nuheat
VALID_CONFIG = {
"nuheat": {
"username": "warm",
"password": "feet",
"devices": "thermostat123"
}
}
class TestNuHeat(unittest.TestCase):
"""Test the NuHeat component."""
def setUp(self): # pylint: disable=invalid-name
"""Initialize the values for this test class."""
self.hass = get_test_home_assistant()
self.config = VALID_CONFIG
def tearDown(self): # pylint: disable=invalid-name
"""Teardown this test class. Stop hass."""
self.hass.stop()
@MockDependency("nuheat")
@patch("homeassistant.helpers.discovery.load_platform")
def test_setup(self, mocked_nuheat, mocked_load):
"""Test setting up the NuHeat component."""
nuheat.setup(self.hass, self.config)
mocked_nuheat.NuHeat.assert_called_with("warm", "feet")
assert nuheat.DOMAIN in self.hass.data
assert 2 == len(self.hass.data[nuheat.DOMAIN])
assert isinstance(self.hass.data[nuheat.DOMAIN][0],
type(mocked_nuheat.NuHeat()))
assert self.hass.data[nuheat.DOMAIN][1] == "thermostat123"
mocked_load.assert_called_with(
self.hass, "climate", nuheat.DOMAIN, {}, self.config
)
| apache-2.0 |
charbeljc/e-commerce | __unported__/product_links_goodies/purchase.py | 18 | 9124 | # -*- encoding: utf-8 -*-
###############################################################################
# #
# purchase_outillage for OpenERP #
# Copyright (C) 2011 Akretion Sébastien BEAU <sebastien.beau@akretion.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from openerp.osv.orm import Model
from openerp.osv import fields
class purchase_order_line(Model):
_inherit = "purchase.order.line"
_columns = {
'goodie_for_line_id': fields.many2one('purchase.order.line', 'Goodies for', help='The product linked to this goodie lines'),
'goodies_line_ids': fields.one2many('purchase.order.line', 'goodie_for_line_id', 'Goodies linked', help=''),
}
def write(self, cr, uid, ids, vals, context=None):
if context is None: context={}
#TODO I should apply this only for automatic po need a read only mode
if context.get("updated_from_op"):
if not context.get('goodies_create_update'):
ctx = context.copy()
ctx['goodies_create_update'] = True
for line in self.browse(cr, uid, ids, context=None):
if line.product_id.is_purchase_goodies(context=ctx):
vals['product_qty'] = self._get_new_qty_for_none_goodies_line(cr, uid, vals['product_qty'], line.product_id.id, line.order_id.id, context=ctx)
super(purchase_order_line, self).write(cr, uid, line.id, vals, context=ctx)
qty_added = vals['product_qty'] - line.product_qty
for goodie in line.product_id.supplier_goodies_ids:
qty = goodie.get_quantity(qty_added, context=ctx)
po_line_for_goodie = False
for goodies_line in line.goodies_line_ids:
if goodies_line.product_id.id == goodie.linked_product_id.id:
po_line_for_goodie = goodies_line
break
#TODO manage correctly uom
if po_line_for_goodie:
po_line_for_goodie.write({'product_qty': po_line_for_goodie.product_qty + qty}, context=ctx)
else:
self.create(cr, uid,
self._prepare_goodies_line(cr, uid, line.id, goodie, qty, line.order_id, line.date_planned, context=ctx),
context=ctx)
self.update_none_goodies_line(cr, uid, qty, goodie.linked_product_id.id, line.order_id.id, context=ctx)
return True
return super(purchase_order_line, self).write(cr, uid, ids, vals, context=context)
def create(self, cr, uid, vals, context=None):
if context is None: context={}
if not context.get('goodies_create_update'):
ctx = context.copy()
ctx['goodies_create_update'] = True
product_obj = self.pool.get('product.product')
product = product_obj.browse(cr, uid, vals['product_id'], context=ctx)
if product.is_purchase_goodies(context=ctx):
vals['product_qty'] = self._get_new_qty_for_none_goodies_line(cr, uid, vals['product_qty'], vals['product_id'], vals['order_id'], context=ctx)
line_id = super(purchase_order_line, self).create(cr, uid, vals, context=context)
order = self.pool.get('purchase.order').browse(cr, uid, vals['order_id'], context=context)
for goodie in product.supplier_goodies_ids:
qty = goodie.get_quantity(vals['product_qty'], context=ctx)
self.create(cr, uid,
self._prepare_goodies_line(cr, uid, line_id, goodie, qty, order, vals.get('date_planned'), context=ctx),
context=ctx)
self.update_none_goodies_line(cr, uid, qty, goodie.linked_product_id.id, order.id, context=ctx)
return line_id
else:
return super(purchase_order_line, self).create(cr, uid, vals, context=context)
def _get_new_qty_for_none_goodies_line(self, cr, uid, qty, product_id, order_id, context=None):
"""If we want to buy X more product B we have to check if there is not already goodies
line that containt this product. If yes we have to reduce the qty to buy by the the total
of goodies lines
:params qty float: quantity of product to buy
:params product_id int: product id
:params order_id: order id
:return: the quantity for the none goodies line reduced by the quantity of goodies line
:rtype: float
"""
goodies_line_ids = self.search(cr, uid, [
['order_id', '=', order_id],
['product_id', '=', product_id],
['goodie_for_line_id', '!=', False]
], context=context)
for goodie_line in self.browse(cr, uid, goodies_line_ids, context=context):
qty -= goodie_line.product_qty
if qty < 0:
qty = 0
return qty
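    # Worked example (illustrative): buying 10 units of a product when goodie
    # lines in the same order already carry 3 free units leaves
    # 10 - 3 = 7 units on the regular line; a negative result is clamped to 0.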
def update_none_goodies_line(self, cr, uid, goodies_qty, product_id, order_id, context=None):
"""Update the none line goodies, by this I mean :
If you sold a product A with a goodies B
If the scheduler have run a minimal rule for B before running the A rule.
We have a line for the B product and we should remove the qty added by the goodies
:params goodies_qty float: quantity of goodies product
:params product_id int: product id
:params order_id: order id
:return: True
:rtype: Boolean
"""
product_line_id = self.search(cr, uid, [
['order_id', '=', order_id],
['product_id', '=', product_id],
['goodie_for_line_id', '=', False]
], context=context)
if product_line_id:
product_line = self.browse(cr, uid, product_line_id[0], context=context)
new_qty = product_line.product_qty - goodies_qty
if new_qty < 0: new_qty = 0
product_line.write({'product_qty': new_qty}, context=context)
return True
def _prepare_goodies_line(self, cr, uid, line_id, goodie, qty, order, date_planned, context=None):
"""Prepare the purchase order line for goodies
:params goodies browse_record: browse_record of product_links
:params qty float: quantity of goodies to buy
:params order browse_record: purchase order that contain this line
:params schedule_date str: planned to for receiving the product
:return: dictionnary of value for creating the purchase order line
:rtype: dict
"""
#TODO manage correctly uom
acc_pos_obj = self.pool.get('account.fiscal.position')
taxes_ids = goodie.product_id.supplier_taxes_id
taxes = acc_pos_obj.map_tax(cr, uid, order.partner_id.property_account_position, taxes_ids)
ctx = context.copy()
#set the partner id in the context in order to have the good name for product
ctx['partner_id'] = order.partner_id.id
product = self.pool.get('product.product').browse(cr, uid, goodie.linked_product_id.id, context=ctx)
return {
'name': ">>>%s"%product.partner_ref,
'product_qty': qty,
'product_id': product.id,
'product_uom': product.uom_po_id.id,
'price_unit': goodie.cost_price or 0.0,
'date_planned': date_planned,
'notes': product.description_purchase,
'taxes_id': [(6,0,taxes)],
'order_id': order.id,
'goodie_for_line_id': line_id
}
| agpl-3.0 |
ahaberlie/MetPy | tests/plots/test_cartopy_utils.py | 3 | 2612 | # Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the cartopy utilities."""
import cartopy.crs as ccrs
import matplotlib
import matplotlib.pyplot as plt
import pytest
from metpy.plots import USCOUNTIES, USSTATES
# Fixtures to make sure we have the right backend and consistent round
from metpy.testing import patch_round, set_agg_backend # noqa: F401, I202
MPL_VERSION = matplotlib.__version__[:3]
@pytest.mark.mpl_image_compare(tolerance=0.053, remove_text=True)
def test_us_county_defaults():
"""Test the default US county plotting."""
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax = fig.add_subplot(1, 1, 1, projection=proj)
ax.set_extent([270.25, 270.9, 38.15, 38.75], ccrs.Geodetic())
ax.add_feature(USCOUNTIES)
return fig
@pytest.mark.mpl_image_compare(tolerance=0.092, remove_text=True)
def test_us_county_scales():
"""Test US county plotting with all scales."""
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(1, 3, 1, projection=proj)
ax2 = fig.add_subplot(1, 3, 2, projection=proj)
ax3 = fig.add_subplot(1, 3, 3, projection=proj)
for scale, axis in zip(['20m', '5m', '500k'], [ax1, ax2, ax3]):
axis.set_extent([270.25, 270.9, 38.15, 38.75], ccrs.Geodetic())
axis.add_feature(USCOUNTIES.with_scale(scale))
return fig
@pytest.mark.mpl_image_compare(tolerance=0.053, remove_text=True)
def test_us_states_defaults():
"""Test the default US States plotting."""
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax = fig.add_subplot(1, 1, 1, projection=proj)
ax.set_extent([270, 280, 28, 39], ccrs.Geodetic())
ax.add_feature(USSTATES)
return fig
@pytest.mark.mpl_image_compare(tolerance=0.092, remove_text=True)
def test_us_states_scales():
"""Test the default US States plotting with all scales."""
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(1, 3, 1, projection=proj)
ax2 = fig.add_subplot(1, 3, 2, projection=proj)
ax3 = fig.add_subplot(1, 3, 3, projection=proj)
for scale, axis in zip(['20m', '5m', '500k'], [ax1, ax2, ax3]):
axis.set_extent([270, 280, 28, 39], ccrs.Geodetic())
axis.add_feature(USSTATES.with_scale(scale))
return fig
| bsd-3-clause |
mfherbst/spack | var/spack/repos/builtin/packages/libxmu/package.py | 5 | 1935 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libxmu(AutotoolsPackage):
"""This library contains miscellaneous utilities and is not part of the
Xlib standard. It contains routines which only use public interfaces so
that it may be layered on top of any proprietary implementation of Xlib
or Xt."""
homepage = "http://cgit.freedesktop.org/xorg/lib/libXmu"
url = "https://www.x.org/archive/individual/lib/libXmu-1.1.2.tar.gz"
version('1.1.2', 'd5be323b02e6851607205c8e941b4e61')
depends_on('libxt')
depends_on('libxext')
depends_on('libx11')
depends_on('xextproto', type='build')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
| lgpl-2.1 |
SurfasJones/djcmsrc3 | venv/lib/python2.7/site-packages/cms/migrations/0024_added_placeholder_model.py | 525 | 20033 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
def forwards(self, orm):
# Dummy migration
pass
def backwards(self, orm):
# Dummy migration
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'})
},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'codename': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['contenttypes.ContentType']"}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [],
{'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Group']", 'symmetrical': 'False',
'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'password': (
'django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': (
'django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.CMSPlugin']", 'null': 'True',
'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': "orm['sites.Site']",
'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')",
'object_name': 'Page'},
'changed_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'created_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'limit_visibility_in_menu': (
'django.db.models.fields.SmallIntegerField', [],
{'default': 'None', 'null': 'True', 'db_index': 'True',
'blank': 'True'}),
'login_required': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '80',
'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'children'",
'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['cms.Placeholder']",
'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'published': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'publisher_public': (
'django.db.models.fields.related.OneToOneField', [],
{'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True',
'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '40', 'null': 'True',
'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'template': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')",
'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [],
{'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [],
{'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': (
'django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_users'",
'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['%s']" % user_orm_label, 'unique': 'True',
'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_usergroups'",
'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.Group']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': (
'django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)",
'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200',
'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': (
'django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': (
'django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"},
'app_label': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site',
'db_table': "'django_site'"},
'domain': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
| mit |
Pablomoto1000/Pablosity | node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py | 1284 | 100329 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
from gyp.common import OrderedSet
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows they
# should be treated specially, but are otherwise invalid
# ninja/shell variables) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
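# A sketch (added for illustration, not in the original file) of how the
# $! and $| specials behave once ExpandSpecial runs, assuming the product
# dir is 'out/Debug' and the active config is 'Debug':
#   '$!PRODUCT_DIR/gen'        -> 'out/Debug/gen' when a product_dir is
#                                 passed, or just 'gen' when the cwd is
#                                 already the product dir
#   'a/$|CONFIGURATION_NAME/b' -> 'a/Debug/b'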
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
# cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
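# e.g. (illustrative) Define('FOO=1', 'linux') yields '-DFOO=1'; on 'win',
# Define('BAR#BAZ', 'win') first octal-encodes '#' to '\043' (yielding
# '-DBAR\043BAZ') because cl.exe mangles literal '#' in definitions.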
def AddArch(output, arch):
"""Adds an arch string to an output path."""
output, extension = os.path.splitext(output)
return '%s.%s%s' % (output, arch, extension)
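# e.g. (illustrative) AddArch('obj/foo/targ.bar.o', 'arm64')
# returns 'obj/foo/targ.bar.arm64.o'.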
class Target(object):
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here. In this case, we also need to save the compile_deps for the target,
# so that the target that directly depends on the .objs can also depend
# on those.
self.component_objs = None
self.compile_deps = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
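#
# A worked example of the two conversions (illustrative; assumes the gyp
# file lives in foo/, the build dir is out/Debug, and the target is 'targ'):
#   GypPathToNinja('bar.cc')        -> '../../foo/bar.cc'
#   GypPathToUniqueOutput('bar.cc') -> 'obj/foo/targ.bar.cc'
# See the docstrings below for the exact qualification rules.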
class NinjaWriter(object):
def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir,
output_file, toplevel_build, output_file_name, flavor,
toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file,
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.hash_for_rules = hash_for_rules
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.toplevel_build = toplevel_build
self.output_file_name = output_file_name
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
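# For instance (illustrative): with source 'res/icon.svg' (so root='icon',
# dirname='res', ext='.svg', name='icon.svg'), a rule output of
# '${root}.png' expands to 'icon.png' and '${dirname}/${name}' expands to
# 'res/icon.svg'.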
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
assert not os.path.isabs(path_dir), (
"'%s' can not be absolute path (see crbug.com/462153)." % path_dir)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets, order_only=None):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
assert not order_only
return None
if len(targets) > 1 or order_only:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only)
self.ninja.newline()
return targets[0]
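# Sketch of the collapsing behavior (illustrative): given two stamps
# ['a', 'b'], WriteCollapsedDependencies('deps', ['a', 'b']) emits a single
#   build obj/<base>/<target>.deps.stamp: stamp a b
# edge and returns that stamp path; a one-element list with no order_only
# is returned unchanged.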
def _SubninjaNameForArch(self, arch):
output_file_base = os.path.splitext(self.output_file_name)[0]
return '%s.%s.ninja' % (output_file_base, arch)
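# e.g. (illustrative) an output_file_name of 'obj/foo.ninja' yields
# 'obj/foo.x64.ninja' for arch 'x64'.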
def WriteSpec(self, spec, config_name, generator_flags):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
# Track if this target contains any C++ files, to decide if gcc or g++
# should be used for linking.
self.uses_cpp = False
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
self.ninja.variable('cc_host', '$cl_' + arch)
self.ninja.variable('cxx_host', '$cl_' + arch)
self.ninja.variable('asm', '$ml_' + arch)
if self.flavor == 'mac':
self.archs = self.xcode_settings.GetActiveArchs(config_name)
if len(self.archs) > 1:
self.arch_subninjas = dict(
(arch, ninja_syntax.Writer(
OpenOutput(os.path.join(self.toplevel_build,
self._SubninjaNameForArch(arch)),
'w')))
for arch in self.archs)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = extra_sources + spec.get('sources', [])
if sources:
if self.flavor == 'mac' and len(self.archs) > 1:
# Write subninja file containing compile and link commands scoped to
# a single arch if a fat binary is being built.
for arch in self.archs:
self.ninja.subninja(self._SubninjaNameForArch(arch))
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
self.ninja, config_name, config, sources, compile_depends_stamp, pch,
spec)
# Some actions/rules output 'sources' that are already object files.
obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
if obj_outputs:
if self.flavor != 'mac' or len(self.archs) == 1:
link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
else:
print "Warning: Actions/rules writing object files don't work with " \
"multiarch targets, dropping. (target %s)" % spec['target_name']
elif self.flavor == 'mac' and len(self.archs) > 1:
link_deps = collections.defaultdict(list)
compile_deps = self.target.actions_stamp or actions_depends
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
self.target.compile_deps = compile_deps
# Write out a link step, if needed.
output = None
is_empty_bundle = not link_deps and not mac_bundle_depends
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
compile_deps)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRulesOrActions(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
else:
mac_bundle_resources = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
mac_bundle_resources,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
xcassets = self.WriteMacBundleResources(
extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends)
self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
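# e.g. (illustrative) for a 'host'-toolset target named 'codegen' with no
# message, GenerateDescription('ACTION', None, 'run_step') returns
# 'ACTION(host) codegen: run_step'.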
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetToolchainEnv()
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'], self.hash_for_rules)
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
depfile = action.get('depfile', None)
if depfile:
depfile = self.ExpandSpecial(depfile, self.base_to_build)
pool = 'console' if int(action.get('ninja_use_console', 0)) else None
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env, pool,
depfile=depfile)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
mac_bundle_resources, extra_mac_bundle_resources):
env = self.GetToolchainEnv()
all_outputs = []
for rule in rules:
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'], self.hash_for_rules)
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
pool = 'console' if int(rule.get('ninja_use_console', 0)) else None
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env, pool)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if '${%s}' % var in argument:
needed_variables.add(var)
def cygwin_munge(path):
# pylint: disable=cell-var-from-loop
if is_cygwin:
return path.replace('\\', '/')
return path
inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])]
# If there are n source files matching the rule, and m additional rule
# inputs, then adding 'inputs' to each build edge written below will
# write m * n inputs. Collapsing reduces this to m + n.
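# Concretely (illustrative): 10 rule sources sharing 5 extra inputs would
# record 10 * 5 = 50 input references; with the stamp, the stamp edge
# lists the 5 inputs once and each of the 10 edges references only the
# stamp, i.e. 10 + 5.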
sources = rule.get('rule_sources', [])
num_inputs = len(inputs)
if prebuild:
num_inputs += 1
if num_inputs > 2 and len(sources) > 2:
inputs = [self.WriteCollapsedDependencies(
rule['rule_name'], inputs, order_only=prebuild)]
prebuild = []
# For each source file, write an edge that generates all the outputs.
for source in sources:
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
was_mac_bundle_resource = source in mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
# items in a set and remove them all in a single pass if this becomes
# a performance issue.
if was_mac_bundle_resource:
mac_bundle_resources.remove(source)
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
assert var is None, repr(var)
outputs = [self.GypPathToNinja(o, env) for o in outputs]
if self.flavor == 'win':
# WriteNewNinjaRule uses unique_name for creating an rsp file on win.
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetToolchainEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
xcassets = []
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
if os.path.splitext(output)[-1] != '.xcassets':
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource'), \
('binary', isBinary)])
bundle_depends.append(output)
else:
xcassets.append(res)
return xcassets
def WriteMacXCassets(self, xcassets, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources' .xcassets files.
This adds an invocation of 'actool' via the 'mac_tool.py' helper script.
It assumes that the asset catalogs define at least one imageset and
thus an Assets.car file will be generated in the application resources
directory. If this is not the case, the step will re-run at every
invocation of ninja.
if not xcassets:
return
extra_arguments = {}
settings_to_arg = {
'XCASSETS_APP_ICON': 'app-icon',
'XCASSETS_LAUNCH_IMAGE': 'launch-image',
}
settings = self.xcode_settings.xcode_settings[self.config_name]
for settings_key, arg_name in settings_to_arg.iteritems():
value = settings.get(settings_key)
if value:
extra_arguments[arg_name] = value
partial_info_plist = None
if extra_arguments:
partial_info_plist = self.GypPathToUniqueOutput(
'assetcatalog_generated_info.plist')
extra_arguments['output-partial-info-plist'] = partial_info_plist
outputs = []
outputs.append(
os.path.join(
self.xcode_settings.GetBundleResourceFolder(),
'Assets.car'))
if partial_info_plist:
outputs.append(partial_info_plist)
keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor)
extra_env = self.xcode_settings.GetPerTargetSettings()
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
bundle_depends.extend(self.ninja.build(
outputs, 'compile_xcassets', xcassets,
variables=[('env', env), ('keys', keys)]))
return partial_info_plist
def WriteMacInfoPlist(self, partial_info_plist, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(
intermediate_plist, 'preprocess_infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
if partial_info_plist:
intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist')
info_plist = self.ninja.build(
intermediate_plist, 'merge_infoplist',
[partial_info_plist, info_plist])
keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
keys = QuoteShellArgument(json.dumps(keys), self.flavor)
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(out, 'copy_infoplist', info_plist,
variables=[('env', env), ('keys', keys),
('binary', isBinary)])
bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
precompiled_header, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
self.ninja.variable('ldxx', '$ldxx_host')
self.ninja.variable('nm', '$nm_host')
self.ninja.variable('readelf', '$readelf_host')
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteSourcesForArch(
self.ninja, config_name, config, sources, predepends,
precompiled_header, spec)
else:
return dict((arch, self.WriteSourcesForArch(
self.arch_subninjas[arch], config_name, config, sources, predepends,
precompiled_header, spec, arch=arch))
for arch in self.archs)
def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
predepends, precompiled_header, spec, arch=None):
"""Write build rules to compile all of |sources|."""
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
asmflags = self.msvs_settings.GetAsmflags(config_name)
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
# See comment at cc_command for why there are two .pdb files.
pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath_c:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
pdbpath_c = pdbpath + '.c.pdb'
pdbpath_cc = pdbpath + '.cc.pdb'
self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
# Respect environment variables related to build, but target-specific
# flags can still override them.
if self.toolset == 'target':
cflags_c = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CFLAGS', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CXXFLAGS', '').split() + cflags_cc)
elif self.toolset == 'host':
cflags_c = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CFLAGS_host', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CXXFLAGS_host', '').split() + cflags_cc)
defines = config.get('defines', []) + extra_defines
self.WriteVariableList(ninja_file, 'defines',
[Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList(ninja_file, 'asmflags',
map(self.ExpandSpecial, asmflags))
self.WriteVariableList(ninja_file, 'rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
env = self.GetToolchainEnv()
if self.flavor == 'win':
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
self.WriteVariableList(ninja_file, 'includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
if self.flavor == 'win':
midl_include_dirs = config.get('midl_include_dirs', [])
midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs(
midl_include_dirs, config_name)
self.WriteVariableList(ninja_file, 'midl_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in midl_include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands(arch)
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext, arch)
if include: ninja_file.variable(var, include)
arflags = config.get('arflags', [])
self.WriteVariableList(ninja_file, 'cflags',
map(self.ExpandSpecial, cflags))
self.WriteVariableList(ninja_file, 'cflags_c',
map(self.ExpandSpecial, cflags_c))
self.WriteVariableList(ninja_file, 'cflags_cc',
map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList(ninja_file, 'cflags_objc',
map(self.ExpandSpecial, cflags_objc))
self.WriteVariableList(ninja_file, 'cflags_objcc',
map(self.ExpandSpecial, cflags_objcc))
self.WriteVariableList(ninja_file, 'arflags',
map(self.ExpandSpecial, arflags))
ninja_file.newline()
outputs = []
has_rc_source = False
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
self.uses_cpp = True
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
command = 'asm'
# Add the _asm suffix as msvs is capable of handling .cc and
# .asm files of the same name without collision.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
self.uses_cpp = True
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
has_rc_source = True
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
if arch is not None:
output = AddArch(output, arch)
implicit = precompiled_header.GetObjDependencies([input], [output], arch)
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
ninja_file.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
if has_rc_source:
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
self.WriteVariableList(ninja_file, 'resource_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in resource_include_dirs])
self.WritePchTargets(ninja_file, pch_commands)
ninja_file.newline()
return outputs
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
cmd = map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteLinkForArch(
self.ninja, spec, config_name, config, link_deps)
else:
output = self.ComputeOutput(spec)
inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
config_name, config, link_deps[arch],
arch=arch)
for arch in self.archs]
extra_bindings = []
build_output = output
if not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
# TODO(yyanagisawa): more work needed to fix:
# https://code.google.com/p/gyp/issues/detail?id=411
if (spec['type'] in ('shared_library', 'loadable_module') and
not self.is_mac_bundle):
extra_bindings.append(('lib', output))
self.ninja.build([output, output + '.TOC'], 'solipo', inputs,
variables=extra_bindings)
else:
self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings)
return output
def WriteLinkForArch(self, ninja_file, spec, config_name, config,
link_deps, arch=None):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
order_deps = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
new_deps = []
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
new_deps = target.component_objs
if target.compile_deps:
order_deps.add(target.compile_deps)
elif self.flavor == 'win' and target.import_lib:
new_deps = [target.import_lib]
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
new_deps = [target.binary]
for new_dep in new_deps:
if new_dep not in extra_link_deps:
extra_link_deps.add(new_dep)
link_deps.append(new_dep)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
extra_bindings = []
if self.uses_cpp and self.flavor != 'win':
extra_bindings.append(('ld', '$ldxx'))
output = self.ComputeOutput(spec, arch)
if arch is None and not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
is_executable = spec['type'] == 'executable'
# The ldflags config key is not used on mac or win. On those platforms
# linker flags are set via xcode_settings and msvs_settings, respectively.
env_ldflags = os.environ.get('LDFLAGS', '').split()
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja, arch)
ldflags = env_ldflags + ldflags
elif self.flavor == 'win':
manifest_base_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, intermediate_manifest, manifest_files = \
self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
self.ExpandSpecial, manifest_base_name,
output, is_executable,
self.toplevel_build)
ldflags = env_ldflags + ldflags
self.WriteVariableList(ninja_file, 'manifests', manifest_files)
implicit_deps = implicit_deps.union(manifest_files)
if intermediate_manifest:
self.WriteVariableList(
ninja_file, 'intermediatemanifest', [intermediate_manifest])
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
# Respect environment variables related to build, but target-specific
# flags can still override them.
ldflags = env_ldflags + config.get('ldflags', [])
if is_executable and len(solibs):
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList(ninja_file, 'ldflags',
map(self.ExpandSpecial, ldflags))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
linked_binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor != 'win':
link_file_list = output
if self.is_mac_bundle:
# 'Dependency Framework.framework/Versions/A/Dependency Framework' ->
# 'Dependency Framework.framework.rsp'
link_file_list = self.xcode_settings.GetWrapperName()
if arch:
link_file_list += '.' + arch
link_file_list += '.rsp'
# If an rspfile name contains spaces, ninja surrounds the filename with
# quotes and then passes it to open(), creating a file with quotes in its
# name (while the command line goes through bash, which strips the quotes,
# so the rsp file is never found) :-/
link_file_list = link_file_list.replace(' ', '_')
extra_bindings.append(
('link_file_list',
gyp.common.EncodePOSIXShellArgument(link_file_list)))
if self.flavor == 'win':
extra_bindings.append(('binary', output))
if ('/NOENTRY' not in ldflags and
not self.msvs_settings.GetNoImportLibrary(config_name)):
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
output = [output, self.target.import_lib]
if pdbname:
output.append(pdbname)
elif not self.is_mac_bundle:
output = [output, output + '.TOC']
else:
command = command + '_notoc'
elif self.flavor == 'win':
extra_bindings.append(('binary', output))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
if pdbname:
output = [output, pdbname]
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
ninja_file.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
order_only=list(order_deps),
variables=extra_bindings)
return linked_binary
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
extra_link_deps = any(self.target_outputs.get(dep).Linkable()
for dep in spec.get('dependencies', [])
if dep in self.target_outputs)
if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
self.target.type = 'none'
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps)
else:
variables = []
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
if self.flavor != 'mac' or len(self.archs) == 1:
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
inputs = []
for arch in self.archs:
output = self.ComputeOutput(spec, arch)
self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
order_only=compile_deps,
variables=variables)
inputs.append(output)
# TODO: It's not clear if libtool_flags should be passed to the alink
# call that combines single-arch .a files into a fat .a file.
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', inputs,
# FIXME: add a test proving that order_only=compile_deps isn't
# needed here.
variables=variables)
else:
self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
if is_empty:
output += '.stamp'
variables = []
self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
is_command_start=not package_framework)
if package_framework and not is_empty:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetToolchainEnv(self, additional_settings=None):
"""Returns the variables toolchain would set for build steps."""
env = self.GetSortedXcodeEnv(additional_settings=additional_settings)
if self.flavor == 'win':
env = self.GetMsvsToolchainEnv(
additional_settings=additional_settings)
return env
def GetMsvsToolchainEnv(self, additional_settings=None):
"""Returns the variables Visual Studio would set for build steps."""
return self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
config=self.config_name)
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
is_command_start=False):
"""Adds a 'postbuild' variable if there is a postbuild for |output|."""
postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
if postbuild:
variables.append(('postbuilds', postbuild))
def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
if output_binary is not None:
postbuilds = self.xcode_settings.AddImplicitPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
postbuilds, quiet=True)
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
# G will be non-zero if any postbuild fails. Run all postbuilds in a
# subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
'export FOO=foo; export BAR="${FOO} bar;'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
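# Illustrative example (not executed): env = [('A', 'a'), ('B', '$A b')]
# returns roughly 'export A=a; export B="$$A b";' -- the value is shell-
# quoted by EncodePOSIXShellArgument and ninja_syntax.escape doubles the
# '$' so ninja forwards a literal '$' for the shell to expand.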
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
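# For instance, a 'shared_library' target named 'foo' typically becomes
# 'libfoo.so' on linux, 'foo.dll' on win and 'libfoo.dylib' on mac, unless
# product_prefix/product_name/product_extension override the pieces.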
def ComputeOutput(self, spec, arch=None):
"""Compute the path for the final output of the spec."""
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if arch is None and self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if arch is None and 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if arch is not None:
# Make sure partial executables don't end up in a bundle or the regular
# output directory.
archdir = 'arch'
if self.toolset != 'target':
archdir = os.path.join('arch', '%s' % self.toolset)
return os.path.join(archdir, AddArch(filename, arch))
elif type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, ninja_file, var, values):
assert not isinstance(values, str)
if values is None:
values = []
ninja_file.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool,
depfile=None):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
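# For example, a description of 'MYRULE ${name}: $in' becomes
# 'MYRULE ${name}: _in' -- the magic per-rule variables survive while any
# other '$' reference is defused.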
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, depfile=depfile,
restat=True, pool=pool,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
exts = gyp.MSVSUtil.TARGET_TYPE_EXT
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable']
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library']
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library']
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
"""Returns the path from the toplevel_dir to the build output directory."""
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to ninja easier, ninja doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
# Relative path from source root to our output files. e.g. "out"
return os.path.normpath(os.path.join(generator_dir, output_dir))
def CalculateGeneratorInputInfo(params):
"""Called by __init__ to initialize generator values based on params."""
# E.g. "out/gypfiles"
toplevel = params['options'].toplevel_dir
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, ComputeOutputDir(params), 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
gyp.common.EnsureDirExists(path)
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
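# For example, CommandWithWrapper('CC', {'CC': 'ccache'}, 'gcc') returns
# 'ccache gcc'; with no wrapper registered it returns 'gcc' unchanged.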
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
pool_size = int(os.environ.get('GYP_LINK_CONCURRENCY', 0))
if pool_size:
return pool_size
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
# VS 2015 uses 20% more working set than VS 2013 and can consume all RAM
# on a 64 GB machine.
mem_limit = max(1, stat.ullTotalPhys / (5 * (2 ** 30))) # total / 5GB
hard_cap = max(1, int(os.environ.get('GYP_LINK_CONCURRENCY_MAX', 2**32)))
return min(mem_limit, hard_cap)
elif sys.platform.startswith('linux'):
if os.path.exists("/proc/meminfo"):
with open("/proc/meminfo") as meminfo:
memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
for line in meminfo:
match = memtotal_re.match(line)
if not match:
continue
# Allow 8 GiB per link on Linux (MemTotal is reported in kB) because gold
# is quite memory hungry.
return max(1, int(match.group(1)) / (8 * (2 ** 20)))
return 1
elif sys.platform == 'darwin':
try:
avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
# A static library debug build of Chromium's unit_tests takes ~2.7GB, so
# 4GB per ld process allows for some more bloat.
return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB
except:
return 1
else:
# TODO(scottmg): Implement this for other platforms.
return 1
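# Worked example of the heuristic above: 32 GiB of physical RAM yields
# 32 / 5 = 6 concurrent links on Windows, and 16 GiB yields 16 / 8 = 2 on
# Linux, unless GYP_LINK_CONCURRENCY forces an explicit value.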
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
'%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
'$manifests' % {
'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name,
'embed': embed_manifest }
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
use_separate_mspdbsrv = (
int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo $implibflag /DLL /OUT:$binary '
'@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo /OUT:$binary @$binary.rsp' %
(sys.executable, use_separate_mspdbsrv))
exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $binary' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$binary.rsp',
rspfile_content='$in_newline $libs $ldflags',
pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(
os.path.join(ComputeOutputDir(params), config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
# The rules are:
# - Priority, from lowest to highest: the gcc/g++ defaults, the
# 'make_global_settings' entries in gyp, then the environment variables.
# - If there is no 'make_global_settings' for CC.host/CXX.host and no
# 'CC_host'/'CXX_host' environment variable, cc_host/cxx_host fall back
# to cc/cxx.
if flavor == 'win':
ar = 'lib.exe'
# cc and cxx must be set to the correct architecture by overriding with one
# of cl_x86 or cl_x64 below.
cc = 'UNSET'
cxx = 'UNSET'
ld = 'link.exe'
ld_host = '$ld'
else:
ar = 'ar'
cc = 'cc'
cxx = 'c++'
ld = '$cc'
ldxx = '$cxx'
ld_host = '$cc_host'
ldxx_host = '$cxx_host'
ar_host = 'ar'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
clang_cl = None
nm = 'nm'
nm_host = 'nm'
readelf = 'readelf'
readelf_host = 'readelf'
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_root, value)
if key == 'AR.host':
ar_host = os.path.join(build_to_root, value)
if key == 'CC':
cc = os.path.join(build_to_root, value)
if cc.endswith('clang-cl'):
clang_cl = cc
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key == 'LD':
ld = os.path.join(build_to_root, value)
if key == 'LD.host':
ld_host = os.path.join(build_to_root, value)
if key == 'NM':
nm = os.path.join(build_to_root, value)
if key == 'NM.host':
nm_host = os.path.join(build_to_root, value)
if key == 'READELF':
readelf = os.path.join(build_to_root, value)
if key == 'READELF.host':
readelf_host = os.path.join(build_to_root, value)
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
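# Illustrative (assuming such variables are set): CC_wrapper=ccache in the
# environment yields wrappers['CC'] = os.path.join(build_to_root, 'ccache'),
# which CommandWithWrapper() then turns into commands like 'ccache gcc'.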
if flavor == 'win':
configs = [target_dicts[qualified_target]['configurations'][config_name]
for qualified_target in target_list]
shared_system_includes = None
if not generator_flags.get('ninja_use_custom_environment_files', 0):
shared_system_includes = \
gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes(
configs, generator_flags)
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, shared_system_includes, OpenOutput)
for arch, path in cl_paths.iteritems():
if clang_cl:
# If we have selected clang-cl, use that instead.
path = clang_cl
command = CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, 'win'))
if clang_cl:
# Use clang-cl to cross-compile for x86 or x86_64.
command += (' -m32' if arch == 'x86' else ' -m64')
master_ninja.variable('cl_' + arch, command)
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', ar)
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('ml_x86', 'ml.exe')
master_ninja.variable('ml_x64', 'ml64.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar))
if flavor != 'mac':
# Mac does not use readelf/nm for .TOC generation, so avoid polluting
# the master ninja with extra unused variables.
master_ninja.variable(
'nm', GetEnvironFallback(['NM_target', 'NM'], nm))
master_ninja.variable(
'readelf', GetEnvironFallback(['READELF_target', 'READELF'], readelf))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host))
master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host))
master_ninja.variable('readelf_host',
GetEnvironFallback(['READELF_host'], readelf_host))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
# The environment variable could be used in 'make_global_settings', like
# ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.variable('ldxx_host', CommandWithWrapper(
'LINK', wrappers, ldxx_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
# TODO(scottmg) Separate pdb names is a test to see if it works around
# http://crbug.com/142362. It seems there's a race between the creation of
# the .pdb by the precompiled header step for .cc and the compilation of
# .c files. This should be handled by mspdbsrv, but rarely errors out with
# c1xx : fatal error C1033: cannot open program database
# By making the rules target separate pdb files this might be avoided.
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$midl_includes $idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $out',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes $asmflags /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $arflags $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $arflags $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
# The resulting string leaves an uninterpolated %(suffix)s placeholder
# which is filled in by the final substitutions below.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ $readelf -d $lib | grep SONAME ; '
'$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})
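# In effect: relinking rewrites $lib.TOC only when the SONAME or the set of
# exported symbols actually changed, so together with restat=1 ninja can
# skip downstream links that depend on $lib.TOC rather than on $lib itself.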
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content=
'-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content='-Wl,--start-group $in -Wl,--end-group $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in -Wl,--end-group $solibs $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch False '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True)
_AddWinLinkRules(master_ninja, embed_manifest=False)
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
master_ninja.rule(
'lipo',
description='LIPO $out, POSTBUILDS',
command='rm -f $out && lipo -create $in -output $out$postbuilds')
master_ninja.rule(
'solipo',
description='SOLIPO $out, POSTBUILDS',
command=(
'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&'
'%(extract_toc)s > $lib.TOC'
% { 'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}))
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; '
'else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then '
'mv $lib.tmp $lib.TOC ; '
'fi; '
'fi'
% { 'solink': solink_base,
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
solink_suffix = '@$link_file_list$postbuilds'
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_notoc',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module_notoc',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'preprocess_infoplist',
description='PREPROCESS INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'copy_infoplist',
description='COPY INFOPLIST $in',
command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys')
master_ninja.rule(
'merge_infoplist',
description='MERGE INFOPLISTS $in',
command='$env ./gyp-mac-tool merge-info-plist $out $in')
master_ninja.rule(
'compile_xcassets',
description='COMPILE XCASSETS $in',
command='$env ./gyp-mac-tool compile-xcassets $keys $in')
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='rm -rf $out && cp -af $in $out')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
# Short names of targets that were skipped because they didn't contain
# anything interesting.
# NOTE: there may be overlap between this and non_empty_target_names.
empty_target_names = set()
# Set of non-empty short target names.
# NOTE: there may be overlap between this and empty_target_names.
non_empty_target_names = set()
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
# If build_file is a symlink, we must not follow it because there's a chance
# it could point to a path above toplevel_dir, and we cannot correctly deal
# with that case at the moment.
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir,
False)
qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name,
toolset)
hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest()
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
ninja_output = StringIO()
writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir,
ninja_output,
toplevel_build, output_file,
flavor, toplevel_dir=options.toplevel_dir)
target = writer.WriteSpec(spec, config_name, generator_flags)
if ninja_output.tell() > 0:
# Only write out ninja files that actually have contents.
with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
ninja_file.write(ninja_output.getvalue())
ninja_output.close()
master_ninja.subninja(output_file)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
non_empty_target_names.add(name)
else:
empty_target_names.add(name)
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case and the gyp tests, which expect to be
# able to run actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
# Write phony targets for any empty targets that weren't written yet. As
# short names are not necessarily unique, only do this for short names that
# haven't already been output for another target.
empty_target_names = empty_target_names - non_empty_target_names
if empty_target_names:
master_ninja.newline()
master_ninja.comment('Empty targets (output for completeness).')
for name in sorted(empty_target_names):
master_ninja.build(name, 'phony')
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
master_ninja_file.close()
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
# Update target_dicts for iOS device builds.
target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
target_dicts)
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
| gpl-2.0 |
jaywreddy/django | tests/logging_tests/tests.py | 72 | 17601 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
import logging
import warnings
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.core import mail
from django.core.files.temp import NamedTemporaryFile
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin, patch_logger
from django.utils.deprecation import RemovedInNextVersionWarning
from django.utils.encoding import force_text
from django.utils.log import (
DEFAULT_LOGGING, AdminEmailHandler, CallbackFilter, RequireDebugFalse,
RequireDebugTrue,
)
from django.utils.six import StringIO
from .logconfig import MyEmailBackend
# logging config prior to using filter with mail_admins
OLD_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
class LoggingFiltersTest(SimpleTestCase):
def test_require_debug_false_filter(self):
"""
Test the RequireDebugFalse filter class.
"""
filter_ = RequireDebugFalse()
with self.settings(DEBUG=True):
self.assertEqual(filter_.filter("record is not used"), False)
with self.settings(DEBUG=False):
self.assertEqual(filter_.filter("record is not used"), True)
def test_require_debug_true_filter(self):
"""
Test the RequireDebugTrue filter class.
"""
filter_ = RequireDebugTrue()
with self.settings(DEBUG=True):
self.assertEqual(filter_.filter("record is not used"), True)
with self.settings(DEBUG=False):
self.assertEqual(filter_.filter("record is not used"), False)
class DefaultLoggingTest(LoggingCaptureMixin, SimpleTestCase):
@classmethod
def setUpClass(cls):
super(DefaultLoggingTest, cls).setUpClass()
cls._logging = settings.LOGGING
logging.config.dictConfig(DEFAULT_LOGGING)
@classmethod
def tearDownClass(cls):
super(DefaultLoggingTest, cls).tearDownClass()
logging.config.dictConfig(cls._logging)
def test_django_logger(self):
"""
The 'django' base logger only outputs anything when DEBUG=True.
"""
self.logger.error("Hey, this is an error.")
self.assertEqual(self.logger_output.getvalue(), '')
with self.settings(DEBUG=True):
self.logger.error("Hey, this is an error.")
self.assertEqual(self.logger_output.getvalue(), 'Hey, this is an error.\n')
def test_django_logger_warning(self):
with self.settings(DEBUG=True):
self.logger.warning('warning')
self.assertEqual(self.logger_output.getvalue(), 'warning\n')
def test_django_logger_info(self):
with self.settings(DEBUG=True):
self.logger.info('info')
self.assertEqual(self.logger_output.getvalue(), 'info\n')
def test_django_logger_debug(self):
with self.settings(DEBUG=True):
self.logger.debug('debug')
self.assertEqual(self.logger_output.getvalue(), '')
class WarningLoggerTests(SimpleTestCase):
"""
Tests that warning output for RemovedInDjangoXXWarning (XX being the next
Django version) is enabled and captured by the logging system.
"""
def setUp(self):
# If tests are invoked with "-Wall" (or any -W flag actually) then
# warning logging gets disabled (see configure_logging in django/utils/log.py).
# However, these tests expect warnings to be logged, so manually force warnings
# to the logs. Use getattr() here because the logging capture state is
# undocumented and (I assume) brittle.
self._old_capture_state = bool(getattr(logging, '_warnings_showwarning', False))
logging.captureWarnings(True)
# this convoluted setup is to avoid printing this deprecation to
# stderr during test running - as the test runner forces deprecations
# to be displayed at the global py.warnings level
self.logger = logging.getLogger('py.warnings')
self.outputs = []
self.old_streams = []
for handler in self.logger.handlers:
self.old_streams.append(handler.stream)
self.outputs.append(StringIO())
handler.stream = self.outputs[-1]
def tearDown(self):
for i, handler in enumerate(self.logger.handlers):
self.logger.handlers[i].stream = self.old_streams[i]
# Reset warnings state.
logging.captureWarnings(self._old_capture_state)
@override_settings(DEBUG=True)
def test_warnings_capture(self):
with warnings.catch_warnings():
warnings.filterwarnings('always')
warnings.warn('Foo Deprecated', RemovedInNextVersionWarning)
output = force_text(self.outputs[0].getvalue())
self.assertIn('Foo Deprecated', output)
def test_warnings_capture_debug_false(self):
with warnings.catch_warnings():
warnings.filterwarnings('always')
warnings.warn('Foo Deprecated', RemovedInNextVersionWarning)
output = force_text(self.outputs[0].getvalue())
self.assertNotIn('Foo Deprecated', output)
@override_settings(DEBUG=True)
def test_error_filter_still_raises(self):
with warnings.catch_warnings():
warnings.filterwarnings(
'error',
category=RemovedInNextVersionWarning
)
with self.assertRaises(RemovedInNextVersionWarning):
warnings.warn('Foo Deprecated', RemovedInNextVersionWarning)
class CallbackFilterTest(SimpleTestCase):
def test_sense(self):
f_false = CallbackFilter(lambda r: False)
f_true = CallbackFilter(lambda r: True)
self.assertEqual(f_false.filter("record"), False)
self.assertEqual(f_true.filter("record"), True)
def test_passes_on_record(self):
collector = []
def _callback(record):
collector.append(record)
return True
f = CallbackFilter(_callback)
f.filter("a record")
self.assertEqual(collector, ["a record"])
class AdminEmailHandlerTest(SimpleTestCase):
logger = logging.getLogger('django')
def get_admin_email_handler(self, logger):
# Ensure that AdminEmailHandler does not get filtered out
# even with DEBUG=True.
admin_email_handler = [
h for h in logger.handlers
if h.__class__.__name__ == "AdminEmailHandler"
][0]
return admin_email_handler
def test_fail_silently(self):
admin_email_handler = self.get_admin_email_handler(self.logger)
self.assertTrue(admin_email_handler.connection().fail_silently)
@override_settings(
ADMINS=[('whatever admin', 'admin@example.com')],
EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-'
)
def test_accepts_args(self):
"""
Ensure that user-supplied arguments and the EMAIL_SUBJECT_PREFIX
setting are used to compose the email subject.
Refs #16736.
"""
message = "Custom message that says '%s' and '%s'"
token1 = 'ping'
token2 = 'pong'
admin_email_handler = self.get_admin_email_handler(self.logger)
# Backup then override original filters
orig_filters = admin_email_handler.filters
try:
admin_email_handler.filters = []
self.logger.error(message, token1, token2)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['admin@example.com'])
self.assertEqual(mail.outbox[0].subject,
"-SuperAwesomeSubject-ERROR: Custom message that says 'ping' and 'pong'")
finally:
# Restore original filters
admin_email_handler.filters = orig_filters
@override_settings(
ADMINS=[('whatever admin', 'admin@example.com')],
EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-',
INTERNAL_IPS=['127.0.0.1'],
)
def test_accepts_args_and_request(self):
"""
Ensure that the subject is also handled correctly when a request
object is passed.
"""
message = "Custom message that says '%s' and '%s'"
token1 = 'ping'
token2 = 'pong'
admin_email_handler = self.get_admin_email_handler(self.logger)
# Backup then override original filters
orig_filters = admin_email_handler.filters
try:
admin_email_handler.filters = []
rf = RequestFactory()
request = rf.get('/')
self.logger.error(message, token1, token2,
extra={
'status_code': 403,
'request': request,
}
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['admin@example.com'])
self.assertEqual(mail.outbox[0].subject,
"-SuperAwesomeSubject-ERROR (internal IP): Custom message that says 'ping' and 'pong'")
finally:
# Restore original filters
admin_email_handler.filters = orig_filters
@override_settings(
ADMINS=[('admin', 'admin@example.com')],
EMAIL_SUBJECT_PREFIX='',
DEBUG=False,
)
def test_subject_accepts_newlines(self):
"""
Ensure that newlines in email reports' subjects are escaped to prevent
AdminEmailHandler from failing.
Refs #17281.
"""
message = 'Message \r\n with newlines'
expected_subject = 'ERROR: Message \\r\\n with newlines'
self.assertEqual(len(mail.outbox), 0)
self.logger.error(message)
self.assertEqual(len(mail.outbox), 1)
self.assertNotIn('\n', mail.outbox[0].subject)
self.assertNotIn('\r', mail.outbox[0].subject)
self.assertEqual(mail.outbox[0].subject, expected_subject)
@override_settings(
ADMINS=(('admin', 'admin@example.com'),),
EMAIL_SUBJECT_PREFIX='',
DEBUG=False,
)
def test_truncate_subject(self):
"""
RFC 2822's hard limit is 998 characters per line.
So, minus "Subject: ", the actual subject must be no longer than 989
characters.
Refs #17281.
"""
message = 'a' * 1000
expected_subject = 'ERROR: aa' + 'a' * 980
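# len('ERROR: ') is 7, so the expected subject is 7 + 2 + 980 = 989
# characters -- the 998-character RFC 2822 line limit minus the 9
# characters of 'Subject: '.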
self.assertEqual(len(mail.outbox), 0)
self.logger.error(message)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, expected_subject)
@override_settings(
ADMINS=[('admin', 'admin@example.com')],
DEBUG=False,
)
def test_uses_custom_email_backend(self):
"""
Refs #19325
"""
message = 'All work and no play makes Jack a dull boy'
admin_email_handler = self.get_admin_email_handler(self.logger)
mail_admins_called = {'called': False}
def my_mail_admins(*args, **kwargs):
connection = kwargs['connection']
self.assertIsInstance(connection, MyEmailBackend)
mail_admins_called['called'] = True
# Monkeypatches
orig_mail_admins = mail.mail_admins
orig_email_backend = admin_email_handler.email_backend
mail.mail_admins = my_mail_admins
admin_email_handler.email_backend = (
'logging_tests.logconfig.MyEmailBackend')
try:
self.logger.error(message)
self.assertTrue(mail_admins_called['called'])
finally:
# Revert Monkeypatches
mail.mail_admins = orig_mail_admins
admin_email_handler.email_backend = orig_email_backend
@override_settings(
ADMINS=[('whatever admin', 'admin@example.com')],
)
def test_emit_non_ascii(self):
"""
#23593 - AdminEmailHandler should allow Unicode characters in the
request.
"""
handler = self.get_admin_email_handler(self.logger)
record = self.logger.makeRecord('name', logging.ERROR, 'function', 'lno', 'message', None, None)
rf = RequestFactory()
url_path = '/º'
record.request = rf.get(url_path)
handler.emit(record)
self.assertEqual(len(mail.outbox), 1)
msg = mail.outbox[0]
self.assertEqual(msg.to, ['admin@example.com'])
self.assertEqual(msg.subject, "[Django] ERROR (EXTERNAL IP): message")
self.assertIn("Report at %s" % url_path, msg.body)
@override_settings(
MANAGERS=[('manager', 'manager@example.com')],
DEBUG=False,
)
def test_customize_send_mail_method(self):
class ManagerEmailHandler(AdminEmailHandler):
def send_mail(self, subject, message, *args, **kwargs):
mail.mail_managers(subject, message, *args, connection=self.connection(), **kwargs)
handler = ManagerEmailHandler()
record = self.logger.makeRecord('name', logging.ERROR, 'function', 'lno', 'message', None, None)
self.assertEqual(len(mail.outbox), 0)
handler.emit(record)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['manager@example.com'])
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host_doesnt_crash(self):
admin_email_handler = self.get_admin_email_handler(self.logger)
old_include_html = admin_email_handler.include_html
# Text email
admin_email_handler.include_html = False
try:
self.client.get('/', HTTP_HOST='evil.com')
finally:
admin_email_handler.include_html = old_include_html
# HTML email
admin_email_handler.include_html = True
try:
self.client.get('/', HTTP_HOST='evil.com')
finally:
admin_email_handler.include_html = old_include_html
class SettingsConfigTest(AdminScriptTestCase):
"""
Test that accessing settings in a custom logging handler does not trigger
a circular import error.
"""
def setUp(self):
log_config = """{
'version': 1,
'handlers': {
'custom_handler': {
'level': 'INFO',
'class': 'logging_tests.logconfig.MyHandler',
}
}
}"""
self.write_settings('settings.py', sdict={'LOGGING': log_config})
def tearDown(self):
self.remove_settings('settings.py')
def test_circular_dependency(self):
# 'check' is just an example command used to trigger settings configuration
out, err = self.run_manage(['check'])
self.assertNoOutput(err)
self.assertOutput(out, "System check identified no issues (0 silenced).")
def dictConfig(config):
dictConfig.called = True
dictConfig.called = False
class SetupConfigureLogging(SimpleTestCase):
"""
Test that calling django.setup() initializes the logging configuration.
"""
@override_settings(LOGGING_CONFIG='logging_tests.tests.dictConfig',
LOGGING=OLD_LOGGING)
def test_configure_initializes_logging(self):
from django import setup
setup()
self.assertTrue(dictConfig.called)
@override_settings(DEBUG=True, ROOT_URLCONF='logging_tests.urls')
class SecurityLoggerTest(SimpleTestCase):
def test_suspicious_operation_creates_log_message(self):
with patch_logger('django.security.SuspiciousOperation', 'error') as calls:
self.client.get('/suspicious/')
self.assertEqual(len(calls), 1)
self.assertEqual(calls[0], 'dubious')
def test_suspicious_operation_uses_sublogger(self):
with patch_logger('django.security.DisallowedHost', 'error') as calls:
self.client.get('/suspicious_spec/')
self.assertEqual(len(calls), 1)
self.assertEqual(calls[0], 'dubious')
@override_settings(
ADMINS=[('admin', 'admin@example.com')],
DEBUG=False,
)
def test_suspicious_email_admins(self):
self.client.get('/suspicious/')
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Report at /suspicious/', mail.outbox[0].body)
class SettingsCustomLoggingTest(AdminScriptTestCase):
"""
Test that logging defaults are still applied when using a custom
callable in LOGGING_CONFIG (i.e., logging.config.fileConfig).
"""
def setUp(self):
logging_conf = """
[loggers]
keys=root
[handlers]
keys=stream
[formatters]
keys=simple
[logger_root]
handlers=stream
[handler_stream]
class=StreamHandler
formatter=simple
args=(sys.stdout,)
[formatter_simple]
format=%(message)s
"""
self.temp_file = NamedTemporaryFile()
self.temp_file.write(logging_conf.encode('utf-8'))
self.temp_file.flush()
sdict = {'LOGGING_CONFIG': '"logging.config.fileConfig"',
'LOGGING': 'r"%s"' % self.temp_file.name}
self.write_settings('settings.py', sdict=sdict)
def tearDown(self):
self.temp_file.close()
self.remove_settings('settings.py')
def test_custom_logging(self):
out, err = self.run_manage(['check'])
self.assertNoOutput(err)
self.assertOutput(out, "System check identified no issues (0 silenced).")
| bsd-3-clause |
dpshelio/sunpy | examples/units_and_coordinates/AIA_limb_STEREO.py | 1 | 3832 | # -*- coding: utf-8 -*-
"""
===========================================
Drawing the AIA limb on a STEREO EUVI image
===========================================
In this example we use a STEREO-B and an SDO image to demonstrate how to
overplot the limb as seen by AIA on an EUVI-B image. Then we overplot the AIA
coordinate grid on the STEREO image.
"""
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.coordinates import SkyCoord
import sunpy.map
import sunpy.coordinates.wcs_utils
from sunpy.net import Fido, attrs as a
##############################################################################
# The first step is to download some data, we are going to get an image from
# early 2011 when the STEREO spacecraft were roughly 90 deg separated from the
# Earth.
stereo = (a.vso.Source('STEREO_B') &
a.Instrument('EUVI') &
a.Time('2011-01-01', '2011-01-01T00:10:00'))
aia = (a.Instrument('AIA') &
a.vso.Sample(24 * u.hour) &
a.Time('2011-01-01', '2011-01-02'))
wave = a.Wavelength(30 * u.nm, 31 * u.nm)
result = Fido.search(wave, aia | stereo)
###############################################################################
# Let's inspect the result
print(result)
##############################################################################
# and download the files
downloaded_files = Fido.fetch(result)
print(downloaded_files)
##############################################################################
# Let's create a dictionary with the two maps, which we crop to full disk.
maps = {m.detector: m.submap(SkyCoord([-1100, 1100], [-1100, 1100],
unit=u.arcsec, frame=m.coordinate_frame))
for m in sunpy.map.Map(downloaded_files)}
##############################################################################
# Next, let's calculate points on the limb in the AIA image for the half that
# can be seen from STEREO's point of view.
r = maps['AIA'].rsun_obs - 1 * u.arcsec # remove one arcsec so it's on disk.
# Adjust the following range if you only want to plot on STEREO_A
th = np.linspace(-180 * u.deg, 0 * u.deg)
x = r * np.sin(th)
y = r * np.cos(th)
coords = SkyCoord(x, y, frame=maps['AIA'].coordinate_frame)
##############################################################################
# Now, let's plot both maps
fig = plt.figure(figsize=(10, 4))
ax1 = fig.add_subplot(1, 2, 1, projection=maps['AIA'])
maps['AIA'].plot(axes=ax1)
maps['AIA'].draw_limb()
ax2 = fig.add_subplot(1, 2, 2, projection=maps['EUVI'])
maps['EUVI'].plot(axes=ax2)
ax2.plot_coord(coords, color='w')
##############################################################################
# Let's also plot the helioprojective coordinate grid as seen by SDO on the
# STEREO image.
fig = plt.figure()
ax = plt.subplot(projection=maps['EUVI'])
maps['EUVI'].plot()
# Move the title so it does not clash with the extra labels.
tx, ty = ax.title.get_position()
ax.title.set_position([tx, ty + 0.08])
# Change the default grid labels.
stereo_x, stereo_y = ax.coords
stereo_x.set_axislabel("Helioprojective Longitude (STEREO B) [arcsec]")
stereo_y.set_axislabel("Helioprojective Latitude (STEREO B) [arcsec]")
# Add a new coordinate overlay in the SDO frame.
overlay = ax.get_coords_overlay(maps['AIA'].coordinate_frame)
overlay.grid()
# Configure the grid:
x, y = overlay
# Set the ticks to be on the top and left axes.
x.set_ticks_position('tr')
y.set_ticks_position('tr')
# Wrap the longitude at 180 deg rather than the default 360.
x.set_coord_type('longitude', 180.)
# Change the defaults to arcseconds
x.set_major_formatter('s.s')
y.set_major_formatter('s.s')
# Add axes labels
x.set_axislabel("Helioprojective Longitude (SDO) [arcsec]")
y.set_axislabel("Helioprojective Latitude (SDO) [arcsec]")
plt.show()
| bsd-2-clause |
beni55/django | django/core/checks/security/base.py | 135 | 6076 | from django.conf import settings
from .. import Tags, Warning, register
SECRET_KEY_MIN_LENGTH = 50
SECRET_KEY_MIN_UNIQUE_CHARACTERS = 5
W001 = Warning(
"You do not have 'django.middleware.security.SecurityMiddleware' "
"in your MIDDLEWARE_CLASSES so the SECURE_HSTS_SECONDS, "
"SECURE_CONTENT_TYPE_NOSNIFF, "
"SECURE_BROWSER_XSS_FILTER, and SECURE_SSL_REDIRECT settings "
"will have no effect.",
id='security.W001',
)
W002 = Warning(
"You do not have "
"'django.middleware.clickjacking.XFrameOptionsMiddleware' in your "
"MIDDLEWARE_CLASSES, so your pages will not be served with an "
"'x-frame-options' header. Unless there is a good reason for your "
"site to be served in a frame, you should consider enabling this "
"header to help prevent clickjacking attacks.",
id='security.W002',
)
W004 = Warning(
"You have not set a value for the SECURE_HSTS_SECONDS setting. "
"If your entire site is served only over SSL, you may want to consider "
"setting a value and enabling HTTP Strict Transport Security. "
"Be sure to read the documentation first; enabling HSTS carelessly "
"can cause serious, irreversible problems.",
id='security.W004',
)
W005 = Warning(
"You have not set the SECURE_HSTS_INCLUDE_SUBDOMAINS setting to True. "
"Without this, your site is potentially vulnerable to attack "
"via an insecure connection to a subdomain. Only set this to True if "
"you are certain that all subdomains of your domain should be served "
"exclusively via SSL.",
id='security.W005',
)
W006 = Warning(
"Your SECURE_CONTENT_TYPE_NOSNIFF setting is not set to True, "
"so your pages will not be served with an "
"'x-content-type-options: nosniff' header. "
"You should consider enabling this header to prevent the "
"browser from identifying content types incorrectly.",
id='security.W006',
)
W007 = Warning(
"Your SECURE_BROWSER_XSS_FILTER setting is not set to True, "
"so your pages will not be served with an "
"'x-xss-protection: 1; mode=block' header. "
"You should consider enabling this header to activate the "
"browser's XSS filtering and help prevent XSS attacks.",
id='security.W007',
)
W008 = Warning(
    "Your SECURE_SSL_REDIRECT setting is not set to True. "
    "Unless your site should be available over both SSL and non-SSL "
    "connections, you may want to either set this setting to True "
    "or configure a load balancer or reverse-proxy server "
    "to redirect all connections to HTTPS.",
    id='security.W008',
)
W009 = Warning(
"Your SECRET_KEY has less than %(min_length)s characters or less than "
"%(min_unique_chars)s unique characters. Please generate a long and random "
"SECRET_KEY, otherwise many of Django's security-critical features will be "
"vulnerable to attack." % {
'min_length': SECRET_KEY_MIN_LENGTH,
'min_unique_chars': SECRET_KEY_MIN_UNIQUE_CHARACTERS,
},
id='security.W009',
)
W018 = Warning(
"You should not have DEBUG set to True in deployment.",
id='security.W018',
)
W019 = Warning(
"You have "
"'django.middleware.clickjacking.XFrameOptionsMiddleware' in your "
"MIDDLEWARE_CLASSES, but X_FRAME_OPTIONS is not set to 'DENY'. "
"The default is 'SAMEORIGIN', but unless there is a good reason for "
"your site to serve other parts of itself in a frame, you should "
"change it to 'DENY'.",
id='security.W019',
)
def _security_middleware():
return "django.middleware.security.SecurityMiddleware" in settings.MIDDLEWARE_CLASSES
def _xframe_middleware():
return "django.middleware.clickjacking.XFrameOptionsMiddleware" in settings.MIDDLEWARE_CLASSES
@register(Tags.security, deploy=True)
def check_security_middleware(app_configs, **kwargs):
passed_check = _security_middleware()
return [] if passed_check else [W001]
@register(Tags.security, deploy=True)
def check_xframe_options_middleware(app_configs, **kwargs):
passed_check = _xframe_middleware()
return [] if passed_check else [W002]
@register(Tags.security, deploy=True)
def check_sts(app_configs, **kwargs):
passed_check = not _security_middleware() or settings.SECURE_HSTS_SECONDS
return [] if passed_check else [W004]
@register(Tags.security, deploy=True)
def check_sts_include_subdomains(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
not settings.SECURE_HSTS_SECONDS or
settings.SECURE_HSTS_INCLUDE_SUBDOMAINS is True
)
return [] if passed_check else [W005]
@register(Tags.security, deploy=True)
def check_content_type_nosniff(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
settings.SECURE_CONTENT_TYPE_NOSNIFF is True
)
return [] if passed_check else [W006]
@register(Tags.security, deploy=True)
def check_xss_filter(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
settings.SECURE_BROWSER_XSS_FILTER is True
)
return [] if passed_check else [W007]
@register(Tags.security, deploy=True)
def check_ssl_redirect(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
settings.SECURE_SSL_REDIRECT is True
)
return [] if passed_check else [W008]
@register(Tags.security, deploy=True)
def check_secret_key(app_configs, **kwargs):
passed_check = (
getattr(settings, 'SECRET_KEY', None) and
len(set(settings.SECRET_KEY)) >= SECRET_KEY_MIN_UNIQUE_CHARACTERS and
len(settings.SECRET_KEY) >= SECRET_KEY_MIN_LENGTH
)
return [] if passed_check else [W009]
@register(Tags.security, deploy=True)
def check_debug(app_configs, **kwargs):
passed_check = not settings.DEBUG
return [] if passed_check else [W018]
@register(Tags.security, deploy=True)
def check_xframe_deny(app_configs, **kwargs):
passed_check = (
not _xframe_middleware() or
settings.X_FRAME_OPTIONS == 'DENY'
)
return [] if passed_check else [W019]
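# Illustrative sketch (not part of this module): third-party code can hook
# into the same machinery by registering its own deployment check. The
# setting name and warning below are hypothetical.
#
#   W100 = Warning("MY_SECURITY_FLAG should be enabled in deployment.",
#                  id='myapp.W100')
#
#   @register(Tags.security, deploy=True)
#   def check_my_flag(app_configs, **kwargs):
#       return [] if getattr(settings, 'MY_SECURITY_FLAG', False) else [W100]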
| bsd-3-clause |
scs/uclinux | lib/boost/boost_1_38_0/libs/python/test/pickle3.py | 46 | 1566 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
r'''>>> import pickle3_ext
>>> import pickle
>>> pickle3_ext.world.__module__
'pickle3_ext'
>>> pickle3_ext.world.__safe_for_unpickling__
1
>>> pickle3_ext.world.__getstate_manages_dict__
1
>>> pickle3_ext.world.__name__
'world'
>>> pickle3_ext.world('Hello').__reduce__()
(<class 'pickle3_ext.world'>, ('Hello',), ({}, 0))
>>> for number in (24, 42):
... wd = pickle3_ext.world('California')
... wd.set_secret_number(number)
... wd.x = 2 * number
... wd.y = 'y' * number
... wd.z = 3. * number
... pstr = pickle.dumps(wd)
... wl = pickle.loads(pstr)
... print wd.greet(), wd.get_secret_number(), wd.x, wd.y, wd.z
... print wl.greet(), wl.get_secret_number(), wl.x, wl.y, wl.z
Hello from California! 24 48 yyyyyyyyyyyyyyyyyyyyyyyy 72.0
Hello from California! 24 48 yyyyyyyyyyyyyyyyyyyyyyyy 72.0
Hello from California! 42 84 yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy 126.0
Hello from California! 0 84 yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy 126.0
'''
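# Illustrative note (not part of the original test): pickle rebuilds the
# instance from the __reduce__ tuple shown above, roughly
#
#   cls, args, state = obj.__reduce__()
#   new = cls(*args)
#   new.__setstate__(state)   # assuming __setstate__ is defined
#
# which is why world advertises __safe_for_unpickling__ and
# __getstate_manages_dict__ to Boost.Python.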
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
| gpl-2.0 |
kenwang76/readthedocs.org | readthedocs/redirects/models.py | 26 | 2513 | from django.db import models
from django.utils.translation import ugettext
from django.utils.translation import ugettext_lazy as _
from readthedocs.projects.models import Project
HTTP_STATUS_CHOICES = (
(301, _('301 - Permanent Redirect')),
(302, _('302 - Temporary Redirect')),
)
STATUS_CHOICES = (
(True, _('Active')),
(False, _('Inactive')),
)
TYPE_CHOICES = (
('prefix', _('Prefix Redirect')),
('page', _('Page Redirect')),
('exact', _('Exact Redirect')),
('sphinx_html', _('Sphinx HTMLDir -> HTML')),
('sphinx_htmldir', _('Sphinx HTML -> HTMLDir')),
# ('advanced', _('Advanced')),
)
from_url_helptext = _('Absolute path, excluding the domain. '
'Example: <b>/docs/</b> or <b>/install.html</b>'
)
to_url_helptext = _('Absolute or relative URL. Example: '
'<b>/tutorial/install.html</b>'
)
redirect_type_helptext = _('The type of redirect you wish to use.')
class Redirect(models.Model):
project = models.ForeignKey(Project, verbose_name=_('Project'),
related_name='redirects')
redirect_type = models.CharField(_('Redirect Type'), max_length=255, choices=TYPE_CHOICES,
help_text=redirect_type_helptext)
from_url = models.CharField(_('From URL'), max_length=255,
db_index=True, help_text=from_url_helptext, blank=True)
to_url = models.CharField(_('To URL'), max_length=255,
db_index=True, help_text=to_url_helptext, blank=True)
http_status = models.SmallIntegerField(_('HTTP Status'),
choices=HTTP_STATUS_CHOICES,
default=301)
status = models.BooleanField(choices=STATUS_CHOICES, default=True)
create_dt = models.DateTimeField(auto_now_add=True)
update_dt = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = _('redirect')
verbose_name_plural = _('redirects')
ordering = ('-update_dt',)
def __unicode__(self):
if self.redirect_type == 'prefix':
return ugettext('Prefix Redirect: %s ->' % self.from_url)
elif self.redirect_type == 'page':
return ugettext('Page Redirect: %s -> %s' % (
self.from_url,
self.to_url))
else:
return ugettext('Redirect: %s' % self.get_redirect_type_display())
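# Illustrative usage sketch (not part of this module); assumes an existing
# Project instance bound to ``project``:
#
#   Redirect.objects.create(
#       project=project,
#       redirect_type='page',
#       from_url='/install.html',
#       to_url='/tutorial/install.html',
#   )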
| mit |
75651/kbengine_cloud | kbe/res/scripts/common/Lib/multiprocessing/process.py | 98 | 9144 | #
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['BaseProcess', 'current_process', 'active_children']
#
# Imports
#
import os
import sys
import signal
import itertools
from _weakrefset import WeakSet
#
#
#
try:
ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
ORIGINAL_DIR = None
#
# Public functions
#
def current_process():
'''
Return process object representing the current process
'''
return _current_process
def active_children():
'''
Return list of process objects corresponding to live child processes
'''
_cleanup()
return list(_children)
#
#
#
def _cleanup():
# check for processes which have finished
for p in list(_children):
if p._popen.poll() is not None:
_children.discard(p)
#
# The `Process` class
#
class BaseProcess(object):
'''
Process objects represent activity that is run in a separate process
The class is analogous to `threading.Thread`
'''
def _Popen(self):
raise NotImplementedError
def __init__(self, group=None, target=None, name=None, args=(), kwargs={},
*, daemon=None):
assert group is None, 'group argument must be None for now'
count = next(_process_counter)
self._identity = _current_process._identity + (count,)
self._config = _current_process._config.copy()
self._parent_pid = os.getpid()
self._popen = None
self._target = target
self._args = tuple(args)
self._kwargs = dict(kwargs)
self._name = name or type(self).__name__ + '-' + \
':'.join(str(i) for i in self._identity)
if daemon is not None:
self.daemon = daemon
_dangling.add(self)
def run(self):
'''
Method to be run in sub-process; can be overridden in sub-class
'''
if self._target:
self._target(*self._args, **self._kwargs)
def start(self):
'''
Start child process
'''
assert self._popen is None, 'cannot start a process twice'
assert self._parent_pid == os.getpid(), \
'can only start a process object created by current process'
assert not _current_process._config.get('daemon'), \
'daemonic processes are not allowed to have children'
_cleanup()
self._popen = self._Popen(self)
self._sentinel = self._popen.sentinel
_children.add(self)
def terminate(self):
'''
Terminate process; sends SIGTERM signal or uses TerminateProcess()
'''
self._popen.terminate()
def join(self, timeout=None):
'''
Wait until child process terminates
'''
assert self._parent_pid == os.getpid(), 'can only join a child process'
assert self._popen is not None, 'can only join a started process'
res = self._popen.wait(timeout)
if res is not None:
_children.discard(self)
def is_alive(self):
'''
Return whether process is alive
'''
if self is _current_process:
return True
assert self._parent_pid == os.getpid(), 'can only test a child process'
if self._popen is None:
return False
self._popen.poll()
return self._popen.returncode is None
@property
def name(self):
return self._name
@name.setter
def name(self, name):
assert isinstance(name, str), 'name must be a string'
self._name = name
@property
def daemon(self):
'''
Return whether process is a daemon
'''
return self._config.get('daemon', False)
@daemon.setter
def daemon(self, daemonic):
'''
Set whether process is a daemon
'''
assert self._popen is None, 'process has already started'
self._config['daemon'] = daemonic
@property
def authkey(self):
return self._config['authkey']
@authkey.setter
def authkey(self, authkey):
'''
Set authorization key of process
'''
self._config['authkey'] = AuthenticationString(authkey)
@property
def exitcode(self):
'''
Return exit code of process or `None` if it has yet to stop
'''
if self._popen is None:
return self._popen
return self._popen.poll()
@property
def ident(self):
'''
Return identifier (PID) of process or `None` if it has yet to start
'''
if self is _current_process:
return os.getpid()
else:
return self._popen and self._popen.pid
pid = ident
@property
def sentinel(self):
'''
Return a file descriptor (Unix) or handle (Windows) suitable for
waiting for process termination.
'''
try:
return self._sentinel
except AttributeError:
raise ValueError("process not started")
def __repr__(self):
if self is _current_process:
status = 'started'
elif self._parent_pid != os.getpid():
status = 'unknown'
elif self._popen is None:
status = 'initial'
else:
if self._popen.poll() is not None:
status = self.exitcode
else:
status = 'started'
if type(status) is int:
if status == 0:
status = 'stopped'
else:
status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
status, self.daemon and ' daemon' or '')
##
def _bootstrap(self):
from . import util, context
global _current_process, _process_counter, _children
try:
if self._start_method is not None:
context._force_start_method(self._start_method)
_process_counter = itertools.count(1)
_children = set()
if sys.stdin is not None:
try:
sys.stdin.close()
sys.stdin = open(os.devnull)
except (OSError, ValueError):
pass
old_process = _current_process
_current_process = self
try:
util._finalizer_registry.clear()
util._run_after_forkers()
finally:
# delay finalization of the old process object until after
# _run_after_forkers() is executed
del old_process
util.info('child process calling self.run()')
try:
self.run()
exitcode = 0
finally:
util._exit_function()
except SystemExit as e:
if not e.args:
exitcode = 1
elif isinstance(e.args[0], int):
exitcode = e.args[0]
else:
sys.stderr.write(str(e.args[0]) + '\n')
exitcode = 1
except:
exitcode = 1
import traceback
sys.stderr.write('Process %s:\n' % self.name)
traceback.print_exc()
finally:
util.info('process exiting with exitcode %d' % exitcode)
sys.stdout.flush()
sys.stderr.flush()
return exitcode
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationString(bytes):
def __reduce__(self):
from .context import get_spawning_popen
if get_spawning_popen() is None:
raise TypeError(
'Pickling an AuthenticationString object is '
'disallowed for security reasons'
)
return AuthenticationString, (bytes(self),)
#
# Create object representing the main process
#
class _MainProcess(BaseProcess):
def __init__(self):
self._identity = ()
self._name = 'MainProcess'
self._parent_pid = None
self._popen = None
self._config = {'authkey': AuthenticationString(os.urandom(32)),
'semprefix': '/mp'}
# Note that some versions of FreeBSD only allow named
# semaphores to have names of up to 14 characters. Therefore
# we choose a short prefix.
#
# On MacOSX in a sandbox it may be necessary to use a
# different prefix -- see #19478.
#
# Everything in self._config will be inherited by descendant
# processes.
_current_process = _MainProcess()
_process_counter = itertools.count(1)
_children = set()
del _MainProcess
#
# Give names to some return codes
#
_exitcode_to_name = {}
for name, signum in list(signal.__dict__.items()):
if name[:3]=='SIG' and '_' not in name:
_exitcode_to_name[-signum] = name
# For debug and leak testing
_dangling = WeakSet()
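# Illustrative usage sketch (not part of this module): concrete subclasses
# such as multiprocessing.Process are driven exactly like threading.Thread.
#
#   from multiprocessing import Process
#
#   def work(name):
#       print('hello', name)
#
#   if __name__ == '__main__':
#       p = Process(target=work, args=('world',))
#       p.start()
#       p.join()
#       assert p.exitcode == 0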
| lgpl-3.0 |
vaporry/pydevp2p | devp2p/tests/test_multiplexer.py | 3 | 6454 | from devp2p.multiplexer import Multiplexer, Packet, Frame, DeserializationError
def test_frame():
mux = Multiplexer()
p0 = 0
mux.add_protocol(p0)
# test normal packet
packet0 = Packet(p0, cmd_id=0, payload='x' * 100)
mux.add_packet(packet0)
frames = mux.pop_frames()
assert len(frames) == 1
f = frames[0]
message = f.as_bytes()
# check framing
fs = f.frame_size()
assert len(message) == fs
_fs = 16 + 16 + len(f.enc_cmd_id) + len(packet0.payload) + 16
_fs += Frame.padding - _fs % Frame.padding
assert fs == _fs
assert message[32 + len(f.enc_cmd_id):].startswith(packet0.payload)
packets = mux.decode(message)
assert len(mux._decode_buffer) == 0
assert len(packets[0].payload) == len(packet0.payload)
assert packets[0].payload == packet0.payload
assert packets[0] == packet0
def test_chunked():
mux = Multiplexer()
p0, p1, p2 = 0, 1, 2
mux.add_protocol(p0)
mux.add_protocol(p1)
mux.add_protocol(p2)
# big packet
print 'size', mux.max_window_size * 2
packet1 = Packet(p1, cmd_id=0, payload='\x00' * mux.max_window_size * 2 + 'x')
mux.add_packet(packet1)
frames = mux.pop_all_frames()
all_frames_length = sum(f.frame_size() for f in frames)
assert sum(len(f.payload) for f in frames) == len(packet1.payload)
for i, f, in enumerate(frames):
print i, f.frame_size()
print 'frame payload', len(f.payload)
print f._frame_type()
mux.add_packet(packet1)
message = mux.pop_all_frames_as_bytes()
assert len(message) == all_frames_length
packets = mux.decode(message)
assert len(mux._decode_buffer) == 0
assert packets[0].payload == packet1.payload
assert packets[0] == packet1
assert len(packets) == 1
def test_chunked_big():
import time
mux = Multiplexer()
p0 = 0
mux.add_protocol(p0)
# big packet
payload = '\x00' * 10 * 1024**2
print 'size', len(payload)
packet1 = Packet(p0, cmd_id=0, payload=payload)
# framing
st = time.time()
mux.add_packet(packet1)
print 'framing', time.time() - st
# popping frames
st = time.time()
messages = [f.as_bytes() for f in mux.pop_all_frames()]
print 'popping frames', time.time() - st
st = time.time()
# decoding
for m in messages:
packets = mux.decode(m)
if packets:
break
print 'decoding frames', time.time() - st
assert len(mux._decode_buffer) == 0
assert packets[0].payload == packet1.payload
assert packets[0] == packet1
assert len(packets) == 1
def test_remain():
mux = Multiplexer()
p0, p1, p2 = 0, 1, 2
mux.add_protocol(p0)
mux.add_protocol(p1)
mux.add_protocol(p2)
# test buffer remains, incomplete frames
packet1 = Packet(p1, cmd_id=0, payload='\x00' * 100)
mux.add_packet(packet1)
message = mux.pop_all_frames_as_bytes()
tail = message[:50]
message += tail
packets = mux.decode(message)
assert packets[0] == packet1
assert len(packets) == 1
assert len(mux._decode_buffer) == len(tail)
# test buffer decode with invalid data
message = message[1:]
exception_raised = False
try:
packets = mux.decode(message)
except DeserializationError:
exception_raised = True
assert exception_raised
def test_multiplexer():
mux = Multiplexer()
p0, p1, p2 = 0, 1, 2
mux.add_protocol(p0)
mux.add_protocol(p1)
mux.add_protocol(p2)
assert mux.next_protocol == p0
assert mux.next_protocol == p1
assert mux.next_protocol == p2
assert mux.next_protocol == p0
assert mux.pop_frames() == []
assert mux.num_active_protocols == 0
# test normal packet
packet0 = Packet(p0, cmd_id=0, payload='x' * 100)
mux.add_packet(packet0)
assert mux.num_active_protocols == 1
frames = mux.pop_frames()
assert len(frames) == 1
f = frames[0]
assert len(f.as_bytes()) == f.frame_size()
mux.add_packet(packet0)
assert mux.num_active_protocols == 1
message = mux.pop_all_frames_as_bytes()
packets = mux.decode(message)
assert len(packets[0].payload) == len(packet0.payload)
assert packets[0].payload == packet0.payload
assert packets[0] == packet0
# nothing left to pop
assert len(mux.pop_frames()) == 0
# big packet
packet1 = Packet(p1, cmd_id=0, payload='\x00' * mux.max_window_size * 2)
mux.add_packet(packet1)
# decode packets from buffer
message = mux.pop_all_frames_as_bytes()
packets = mux.decode(message)
assert packets[0].payload == packet1.payload
assert packets[0] == packet1
assert len(packets) == 1
# mix packet types
packet2 = Packet(p0, cmd_id=0, payload='\x00' * 200, prioritize=True)
mux.add_packet(packet1)
mux.add_packet(packet0)
mux.add_packet(packet2)
message = mux.pop_all_frames_as_bytes()
packets = mux.decode(message)
assert packets == [packet2, packet0, packet1]
# packets with different protocols
packet3 = Packet(p1, cmd_id=0, payload='\x00' * 3000, prioritize=False)
mux.add_packet(packet1)
mux.add_packet(packet0)
mux.add_packet(packet2)
mux.add_packet(packet3)
mux.add_packet(packet3)
mux.add_packet(packet3)
assert mux.next_protocol == p0
# thus next with data is p1 w/ packet3
message = mux.pop_all_frames_as_bytes()
packets = mux.decode(message)
assert packets == [packet3, packet2, packet0, packet3, packet3, packet1]
# test buffer remains, incomplete frames
packet1 = Packet(p1, cmd_id=0, payload='\x00' * 100)
mux.add_packet(packet1)
message = mux.pop_all_frames_as_bytes()
tail = message[:50]
message += tail
packets = mux.decode(message)
assert packets[0] == packet1
assert len(packets) == 1
assert len(mux._decode_buffer) == len(tail)
def test_rlpx_alpha():
"""
protocol_id: 0
sequence_id: 0
Single-frame packet:
header || header-mac || frame || frame-mac
header: frame-size || header-data || padding
frame-size: 3-byte integer size of frame, big endian encoded (excludes padding)
header-data:
normal: rlp.list(protocol-type[, sequence-id])
values:
protocol-type: < 2**16
sequence-id: < 2**16 (this value is optional for normal frames)
total-packet-size: < 2**32
padding: zero-fill to 16-byte boundary
"""
pass
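# Illustrative sketch (not part of the original suite): one way the alpha
# framing documented above could be assembled. MACs are omitted, and the use
# of the `rlp` package plus zero padding of the frame body are assumptions
# for illustration only.
def _frame_alpha_sketch(protocol_type, payload, sequence_id=None):
    import struct
    import rlp  # devp2p already depends on the rlp package
    fields = [protocol_type] if sequence_id is None else [protocol_type, sequence_id]
    header = struct.pack('>I', len(payload))[1:]  # 3-byte big-endian frame-size
    header += rlp.encode(fields)                  # header-data
    header += '\x00' * (-len(header) % 16)        # zero-fill to 16-byte boundary
    body = payload + '\x00' * (-len(payload) % 16)
    return header + body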
| mit |
ilexius/odoo | addons/delivery/models/stock_move.py | 11 | 1843 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import api, fields, models
import openerp.addons.decimal_precision as dp
class StockMove(models.Model):
_inherit = 'stock.move'
def _default_uom(self):
uom_categ_id = self.env.ref('product.product_uom_categ_kgm').id
return self.env['product.uom'].search([('category_id', '=', uom_categ_id), ('factor', '=', 1)], limit=1)
weight = fields.Float(compute='_cal_move_weight', digits_compute=dp.get_precision('Stock Weight'), store=True)
    weight_uom_id = fields.Many2one('product.uom', string='Unit of Measure', required=True, readonly=True, help="Unit of measurement in which the weight is expressed", default=_default_uom)
@api.depends('product_id', 'product_uom_qty', 'product_uom')
def _cal_move_weight(self):
for move in self.filtered(lambda moves: moves.product_id.weight > 0.00):
move.weight = (move.product_qty * move.product_id.weight)
@api.multi
def action_confirm(self):
"""
Pass the carrier to the picking from the sales order
(Should also work in case of Phantom BoMs when on explosion the original move is deleted)
"""
procs_to_check = []
for move in self:
if move.procurement_id and move.procurement_id.sale_line_id and move.procurement_id.sale_line_id.order_id.carrier_id:
procs_to_check += [move.procurement_id]
res = super(StockMove, self).action_confirm()
for proc in procs_to_check:
pickings = (proc.move_ids.mapped('picking_id')).filtered(lambda record: not record.carrier_id)
if pickings:
pickings.write({'carrier_id': proc.sale_line_id.order_id.carrier_id.id})
return res
| gpl-3.0 |
wasade/networkx | networkx/algorithms/connectivity/tests/test_connectivity.py | 44 | 15871 | import itertools
from nose.tools import assert_equal, assert_true, assert_false, assert_raises
import networkx as nx
from networkx.algorithms.flow import (edmonds_karp, preflow_push,
shortest_augmenting_path)
flow_funcs = [edmonds_karp, preflow_push, shortest_augmenting_path]
# connectivity functions not imported to the base namespace
from networkx.algorithms.connectivity import (local_edge_connectivity,
local_node_connectivity)
msg = "Assertion failed in function: {0}"
# helper functions for tests
def _generate_no_biconnected(max_attempts=50):
attempts = 0
while True:
G = nx.fast_gnp_random_graph(100, 0.0575)
if nx.is_connected(G) and not nx.is_biconnected(G):
attempts = 0
yield G
else:
if attempts >= max_attempts:
msg = "Tried %d times: no suitable Graph."
raise Exception(msg % max_attempts)
else:
attempts += 1
def test_average_connectivity():
# figure 1 from:
# Beineke, L., O. Oellermann, and R. Pippert (2002). The average
# connectivity of a graph. Discrete mathematics 252(1-3), 31-45
# http://www.sciencedirect.com/science/article/pii/S0012365X01001807
G1 = nx.path_graph(3)
    G1.add_edges_from([(1, 3), (1, 4)])
    G2 = nx.path_graph(3)
    G2.add_edges_from([(1, 3), (1, 4), (0, 3), (0, 4), (3, 4)])
G3 = nx.Graph()
for flow_func in flow_funcs:
kwargs = dict(flow_func=flow_func)
assert_equal(nx.average_node_connectivity(G1, **kwargs), 1,
msg=msg.format(flow_func.__name__))
assert_equal(nx.average_node_connectivity(G2, **kwargs), 2.2,
msg=msg.format(flow_func.__name__))
assert_equal(nx.average_node_connectivity(G3, **kwargs), 0,
msg=msg.format(flow_func.__name__))
def test_average_connectivity_directed():
    G = nx.DiGraph([(1, 3), (1, 4), (1, 5)])
for flow_func in flow_funcs:
        assert_equal(nx.average_node_connectivity(G, flow_func=flow_func), 0.25,
msg=msg.format(flow_func.__name__))
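# Illustrative sketch (not part of the original suite): for an undirected
# graph, average node connectivity is the mean of the local connectivity
# over all unordered node pairs, so small cases can be cross-checked by
# brute force against the library routine.
def _average_connectivity_brute_force(G):
    pairs = list(itertools.combinations(G, 2))
    if not pairs:
        return 0
    return sum(nx.node_connectivity(G, u, v) for u, v in pairs) / float(len(pairs))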
def test_articulation_points():
Ggen = _generate_no_biconnected()
for flow_func in flow_funcs:
for i in range(3):
G = next(Ggen)
assert_equal(nx.node_connectivity(G, flow_func=flow_func), 1,
msg=msg.format(flow_func.__name__))
def test_brandes_erlebach():
# Figure 1 chapter 7: Connectivity
# http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf
G = nx.Graph()
G.add_edges_from([(1, 2), (1, 3), (1, 4), (1, 5), (2, 3), (2, 6), (3, 4),
(3, 6), (4, 6), (4, 7), (5, 7), (6, 8), (6, 9), (7, 8),
(7, 10), (8, 11), (9, 10), (9, 11), (10, 11)])
for flow_func in flow_funcs:
kwargs = dict(flow_func=flow_func)
assert_equal(3, local_edge_connectivity(G, 1, 11, **kwargs),
msg=msg.format(flow_func.__name__))
assert_equal(3, nx.edge_connectivity(G, 1, 11, **kwargs),
msg=msg.format(flow_func.__name__))
assert_equal(2, local_node_connectivity(G, 1, 11, **kwargs),
msg=msg.format(flow_func.__name__))
assert_equal(2, nx.node_connectivity(G, 1, 11, **kwargs),
msg=msg.format(flow_func.__name__))
assert_equal(2, nx.edge_connectivity(G, **kwargs), # node 5 has degree 2
msg=msg.format(flow_func.__name__))
assert_equal(2, nx.node_connectivity(G, **kwargs),
msg=msg.format(flow_func.__name__))
def test_white_harary_1():
# Figure 1b white and harary (2001)
# # http://eclectic.ss.uci.edu/~drwhite/sm-w23.PDF
# A graph with high adhesion (edge connectivity) and low cohesion
# (vertex connectivity)
G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4))
G.remove_node(7)
for i in range(4, 7):
G.add_edge(0, i)
G = nx.disjoint_union(G, nx.complete_graph(4))
G.remove_node(G.order() - 1)
for i in range(7, 10):
G.add_edge(0, i)
for flow_func in flow_funcs:
assert_equal(1, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(3, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_white_harary_2():
# Figure 8 white and harary (2001)
# # http://eclectic.ss.uci.edu/~drwhite/sm-w23.PDF
G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4))
G.add_edge(0, 4)
# kappa <= lambda <= delta
assert_equal(3, min(nx.core_number(G).values()))
for flow_func in flow_funcs:
assert_equal(1, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(1, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_complete_graphs():
for n in range(5, 20, 5):
for flow_func in flow_funcs:
G = nx.complete_graph(n)
assert_equal(n-1, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(n-1, nx.node_connectivity(G.to_directed(),
flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(n-1, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(n-1, nx.edge_connectivity(G.to_directed(),
flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_empty_graphs():
for k in range(5, 25, 5):
G = nx.empty_graph(k)
for flow_func in flow_funcs:
assert_equal(0, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(0, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_petersen():
G = nx.petersen_graph()
for flow_func in flow_funcs:
assert_equal(3, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(3, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_tutte():
G = nx.tutte_graph()
for flow_func in flow_funcs:
assert_equal(3, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(3, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_dodecahedral():
G = nx.dodecahedral_graph()
for flow_func in flow_funcs:
assert_equal(3, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(3, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_octahedral():
G=nx.octahedral_graph()
for flow_func in flow_funcs:
assert_equal(4, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(4, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_icosahedral():
G=nx.icosahedral_graph()
for flow_func in flow_funcs:
assert_equal(5, nx.node_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(5, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_missing_source():
G = nx.path_graph(4)
for flow_func in flow_funcs:
assert_raises(nx.NetworkXError, nx.node_connectivity, G, 10, 1,
flow_func=flow_func)
def test_missing_target():
G = nx.path_graph(4)
for flow_func in flow_funcs:
assert_raises(nx.NetworkXError, nx.node_connectivity, G, 1, 10,
flow_func=flow_func)
def test_edge_missing_source():
G = nx.path_graph(4)
for flow_func in flow_funcs:
assert_raises(nx.NetworkXError, nx.edge_connectivity, G, 10, 1,
flow_func=flow_func)
def test_edge_missing_target():
G = nx.path_graph(4)
for flow_func in flow_funcs:
assert_raises(nx.NetworkXError, nx.edge_connectivity, G, 1, 10,
flow_func=flow_func)
def test_not_weakly_connected():
G = nx.DiGraph()
G.add_path([1, 2, 3])
G.add_path([4, 5])
for flow_func in flow_funcs:
        assert_equal(nx.node_connectivity(G, flow_func=flow_func), 0,
                     msg=msg.format(flow_func.__name__))
        assert_equal(nx.edge_connectivity(G, flow_func=flow_func), 0,
                     msg=msg.format(flow_func.__name__))
def test_not_connected():
G = nx.Graph()
G.add_path([1, 2, 3])
G.add_path([4, 5])
for flow_func in flow_funcs:
        assert_equal(nx.node_connectivity(G, flow_func=flow_func), 0,
                     msg=msg.format(flow_func.__name__))
        assert_equal(nx.edge_connectivity(G, flow_func=flow_func), 0,
                     msg=msg.format(flow_func.__name__))
def test_directed_edge_connectivity():
G = nx.cycle_graph(10, create_using=nx.DiGraph()) # only one direction
D = nx.cycle_graph(10).to_directed() # 2 reciprocal edges
for flow_func in flow_funcs:
assert_equal(1, nx.edge_connectivity(G, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(1, local_edge_connectivity(G, 1, 4, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(1, nx.edge_connectivity(G, 1, 4, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(2, nx.edge_connectivity(D, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(2, local_edge_connectivity(D, 1, 4, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
assert_equal(2, nx.edge_connectivity(D, 1, 4, flow_func=flow_func),
msg=msg.format(flow_func.__name__))
def test_cutoff():
G = nx.complete_graph(5)
for local_func in [local_edge_connectivity, local_node_connectivity]:
for flow_func in flow_funcs:
if flow_func is preflow_push:
# cutoff is not supported by preflow_push
continue
for cutoff in [3, 2, 1]:
result = local_func(G, 0, 4, flow_func=flow_func, cutoff=cutoff)
assert_equal(cutoff, result,
msg="cutoff error in {0}".format(flow_func.__name__))
def test_invalid_auxiliary():
G = nx.complete_graph(5)
assert_raises(nx.NetworkXError, local_node_connectivity, G, 0, 3,
auxiliary=G)
def test_interface_only_source():
G = nx.complete_graph(5)
for interface_func in [nx.node_connectivity, nx.edge_connectivity]:
assert_raises(nx.NetworkXError, interface_func, G, s=0)
def test_interface_only_target():
G = nx.complete_graph(5)
for interface_func in [nx.node_connectivity, nx.edge_connectivity]:
assert_raises(nx.NetworkXError, interface_func, G, t=3)
def test_edge_connectivity_flow_vs_stoer_wagner():
graph_funcs = [
nx.icosahedral_graph,
nx.octahedral_graph,
nx.dodecahedral_graph,
]
for graph_func in graph_funcs:
G = graph_func()
assert_equal(nx.stoer_wagner(G)[0], nx.edge_connectivity(G))
class TestAllPairsNodeConnectivity:
def setUp(self):
self.path = nx.path_graph(7)
self.directed_path = nx.path_graph(7, create_using=nx.DiGraph())
self.cycle = nx.cycle_graph(7)
self.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph())
self.gnp = nx.gnp_random_graph(30, 0.1)
self.directed_gnp = nx.gnp_random_graph(30, 0.1, directed=True)
self.K20 = nx.complete_graph(20)
self.K10 = nx.complete_graph(10)
self.K5 = nx.complete_graph(5)
self.G_list = [self.path, self.directed_path, self.cycle,
self.directed_cycle, self.gnp, self.directed_gnp, self.K10,
self.K5, self.K20]
def test_cycles(self):
K_undir = nx.all_pairs_node_connectivity(self.cycle)
for source in K_undir:
for target, k in K_undir[source].items():
assert_true(k == 2)
K_dir = nx.all_pairs_node_connectivity(self.directed_cycle)
for source in K_dir:
for target, k in K_dir[source].items():
assert_true(k == 1)
def test_complete(self):
for G in [self.K10, self.K5, self.K20]:
K = nx.all_pairs_node_connectivity(G)
for source in K:
for target, k in K[source].items():
assert_true(k == len(G)-1)
def test_paths(self):
K_undir = nx.all_pairs_node_connectivity(self.path)
for source in K_undir:
for target, k in K_undir[source].items():
assert_true(k == 1)
K_dir = nx.all_pairs_node_connectivity(self.directed_path)
for source in K_dir:
for target, k in K_dir[source].items():
if source < target:
assert_true(k == 1)
else:
assert_true(k == 0)
    def test_all_pairs_connectivity_nbunch_size(self):
G = nx.complete_graph(5)
nbunch = [0, 2, 3]
C = nx.all_pairs_node_connectivity(G, nbunch=nbunch)
assert_equal(len(C), len(nbunch))
def test_all_pairs_connectivity_icosahedral(self):
G = nx.icosahedral_graph()
C = nx.all_pairs_node_connectivity(G)
assert_true(all(5 == C[u][v] for u, v in itertools.combinations(G, 2)))
def test_all_pairs_connectivity(self):
G = nx.Graph()
nodes = [0, 1, 2, 3]
G.add_path(nodes)
A = {n: {} for n in G}
for u, v in itertools.combinations(nodes,2):
A[u][v] = A[v][u] = nx.node_connectivity(G, u, v)
C = nx.all_pairs_node_connectivity(G)
assert_equal(sorted((k, sorted(v)) for k, v in A.items()),
sorted((k, sorted(v)) for k, v in C.items()))
def test_all_pairs_connectivity_directed(self):
G = nx.DiGraph()
nodes = [0, 1, 2, 3]
G.add_path(nodes)
A = {n: {} for n in G}
for u, v in itertools.permutations(nodes, 2):
A[u][v] = nx.node_connectivity(G, u, v)
C = nx.all_pairs_node_connectivity(G)
assert_equal(sorted((k, sorted(v)) for k, v in A.items()),
sorted((k, sorted(v)) for k, v in C.items()))
def test_all_pairs_connectivity_nbunch(self):
G = nx.complete_graph(5)
nbunch = [0, 2, 3]
A = {n: {} for n in nbunch}
for u, v in itertools.combinations(nbunch, 2):
A[u][v] = A[v][u] = nx.node_connectivity(G, u, v)
C = nx.all_pairs_node_connectivity(G, nbunch=nbunch)
assert_equal(sorted((k, sorted(v)) for k, v in A.items()),
sorted((k, sorted(v)) for k, v in C.items()))
def test_all_pairs_connectivity_nbunch_iter(self):
G = nx.complete_graph(5)
nbunch = [0, 2, 3]
A = {n: {} for n in nbunch}
for u, v in itertools.combinations(nbunch, 2):
A[u][v] = A[v][u] = nx.node_connectivity(G, u, v)
C = nx.all_pairs_node_connectivity(G, nbunch=iter(nbunch))
assert_equal(sorted((k, sorted(v)) for k, v in A.items()),
sorted((k, sorted(v)) for k, v in C.items()))
| bsd-3-clause |
chemelnucfin/tensorflow | tensorflow/python/data/experimental/kernel_tests/copy_to_device_test.py | 5 | 22457 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.copy_to_device()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import prefetching_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat as util_compat
# TODO(b/117581999): add eager coverage when supported.
class CopyToDeviceTest(test_base.DatasetTestBase):
@test_util.deprecated_graph_mode_only
def testCopyToDevice(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(host_dataset),
dataset_ops.get_structure(device_dataset)))
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceInt32(self):
host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(host_dataset),
dataset_ops.get_structure(device_dataset)))
self.assertEqual(dtypes.int32, next_element.dtype)
self.assertEqual((4,), next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
self.assertAllEqual([0, 1, 2, 3], self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToSameDevice(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:0"))
with ops.device("/cpu:0"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(host_dataset),
dataset_ops.get_structure(device_dataset)))
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceWithPrefetch(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(host_dataset),
dataset_ops.get_structure(device_dataset)))
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyDictToDevice(self):
host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(host_dataset),
dataset_ops.get_structure(device_dataset)))
self.assertEqual(dtypes.int64, next_element["a"].dtype)
self.assertEqual([], next_element["a"].shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
self.assertEqual({"a": i}, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyDictToDeviceWithPrefetch(self):
host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(host_dataset),
dataset_ops.get_structure(device_dataset)))
self.assertEqual(dtypes.int64, next_element["a"].dtype)
self.assertEqual([], next_element["a"].shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
self.assertEqual({"a": i}, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopySparseTensorsToDevice(self):
def make_tensor(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0]], values=(i * [1]), dense_shape=[2, 2])
host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(host_dataset),
dataset_ops.get_structure(device_dataset)))
self.assertEqual(dtypes.int64, next_element.dtype)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
actual = self.evaluate(next_element)
self.assertAllEqual([i], actual.values)
self.assertAllEqual([[0, 0]], actual.indices)
self.assertAllEqual([2, 2], actual.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopySparseTensorsToDeviceWithPrefetch(self):
def make_tensor(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0]], values=(i * [1]), dense_shape=[2, 2])
host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
with ops.device("/cpu:1"):
iterator = dataset_ops.make_one_shot_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(host_dataset),
dataset_ops.get_structure(device_dataset)))
self.assertEqual(dtypes.int64, next_element.dtype)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
for i in range(10):
actual = self.evaluate(next_element)
self.assertAllEqual([i], actual.values)
self.assertAllEqual([[0, 0]], actual.indices)
self.assertAllEqual([2, 2], actual.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpu(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuWithPrefetch(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuWithMap(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
def generator():
for i in range(10):
yield i, float(i), str(i)
host_dataset = dataset_ops.Dataset.from_generator(
generator, output_types=(dtypes.int32, dtypes.float32, dtypes.string))
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
def gpu_map_func(x, y, z):
return math_ops.square(x), math_ops.square(y), z
device_dataset = device_dataset.apply(
prefetching_ops.map_on_gpu(gpu_map_func))
options = dataset_ops.Options()
options.experimental_optimization.autotune = False
device_dataset = device_dataset.with_options(options)
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
for i in range(10):
x, y, z = self.evaluate(next_element)
self.assertEqual(i**2, x)
self.assertEqual(float(i**2), y)
self.assertEqual(util_compat.as_bytes(str(i)), z)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuInt32(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuInt32AndPrefetch(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuStrings(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"])
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
self.assertAllEqual([b"a", b"b", b"c"], self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuStringsAndPrefetch(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"])
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
self.assertAllEqual([b"a", b"b", b"c"], self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDevicePingPongCPUGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with compat.forward_compatibility_horizon(2018, 8, 4):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0", source_device="/cpu:0"))
back_to_cpu_dataset = device_dataset.apply(
prefetching_ops.copy_to_device("/cpu:0", source_device="/gpu:0"))
with ops.device("/cpu:0"):
iterator = dataset_ops.make_initializable_iterator(back_to_cpu_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceWithReInit(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1"))
with ops.device("/cpu:1"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(host_dataset),
dataset_ops.get_structure(device_dataset)))
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
self.evaluate(iterator.initializer)
for i in range(5):
self.assertEqual(i, self.evaluate(next_element))
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceWithReInitAndPrefetch(self):
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
with ops.device("/cpu:1"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(host_dataset),
dataset_ops.get_structure(device_dataset)))
self.assertEqual(dtypes.int64, next_element.dtype)
self.assertEqual([], next_element.shape)
worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
with self.test_session(config=worker_config):
self.evaluate(iterator.initializer)
for i in range(5):
self.assertEqual(i, self.evaluate(next_element))
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuWithReInit(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
for i in range(5):
self.assertEqual(i, self.evaluate(next_element))
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuWithReInitAndPrefetch(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(10)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
for i in range(5):
self.assertEqual(i, self.evaluate(next_element))
self.evaluate(iterator.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testIteratorGetNextAsOptionalOnGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.range(3)
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_elem = iterator_ops.get_next_as_optional(iterator)
elem_has_value_t = next_elem.has_value()
elem_value_t = next_elem.get_value()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
# Before initializing the iterator, evaluating the optional fails with
# a FailedPreconditionError.
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(elem_has_value_t)
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(elem_value_t)
# For each element of the dataset, assert that the optional evaluates to
# the expected value.
self.evaluate(iterator.initializer)
for i in range(3):
elem_has_value, elem_value = self.evaluate(
[elem_has_value_t, elem_value_t])
self.assertTrue(elem_has_value)
self.assertEqual(i, elem_value)
# After exhausting the iterator, `next_elem.has_value()` will evaluate to
# false, and attempting to get the value will fail.
for _ in range(2):
self.assertFalse(self.evaluate(elem_has_value_t))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(elem_value_t)
if __name__ == "__main__":
test.main()
| apache-2.0 |
NervanaSystems/neon | examples/mnist_hdf5.py | 1 | 3625 | #!/usr/bin/env python
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
Example for using the HDF5 data iterator on a MLP with the MNIST data.
See mnist_mlp.py for more information on the model and data used in this example.
Usage:
python examples/mnist_hdf5.py
"""
from neon.callbacks.callbacks import Callbacks
from neon.data import HDF5IteratorOneHot, MNIST
from neon.initializers import Gaussian
from neon.layers import GeneralizedCost, Affine
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary, Misclassification
from neon.util.argparser import NeonArgparser
from neon import logger as neon_logger
import h5py
import numpy as np
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
# load up the mnist data set
dataset = MNIST(path=args.data_dir)
# split into train and test sets
(X_train, y_train), (X_test, y_test), nclass = dataset.load_data()
# generate the HDF5 file
datasets = {'train': (X_train, y_train),
            'test': (X_test, y_test)}
for ky in ['train', 'test']:
df = h5py.File('mnist_%s.h5' % ky, 'w')
# input images
    in_dat = datasets[ky][0]
df.create_dataset('input', data=in_dat)
df['input'].attrs['lshape'] = (1, 28, 28) # (C, H, W)
    # One can also add a mean image (or a channel-by-channel mean for color
    # images) to support mean subtraction during data iteration, e.g.:
if ky == 'train':
mean_image = np.mean(X_train, axis=0)
# use training set mean for both train and val data sets
df.create_dataset('mean', data=mean_image)
    target = datasets[ky][1].reshape((-1, 1))  # make it a 2D array
df.create_dataset('output', data=target)
df['output'].attrs['nclass'] = 10
df.close()
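# Illustrative read-back check (a sketch added for clarity, not part of the
# original example): verify the HDF5 files written above by inspecting their
# datasets and attributes with h5py. The helper name is hypothetical.
def verify_h5(fname='mnist_train.h5'):
    with h5py.File(fname, 'r') as hf:
        print('input shape:', hf['input'].shape)                   # e.g. (60000, 784)
        print('lshape attr:', tuple(hf['input'].attrs['lshape']))  # (1, 28, 28)
        print('output nclass:', hf['output'].attrs['nclass'])      # 10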
# setup a training set iterator
# use the iterator that generates 1-hot output. other HDF5Iterator (sub) classes are
# available for different data layouts
train_set = HDF5IteratorOneHot('mnist_train.h5')
valid_set = HDF5IteratorOneHot('mnist_test.h5')
# setup weight initialization function
init_norm = Gaussian(loc=0.0, scale=0.01)
# setup model layers
layers = [Affine(nout=100, init=init_norm, activation=Rectlin()),
Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]
# setup cost function as CrossEntropy
cost = GeneralizedCost(costfunc=CrossEntropyBinary())
# setup optimizer
optimizer = GradientDescentMomentum(
0.1, momentum_coef=0.9, stochastic_round=args.rounding)
# initialize model object
mlp = Model(layers=layers)
# configure callbacks
callbacks = Callbacks(mlp, eval_set=valid_set, **args.callback_args)
# run fit
mlp.fit(train_set, optimizer=optimizer,
num_epochs=args.epochs, cost=cost, callbacks=callbacks)
error_rate = mlp.eval(valid_set, metric=Misclassification())
neon_logger.display('Misclassification error = %.1f%%' % (error_rate * 100))
| apache-2.0 |
tangfeixiong/nova | nova/tests/unit/api/ec2/test_ec2_validate.py | 60 | 11301 | # Copyright 2012 Cloudscaling, Inc.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_config import cfg
from oslo_utils import timeutils
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova import test
from nova.tests.unit import cast_as_call
from nova.tests.unit import fake_network
from nova.tests.unit import fake_notifier
from nova.tests.unit.image import fake
CONF = cfg.CONF
CONF.import_opt('compute_driver', 'nova.virt.driver')
class EC2ValidateTestCase(test.TestCase):
def setUp(self):
super(EC2ValidateTestCase, self).setUp()
self.flags(compute_driver='nova.virt.fake.FakeDriver')
def dumb(*args, **kwargs):
pass
self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
fake_network.set_stub_network_methods(self.stubs)
# set up our cloud
self.cloud = cloud.CloudController()
# Short-circuit the conductor service
self.flags(use_local=True, group='conductor')
# Stub out the notification service so we use the no-op serializer
# and avoid lazy-load traces with the wrap_exception decorator in
# the compute service.
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
# set up services
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
self.network = self.start_service('network')
self.image_service = fake.FakeImageService()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
self.EC2_MALFORMED_IDS = ['foobar', '', 123]
        self.EC2_VALID_IDS = ['i-284f3a41', 'i-001', 'i-deadbeef']
self.ec2_id_exception_map = [(x,
exception.InvalidInstanceIDMalformed)
for x in self.EC2_MALFORMED_IDS]
        self.ec2_id_exception_map.extend([(x, exception.InstanceNotFound)
                                          for x in self.EC2_VALID_IDS])
self.volume_id_exception_map = [(x,
exception.InvalidVolumeIDMalformed)
for x in self.EC2_MALFORMED_IDS]
        self.volume_id_exception_map.extend([(x, exception.VolumeNotFound)
                                             for x in self.EC2_VALID_IDS])
def fake_show(meh, context, id, **kwargs):
return {'id': id,
'container_format': 'ami',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine',
'image_state': 'available'}}
def fake_detail(self, context, **kwargs):
image = fake_show(self, context, None)
image['name'] = kwargs.get('name')
return [image]
fake.stub_out_image_service(self.stubs)
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
self.useFixture(cast_as_call.CastAsCall(self.stubs))
# make sure we can map ami-00000001/2 to a uuid in FakeImageService
db.s3_image_create(self.context,
'cedef40a-ed67-4d10-800e-17455edce175')
db.s3_image_create(self.context,
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
def tearDown(self):
super(EC2ValidateTestCase, self).tearDown()
fake.FakeImageService_reset()
# EC2_API tests (InvalidInstanceID.Malformed)
def test_console_output(self):
for ec2_id, e in self.ec2_id_exception_map:
self.assertRaises(e,
self.cloud.get_console_output,
context=self.context,
instance_id=[ec2_id])
def test_describe_instance_attribute(self):
for ec2_id, e in self.ec2_id_exception_map:
self.assertRaises(e,
self.cloud.describe_instance_attribute,
context=self.context,
instance_id=ec2_id,
attribute='kernel')
def test_instance_lifecycle(self):
lifecycle = [self.cloud.terminate_instances,
self.cloud.reboot_instances,
self.cloud.stop_instances,
self.cloud.start_instances,
]
for cmd in lifecycle:
for ec2_id, e in self.ec2_id_exception_map:
self.assertRaises(e,
cmd,
context=self.context,
instance_id=[ec2_id])
def test_create_image(self):
for ec2_id, e in self.ec2_id_exception_map:
self.assertRaises(e,
self.cloud.create_image,
context=self.context,
instance_id=ec2_id)
def test_create_snapshot(self):
for ec2_id, e in self.volume_id_exception_map:
self.assertRaises(e,
self.cloud.create_snapshot,
context=self.context,
volume_id=ec2_id)
def test_describe_volumes(self):
for ec2_id, e in self.volume_id_exception_map:
self.assertRaises(e,
self.cloud.describe_volumes,
context=self.context,
volume_id=[ec2_id])
def test_delete_volume(self):
for ec2_id, e in self.volume_id_exception_map:
self.assertRaises(e,
self.cloud.delete_volume,
context=self.context,
volume_id=ec2_id)
def test_detach_volume(self):
for ec2_id, e in self.volume_id_exception_map:
self.assertRaises(e,
self.cloud.detach_volume,
context=self.context,
volume_id=ec2_id)
class EC2TimestampValidationTestCase(test.NoDBTestCase):
"""Test case for EC2 request timestamp validation."""
def test_validate_ec2_timestamp_valid(self):
params = {'Timestamp': '2011-04-22T11:29:49Z'}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertFalse(expired)
def test_validate_ec2_timestamp_old_format(self):
params = {'Timestamp': '2011-04-22T11:29:49'}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertTrue(expired)
def test_validate_ec2_timestamp_not_set(self):
params = {}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertFalse(expired)
def test_validate_ec2_timestamp_ms_time_regex(self):
result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123Z')
self.assertIsNotNone(result)
result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123456Z')
self.assertIsNotNone(result)
result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.1234567Z')
self.assertIsNone(result)
result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123')
self.assertIsNone(result)
result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49Z')
self.assertIsNone(result)
def test_validate_ec2_timestamp_aws_sdk_format(self):
params = {'Timestamp': '2011-04-22T11:29:49.123Z'}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertFalse(expired)
expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
self.assertTrue(expired)
def test_validate_ec2_timestamp_invalid_format(self):
params = {'Timestamp': '2011-04-22T11:29:49.000P'}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertTrue(expired)
def test_validate_ec2_timestamp_advanced_time(self):
# EC2 request with Timestamp in advanced time
timestamp = timeutils.utcnow() + datetime.timedelta(seconds=250)
params = {'Timestamp': timeutils.strtime(timestamp,
"%Y-%m-%dT%H:%M:%SZ")}
expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
self.assertFalse(expired)
def test_validate_ec2_timestamp_advanced_time_expired(self):
timestamp = timeutils.utcnow() + datetime.timedelta(seconds=350)
params = {'Timestamp': timeutils.strtime(timestamp,
"%Y-%m-%dT%H:%M:%SZ")}
expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
self.assertTrue(expired)
def test_validate_ec2_req_timestamp_not_expired(self):
params = {'Timestamp': timeutils.isotime()}
expired = ec2utils.is_ec2_timestamp_expired(params, expires=15)
self.assertFalse(expired)
def test_validate_ec2_req_timestamp_expired(self):
params = {'Timestamp': '2011-04-22T12:00:00Z'}
compare = ec2utils.is_ec2_timestamp_expired(params, expires=300)
self.assertTrue(compare)
def test_validate_ec2_req_expired(self):
params = {'Expires': timeutils.isotime()}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertTrue(expired)
def test_validate_ec2_req_not_expired(self):
expire = timeutils.utcnow() + datetime.timedelta(seconds=350)
params = {'Expires': timeutils.strtime(expire, "%Y-%m-%dT%H:%M:%SZ")}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertFalse(expired)
def test_validate_Expires_timestamp_invalid_format(self):
# EC2 request with invalid Expires
params = {'Expires': '2011-04-22T11:29:49'}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertTrue(expired)
def test_validate_ec2_req_timestamp_Expires(self):
# EC2 request with both Timestamp and Expires
params = {'Timestamp': '2011-04-22T11:29:49Z',
'Expires': timeutils.isotime()}
self.assertRaises(exception.InvalidRequest,
ec2utils.is_ec2_timestamp_expired,
params)
| apache-2.0 |
CiscoSystems/fabric_enabler | dfa/db/dfa_db_api.py | 1 | 1275 | # Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Nader Lahouti, Cisco Systems, Inc.
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
DFA_db_session = None
Base = declarative_base()
def configure_db(cfg):
global DFA_db_session
if DFA_db_session:
return
# engine = create_engine(connection, echo=True)
connection = cfg.dfa_mysql.connection
engine = create_engine(connection, echo=False)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine, autocommit=True)
DFA_db_session = Session()
def get_session():
return DFA_db_session
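# Example usage (an illustrative sketch; assumes a config object exposing a
# `dfa_mysql.connection` attribute, as configure_db() above expects, and a
# hypothetical mapped object):
# configure_db(cfg)
# session = get_session()
# with session.begin():   # autocommit sessions use explicit begin blocks
#     session.add(some_mapped_object)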
| apache-2.0 |
sagangwee/sagangwee.github.io | build/pygments/build/lib.linux-i686-2.7/pygments/__init__.py | 43 | 3114 | # -*- coding: utf-8 -*-
"""
Pygments
~~~~~~~~
Pygments is a syntax highlighting package written in Python.
It is a generic syntax highlighter for general use in all kinds of software
such as forum systems, wikis or other applications that need to prettify
source code. Highlights are:
* a wide range of common languages and markup formats is supported
* special attention is paid to details, increasing quality by a fair amount
* support for new languages and formats are added easily
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
formats that PIL supports, and ANSI sequences
* it is usable as a command-line tool and as a library
* ... and it highlights even Brainfuck!
The `Pygments tip`_ is installable with ``easy_install Pygments==dev``.
.. _Pygments tip:
http://bitbucket.org/birkenfeld/pygments-main/get/tip.zip#egg=Pygments-dev
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
__version__ = '2.0.2'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
import sys
from pygments.util import StringIO, BytesIO
def lex(code, lexer):
"""
Lex ``code`` with ``lexer`` and return an iterable of tokens.
"""
try:
return lexer.get_tokens(code)
except TypeError as err:
if isinstance(err.args[0], str) and \
('unbound method get_tokens' in err.args[0] or
'missing 1 required positional argument' in err.args[0]):
raise TypeError('lex() argument must be a lexer instance, '
'not a class')
raise
def format(tokens, formatter, outfile=None):
"""
Format a tokenlist ``tokens`` with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
try:
if not outfile:
realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
formatter.format(tokens, realoutfile)
return realoutfile.getvalue()
else:
formatter.format(tokens, outfile)
except TypeError as err:
if isinstance(err.args[0], str) and \
('unbound method format' in err.args[0] or
'missing 1 required positional argument' in err.args[0]):
raise TypeError('format() argument must be a formatter instance, '
'not a class')
raise
def highlight(code, lexer, formatter, outfile=None):
"""
Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
return format(lex(code, lexer), formatter, outfile)
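# Example usage (a minimal sketch; assumes a standard Pygments install where
# PythonLexer and HtmlFormatter are available):
# from pygments.lexers import PythonLexer
# from pygments.formatters import HtmlFormatter
# html = highlight('print("hello")', PythonLexer(), HtmlFormatter())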
if __name__ == '__main__': # pragma: no cover
from pygments.cmdline import main
sys.exit(main(sys.argv))
| mit |
polymeris/qgis | python/plugins/fTools/tools/doVectorGrid.py | 2 | 14136 | # -*- coding: utf-8 -*-
#-----------------------------------------------------------
#
# fTools
# Copyright (C) 2008-2011 Carson Farmer
# EMAIL: carson.farmer (at) gmail.com
# WEB : http://www.ftools.ca/fTools.html
#
# A collection of data management and analysis tools for vector data
#
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import ftools_utils
from qgis.core import *
from ui_frmVectorGrid import Ui_Dialog
import math
class Dialog(QDialog, Ui_Dialog):
def __init__(self, iface):
QDialog.__init__(self, iface.mainWindow())
self.iface = iface
self.setupUi(self)
QObject.connect(self.toolOut, SIGNAL("clicked()"), self.outFile)
QObject.connect(self.spnX, SIGNAL("valueChanged(double)"), self.offset)
#QObject.connect(self.inShape, SIGNAL("currentIndexChanged(QString)"), self.updateInput)
QObject.connect(self.btnUpdate, SIGNAL("clicked()"), self.updateLayer)
QObject.connect(self.btnCanvas, SIGNAL("clicked()"), self.updateCanvas)
QObject.connect(self.chkAlign, SIGNAL("toggled(bool)"), self.chkAlignToggled)
self.buttonOk = self.buttonBox_2.button( QDialogButtonBox.Ok )
self.setWindowTitle(self.tr("Vector grid"))
self.xMin.setValidator(QDoubleValidator(self.xMin))
self.xMax.setValidator(QDoubleValidator(self.xMax))
self.yMin.setValidator(QDoubleValidator(self.yMin))
self.yMax.setValidator(QDoubleValidator(self.yMax))
self.populateLayers()
def populateLayers( self ):
self.inShape.clear()
layermap = QgsMapLayerRegistry.instance().mapLayers()
for name, layer in layermap.iteritems():
self.inShape.addItem( unicode( layer.name() ) )
if layer == self.iface.activeLayer():
self.inShape.setCurrentIndex( self.inShape.count() -1 )
def offset(self, value):
if self.chkLock.isChecked():
self.spnY.setValue(value)
def updateLayer( self ):
mLayerName = self.inShape.currentText()
if not mLayerName == "":
mLayer = ftools_utils.getMapLayerByName( unicode( mLayerName ) )
# get layer extents
boundBox = mLayer.extent()
# if "align extents and resolution..." button is checked
if self.chkAlign.isChecked():
if not mLayer.type() == QgsMapLayer.RasterLayer:
QMessageBox.information(self, self.tr("Vector grid"), self.tr("Please select a raster layer"))
else:
dx = math.fabs(boundBox.xMaximum()-boundBox.xMinimum()) / mLayer.width()
dy = math.fabs(boundBox.yMaximum()-boundBox.yMinimum()) / mLayer.height()
self.spnX.setValue(dx)
self.spnY.setValue(dy)
self.updateExtents( boundBox )
def updateCanvas( self ):
canvas = self.iface.mapCanvas()
boundBox = canvas.extent()
# if "align extents and resolution..." button is checked
if self.chkAlign.isChecked():
mLayerName = self.inShape.currentText()
if not mLayerName == "":
mLayer = ftools_utils.getMapLayerByName( unicode( mLayerName ) )
if not mLayer.type() == QgsMapLayer.RasterLayer:
QMessageBox.information(self, self.tr("Vector grid"), self.tr("Please select a raster layer"))
else:
# get extents and pixel size
xMin = boundBox.xMinimum()
yMin = boundBox.yMinimum()
xMax = boundBox.xMaximum()
yMax = boundBox.yMaximum()
boundBox2 = mLayer.extent()
dx = math.fabs(boundBox2.xMaximum()-boundBox2.xMinimum()) / mLayer.width()
dy = math.fabs(boundBox2.yMaximum()-boundBox2.yMinimum()) / mLayer.height()
# get pixels from the raster that are closest to the desired extent
newXMin = self.getClosestPixel( boundBox2.xMinimum(), boundBox.xMinimum(), dx, True )
newXMax = self.getClosestPixel( boundBox2.xMaximum(), boundBox.xMaximum(), dx, False )
newYMin = self.getClosestPixel( boundBox2.yMinimum(), boundBox.yMinimum(), dy, True )
newYMax = self.getClosestPixel( boundBox2.yMaximum(), boundBox.yMaximum(), dy, False )
# apply new values if found all min/max
if newXMin is not None and newXMax is not None and newYMin is not None and newYMax is not None:
boundBox.set( newXMin, newYMin, newXMax, newYMax )
self.spnX.setValue(dx)
self.spnY.setValue(dy)
else:
QMessageBox.information(self, self.tr("Vector grid"), self.tr("Unable to compute extents aligned on selected raster layer"))
self.updateExtents( boundBox )
def updateExtents( self, boundBox ):
self.xMin.setText( unicode( boundBox.xMinimum() ) )
self.yMin.setText( unicode( boundBox.yMinimum() ) )
self.xMax.setText( unicode( boundBox.xMaximum() ) )
self.yMax.setText( unicode( boundBox.yMaximum() ) )
def accept(self):
self.buttonOk.setEnabled( False )
if self.xMin.text() == "" or self.xMax.text() == "" or self.yMin.text() == "" or self.yMax.text() == "":
QMessageBox.information(self, self.tr("Vector grid"), self.tr("Please specify valid extent coordinates"))
elif self.outShape.text() == "":
QMessageBox.information(self, self.tr("Vector grid"), self.tr("Please specify output shapefile"))
else:
try:
boundBox = QgsRectangle(
float( self.xMin.text() ),
float( self.yMin.text() ),
float( self.xMax.text() ),
float( self.yMax.text() ) )
except:
QMessageBox.information(self, self.tr("Vector grid"), self.tr("Invalid extent coordinates entered"))
xSpace = self.spnX.value()
ySpace = self.spnY.value()
if self.rdoPolygons.isChecked():
polygon = True
else:
polygon = False
self.outShape.clear()
QApplication.setOverrideCursor(Qt.WaitCursor)
self.compute( boundBox, xSpace, ySpace, polygon )
QApplication.restoreOverrideCursor()
addToTOC = QMessageBox.question(self, self.tr("Generate Vector Grid"), self.tr("Created output shapefile:\n%1\n\nWould you like to add the new layer to the TOC?").arg(unicode(self.shapefileName)), QMessageBox.Yes, QMessageBox.No, QMessageBox.NoButton)
if addToTOC == QMessageBox.Yes:
ftools_utils.addShapeToCanvas( self.shapefileName )
self.populateLayers()
self.progressBar.setValue( 0 )
self.buttonOk.setEnabled( True )
def compute( self, bound, xOffset, yOffset, polygon ):
crs = None
layer = ftools_utils.getMapLayerByName(unicode(self.inShape.currentText()))
if layer is None:
crs = self.iface.mapCanvas().mapRenderer().destinationCrs()
else:
crs = layer.crs()
if not crs.isValid(): crs = None
if polygon:
fields = {0:QgsField("ID", QVariant.Int), 1:QgsField("XMIN", QVariant.Double), 2:QgsField("XMAX", QVariant.Double),
3:QgsField("YMIN", QVariant.Double), 4:QgsField("YMAX", QVariant.Double)}
check = QFile(self.shapefileName)
if check.exists():
if not QgsVectorFileWriter.deleteShapeFile(self.shapefileName):
return
writer = QgsVectorFileWriter(self.shapefileName, self.encoding, fields, QGis.WKBPolygon, crs)
else:
fields = {0:QgsField("ID", QVariant.Int), 1:QgsField("COORD", QVariant.Double)}
check = QFile(self.shapefileName)
if check.exists():
if not QgsVectorFileWriter.deleteShapeFile(self.shapefileName):
return
writer = QgsVectorFileWriter(self.shapefileName, self.encoding, fields, QGis.WKBLineString, crs)
outFeat = QgsFeature()
outGeom = QgsGeometry()
idVar = 0
self.progressBar.setValue( 0 )
if not polygon:
# counters for progressbar - update every 5%
count = 0
count_max = (bound.yMaximum() - bound.yMinimum()) / yOffset
count_update = count_max * 0.10
y = bound.yMaximum()
while y >= bound.yMinimum():
pt1 = QgsPoint(bound.xMinimum(), y)
pt2 = QgsPoint(bound.xMaximum(), y)
line = [pt1, pt2]
outFeat.setGeometry(outGeom.fromPolyline(line))
outFeat.addAttribute(0, QVariant(idVar))
outFeat.addAttribute(1, QVariant(y))
writer.addFeature(outFeat)
y = y - yOffset
idVar = idVar + 1
count += 1
if int( math.fmod( count, count_update ) ) == 0:
prog = int( count / count_max * 50 )
self.progressBar.setValue( prog )
self.progressBar.setValue( 50 )
# counters for progressbar - update every 5%
count = 0
count_max = (bound.xMaximum() - bound.xMinimum()) / xOffset
count_update = count_max * 0.10
x = bound.xMinimum()
while x <= bound.xMaximum():
pt1 = QgsPoint(x, bound.yMaximum())
pt2 = QgsPoint(x, bound.yMinimum())
line = [pt1, pt2]
outFeat.setGeometry(outGeom.fromPolyline(line))
outFeat.addAttribute(0, QVariant(idVar))
outFeat.addAttribute(1, QVariant(x))
writer.addFeature(outFeat)
x = x + xOffset
idVar = idVar + 1
count += 1
if int( math.fmod( count, count_update ) ) == 0:
prog = 50 + int( count / count_max * 50 )
self.progressBar.setValue( prog )
else:
# counters for progressbar - update every 5%
count = 0
count_max = (bound.yMaximum() - bound.yMinimum()) / yOffset
count_update = count_max * 0.05
y = bound.yMaximum()
while y >= bound.yMinimum():
x = bound.xMinimum()
while x <= bound.xMaximum():
pt1 = QgsPoint(x, y)
pt2 = QgsPoint(x + xOffset, y)
pt3 = QgsPoint(x + xOffset, y - yOffset)
pt4 = QgsPoint(x, y - yOffset)
pt5 = QgsPoint(x, y)
polygon = [[pt1, pt2, pt3, pt4, pt5]]
outFeat.setGeometry(outGeom.fromPolygon(polygon))
outFeat.addAttribute(0, QVariant(idVar))
outFeat.addAttribute(1, QVariant(x))
outFeat.addAttribute(2, QVariant(x + xOffset))
outFeat.addAttribute(3, QVariant(y - yOffset))
outFeat.addAttribute(4, QVariant(y))
writer.addFeature(outFeat)
idVar = idVar + 1
x = x + xOffset
y = y - yOffset
count += 1
if int( math.fmod( count, count_update ) ) == 0:
prog = int( count / count_max * 100 )
                    self.progressBar.setValue( prog )
#self.progressBar.setRange( 0, 100 )
del writer
def outFile(self):
self.outShape.clear()
( self.shapefileName, self.encoding ) = ftools_utils.saveDialog( self )
if self.shapefileName is None or self.encoding is None:
return
self.outShape.setText( QString( self.shapefileName ) )
def chkAlignToggled(self):
if self.chkAlign.isChecked():
self.spnX.setEnabled( False )
self.lblX.setEnabled( False )
self.spnY.setEnabled( False )
self.lblY.setEnabled( False )
else:
self.spnX.setEnabled( True )
self.lblX.setEnabled( True )
self.spnY.setEnabled( not self.chkLock.isChecked() )
self.lblY.setEnabled( not self.chkLock.isChecked() )
def getClosestPixel(self, startVal, targetVal, step, isMin ):
foundVal = None
tmpVal = startVal
        # find pixels covering the extent - slightly inefficient because it loops over all elements before xMin
if targetVal < startVal:
backOneStep = not isMin
step = - step
while foundVal is None:
if tmpVal <= targetVal:
if backOneStep:
tmpVal -= step
foundVal = tmpVal
tmpVal += step
else:
backOneStep = isMin
while foundVal is None:
if tmpVal >= targetVal:
if backOneStep:
tmpVal -= step
foundVal = tmpVal
tmpVal += step
return foundVal
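        # Worked example (illustrative): with startVal=0.0, step=1.0,
        # targetVal=2.3 and isMin=True, the loop first overshoots to 3.0,
        # then steps back one pixel because isMin is True, returning 2.0,
        # i.e. the pixel edge at or below the requested minimum.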
| gpl-2.0 |
Atheros1/PyBitmessage | src/message_data_reader.py | 10 | 3977 | #This program can be used to print out everything in your Inbox or Sent folders and also take things out of the trash.
#Scroll down to the bottom to see the functions that you can uncomment. Save then run this file.
#The functions which only read the database file seem to function just fine even if you have Bitmessage running but you should definitely close it before running the functions that make changes (like taking items out of the trash).
import sqlite3
from time import strftime, localtime
import sys
import shared
import string
appdata = shared.lookupAppdataFolder()
conn = sqlite3.connect( appdata + 'messages.dat' )
conn.text_factory = str
cur = conn.cursor()
def readInbox():
print 'Printing everything in inbox table:'
item = '''select * from inbox'''
parameters = ''
cur.execute(item, parameters)
output = cur.fetchall()
for row in output:
print row
def readSent():
print 'Printing everything in Sent table:'
item = '''select * from sent where folder !='trash' '''
parameters = ''
cur.execute(item, parameters)
output = cur.fetchall()
for row in output:
msgid, toaddress, toripe, fromaddress, subject, message, ackdata, lastactiontime, sleeptill, status, retrynumber, folder, encodingtype, ttl = row
print msgid.encode('hex'), toaddress, 'toripe:', toripe.encode('hex'), 'fromaddress:', fromaddress, 'ENCODING TYPE:', encodingtype, 'SUBJECT:', repr(subject), 'MESSAGE:', repr(message), 'ACKDATA:', ackdata.encode('hex'), lastactiontime, status, retrynumber, folder
def readSubscriptions():
print 'Printing everything in subscriptions table:'
item = '''select * from subscriptions'''
parameters = ''
cur.execute(item, parameters)
output = cur.fetchall()
for row in output:
print row
def readPubkeys():
print 'Printing everything in pubkeys table:'
item = '''select address, transmitdata, time, usedpersonally from pubkeys'''
parameters = ''
cur.execute(item, parameters)
output = cur.fetchall()
for row in output:
address, transmitdata, time, usedpersonally = row
print 'Address:', address, '\tTime first broadcast:', unicode(strftime('%a, %d %b %Y %I:%M %p',localtime(time)),'utf-8'), '\tUsed by me personally:', usedpersonally, '\tFull pubkey message:', transmitdata.encode('hex')
def readInventory():
print 'Printing everything in inventory table:'
item = '''select hash, objecttype, streamnumber, payload, expirestime from inventory'''
parameters = ''
cur.execute(item, parameters)
output = cur.fetchall()
for row in output:
hash, objecttype, streamnumber, payload, expirestime = row
print 'Hash:', hash.encode('hex'), objecttype, streamnumber, '\t', payload.encode('hex'), '\t', unicode(strftime('%a, %d %b %Y %I:%M %p',localtime(expirestime)),'utf-8')
def takeInboxMessagesOutOfTrash():
item = '''update inbox set folder='inbox' where folder='trash' '''
parameters = ''
cur.execute(item, parameters)
output = cur.fetchall()
conn.commit()
print 'done'
def takeSentMessagesOutOfTrash():
item = '''update sent set folder='sent' where folder='trash' '''
parameters = ''
cur.execute(item, parameters)
output = cur.fetchall()
conn.commit()
print 'done'
def markAllInboxMessagesAsUnread():
item = '''update inbox set read='0' '''
parameters = ''
cur.execute(item, parameters)
output = cur.fetchall()
conn.commit()
shared.UISignalQueue.put(('changedInboxUnread', None))
print 'done'
def vacuum():
item = '''VACUUM'''
parameters = ''
cur.execute(item, parameters)
output = cur.fetchall()
conn.commit()
print 'done'
#takeInboxMessagesOutOfTrash()
#takeSentMessagesOutOfTrash()
#markAllInboxMessagesAsUnread()
readInbox()
#readSent()
#readPubkeys()
#readSubscriptions()
#readInventory()
#vacuum() #will defragment and clean empty space from the messages.dat file.
| mit |
pwong-mapr/private-hue | desktop/core/ext-py/python-ldap-2.3.13/Lib/dsml.py | 44 | 8418 | """
dsml - generate and parse DSMLv1 data
(see http://www.oasis-open.org/committees/dsml/)
See http://www.python-ldap.org/ for details.
$Id: dsml.py,v 1.16 2010/05/07 08:15:47 stroeder Exp $
Python compability note:
Tested with Python 2.0+.
"""
__version__ = '2.3.12'
import string,base64
def list_dict(l):
"""
return a dictionary with all items of l being the keys of the dictionary
"""
d = {}
for i in l:
d[i]=None
return d
special_entities = (
('&','&'),
('<','<'),
('"','"'),
("'",'''),
)
def replace_char(s):
for char,entity in special_entities:
s = string.replace(s,char,entity)
return s
class DSMLWriter:
def __init__(
self,f,base64_attrs=[],dsml_comment='',indent=' '
):
"""
Parameters:
f
File object for output.
base64_attrs
Attribute types to be base64-encoded.
dsml_comment
Text placed in comment lines behind <dsml:dsml>.
indent
String used for indentiation of next nested level.
"""
self._output_file = f
self._base64_attrs = list_dict(map(string.lower,base64_attrs))
self._dsml_comment = dsml_comment
self._indent = indent
def _needs_base64_encoding(self,attr_type,attr_value):
if self._base64_attrs:
return self._base64_attrs.has_key(string.lower(attr_type))
else:
try:
unicode(attr_value,'utf-8')
except UnicodeError:
return 1
else:
return 0
def writeHeader(self):
"""
Write the header
"""
self._output_file.write('\n'.join([
'<?xml version="1.0" encoding="UTF-8"?>',
'<!DOCTYPE root PUBLIC "dsml.dtd" "http://www.dsml.org/1.0/dsml.dtd">',
'<dsml:dsml xmlns:dsml="http://www.dsml.org/DSML">',
'%s<dsml:directory-entries>\n' % (self._indent),
])
)
if self._dsml_comment:
self._output_file.write('%s<!--\n' % (self._indent))
self._output_file.write('%s%s\n' % (self._indent,self._dsml_comment))
self._output_file.write('%s-->\n' % (self._indent))
def writeFooter(self):
"""
Write the footer
"""
self._output_file.write('%s</dsml:directory-entries>\n' % (self._indent))
self._output_file.write('</dsml:dsml>\n')
def unparse(self,dn,entry):
return self.writeRecord(dn,entry)
def writeRecord(self,dn,entry):
"""
dn
string-representation of distinguished name
entry
dictionary holding the LDAP entry {attr:data}
"""
# Write line dn: first
self._output_file.write(
'%s<dsml:entry dn="%s">\n' % (
self._indent*2,replace_char(dn)
)
)
objectclasses = entry.get('objectclass',entry.get('objectClass',[]))
self._output_file.write('%s<dsml:objectclass>\n' % (self._indent*3))
for oc in objectclasses:
self._output_file.write('%s<dsml:oc-value>%s</dsml:oc-value>\n' % (self._indent*4,oc))
self._output_file.write('%s</dsml:objectclass>\n' % (self._indent*3))
attr_types = entry.keys()[:]
try:
attr_types.remove('objectclass')
attr_types.remove('objectClass')
except ValueError:
pass
attr_types.sort()
for attr_type in attr_types:
self._output_file.write('%s<dsml:attr name="%s">\n' % (self._indent*3,attr_type))
for attr_value_item in entry[attr_type]:
needs_base64_encoding = self._needs_base64_encoding(
attr_type,attr_value_item
)
if needs_base64_encoding:
attr_value_item = base64.encodestring(attr_value_item)
else:
attr_value_item = replace_char(attr_value_item)
self._output_file.write('%s<dsml:value%s>\n' % (
self._indent*4,
' encoding="base64"'*needs_base64_encoding
)
)
self._output_file.write('%s%s\n' % (
self._indent*5,
attr_value_item
)
)
self._output_file.write('%s</dsml:value>\n' % (
self._indent*4,
)
)
self._output_file.write('%s</dsml:attr>\n' % (self._indent*3))
self._output_file.write('%s</dsml:entry>\n' % (self._indent*2))
return
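# Example usage of DSMLWriter (an illustrative sketch; entry dicts follow
# python-ldap's {attr_type: [attr_values]} convention):
# import sys
# writer = DSMLWriter(sys.stdout)
# writer.writeHeader()
# writer.writeRecord(
#   'cn=test,dc=example,dc=com',
#   {'objectClass': ['person'], 'cn': ['test'], 'sn': ['Test']},
# )
# writer.writeFooter()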
try:
import xml.sax,xml.sax.handler
except ImportError:
pass
else:
class DSMLv1Handler(xml.sax.handler.ContentHandler):
"""
Content handler class for DSMLv1
"""
def __init__(self,parser_instance):
self._parser_instance = parser_instance
xml.sax.handler.ContentHandler.__init__(self)
def startDocument(self):
pass
def endDocument(self):
pass
def startElement(self,raw_name,attrs):
      assert raw_name.startswith('dsml:'),'Illegal name'
name = raw_name[5:]
if name=='dsml':
pass
elif name=='directory-entries':
self._parsing_entries = 1
elif name=='entry':
self._dn = attrs['dn']
self._entry = {}
elif name=='attr':
self._attr_type = attrs['name'].encode('utf-8')
self._attr_values = []
elif name=='value':
self._attr_value = ''
self._base64_encoding = attrs.get('encoding','').lower()=='base64'
# Handle object class tags
elif name=='objectclass':
self._object_classes = []
elif name=='oc-value':
self._oc_value = ''
# Unhandled tags
else:
raise ValueError,'Unknown tag %s' % (raw_name)
def endElement(self,raw_name):
assert raw_name.startswith('dsml:'),'Illegal name'
name = raw_name[5:]
if name=='dsml':
pass
elif name=='directory-entries':
self._parsing_entries = 0
elif name=='entry':
self._parser_instance.handle(self._dn,self._entry)
del self._dn
del self._entry
elif name=='attr':
self._entry[self._attr_type] = self._attr_values
del self._attr_type
del self._attr_values
elif name=='value':
if self._base64_encoding:
attr_value = base64.decodestring(self._attr_value.strip())
else:
attr_value = self._attr_value.strip().encode('utf-8')
self._attr_values.append(attr_value)
del attr_value
del self._attr_value
del self._base64_encoding
# Handle object class tags
elif name=='objectclass':
self._entry['objectClass'] = self._object_classes
del self._object_classes
elif name=='oc-value':
self._object_classes.append(self._oc_value.strip().encode('utf-8'))
del self._oc_value
# Unhandled tags
else:
raise ValueError,'Unknown tag %s' % (raw_name)
def characters(self,ch):
if self.__dict__.has_key('_oc_value'):
self._oc_value = self._oc_value + ch
elif self.__dict__.has_key('_attr_value'):
self._attr_value = self._attr_value + ch
else:
pass
class DSMLParser:
"""
Base class for a DSMLv1 parser. Applications should sub-class this
class and override method handle() to implement something meaningful.
Public class attributes:
records_read
Counter for records processed so far
"""
def __init__(
self,
input_file,
ContentHandlerClass,
ignored_attr_types=None,
max_entries=0,
):
"""
Parameters:
      input_file
        File-object to read the DSMLv1 input from
      ContentHandlerClass
        Content handler class used to process the parsed DSMLv1 content
      ignored_attr_types
        Attributes with these attribute type names will be ignored.
      max_entries
        If non-zero specifies the maximum number of entries to be
        read from input_file.
"""
self._input_file = input_file
self._max_entries = max_entries
self._ignored_attr_types = list_dict(map(string.lower,(ignored_attr_types or [])))
self._current_record = None,None
self.records_read = 0
self._parser = xml.sax.make_parser()
self._parser.setFeature(xml.sax.handler.feature_namespaces,0)
content_handler = ContentHandlerClass(self)
self._parser.setContentHandler(content_handler)
def handle(self,*args,**kwargs):
"""
Process a single content DSMLv1 record. This method should be
implemented by applications using DSMLParser.
"""
import pprint
pprint.pprint(args)
pprint.pprint(kwargs)
def parse(self):
"""
    Continuously read and parse DSML records
"""
self._parser.parse(self._input_file)
| apache-2.0 |
achadwick/mypaint | gui/quickchoice.py | 3 | 12339 | # This file is part of MyPaint.
# Copyright (C) 2013 by Andrew Chadwick <a.t.chadwick@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Widgets and popup dialogs for making quick choices"""
## Imports
from __future__ import division, print_function
import abc
from gi.repository import Gtk
from pixbuflist import PixbufList
import brushmanager
import brushselectionwindow
import widgets
import spinbox
import windowing
from lib.observable import event
import gui.colortools
## Module consts
_DEFAULT_PREFS_ID = u"default"
## Interfaces
class Advanceable:
"""Interface for choosers which can be advanced by pressing keys.
Advancing happens if the chooser is already visible and its key is
pressed again. This can happen repeatedly. The actual action
performed is up to the implementation: advancing some some choosers
may move them forward through pages of alternatives, while other
choosers may actually change a brush setting as they advance.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def advance(self):
"""Advances the chooser to the next page or choice.
Choosers should remain open when their advance() method is
invoked. The actual action performed is up to the concrete
implementation: see the class docs.
"""
## Class defs
class QuickBrushChooser (Gtk.VBox):
"""A quick chooser widget for brushes"""
## Class constants
_PREFS_KEY_TEMPLATE = u"brush_chooser.%s.selected_group"
ICON_SIZE = 48
## Method defs
def __init__(self, app, prefs_id=_DEFAULT_PREFS_ID):
"""Initialize"""
Gtk.VBox.__init__(self)
self.app = app
self.bm = app.brushmanager
self._prefs_key = self._PREFS_KEY_TEMPLATE % (prefs_id,)
active_group_name = app.preferences.get(self._prefs_key, None)
model = self._make_groups_sb_model()
self.groups_sb = spinbox.ItemSpinBox(model, self._groups_sb_changed_cb,
active_group_name)
active_group_name = self.groups_sb.get_value()
brushes = self.bm.get_group_brushes(active_group_name)
self.brushlist = PixbufList(
brushes, self.ICON_SIZE, self.ICON_SIZE,
namefunc=brushselectionwindow.managedbrush_namefunc,
pixbuffunc=brushselectionwindow.managedbrush_pixbuffunc,
idfunc=brushselectionwindow.managedbrush_idfunc
)
self.brushlist.dragging_allowed = False
self.bm.groups_changed += self._groups_changed_cb
self.bm.brushes_changed += self._brushes_changed_cb
self.brushlist.item_selected += self._item_selected_cb
scrolledwin = Gtk.ScrolledWindow()
scrolledwin.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.ALWAYS)
scrolledwin.add_with_viewport(self.brushlist)
w = int(self.ICON_SIZE * 4.5)
h = int(self.ICON_SIZE * 5.0)
scrolledwin.set_min_content_width(w)
scrolledwin.set_min_content_height(h)
scrolledwin.get_child().set_size_request(w, h)
self.pack_start(self.groups_sb, False, False, 0)
self.pack_start(scrolledwin, True, True, 0)
self.set_spacing(widgets.SPACING_TIGHT)
def _item_selected_cb(self, pixbuf_list, brush):
"""Internal: call brush_selected event when an item is chosen"""
self.brush_selected(brush)
@event
def brush_selected(self, brush):
"""Event: a brush was selected
:param brush: The newly chosen brush
"""
def _make_groups_sb_model(self):
"""Internal: create the model for the group choice spinbox"""
group_names = sorted(self.bm.groups.keys())
model = []
for name in group_names:
label_text = brushmanager.translate_group_name(name)
model.append((name, label_text))
return model
def _groups_changed_cb(self, bm):
"""Internal: update the spinbox model at the top of the widget"""
model = self._make_groups_sb_model()
self.groups_sb.set_model(model)
# In case the group has been deleted and recreated, we do this:
group_name = self.groups_sb.get_value()
group_brushes = self.bm.groups.get(group_name, [])
self.brushlist.itemlist = group_brushes
self.brushlist.update()
# See https://github.com/mypaint/mypaint/issues/654
def _brushes_changed_cb(self, bm, brushes):
"""Internal: update the PixbufList if its group was changed."""
# CARE: this might be called in response to the group being deleted.
# Don't recreate it by accident.
group_name = self.groups_sb.get_value()
group_brushes = self.bm.groups.get(group_name)
if brushes is group_brushes:
self.brushlist.update()
def _groups_sb_changed_cb(self, group_name):
"""Internal: update the list of brush icons when the group changes"""
self.app.preferences[self._prefs_key] = group_name
group_brushes = self.bm.groups.get(group_name, [])
self.brushlist.itemlist = group_brushes
self.brushlist.update()
def advance(self):
"""Advances to the next page of brushes."""
self.groups_sb.next()
class BrushChooserPopup (windowing.ChooserPopup):
"""Speedy brush chooser popup"""
def __init__(self, app, prefs_id=_DEFAULT_PREFS_ID):
"""Initialize.
:param gui.application.Application app: main app instance
:param unicode prefs_id: prefs identifier for the chooser
The prefs identifier forms part of preferences key which store
layout and which page of the chooser is selected. It should
follow the same syntax rules as Python simple identifiers.
"""
windowing.ChooserPopup.__init__(
self,
app = app,
actions = [
'ColorChooserPopup',
'ColorChooserPopupFastSubset',
'BrushChooserPopup',
],
config_name = "brush_chooser.%s" % (prefs_id,),
)
self._chosen_brush = None
self._chooser = QuickBrushChooser(app, prefs_id=prefs_id)
self._chooser.brush_selected += self._brush_selected_cb
bl = self._chooser.brushlist
bl.connect("button-release-event", self._brushlist_button_release_cb)
self.add(self._chooser)
def _brush_selected_cb(self, chooser, brush):
"""Internal: update the response brush when an icon is clicked"""
self._chosen_brush = brush
def _brushlist_button_release_cb(self, *junk):
"""Internal: send an accept response on a button release
We only send the response (and close the dialog) on button release to
avoid accidental dabs with the stylus.
"""
if self._chosen_brush is not None:
bm = self.app.brushmanager
bm.select_brush(self._chosen_brush)
self.hide()
self._chosen_brush = None
def advance(self):
"""Advances to the next page of brushes."""
self._chooser.advance()
class QuickColorChooser (Gtk.VBox):
"""A quick chooser widget for colors"""
## Class constants
_PREFS_KEY_TEMPLATE = u"color_chooser.%s.selected_adjuster"
_ALL_ADJUSTER_CLASSES = [
gui.colortools.HCYWheelTool,
gui.colortools.HSVWheelTool,
gui.colortools.PaletteTool,
gui.colortools.HSVCubeTool,
gui.colortools.HSVSquareTool,
gui.colortools.ComponentSlidersTool,
gui.colortools.RingsColorChangerTool,
gui.colortools.WashColorChangerTool,
gui.colortools.CrossedBowlColorChangerTool,
]
_SINGLE_CLICK_ADJUSTER_CLASSES = [
gui.colortools.PaletteTool,
gui.colortools.WashColorChangerTool,
gui.colortools.CrossedBowlColorChangerTool,
]
def __init__(self, app, prefs_id=_DEFAULT_PREFS_ID, single_click=False):
Gtk.VBox.__init__(self)
self._app = app
self._spinbox_model = []
self._adjs = {}
self._pages = []
mgr = app.brush_color_manager
if single_click:
adjuster_classes = self._SINGLE_CLICK_ADJUSTER_CLASSES
else:
adjuster_classes = self._ALL_ADJUSTER_CLASSES
for page_class in adjuster_classes:
name = page_class.__name__
page = page_class()
self._pages.append(page)
self._spinbox_model.append((name, page.tool_widget_title))
self._adjs[name] = page
page.set_color_manager(mgr)
if page_class in self._SINGLE_CLICK_ADJUSTER_CLASSES:
page.connect_after(
"button-release-event",
self._ccwidget_btn_release_cb,
)
self._prefs_key = self._PREFS_KEY_TEMPLATE % (prefs_id,)
active_page = app.preferences.get(self._prefs_key, None)
sb = spinbox.ItemSpinBox(self._spinbox_model, self._spinbox_changed_cb,
active_page)
active_page = sb.get_value()
self._spinbox = sb
self._active_adj = self._adjs[active_page]
self.pack_start(sb, False, False, 0)
self.pack_start(self._active_adj, True, True, 0)
self.set_spacing(widgets.SPACING_TIGHT)
def _spinbox_changed_cb(self, page_name):
self._app.preferences[self._prefs_key] = page_name
self.remove(self._active_adj)
new_adj = self._adjs[page_name]
self._active_adj = new_adj
self.pack_start(self._active_adj, True, True, 0)
self._active_adj.show_all()
def _ccwidget_btn_release_cb(self, ccwidget, event):
"""Internal: fire "choice_completed" after clicking certain widgets"""
self.choice_completed()
return False
@event
def choice_completed(self):
"""Event: a complete selection was made
This is emitted by button-release events on certain kinds of colour
chooser page. Not every page in the chooser emits this event, because
colour is a three-dimensional quantity: clicking on a two-dimensional
popup can't make a complete choice of colour with most pages.
The palette page does emit this event, and it's the default.
"""
def advance(self):
"""Advances to the next color selector."""
self._spinbox.next()
class ColorChooserPopup (windowing.ChooserPopup):
"""Speedy color chooser dialog"""
def __init__(self, app, prefs_id=_DEFAULT_PREFS_ID, single_click=False):
"""Initialize.
:param gui.application.Application app: main app instance
:param unicode prefs_id: prefs identifier for the chooser
:param bool single_click: limit to just the single-click adjusters
The prefs identifier forms part of preferences key which store
layout and which page of the chooser is selected. It should
follow the same syntax rules as Python simple identifiers.
"""
windowing.ChooserPopup.__init__(
self,
app = app,
actions = [
'ColorChooserPopup',
'ColorChooserPopupFastSubset',
'BrushChooserPopup',
],
config_name = u"color_chooser.%s" % (prefs_id,),
)
self._chooser = QuickColorChooser(
app,
prefs_id=prefs_id,
single_click=single_click,
)
self._chooser.choice_completed += self._choice_completed_cb
self.add(self._chooser)
def _choice_completed_cb(self, chooser):
"""Internal: close when a choice is (fully) made
Close the dialog on button release only to avoid accidental dabs
with the stylus.
"""
self.hide()
def advance(self):
"""Advances to the next color selector."""
self._chooser.advance()
## Classes: interface registration
Advanceable.register(QuickBrushChooser)
Advanceable.register(QuickColorChooser)
Advanceable.register(BrushChooserPopup)
Advanceable.register(ColorChooserPopup)
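# Since the classes above are registered as virtual subclasses (rather than
# inheriting from Advanceable), isinstance() checks still succeed
# (illustrative sketch; `app` stands for a gui.application.Application):
# chooser = BrushChooserPopup(app)
# assert isinstance(chooser, Advanceable)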
| gpl-2.0 |
DonBeo/scikit-learn | benchmarks/bench_mnist.py | 154 | 6006 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogenous.
Example of output :
[..]
Classification performance:
===========================
    Classifier               train-time   test-time  error-rate
------------------------------------------------------------
Nystroem-SVM 105.07s 0.91s 0.0227
ExtraTrees 48.20s 1.22s 0.0288
RandomForest 47.17s 1.21s 0.0304
SampledRBF-SVM 140.45s 0.84s 0.0486
CART 22.84s 0.16s 0.1214
dummy 0.01s 0.02s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM':
make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM':
make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100))
}
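# Example invocations (an illustrative sketch):
#   python bench_mnist.py --classifiers RandomForest CART --n-jobs 2
#   python bench_mnist.py --classifiers Nystroem-SVM --order F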
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
grnet/synnefo | snf-cyclades-app/synnefo/db/migrations/old/0092_auto__add_field_subnet_created__add_field_subnet_updated.py | 10 | 19687 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Subnet.created'
db.add_column('db_subnet', 'created',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2013, 12, 17, 0, 0), blank=True),
keep_default=False)
# Adding field 'Subnet.updated'
db.add_column('db_subnet', 'updated',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2013, 12, 17, 0, 0), blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Subnet.created'
db.delete_column('db_subnet', 'created')
# Deleting field 'Subnet.updated'
db.delete_column('db_subnet', 'updated')
models = {
'db.backend': {
'Meta': {'ordering': "['clustername']", 'object_name': 'Backend'},
'clustername': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'ctotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'dfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'disk_templates': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'drained': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'hypervisor': ('django.db.models.fields.CharField', [], {'default': "'kvm'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'unique': 'True'}),
'mfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'mtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'pinst_cnt': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5080'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'db.backendnetwork': {
'Meta': {'unique_together': "(('network', 'backend'),)", 'object_name': 'BackendNetwork'},
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'networks'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Backend']"}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'backend_networks'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Network']"}),
'operstate': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '30'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'db.bridgepooltable': {
'Meta': {'object_name': 'BridgePoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'db.flavor': {
'Meta': {'unique_together': "(('cpu', 'ram', 'disk', 'disk_template'),)", 'object_name': 'Flavor'},
'cpu': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disk': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'disk_template': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ram': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'db.ipaddress': {
'Meta': {'unique_together': "(('network', 'address', 'deleted'),)", 'object_name': 'IPAddress'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'floating_ip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Network']"}),
'nic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.NetworkInterface']"}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'subnet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Subnet']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'db.ipaddresslog': {
'Meta': {'object_name': 'IPAddressLog'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'allocated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network_id': ('django.db.models.fields.IntegerField', [], {}),
'released_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'server_id': ('django.db.models.fields.IntegerField', [], {})
},
'db.ippooltable': {
'Meta': {'object_name': 'IPPoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'subnet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ip_pools'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['db.Subnet']"})
},
'db.macprefixpooltable': {
'Meta': {'object_name': 'MacPrefixPoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'db.network': {
'Meta': {'object_name': 'Network'},
'action': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'drained': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'external_router': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flavor': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'floating_ip_pool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'machines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['db.VirtualMachine']", 'through': "orm['db.NetworkInterface']", 'symmetrical': 'False'}),
'mode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'network'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'state': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '32'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'})
},
'db.networkinterface': {
'Meta': {'object_name': 'NetworkInterface'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'device_owner': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'firewall_profile': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nics'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['db.VirtualMachine']"}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nics'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Network']"}),
'security_groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['db.SecurityGroup']", 'null': 'True', 'symmetrical': 'False'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'ACTIVE'", 'max_length': '32'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'db.quotaholderserial': {
'Meta': {'ordering': "['serial']", 'object_name': 'QuotaHolderSerial'},
'accept': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'resolved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'serial': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True', 'db_index': 'True'})
},
'db.securitygroup': {
'Meta': {'object_name': 'SecurityGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'db.subnet': {
'Meta': {'object_name': 'Subnet'},
'cidr': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'dhcp': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'dns_nameservers': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'gateway': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'host_routes': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipversion': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subnets'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Network']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'db.virtualmachine': {
'Meta': {'object_name': 'VirtualMachine'},
'action': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '30', 'null': 'True'}),
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'virtual_machines'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['db.Backend']"}),
'backend_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'buildpercentage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'flavor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['db.Flavor']", 'on_delete': 'models.PROTECT'}),
'hostid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'operstate': ('django.db.models.fields.CharField', [], {'default': "'BUILD'", 'max_length': '30'}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'virtual_machine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'suspended': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'task_job_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'})
},
'db.virtualmachinediagnostic': {
'Meta': {'ordering': "['-created']", 'object_name': 'VirtualMachineDiagnostic'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'diagnostics'", 'to': "orm['db.VirtualMachine']"}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'source_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'db.virtualmachinemetadata': {
'Meta': {'unique_together': "(('meta_key', 'vm'),)", 'object_name': 'VirtualMachineMetadata'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta_key': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'meta_value': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'vm': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['db.VirtualMachine']"})
}
}
complete_apps = ['db'] | gpl-3.0 |
stianvi/ansible-modules-core | database/postgresql/postgresql_user.py | 15 | 25233 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: postgresql_user
short_description: Adds or removes users (roles) from a PostgreSQL database.
description:
- Add or remove PostgreSQL users (roles) from a remote host and, optionally,
grant the users access to an existing database or tables.
- The fundamental function of the module is to create, or delete, roles from
a PostgreSQL cluster. Privilege assignment, or removal, is an optional
     step, which works on one database at a time. This allows the module to
     be called several times in the same playbook to modify the permissions on
     different databases, or to grant permissions to already existing users.
- A user cannot be removed until all the privileges have been stripped from
     the user. In such a situation, if the module tries to remove the user it
     will fail. To prevent this, the fail_on_user option signals
the module to try to remove the user, but if not possible keep going; the
module will report if changes happened and separately if the user was
removed or not.
version_added: "0.6"
options:
name:
description:
- name of the user (role) to add or remove
required: true
default: null
password:
description:
      - set the user's password; before 1.4 this was required.
- "When passing an encrypted password, the encrypted parameter must also be true, and it must be generated with the format C('str[\\"md5\\"] + md5[ password + username ]'), resulting in a total of 35 characters. An easy way to do this is: C(echo \\"md5`echo -n \\"verysecretpasswordJOE\\" | md5`\\"). Note that if encrypted is set, the stored password will be hashed whether or not it is pre-encrypted."
required: false
default: null
db:
description:
- name of database where permissions will be granted
required: false
default: null
fail_on_user:
description:
- if C(yes), fail when user can't be removed. Otherwise just log and continue
required: false
default: 'yes'
choices: [ "yes", "no" ]
port:
description:
- Database port to connect to.
required: false
default: 5432
login_user:
description:
- User (role) used to authenticate with PostgreSQL
required: false
default: postgres
login_password:
description:
- Password used to authenticate with PostgreSQL
required: false
default: null
login_host:
description:
- Host running PostgreSQL.
required: false
default: localhost
login_unix_socket:
description:
- Path to a Unix domain socket for local connections
required: false
default: null
priv:
description:
- "PostgreSQL privileges string in the format: C(table:priv1,priv2)"
required: false
default: null
role_attr_flags:
description:
- "PostgreSQL role attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER"
required: false
default: ""
choices: [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB",
"[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ]
state:
description:
- The user (role) state
required: false
default: present
choices: [ "present", "absent" ]
encrypted:
description:
      - whether the password is stored hashed in the database (boolean). Passwords can be passed already hashed or unhashed, and PostgreSQL ensures the stored password is hashed when encrypted is set.
required: false
default: false
version_added: '1.4'
expires:
description:
- sets the user's password expiration.
required: false
default: null
version_added: '1.4'
no_password_changes:
description:
- if C(yes), don't inspect database for password changes. Effective when C(pg_authid) is not accessible (such as AWS RDS). Otherwise, make password changes as necessary.
required: false
default: 'no'
choices: [ "yes", "no" ]
version_added: '2.0'
notes:
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module. If
the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host. For Ubuntu-based
systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
- If the passlib library is installed, then passwords that are encrypted
in the DB but not encrypted when passed as arguments can be checked for
changes. If the passlib library is not installed, unencrypted passwords
stored in the DB encrypted will be assumed to have changed.
- If you specify PUBLIC as the user, then the privilege changes will apply
to all users. You may not specify password or role_attr_flags when the
PUBLIC user is specified.
requirements: [ psycopg2 ]
author: "Ansible Core Team"
'''
EXAMPLES = '''
# Create django user and grant access to database and products table
- postgresql_user: db=acme name=django password=ceec4eif7ya priv=CONNECT/products:ALL
# Create rails user, grant privilege to create other databases and demote rails from super user status
- postgresql_user: name=rails password=secret role_attr_flags=CREATEDB,NOSUPERUSER
# Remove test user privileges from acme
- postgresql_user: db=acme name=test priv=ALL/products:ALL state=absent fail_on_user=no
# Remove test user from test database and the cluster
- postgresql_user: db=test name=test priv=ALL state=absent
# Example privileges string format
INSERT,UPDATE/table:SELECT/anothertable:ALL
# Remove an existing user's password
- postgresql_user: db=test user=test password=NULL
'''
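# Illustrative sketch (not part of the module logic): the pre-encrypted
# password format described in the documentation above is
# 'md5' + md5(password + username), 35 characters in total. Assuming
# Python 2 byte strings, it could be computed as:
#
#     import hashlib
#     def postgres_md5_password(password, username):
#         return 'md5' + hashlib.md5(password + username).hexdigest()
#
#     postgres_md5_password('verysecretpassword', 'JOE')  # 'md5' + 32 hex chars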
import re
import itertools
try:
import psycopg2
import psycopg2.extras
except ImportError:
postgresqldb_found = False
else:
postgresqldb_found = True
from ansible.module_utils.six import iteritems
_flags = ('SUPERUSER', 'CREATEROLE', 'CREATEUSER', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
VALID_FLAGS = frozenset(itertools.chain(_flags, ('NO%s' % f for f in _flags)))
VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
database=frozenset(('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
)
# map to cope with idiosyncrasies of SUPERUSER and LOGIN
PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
CREATEUSER='rolcreateuser', CREATEDB='rolcreatedb',
INHERIT='rolinherit', LOGIN='rolcanlogin',
REPLICATION='rolreplication')
class InvalidFlagsError(Exception):
pass
class InvalidPrivsError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def user_exists(cursor, user):
# The PUBLIC user is a special case that is always there
if user == 'PUBLIC':
return True
query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s"
cursor.execute(query, {'user': user})
return cursor.rowcount > 0
def user_add(cursor, user, password, role_attr_flags, encrypted, expires):
"""Create a new database user (role)."""
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a literal
query_password_data = dict(password=password, expires=expires)
query = ['CREATE USER %(user)s' % { "user": pg_quote_identifier(user, 'role')}]
if password is not None:
query.append("WITH %(crypt)s" % { "crypt": encrypted })
query.append("PASSWORD %(password)s")
if expires is not None:
query.append("VALID UNTIL %(expires)s")
query.append(role_attr_flags)
query = ' '.join(query)
cursor.execute(query, query_password_data)
return True
def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires, no_password_changes):
"""Change user password and/or attributes. Return True if changed, False otherwise."""
changed = False
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a literal
if user == 'PUBLIC':
if password is not None:
module.fail_json(msg="cannot change the password for PUBLIC user")
elif role_attr_flags != '':
module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
else:
return False
# Handle passwords.
if not no_password_changes and (password is not None or role_attr_flags != ''):
# Select password and all flag-like columns in order to verify changes.
query_password_data = dict(password=password, expires=expires)
select = "SELECT * FROM pg_authid where rolname=%(user)s"
cursor.execute(select, {"user": user})
# Grab current role attributes.
current_role_attrs = cursor.fetchone()
# Do we actually need to do anything?
pwchanging = False
if password is not None:
if encrypted:
if password.startswith('md5'):
if password != current_role_attrs['rolpassword']:
pwchanging = True
else:
try:
from passlib.hash import postgres_md5 as pm
if pm.encrypt(password, user) != current_role_attrs['rolpassword']:
pwchanging = True
except ImportError:
# Cannot check if passlib is not installed, so assume password is different
pwchanging = True
else:
if password != current_role_attrs['rolpassword']:
pwchanging = True
role_attr_flags_changing = False
if role_attr_flags:
role_attr_flags_dict = {}
for r in role_attr_flags.split(' '):
if r.startswith('NO'):
role_attr_flags_dict[r.replace('NO', '', 1)] = False
else:
role_attr_flags_dict[r] = True
for role_attr_name, role_attr_value in role_attr_flags_dict.items():
if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
role_attr_flags_changing = True
        # The expiry changes only when a new value is supplied and it differs
        # from the value currently stored in pg_authid.
        expires_changing = (expires is not None and
                            expires != current_role_attrs['rolvaliduntil'])
if not pwchanging and not role_attr_flags_changing and not expires_changing:
return False
alter = ['ALTER USER %(user)s' % {"user": pg_quote_identifier(user, 'role')}]
if pwchanging:
alter.append("WITH %(crypt)s" % {"crypt": encrypted})
alter.append("PASSWORD %(password)s")
alter.append(role_attr_flags)
elif role_attr_flags:
alter.append('WITH %s' % role_attr_flags)
if expires is not None:
alter.append("VALID UNTIL %(expires)s")
try:
cursor.execute(' '.join(alter), query_password_data)
except psycopg2.InternalError:
e = get_exception()
if e.pgcode == '25006':
# Handle errors due to read-only transactions indicated by pgcode 25006
# ERROR: cannot execute ALTER ROLE in a read-only transaction
changed = False
module.fail_json(msg=e.pgerror)
return changed
else:
raise psycopg2.InternalError(e)
# Grab new role attributes.
cursor.execute(select, {"user": user})
new_role_attrs = cursor.fetchone()
# Detect any differences between current_ and new_role_attrs.
for i in range(len(current_role_attrs)):
if current_role_attrs[i] != new_role_attrs[i]:
changed = True
return changed
def user_delete(cursor, user):
"""Try to remove a user. Returns True if successful otherwise False"""
cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
try:
cursor.execute("DROP USER %s" % pg_quote_identifier(user, 'role'))
except:
cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
return False
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
return True
def has_table_privileges(cursor, user, table, privs):
"""
Return the difference between the privileges that a user already has and
the privileges that they desire to have.
:returns: tuple of:
* privileges that they have and were requested
* privileges they currently hold but were not requested
* privileges requested that they do not hold
"""
cur_privs = get_table_privileges(cursor, user, table)
have_currently = cur_privs.intersection(privs)
other_current = cur_privs.difference(privs)
desired = privs.difference(cur_privs)
return (have_currently, other_current, desired)
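# For example, if the user currently holds set(['SELECT', 'INSERT']) on the
# table and privs == set(['SELECT', 'UPDATE']), the returned tuple is
# (set(['SELECT']), set(['INSERT']), set(['UPDATE'])).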
def get_table_privileges(cursor, user, table):
if '.' in table:
schema, table = table.split('.', 1)
else:
schema = 'public'
query = '''SELECT privilege_type FROM information_schema.role_table_grants
WHERE grantee=%s AND table_name=%s AND table_schema=%s'''
cursor.execute(query, (user, table, schema))
return frozenset([x[0] for x in cursor.fetchall()])
def grant_table_privileges(cursor, user, table, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
query = 'GRANT %s ON TABLE %s TO %s' % (
privs, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') )
cursor.execute(query)
def revoke_table_privileges(cursor, user, table, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
query = 'REVOKE %s ON TABLE %s FROM %s' % (
privs, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') )
cursor.execute(query)
def get_database_privileges(cursor, user, db):
priv_map = {
'C':'CREATE',
'T':'TEMPORARY',
'c':'CONNECT',
}
query = 'SELECT datacl FROM pg_database WHERE datname = %s'
cursor.execute(query, (db,))
datacl = cursor.fetchone()[0]
if datacl is None:
return set()
    r = re.search(r'%s=(C?T?c?)/[a-z]+,?' % user, datacl)
if r is None:
return set()
o = set()
for v in r.group(1):
o.add(priv_map[v])
return normalize_privileges(o, 'database')
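# Illustrative sketch of the ACL format parsed above: a pg_database datacl
# entry such as 'miriam=CTc/postgres' maps, for user 'miriam', to
# set(['CREATE', 'TEMPORARY', 'CONNECT']) after normalization.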
def has_database_privileges(cursor, user, db, privs):
"""
Return the difference between the privileges that a user already has and
the privileges that they desire to have.
:returns: tuple of:
* privileges that they have and were requested
* privileges they currently hold but were not requested
* privileges requested that they do not hold
"""
cur_privs = get_database_privileges(cursor, user, db)
have_currently = cur_privs.intersection(privs)
other_current = cur_privs.difference(privs)
desired = privs.difference(cur_privs)
return (have_currently, other_current, desired)
def grant_database_privileges(cursor, user, db, privs):
# Note: priv escaped by parse_privs
privs =', '.join(privs)
if user == "PUBLIC":
query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
privs, pg_quote_identifier(db, 'database'))
else:
query = 'GRANT %s ON DATABASE %s TO %s' % (
privs, pg_quote_identifier(db, 'database'),
pg_quote_identifier(user, 'role'))
cursor.execute(query)
def revoke_database_privileges(cursor, user, db, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
if user == "PUBLIC":
query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
privs, pg_quote_identifier(db, 'database'))
else:
query = 'REVOKE %s ON DATABASE %s FROM %s' % (
privs, pg_quote_identifier(db, 'database'),
pg_quote_identifier(user, 'role'))
cursor.execute(query)
def revoke_privileges(cursor, user, privs):
if privs is None:
return False
revoke_funcs = dict(table=revoke_table_privileges, database=revoke_database_privileges)
check_funcs = dict(table=has_table_privileges, database=has_database_privileges)
changed = False
for type_ in privs:
for name, privileges in iteritems(privs[type_]):
# Check that any of the privileges requested to be removed are
# currently granted to the user
differences = check_funcs[type_](cursor, user, name, privileges)
if differences[0]:
revoke_funcs[type_](cursor, user, name, privileges)
changed = True
return changed
def grant_privileges(cursor, user, privs):
if privs is None:
return False
grant_funcs = dict(table=grant_table_privileges, database=grant_database_privileges)
check_funcs = dict(table=has_table_privileges, database=has_database_privileges)
changed = False
for type_ in privs:
for name, privileges in iteritems(privs[type_]):
# Check that any of the privileges requested for the user are
# currently missing
differences = check_funcs[type_](cursor, user, name, privileges)
if differences[2]:
grant_funcs[type_](cursor, user, name, privileges)
changed = True
return changed
def parse_role_attrs(role_attr_flags):
"""
Parse role attributes string for user creation.
Format:
attributes[,attributes,...]
Where:
attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
[ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB",
"[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ]
"""
if ',' in role_attr_flags:
flag_set = frozenset(r.upper() for r in role_attr_flags.split(","))
elif role_attr_flags:
flag_set = frozenset((role_attr_flags.upper(),))
else:
flag_set = frozenset()
if not flag_set.issubset(VALID_FLAGS):
raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
' '.join(flag_set.difference(VALID_FLAGS)))
o_flags = ' '.join(flag_set)
return o_flags
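# Illustrative examples (flag order may vary, since the flags pass through
# a frozenset):
#     parse_role_attrs("CREATEDB,nosuperuser")  -> "CREATEDB NOSUPERUSER"
#     parse_role_attrs("BOGUS")                 -> raises InvalidFlagsError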
def normalize_privileges(privs, type_):
new_privs = set(privs)
if 'ALL' in new_privs:
new_privs.update(VALID_PRIVS[type_])
new_privs.remove('ALL')
if 'TEMP' in new_privs:
new_privs.add('TEMPORARY')
new_privs.remove('TEMP')
return new_privs
def parse_privs(privs, db):
"""
Parse privilege string to determine permissions for database db.
Format:
privileges[/privileges/...]
Where:
privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] |
TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...]
"""
if privs is None:
return privs
o_privs = {
'database':{},
'table':{}
}
for token in privs.split('/'):
if ':' not in token:
type_ = 'database'
name = db
priv_set = frozenset(x.strip().upper() for x in token.split(',') if x.strip())
else:
type_ = 'table'
name, privileges = token.split(':', 1)
priv_set = frozenset(x.strip().upper() for x in privileges.split(',') if x.strip())
if not priv_set.issubset(VALID_PRIVS[type_]):
raise InvalidPrivsError('Invalid privs specified for %s: %s' %
(type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))
priv_set = normalize_privileges(priv_set, type_)
o_privs[type_][name] = priv_set
return o_privs
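# Illustrative example for db='acme' (ALL would be expanded by
# normalize_privileges into every valid privilege of that type):
#     parse_privs("CONNECT/products:SELECT,INSERT", "acme")
#     -> {'database': {'acme': set(['CONNECT'])},
#         'table': {'products': set(['SELECT', 'INSERT'])}}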
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default="postgres"),
login_password=dict(default=""),
login_host=dict(default=""),
login_unix_socket=dict(default=""),
user=dict(required=True, aliases=['name']),
password=dict(default=None),
state=dict(default="present", choices=["absent", "present"]),
priv=dict(default=None),
db=dict(default=''),
port=dict(default='5432'),
fail_on_user=dict(type='bool', default='yes'),
role_attr_flags=dict(default=''),
encrypted=dict(type='bool', default='no'),
no_password_changes=dict(type='bool', default='no'),
expires=dict(default=None)
),
supports_check_mode = True
)
user = module.params["user"]
password = module.params["password"]
state = module.params["state"]
fail_on_user = module.params["fail_on_user"]
db = module.params["db"]
if db == '' and module.params["priv"] is not None:
module.fail_json(msg="privileges require a database to be specified")
privs = parse_privs(module.params["priv"], db)
port = module.params["port"]
no_password_changes = module.params["no_password_changes"]
try:
role_attr_flags = parse_role_attrs(module.params["role_attr_flags"])
except InvalidFlagsError:
e = get_exception()
module.fail_json(msg=str(e))
if module.params["encrypted"]:
encrypted = "ENCRYPTED"
else:
encrypted = "UNENCRYPTED"
expires = module.params["expires"]
if not postgresqldb_found:
module.fail_json(msg="the python psycopg2 module is required")
    # To use default values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host":"host",
"login_user":"user",
"login_password":"password",
"port":"port",
"db":"database"
}
kw = dict( (params_map[k], v) for (k, v) in iteritems(module.params)
if k in params_map and v != "" )
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
try:
db_connection = psycopg2.connect(**kw)
cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
except Exception:
e = get_exception()
module.fail_json(msg="unable to connect to database: %s" % e)
kw = dict(user=user)
changed = False
user_removed = False
if state == "present":
if user_exists(cursor, user):
try:
changed = user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires, no_password_changes)
except SQLParseError:
e = get_exception()
module.fail_json(msg=str(e))
else:
try:
changed = user_add(cursor, user, password, role_attr_flags, encrypted, expires)
except SQLParseError:
e = get_exception()
module.fail_json(msg=str(e))
try:
changed = grant_privileges(cursor, user, privs) or changed
except SQLParseError:
e = get_exception()
module.fail_json(msg=str(e))
else:
if user_exists(cursor, user):
if module.check_mode:
changed = True
kw['user_removed'] = True
else:
try:
changed = revoke_privileges(cursor, user, privs)
user_removed = user_delete(cursor, user)
except SQLParseError:
e = get_exception()
module.fail_json(msg=str(e))
changed = changed or user_removed
if fail_on_user and not user_removed:
msg = "unable to remove user"
module.fail_json(msg=msg)
kw['user_removed'] = user_removed
if changed:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
kw['changed'] = changed
module.exit_json(**kw)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
main()
| gpl-3.0 |
michaelhowden/eden | modules/tests/volunteer/export_volunteers.py | 25 | 1890 | # -*- coding: utf-8 -*-
""" Sahana Eden Volunteer Module Automated Tests
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from tests.web2unittest import SeleniumUnitTest
class ExportVolunteers(SeleniumUnitTest):
def test_export_volunteers(self):
print "\n"
        # Login, if not already done so
self.login(account="admin", nexturl="vol/volunteer/search")
#@ToDo: 1) Perform some sort of check to test the export works
# 2) Integrate this with the search test helper function so that the export is working for EVERY search
# 3) Extend the export to include xls, csv, xml
browser = self.browser
browser.find_element_by_xpath("//img[@src='/eden/static/img/pdficon_small.gif']").click()
| mit |
zjuwangg/scrapy | scrapy/utils/spider.py | 151 | 1911 | import logging
import inspect
import six
from scrapy.spiders import Spider
from scrapy.utils.misc import arg_to_iter
logger = logging.getLogger(__name__)
def iterate_spider_output(result):
return arg_to_iter(result)
def iter_spider_classes(module):
"""Return an iterator over all spider classes defined in the given module
    that can be instantiated (i.e. which have a name)
"""
    # this needs to be imported here until we get rid of the spider manager
# singleton in scrapy.spider.spiders
from scrapy.spiders import Spider
for obj in six.itervalues(vars(module)):
if inspect.isclass(obj) and \
issubclass(obj, Spider) and \
obj.__module__ == module.__name__ and \
getattr(obj, 'name', None):
yield obj
def spidercls_for_request(spider_loader, request, default_spidercls=None,
log_none=False, log_multiple=False):
"""Return a spider class that handles the given Request.
This will look for the spiders that can handle the given request (using
the spider loader) and return a Spider class if (and only if) there is
only one Spider able to handle the Request.
If multiple spiders (or no spider) are found, it will return the
default_spidercls passed. It can optionally log if multiple or no spiders
are found.
"""
snames = spider_loader.find_by_request(request)
if len(snames) == 1:
return spider_loader.load(snames[0])
if len(snames) > 1 and log_multiple:
logger.error('More than one spider can handle: %(request)s - %(snames)s',
{'request': request, 'snames': ', '.join(snames)})
if len(snames) == 0 and log_none:
logger.error('Unable to find spider that handles: %(request)s',
{'request': request})
return default_spidercls
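# Illustrative usage (a sketch; `loader` is assumed to be a SpiderLoader
# instance and `request` a scrapy Request):
#     spidercls = spidercls_for_request(loader, request,
#                                       default_spidercls=DefaultSpider,
#                                       log_multiple=True)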
class DefaultSpider(Spider):
name = 'default'
| bsd-3-clause |
xjsender/HaoGist | requests/structures.py | 1160 | 2977 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import collections
class CaseInsensitiveDict(collections.MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
class LookupDict(dict):
"""Dictionary lookup object."""
def __init__(self, name=None):
self.name = name
super(LookupDict, self).__init__()
def __repr__(self):
return '<lookup \'%s\'>' % (self.name)
def __getitem__(self, key):
# We allow fall-through here, so values default to None
return self.__dict__.get(key, None)
def get(self, key, default=None):
return self.__dict__.get(key, default)
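# Illustrative usage (a sketch): requests builds its status-code registry on
# LookupDict, where missing keys fall back to None instead of raising:
#     codes = LookupDict(name='status_codes')
#     codes.ok = 200
#     codes['ok']       # 200, resolved via the instance __dict__
#     codes['missing']  # None, no KeyError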
| mit |
krintoxi/NoobSec-Toolkit | NoobSecToolkit - MAC OSX/tools/inject/plugins/dbms/oracle/__init__.py | 10 | 1039 | #!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import DBMS
from lib.core.settings import ORACLE_SYSTEM_DBS
from lib.core.unescaper import unescaper
from plugins.dbms.oracle.enumeration import Enumeration
from plugins.dbms.oracle.filesystem import Filesystem
from plugins.dbms.oracle.fingerprint import Fingerprint
from plugins.dbms.oracle.syntax import Syntax
from plugins.dbms.oracle.takeover import Takeover
from plugins.generic.misc import Miscellaneous
class OracleMap(Syntax, Fingerprint, Enumeration, Filesystem, Miscellaneous, Takeover):
"""
This class defines Oracle methods
"""
def __init__(self):
self.excludeDbsList = ORACLE_SYSTEM_DBS
Syntax.__init__(self)
Fingerprint.__init__(self)
Enumeration.__init__(self)
Filesystem.__init__(self)
Miscellaneous.__init__(self)
Takeover.__init__(self)
unescaper[DBMS.ORACLE] = Syntax.escape
| gpl-2.0 |
Cajoline/mezzanine | mezzanine/twitter/__init__.py | 26 | 1024 | """
Provides models and utilities for displaying different types of Twitter feeds.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mezzanine import __version__
# Constants/choices for the different query types.
QUERY_TYPE_USER = "user"
QUERY_TYPE_LIST = "list"
QUERY_TYPE_SEARCH = "search"
QUERY_TYPE_CHOICES = (
(QUERY_TYPE_USER, _("User")),
(QUERY_TYPE_LIST, _("List")),
(QUERY_TYPE_SEARCH, _("Search")),
)
def get_auth_settings():
"""
Returns all the key/secret settings for Twitter access,
only if they're all defined.
"""
from mezzanine.conf import settings
try:
auth_settings = (settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_ACCESS_TOKEN_KEY,
settings.TWITTER_ACCESS_TOKEN_SECRET)
except AttributeError:
return None
else:
return auth_settings if all(auth_settings) else None
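# Illustrative usage (a sketch; the handling shown is hypothetical):
#     auth = get_auth_settings()
#     if auth is None:
#         pass  # one or more TWITTER_* settings missing: treat feeds as disabled
#     else:
#         consumer_key, consumer_secret, token_key, token_secret = auth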
| bsd-2-clause |
Jusedawg/SickRage | lib/tornado/escape.py | 120 | 14441 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Escaping/unescaping methods for HTML, JSON, URLs, and others.
Also includes a few other miscellaneous string manipulation functions that
have crept in over time.
"""
from __future__ import absolute_import, division, print_function, with_statement
import re
import sys
from tornado.util import unicode_type, basestring_type, u
try:
from urllib.parse import parse_qs as _parse_qs # py3
except ImportError:
from urlparse import parse_qs as _parse_qs # Python 2.6+
try:
import htmlentitydefs # py2
except ImportError:
import html.entities as htmlentitydefs # py3
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse # py2
import json
try:
unichr
except NameError:
unichr = chr
_XHTML_ESCAPE_RE = re.compile('[&<>"\']')
_XHTML_ESCAPE_DICT = {'&': '&', '<': '<', '>': '>', '"': '"',
'\'': '''}
def xhtml_escape(value):
"""Escapes a string so it is valid within HTML or XML.
Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``.
When used in attribute values the escaped strings must be enclosed
in quotes.
.. versionchanged:: 3.2
Added the single quote to the list of escaped characters.
"""
return _XHTML_ESCAPE_RE.sub(lambda match: _XHTML_ESCAPE_DICT[match.group(0)],
to_basestring(value))
def xhtml_unescape(value):
"""Un-escapes an XML-escaped string."""
return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))
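# For example: xhtml_escape('<a href="x">') returns
# '&lt;a href=&quot;x&quot;&gt;', and xhtml_unescape reverses it (named,
# decimal and hex entities are resolved by _convert_entity below).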
# The fact that json_encode wraps json.dumps is an implementation detail.
# Please see https://github.com/tornadoweb/tornado/pull/706
# before sending a pull request that adds **kwargs to this function.
def json_encode(value):
"""JSON-encodes the given Python object."""
# JSON permits but does not require forward slashes to be escaped.
# This is useful when json data is emitted in a <script> tag
# in HTML, as it prevents </script> tags from prematurely terminating
# the javascript. Some json libraries do this escaping by default,
# although python's standard library does not, so we do it here.
# http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
return json.dumps(value).replace("</", "<\\/")
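# For example, json_encode({"x": "</script>"}) produces {"x": "<\/script>"},
# which cannot terminate an enclosing <script> element early.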
def json_decode(value):
"""Returns Python objects for the given JSON string."""
return json.loads(to_basestring(value))
def squeeze(value):
"""Replace all sequences of whitespace chars with a single space."""
return re.sub(r"[\x00-\x20]+", " ", value).strip()
def url_escape(value, plus=True):
"""Returns a URL-encoded version of the given value.
If ``plus`` is true (the default), spaces will be represented
as "+" instead of "%20". This is appropriate for query strings
but not for the path component of a URL. Note that this default
is the reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
"""
quote = urllib_parse.quote_plus if plus else urllib_parse.quote
return quote(utf8(value))
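# For example: url_escape("a b") returns "a+b", while
# url_escape("a b", plus=False) returns "a%20b" (suitable for path components).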
# python 3 changed things around enough that we need two separate
# implementations of url_unescape. We also need our own implementation
# of parse_qs since python 3's version insists on decoding everything.
if sys.version_info[0] < 3:
def url_unescape(value, encoding='utf-8', plus=True):
"""Decodes the given value from a URL.
The argument may be either a byte or unicode string.
If encoding is None, the result will be a byte string. Otherwise,
the result is a unicode string in the specified encoding.
If ``plus`` is true (the default), plus signs will be interpreted
as spaces (literal plus signs must be represented as "%2B"). This
is appropriate for query strings and form-encoded values but not
for the path component of a URL. Note that this default is the
reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
"""
unquote = (urllib_parse.unquote_plus if plus else urllib_parse.unquote)
if encoding is None:
return unquote(utf8(value))
else:
return unicode_type(unquote(utf8(value)), encoding)
parse_qs_bytes = _parse_qs
else:
def url_unescape(value, encoding='utf-8', plus=True):
"""Decodes the given value from a URL.
The argument may be either a byte or unicode string.
If encoding is None, the result will be a byte string. Otherwise,
the result is a unicode string in the specified encoding.
If ``plus`` is true (the default), plus signs will be interpreted
as spaces (literal plus signs must be represented as "%2B"). This
is appropriate for query strings and form-encoded values but not
for the path component of a URL. Note that this default is the
reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
"""
if encoding is None:
if plus:
# unquote_to_bytes doesn't have a _plus variant
value = to_basestring(value).replace('+', ' ')
return urllib_parse.unquote_to_bytes(value)
else:
unquote = (urllib_parse.unquote_plus if plus
else urllib_parse.unquote)
return unquote(to_basestring(value), encoding=encoding)
def parse_qs_bytes(qs, keep_blank_values=False, strict_parsing=False):
"""Parses a query string like urlparse.parse_qs, but returns the
values as byte strings.
Keys still become type str (interpreted as latin1 in python3!)
because it's too painful to keep them as byte strings in
python3 and in practice they're nearly always ascii anyway.
"""
# This is gross, but python3 doesn't give us another way.
# Latin1 is the universal donor of character encodings.
result = _parse_qs(qs, keep_blank_values, strict_parsing,
encoding='latin1', errors='strict')
encoded = {}
for k, v in result.items():
encoded[k] = [i.encode('latin1') for i in v]
return encoded
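# Illustrative example for the branch above (Python 3 only):
#     parse_qs_bytes("a=b&a=%e9") -> {'a': [b'b', b'\xe9']}
# Keys are native str; values round-trip through latin1 as byte strings.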
_UTF8_TYPES = (bytes, type(None))
def utf8(value):
"""Converts a string argument to a byte string.
If the argument is already a byte string or None, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8.
"""
if isinstance(value, _UTF8_TYPES):
return value
if not isinstance(value, unicode_type):
raise TypeError(
"Expected bytes, unicode, or None; got %r" % type(value)
)
return value.encode("utf-8")
_TO_UNICODE_TYPES = (unicode_type, type(None))
def to_unicode(value):
"""Converts a string argument to a unicode string.
If the argument is already a unicode string or None, it is returned
unchanged. Otherwise it must be a byte string and is decoded as utf8.
"""
if isinstance(value, _TO_UNICODE_TYPES):
return value
if not isinstance(value, bytes):
raise TypeError(
"Expected bytes, unicode, or None; got %r" % type(value)
)
return value.decode("utf-8")
# to_unicode was previously named _unicode not because it was private,
# but to avoid conflicts with the built-in unicode() function/type
_unicode = to_unicode
# When dealing with the standard library across python 2 and 3 it is
# sometimes useful to have a direct conversion to the native string type
if str is unicode_type:
native_str = to_unicode
else:
native_str = utf8
_BASESTRING_TYPES = (basestring_type, type(None))
def to_basestring(value):
"""Converts a string argument to a subclass of basestring.
In python2, byte and unicode strings are mostly interchangeable,
so functions that deal with a user-supplied argument in combination
with ascii string constants can use either and should return the type
the user supplied. In python3, the two types are not interchangeable,
so this method is needed to convert byte strings to unicode.
"""
if isinstance(value, _BASESTRING_TYPES):
return value
if not isinstance(value, bytes):
raise TypeError(
"Expected bytes, unicode, or None; got %r" % type(value)
)
return value.decode("utf-8")
def recursive_unicode(obj):
"""Walks a simple data structure, converting byte strings to unicode.
Supports lists, tuples, and dictionaries.
"""
if isinstance(obj, dict):
return dict((recursive_unicode(k), recursive_unicode(v)) for (k, v) in obj.items())
elif isinstance(obj, list):
return list(recursive_unicode(i) for i in obj)
elif isinstance(obj, tuple):
return tuple(recursive_unicode(i) for i in obj)
elif isinstance(obj, bytes):
return to_unicode(obj)
else:
return obj
# I originally used the regex from
# http://daringfireball.net/2010/07/improved_regex_for_matching_urls
# but it gets all exponential on certain patterns (such as too many trailing
# dots), causing the regex matcher to never return.
# This regex should avoid those problems.
# Use to_unicode instead of tornado.util.u - we don't want backslashes getting
# processed as escapes.
_URL_RE = re.compile(to_unicode(r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&amp;|&quot;)*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&amp;|&quot;)*\)))+)"""))
def linkify(text, shorten=False, extra_params="",
require_protocol=False, permitted_protocols=["http", "https"]):
"""Converts plain text into HTML with links.
For example: ``linkify("Hello http://tornadoweb.org!")`` would return
``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``
Parameters:
* ``shorten``: Long urls will be shortened for display.
* ``extra_params``: Extra text to include in the link tag, or a callable
taking the link as an argument and returning the extra text
e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
or::
def extra_params_cb(url):
if url.startswith("http://example.com"):
return 'class="internal"'
else:
return 'class="external" rel="nofollow"'
linkify(text, extra_params=extra_params_cb)
* ``require_protocol``: Only linkify urls which include a protocol. If
this is False, urls such as www.facebook.com will also be linkified.
* ``permitted_protocols``: List (or set) of protocols which should be
linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
"mailto"])``. It is very unsafe to include protocols such as
``javascript``.
"""
if extra_params and not callable(extra_params):
extra_params = " " + extra_params.strip()
def make_link(m):
url = m.group(1)
proto = m.group(2)
if require_protocol and not proto:
            return url  # no protocol, no linkify
if proto and proto not in permitted_protocols:
return url # bad protocol, no linkify
href = m.group(1)
if not proto:
href = "http://" + href # no proto specified, use http
if callable(extra_params):
params = " " + extra_params(href).strip()
else:
params = extra_params
# clip long urls. max_len is just an approximation
max_len = 30
if shorten and len(url) > max_len:
before_clip = url
if proto:
proto_len = len(proto) + 1 + len(m.group(3) or "") # +1 for :
else:
proto_len = 0
parts = url[proto_len:].split("/")
if len(parts) > 1:
# Grab the whole host part plus the first bit of the path
# The path is usually not that interesting once shortened
# (no more slug, etc), so it really just provides a little
# extra indication of shortening.
url = url[:proto_len] + parts[0] + "/" + \
parts[1][:8].split('?')[0].split('.')[0]
if len(url) > max_len * 1.5: # still too long
url = url[:max_len]
if url != before_clip:
amp = url.rfind('&')
# avoid splitting html char entities
if amp > max_len - 5:
url = url[:amp]
url += "..."
if len(url) >= len(before_clip):
url = before_clip
else:
# full url is visible on mouse-over (for those who don't
# have a status bar, such as Safari by default)
params += ' title="%s"' % href
return u('<a href="%s"%s>%s</a>') % (href, params, url)
# First HTML-escape so that our strings are all safe.
    # The regex is modified to avoid character entities other than &amp; so
    # that we won't pick up &quot;, etc.
text = _unicode(xhtml_escape(text))
return _URL_RE.sub(make_link, text)
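# Hedged usage sketch (not part of tornado.escape): a minimal demo of
# linkify(); it relies only on names defined in this module and is never
# called at import time.
def _linkify_demo():
    # Plain text gains an anchor tag; the whole string is HTML-escaped first,
    # and shorten=True clips long URLs for display.
    html = linkify("Docs at http://tornadoweb.org/en/stable", shorten=True)
    # With require_protocol=True, bare hosts such as www.example.com are
    # left as-is instead of being turned into links.
    bare = linkify("www.example.com", require_protocol=True)
    return html, bare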
def _convert_entity(m):
if m.group(1) == "#":
try:
if m.group(2)[:1].lower() == 'x':
return unichr(int(m.group(2)[1:], 16))
else:
return unichr(int(m.group(2)))
except ValueError:
return "&#%s;" % m.group(2)
try:
return _HTML_UNICODE_MAP[m.group(2)]
except KeyError:
return "&%s;" % m.group(2)
def _build_unicode_map():
unicode_map = {}
for name, value in htmlentitydefs.name2codepoint.items():
unicode_map[name] = unichr(value)
return unicode_map
_HTML_UNICODE_MAP = _build_unicode_map()
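# Hedged sketch: _convert_entity is normally driven by this module's
# xhtml_unescape() substitution; the pattern below is reproduced from memory
# and should be treated as illustrative rather than authoritative.
def _unescape_demo(value):
    # "&amp; &#65;" -> "& A": named entities via _HTML_UNICODE_MAP,
    # numeric (and hex, e.g. &#x41;) entities via unichr().
    return re.sub(r"&(#?)(\w+?);", _convert_entity, value)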
| gpl-3.0 |
vberaudi/scipy | scipy/integrate/setup.py | 90 | 3250 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join
from scipy._build_utils import numpy_nodepr_api
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info
config = Configuration('integrate', parent_package, top_path)
# Get a local copy of lapack_opt_info
lapack_opt = dict(get_info('lapack_opt',notfound_action=2))
# Pop off the libraries list so it can be combined with
# additional required libraries
lapack_libs = lapack_opt.pop('libraries', [])
mach_src = [join('mach','*.f')]
quadpack_src = [join('quadpack','*.f')]
odepack_src = [join('odepack','*.f')]
dop_src = [join('dop','*.f')]
quadpack_test_src = [join('tests','_test_multivariate.c')]
odeint_banded_test_src = [join('tests', 'banded5x5.f')]
config.add_library('mach', sources=mach_src,
config_fc={'noopt':(__file__,1)})
config.add_library('quadpack', sources=quadpack_src)
config.add_library('odepack', sources=odepack_src)
config.add_library('dop', sources=dop_src)
# Extensions
# quadpack:
config.add_extension('_quadpack',
sources=['_quadpackmodule.c'],
libraries=(['quadpack', 'mach'] + lapack_libs),
depends=(['quadpack.h','__quadpack.h']
+ quadpack_src + mach_src),
**lapack_opt)
# odepack
odepack_libs = ['odepack','mach'] + lapack_libs
odepack_opts = lapack_opt.copy()
odepack_opts.update(numpy_nodepr_api)
config.add_extension('_odepack',
sources=['_odepackmodule.c'],
libraries=odepack_libs,
depends=(odepack_src + mach_src),
**odepack_opts)
# vode
config.add_extension('vode',
sources=['vode.pyf'],
libraries=odepack_libs,
depends=(odepack_src
+ mach_src),
**lapack_opt)
# lsoda
config.add_extension('lsoda',
sources=['lsoda.pyf'],
libraries=odepack_libs,
depends=(odepack_src
+ mach_src),
**lapack_opt)
# dop
config.add_extension('_dop',
sources=['dop.pyf'],
libraries=['dop'],
depends=dop_src)
config.add_extension('_test_multivariate',
sources=quadpack_test_src)
# Fortran+f2py extension module for testing odeint.
config.add_extension('_test_odeint_banded',
sources=odeint_banded_test_src,
libraries=odepack_libs,
depends=(odepack_src + mach_src),
**lapack_opt)
config.add_data_dir('tests')
return config
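# Hedged sketch (not in the original file): the configuration can also be
# inspected programmatically. This assumes a working numpy.distutils and an
# available LAPACK, since get_info(..., notfound_action=2) raises otherwise.
def _inspect_configuration():
    cfg = configuration(top_path='')
    # todict() yields the kwargs ultimately passed to setup(), e.g. the
    # 'ext_modules' and 'libraries' entries built above.
    return sorted(cfg.todict().keys())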
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
skycucumber/restful | python/venv/lib/python2.7/site-packages/pymongo/ssl_context.py | 67 | 3660 | # Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""A fake SSLContext implementation."""
try:
import ssl
except ImportError:
pass
class SSLContext(object):
"""A fake SSLContext.
This implements an API similar to ssl.SSLContext from python 3.2
but does not implement methods or properties that would be
incompatible with ssl.wrap_socket from python 2.6.
You must pass protocol which must be one of the PROTOCOL_* constants
defined in the ssl module. ssl.PROTOCOL_SSLv23 is recommended for maximum
interoperability.
"""
__slots__ = ('_cafile', '_certfile',
'_keyfile', '_protocol', '_verify_mode')
def __init__(self, protocol):
self._cafile = None
self._certfile = None
self._keyfile = None
self._protocol = protocol
self._verify_mode = ssl.CERT_NONE
@property
def protocol(self):
"""The protocol version chosen when constructing the context.
This attribute is read-only.
"""
return self._protocol
def __get_verify_mode(self):
"""Whether to try to verify other peers' certificates and how to
behave if verification fails. This attribute must be one of
ssl.CERT_NONE, ssl.CERT_OPTIONAL or ssl.CERT_REQUIRED.
"""
return self._verify_mode
def __set_verify_mode(self, value):
"""Setter for verify_mode."""
self._verify_mode = value
verify_mode = property(__get_verify_mode, __set_verify_mode)
def load_cert_chain(self, certfile, keyfile=None):
"""Load a private key and the corresponding certificate. The certfile
string must be the path to a single file in PEM format containing the
certificate as well as any number of CA certificates needed to
establish the certificate's authenticity. The keyfile string, if
present, must point to a file containing the private key. Otherwise
the private key will be taken from certfile as well.
"""
self._certfile = certfile
self._keyfile = keyfile
def load_verify_locations(self, cafile=None, dummy=None):
"""Load a set of "certification authority"(CA) certificates used to
validate other peers' certificates when `~verify_mode` is other than
ssl.CERT_NONE.
"""
self._cafile = cafile
def wrap_socket(self, sock, server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True, dummy=None):
"""Wrap an existing Python socket sock and return an ssl.SSLSocket
object.
"""
return ssl.wrap_socket(sock, keyfile=self._keyfile,
certfile=self._certfile,
server_side=server_side,
cert_reqs=self._verify_mode,
ssl_version=self._protocol,
ca_certs=self._cafile,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs)
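# Hedged usage sketch (not part of pymongo): mirrors how connection code
# might drive this fake context. The CA path is a hypothetical placeholder.
def _example_wrap(sock):
    ctx = SSLContext(ssl.PROTOCOL_SSLv23)
    ctx.verify_mode = ssl.CERT_REQUIRED
    ctx.load_verify_locations(cafile="/path/to/ca.pem")  # hypothetical path
    # Delegates to ssl.wrap_socket() with the settings stored above.
    return ctx.wrap_socket(sock)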
| gpl-2.0 |
Yelp/security_monkey | security_monkey/exceptions.py | 15 | 3899 | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: exceptions
:synopsis: Defines all security_monkey specific exceptions
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com> @monkeysecurity
"""
from security_monkey import app
class SecurityMonkeyException(Exception):
"""Base class for all security monkey exceptions."""
pass
class InvalidARN(SecurityMonkeyException):
"""Found an indecipherable ARN"""
def __init__(self, bad_arn):
self.bad_arn = bad_arn
app.logger.info(self)
def __str__(self):
return repr("Given an invalid ARN: {}".format(self.bad_arn))
class InvalidSourceOwner(SecurityMonkeyException):
"""Source Owners should be an integer representing an AWS account owner."""
def __init__(self, bad_source_owner):
self.bad_source_owner = bad_source_owner
app.logger.info(self)
def __str__(self):
return repr("Given an invalid SourceOwner: {}".format(self.bad_source_owner))
class InvalidAWSJSON(SecurityMonkeyException):
"""The JSON returned from AWS is not valid."""
def __init__(self, bad_json):
self.bad_json = bad_json
app.logger.info(self)
def __str__(self):
return repr("Could not parse invalid JSON from AWS:\n {}".format(self.bad_json))
class BotoConnectionIssue(SecurityMonkeyException):
"""Boto could not connect. This could be a permissions issue."""
def __init__(self, connection_message, tech, account, region):
self.connection_message = connection_message
self.tech = tech
self.account = account
self.region = region
app.logger.info(self)
def __str__(self):
return repr("Problem Connecting to {}/{}/{}:\n{}".format(
self.tech, self.account, self.region, self.connection_message))
class S3PermissionsIssue(SecurityMonkeyException):
"""Boto could not read metadata about an S3 bucket. Check permissions."""
def __init__(self, bucket_name):
self.bucket_name = bucket_name
app.logger.info(self)
def __str__(self):
return repr("AWS returned an exception while attempting " +
"to obtain information on a bucket I should " +
"have access to. Bucket Name: {}".format(self.bucket_name))
class S3ACLReturnedNoneDisplayName(SecurityMonkeyException):
"""The XML representation of an S3 ACL is not providing a proper DisplayName."""
def __init__(self, bucket_name):
self.bucket_name = bucket_name
app.logger.info(self)
def __str__(self):
return repr("AWS returned <DisplayName>None</DisplayName>" +
" in the output of bhandle.get_acl().to_xml()." +
" Bucket Name:{}".format(self.bucket_name))
class AWSRateLimitReached(SecurityMonkeyException):
"""Security Monkey is being throttled by AWS."""
def __init__(self, connection_message, tech, account, region):
self.connection_message = connection_message
self.tech = tech
self.account = account
self.region = region
app.logger.info(self)
def __str__(self):
return repr("Likely reached the AWS rate limit. {}/{}/{}:\n{}".format(
self.tech, self.account, self.region, self.connection_message))
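# Hedged sketch (not part of security_monkey): how a watcher might surface
# these exceptions; the arguments are invented examples.
def _example_handling():
    try:
        raise BotoConnectionIssue('connection timed out', 's3',
                                  '123456789012', 'us-east-1')
    except SecurityMonkeyException as e:
        # Every exception above subclasses SecurityMonkeyException,
        # so a single handler can catch any of them.
        return str(e)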
| apache-2.0 |
moreati/pylons | pylons/docs/uploader.py | 4 | 3700 | import cPickle
import mimetypes
import os
import sys
import socket
from os import path
import httplib2
import simplejson
socket.setdefaulttimeout(60)
HERE_DIR = os.getcwd()
BUILD_DIR = path.join(HERE_DIR, '_build')
#host = 'http://localhost:25050'
host = 'http://pylonshq.com'
post_uri = '%s/docs/upload' % host
image_uri = '%s/docs/upload_image' % host
delete_uri = '%s/docs/delete_revision' % host
def scan_dir(parent, directory):
files = []
for name in os.listdir(directory):
full_name = path.join(directory, name)
if name in ['_sources', '_static']:
continue
if path.isdir(full_name):
new_parent = parent + '/' + name
files.extend(scan_dir(new_parent, full_name))
else:
if name.endswith('.fpickle') or name.endswith('.pickle'):
fp = open(full_name, 'r')
data = cPickle.load(fp)
fp.close()
files.append(
('/'.join([parent, name]), data)
)
elif name == 'objects.inv':
fp = open(full_name, 'r')
data = {}
data['current_page_name'] = 'objects.inv'
data['content'] = fp.read()
fp.close()
files.append(
('/'.join([parent, name]), data)
)
return files
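# Hedged illustration (not in the original script): scan_dir returns
# (path, data) pairs such as ('/index.fpickle', {...unpickled page dict...})
# or ('/objects.inv', {'current_page_name': 'objects.inv', 'content': ...});
# the names here are invented examples.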
def scan_images(directory):
files = []
for name in os.listdir(directory):
if name[-4:] in ['.png', '.jpg', 'gif']:
files.append(name)
return files
files = scan_dir('', path.join(BUILD_DIR, 'web'))
images = scan_images(path.join(BUILD_DIR, 'web', '_images'))
http = httplib2.Http(timeout=60)
basedata = {}
# Find the metadata about versions and such
for filename, filedoc in files:
if 'globalcontext.pickle' in filename:
basedata['version'] = filedoc['version']
basedata['project'] = filedoc['project']
basedata['shorttitle'] = filedoc['shorttitle']
if len(sys.argv) < 2:
raise Exception('Failed to specify doc-key')
dockey = sys.argv[1]
headers = {}
headers.setdefault('Accept', 'application/json')
headers.setdefault('User-Agent', 'Doc Uploader')
headers.setdefault('Content-Type', 'application/json')
headers.setdefault('Authkey', dockey)
language = os.path.split(HERE_DIR)[-1]
# Delete this revision, just in case
# del_uri = '%s/%s/%s' % (delete_uri, basedata['project'], basedata['version'])
# resp, data = http.request(del_uri, 'GET', headers=headers)
for filename, filedoc in files:
if not isinstance(filedoc, dict):
continue
filedoc['filename'] = filename
filedoc['language'] = language
filedoc.update(basedata)
content = simplejson.dumps(filedoc, ensure_ascii=False).encode('utf-8')
headers['Content-Length'] = str(len(content))
resp, data = http.request(post_uri, 'POST', body=content, headers=headers)
status_code = int(resp.status)
if status_code == 200:
print "Uploaded %s" % filename
else:
print "FAILED: %s" % filename
for filename in images:
    # guess_type() returns a (type, encoding) tuple; only the type string
    # belongs in the Content-Type header.
    headers['Content-Type'] = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
fp = open(path.join(BUILD_DIR, 'web', '_images', filename), 'r')
file_content = fp.read()
fp.close()
headers['Content-Length'] = str(len(file_content))
resp, data = http.request(image_uri + '?version=%s&project=%s&name=%s' %
(basedata['version'], basedata['project'], filename),
'POST', body=file_content, headers=headers)
status_code = int(resp.status)
if status_code == 200:
print "Uploaded %s" % filename
else:
print "FAILED: %s" % filename
| bsd-3-clause |
hmoco/osf.io | api/applications/serializers.py | 7 | 4204 | from django.core.validators import URLValidator
from rest_framework import serializers as ser
from modularodm import Q
from website.models import ApiOAuth2Application
from api.base.serializers import JSONAPISerializer, LinksField, IDField, TypeField, DateByVersion
from api.base.utils import absolute_reverse
class ApiOAuthApplicationBaseSerializer(JSONAPISerializer):
"""Base serializer class for OAuth2 applications """
id = IDField(source='client_id', read_only=True, help_text='The client ID for this application (automatically generated)')
type = TypeField()
client_id = ser.CharField(help_text='The client ID for this application (automatically generated)',
read_only=True)
client_secret = ser.CharField(help_text='The client secret for this application (automatically generated)',
read_only=True) # TODO: May change this later
links = LinksField({
'html': 'absolute_url',
'reset': 'reset_url'
})
def absolute_url(self, obj):
return obj.absolute_url
def get_absolute_url(self, obj):
return obj.get_absolute_url()
def reset_url(self, obj):
return absolute_reverse('applications:application-reset', kwargs={
'client_id': obj.client_id,
'version': self.context['request'].parser_context['kwargs']['version']
})
class Meta:
type_ = 'applications'
class ApiOAuth2ApplicationSerializer(ApiOAuthApplicationBaseSerializer):
"""Serialize data about a registered OAuth2 application"""
id = IDField(source='client_id', read_only=True, help_text='The client ID for this application (automatically generated)')
type = TypeField()
name = ser.CharField(help_text='A short, descriptive name for this application',
required=True)
description = ser.CharField(help_text='An optional description displayed to all users of this application',
required=False,
allow_blank=True)
home_url = ser.CharField(help_text="The full URL to this application's homepage.",
required=True,
validators=[URLValidator()],
label='Home URL')
callback_url = ser.CharField(help_text='The callback URL for this application (refer to OAuth documentation)',
required=True,
validators=[URLValidator()],
label='Callback URL')
owner = ser.CharField(help_text='The id of the user who owns this application',
read_only=True, # Don't let user register an application in someone else's name
source='owner._id')
date_created = DateByVersion(help_text='The date this application was generated (automatically filled in)',
read_only=True)
def create(self, validated_data):
instance = ApiOAuth2Application(**validated_data)
instance.save()
return instance
def update(self, instance, validated_data):
assert isinstance(instance, ApiOAuth2Application), 'instance must be an ApiOAuth2Application'
for attr, value in validated_data.iteritems():
setattr(instance, attr, value)
instance.save()
return instance
class ApiOAuth2ApplicationDetailSerializer(ApiOAuth2ApplicationSerializer):
"""
Overrides ApiOAuth2ApplicationSerializer to make id required.
"""
id = IDField(source='client_id', required=True, help_text='The client ID for this application (automatically generated)')
class ApiOAuth2ApplicationResetSerializer(ApiOAuth2ApplicationDetailSerializer):
def absolute_url(self, obj):
obj = ApiOAuth2Application.find_one(Q('client_id', 'eq', obj['client_id']))
return obj.absolute_url
def reset_url(self, obj):
return absolute_reverse('applications:application-reset', kwargs={
'client_id': obj['client_id'],
'version': self.context['request'].parser_context['kwargs']['version']
})
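# Hedged usage sketch (not in the codebase): serializing an application the
# way a DRF view would; 'app' and 'request' are placeholders supplied by the
# caller.
def _example_serialize(app, request):
    serialized = ApiOAuth2ApplicationSerializer(
        app, context={'request': request})
    # .data includes client_id, name, home_url, callback_url, owner, ...
    return serialized.data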
| apache-2.0 |
big-pegasus/spark | python/pyspark/broadcast.py | 17 | 4630 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import gc
from tempfile import NamedTemporaryFile
from pyspark.cloudpickle import print_exec
if sys.version < '3':
import cPickle as pickle
else:
import pickle
unicode = str
__all__ = ['Broadcast']
# Holds broadcasted data received from Java, keyed by its id.
_broadcastRegistry = {}
def _from_id(bid):
from pyspark.broadcast import _broadcastRegistry
if bid not in _broadcastRegistry:
raise Exception("Broadcast variable '%s' not loaded!" % bid)
return _broadcastRegistry[bid]
class Broadcast(object):
"""
A broadcast variable created with L{SparkContext.broadcast()}.
Access its value through C{.value}.
Examples:
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> b = sc.broadcast([1, 2, 3, 4, 5])
>>> b.value
[1, 2, 3, 4, 5]
>>> sc.parallelize([0, 0]).flatMap(lambda x: b.value).collect()
[1, 2, 3, 4, 5, 1, 2, 3, 4, 5]
>>> b.unpersist()
>>> large_broadcast = sc.broadcast(range(10000))
"""
def __init__(self, sc=None, value=None, pickle_registry=None, path=None):
"""
Should not be called directly by users -- use L{SparkContext.broadcast()}
instead.
"""
if sc is not None:
f = NamedTemporaryFile(delete=False, dir=sc._temp_dir)
self._path = self.dump(value, f)
self._jbroadcast = sc._jvm.PythonRDD.readBroadcastFromFile(sc._jsc, self._path)
self._pickle_registry = pickle_registry
else:
self._jbroadcast = None
self._path = path
def dump(self, value, f):
try:
pickle.dump(value, f, 2)
except pickle.PickleError:
raise
except Exception as e:
msg = "Could not serialize broadcast: " + e.__class__.__name__ + ": " + e.message
print_exec(sys.stderr)
raise pickle.PicklingError(msg)
f.close()
return f.name
def load(self, path):
with open(path, 'rb', 1 << 20) as f:
# pickle.load() may create lots of objects, disable GC
# temporary for better performance
gc.disable()
try:
return pickle.load(f)
finally:
gc.enable()
@property
def value(self):
""" Return the broadcasted value
"""
if not hasattr(self, "_value") and self._path is not None:
self._value = self.load(self._path)
return self._value
def unpersist(self, blocking=False):
"""
Delete cached copies of this broadcast on the executors. If the
broadcast is used after this is called, it will need to be
re-sent to each executor.
:param blocking: Whether to block until unpersisting has completed
"""
if self._jbroadcast is None:
raise Exception("Broadcast can only be unpersisted in driver")
self._jbroadcast.unpersist(blocking)
def destroy(self):
"""
Destroy all data and metadata related to this broadcast variable.
Use this with caution; once a broadcast variable has been destroyed,
it cannot be used again. This method blocks until destroy has
completed.
"""
if self._jbroadcast is None:
raise Exception("Broadcast can only be destroyed in driver")
self._jbroadcast.destroy()
os.unlink(self._path)
def __reduce__(self):
if self._jbroadcast is None:
raise Exception("Broadcast can only be serialized in driver")
self._pickle_registry.add(self)
return _from_id, (self._jbroadcast.id(),)
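# Hedged sketch (not part of pyspark): the driver-side dump()/load() round
# trip without a SparkContext, using the path-based constructor above.
def _example_roundtrip(value):
    f = NamedTemporaryFile(delete=False)
    shell = Broadcast(path=f.name)      # no JVM handle, just a pickle path
    path = shell.dump(value, f)         # pickles value and closes the file
    return Broadcast(path=path).value   # lazily unpickled on first access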
if __name__ == "__main__":
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
exit(-1)
| apache-2.0 |
hasanferit/trumpcrawler | crawlenv/lib/python3.5/site-packages/pip/_vendor/requests/models.py | 360 | 30532 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string,
check_header_validity)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)
or 4-tuples (filename, fileobj, contentype, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
else:
fdata = fp.read()
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
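# Hedged illustration (not part of requests): what the two encoders above
# produce; the field names and file contents are arbitrary examples.
def _encoding_demo():
    qs = RequestEncodingMixin._encode_params([('k', 'v1'), ('k', 'v2')])
    # qs == 'k=v1&k=v2'; order is preserved for lists of 2-tuples.
    body, ctype = RequestEncodingMixin._encode_files(
        {'upload': ('hello.txt', b'hi')}, {'field': 'value'})
    # ctype starts with 'multipart/form-data; boundary='.
    return qs, ctype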
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/kennethreitz/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, dict))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
curr_pos = body.tell()
body.seek(0, 2)
end_pos = body.tell()
self.headers['Content-Length'] = builtin_str(max(0, end_pos - curr_pos))
body.seek(curr_pos, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = builtin_str(l)
elif (self.method not in ('GET', 'HEAD')) and (self.headers.get('Content-Length') is None):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect"""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library"""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
        stream=True will read data as it arrives, in whatever size the
        chunks happen to be received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
reason = self.reason.decode('utf-8', 'ignore')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
return self.raw.release_conn()
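# Hedged usage sketch appended for illustration (not in requests itself):
# building a PreparedRequest without sending anything over the network.
def _prepare_demo():
    req = Request('GET', 'http://httpbin.org/get', params={'q': 'models'})
    p = req.prepare()
    # p.url == 'http://httpbin.org/get?q=models'; p.method == 'GET'
    return p.method, p.url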
| mit |
DeNeutoy/deep_qa | deep_qa/data/tokenizers/character_tokenizer.py | 3 | 2914 | from typing import Callable, Dict, List, Tuple
from keras.layers import Layer
from overrides import overrides
from .tokenizer import Tokenizer
from ..data_indexer import DataIndexer
from ...common.params import Params
class CharacterTokenizer(Tokenizer):
"""
A CharacterTokenizer splits strings into character tokens.
Notes
-----
Note that in the code, we're still using the "words" namespace, and the "num_sentence_words"
padding key, instead of using a different "characters" namespace. This is so that the rest of
the code doesn't have to change as much to just use this different tokenizer. For example,
this is an issue when adding start and stop tokens - how is an ``Instance`` class supposed to
know if it should use the "words" or the "characters" namespace when getting a start token id?
If we just always use the "words" namespace for the top-level token namespace, it's not an
issue.
But confusingly, we'll still use the "characters" embedding key... At least the user-facing
parts all use ``characters``; it's only in writing tokenizer code that you need to be careful
about namespaces. TODO(matt): it probably makes sense to change the default namespace to
"tokens", and use that for both the words in ``WordTokenizer`` and the characters in
``CharacterTokenizer``, so the naming isn't so confusing.
"""
def __init__(self, params: Params):
super(CharacterTokenizer, self).__init__(params)
@overrides
def tokenize(self, text: str) -> List[str]:
return list(text)
@overrides
def get_words_for_indexer(self, text: str) -> Dict[str, List[str]]:
return {'words': self.tokenize(text)}
@overrides
def index_text(self,
text: str,
data_indexer: DataIndexer) -> List:
return [data_indexer.get_word_index(char) for char in self.tokenize(text)]
@overrides
def embed_input(self,
input_layer: Layer,
embed_function: Callable[[Layer, str, str], Layer],
text_trainer,
embedding_suffix: str=''):
return embed_function(input_layer,
embedding_name='characters' + embedding_suffix,
vocab_name='words')
@overrides
def get_sentence_shape(self, sentence_length: int, word_length: int) -> Tuple[int]:
return (sentence_length,)
@overrides
def get_padding_lengths(self, sentence_length: int, word_length: int) -> Dict[str, int]:
# Note that `sentence_length` here is the number of _characters_ in the sentence, because
# of how `self.index_text` works. And even though the name isn't great, we'll use
# `num_sentence_words` for the key to this, so that the rest of the code is simpler.
return {'num_sentence_words': sentence_length}
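# Hedged sketch (not part of deep_qa): the character-level tokenization in
# action; the Params constructor call is schematic and may not match the
# project's exact expectations.
def _example_tokenize():
    tokenizer = CharacterTokenizer(Params({}))
    return tokenizer.tokenize("cat")  # ['c', 'a', 't']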
| apache-2.0 |
pysmt/pysmt | pysmt/solvers/z3.py | 1 | 40275 | #
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from pysmt.exceptions import SolverAPINotFound
try:
import z3
except ImportError:
raise SolverAPINotFound
# Keep array models expressed as values instead of Lambdas
# (see https://github.com/Z3Prover/z3/issues/1769)
z3.set_param('model.compact', False)
import pysmt.typing as types
import pysmt.operators as op
from pysmt.solvers.solver import (IncrementalTrackingSolver, UnsatCoreSolver,
Model, Converter, SolverOptions)
from pysmt.solvers.smtlib import SmtLibBasicSolver, SmtLibIgnoreMixin
from pysmt.solvers.qelim import QuantifierEliminator
from pysmt.walkers import DagWalker
from pysmt.exceptions import (SolverReturnedUnknownResultError,
SolverNotConfiguredForUnsatCoresError,
SolverStatusError,
ConvertExpressionError,
UndefinedSymbolError, PysmtValueError)
from pysmt.decorators import clear_pending_pop, catch_conversion_error
from pysmt.logics import LRA, LIA, QF_UFLRA, PYSMT_LOGICS
from pysmt.oracles import get_logic
from pysmt.constants import Fraction, Numeral, is_pysmt_integer, to_python_integer
# patch z3api
z3.is_ite = lambda x: z3.is_app_of(x, z3.Z3_OP_ITE)
z3.is_function = lambda x: z3.is_app_of(x, z3.Z3_OP_UNINTERPRETED)
z3.is_array_store = lambda x: z3.is_app_of(x, z3.Z3_OP_STORE)
z3.get_payload = lambda node,i : z3.Z3_get_decl_int_parameter(node.ctx.ref(),
node.decl().ast, i)
class AstRefKey:
def __init__(self, n):
self.n = n
def __hash__(self):
return self.n.hash()
def __eq__(self, other):
return self.n.eq(other.n)
def askey(n):
assert isinstance(n, z3.AstRef)
return AstRefKey(n)
class Z3Model(Model):
def __init__(self, environment, z3_model):
Model.__init__(self, environment)
self.z3_model = z3_model
self.converter = Z3Converter(environment, z3_model.ctx)
def get_value(self, formula, model_completion=True):
titem = self.converter.convert(formula)
z3_res = self.z3_model.eval(titem, model_completion=model_completion)
return self.converter.back(z3_res, model=self.z3_model)
def iterator_over(self, language):
for x in language:
yield x, self.get_value(x, model_completion=True)
def __iter__(self):
"""Overloading of iterator from Model. We iterate only on the
variables defined in the assignment.
"""
for d in self.z3_model.decls():
if d.arity() == 0:
try:
pysmt_d = self.converter.back(d())
yield pysmt_d, self.get_value(pysmt_d)
except UndefinedSymbolError:
# avoids problems with symbols generated by z3
pass
def __contains__(self, x):
"""Returns whether the model contains a value for 'x'."""
return x in (v for v, _ in self)
# EOC Z3Model
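# Hedged usage sketch (not part of pysmt): Z3Model instances are normally
# obtained through the generic solver API rather than constructed directly.
def _example_model():
    from pysmt.shortcuts import Symbol, Solver
    x = Symbol("x")  # Boolean symbol by default
    with Solver(name="z3") as s:
        s.add_assertion(x)
        assert s.solve()
        return s.get_model().get_value(x)  # TRUE()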
class Z3Options(SolverOptions):
@staticmethod
def _set_option(z3solver, name, value):
try:
z3solver.set(name, value)
except z3.Z3Exception:
raise PysmtValueError("Error setting the option '%s=%s'" \
% (name, value))
except z3.z3types.Z3Exception:
raise PysmtValueError("Error setting the option '%s=%s'" \
% (name, value))
def __call__(self, solver):
self._set_option(solver.z3, 'model', self.generate_models)
if self.unsat_cores_mode is not None:
self._set_option(solver.z3, 'unsat_core', True)
if self.random_seed is not None:
self._set_option(solver.z3, 'random_seed', self.random_seed)
for k,v in self.solver_options.items():
try:
self._set_option(solver.z3, str(k), v)
except z3.Z3Exception:
raise PysmtValueError("Error setting the option '%s=%s'" % (k,v))
except z3.z3types.Z3Exception:
raise PysmtValueError("Error setting the option '%s=%s'" % (k,v))
# EOC Z3Options
class Z3Solver(IncrementalTrackingSolver, UnsatCoreSolver,
SmtLibBasicSolver, SmtLibIgnoreMixin):
LOGICS = PYSMT_LOGICS - set(x for x in PYSMT_LOGICS if x.theory.strings)
OptionsClass = Z3Options
def __init__(self, environment, logic, **options):
IncrementalTrackingSolver.__init__(self,
environment=environment,
logic=logic,
**options)
try:
self.z3 = z3.SolverFor(str(logic))
except z3.Z3Exception:
self.z3 = z3.Solver()
except z3.z3types.Z3Exception:
self.z3 = z3.Solver()
except OSError:
self.z3 = z3.Solver()
self.options(self)
self.declarations = set()
self.converter = Z3Converter(environment, z3_ctx=self.z3.ctx)
self.mgr = environment.formula_manager
self._name_cnt = 0
return
@clear_pending_pop
def _reset_assertions(self):
self.z3.reset()
self.options(self)
@clear_pending_pop
def declare_variable(self, var):
raise NotImplementedError
@clear_pending_pop
def _add_assertion(self, formula, named=None):
self._assert_is_boolean(formula)
term = self.converter.convert(formula)
if self.options.unsat_cores_mode is not None:
            # TODO: if unsat_cores_mode is 'all', then we add this fresh variable.
# Otherwise, we should track this only if it is named.
key = self.mgr.FreshSymbol(template="_assertion_%d")
tkey = self.converter.convert(key)
self.z3.assert_and_track(term, tkey)
return (key, named, formula)
else:
self.z3.add(term)
return formula
def get_model(self):
return Z3Model(self.environment, self.z3.model())
@clear_pending_pop
def _solve(self, assumptions=None):
if assumptions is not None:
bool_ass = []
other_ass = []
for x in assumptions:
if x.is_literal():
bool_ass.append(self.converter.convert(x))
else:
other_ass.append(x)
if len(other_ass) > 0:
self.push()
self.add_assertion(self.mgr.And(other_ass))
self.pending_pop = True
res = self.z3.check(*bool_ass)
else:
res = self.z3.check()
sres = str(res)
assert sres in ['unknown', 'sat', 'unsat']
if sres == 'unknown':
raise SolverReturnedUnknownResultError
return (sres == 'sat')
def get_unsat_core(self):
"""After a call to solve() yielding UNSAT, returns the unsat core as a
set of formulae"""
return self.get_named_unsat_core().values()
def _named_assertions_map(self):
if self.options.unsat_cores_mode is not None:
return dict((t[0], (t[1],t[2])) for t in self.assertions)
return None
def get_named_unsat_core(self):
"""After a call to solve() yielding UNSAT, returns the unsat core as a
dict of names to formulae"""
if self.options.unsat_cores_mode is None:
raise SolverNotConfiguredForUnsatCoresError
if self.last_result is not False:
raise SolverStatusError("The last call to solve() was not" \
" unsatisfiable")
if self.last_command != "solve":
raise SolverStatusError("The solver status has been modified by a" \
" '%s' command after the last call to" \
" solve()" % self.last_command)
assumptions = self.z3.unsat_core()
pysmt_assumptions = set(self.converter.back(t) for t in assumptions)
res = {}
n_ass_map = self._named_assertions_map()
cnt = 0
for key in pysmt_assumptions:
if key in n_ass_map:
(name, formula) = n_ass_map[key]
if name is None:
name = "_a_%d" % cnt
cnt += 1
res[name] = formula
return res
@clear_pending_pop
def all_sat(self, important, callback):
raise NotImplementedError
@clear_pending_pop
def _push(self, levels=1):
for _ in range(levels):
self.z3.push()
@clear_pending_pop
def _pop(self, levels=1):
for _ in range(levels):
self.z3.pop()
def print_model(self, name_filter=None):
for var in self.declarations:
if name_filter is None or not var.symbol_name().startswith(name_filter):
print("%s = %s" % (var.symbol_name(), self.get_value(var)))
def get_value(self, item):
self._assert_no_function_type(item)
titem = self.converter.convert(item)
z3_res = self.z3.model().eval(titem, model_completion=True)
res = self.converter.back(z3_res, self.z3.model())
if not res.is_constant():
return res.simplify()
return res
def _exit(self):
del self.converter
del self.z3
BOOLREF_SET = op.BOOL_OPERATORS | op.RELATIONS
ARITHREF_SET = op.IRA_OPERATORS
BITVECREF_SET = op.BV_OPERATORS
class Z3Converter(Converter, DagWalker):
def __init__(self, environment, z3_ctx):
DagWalker.__init__(self, environment)
self.mgr = environment.formula_manager
self._get_type = environment.stc.get_type
self._back_memoization = {}
self.ctx = z3_ctx
# Back Conversion
self._back_fun = {
z3.Z3_OP_AND: lambda args, expr: self.mgr.And(args),
z3.Z3_OP_OR: lambda args, expr: self.mgr.Or(args),
z3.Z3_OP_MUL: lambda args, expr: self.mgr.Times(args),
z3.Z3_OP_ADD: lambda args, expr: self.mgr.Plus(args),
z3.Z3_OP_DIV: lambda args, expr: self.mgr.Div(args[0], args[1]),
z3.Z3_OP_IFF: lambda args, expr: self.mgr.Iff(args[0], args[1]),
z3.Z3_OP_XOR: lambda args, expr: self.mgr.Xor(args[0], args[1]),
z3.Z3_OP_FALSE: lambda args, expr: self.mgr.FALSE(),
z3.Z3_OP_TRUE: lambda args, expr: self.mgr.TRUE(),
z3.Z3_OP_GT: lambda args, expr: self.mgr.GT(args[0], args[1]),
z3.Z3_OP_GE: lambda args, expr: self.mgr.GE(args[0], args[1]),
z3.Z3_OP_LT: lambda args, expr: self.mgr.LT(args[0], args[1]),
z3.Z3_OP_LE: lambda args, expr: self.mgr.LE(args[0], args[1]),
z3.Z3_OP_SUB: lambda args, expr: self.mgr.Minus(args[0], args[1]),
z3.Z3_OP_NOT: lambda args, expr: self.mgr.Not(args[0]),
z3.Z3_OP_IMPLIES: lambda args, expr: self.mgr.Implies(args[0], args[1]),
z3.Z3_OP_ITE: lambda args, expr: self.mgr.Ite(args[0], args[1], args[2]),
z3.Z3_OP_TO_REAL: lambda args, expr: self.mgr.ToReal(args[0]),
z3.Z3_OP_BAND : lambda args, expr: self.mgr.BVAnd(args[0], args[1]),
z3.Z3_OP_BOR : lambda args, expr: self.mgr.BVOr(args[0], args[1]),
z3.Z3_OP_BXOR : lambda args, expr: self.mgr.BVXor(args[0], args[1]),
z3.Z3_OP_BNOT : lambda args, expr: self.mgr.BVNot(args[0]),
z3.Z3_OP_BNEG : lambda args, expr: self.mgr.BVNeg(args[0]),
z3.Z3_OP_CONCAT : lambda args, expr: self.mgr.BVConcat(args[0], args[1]),
z3.Z3_OP_ULT : lambda args, expr: self.mgr.BVULT(args[0], args[1]),
z3.Z3_OP_ULEQ : lambda args, expr: self.mgr.BVULE(args[0], args[1]),
z3.Z3_OP_SLT : lambda args, expr: self.mgr.BVSLT(args[0], args[1]),
z3.Z3_OP_SLEQ : lambda args, expr: self.mgr.BVSLE(args[0], args[1]),
z3.Z3_OP_UGT : lambda args, expr: self.mgr.BVUGT(args[0], args[1]),
z3.Z3_OP_UGEQ : lambda args, expr: self.mgr.BVUGE(args[0], args[1]),
z3.Z3_OP_SGT : lambda args, expr: self.mgr.BVSGT(args[0], args[1]),
z3.Z3_OP_SGEQ : lambda args, expr: self.mgr.BVSGE(args[0], args[1]),
z3.Z3_OP_BADD : lambda args, expr: self.mgr.BVAdd(args[0], args[1]),
z3.Z3_OP_BMUL : lambda args, expr: self.mgr.BVMul(args[0], args[1]),
z3.Z3_OP_BUDIV : lambda args, expr: self.mgr.BVUDiv(args[0], args[1]),
z3.Z3_OP_BSDIV : lambda args, expr: self.mgr.BVSDiv(args[0], args[1]),
z3.Z3_OP_BUREM : lambda args, expr: self.mgr.BVURem(args[0], args[1]),
z3.Z3_OP_BSREM : lambda args, expr: self.mgr.BVSRem(args[0], args[1]),
z3.Z3_OP_BSHL : lambda args, expr: self.mgr.BVLShl(args[0], args[1]),
z3.Z3_OP_BLSHR : lambda args, expr: self.mgr.BVLShr(args[0], args[1]),
z3.Z3_OP_BASHR : lambda args, expr: self.mgr.BVAShr(args[0], args[1]),
z3.Z3_OP_BSUB : lambda args, expr: self.mgr.BVSub(args[0], args[1]),
z3.Z3_OP_EXT_ROTATE_LEFT : lambda args, expr: self.mgr.BVRol(args[0], args[1].bv_unsigned_value()),
z3.Z3_OP_EXT_ROTATE_RIGHT: lambda args, expr: self.mgr.BVRor(args[0], args[1].bv_unsigned_value()),
z3.Z3_OP_BV2INT: lambda args, expr: self.mgr.BVToNatural(args[0]),
z3.Z3_OP_POWER : lambda args, expr: self.mgr.Pow(args[0], args[1]),
z3.Z3_OP_SELECT : lambda args, expr: self.mgr.Select(args[0], args[1]),
z3.Z3_OP_STORE : lambda args, expr: self.mgr.Store(args[0], args[1], args[2]),
# Actually use both args, expr
z3.Z3_OP_SIGN_EXT: lambda args, expr: self.mgr.BVSExt(args[0], z3.get_payload(expr, 0)),
z3.Z3_OP_ZERO_EXT: lambda args, expr: self.mgr.BVZExt(args[0], z3.get_payload(expr, 0)),
z3.Z3_OP_ROTATE_LEFT: lambda args, expr: self.mgr.BVRol(args[0], z3.get_payload(expr, 0)),
z3.Z3_OP_ROTATE_RIGHT: lambda args, expr: self.mgr.BVRor(args[0], z3.get_payload(expr, 0)),
z3.Z3_OP_EXTRACT: lambda args, expr: self.mgr.BVExtract(args[0],
z3.get_payload(expr, 1),
z3.get_payload(expr, 0)),
# Complex Back Translation
z3.Z3_OP_EQ : self._back_z3_eq,
z3.Z3_OP_UMINUS : self._back_z3_uminus,
z3.Z3_OP_CONST_ARRAY : self._back_z3_const_array,
}
# Unique reference to Sorts
self.z3RealSort = z3.RealSort(self.ctx)
self.z3BoolSort = z3.BoolSort(self.ctx)
self.z3IntSort = z3.IntSort(self.ctx)
self._z3ArraySorts = {}
self._z3BitVecSorts = {}
self._z3Sorts = {}
# Unique reference to Function Declaration
self._z3_func_decl_cache = {}
return
def z3BitVecSort(self, width):
"""Return the z3 BitVecSort for the given width."""
try:
bvsort = self._z3BitVecSorts[width]
except KeyError:
bvsort = z3.BitVecSort(width, self.ctx)
self._z3BitVecSorts[width] = bvsort
return bvsort
def z3ArraySort(self, key, value):
"""Return the z3 ArraySort for the given key value."""
try:
return self._z3ArraySorts[(askey(key),
askey(value))]
except KeyError:
sort = z3.ArraySort(key, value)
self._z3ArraySorts[(askey(key),
askey(value))] = sort
return sort
def z3Sort(self, name):
"""Return the z3 Sort for the given name."""
name = str(name)
try:
return self._z3Sorts[name]
except KeyError:
sort = z3.DeclareSort(name, self.ctx)
self._z3Sorts[name] = sort
return sort
def get_z3_ref(self, formula):
if formula.node_type() in op.QUANTIFIERS:
return z3.QuantifierRef
elif formula.node_type() in BOOLREF_SET:
return z3.BoolRef
elif formula.node_type() in ARITHREF_SET:
return z3.ArithRef
elif formula.node_type() in BITVECREF_SET:
return z3.BitVecRef
elif formula.is_symbol() or formula.is_function_application():
if formula.is_function_application():
type_ = formula.function_name().symbol_type()
type_ = type_.return_type
else:
type_ = formula.symbol_type()
if type_.is_bool_type():
return z3.BoolRef
elif type_.is_real_type() or type_.is_int_type():
return z3.ArithRef
elif type_.is_array_type():
return z3.ArrayRef
elif type_.is_bv_type():
return z3.BitVecRef
else:
raise NotImplementedError(formula)
elif formula.node_type() in op.ARRAY_OPERATORS:
return z3.ArrayRef
elif formula.is_ite():
child = formula.arg(1)
return self.get_z3_ref(child)
else:
assert formula.is_constant(), formula
type_ = formula.constant_type()
if type_.is_bool_type():
return z3.BoolRef
elif type_.is_real_type() or type_.is_int_type():
return z3.ArithRef
elif type_.is_array_type():
return z3.ArrayRef
elif type_.is_bv_type():
return z3.BitVecRef
else:
raise NotImplementedError(formula)
@catch_conversion_error
def convert(self, formula):
z3term = self.walk(formula)
ref_class = self.get_z3_ref(formula)
return ref_class(z3term, self.ctx)
def back(self, expr, model=None):
"""Convert a Z3 expression back into a pySMT expression.
This is done using the Z3 API. For very big expressions, it is
sometimes faster to go through the SMT-LIB format. In those
cases, consider using the method back_via_smtlib.
"""
stack = [expr]
while len(stack) > 0:
current = stack.pop()
key = (askey(current), model)
if key not in self._back_memoization:
self._back_memoization[key] = None
stack.append(current)
for c in current.children():
stack.append(c)
elif self._back_memoization[key] is None:
args = [self._back_memoization[(askey(c), model)]
for c in current.children()]
res = self._back_single_term(current, args, model)
self._back_memoization[key] = res
else:
# we already visited the node, nothing else to do
pass
return self._back_memoization[(askey(expr), model)]
def _back_single_term(self, expr, args, model=None):
assert z3.is_expr(expr)
if z3.is_quantifier(expr):
raise NotImplementedError(
"Quantified back conversion is currently not supported")
assert not len(args) > 2 or \
(z3.is_and(expr) or z3.is_or(expr) or
z3.is_add(expr) or z3.is_mul(expr) or
(len(args) == 3 and (z3.is_ite(expr) or z3.is_array_store(expr)))),\
"Unexpected n-ary term: %s" % expr
res = None
try:
decl = z3.Z3_get_app_decl(expr.ctx_ref(), expr.as_ast())
kind = z3.Z3_get_decl_kind(expr.ctx.ref(), decl)
# Try to get the back-conversion function for the given Kind
fun = self._back_fun[kind]
return fun(args, expr)
except KeyError as ex:
pass
if z3.is_const(expr):
# Const or Symbol
if z3.is_rational_value(expr):
n = expr.numerator_as_long()
d = expr.denominator_as_long()
f = Fraction(n, d)
return self.mgr.Real(f)
elif z3.is_int_value(expr):
n = expr.as_long()
return self.mgr.Int(n)
elif z3.is_bv_value(expr):
n = expr.as_long()
w = expr.size()
return self.mgr.BV(n, w)
elif z3.is_as_array(expr):
if model is None:
raise NotImplementedError("As-array expressions cannot be" \
" handled as they are not " \
"self-contained")
else:
interp_decl = z3.get_as_array_func(expr)
interp = model[interp_decl]
default = self.back(interp.else_value(), model=model)
assign = {}
for i in range(interp.num_entries()):
e = interp.entry(i)
assert e.num_args() == 1
idx = self.back(e.arg_value(0), model=model)
val = self.back(e.value(), model=model)
assign[idx] = val
arr_type = self._z3_to_type(expr.sort())
return self.mgr.Array(arr_type.index_type, default, assign)
elif z3.is_algebraic_value(expr):
# Algebraic value
return self.mgr._Algebraic(Numeral(expr))
else:
# it must be a symbol
try:
return self.mgr.get_symbol(str(expr))
except UndefinedSymbolError:
import warnings
symb_type = self._z3_to_type(expr.sort())
warnings.warn("Defining new symbol: %s" % str(expr))
return self.mgr.FreshSymbol(symb_type,
template="__z3_%d")
elif z3.is_function(expr):
# This needs to be after we try to convert regular Symbols
fsymbol = self.mgr.get_symbol(expr.decl().name())
return self.mgr.Function(fsymbol, args)
# If we reach this point, we did not manage to translate the expression
raise ConvertExpressionError(message=("Unsupported expression: %s" %
(str(expr))),
expression=expr)
def _back_z3_eq(self, args, expr):
if self._get_type(args[0]).is_bool_type():
return self.mgr.Iff(args[0], args[1])
return self.mgr.Equals(args[0], args[1])
def _back_z3_uminus(self, args, expr):
tp = self._get_type(args[0])
if tp.is_real_type():
minus_one = self.mgr.Real(-1)
else:
assert tp.is_int_type()
minus_one = self.mgr.Int(-1)
return self.mgr.Times(args[0], minus_one)
def _back_z3_const_array(self, args, expr):
arr_ty = self._z3_to_type(expr.sort())
return self.mgr.Array(arr_ty.index_type, args[0])
def back_via_smtlib(self, expr):
"""Back convert a Z3 Expression by translation to SMT-LIB."""
from io import StringIO
from pysmt.smtlib.parser import SmtLibZ3Parser
parser = SmtLibZ3Parser(self.env)
z3.Z3_set_ast_print_mode(expr.ctx.ref(), z3.Z3_PRINT_SMTLIB2_COMPLIANT)
s = z3.Z3_benchmark_to_smtlib_string(expr.ctx.ref(),
None, None,
None, None,
0, None,
expr.ast)
stream_in = StringIO(s)
r = parser.get_script(stream_in).get_last_formula(self.mgr)
key = (askey(expr), None)
self._back_memoization[key] = r
return r
# Fwd Conversion
def _to_ast_array(self, args):
"""Convert a list of arguments into an z3.AST vector."""
sz = len(args)
_args = (z3.Ast * sz)()
for i, arg in enumerate(args):
_args[i] = arg
return _args, sz
def walk_not(self, formula, args, **kwargs):
z3term = z3.Z3_mk_not(self.ctx.ref(), args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_symbol(self, formula, **kwargs):
symbol_type = formula.symbol_type()
sname = formula.symbol_name()
z3_sname = z3.Z3_mk_string_symbol(self.ctx.ref(), sname)
if symbol_type.is_bool_type():
sort_ast = self.z3BoolSort.ast
elif symbol_type.is_real_type():
sort_ast = self.z3RealSort.ast
elif symbol_type.is_int_type():
sort_ast = self.z3IntSort.ast
elif symbol_type.is_array_type():
sort_ast = self._type_to_z3(symbol_type).ast
elif symbol_type.is_string_type():
raise ConvertExpressionError(message=("Unsupported string symbol: %s" %
str(formula)),
expression=formula)
else:
sort_ast = self._type_to_z3(symbol_type).ast
# Create const with given sort
res = z3.Z3_mk_const(self.ctx.ref(), z3_sname, sort_ast)
z3.Z3_inc_ref(self.ctx.ref(), res)
return res
def walk_ite(self, formula, args, **kwargs):
i = args[0]
ni = self.walk_not(None, (i,))
t = args[1]
e = args[2]
if self._get_type(formula).is_bool_type():
# Rewrite as (!i \/ t) & (i \/ e)
_args, sz = self._to_ast_array((ni, t))
or1 = z3.Z3_mk_or(self.ctx.ref(), sz, _args)
z3.Z3_inc_ref(self.ctx.ref(), or1)
_args, sz = self._to_ast_array((i, e))
or2 = z3.Z3_mk_or(self.ctx.ref(), sz, _args)
z3.Z3_inc_ref(self.ctx.ref(), or2)
_args, sz = self._to_ast_array((or1, or2))
z3term = z3.Z3_mk_and(self.ctx.ref(), sz, _args)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
z3.Z3_dec_ref(self.ctx.ref(), or1)
z3.Z3_dec_ref(self.ctx.ref(), or2)
return z3term
z3term = z3.Z3_mk_ite(self.ctx.ref(), i, t, e)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_algebraic_constant(self, formula, **kwargs):
rep = str(formula.constant_value())
z3term = z3.Z3_mk_numeral(self.ctx.ref(),
rep,
self.z3RealSort.ast)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_real_constant(self, formula, **kwargs):
frac = formula.constant_value()
n,d = frac.numerator, frac.denominator
rep = str(n) + "/" + str(d)
z3term = z3.Z3_mk_numeral(self.ctx.ref(),
rep,
self.z3RealSort.ast)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_int_constant(self, formula, **kwargs):
assert is_pysmt_integer(formula.constant_value())
const = str(formula.constant_value())
z3term = z3.Z3_mk_numeral(self.ctx.ref(),
const,
self.z3IntSort.ast)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bool_constant(self, formula, **kwargs):
_t = z3.BoolVal(formula.constant_value(), ctx=self.ctx)
z3term = _t.as_ast()
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_quantifier(self, formula, args, **kwargs):
qvars = formula.quantifier_vars()
qvars, qvars_sz = self._to_ast_array([self.walk_symbol(x)\
for x in qvars])
empty_str = z3.Z3_mk_string_symbol(self.ctx.ref(), "")
z3term = z3.Z3_mk_quantifier_const_ex(self.ctx.ref(),
formula.is_forall(),
1, empty_str, empty_str,
qvars_sz, qvars,
0, None, 0, None,
args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_toreal(self, formula, args, **kwargs):
z3term = z3.Z3_mk_int2real(self.ctx.ref(), args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def _z3_func_decl(self, func_name):
"""Create a Z3 Function Declaration for the given function."""
try:
return self._z3_func_decl_cache[func_name]
except KeyError:
tp = func_name.symbol_type()
arity = len(tp.param_types)
z3dom = (z3.Sort * arity)()
for i, t in enumerate(tp.param_types):
z3dom[i] = self._type_to_z3(t).ast
z3ret = self._type_to_z3(tp.return_type).ast
z3name = z3.Z3_mk_string_symbol(self.ctx.ref(),
func_name.symbol_name())
z3func = z3.Z3_mk_func_decl(self.ctx.ref(), z3name,
arity, z3dom, z3ret)
self._z3_func_decl_cache[func_name] = z3func
return z3func
def walk_function(self, formula, args, **kwargs):
z3func = self._z3_func_decl(formula.function_name())
_args, sz = self._to_ast_array(args)
z3term = z3.Z3_mk_app(self.ctx.ref(), z3func, sz, _args)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_constant(self, formula, **kwargs):
value = formula.constant_value()
z3term = z3.Z3_mk_numeral(self.ctx.ref(),
str(value),
self.z3BitVecSort(formula.bv_width()).ast)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_extract(self, formula, args, **kwargs):
z3term = z3.Z3_mk_extract(self.ctx.ref(),
formula.bv_extract_end(),
formula.bv_extract_start(),
args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_not(self, formula, args, **kwargs):
z3term = z3.Z3_mk_bvnot(self.ctx.ref(), args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_neg(self, formula, args, **kwargs):
z3term = z3.Z3_mk_bvneg(self.ctx.ref(), args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_rol(self, formula, args, **kwargs):
bvsort = self.z3BitVecSort(formula.bv_width())
step = z3.Z3_mk_numeral(self.ctx.ref(),
str(formula.bv_rotation_step()),
bvsort.ast)
z3term = z3.Z3_mk_ext_rotate_left(self.ctx.ref(),
args[0], step)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_ror(self, formula, args, **kwargs):
bvsort = self.z3BitVecSort(formula.bv_width())
step = z3.Z3_mk_numeral(self.ctx.ref(),
str(formula.bv_rotation_step()),
bvsort.ast)
z3term = z3.Z3_mk_ext_rotate_right(self.ctx.ref(),
args[0], step)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_zext(self, formula, args, **kwargs):
z3term = z3.Z3_mk_zero_ext(self.ctx.ref(),
formula.bv_extend_step(), args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_sext (self, formula, args, **kwargs):
z3term = z3.Z3_mk_sign_ext(self.ctx.ref(),
formula.bv_extend_step(), args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_comp(self, formula, args, **kwargs):
cond = z3.Z3_mk_eq(self.ctx.ref(), args[0], args[1])
z3.Z3_inc_ref(self.ctx.ref(), cond)
then_ = z3.Z3_mk_numeral(self.ctx.ref(), "1", self.z3BitVecSort(1).ast)
z3.Z3_inc_ref(self.ctx.ref(), then_)
else_ = z3.Z3_mk_numeral(self.ctx.ref(), "0", self.z3BitVecSort(1).ast)
z3.Z3_inc_ref(self.ctx.ref(), else_)
z3term = z3.Z3_mk_ite(self.ctx.ref(), cond, then_, else_)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
# De-Ref since this is handled internally by Z3
z3.Z3_dec_ref(self.ctx.ref(), cond)
z3.Z3_dec_ref(self.ctx.ref(), then_)
z3.Z3_dec_ref(self.ctx.ref(), else_)
return z3term
def walk_bv_tonatural(self, formula, args, **kwargs):
z3term = z3.Z3_mk_bv2int(self.ctx.ref(), args[0], False)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_array_select(self, formula, args, **kwargs):
z3term = z3.Z3_mk_select(self.ctx.ref(), args[0], args[1])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_array_store(self, formula, args, **kwargs):
z3term = z3.Z3_mk_store(self.ctx.ref(), args[0], args[1], args[2])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_array_value(self, formula, args, **kwargs):
idx_type = formula.array_value_index_type()
arraysort = self._type_to_z3(idx_type).ast
z3term = z3.Z3_mk_const_array(self.ctx.ref(), arraysort, args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
for i in range(1, len(args), 2):
c = args[i]
z3term = self.walk_array_store(None, (z3term, c, args[i+1]))
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def _z3_to_type(self, sort):
if sort.kind() == z3.Z3_BOOL_SORT:
return types.BOOL
elif sort.kind() == z3.Z3_INT_SORT:
return types.INT
elif sort.kind() == z3.Z3_REAL_SORT:
return types.REAL
elif sort.kind() == z3.Z3_ARRAY_SORT:
return types.ArrayType(self._z3_to_type(sort.domain()),
self._z3_to_type(sort.range()))
elif sort.kind() == z3.Z3_BV_SORT:
return types.BVType(sort.size())
else:
raise NotImplementedError("Unsupported sort in conversion: %s" % sort)
def make_walk_nary(func):
def walk_nary(self, formula, args, **kwargs):
_args, sz = self._to_ast_array(args)
z3term = func(self.ctx.ref(), sz, _args)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
return walk_nary
def make_walk_binary(func):
def walk_binary(self, formula, args, **kwargs):
z3term = func(self.ctx.ref(), args[0], args[1])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
return walk_binary
walk_and = make_walk_nary(z3.Z3_mk_and)
walk_or = make_walk_nary(z3.Z3_mk_or)
walk_plus = make_walk_nary(z3.Z3_mk_add)
walk_times = make_walk_nary(z3.Z3_mk_mul)
walk_minus = make_walk_nary(z3.Z3_mk_sub)
walk_implies = make_walk_binary(z3.Z3_mk_implies)
walk_le = make_walk_binary(z3.Z3_mk_le)
walk_lt = make_walk_binary(z3.Z3_mk_lt)
walk_equals = make_walk_binary(z3.Z3_mk_eq)
walk_iff = make_walk_binary(z3.Z3_mk_eq)
walk_pow = make_walk_binary(z3.Z3_mk_power)
walk_div = make_walk_binary(z3.Z3_mk_div)
walk_bv_ult = make_walk_binary(z3.Z3_mk_bvult)
walk_bv_ule = make_walk_binary(z3.Z3_mk_bvule)
walk_bv_slt = make_walk_binary(z3.Z3_mk_bvslt)
walk_bv_sle = make_walk_binary(z3.Z3_mk_bvsle)
walk_bv_concat = make_walk_binary(z3.Z3_mk_concat)
walk_bv_or = make_walk_binary(z3.Z3_mk_bvor)
walk_bv_and = make_walk_binary(z3.Z3_mk_bvand)
walk_bv_xor = make_walk_binary(z3.Z3_mk_bvxor)
walk_bv_add = make_walk_binary(z3.Z3_mk_bvadd)
walk_bv_sub = make_walk_binary(z3.Z3_mk_bvsub)
walk_bv_mul = make_walk_binary(z3.Z3_mk_bvmul)
walk_bv_udiv = make_walk_binary(z3.Z3_mk_bvudiv)
walk_bv_urem = make_walk_binary(z3.Z3_mk_bvurem)
walk_bv_lshl = make_walk_binary(z3.Z3_mk_bvshl)
walk_bv_lshr = make_walk_binary(z3.Z3_mk_bvlshr)
walk_bv_sdiv = make_walk_binary(z3.Z3_mk_bvsdiv)
walk_bv_srem = make_walk_binary(z3.Z3_mk_bvsrem)
walk_bv_ashr = make_walk_binary(z3.Z3_mk_bvashr)
walk_exists = walk_quantifier
walk_forall = walk_quantifier
def _type_to_z3(self, tp):
"""Convert a pySMT type into the corresponding Z3 sort."""
if tp.is_bool_type():
return self.z3BoolSort
elif tp.is_real_type():
return self.z3RealSort
elif tp.is_int_type():
return self.z3IntSort
elif tp.is_array_type():
key_sort = self._type_to_z3(tp.index_type)
val_sort = self._type_to_z3(tp.elem_type)
return self.z3ArraySort(key_sort, val_sort)
elif tp.is_bv_type():
return self.z3BitVecSort(tp.width)
else:
assert tp.is_custom_type(), "Unsupported type '%s'" % tp
return self.z3Sort(tp)
raise NotImplementedError("Unsupported type in conversion: %s" % tp)
def __del__(self):
# Cleaning-up Z3Converter requires dec-ref'ing the terms in the cache
if self.ctx.ref():
# Check that there is still a context object
# This might not be the case if we are using the global context
# and the interpreter is shutting down
for t in self.memoization.values():
z3.Z3_dec_ref(self.ctx.ref(), t)
# EOC Z3Converter
class Z3QuantifierEliminator(QuantifierEliminator):
LOGICS = [LIA, LRA]
def __init__(self, environment, logic=None):
QuantifierEliminator.__init__(self)
self.environment = environment
self.logic = logic
self.converter = Z3Converter(environment, z3.main_ctx())
def eliminate_quantifiers(self, formula):
logic = get_logic(formula, self.environment)
if not logic <= LRA and not logic <= LIA:
raise PysmtValueError("Z3 quantifier elimination only "\
"supports LRA or LIA without combination."\
"(detected logic is: %s)" % str(logic))
simplifier = z3.Tactic('simplify')
eliminator = z3.Tactic('qe')
f = self.converter.convert(formula)
s = simplifier(f, elim_and=True,
pull_cheap_ite=True,
ite_extra_rules=True).as_expr()
res = eliminator(s).as_expr()
pysmt_res = None
try:
pysmt_res = self.converter.back(res)
except ConvertExpressionError:
if logic <= LRA:
raise
raise ConvertExpressionError(message=("Unable to represent" \
"expression %s in pySMT: the quantifier elimination for " \
"LIA is incomplete as it requires the modulus. You can " \
"find the Z3 expression representing the quantifier " \
"elimination as the attribute 'expression' of this " \
"exception object" % str(res)),
expression=res)
return pysmt_res
def _exit(self):
pass
| apache-2.0 |
tbetbetbe/grpc | src/python/grpcio_test/grpc_test/framework/face/testing/stock_service.py | 17 | 12184 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Examples of Python implementations of the stock.proto Stock service."""
from grpc.framework.common import cardinality
from grpc.framework.foundation import abandonment
from grpc.framework.foundation import stream
from grpc.framework.foundation import stream_util
from grpc_test.framework.face.testing import service
from grpc_test._junkdrawer import stock_pb2
SYMBOL_FORMAT = 'test symbol:%03d'
STREAM_LENGTH = 400
# A test-appropriate security-pricing function. :-P
_price = lambda symbol_name: float(hash(symbol_name) % 4096)
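# Note: within a single Python 2 process (hash randomization is off by
# default), hash() of a given string is stable, so the verify() methods
# below can recompute exactly the prices the service methods produced.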
def _get_last_trade_price(stock_request, stock_reply_callback, control, active):
"""A unary-request, unary-response test method."""
control.control()
if active():
stock_reply_callback(
stock_pb2.StockReply(
symbol=stock_request.symbol, price=_price(stock_request.symbol)))
else:
raise abandonment.Abandoned()
def _get_last_trade_price_multiple(stock_reply_consumer, control, active):
"""A stream-request, stream-response test method."""
def stock_reply_for_stock_request(stock_request):
control.control()
if active():
return stock_pb2.StockReply(
symbol=stock_request.symbol, price=_price(stock_request.symbol))
else:
raise abandonment.Abandoned()
return stream_util.TransformingConsumer(
stock_reply_for_stock_request, stock_reply_consumer)
def _watch_future_trades(stock_request, stock_reply_consumer, control, active):
"""A unary-request, stream-response test method."""
base_price = _price(stock_request.symbol)
for index in range(stock_request.num_trades_to_watch):
control.control()
if active():
stock_reply_consumer.consume(
stock_pb2.StockReply(
symbol=stock_request.symbol, price=base_price + index))
else:
raise abandonment.Abandoned()
stock_reply_consumer.terminate()
def _get_highest_trade_price(stock_reply_callback, control, active):
"""A stream-request, unary-response test method."""
class StockRequestConsumer(stream.Consumer):
"""Keeps an ongoing record of the most valuable symbol yet consumed."""
def __init__(self):
self._symbol = None
self._price = None
def consume(self, stock_request):
control.control()
if active():
if self._price is None:
self._symbol = stock_request.symbol
self._price = _price(stock_request.symbol)
else:
candidate_price = _price(stock_request.symbol)
if self._price < candidate_price:
self._symbol = stock_request.symbol
self._price = candidate_price
def terminate(self):
control.control()
if active():
if self._symbol is None:
raise ValueError()
else:
stock_reply_callback(
stock_pb2.StockReply(symbol=self._symbol, price=self._price))
self._symbol = None
self._price = None
def consume_and_terminate(self, stock_request):
control.control()
if active():
if self._price is None:
stock_reply_callback(
stock_pb2.StockReply(
symbol=stock_request.symbol,
price=_price(stock_request.symbol)))
else:
candidate_price = _price(stock_request.symbol)
if self._price < candidate_price:
stock_reply_callback(
stock_pb2.StockReply(
symbol=stock_request.symbol, price=candidate_price))
else:
stock_reply_callback(
stock_pb2.StockReply(
symbol=self._symbol, price=self._price))
self._symbol = None
self._price = None
return StockRequestConsumer()
class GetLastTradePrice(service.UnaryUnaryTestMethodImplementation):
"""GetLastTradePrice for use in tests."""
def name(self):
return 'GetLastTradePrice'
def cardinality(self):
return cardinality.Cardinality.UNARY_UNARY
def request_class(self):
return stock_pb2.StockRequest
def response_class(self):
return stock_pb2.StockReply
def serialize_request(self, request):
return request.SerializeToString()
def deserialize_request(self, serialized_request):
return stock_pb2.StockRequest.FromString(serialized_request)
def serialize_response(self, response):
return response.SerializeToString()
def deserialize_response(self, serialized_response):
return stock_pb2.StockReply.FromString(serialized_response)
def service(self, request, response_callback, context, control):
_get_last_trade_price(
request, response_callback, control, context.is_active)
class GetLastTradePriceMessages(service.UnaryUnaryTestMessages):
def __init__(self):
self._index = 0
def request(self):
symbol = SYMBOL_FORMAT % self._index
self._index += 1
return stock_pb2.StockRequest(symbol=symbol)
def verify(self, request, response, test_case):
test_case.assertEqual(request.symbol, response.symbol)
test_case.assertEqual(_price(request.symbol), response.price)
class GetLastTradePriceMultiple(service.StreamStreamTestMethodImplementation):
"""GetLastTradePriceMultiple for use in tests."""
def name(self):
return 'GetLastTradePriceMultiple'
def cardinality(self):
return cardinality.Cardinality.STREAM_STREAM
def request_class(self):
return stock_pb2.StockRequest
def response_class(self):
return stock_pb2.StockReply
def serialize_request(self, request):
return request.SerializeToString()
def deserialize_request(self, serialized_request):
return stock_pb2.StockRequest.FromString(serialized_request)
def serialize_response(self, response):
return response.SerializeToString()
def deserialize_response(self, serialized_response):
return stock_pb2.StockReply.FromString(serialized_response)
def service(self, response_consumer, context, control):
return _get_last_trade_price_multiple(
response_consumer, control, context.is_active)
class GetLastTradePriceMultipleMessages(service.StreamStreamTestMessages):
"""Pairs of message streams for use with GetLastTradePriceMultiple."""
def __init__(self):
self._index = 0
def requests(self):
base_index = self._index
self._index += 1
return [
stock_pb2.StockRequest(symbol=SYMBOL_FORMAT % (base_index + index))
for index in range(STREAM_LENGTH)]
def verify(self, requests, responses, test_case):
test_case.assertEqual(len(requests), len(responses))
for stock_request, stock_reply in zip(requests, responses):
test_case.assertEqual(stock_request.symbol, stock_reply.symbol)
test_case.assertEqual(_price(stock_request.symbol), stock_reply.price)
class WatchFutureTrades(service.UnaryStreamTestMethodImplementation):
"""WatchFutureTrades for use in tests."""
def name(self):
return 'WatchFutureTrades'
def cardinality(self):
return cardinality.Cardinality.UNARY_STREAM
def request_class(self):
return stock_pb2.StockRequest
def response_class(self):
return stock_pb2.StockReply
def serialize_request(self, request):
return request.SerializeToString()
def deserialize_request(self, serialized_request):
return stock_pb2.StockRequest.FromString(serialized_request)
def serialize_response(self, response):
return response.SerializeToString()
def deserialize_response(self, serialized_response):
return stock_pb2.StockReply.FromString(serialized_response)
def service(self, request, response_consumer, context, control):
_watch_future_trades(request, response_consumer, control, context.is_active)
class WatchFutureTradesMessages(service.UnaryStreamTestMessages):
"""Pairs of a single request message and a sequence of response messages."""
def __init__(self):
self._index = 0
def request(self):
symbol = SYMBOL_FORMAT % self._index
self._index += 1
return stock_pb2.StockRequest(
symbol=symbol, num_trades_to_watch=STREAM_LENGTH)
def verify(self, request, responses, test_case):
test_case.assertEqual(STREAM_LENGTH, len(responses))
base_price = _price(request.symbol)
for index, response in enumerate(responses):
test_case.assertEqual(base_price + index, response.price)
class GetHighestTradePrice(service.StreamUnaryTestMethodImplementation):
"""GetHighestTradePrice for use in tests."""
def name(self):
return 'GetHighestTradePrice'
def cardinality(self):
return cardinality.Cardinality.STREAM_UNARY
def request_class(self):
return stock_pb2.StockRequest
def response_class(self):
return stock_pb2.StockReply
def serialize_request(self, request):
return request.SerializeToString()
def deserialize_request(self, serialized_request):
return stock_pb2.StockRequest.FromString(serialized_request)
def serialize_response(self, response):
return response.SerializeToString()
def deserialize_response(self, serialized_response):
return stock_pb2.StockReply.FromString(serialized_response)
def service(self, response_callback, context, control):
return _get_highest_trade_price(
response_callback, control, context.is_active)
class GetHighestTradePriceMessages(service.StreamUnaryTestMessages):
def requests(self):
return [
stock_pb2.StockRequest(symbol=SYMBOL_FORMAT % index)
for index in range(STREAM_LENGTH)]
def verify(self, requests, response, test_case):
price = None
symbol = None
for stock_request in requests:
current_symbol = stock_request.symbol
current_price = _price(current_symbol)
if price is None or price < current_price:
price = current_price
symbol = current_symbol
test_case.assertEqual(price, response.price)
test_case.assertEqual(symbol, response.symbol)
class StockTestService(service.TestService):
"""A corpus of test data with one method of each RPC cardinality."""
def name(self):
return 'Stock'
def unary_unary_scenarios(self):
return {
'GetLastTradePrice': (
GetLastTradePrice(), [GetLastTradePriceMessages()]),
}
def unary_stream_scenarios(self):
return {
'WatchFutureTrades': (
WatchFutureTrades(), [WatchFutureTradesMessages()]),
}
def stream_unary_scenarios(self):
return {
'GetHighestTradePrice': (
GetHighestTradePrice(), [GetHighestTradePriceMessages()])
}
def stream_stream_scenarios(self):
return {
'GetLastTradePriceMultiple': (
GetLastTradePriceMultiple(), [GetLastTradePriceMultipleMessages()]),
}
STOCK_TEST_SERVICE = StockTestService()
| bsd-3-clause |
PuZheng/cloud-dashing | cloud_dashing/portal/user/views.py | 1 | 3864 | # -*- coding: UTF-8 -*-
from flask import (jsonify, request, render_template, redirect, session,
current_app)
from flask.ext.wtf import Form
from flask.ext.babel import _
from wtforms import TextField, PasswordField
from wtforms.validators import DataRequired
from werkzeug.security import generate_password_hash
from flask.ext.login import (current_user, login_user, login_required,
logout_user)
from flask.ext.principal import Identity, AnonymousIdentity, identity_changed
from cloud_dashing.portal.user import user_ws, user
from cloud_dashing.models import User, Group
from cloud_dashing import utils, apis, const
from cloud_dashing.exceptions import AuthenticateFailure
@user_ws.route('/register', methods=['POST'])
def register_ws():
name = request.args.get("name", type=str)
password = request.args.get("password", type=str)
if not name or not password:
return u"需要name或者password字段", 403
user = User.query.filter(User.name == name).first()
if user:
return u'this username already exists, please choose another', 403
user = utils.do_commit(User(name=name,
password=generate_password_hash(
password, 'pbkdf2:sha256'),
group=Group.query.get(const.CUSTOMER_GROUP)))
user = apis.wraps(user)
return jsonify(user.as_dict(include_auth_token=True)), 201
@user_ws.route("/login", methods=["POST"])
def login_ws():
name = request.args.get("name", type=str)
password = request.args.get("password", type=str)
if not name or not password:
return u"需要name或者password字段", 403
try:
user = apis.user.authenticate(name, password)
except AuthenticateFailure:
return u'invalid username or password', 403
return jsonify(user.as_dict(include_auth_token=True))
class LoginForm(Form):
username = TextField('username', validators=[DataRequired()])
password = PasswordField('password', validators=[DataRequired()])
@user.route('/login.html', methods=['GET', 'POST'])
def login():
form = LoginForm()
if request.method == "GET":
if current_user.is_anonymous():
return render_template("user/login.html", form=form,
error=request.args.get('error'),
next_url=request.args.get('next'))
return redirect("/")
else:
if form.validate_on_submit():
username = form.username.data
password = form.password.data
try:
user = apis.user.authenticate(username, password)
except AuthenticateFailure:
return render_template("user/login.html",
error=_("invalid username or password"),
form=form), 403
if not login_user(user):
return render_template("user/login.html",
error=_("failed to login")), 403
identity_changed.send(current_app._get_current_object(),
identity=Identity(user.id))
return redirect(request.args.get('next') or "/")
return render_template("user/login.html",
error=_("please input username or password"),
form=form), 403
@user.route("/logout.html")
@login_required
def logout():
try:
logout_user()
except Exception: # in case sesson expire
pass
for key in ('identity.name', 'identity.auth_type'):
session.pop(key, None)
identity_changed.send(current_app._get_current_object(),
identity=AnonymousIdentity())
next_url = request.args.get("next", "/")
return redirect(next_url)
| gpl-2.0 |
fsufitch/eve-shipspinning | modules/eveapi.py | 1 | 26931 | #-----------------------------------------------------------------------------
# eveapi - EVE Online API access
#
# Copyright (c)2007 Jamie "Entity" van den Berge <entity@vapor.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE
#
#-----------------------------------------------------------------------------
# Version: 1.1.6 - 27 May 2011
# - Now supports composite keys for IndexRowsets.
# - Fixed calls not working if a path was specified in the root url.
#
# Version: 1.1.5 - 27 Januari 2011
# - Now supports (and defaults to) HTTPS. Non-SSL proxies will still work by
# explicitly specifying http:// in the url.
#
# Version: 1.1.4 - 1 December 2010
# - Empty explicit CDATA tags are now properly handled.
# - _autocast now receives the name of the variable it's trying to typecast,
# enabling custom/future casting functions to make smarter decisions.
#
# Version: 1.1.3 - 6 November 2010
# - Added support for anonymous CDATA inside row tags. This makes the body of
# mails in the rows of char/MailBodies available through the .data attribute.
#
# Version: 1.1.2 - 2 July 2010
# - Fixed __str__ on row objects to work properly with unicode strings.
#
# Version: 1.1.1 - 10 Januari 2010
# - Fixed bug that causes nested tags to not appear in rows of rowsets created
# from normal Elements. This should fix the corp.MemberSecurity method,
# which now returns all data for members. [jehed]
#
# Version: 1.1.0 - 15 Januari 2009
# - Added Select() method to Rowset class. Using it avoids the creation of
# temporary row instances, speeding up iteration considerably.
# - Added ParseXML() function, which can be passed arbitrary API XML file or
# string objects.
# - Added support for proxy servers. A proxy can be specified globally or
# per api connection instance. [suggestion by graalman]
# - Some minor refactoring.
# - Fixed deprecation warning when using Python 2.6.
#
# Version: 1.0.7 - 14 November 2008
# - Added workaround for rowsets that are missing the (required!) columns
# attribute. If missing, it will use the columns found in the first row.
# Note that this is will still break when expecting columns, if the rowset
# is empty. [Flux/Entity]
#
# Version: 1.0.6 - 18 July 2008
# - Enabled expat text buffering to avoid content breaking up. [BigWhale]
#
# Version: 1.0.5 - 03 February 2008
# - Added workaround to make broken XML responses (like the "row:name" bug in
# eve/CharacterID) work as intended.
# - Bogus datestamps before the epoch in XML responses are now set to 0 to
# avoid breaking certain date/time functions. [Anathema Matou]
#
# Version: 1.0.4 - 23 December 2007
# - Changed _autocast() to use timegm() instead of mktime(). [Invisible Hand]
# - Fixed missing attributes of elements inside rows. [Elandra Tenari]
#
# Version: 1.0.3 - 13 December 2007
# - Fixed keyless columns bugging out the parser (in CorporationSheet for ex.)
#
# Version: 1.0.2 - 12 December 2007
# - Fixed parser not working with indented XML.
#
# Version: 1.0.1
# - Some micro optimizations
#
# Version: 1.0
# - Initial release
#
# Requirements:
# Python 2.4+
#
#-----------------------------------------------------------------------------
import httplib
import urlparse
import urllib
import copy
from xml.parsers import expat
from time import strptime
from calendar import timegm
proxy = None
#-----------------------------------------------------------------------------
class Error(StandardError):
def __init__(self, code, message):
self.code = code
self.args = (message.rstrip("."),)
def EVEAPIConnection(url="api.eveonline.com", cacheHandler=None, proxy=None):
# Creates an API object through which you can call remote functions.
#
# The following optional arguments may be provided:
#
# url - root location of the EVEAPI server
#
# proxy - (host,port) specifying a proxy server through which to request
# the API pages. Specifying a proxy overrides default proxy.
#
# cacheHandler - an object which must support the following interface:
#
# retrieve(host, path, params)
#
# Called when eveapi wants to fetch a document.
# host is the address of the server, path is the full path to
# the requested document, and params is a dict containing the
# parameters passed to this api call (userID, apiKey etc).
# The method MUST return one of the following types:
#
# None - if your cache did not contain this entry
# str/unicode - eveapi will parse this as XML
# Element - previously stored object as provided to store()
# file-like object - eveapi will read() XML from the stream.
#
# store(host, path, params, doc, obj)
#
# Called when eveapi wants you to cache this item.
# You can use obj to get the info about the object (cachedUntil
# and currentTime, etc) doc is the XML document the object
# was generated from. It's generally best to cache the XML, not
# the object, unless you pickle the object. Note that this method
# will only be called if you returned None in the retrieve() for
# this object.
#
if not url.startswith("http"):
url = "https://" + url
p = urlparse.urlparse(url, "https")
# ParseResult is immutable (a namedtuple), so keep the stripped path in
# a local instead of assigning back to p.path.
path = p.path
if path and path[-1] == "/":
path = path[:-1]
ctx = _RootContext(None, path, {}, {})
ctx._handler = cacheHandler
ctx._scheme = p.scheme
ctx._host = p.netloc
ctx._proxy = proxy or globals()["proxy"]
return ctx
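# A minimal sketch of a conforming cache handler (illustrative only, not
# part of this module): it memoizes raw XML in a dict keyed on the exact
# host/path/params triple and never expires anything.
#
#   class DictCacheHandler(object):
#       def __init__(self):
#           self._data = {}
#       def retrieve(self, host, path, params):
#           # return the cached XML string, or None on a miss
#           return self._data.get((host, path, tuple(sorted(params.items()))))
#       def store(self, host, path, params, doc, obj):
#           self._data[(host, path, tuple(sorted(params.items())))] = doc
#
#   api = EVEAPIConnection(cacheHandler=DictCacheHandler())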
def ParseXML(file_or_string):
try:
return _ParseXML(file_or_string, False, None)
except TypeError:
raise TypeError("XML data must be provided as string or file-like object")
def _ParseXML(response, fromContext, storeFunc):
# pre/post-process XML or Element data
if fromContext and isinstance(response, Element):
obj = response
elif type(response) in (str, unicode):
obj = _Parser().Parse(response, False)
elif hasattr(response, "read"):
obj = _Parser().Parse(response, True)
else:
raise TypeError("retrieve method must return None, string, file-like object or an Element instance")
error = getattr(obj, "error", False)
if error:
raise Error(error.code, error.data)
result = getattr(obj, "result", False)
if not result:
raise RuntimeError("API object does not contain result")
if fromContext and storeFunc:
# call the cache handler to store this object
storeFunc(obj)
# make metadata available to caller somehow
result._meta = obj
return result
#-----------------------------------------------------------------------------
# API Classes
#-----------------------------------------------------------------------------
_listtypes = (list, tuple, dict)
_unspecified = []
class _Context(object):
def __init__(self, root, path, parentDict, newKeywords=None):
self._root = root or self
self._path = path
if newKeywords:
if parentDict:
self.parameters = parentDict.copy()
else:
self.parameters = {}
self.parameters.update(newKeywords)
else:
self.parameters = parentDict or {}
def context(self, *args, **kw):
if kw or args:
path = self._path
if args:
path += "/" + "/".join(args)
return self.__class__(self._root, path, self.parameters, kw)
else:
return self
def __getattr__(self, this):
# perform arcane attribute majick trick
return _Context(self._root, self._path + "/" + this, self.parameters)
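# e.g. api.eve.CharacterID chains two _Context objects so that the final
# call is issued against <root>/eve/CharacterID.xml.aspx with the
# accumulated parameters.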
def __call__(self, **kw):
if kw:
# specified keywords override contextual ones
for k, v in self.parameters.iteritems():
if k not in kw:
kw[k] = v
else:
# no keywords provided, just update with contextual ones.
kw.update(self.parameters)
# now let the root context handle it further
return self._root(self._path, **kw)
class _AuthContext(_Context):
def character(self, characterID):
# returns a copy of this connection object but for every call made
# through it, it will add the folder "/char" to the url, and the
# characterID to the parameters passed.
return _Context(self._root, self._path + "/char", self.parameters, {"characterID":characterID})
def corporation(self, characterID):
# same as character except for the folder "/corp"
return _Context(self._root, self._path + "/corp", self.parameters, {"characterID":characterID})
class _RootContext(_Context):
def auth(self, userID=None, apiKey=None):
# returns a copy of this object but for every call made through it, the
# userID and apiKey will be added to the API request.
if userID and apiKey:
return _AuthContext(self._root, self._path, self.parameters, {"userID":userID, "apiKey":apiKey})
raise ValueError("Must specify userID and apiKey")
def setcachehandler(self, handler):
self._root._handler = handler
def __call__(self, path, **kw):
# convert list type arguments to something the API likes
for k, v in kw.iteritems():
if isinstance(v, _listtypes):
kw[k] = ','.join(map(str, list(v)))
cache = self._root._handler
# now send the request
path += ".xml.aspx"
if cache:
response = cache.retrieve(self._host, path, kw)
else:
response = None
if response is None:
if self._scheme == "https":
connectionclass = httplib.HTTPSConnection
else:
connectionclass = httplib.HTTPConnection
if self._proxy is None:
http = connectionclass(self._host)
if kw:
http.request("POST", path, urllib.urlencode(kw), {"Content-type": "application/x-www-form-urlencoded"})
else:
http.request("GET", path)
else:
http = connectionclass(*self._proxy)
if kw:
http.request("POST", 'https://'+self._host+path, urllib.urlencode(kw), {"Content-type": "application/x-www-form-urlencoded"})
else:
http.request("GET", 'https://'+self._host+path)
response = http.getresponse()
if response.status != 200:
if response.status == httplib.NOT_FOUND:
raise AttributeError("'%s' not available on API server (404 Not Found)" % path)
else:
raise RuntimeError("'%s' request failed (%d %s)" % (path, response.status, response.reason))
if cache:
store = True
response = response.read()
else:
store = False
else:
store = False
retrieve_fallback = cache and getattr(cache, "retrieve_fallback", False)
if retrieve_fallback:
# implementor is handling fallbacks...
try:
return _ParseXML(response, True, store and (lambda obj: cache.store(self._host, path, kw, response, obj)))
except Error, e:
response = retrieve_fallback(self._host, path, kw, reason=e)
if response is not None:
return response
raise
else:
# implementor is not handling fallbacks...
return _ParseXML(response, True, store and (lambda obj: cache.store(self._host, path, kw, response, obj)))
#-----------------------------------------------------------------------------
# XML Parser
#-----------------------------------------------------------------------------
def _autocast(key, value):
# attempts to cast an XML string to the most probable type.
try:
if value.strip("-").isdigit():
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
if len(value) == 19 and value[10] == ' ':
# it could be a date string
try:
return max(0, int(timegm(strptime(value, "%Y-%m-%d %H:%M:%S"))))
except OverflowError:
pass
except ValueError:
pass
# couldn't cast. return string unchanged.
return value
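# Examples of the casting rules above:
#   _autocast("qty", "42") -> 42 (int)
#   _autocast("rate", "0.5") -> 0.5 (float)
#   _autocast("when", "2011-05-27 12:00:00") -> UTC epoch seconds (int)
#   _autocast("name", "Jita IV") -> "Jita IV" (unchanged string)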
class _Parser(object):
def Parse(self, data, isStream=False):
self.container = self.root = None
self._cdata = False
p = expat.ParserCreate()
p.StartElementHandler = self.tag_start
p.CharacterDataHandler = self.tag_cdata
p.StartCdataSectionHandler = self.tag_cdatasection_enter
p.EndCdataSectionHandler = self.tag_cdatasection_exit
p.EndElementHandler = self.tag_end
p.ordered_attributes = True
p.buffer_text = True
if isStream:
p.ParseFile(data)
else:
p.Parse(data, True)
return self.root
def tag_cdatasection_enter(self):
# encountered an explicit CDATA tag.
self._cdata = True
def tag_cdatasection_exit(self):
if self._cdata:
# explicit CDATA without actual data. expat doesn't seem
# to trigger an event for this case, so do it manually.
# (_cdata is set False by this call)
self.tag_cdata("")
else:
self._cdata = False
def tag_start(self, name, attributes):
# <hack>
# If there's a colon in the tag name, cut off the name from the colon
# onward. This is a workaround to make certain bugged XML responses
# (such as eve/CharacterID.xml.aspx) work.
if ":" in name:
name = name[:name.index(":")]
# </hack>
if name == "rowset":
# for rowsets, use the given name
try:
columns = attributes[attributes.index('columns')+1].split(",")
except ValueError:
# rowset did not have columns tag set (this is a bug in API)
# columns will be extracted from first row instead.
columns = []
try:
priKey = attributes[attributes.index('key')+1]
this = IndexRowset(cols=columns, key=priKey)
except ValueError:
this = Rowset(cols=columns)
this._name = attributes[attributes.index('name')+1]
this.__catch = "row" # tag to auto-add to rowset.
else:
this = Element()
this._name = name
this.__parent = self.container
if self.root is None:
# We're at the root. The first tag has to be "eveapi" or we can't
# really assume the rest of the xml is going to be what we expect.
if name != "eveapi":
raise RuntimeError("Invalid API response")
self.root = this
if isinstance(self.container, Rowset) and (self.container.__catch == this._name):
# check for missing columns attribute (see above)
if not self.container._cols:
self.container._cols = attributes[0::2]
self.container.append([_autocast(attributes[i], attributes[i+1]) for i in xrange(0, len(attributes), 2)])
this._isrow = True
this._attributes = this._attributes2 = None
else:
this._isrow = False
this._attributes = attributes
this._attributes2 = []
self.container = this
def tag_cdata(self, data):
if self._cdata:
# unset cdata flag to indicate it's been handled.
self._cdata = False
else:
if data in ("\r\n", "\n") or data.strip() != data:
return
this = self.container
data = _autocast(this._name, data)
if this._isrow:
# sigh. anonymous data inside rows makes Entity cry.
# for the love of Jove, CCP, learn how to use rowsets.
parent = this.__parent
_row = parent._rows[-1]
_row.append(data)
if len(parent._cols) < len(_row):
parent._cols.append("data")
elif this._attributes:
# this tag has attributes, so we can't simply assign the cdata
# as an attribute to the parent tag, as we'll lose the current
# tag's attributes then. instead, we'll assign the data as
# attribute of this tag.
this.data = data
else:
# this was a simple <tag>data</tag> without attributes.
# we won't be doing anything with this actual tag so we can just
# bind it to its parent (done by __tag_end)
setattr(this.__parent, this._name, data)
def tag_end(self, name):
this = self.container
if this is self.root:
del this._attributes
#this.__dict__.pop("_attributes", None)
return
# we're done with current tag, so we can pop it off. This means that
# self.container will now point to the container of element 'this'.
self.container = this.__parent
del this.__parent
attributes = this.__dict__.pop("_attributes")
attributes2 = this.__dict__.pop("_attributes2")
if attributes is None:
# already processed this tag's closure early, in tag_start()
return
if self.container._isrow:
# Special case here. tags inside a row! Such tags have to be
# added as attributes of the row.
parent = self.container.__parent
# get the row line for this element from its parent rowset
_row = parent._rows[-1]
# add this tag's value to the end of the row
_row.append(getattr(self.container, this._name, this))
# fix columns if neccessary.
if len(parent._cols) < len(_row):
parent._cols.append(this._name)
else:
# see if there's already an attribute with this name (this shouldn't
# really happen, but it doesn't hurt to handle this case!
sibling = getattr(self.container, this._name, None)
if sibling is None:
self.container._attributes2.append(this._name)
setattr(self.container, this._name, this)
# Note: there aren't supposed to be any NON-rowset tags containing
# multiples of some tag or attribute. Code below handles this case.
elif isinstance(sibling, Rowset):
# its doppelganger is a rowset, append this as a row to that.
row = [_autocast(attributes[i], attributes[i+1]) for i in xrange(0, len(attributes), 2)]
row.extend([getattr(this, col) for col in attributes2])
sibling.append(row)
elif isinstance(sibling, Element):
# parent attribute is an element. This means we're dealing
# with multiple of the same sub-tag. Change the attribute
# into a Rowset, adding the sibling element and this one.
rs = Rowset()
rs.__catch = rs._name = this._name
row = [_autocast(attributes[i], attributes[i+1]) for i in xrange(0, len(attributes), 2)]+[getattr(this, col) for col in attributes2]
rs.append(row)
row = [getattr(sibling, attributes[i]) for i in xrange(0, len(attributes), 2)]+[getattr(sibling, col) for col in attributes2]
rs.append(row)
rs._cols = [attributes[i] for i in xrange(0, len(attributes), 2)]+[col for col in attributes2]
setattr(self.container, this._name, rs)
else:
# something else must have set this attribute already.
# (typically the <tag>data</tag> case in tag_data())
pass
# Now fix up the attributes and be done with it.
for i in xrange(0, len(attributes), 2):
this.__dict__[attributes[i]] = _autocast(attributes[i], attributes[i+1])
return
#-----------------------------------------------------------------------------
# XML Data Containers
#-----------------------------------------------------------------------------
# The following classes are the various container types the XML data is
# unpacked into.
#
# Note that objects returned by API calls are to be treated as read-only. This
# is not enforced, but you have been warned.
#-----------------------------------------------------------------------------
class Element(object):
# Element is a namespace for attributes and nested tags
def __str__(self):
return "<Element '%s'>" % self._name
_fmt = u"%s:%s".__mod__
class Row(object):
# A Row is a single database record associated with a Rowset.
# The fields in the record are accessed as attributes by their respective
# column name.
#
# To conserve resources, Row objects are only created on-demand. This is
# typically done by Rowsets (e.g. when iterating over the rowset).
def __init__(self, cols=None, row=None):
self._cols = cols or []
self._row = row or []
def __nonzero__(self):
return True
def __ne__(self, other):
return self.__cmp__(other)
def __eq__(self, other):
return self.__cmp__(other) == 0
def __cmp__(self, other):
if type(other) != type(self):
raise TypeError("Incompatible comparison type")
return cmp(self._cols, other._cols) or cmp(self._row, other._row)
def __getattr__(self, this):
try:
return self._row[self._cols.index(this)]
except:
raise AttributeError, this
def __getitem__(self, this):
return self._row[self._cols.index(this)]
def __str__(self):
return "Row(" + ','.join(map(_fmt, zip(self._cols, self._row))) + ")"
class Rowset(object):
# Rowsets are collections of Row objects.
#
# Rowsets support most of the list interface:
# iteration, indexing and slicing
#
# As well as the following methods:
#
# IndexedBy(column)
# Returns an IndexRowset keyed on given column. Requires the column to
# be usable as primary key.
#
# GroupedBy(column)
# Returns a FilterRowset keyed on given column. FilterRowset objects
# can be accessed like dicts. See FilterRowset class below.
#
# SortBy(column, reverse=True)
# Sorts rowset in-place on given column. for a descending sort,
# specify reversed=True.
#
# SortedBy(column, reverse=True)
# Same as SortBy, except this returns a new rowset object instead of
# sorting in-place.
#
# Select(columns, row=False)
# Yields a column values tuple (value, ...) for each row in the rowset.
# If only one column is requested, then just the column value is
# provided instead of the values tuple.
# When row=True, each result will be decorated with the entire row.
#
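# Illustrative usage (made-up data, not a real API response):
#   rs = Rowset(cols=["typeID", "price"], rows=[[34, 5.2], [35, 11.9]])
#   for typeID, price in rs.Select("typeID", "price"):
#       print typeID, price
#   cheapest_first = rs.SortedBy("price")
#   by_type = rs.IndexedBy("typeID")
#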
def IndexedBy(self, column):
return IndexRowset(self._cols, self._rows, column)
def GroupedBy(self, column):
return FilterRowset(self._cols, self._rows, column)
def SortBy(self, column, reverse=False):
ix = self._cols.index(column)
self.sort(key=lambda e: e[ix], reverse=reverse)
def SortedBy(self, column, reverse=False):
rs = self[:]
rs.SortBy(column, reverse)
return rs
def Select(self, *columns, **options):
if len(columns) == 1:
i = self._cols.index(columns[0])
if options.get("row", False):
for line in self._rows:
yield (line, line[i])
else:
for line in self._rows:
yield line[i]
else:
i = map(self._cols.index, columns)
if options.get("row", False):
for line in self._rows:
yield line, [line[x] for x in i]
else:
for line in self._rows:
yield [line[x] for x in i]
# -------------
def __init__(self, cols=None, rows=None):
self._cols = cols or []
self._rows = rows or []
def append(self, row):
if isinstance(row, list):
self._rows.append(row)
elif isinstance(row, Row) and len(row._cols) == len(self._cols):
self._rows.append(row._row)
else:
raise TypeError("incompatible row type")
def __add__(self, other):
if isinstance(other, Rowset):
if len(other._cols) == len(self._cols):
self._rows += other._rows
return self
raise TypeError("rowset instance expected")
def __nonzero__(self):
return not not self._rows
def __len__(self):
return len(self._rows)
def copy(self):
return self[:]
def __getitem__(self, ix):
if type(ix) is slice:
return Rowset(self._cols, self._rows[ix])
return Row(self._cols, self._rows[ix])
def sort(self, *args, **kw):
self._rows.sort(*args, **kw)
def __str__(self):
return ("Rowset(columns=[%s], rows=%d)" % (','.join(self._cols), len(self)))
def __getstate__(self):
return (self._cols, self._rows)
def __setstate__(self, state):
self._cols, self._rows = state
class IndexRowset(Rowset):
# An IndexRowset is a Rowset that keeps an index on a column.
#
# The interface is the same as Rowset, but provides an additional method:
#
# Get(key [, default])
# Returns the Row mapped to provided key in the index. If there is no
# such key in the index, KeyError is raised unless a default value was
# specified.
#
def Get(self, key, *default):
row = self._items.get(key, None)
if row is None:
if default:
return default[0]
raise KeyError, key
return Row(self._cols, row)
# -------------
def __init__(self, cols=None, rows=None, key=None):
try:
if "," in key:
self._ki = ki = [cols.index(k) for k in key.split(",")]
self.composite = True
else:
self._ki = ki = cols.index(key)
self.composite = False
except ValueError:  # list.index raises ValueError, not IndexError
raise ValueError("Rowset has no column %s" % key)
Rowset.__init__(self, cols, rows)
self._key = key
if self.composite:
self._items = dict((tuple([row[k] for k in ki]), row) for row in self._rows)
else:
self._items = dict((row[ki], row) for row in self._rows)
def __getitem__(self, ix):
if type(ix) is slice:
return IndexRowset(self._cols, self._rows[ix], self._key)
return Rowset.__getitem__(self, ix)
def append(self, row):
Rowset.append(self, row)
if self.composite:
self._items[tuple([row[k] for k in self._ki])] = row
else:
self._items[row[self._ki]] = row
def __getstate__(self):
return (Rowset.__getstate__(self), self._items, self._ki)
def __setstate__(self, state):
state, self._items, self._ki = state
Rowset.__setstate__(self, state)
class FilterRowset(object):
# A FilterRowset works much like an IndexRowset, with the following
# differences:
# - FilterRowsets are accessed much like dicts
# - Each key maps to a Rowset, containing only the rows where the value
# of the column this FilterRowset was made on matches the key.
def __init__(self, cols=None, rows=None, key=None, key2=None, dict=None):
if dict is not None:
self._items = items = dict
elif cols is not None:
self._items = items = {}
idfield = cols.index(key)
if not key2:
for row in rows:
id = row[idfield]
if id in items:
items[id].append(row)
else:
items[id] = [row]
else:
idfield2 = cols.index(key2)
for row in rows:
id = row[idfield]
if id in items:
items[id][row[idfield2]] = row
else:
items[id] = {row[idfield2]:row}
self._cols = cols
self.key = key
self.key2 = key2
self._bind()
def _bind(self):
items = self._items
self.keys = items.keys
self.iterkeys = items.iterkeys
self.__contains__ = items.__contains__
self.has_key = items.has_key
self.__len__ = items.__len__
self.__iter__ = items.__iter__
def copy(self):
return FilterRowset(self._cols[:], None, self.key, self.key2, dict=copy.deepcopy(self._items))
def get(self, key, default=_unspecified):
try:
return self[key]
except KeyError:
if default is _unspecified:
raise
return default
def __getitem__(self, i):
    if self.key2:
        # Rebuild an IndexRowset from the stored {key2: row} mapping; the
        # three-argument constructor above re-derives the index from rows.
        return IndexRowset(self._cols, self._items.get(i, {}).values(), self.key2)
    return Rowset(self._cols, self._items[i])
def __getstate__(self):
    # FilterRowset keeps no _rows attribute; only the grouped items matter.
    return (self._cols, self._items, self.key, self.key2)
def __setstate__(self, state):
    self._cols, self._items, self.key, self.key2 = state
self._bind()
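# --- Usage sketch (added for illustration; not part of the original module).
# The column names and row values below are invented purely for the demo.
def _rowset_demo():
    cols = ["typeID", "typeName"]
    rows = [[34, "Tritanium"], [35, "Pyerite"], [36, "Mexallon"]]
    rs = Rowset(cols, rows)
    # Select yields bare values when a single column is requested.
    for name in rs.Select("typeName"):
        print name                      # Tritanium, Pyerite, Mexallon
    # IndexedBy gives keyed access; Get falls back to an optional default.
    by_id = rs.IndexedBy("typeID")
    print by_id.Get(35).typeName        # Pyerite
    print by_id.Get(99, "missing")      # missing
    # GroupedBy buckets rows into per-key Rowsets.
    grouped = rs.GroupedBy("typeName")
    print len(grouped["Pyerite"])       # 1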
| mit |
saruberoz/photodl | scripts/get_instagram_access_token.py | 1 | 1438 | # Taken from:
# https://github.com/Instagram/python-instagram/blob/master/get_access_token.py
from instagram.client import InstagramAPI
import sys
if len(sys.argv) > 1 and sys.argv[1] == 'local':
try:
from test_settings import *
InstagramAPI.host = test_host
InstagramAPI.base_path = test_base_path
InstagramAPI.access_token_field = "access_token"
InstagramAPI.authorize_url = test_authorize_url
InstagramAPI.access_token_url = test_access_token_url
InstagramAPI.protocol = test_protocol
except Exception:
pass
client_id = raw_input("Client ID: ").strip()
client_secret = raw_input("Client Secret: ").strip()
redirect_uri = raw_input("Redirect URI: ").strip()
raw_scope = raw_input(
"Requested scope (separated by spaces, blank for just basic read): "
).strip()
scope = raw_scope.split(' ')
# For basic, API seems to need to be set explicitly
if not scope or scope == [""]:
scope = ["basic"]
api = InstagramAPI(client_id=client_id,
client_secret=client_secret,
redirect_uri=redirect_uri)
redirect_uri = api.get_authorize_login_url(scope=scope)
print "Visit this page and accountorize access" \
" in your browser:\n", redirect_uri
code = raw_input("Paste in code in query string after redirect: ").strip()
access_token = api.exchange_code_for_access_token(code)
print "access token:\n", access_token
| mit |
GoogleCloudPlatform/mlops-on-gcp | workshops/kfp-caip-sklearn/lab-02-kfp-pipeline/pipeline/helper_components.py | 12 | 2846 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper components."""
from typing import NamedTuple
def retrieve_best_run(
project_id: str, job_id: str
) -> NamedTuple('Outputs', [('metric_value', float), ('alpha', float),
('max_iter', int)]):
"""Retrieves the parameters of the best Hypertune run."""
from googleapiclient import discovery
from googleapiclient import errors
ml = discovery.build('ml', 'v1')
job_name = 'projects/{}/jobs/{}'.format(project_id, job_id)
request = ml.projects().jobs().get(name=job_name)
try:
    response = request.execute()
except errors.HttpError as err:
    print(err)
    raise
except Exception:
    print('Unexpected error')
    raise
print(response)
best_trial = response['trainingOutput']['trials'][0]
metric_value = best_trial['finalMetric']['objectiveValue']
alpha = float(best_trial['hyperparameters']['alpha'])
max_iter = int(best_trial['hyperparameters']['max_iter'])
return (metric_value, alpha, max_iter)
def evaluate_model(
dataset_path: str, model_path: str, metric_name: str
) -> NamedTuple('Outputs', [('metric_name', str), ('metric_value', float),
('mlpipeline_metrics', 'Metrics')]):
"""Evaluates a trained sklearn model."""
#import joblib
import pickle
import json
import pandas as pd
import subprocess
import sys
from sklearn.metrics import accuracy_score, recall_score
df_test = pd.read_csv(dataset_path)
X_test = df_test.drop('Cover_Type', axis=1)
y_test = df_test['Cover_Type']
# Copy the model from GCS
model_filename = 'model.pkl'
gcs_model_filepath = '{}/{}'.format(model_path, model_filename)
print(gcs_model_filepath)
subprocess.check_call(['gsutil', 'cp', gcs_model_filepath, model_filename],
stderr=sys.stdout)
with open(model_filename, 'rb') as model_file:
model = pickle.load(model_file)
y_hat = model.predict(X_test)
if metric_name == 'accuracy':
metric_value = accuracy_score(y_test, y_hat)
elif metric_name == 'recall':
metric_value = recall_score(y_test, y_hat)
else:
metric_name = 'N/A'
metric_value = 0
# Export the metric
metrics = {
'metrics': [{
'name': metric_name,
'numberValue': float(metric_value)
}]
}
return (metric_name, metric_value, json.dumps(metrics))
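# A minimal wiring sketch (added for illustration): in the KFP v1 SDK these
# helpers are typically converted into pipeline ops with
# kfp.components.func_to_container_op. The base image below is an arbitrary
# placeholder, not something mandated by this lab.
def _build_component_ops():
    import kfp.components
    retrieve_best_run_op = kfp.components.func_to_container_op(
        retrieve_best_run, base_image='python:3.7')
    evaluate_model_op = kfp.components.func_to_container_op(
        evaluate_model, base_image='python:3.7')
    return retrieve_best_run_op, evaluate_model_op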
| apache-2.0 |
hobson/aima | aima/logic.py | 2 | 41430 | """Representations and Inference for Logic (Chapters 7-9, 12)
Covers both Propositional and First-Order Logic. First we have four
important data types:
KB Abstract class holds a knowledge base of logical expressions
KB_Agent Abstract class subclasses agents.Agent
Expr A logical expression
substitution Implemented as a dictionary of var:value pairs, {x:1, y:x}
Be careful: some functions take an Expr as argument, and some take a KB.
Then we implement various functions for doing logical inference:
pl_true Evaluate a propositional logical sentence in a model
tt_entails Say if a statement is entailed by a KB
pl_resolution Do resolution on propositional sentences
dpll_satisfiable See if a propositional sentence is satisfiable
WalkSAT (not yet implemented)
And a few other functions:
to_cnf Convert to conjunctive normal form
unify Do unification of two FOL sentences
diff, simp Symbolic differentiation and simplification
"""
import itertools, re
import agents
from utils import *
#______________________________________________________________________________
class KB:
"""A knowledge base to which you can tell and ask sentences.
To create a KB, first subclass this class and implement
tell, ask_generator, and retract. Why ask_generator instead of ask?
The book is a bit vague on what ask means --
For a Propositional Logic KB, ask(P & Q) returns True or False, but for an
FOL KB, something like ask(Brother(x, y)) might return many substitutions
such as {x: Cain, y: Abel}, {x: Abel, y: Cain}, {x: George, y: Jeb}, etc.
So ask_generator generates these one at a time, and ask either returns the
first one or returns False."""
def __init__(self, sentence=None):
abstract
def tell(self, sentence):
"Add the sentence to the KB."
abstract
def ask(self, query):
"""Return a substitution that makes the query true, or,
failing that, return False."""
for result in self.ask_generator(query):
return result
return False
def ask_generator(self, query):
"Yield all the substitutions that make query true."
abstract
def retract(self, sentence):
"Remove sentence from the KB."
abstract
class PropKB(KB):
"A KB for propositional logic. Inefficient, with no indexing."
def __init__(self, sentence=None):
self.clauses = []
if sentence:
self.tell(sentence)
def tell(self, sentence):
"Add the sentence's clauses to the KB."
self.clauses.extend(conjuncts(to_cnf(sentence)))
def ask_generator(self, query):
"Yield the empty substitution if KB implies query; else nothing."
if tt_entails(Expr('&', *self.clauses), query):
yield {}
def retract(self, sentence):
"Remove the sentence's clauses from the KB."
for c in conjuncts(to_cnf(sentence)):
if c in self.clauses:
self.clauses.remove(c)
#______________________________________________________________________________
def KB_AgentProgram(KB):
"""A generic logical knowledge-based agent program. [Fig. 7.1]"""
steps = itertools.count()
def program(percept):
t = steps.next()
KB.tell(make_percept_sentence(percept, t))
action = KB.ask(make_action_query(t))
KB.tell(make_action_sentence(action, t))
return action
def make_percept_sentence(percept, t):
    return Expr("Percept")(percept, t)
def make_action_query(t):
    return expr("ShouldDo(action, %d)" % t)
def make_action_sentence(action, t):
    return Expr("Did")(action[expr('action')], t)
return program
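# A hypothetical wiring sketch (not in the book code): the returned program is
# a plain function, so it can drive an agents.Agent directly, e.g.
#   agent = agents.Agent(KB_AgentProgram(PropKB()))
# provided the KB already holds whatever background knowledge the queries need.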
#______________________________________________________________________________
class Expr:
"""A symbolic mathematical expression. We use this class for logical
expressions, and for terms within logical expressions. In general, an
Expr has an op (operator) and a list of args. The op can be:
Null-ary (no args) op:
A number, representing the number itself. (e.g. Expr(42) => 42)
A symbol, representing a variable or constant (e.g. Expr('F') => F)
Unary (1 arg) op:
'~', '-', representing NOT, negation (e.g. Expr('~', Expr('P')) => ~P)
Binary (2 arg) op:
'>>', '<<', representing forward and backward implication
'+', '-', '*', '/', '**', representing arithmetic operators
'<', '>', '>=', '<=', representing comparison operators
'<=>', '^', representing logical equality and XOR
N-ary (0 or more args) op:
'&', '|', representing conjunction and disjunction
A symbol, representing a function term or FOL proposition
Exprs can be constructed with operator overloading: if x and y are Exprs,
then so are x + y and x & y, etc. Also, if F and x are Exprs, then so is
F(x); it works by overloading the __call__ method of the Expr F. Note
that in the Expr that is created by F(x), the op is the str 'F', not the
Expr F. See http://www.python.org/doc/current/ref/specialnames.html
to learn more about operator overloading in Python.
WARNING: x == y and x != y are NOT Exprs. The reason is that we want
to write code that tests 'if x == y:' and if x == y were the same
as Expr('==', x, y), then the result would always be true; not what a
programmer would expect. But we still need to form Exprs representing
equalities and disequalities. We concentrate on logical equality (or
equivalence) and logical disequality (or XOR). You have 3 choices:
(1) Expr('<=>', x, y) and Expr('^', x, y)
Note that ^ is bitwose XOR in Python (and Java and C++)
(2) expr('x <=> y') and expr('x =/= y').
See the doc string for the function expr.
(3) (x % y) and (x ^ y).
It is very ugly to have (x % y) mean (x <=> y), but we need
SOME operator to make (2) work, and this seems the best choice.
WARNING: if x is an Expr, then so is x + 1, because the int 1 gets
coerced to an Expr by the constructor. But 1 + x is an error, because
1 doesn't know how to add an Expr. (Adding an __radd__ method to Expr
wouldn't help, because int.__add__ is still called first.) Therefore,
you should use Expr(1) + x instead, or ONE + x, or expr('1 + x').
"""
def __init__(self, op, *args):
"Op is a string or number; args are Exprs (or are coerced to Exprs)."
assert isinstance(op, str) or (isnumber(op) and not args)
self.op = num_or_str(op)
self.args = map(expr, args) ## Coerce args to Exprs
def __call__(self, *args):
"""Self must be a symbol with no args, such as Expr('F'). Create a new
Expr with 'F' as op and the args as arguments."""
assert is_symbol(self.op) and not self.args
return Expr(self.op, *args)
def __repr__(self):
"Show something like 'P' or 'P(x, y)', or '~P' or '(P | Q | R)'"
if not self.args: # Constant or proposition with arity 0
return str(self.op)
elif is_symbol(self.op): # Functional or propositional operator
return '%s(%s)' % (self.op, ', '.join(map(repr, self.args)))
elif len(self.args) == 1: # Prefix operator
return self.op + repr(self.args[0])
else: # Infix operator
return '(%s)' % (' '+self.op+' ').join(map(repr, self.args))
def __eq__(self, other):
"""x and y are equal iff their ops and args are equal."""
return (other is self) or (isinstance(other, Expr)
and self.op == other.op and self.args == other.args)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
"Need a hash method so Exprs can live in dicts."
return hash(self.op) ^ hash(tuple(self.args))
# See http://www.python.org/doc/current/lib/module-operator.html
# Not implemented: not, abs, pos, concat, contains, *item, *slice
def __lt__(self, other): return Expr('<', self, other)
def __le__(self, other): return Expr('<=', self, other)
def __ge__(self, other): return Expr('>=', self, other)
def __gt__(self, other): return Expr('>', self, other)
def __add__(self, other): return Expr('+', self, other)
def __sub__(self, other): return Expr('-', self, other)
def __and__(self, other): return Expr('&', self, other)
def __div__(self, other): return Expr('/', self, other)
def __truediv__(self, other):return Expr('/', self, other)
def __invert__(self): return Expr('~', self)
def __lshift__(self, other): return Expr('<<', self, other)
def __rshift__(self, other): return Expr('>>', self, other)
def __mul__(self, other): return Expr('*', self, other)
def __neg__(self): return Expr('-', self)
def __or__(self, other): return Expr('|', self, other)
def __pow__(self, other): return Expr('**', self, other)
def __xor__(self, other): return Expr('^', self, other)
def __mod__(self, other): return Expr('<=>', self, other)
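# Illustration of the overloading described in the class docstring (symbols
# chosen arbitrarily): infix operators on Exprs build nested Exprs rather
# than evaluating anything.
# >>> P, Q = Expr('P'), Expr('Q')
# >>> (P & ~Q) >> P
# ((P & ~Q) >> P)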
def expr(s):
"""Create an Expr representing a logic expression by parsing the input
string. Symbols and numbers are automatically converted to Exprs.
In addition you can use alternative spellings of these operators:
'x ==> y' parses as (x >> y) # Implication
'x <== y' parses as (x << y) # Reverse implication
'x <=> y' parses as (x % y) # Logical equivalence
'x =/= y' parses as (x ^ y) # Logical disequality (xor)
But BE CAREFUL; precedence of implication is wrong. expr('P & Q ==> R & S')
is ((P & (Q >> R)) & S); so you must use expr('(P & Q) ==> (R & S)').
>>> expr('P <=> Q(1)')
(P <=> Q(1))
>>> expr('P & Q | ~R(x, F(x))')
((P & Q) | ~R(x, F(x)))
"""
if isinstance(s, Expr): return s
if isnumber(s): return Expr(s)
## Replace the alternative spellings of operators with canonical spellings
s = s.replace('==>', '>>').replace('<==', '<<')
s = s.replace('<=>', '%').replace('=/=', '^')
## Replace a symbol or number, such as 'P' with 'Expr("P")'
s = re.sub(r'([a-zA-Z0-9_.]+)', r'Expr("\1")', s)
## Now eval the string. (A security hole; do not use with an adversary.)
return eval(s, {'Expr':Expr})
def is_symbol(s):
"A string s is a symbol if it starts with an alphabetic char."
return isinstance(s, str) and s[:1].isalpha()
def is_var_symbol(s):
"A logic variable symbol is an initial-lowercase string."
return is_symbol(s) and s[0].islower()
def is_prop_symbol(s):
"""A proposition logic symbol is an initial-uppercase string other than
TRUE or FALSE."""
return is_symbol(s) and s[0].isupper() and s != 'TRUE' and s != 'FALSE'
def variables(s):
"""Return a set of the variables in expression s.
>>> ppset(variables(F(x, A, y)))
set([x, y])
>>> ppset(variables(F(G(x), z)))
set([x, z])
>>> ppset(variables(expr('F(x, x) & G(x, y) & H(y, z) & R(A, z, z)')))
set([x, y, z])
"""
result = set([])
def walk(s):
if is_variable(s):
result.add(s)
else:
for arg in s.args:
walk(arg)
walk(s)
return result
def is_definite_clause(s):
"""returns True for exprs s of the form A & B & ... & C ==> D,
where all literals are positive. In clause form, this is
~A | ~B | ... | ~C | D, where exactly one clause is positive.
>>> is_definite_clause(expr('Farmer(Mac)'))
True
>>> is_definite_clause(expr('~Farmer(Mac)'))
False
>>> is_definite_clause(expr('(Farmer(f) & Rabbit(r)) ==> Hates(f, r)'))
True
>>> is_definite_clause(expr('(Farmer(f) & ~Rabbit(r)) ==> Hates(f, r)'))
False
>>> is_definite_clause(expr('(Farmer(f) | Rabbit(r)) ==> Hates(f, r)'))
False
"""
if is_symbol(s.op):
return True
elif s.op == '>>':
antecedent, consequent = s.args
return (is_symbol(consequent.op)
and every(lambda arg: is_symbol(arg.op), conjuncts(antecedent)))
else:
return False
def parse_definite_clause(s):
"Return the antecedents and the consequent of a definite clause."
assert is_definite_clause(s)
if is_symbol(s.op):
return [], s
else:
antecedent, consequent = s.args
return conjuncts(antecedent), consequent
## Useful constant Exprs used in examples and code:
TRUE, FALSE, ZERO, ONE, TWO = map(Expr, ['TRUE', 'FALSE', 0, 1, 2])
A, B, C, D, E, F, G, P, Q, x, y, z = map(Expr, 'ABCDEFGPQxyz')
#______________________________________________________________________________
def tt_entails(kb, alpha):
"""Does kb entail the sentence alpha? Use truth tables. For propositional
kb's and sentences. [Fig. 7.10]
>>> tt_entails(expr('P & Q'), expr('Q'))
True
"""
assert not variables(alpha)
return tt_check_all(kb, alpha, prop_symbols(kb & alpha), {})
def tt_check_all(kb, alpha, symbols, model):
"Auxiliary routine to implement tt_entails."
if not symbols:
if pl_true(kb, model):
result = pl_true(alpha, model)
assert result in (True, False)
return result
else:
return True
else:
P, rest = symbols[0], symbols[1:]
return (tt_check_all(kb, alpha, rest, extend(model, P, True)) and
tt_check_all(kb, alpha, rest, extend(model, P, False)))
def prop_symbols(x):
"Return a list of all propositional symbols in x."
if not isinstance(x, Expr):
return []
elif is_prop_symbol(x.op):
return [x]
else:
return list(set(symbol for arg in x.args
for symbol in prop_symbols(arg)))
def tt_true(alpha):
"""Is the propositional sentence alpha a tautology? (alpha will be
coerced to an expr.)
>>> tt_true(expr("(P >> Q) <=> (~P | Q)"))
True
"""
return tt_entails(TRUE, expr(alpha))
def pl_true(exp, model={}):
"""Return True if the propositional logic expression is true in the model,
and False if it is false. If the model does not specify the value for
every proposition, this may return None to indicate 'not obvious';
this may happen even when the expression is tautological."""
op, args = exp.op, exp.args
if exp == TRUE:
return True
elif exp == FALSE:
return False
elif is_prop_symbol(op):
return model.get(exp)
elif op == '~':
p = pl_true(args[0], model)
if p is None: return None
else: return not p
elif op == '|':
result = False
for arg in args:
p = pl_true(arg, model)
if p is True: return True
if p is None: result = None
return result
elif op == '&':
result = True
for arg in args:
p = pl_true(arg, model)
if p is False: return False
if p is None: result = None
return result
p, q = args
if op == '>>':
return pl_true(~p | q, model)
elif op == '<<':
return pl_true(p | ~q, model)
pt = pl_true(p, model)
if pt is None: return None
qt = pl_true(q, model)
if qt is None: return None
if op == '<=>':
return pt == qt
elif op == '^':
return pt != qt
else:
raise ValueError, "illegal operator in logic expression: " + str(exp)
#______________________________________________________________________________
## Convert to Conjunctive Normal Form (CNF)
def to_cnf(s):
"""Convert a propositional logical sentence s to conjunctive normal form.
That is, to the form ((A | ~B | ...) & (B | C | ...) & ...) [p. 253]
>>> to_cnf("~(B|C)")
(~B & ~C)
>>> to_cnf("B <=> (P1|P2)")
((~P1 | B) & (~P2 | B) & (P1 | P2 | ~B))
>>> to_cnf("a | (b & c) | d")
((b | a | d) & (c | a | d))
>>> to_cnf("A & (B | (D & E))")
(A & (D | B) & (E | B))
>>> to_cnf("A | (B | (C | (D & E)))")
((D | A | B | C) & (E | A | B | C))
"""
if isinstance(s, str): s = expr(s)
s = eliminate_implications(s) # Steps 1, 2 from p. 253
s = move_not_inwards(s) # Step 3
return distribute_and_over_or(s) # Step 4
def eliminate_implications(s):
"""Change >>, <<, and <=> into &, |, and ~. That is, return an Expr
that is equivalent to s, but has only &, |, and ~ as logical operators.
>>> eliminate_implications(A >> (~B << C))
((~B | ~C) | ~A)
>>> eliminate_implications(A ^ B)
((A & ~B) | (~A & B))
"""
if not s.args or is_symbol(s.op): return s ## (Atoms are unchanged.)
args = map(eliminate_implications, s.args)
a, b = args[0], args[-1]
if s.op == '>>':
return (b | ~a)
elif s.op == '<<':
return (a | ~b)
elif s.op == '<=>':
return (a | ~b) & (b | ~a)
elif s.op == '^':
assert len(args) == 2 ## TODO: relax this restriction
return (a & ~b) | (~a & b)
else:
assert s.op in ('&', '|', '~')
return Expr(s.op, *args)
def move_not_inwards(s):
"""Rewrite sentence s by moving negation sign inward.
>>> move_not_inwards(~(A | B))
(~A & ~B)
>>> move_not_inwards(~(A & B))
(~A | ~B)
>>> move_not_inwards(~(~(A | ~B) | ~~C))
((A | ~B) & ~C)
"""
if s.op == '~':
NOT = lambda b: move_not_inwards(~b)
a = s.args[0]
if a.op == '~': return move_not_inwards(a.args[0]) # ~~A ==> A
if a.op =='&': return associate('|', map(NOT, a.args))
if a.op =='|': return associate('&', map(NOT, a.args))
return s
elif is_symbol(s.op) or not s.args:
return s
else:
return Expr(s.op, *map(move_not_inwards, s.args))
def distribute_and_over_or(s):
"""Given a sentence s consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in CNF.
>>> distribute_and_over_or((A & B) | C)
((A | C) & (B | C))
"""
if s.op == '|':
s = associate('|', s.args)
if s.op != '|':
return distribute_and_over_or(s)
if len(s.args) == 0:
return FALSE
if len(s.args) == 1:
return distribute_and_over_or(s.args[0])
conj = find_if((lambda d: d.op == '&'), s.args)
if not conj:
return s
others = [a for a in s.args if a is not conj]
rest = associate('|', others)
return associate('&', [distribute_and_over_or(c|rest)
for c in conj.args])
elif s.op == '&':
return associate('&', map(distribute_and_over_or, s.args))
else:
return s
def associate(op, args):
"""Given an associative op, return an expression with the same
meaning as Expr(op, *args), but flattened -- that is, with nested
instances of the same op promoted to the top level.
>>> associate('&', [(A&B),(B|C),(B&C)])
(A & B & (B | C) & B & C)
>>> associate('|', [A|(B|(C|(A&B)))])
(A | B | C | (A & B))
"""
args = dissociate(op, args)
if len(args) == 0:
return _op_identity[op]
elif len(args) == 1:
return args[0]
else:
return Expr(op, *args)
_op_identity = {'&':TRUE, '|':FALSE, '+':ZERO, '*':ONE}
def dissociate(op, args):
"""Given an associative op, return a flattened list result such
that Expr(op, *result) means the same as Expr(op, *args)."""
result = []
def collect(subargs):
for arg in subargs:
if arg.op == op: collect(arg.args)
else: result.append(arg)
collect(args)
return result
def conjuncts(s):
"""Return a list of the conjuncts in the sentence s.
>>> conjuncts(A & B)
[A, B]
>>> conjuncts(A | B)
[(A | B)]
"""
return dissociate('&', [s])
def disjuncts(s):
"""Return a list of the disjuncts in the sentence s.
>>> disjuncts(A | B)
[A, B]
>>> disjuncts(A & B)
[(A & B)]
"""
return dissociate('|', [s])
#______________________________________________________________________________
def pl_resolution(KB, alpha):
"Propositional-logic resolution: say if alpha follows from KB. [Fig. 7.12]"
clauses = KB.clauses + conjuncts(to_cnf(~alpha))
new = set()
while True:
n = len(clauses)
pairs = [(clauses[i], clauses[j])
for i in range(n) for j in range(i+1, n)]
for (ci, cj) in pairs:
resolvents = pl_resolve(ci, cj)
if FALSE in resolvents: return True
new = new.union(set(resolvents))
if new.issubset(set(clauses)): return False
for c in new:
if c not in clauses: clauses.append(c)
def pl_resolve(ci, cj):
"""Return all clauses that can be obtained by resolving clauses ci and cj.
>>> for res in pl_resolve(to_cnf(A|B|C), to_cnf(~B|~C|F)):
... ppset(disjuncts(res))
set([A, C, F, ~C])
set([A, B, F, ~B])
"""
clauses = []
for di in disjuncts(ci):
for dj in disjuncts(cj):
if di == ~dj or ~di == dj:
dnew = unique(removeall(di, disjuncts(ci)) +
removeall(dj, disjuncts(cj)))
clauses.append(associate('|', dnew))
return clauses
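# A short worked example (using the wumpus-world sentence that also appears
# in the tests below): resolution proves there is no pit in [1,2].
# >>> pl_resolution(PropKB(expr("(B11 <=> (P12 | P21)) & ~B11")), expr("~P12"))
# True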
#______________________________________________________________________________
class PropDefiniteKB(PropKB):
"A KB of propositional definite clauses."
def tell(self, sentence):
"Add a definite clause to this KB."
assert is_definite_clause(sentence), "Must be definite clause"
self.clauses.append(sentence)
def ask_generator(self, query):
"Yield the empty substitution if KB implies query; else nothing."
if pl_fc_entails(self.clauses, query):
yield {}
def retract(self, sentence):
self.clauses.remove(sentence)
def clauses_with_premise(self, p):
"""Return a list of the clauses in KB that have p in their premise.
This could be cached away for O(1) speed, but we'll recompute it."""
return [c for c in self.clauses
if c.op == '>>' and p in conjuncts(c.args[0])]
def pl_fc_entails(KB, q):
"""Use forward chaining to see if a PropDefiniteKB entails symbol q.
[Fig. 7.15]
>>> pl_fc_entails(Fig[7,15], expr('Q'))
True
"""
count = dict([(c, len(conjuncts(c.args[0]))) for c in KB.clauses
if c.op == '>>'])
inferred = DefaultDict(False)
agenda = [s for s in KB.clauses if is_prop_symbol(s.op)]
while agenda:
p = agenda.pop()
if p == q: return True
if not inferred[p]:
inferred[p] = True
for c in KB.clauses_with_premise(p):
count[c] -= 1
if count[c] == 0:
agenda.append(c.args[1])
return False
## Wumpus World example [Fig. 7.13]
Fig[7,13] = expr("(B11 <=> (P12 | P21)) & ~B11")
## Propositional Logic Forward Chaining example [Fig. 7.15]
Fig[7,15] = PropDefiniteKB()
for s in "P>>Q (L&M)>>P (B&L)>>M (A&P)>>L (A&B)>>L A B".split():
Fig[7,15].tell(expr(s))
#______________________________________________________________________________
# DPLL-Satisfiable [Fig. 7.17]
def dpll_satisfiable(s):
"""Check satisfiability of a propositional sentence.
This differs from the book code in two ways: (1) it returns a model
rather than True when it succeeds; this is more useful. (2) The
function find_pure_symbol is passed a list of unknown clauses, rather
than a list of all clauses and the model; this is more efficient.
>>> ppsubst(dpll_satisfiable(A&~B))
{A: True, B: False}
>>> dpll_satisfiable(P&~P)
False
"""
clauses = conjuncts(to_cnf(s))
symbols = prop_symbols(s)
return dpll(clauses, symbols, {})
def dpll(clauses, symbols, model):
"See if the clauses are true in a partial model."
unknown_clauses = [] ## clauses with an unknown truth value
for c in clauses:
val = pl_true(c, model)
if val == False:
return False
if val != True:
unknown_clauses.append(c)
if not unknown_clauses:
return model
P, value = find_pure_symbol(symbols, unknown_clauses)
if P:
return dpll(clauses, removeall(P, symbols), extend(model, P, value))
P, value = find_unit_clause(clauses, model)
if P:
return dpll(clauses, removeall(P, symbols), extend(model, P, value))
P, symbols = symbols[0], symbols[1:]
return (dpll(clauses, symbols, extend(model, P, True)) or
dpll(clauses, symbols, extend(model, P, False)))
def find_pure_symbol(symbols, clauses):
"""Find a symbol and its value if it appears only as a positive literal
(or only as a negative) in clauses.
>>> find_pure_symbol([A, B, C], [A|~B,~B|~C,C|A])
(A, True)
"""
for s in symbols:
found_pos, found_neg = False, False
for c in clauses:
if not found_pos and s in disjuncts(c): found_pos = True
if not found_neg and ~s in disjuncts(c): found_neg = True
if found_pos != found_neg: return s, found_pos
return None, None
def find_unit_clause(clauses, model):
"""Find a forced assignment if possible from a clause with only 1
variable not bound in the model.
>>> find_unit_clause([A|B|C, B|~C, ~A|~B], {A:True})
(B, False)
"""
for clause in clauses:
P, value = unit_clause_assign(clause, model)
if P: return P, value
return None, None
def unit_clause_assign(clause, model):
"""Return a single variable/value pair that makes clause true in
the model, if possible.
>>> unit_clause_assign(A|B|C, {A:True})
(None, None)
>>> unit_clause_assign(B|~C, {A:True})
(None, None)
>>> unit_clause_assign(~A|~B, {A:True})
(B, False)
"""
P, value = None, None
for literal in disjuncts(clause):
sym, positive = inspect_literal(literal)
if sym in model:
if model[sym] == positive:
return None, None # clause already True
elif P:
return None, None # more than 1 unbound variable
else:
P, value = sym, positive
return P, value
def inspect_literal(literal):
"""The symbol in this literal, and the value it should take to
make the literal true.
>>> inspect_literal(P)
(P, True)
>>> inspect_literal(~P)
(P, False)
"""
if literal.op == '~':
return literal.args[0], False
else:
return literal, True
#______________________________________________________________________________
# Walk-SAT [Fig. 7.18]
def WalkSAT(clauses, p=0.5, max_flips=10000):
    ## model is a random assignment of true/false to the symbols in clauses.
    ## clauses is a list of Exprs, so collect the symbols clause by clause.
    ## See ~/aima1e/print1/manual/knowledge+logic-answers.tex ???
    symbols = list(set(s for clause in clauses for s in prop_symbols(clause)))
    model = dict([(s, random.choice([True, False])) for s in symbols])
    for i in range(max_flips):
        satisfied, unsatisfied = [], []
        for clause in clauses:
            if_(pl_true(clause, model), satisfied, unsatisfied).append(clause)
        if not unsatisfied: ## if model satisfies all the clauses
            return model
        clause = random.choice(unsatisfied)
        if probability(p):
            sym = random.choice(prop_symbols(clause))
        else:
            ## Flip the symbol in clause that maximizes number of sat. clauses
            def sat_count(sym):
                model[sym] = not model[sym]
                count = len([c for c in clauses if pl_true(c, model)])
                model[sym] = not model[sym]
                return count
            sym = argmax(prop_symbols(clause), sat_count)
        model[sym] = not model[sym]
    return None ## no satisfying model found within max_flips
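# Usage sketch for the completed WalkSAT above (added for illustration): it
# takes a *list* of clauses, so split a CNF sentence first. A model dict means
# success; None only means no model was found within max_flips -- WalkSAT is
# incomplete, so None is not a proof of unsatisfiability.
# >>> s = expr("(A | B) & (~A | C)")
# >>> model = WalkSAT(conjuncts(to_cnf(s)))
# >>> model is None or pl_true(s, model)
# True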
#______________________________________________________________________________
class HybridWumpusAgent(agents.Agent):
    """An agent for the wumpus world that does logical inference. [Fig. 7.19]"""
def __init__(self):
unimplemented()
def plan_route(current, goals, allowed):
unimplemented()
#______________________________________________________________________________
def SAT_plan(init, transition, goal, t_max, SAT_solver=dpll_satisfiable):
"[Fig. 7.22]"
for t in range(t_max):
cnf = translate_to_SAT(init, transition, goal, t)
model = SAT_solver(cnf)
if model is not False:
return extract_solution(model)
return None
def translate_to_SAT(init, transition, goal, t):
unimplemented()
def extract_solution(model):
unimplemented()
#______________________________________________________________________________
def unify(x, y, s):
"""Unify expressions x,y with substitution s; return a substitution that
would make x,y equal, or None if x,y can not unify. x and y can be
variables (e.g. Expr('x')), constants, lists, or Exprs. [Fig. 9.1]
>>> ppsubst(unify(x + y, y + C, {}))
{x: y, y: C}
"""
if s is None:
return None
elif x == y:
return s
elif is_variable(x):
return unify_var(x, y, s)
elif is_variable(y):
return unify_var(y, x, s)
elif isinstance(x, Expr) and isinstance(y, Expr):
return unify(x.args, y.args, unify(x.op, y.op, s))
elif isinstance(x, str) or isinstance(y, str):
return None
elif issequence(x) and issequence(y) and len(x) == len(y):
if not x: return s
return unify(x[1:], y[1:], unify(x[0], y[0], s))
else:
return None
def is_variable(x):
"A variable is an Expr with no args and a lowercase symbol as the op."
return isinstance(x, Expr) and not x.args and is_var_symbol(x.op)
def unify_var(var, x, s):
if var in s:
return unify(s[var], x, s)
elif occur_check(var, x, s):
return None
else:
return extend(s, var, x)
def occur_check(var, x, s):
"""Return true if variable var occurs anywhere in x
(or in subst(s, x), if s has a binding for x)."""
if var == x:
return True
elif is_variable(x) and x in s:
return occur_check(var, s[x], s)
elif isinstance(x, Expr):
return (occur_check(var, x.op, s) or
occur_check(var, x.args, s))
elif isinstance(x, (list, tuple)):
return some(lambda element: occur_check(var, element, s), x)
else:
return False
def extend(s, var, val):
"""Copy the substitution s and extend it by setting var to val;
return copy.
>>> ppsubst(extend({x: 1}, y, 2))
{x: 1, y: 2}
"""
s2 = s.copy()
s2[var] = val
return s2
def subst(s, x):
"""Substitute the substitution s into the expression x.
>>> subst({x: 42, y:0}, F(x) + y)
(F(42) + 0)
"""
if isinstance(x, list):
return [subst(s, xi) for xi in x]
elif isinstance(x, tuple):
return tuple([subst(s, xi) for xi in x])
elif not isinstance(x, Expr):
return x
elif is_var_symbol(x.op):
return s.get(x, x)
else:
return Expr(x.op, *[subst(s, arg) for arg in x.args])
def fol_fc_ask(KB, alpha):
"""Inefficient forward chaining for first-order logic. [Fig. 9.3]
KB is a FolKB and alpha must be an atomic sentence."""
while True:
new = {}
for r in KB.clauses:
ps, q = parse_definite_clause(standardize_variables(r))
raise NotImplementedError
def standardize_variables(sentence, dic=None):
"""Replace all the variables in sentence with new variables.
>>> e = expr('F(a, b, c) & G(c, A, 23)')
>>> len(variables(standardize_variables(e)))
3
>>> variables(e).intersection(variables(standardize_variables(e)))
set([])
>>> is_variable(standardize_variables(expr('x')))
True
"""
if dic is None: dic = {}
if not isinstance(sentence, Expr):
return sentence
elif is_var_symbol(sentence.op):
if sentence in dic:
return dic[sentence]
else:
v = Expr('v_%d' % standardize_variables.counter.next())
dic[sentence] = v
return v
else:
return Expr(sentence.op,
*[standardize_variables(a, dic) for a in sentence.args])
standardize_variables.counter = itertools.count()
#______________________________________________________________________________
class FolKB(KB):
"""A knowledge base consisting of first-order definite clauses.
>>> kb0 = FolKB([expr('Farmer(Mac)'), expr('Rabbit(Pete)'),
... expr('(Rabbit(r) & Farmer(f)) ==> Hates(f, r)')])
>>> kb0.tell(expr('Rabbit(Flopsie)'))
>>> kb0.retract(expr('Rabbit(Pete)'))
>>> kb0.ask(expr('Hates(Mac, x)'))[x]
Flopsie
>>> kb0.ask(expr('Wife(Pete, x)'))
False
"""
def __init__(self, initial_clauses=[]):
self.clauses = [] # inefficient: no indexing
for clause in initial_clauses:
self.tell(clause)
def tell(self, sentence):
if is_definite_clause(sentence):
self.clauses.append(sentence)
else:
raise Exception("Not a definite clause: %s" % sentence)
def ask_generator(self, query):
return fol_bc_ask(self, query)
def retract(self, sentence):
self.clauses.remove(sentence)
def fetch_rules_for_goal(self, goal):
return self.clauses
def test_ask(query, kb=None):
q = expr(query)
vars = variables(q)
answers = fol_bc_ask(kb or test_kb, q)
return sorted([pretty(dict((x, v) for x, v in a.items() if x in vars))
for a in answers],
key=repr)
test_kb = FolKB(
map(expr, ['Farmer(Mac)',
'Rabbit(Pete)',
'Mother(MrsMac, Mac)',
'Mother(MrsRabbit, Pete)',
'(Rabbit(r) & Farmer(f)) ==> Hates(f, r)',
'(Mother(m, c)) ==> Loves(m, c)',
'(Mother(m, r) & Rabbit(r)) ==> Rabbit(m)',
'(Farmer(f)) ==> Human(f)',
# Note that this order of conjuncts
# would result in infinite recursion:
#'(Human(h) & Mother(m, h)) ==> Human(m)'
'(Mother(m, h) & Human(h)) ==> Human(m)'
])
)
crime_kb = FolKB(
map(expr,
['(American(x) & Weapon(y) & Sells(x, y, z) & Hostile(z)) ==> Criminal(x)',
'Owns(Nono, M1)',
'Missile(M1)',
'(Missile(x) & Owns(Nono, x)) ==> Sells(West, x, Nono)',
'Missile(x) ==> Weapon(x)',
'Enemy(x, America) ==> Hostile(x)',
'American(West)',
'Enemy(Nono, America)'
])
)
def fol_bc_ask(KB, query):
"""A simple backward-chaining algorithm for first-order logic. [Fig. 9.6]
KB should be an instance of FolKB, and goals a list of literals.
>>> test_ask('Farmer(x)')
['{x: Mac}']
>>> test_ask('Human(x)')
['{x: Mac}', '{x: MrsMac}']
>>> test_ask('Hates(x, y)')
['{x: Mac, y: MrsRabbit}', '{x: Mac, y: Pete}']
>>> test_ask('Loves(x, y)')
['{x: MrsMac, y: Mac}', '{x: MrsRabbit, y: Pete}']
>>> test_ask('Rabbit(x)')
['{x: MrsRabbit}', '{x: Pete}']
>>> test_ask('Criminal(x)', crime_kb)
['{x: West}']
"""
return fol_bc_or(KB, query, {})
def fol_bc_or(KB, goal, theta):
for rule in KB.fetch_rules_for_goal(goal):
lhs, rhs = parse_definite_clause(standardize_variables(rule))
for theta1 in fol_bc_and(KB, lhs, unify(rhs, goal, theta)):
yield theta1
def fol_bc_and(KB, goals, theta):
if theta is None:
pass
elif not goals:
yield theta
else:
first, rest = goals[0], goals[1:]
for theta1 in fol_bc_or(KB, subst(theta, first), theta):
for theta2 in fol_bc_and(KB, rest, theta1):
yield theta2
#______________________________________________________________________________
# Example application (not in the book).
# You can use the Expr class to do symbolic differentiation. This used to be
# a part of AI; now it is considered a separate field, Symbolic Algebra.
def diff(y, x):
"""Return the symbolic derivative, dy/dx, as an Expr.
However, you probably want to simplify the results with simp.
>>> diff(x * x, x)
((x * 1) + (x * 1))
>>> simp(diff(x * x, x))
(2 * x)
"""
if y == x: return ONE
elif not y.args: return ZERO
else:
u, op, v = y.args[0], y.op, y.args[-1]
if op == '+': return diff(u, x) + diff(v, x)
elif op == '-' and len(y.args) == 1: return -diff(u, x)
elif op == '-': return diff(u, x) - diff(v, x)
elif op == '*': return u * diff(v, x) + v * diff(u, x)
elif op == '/': return (v*diff(u, x) - u*diff(v, x)) / (v * v)
elif op == '**' and isnumber(v.op): # power rule: exponent is a constant
    return (v * u ** (v - 1) * diff(u, x))
elif op == '**': return (v * u ** (v - 1) * diff(u, x)
+ u ** v * Expr('log')(u) * diff(v, x))
elif op == 'log': return diff(u, x) / u
else: raise ValueError("Unknown op: %s in diff(%s, %s)" % (op, y, x))
def simp(x):
if not x.args: return x
args = map(simp, x.args)
u, op, v = args[0], x.op, args[-1]
if op == '+':
if v == ZERO: return u
if u == ZERO: return v
if u == v: return TWO * u
if u == -v or v == -u: return ZERO
elif op == '-' and len(args) == 1:
if u.op == '-' and len(u.args) == 1: return u.args[0] ## --y ==> y
elif op == '-':
if v == ZERO: return u
if u == ZERO: return -v
if u == v: return ZERO
if u == -v or v == -u: return ZERO
elif op == '*':
if u == ZERO or v == ZERO: return ZERO
if u == ONE: return v
if v == ONE: return u
if u == v: return u ** 2
elif op == '/':
if u == ZERO: return ZERO
if v == ZERO: return Expr('Undefined')
if u == v: return ONE
if u == -v or v == -u: return ZERO
elif op == '**':
if u == ZERO: return ZERO
if v == ZERO: return ONE
if u == ONE: return ONE
if v == ONE: return u
elif op == 'log':
if u == ONE: return ZERO
else: raise ValueError("Unknown op: " + op)
## If we fall through to here, we can not simplify further
return Expr(op, *args)
def d(y, x):
"Differentiate and then simplify."
return simp(diff(y, x))
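# For instance, combining both stages:
# >>> d(x * x + x, x)
# ((2 * x) + 1)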
#_______________________________________________________________________________
# Utilities for doctest cases
# These functions print their arguments in a standard order
# to compensate for the random order in the standard representation
def pretty(x):
t = type(x)
if t is dict: return pretty_dict(x)
elif t is set: return pretty_set(x)
else: return repr(x)
def pretty_dict(d):
"""Return dictionary d's repr but with the items sorted.
>>> pretty_dict({'m': 'M', 'a': 'A', 'r': 'R', 'k': 'K'})
"{'a': 'A', 'k': 'K', 'm': 'M', 'r': 'R'}"
>>> pretty_dict({z: C, y: B, x: A})
'{x: A, y: B, z: C}'
"""
return '{%s}' % ', '.join('%r: %r' % (k, v)
for k, v in sorted(d.items(), key=repr))
def pretty_set(s):
"""Return set s's repr but with the items sorted.
>>> pretty_set(set(['A', 'Q', 'F', 'K', 'Y', 'B']))
"set(['A', 'B', 'F', 'K', 'Q', 'Y'])"
>>> pretty_set(set([z, y, x]))
'set([x, y, z])'
"""
return 'set(%r)' % sorted(s, key=repr)
def pp(x):
print pretty(x)
def ppsubst(s):
"""Pretty-print substitution s"""
ppdict(s)
def ppdict(d):
print pretty_dict(d)
def ppset(s):
print pretty_set(s)
#________________________________________________________________________
class logicTest: """
### PropKB
>>> kb = PropKB()
>>> kb.tell(A & B)
>>> kb.tell(B >> C)
>>> kb.ask(C) ## The result {} means true, with no substitutions
{}
>>> kb.ask(P)
False
>>> kb.retract(B)
>>> kb.ask(C)
False
>>> pl_true(P, {})
>>> pl_true(P | Q, {P: True})
True
# Notice that the function pl_true cannot reason by cases:
>>> pl_true(P | ~P)
# However, tt_true can:
>>> tt_true(P | ~P)
True
# The following are tautologies from [Fig. 7.11]:
>>> tt_true("(A & B) <=> (B & A)")
True
>>> tt_true("(A | B) <=> (B | A)")
True
>>> tt_true("((A & B) & C) <=> (A & (B & C))")
True
>>> tt_true("((A | B) | C) <=> (A | (B | C))")
True
>>> tt_true("~~A <=> A")
True
>>> tt_true("(A >> B) <=> (~B >> ~A)")
True
>>> tt_true("(A >> B) <=> (~A | B)")
True
>>> tt_true("(A <=> B) <=> ((A >> B) & (B >> A))")
True
>>> tt_true("~(A & B) <=> (~A | ~B)")
True
>>> tt_true("~(A | B) <=> (~A & ~B)")
True
>>> tt_true("(A & (B | C)) <=> ((A & B) | (A & C))")
True
>>> tt_true("(A | (B & C)) <=> ((A | B) & (A | C))")
True
# The following are not tautologies:
>>> tt_true(A & ~A)
False
>>> tt_true(A & B)
False
### An earlier version of the code failed on this:
>>> dpll_satisfiable(A & ~B & C & (A | ~D) & (~E | ~D) & (C | ~D) & (~A | ~F) & (E | ~F) & (~D | ~F) & (B | ~C | D) & (A | ~E | F) & (~A | E | D))
{B: False, C: True, A: True, F: False, D: True, E: False}
### [Fig. 7.13]
>>> alpha = expr("~P12")
>>> to_cnf(Fig[7,13] & ~alpha)
((~P12 | B11) & (~P21 | B11) & (P12 | P21 | ~B11) & ~B11 & P12)
>>> tt_entails(Fig[7,13], alpha)
True
>>> pl_resolution(PropKB(Fig[7,13]), alpha)
True
### [Fig. 7.15]
>>> pl_fc_entails(Fig[7,15], expr('SomethingSilly'))
False
### Unification:
>>> unify(x, x, {})
{}
>>> unify(x, 3, {})
{x: 3}
>>> to_cnf((P&Q) | (~P & ~Q))
((~P | P) & (~Q | P) & (~P | Q) & (~Q | Q))
"""
| mit |
dommert/death_turtle | death_turtle/flask_turboduck/tests/auth.py | 4 | 8634 | from __future__ import with_statement
import datetime
try:
import urlparse
except ImportError:
from urllib import parse as urlparse
from flask import get_flashed_messages
from flask import request
from flask import session
from flask import url_for
from flask_turboduck.auth import Auth
from flask_turboduck.auth import LoginForm
from flask_turboduck.tests.base import FlaskPeeweeTestCase
from flask_turboduck.tests.test_app import User
from flask_turboduck.tests.test_app import app
from flask_turboduck.tests.test_app import auth
from flask_turboduck.tests.test_app import db
class TestAuth(Auth):
def setup(self):
pass
class AuthTestCase(FlaskPeeweeTestCase):
def setUp(self):
super(AuthTestCase, self).setUp()
self.test_auth = TestAuth(app, db)
def login(self, username='admin', password='admin', context=None):
context = context or self.app
return context.post('/accounts/login/', data={
'username': username,
'password': password,
})
def logout(self, context=None):
context = context or self.app
return context.post('/accounts/logout/')
def test_table(self):
self.assertEqual(self.test_auth.User._meta.db_table, 'user')
fake_auth = TestAuth(app, db, db_table='peewee_users')
self.assertEqual(fake_auth.User._meta.db_table, 'peewee_users')
def test_login_view(self):
self.create_users()
with self.flask_app.test_client() as c:
resp = c.get('/accounts/login/')
self.assertEqual(resp.status_code, 200)
# check that we have no logged-in user
self.assertContext('user', None)
frm = self.get_context('form')
self.assertTrue(isinstance(frm, LoginForm))
self.assertEqual(frm.data, {'username': None, 'password': None})
# make a post missing the username
resp = c.post('/accounts/login/', data={
'username': '',
'password': 'xxx',
})
self.assertEqual(resp.status_code, 200)
# check form for errors
frm = self.get_context('form')
self.assertEqual(frm.errors, {'username': [u'This field is required.']})
# check that no messages were generated
self.assertFalse('_flashes' in session)
# check that the auth API does not indicate a logged-in user
self.assertEqual(auth.get_logged_in_user(), None)
# make a post with a bad username/password combo
resp = c.post('/accounts/login/', data={
'username': 'normal',
'password': 'baz',
})
self.assertEqual(resp.status_code, 200)
# both fields were present so no form errors, but flash the user
# indicating bad username/password combo
self.assertTrue('_flashes' in session)
messages = get_flashed_messages()
self.assertEqual(messages, [
'Incorrect username or password',
])
# check that the auth API does not indicate a logged-in user
self.assertEqual(auth.get_logged_in_user(), None)
# make a post with an inactive user
resp = c.post('/accounts/login/', data={
'username': 'inactive',
'password': 'inactive',
})
self.assertEqual(resp.status_code, 200)
# still no logged-in user
self.assertContext('user', None)
# check that the auth API does not indicate a logged-in user
self.assertEqual(auth.get_logged_in_user(), None)
# finally post as a known good user
resp = c.post('/accounts/login/', data={
'username': 'normal',
'password': 'normal',
})
self.assertEqual(resp.status_code, 302)
# check that we now have a logged-in user
self.assertEqual(auth.get_logged_in_user(), self.normal)
def test_login_redirect_in_depth(self):
self.create_users()
with self.flask_app.test_client() as c:
resp = c.get('/admin/')
location = resp.location
parsed = urlparse.urlparse(location)
querystring = urlparse.parse_qs(parsed.query)
self.assertEqual(querystring, {'next': ['/admin/']})
# Following the redirect, the next url is passed to context.
location = location.replace('http://localhost', '')
resp = c.get(location)
self.assertEqual(self.get_context('next'), '/admin/')
# Simulate incorrect password.
resp = c.post('/accounts/login/', data={
'username': 'normal',
'password': 'incorrect-password',
'next': '/admin/',
})
self.assertEqual(resp.status_code, 200)
self.assertEqual(self.get_context('next'), '/admin/')
resp = c.post('/accounts/login/', data={
'username': 'normal',
'password': 'normal',
'next': '/admin/',
})
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.headers['location'].endswith('/admin/'))
def test_login_default_redirect(self):
self.create_users()
with self.flask_app.test_client() as c:
resp = c.post('/accounts/login/', data={
'username': 'normal',
'password': 'normal',
})
self.assertEqual(resp.status_code, 302)
location = resp.location.replace('http://localhost', '')
self.assertEqual(location, '/')
def test_login_redirect(self):
self.create_users()
with self.flask_app.test_client() as c:
resp = c.post('/accounts/login/', data={
'username': 'normal',
'password': 'normal',
'next': '/foo-baz/',
})
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.headers['location'].endswith('/foo-baz/'))
def test_login_logout(self):
self.create_users()
with self.flask_app.test_client() as c:
resp = c.post('/accounts/login/', data={
'username': 'normal',
'password': 'normal',
})
self.assertEqual(auth.get_logged_in_user(), self.normal)
resp = c.post('/accounts/logout/')
self.assertEqual(auth.get_logged_in_user(), None)
resp = c.post('/accounts/login/', data={
'username': 'admin',
'password': 'admin',
})
self.assertEqual(auth.get_logged_in_user(), self.admin)
# log back in without logging out
resp = c.post('/accounts/login/', data={
'username': 'normal',
'password': 'normal',
})
self.assertEqual(auth.get_logged_in_user(), self.normal)
def test_login_required(self):
self.create_users()
with self.flask_app.test_client() as c:
resp = c.get('/private/')
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.headers['location'].endswith('/accounts/login/?next=%2Fprivate%2F'))
self.login('normal', 'normal', c)
resp = c.get('/private/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(auth.get_logged_in_user(), self.normal)
self.login('admin', 'admin', c)
resp = c.get('/private/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(auth.get_logged_in_user(), self.admin)
def test_admin_required(self):
self.create_users()
with self.flask_app.test_client() as c:
resp = c.get('/secret/')
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.headers['location'].endswith('/accounts/login/?next=%2Fsecret%2F'))
self.login('normal', 'normal', c)
resp = c.get('/secret/')
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp.headers['location'].endswith('/accounts/login/?next=%2Fsecret%2F'))
self.assertEqual(auth.get_logged_in_user(), self.normal)
self.login('admin', 'admin', c)
resp = c.get('/secret/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(auth.get_logged_in_user(), self.admin)
| apache-2.0 |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/lib2to3/refactor.py | 75 | 27530 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Refactoring framework.
Used as a main program, this can refactor any number of files and/or
recursively descend down directories. Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""
from __future__ import with_statement
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import os
import sys
import logging
import operator
import collections
import StringIO
from itertools import chain
# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
from . import btm_utils as bu
from . import btm_matcher as bm
def get_all_fix_names(fixer_pkg, remove_prefix=True):
"""Return a sorted list of all available fix names in the given package."""
pkg = __import__(fixer_pkg, [], [], ["*"])
fixer_dir = os.path.dirname(pkg.__file__)
fix_names = []
for name in sorted(os.listdir(fixer_dir)):
if name.startswith("fix_") and name.endswith(".py"):
if remove_prefix:
name = name[4:]
fix_names.append(name[:-3])
return fix_names
class _EveryNode(Exception):
pass
def _get_head_types(pat):
""" Accepts a pytree Pattern Node and returns a set
of the pattern types which will match first. """
if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
# NodePatterns must either have no type and no content
# or a type and content -- so they don't get any farther
# Always return leafs
if pat.type is None:
raise _EveryNode
return set([pat.type])
if isinstance(pat, pytree.NegatedPattern):
if pat.content:
return _get_head_types(pat.content)
raise _EveryNode # Negated Patterns don't have a type
if isinstance(pat, pytree.WildcardPattern):
# Recurse on each node in content
r = set()
for p in pat.content:
for x in p:
r.update(_get_head_types(x))
return r
raise Exception("Oh no! I don't understand pattern %s" %(pat))
def _get_headnode_dict(fixer_list):
""" Accepts a list of fixers and returns a dictionary
of head node type --> fixer list. """
head_nodes = collections.defaultdict(list)
every = []
for fixer in fixer_list:
if fixer.pattern:
try:
heads = _get_head_types(fixer.pattern)
except _EveryNode:
every.append(fixer)
else:
for node_type in heads:
head_nodes[node_type].append(fixer)
else:
if fixer._accept_type is not None:
head_nodes[fixer._accept_type].append(fixer)
else:
every.append(fixer)
for node_type in chain(pygram.python_grammar.symbol2number.itervalues(),
pygram.python_grammar.tokens):
head_nodes[node_type].extend(every)
return dict(head_nodes)
def get_fixers_from_package(pkg_name):
"""
Return the fully qualified names for fixers in the package pkg_name.
"""
return [pkg_name + "." + fix_name
for fix_name in get_all_fix_names(pkg_name, False)]
def _identity(obj):
return obj
if sys.version_info < (3, 0):
import codecs
_open_with_encoding = codecs.open
# codecs.open doesn't translate newlines sadly.
def _from_system_newlines(input):
return input.replace(u"\r\n", u"\n")
def _to_system_newlines(input):
if os.linesep != "\n":
return input.replace(u"\n", os.linesep)
else:
return input
else:
_open_with_encoding = open
_from_system_newlines = _identity
_to_system_newlines = _identity
def _detect_future_features(source):
have_docstring = False
gen = tokenize.generate_tokens(StringIO.StringIO(source).readline)
def advance():
tok = gen.next()
return tok[0], tok[1]
ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
features = set()
try:
while True:
tp, value = advance()
if tp in ignore:
continue
elif tp == token.STRING:
if have_docstring:
break
have_docstring = True
elif tp == token.NAME and value == u"from":
tp, value = advance()
if tp != token.NAME or value != u"__future__":
break
tp, value = advance()
if tp != token.NAME or value != u"import":
break
tp, value = advance()
if tp == token.OP and value == u"(":
tp, value = advance()
while tp == token.NAME:
features.add(value)
tp, value = advance()
if tp != token.OP or value != u",":
break
tp, value = advance()
else:
break
except StopIteration:
pass
return frozenset(features)
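# For instance (the behaviour the refactoring engine relies on):
# _detect_future_features("from __future__ import print_function\n")
# returns frozenset(['print_function']); source with no __future__ imports
# yields an empty frozenset.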
class FixerError(Exception):
"""A fixer could not be loaded."""
class RefactoringTool(object):
_default_options = {"print_function" : False}
CLASS_PREFIX = "Fix" # The prefix for fixer classes
FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
def __init__(self, fixer_names, options=None, explicit=None):
"""Initializer.
Args:
fixer_names: a list of fixers to import
options: a dict with configuration.
explicit: a list of fixers to run even if they are explicit.
"""
self.fixers = fixer_names
self.explicit = explicit or []
self.options = self._default_options.copy()
if options is not None:
self.options.update(options)
if self.options["print_function"]:
self.grammar = pygram.python_grammar_no_print_statement
else:
self.grammar = pygram.python_grammar
self.errors = []
self.logger = logging.getLogger("RefactoringTool")
self.fixer_log = []
self.wrote = False
self.driver = driver.Driver(self.grammar,
convert=pytree.convert,
logger=self.logger)
self.pre_order, self.post_order = self.get_fixers()
self.files = [] # List of files that were or should be modified
self.BM = bm.BottomMatcher()
self.bmi_pre_order = [] # Bottom Matcher incompatible fixers
self.bmi_post_order = []
for fixer in chain(self.post_order, self.pre_order):
if fixer.BM_compatible:
self.BM.add_fixer(fixer)
# remove fixers that will be handled by the bottom-up
# matcher
elif fixer in self.pre_order:
self.bmi_pre_order.append(fixer)
elif fixer in self.post_order:
self.bmi_post_order.append(fixer)
self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)
def get_fixers(self):
"""Inspects the options to load the requested patterns and handlers.
Returns:
(pre_order, post_order), where pre_order is the list of fixers that
want a pre-order AST traversal, and post_order is the list that want
post-order traversal.
"""
pre_order_fixers = []
post_order_fixers = []
for fix_mod_path in self.fixers:
mod = __import__(fix_mod_path, {}, {}, ["*"])
fix_name = fix_mod_path.rsplit(".", 1)[-1]
if fix_name.startswith(self.FILE_PREFIX):
fix_name = fix_name[len(self.FILE_PREFIX):]
parts = fix_name.split("_")
class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
try:
fix_class = getattr(mod, class_name)
except AttributeError:
raise FixerError("Can't find %s.%s" % (fix_name, class_name))
fixer = fix_class(self.options, self.fixer_log)
if fixer.explicit and self.explicit is not True and \
fix_mod_path not in self.explicit:
self.log_message("Skipping implicit fixer: %s", fix_name)
continue
self.log_debug("Adding transformation: %s", fix_name)
if fixer.order == "pre":
pre_order_fixers.append(fixer)
elif fixer.order == "post":
post_order_fixers.append(fixer)
else:
raise FixerError("Illegal fixer order: %r" % fixer.order)
key_func = operator.attrgetter("run_order")
pre_order_fixers.sort(key=key_func)
post_order_fixers.sort(key=key_func)
return (pre_order_fixers, post_order_fixers)
def log_error(self, msg, *args, **kwds):
"""Called when an error occurs."""
raise
def log_message(self, msg, *args):
"""Hook to log a message."""
if args:
msg = msg % args
self.logger.info(msg)
def log_debug(self, msg, *args):
if args:
msg = msg % args
self.logger.debug(msg)
def print_output(self, old_text, new_text, filename, equal):
"""Called with the old version, new version, and filename of a
refactored file."""
pass
def refactor(self, items, write=False, doctests_only=False):
"""Refactor a list of files and directories."""
for dir_or_file in items:
if os.path.isdir(dir_or_file):
self.refactor_dir(dir_or_file, write, doctests_only)
else:
self.refactor_file(dir_or_file, write, doctests_only)
def refactor_dir(self, dir_name, write=False, doctests_only=False):
"""Descends down a directory and refactor every Python file found.
Python files are assumed to have a .py extension.
Files and subdirectories starting with '.' are skipped.
"""
py_ext = os.extsep + "py"
for dirpath, dirnames, filenames in os.walk(dir_name):
self.log_debug("Descending into %s", dirpath)
dirnames.sort()
filenames.sort()
for name in filenames:
if (not name.startswith(".") and
os.path.splitext(name)[1] == py_ext):
fullname = os.path.join(dirpath, name)
self.refactor_file(fullname, write, doctests_only)
# Modify dirnames in-place to remove subdirs with leading dots
dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
def _read_python_source(self, filename):
"""
Do our best to decode a Python source file correctly.
"""
try:
f = open(filename, "rb")
except IOError as err:
self.log_error("Can't open %s: %s", filename, err)
return None, None
try:
encoding = tokenize.detect_encoding(f.readline)[0]
finally:
f.close()
with _open_with_encoding(filename, "r", encoding=encoding) as f:
return _from_system_newlines(f.read()), encoding
def refactor_file(self, filename, write=False, doctests_only=False):
"""Refactors a file."""
input, encoding = self._read_python_source(filename)
if input is None:
# Reading the file failed.
return
input += u"\n" # Silence certain parse errors
if doctests_only:
self.log_debug("Refactoring doctests in %s", filename)
output = self.refactor_docstring(input, filename)
if output != input:
self.processed_file(output, filename, input, write, encoding)
else:
self.log_debug("No doctest changes in %s", filename)
else:
tree = self.refactor_string(input, filename)
if tree and tree.was_changed:
# The [:-1] is to take off the \n we added earlier
self.processed_file(unicode(tree)[:-1], filename,
write=write, encoding=encoding)
else:
self.log_debug("No changes in %s", filename)
def refactor_string(self, data, name):
"""Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse.
"""
features = _detect_future_features(data)
if "print_function" in features:
self.driver.grammar = pygram.python_grammar_no_print_statement
try:
tree = self.driver.parse_string(data)
except Exception as err:
self.log_error("Can't parse %s: %s: %s",
name, err.__class__.__name__, err)
return
finally:
self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name)
return tree
def refactor_stdin(self, doctests_only=False):
input = sys.stdin.read()
if doctests_only:
self.log_debug("Refactoring doctests in stdin")
output = self.refactor_docstring(input, "<stdin>")
if output != input:
self.processed_file(output, "<stdin>", input)
else:
self.log_debug("No doctest changes in stdin")
else:
tree = self.refactor_string(input, "<stdin>")
if tree and tree.was_changed:
self.processed_file(unicode(tree), "<stdin>", input)
else:
self.log_debug("No changes in stdin")
def refactor_tree(self, tree, name):
"""Refactors a parse tree (modifying the tree in place).
For compatible patterns the bottom matcher module is
used. Otherwise the tree is traversed node-to-node for
matches.
Args:
tree: a pytree.Node instance representing the root of the tree
to be refactored.
name: a human-readable name for this tree.
Returns:
True if the tree was modified, False otherwise.
"""
for fixer in chain(self.pre_order, self.post_order):
fixer.start_tree(tree, name)
        # use traditional matching for the incompatible fixers
self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
self.traverse_by(self.bmi_post_order_heads, tree.post_order())
# obtain a set of candidate nodes
match_set = self.BM.run(tree.leaves())
while any(match_set.values()):
for fixer in self.BM.fixers:
if fixer in match_set and match_set[fixer]:
                    # sort by depth; apply fixers from the bottom of the AST to the top
match_set[fixer].sort(key=pytree.Base.depth, reverse=True)
if fixer.keep_line_order:
                        # some fixers (e.g. fix_imports) must be applied
                        # with the original file's line order
match_set[fixer].sort(key=pytree.Base.get_lineno)
for node in list(match_set[fixer]):
if node in match_set[fixer]:
match_set[fixer].remove(node)
try:
find_root(node)
except AssertionError:
                            # this node has been cut off from a
                            # previous transformation; skip
continue
if node.fixers_applied and fixer in node.fixers_applied:
# do not apply the same fixer again
continue
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
#new.fixers_applied.append(fixer)
for node in new.post_order():
# do not apply the fixer again to
# this or any subnode
if not node.fixers_applied:
node.fixers_applied = []
node.fixers_applied.append(fixer)
# update the original match set for
# the added code
new_matches = self.BM.run(new.leaves())
for fxr in new_matches:
                                    if fxr not in match_set:
                                        match_set[fxr] = []
match_set[fxr].extend(new_matches[fxr])
for fixer in chain(self.pre_order, self.post_order):
fixer.finish_tree(tree, name)
return tree.was_changed
def traverse_by(self, fixers, traversal):
"""Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
fixers: a list of fixer instances.
traversal: a generator that yields AST nodes.
Returns:
None
"""
if not fixers:
return
for node in traversal:
for fixer in fixers[node.type]:
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
node = new
def processed_file(self, new_text, filename, old_text=None, write=False,
encoding=None):
"""
Called when a file has been refactored, and there are changes.
"""
self.files.append(filename)
if old_text is None:
old_text = self._read_python_source(filename)[0]
if old_text is None:
return
equal = old_text == new_text
self.print_output(old_text, new_text, filename, equal)
if equal:
self.log_debug("No changes to %s", filename)
return
if write:
self.write_file(new_text, filename, old_text, encoding)
else:
self.log_debug("Not writing changes to %s", filename)
def write_file(self, new_text, filename, old_text, encoding=None):
"""Writes a string to a file.
        The caller (processed_file) is responsible for showing the diff via
        print_output() first; this method itself only rewrites the file, and
        it is only invoked when the write option is set.
"""
try:
f = _open_with_encoding(filename, "w", encoding=encoding)
except os.error as err:
self.log_error("Can't create %s: %s", filename, err)
return
try:
f.write(_to_system_newlines(new_text))
except os.error as err:
self.log_error("Can't write %s: %s", filename, err)
finally:
f.close()
self.log_debug("Wrote changes to %s", filename)
self.wrote = True
PS1 = ">>> "
PS2 = "... "
def refactor_docstring(self, input, filename):
"""Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.)
"""
result = []
block = None
block_lineno = None
indent = None
lineno = 0
for line in input.splitlines(True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block_lineno = lineno
block = [line]
i = line.find(self.PS1)
indent = line[:i]
elif (indent is not None and
(line.startswith(indent + self.PS2) or
line == indent + self.PS2.rstrip() + u"\n")):
block.append(line)
else:
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block = None
indent = None
result.append(line)
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
return u"".join(result)
def refactor_doctest(self, block, lineno, indent, filename):
"""Refactors one doctest.
A doctest is given as a block of lines, the first of which starts
with ">>>" (possibly indented), while the remaining lines start
with "..." (identically indented).
"""
try:
tree = self.parse_block(block, lineno, indent)
except Exception as err:
if self.logger.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.rstrip(u"\n"))
self.log_error("Can't parse docstring in %s line %s: %s: %s",
filename, lineno, err.__class__.__name__, err)
return block
if self.refactor_tree(tree, filename):
new = unicode(tree).splitlines(True)
# Undo the adjustment of the line numbers in wrap_toks() below.
clipped, new = new[:lineno-1], new[lineno-1:]
assert clipped == [u"\n"] * (lineno-1), clipped
if not new[-1].endswith(u"\n"):
new[-1] += u"\n"
block = [indent + self.PS1 + new.pop(0)]
if new:
block += [indent + self.PS2 + line for line in new]
return block
def summarize(self):
if self.wrote:
were = "were"
else:
were = "need to be"
if not self.files:
self.log_message("No files %s modified.", were)
else:
self.log_message("Files that %s modified:", were)
for file in self.files:
self.log_message(file)
if self.fixer_log:
self.log_message("Warnings/messages while refactoring:")
for message in self.fixer_log:
self.log_message(message)
if self.errors:
if len(self.errors) == 1:
self.log_message("There was 1 error:")
else:
self.log_message("There were %d errors:", len(self.errors))
for msg, args, kwds in self.errors:
self.log_message(msg, *args, **kwds)
def parse_block(self, block, lineno, indent):
"""Parses a block into a tree.
This is necessary to get correct line number / offset information
in the parser diagnostics and embedded into the parse tree.
"""
tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
tree.future_features = frozenset()
return tree
def wrap_toks(self, block, lineno, indent):
"""Wraps a tokenize stream to systematically modify start/end."""
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
for type, value, (line0, col0), (line1, col1), line_text in tokens:
line0 += lineno - 1
line1 += lineno - 1
# Don't bother updating the columns; this is too complicated
# since line_text would also have to be updated and it would
# still break for tokens spanning lines. Let the user guess
# that the column numbers for doctests are relative to the
# end of the prompt string (PS1 or PS2).
yield type, value, (line0, col0), (line1, col1), line_text
def gen_lines(self, block, indent):
"""Generates lines as expected by tokenize from a list of lines.
This strips the first len(indent + self.PS1) characters off each line.
"""
prefix1 = indent + self.PS1
prefix2 = indent + self.PS2
prefix = prefix1
for line in block:
if line.startswith(prefix):
yield line[len(prefix):]
elif line == prefix.rstrip() + u"\n":
yield u"\n"
else:
raise AssertionError("line=%r, prefix=%r" % (line, prefix))
prefix = prefix2
while True:
yield ""
class MultiprocessingUnsupported(Exception):
pass
class MultiprocessRefactoringTool(RefactoringTool):
def __init__(self, *args, **kwargs):
super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
self.queue = None
self.output_lock = None
def refactor(self, items, write=False, doctests_only=False,
num_processes=1):
if num_processes == 1:
return super(MultiprocessRefactoringTool, self).refactor(
items, write, doctests_only)
try:
import multiprocessing
except ImportError:
raise MultiprocessingUnsupported
if self.queue is not None:
raise RuntimeError("already doing multiple processes")
self.queue = multiprocessing.JoinableQueue()
self.output_lock = multiprocessing.Lock()
processes = [multiprocessing.Process(target=self._child)
for i in xrange(num_processes)]
try:
for p in processes:
p.start()
super(MultiprocessRefactoringTool, self).refactor(items, write,
doctests_only)
finally:
self.queue.join()
for i in xrange(num_processes):
self.queue.put(None)
for p in processes:
if p.is_alive():
p.join()
self.queue = None
def _child(self):
task = self.queue.get()
while task is not None:
args, kwargs = task
try:
super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
finally:
self.queue.task_done()
task = self.queue.get()
def refactor_file(self, *args, **kwargs):
if self.queue is not None:
self.queue.put((args, kwargs))
else:
return super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
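# Editor's sketch: with num_processes > 1 the overridden refactor() feeds a
# JoinableQueue consumed by _child() workers; on platforms without the
# multiprocessing module, MultiprocessingUnsupported signals the serial fallback.
def _demo_refactor_parallel(paths):
    tool = MultiprocessRefactoringTool(["lib2to3.fixes.fix_print"])
    try:
        tool.refactor(paths, write=False, num_processes=4)
    except MultiprocessingUnsupported:
        tool.refactor(paths, write=False)  # serial fallback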
| gpl-3.0 |
sergiopasra/pyemir | emirdrp/recipes/image/tests/test_dither.py | 3 | 4803 |
import pytest
import astropy.io.fits as fits
from astropy import wcs
from astropy.modeling.functional_models import Gaussian2D
import numpy
import numpy.random
import numina.core
import numina.exceptions
from ..join import JoinDitheredImagesRecipe
def create_wcs(crpix=None):
w = wcs.WCS(naxis=2)
crpix = [50.0, 50.0] if crpix is None else crpix
w.wcs.crpix = crpix
w.wcs.crval = [0.0, 0.0]
w.wcs.cdelt = [1.0, 1.0]
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.cd = numpy.array([
[5.41615466e-05, -2.55232165e-07],
[2.55232165e-07, 5.41615466e-05]
])
return w
def dither_pattern(center, base_angle, dist, npoints):
base_angle_rad = base_angle / 180.0 * numpy.pi
step = 2*numpy.pi / npoints
angles = base_angle_rad + numpy.arange(0.0, 2 * numpy.pi, step)
x = center[0] + dist * numpy.cos(angles)
y = center[1] + dist * numpy.sin(angles)
return numpy.asarray([x, y]).T
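# Editor's sketch: dither_pattern() places npoints evenly around a circle of
# radius dist centred on center, starting at base_angle degrees.
def _demo_dither_pattern():
    pts = dither_pattern([50, 50], 0.0, 20.0, 4)
    # Four points at 0, 90, 180 and 270 degrees around (50, 50):
    assert numpy.allclose(pts, [[70, 50], [50, 70], [30, 50], [50, 30]])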
def create_frame(background=0, std=100, crpix=None, keys=None):
size = (100, 100)
    # Build the WCS for this frame; bind it to a local name that does not
    # shadow the imported astropy ``wcs`` module.
    frame_wcs = create_wcs(crpix)
    y, x = numpy.mgrid[:100, :100]
    data = numpy.random.normal(background, std, size)
    model = Gaussian2D(
        amplitude=30*background,
        x_mean=frame_wcs.wcs.crpix[0]-3.5,
        y_mean=frame_wcs.wcs.crpix[1]-12.8,
        x_stddev=3.0,
        y_stddev=4.0
    )
    data += model(x, y)
    hdu = fits.PrimaryHDU(data)
    hdu.header = frame_wcs.to_header()
if keys:
for k, v in keys.items():
hdu.header[k] = v
return fits.HDUList([hdu])
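# Editor's sketch: create_frame() builds one noisy 100x100 frame with a
# Gaussian source near the WCS reference pixel plus any extra header keys.
def _demo_create_frame():
    hdul = create_frame(background=10.0, crpix=[50.0, 50.0], keys={'NUM-SK': 1})
    assert hdul[0].data.shape == (100, 100)
    assert hdul[0].header['NUM-SK'] == 1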
def create_ob(value, nimages, crpix, nstare=3, exptime=100.0, starttime=0.0):
frames = []
for i in range(nimages):
base = starttime
off1 = i * exptime
off2 = off1 + exptime
t1 = base + off1
t2 = base + off2
keys = {'TSUTC1': t1, 'TSUTC2': t2, 'NUM-NCOM': nstare, 'NUM-SK': 1}
frame = numina.core.DataFrame(frame=create_frame(background=value, keys=keys, crpix=crpix[i]))
frames.append(frame)
obsresult = numina.core.ObservationResult()
obsresult.frames = frames
return obsresult
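# Editor's sketch: create_ob() stacks consecutive exposures, so frame i spans
# [starttime + i*exptime, starttime + (i+1)*exptime]. This assumes numina's
# DataFrame.open() returns the in-memory HDUList, as the tests below rely on.
def _demo_create_ob():
    ob = create_ob(5.0, 2, [(50, 50), (52, 50)], nstare=3, exptime=10.0,
                   starttime=100.0)
    hdr = ob.frames[0].open()[0].header
    assert hdr['TSUTC1'] == 100.0 and hdr['TSUTC2'] == 110.0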
@pytest.mark.parametrize("nimages,naccum", [(7, 10)])
def test_join(nimages, naccum):
nstare = 3
value = 13.0
inittime = 1030040001.00034
starttime = inittime
exptime = 105.0
crpix = dither_pattern([50, 50], 0.0, 20.0, nimages)
obsresult = create_ob(value, nimages, crpix, nstare, exptime, starttime)
obsresult.naccum = 1
recipe = JoinDitheredImagesRecipe()
prev_accum = None
while True:
rinput = recipe.create_input(
obresult=obsresult,
accum=prev_accum
)
result = recipe(rinput)
frame_hdul = result.frame.open()
assert frame_hdul[0].header['NUM-NCOM'] == nimages * nstare
accum_hdul = result.accum.open()
assert accum_hdul[0].header['NUM-NCOM'] == nimages * nstare * obsresult.naccum
if obsresult.naccum < naccum:
# Init next loop
starttime += exptime * (nimages + 1)
            nobsresult = create_ob(value, nimages, crpix, nstare, exptime, starttime)
nobsresult.naccum = obsresult.naccum + 1
obsresult = nobsresult
prev_accum = result.accum
else:
break
frame_hdul = result.frame.open()
accum_hdul = result.accum.open()
assert frame_hdul[0].header['NUM-NCOM'] == nimages * nstare
assert frame_hdul['MAP'].data.max() == nimages
assert frame_hdul[0].header['TSUTC1'] == starttime
assert frame_hdul[0].header['TSUTC2'] == starttime + nimages * exptime
assert accum_hdul[0].header['NUM-NCOM'] == nimages * nstare * naccum
assert accum_hdul[0].header['TSUTC1'] == inittime
assert accum_hdul[0].header['TSUTC2'] == starttime + nimages * exptime
@pytest.mark.parametrize("nimages", [2, 3, 7])
def test_init_aggregate_result(nimages):
    # Test that the recipe works on the first iteration of the accumulation loop
nstare = 3
value = 9
starttime = 1030040001.00034
exptime = 105.0
crpix = numpy.array([(50, 50) for _ in range(nimages)])
obsresult = create_ob(value, nimages, crpix, nstare, exptime, starttime)
obsresult.naccum = 1
recipe = JoinDitheredImagesRecipe()
rinput = recipe.create_input(
obresult=obsresult,
)
#import logging
#logging.basicConfig(level=logging.DEBUG)
result1 = recipe.run(rinput)
frame_hdul = result1.frame.open()
accum_hdul = result1.accum.open()
assert(len(frame_hdul) == len(accum_hdul))
for key in ['NUM-NCOM', 'TSUTC1', 'TSUTC2']:
assert frame_hdul[0].header[key] == accum_hdul[0].header[key]
assert frame_hdul[0].header['NUM-NCOM'] == nimages * nstare
assert numpy.allclose(frame_hdul[0].data, accum_hdul[0].data)
| gpl-3.0 |
magvugr/AT | EntVirtual/lib/python2.7/site-packages/django/db/models/fields/__init__.py | 33 | 87855 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import copy
import datetime
import decimal
import itertools
import uuid
import warnings
from base64 import b64decode, b64encode
from functools import total_ordering
from django import forms
from django.apps import apps
from django.conf import settings
from django.core import checks, exceptions, validators
# When the _meta object was formalized, this exception was moved to
# django.core.exceptions. It is retained here for backwards compatibility
# purposes.
from django.core.exceptions import FieldDoesNotExist # NOQA
from django.db import connection, connections, router
from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin
from django.utils import six, timezone
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.deprecation import (
RemovedInDjango20Warning, warn_about_renamed_method,
)
from django.utils.duration import duration_string
from django.utils.encoding import (
force_bytes, force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.functional import Promise, cached_property, curry
from django.utils.ipv6 import clean_ipv6_address
from django.utils.itercompat import is_iterable
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
# Avoid "TypeError: Item in ``from list'' not a string" -- unicode_literals
# makes these strings unicode
__all__ = [str(x) for x in (
'AutoField', 'BLANK_CHOICE_DASH', 'BigAutoField', 'BigIntegerField',
'BinaryField', 'BooleanField', 'CharField', 'CommaSeparatedIntegerField',
'DateField', 'DateTimeField', 'DecimalField', 'DurationField',
'EmailField', 'Empty', 'Field', 'FieldDoesNotExist', 'FilePathField',
'FloatField', 'GenericIPAddressField', 'IPAddressField', 'IntegerField',
'NOT_PROVIDED', 'NullBooleanField', 'PositiveIntegerField',
'PositiveSmallIntegerField', 'SlugField', 'SmallIntegerField', 'TextField',
'TimeField', 'URLField', 'UUIDField',
)]
class Empty(object):
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field(field_name)
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
@total_ordering
@python_2_unicode_compatible
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
# Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
# Eg: "Title must be unique for pub_date year"
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
system_check_deprecated_details = None
system_check_removed_details = None
# Field flags
hidden = False
many_to_many = None
many_to_one = None
one_to_many = None
one_to_one = None
related_model = None
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.remote_field = rel
self.is_relation = self.remote_field is not None
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
if isinstance(choices, collections.Iterator):
choices = list(choices)
self.choices = choices or []
self.help_text = help_text
self.db_index = db_index
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = validators # Store for deconstruction later
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def __str__(self):
""" Return "app_label.model_label.field_name". """
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_choices())
errors.extend(self._check_db_index())
errors.extend(self._check_null_allowed_for_primary_keys())
errors.extend(self._check_backend_specific_checks(**kwargs))
errors.extend(self._check_deprecation_details())
return errors
def _check_field_name(self):
""" Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk". """
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
obj=self,
id='fields.E001',
)
]
elif '__' in self.name:
return [
checks.Error(
'Field names must not contain "__".',
obj=self,
id='fields.E002',
)
]
elif self.name == 'pk':
return [
checks.Error(
"'pk' is a reserved word that cannot be used as a field name.",
obj=self,
id='fields.E003',
)
]
else:
return []
@property
def rel(self):
warnings.warn(
"Usage of field.rel has been deprecated. Use field.remote_field instead.",
RemovedInDjango20Warning, 2)
return self.remote_field
def _check_choices(self):
if self.choices:
if (isinstance(self.choices, six.string_types) or
not is_iterable(self.choices)):
return [
checks.Error(
"'choices' must be an iterable (e.g., a list or tuple).",
obj=self,
id='fields.E004',
)
]
elif any(isinstance(choice, six.string_types) or
not is_iterable(choice) or len(choice) != 2
for choice in self.choices):
return [
checks.Error(
"'choices' must be an iterable containing "
"(actual value, human readable name) tuples.",
obj=self,
id='fields.E005',
)
]
else:
return []
else:
return []
def _check_db_index(self):
if self.db_index not in (None, True, False):
return [
checks.Error(
"'db_index' must be None, True or False.",
obj=self,
id='fields.E006',
)
]
else:
return []
def _check_null_allowed_for_primary_keys(self):
if (self.primary_key and self.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
return [
checks.Error(
'Primary keys must not have null=True.',
hint=('Set null=False on the field, or '
'remove primary_key=True argument.'),
obj=self,
id='fields.E007',
)
]
else:
return []
def _check_backend_specific_checks(self, **kwargs):
app_label = self.model._meta.app_label
for db in connections:
if router.allow_migrate(db, app_label, model_name=self.model._meta.model_name):
return connections[db].validation.check_field(self, **kwargs)
return []
def _check_deprecation_details(self):
if self.system_check_removed_details is not None:
return [
checks.Error(
self.system_check_removed_details.get(
'msg',
'%s has been removed except for support in historical '
'migrations.' % self.__class__.__name__
),
hint=self.system_check_removed_details.get('hint'),
obj=self,
id=self.system_check_removed_details.get('id', 'fields.EXXX'),
)
]
elif self.system_check_deprecated_details is not None:
return [
checks.Warning(
self.system_check_deprecated_details.get(
'msg',
'%s has been deprecated.' % self.__class__.__name__
),
hint=self.system_check_deprecated_details.get('hint'),
obj=self,
id=self.system_check_deprecated_details.get('id', 'fields.WXXX'),
)
]
return []
def get_col(self, alias, output_field=None):
if output_field is None:
output_field = self
if alias != self.model._meta.db_table or output_field != self:
from django.db.models.expressions import Col
return Col(alias, self, output_field)
else:
return self.cached_col
@cached_property
def cached_col(self):
from django.db.models.expressions import Col
return Col(self.model._meta.db_table, self)
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, GIS columns need to be
        selected as AsText(table.col) on MySQL, because the raw table.col data
        can't be used by Django.
"""
return sql, params
def deconstruct(self):
"""
Returns enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class has been run
* The import path of the field, including the class: django.db.models.IntegerField
This should be the most portable version, so less specific may be better.
* A list of positional arguments
* A dict of keyword arguments
Note that the positional or keyword arguments must contain values of the
following types (including inner values of collection types):
* None, bool, str, unicode, int, long, float, complex, set, frozenset, list, tuple, dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this time,
just ensure that the resulting field is the same - prefer keyword arguments
over positional ones, and omit parameters with their default values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": [],
"help_text": '',
"db_column": None,
"db_tablespace": settings.DEFAULT_INDEX_TABLESPACE,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
}
equals_comparison = {"choices", "validators", "db_tablespace"}
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
if path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
if path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
if path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (
force_text(self.name, strings_only=True),
path,
[],
keywords,
)
def clone(self):
"""
Uses deconstruct() to clone a new copy of this Field.
Will not preserve any class attachments/attribute names.
"""
name, path, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.remote_field:
obj.remote_field = copy.copy(self.remote_field)
if hasattr(self.remote_field, 'field') and self.remote_field.field is self:
obj.remote_field.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, we use the app registry to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
return _empty, (self.__class__,), self.__dict__
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
def get_pk_value_on_save(self, instance):
"""
Hook to generate new PK values on save. This method is called when
saving instances with no primary key value set. If this method returns
        something other than None, then the returned value is used when saving
the new instance.
"""
if self.default:
return self.get_default()
return None
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
@cached_property
def validators(self):
"""
Some validators can't be created at field initialization time.
This method provides a way to delay their creation until required.
"""
return list(itertools.chain(self.default_validators, self._validators))
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self.choices and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_check(self, connection):
"""
Return the database column check constraint for this field, for the
provided connection. Works the same way as db_type() for the case that
get_internal_type() does not map to a preexisting model field.
"""
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return connection.data_type_check_constraints[self.get_internal_type()] % data
except KeyError:
return None
def db_type(self, connection):
"""
Return the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific data_types dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return connection.data_types[self.get_internal_type()] % data
except KeyError:
return None
def rel_db_type(self, connection):
"""
Return the data type that a related field pointing to this field should
use. For example, this method is called by ForeignKey and OneToOneField
to determine its data type.
"""
return self.db_type(connection)
def db_parameters(self, connection):
"""
Extension of db_type(), providing a range of different return
values (type, checks).
This will look at db_type(), allowing custom model fields to override it.
"""
type_string = self.db_type(connection)
check_string = self.db_check(connection)
return {
"type": type_string,
"check": check_string,
}
def db_type_suffix(self, connection):
return connection.data_types_suffix.get(self.get_internal_type())
def get_db_converters(self, connection):
if hasattr(self, 'from_db_value'):
return [self.from_db_value]
return []
@property
def unique(self):
return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
self.concrete = self.column is not None
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name, private_only=False, virtual_only=NOT_PROVIDED):
"""
Register the field with the model class it belongs to.
If private_only is True, a separate instance of this field will be
created for every subclass of cls, even if cls is not an abstract
model.
"""
if virtual_only is not NOT_PROVIDED:
warnings.warn(
"The `virtual_only` argument of Field.contribute_to_class() "
"has been renamed to `private_only`.",
RemovedInDjango20Warning, stacklevel=2
)
private_only = virtual_only
self.set_attributes_from_name(name)
self.model = cls
if private_only:
cls._meta.add_field(self, private=True)
else:
cls._meta.add_field(self)
if self.column:
# Don't override classmethods with the descriptor. This means that
# if you have a classmethod and a field with the same name, then
# such fields can't be deferred (we don't have a check for this).
if not getattr(cls, self.attname, None):
setattr(cls, self.attname, DeferredAttribute(self.attname, cls))
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
curry(cls._get_FIELD_display, field=self))
def get_filter_kwargs_for_object(self, obj):
"""
Return a dict that when passed as kwargs to self.model.filter(), would
yield all instances having the same value for this field as obj has.
"""
return {self.name: getattr(obj, self.attname)}
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
if isinstance(value, Promise):
value = value._proxy____cast()
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
Used by the default implementations of get_db_prep_save().
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
if self.has_default():
if callable(self.default):
return self.default()
return self.default
        if (not self.empty_strings_allowed or
                (self.null and not connection.features.interprets_empty_strings_as_nulls)):
return None
return ""
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
blank_defined = False
choices = list(self.choices) if self.choices else []
named_groups = choices and isinstance(choices[0][1], (list, tuple))
if not named_groups:
for choice, __ in choices:
if choice in ('', None):
blank_defined = True
break
first_choice = (blank_choice if include_blank and
not blank_defined else [])
if self.choices:
return first_choice + choices
rel_model = self.remote_field.model
limit_choices_to = limit_choices_to or self.get_limit_choices_to()
if hasattr(self.remote_field, 'get_related_field'):
lst = [(getattr(x, self.remote_field.get_related_field().attname),
smart_text(x))
for x in rel_model._default_manager.complex_filter(
limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_text(x))
for x in rel_model._default_manager.complex_filter(
limit_choices_to)]
return first_choice + lst
@warn_about_renamed_method(
'Field', '_get_val_from_obj', 'value_from_object',
RemovedInDjango20Warning
)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_text(self.value_from_object(obj))
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
if choices_form_class is not None:
form_class = choices_form_class
else:
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = forms.CharField
return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
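# Editor's sketch of the Field.deconstruct() contract documented earlier in the
# class (assumes configured Django settings and that this module is imported as
# django.db.models.fields; CharField is defined further down in this file).
def _demo_field_deconstruct():
    field = CharField(max_length=100, name="title")
    name, path, args, kwargs = field.deconstruct()
    # The path is shortened to the public import location and only
    # non-default options are kept.
    assert (name, path, args, kwargs) == (
        "title", "django.db.models.CharField", [], {"max_length": 100})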
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super(AutoField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(AutoField, self).check(**kwargs)
errors.extend(self._check_primary_key())
return errors
def _check_primary_key(self):
if not self.primary_key:
return [
checks.Error(
'AutoFields must set primary_key=True.',
obj=self,
id='fields.E100',
),
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(AutoField, self).deconstruct()
del kwargs['blank']
kwargs['primary_key'] = True
return name, path, args, kwargs
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def rel_db_type(self, connection):
return IntegerField().db_type(connection=connection)
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def get_prep_value(self, value):
value = super(AutoField, self).get_prep_value(value)
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name, **kwargs):
assert not cls._meta.has_auto_field, \
"A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name, **kwargs)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BigAutoField(AutoField):
description = _("Big (8 byte) integer")
def get_internal_type(self):
return "BigAutoField"
def rel_db_type(self, connection):
return BigIntegerField().db_type(connection=connection)
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super(BooleanField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(BooleanField, self).check(**kwargs)
errors.extend(self._check_null(**kwargs))
return errors
def _check_null(self, **kwargs):
if getattr(self, 'null', False):
return [
checks.Error(
'BooleanFields do not accept null values.',
hint='Use a NullBooleanField instead.',
obj=self,
id='fields.E110',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(BooleanField, self).deconstruct()
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
            # if value is 1 or 0 then it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_prep_value(self, value):
value = super(BooleanField, self).get_prep_value(value)
if value is None:
return None
return self.to_python(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = not (self.has_default() or 'initial' in kwargs)
defaults = {'choices': self.get_choices(include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
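# Editor's sketch of BooleanField.to_python() above (assumes configured
# settings, since field construction reads DEFAULT_INDEX_TABLESPACE).
def _demo_booleanfield_to_python():
    f = BooleanField()
    assert f.to_python('t') is True
    assert f.to_python('0') is False
    assert f.to_python(1) is True  # 1 == True, so it is normalised to a bool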
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def check(self, **kwargs):
errors = super(CharField, self).check(**kwargs)
errors.extend(self._check_max_length_attribute(**kwargs))
return errors
def _check_max_length_attribute(self, **kwargs):
if self.max_length is None:
return [
checks.Error(
"CharFields must define a 'max_length' attribute.",
obj=self,
id='fields.E120',
)
]
elif not isinstance(self.max_length, six.integer_types) or self.max_length <= 0:
return [
checks.Error(
"'max_length' must be a positive integer.",
obj=self,
id='fields.E121',
)
]
else:
return []
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def get_prep_value(self, value):
value = super(CharField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass into the widget, for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
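# Editor's sketch: CharField normalises arbitrary values to text in both
# to_python() and get_prep_value() (assumes configured settings).
def _demo_charfield_prep():
    f = CharField(max_length=10)
    assert f.get_prep_value(123) == '123'  # coerced via smart_text
    assert f.to_python(None) is None       # None passes through untouched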
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
system_check_deprecated_details = {
'msg': (
'CommaSeparatedIntegerField has been deprecated. Support '
'for it (except in historical migrations) will be removed '
'in Django 2.0.'
),
'hint': (
'Use CharField(validators=[validate_comma_separated_integer_list]) instead.'
),
'id': 'fields.W901',
}
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _('Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateTimeCheckMixin(object):
def check(self, **kwargs):
errors = super(DateTimeCheckMixin, self).check(**kwargs)
errors.extend(self._check_mutually_exclusive_options())
errors.extend(self._check_fix_default_value())
return errors
def _check_mutually_exclusive_options(self):
# auto_now, auto_now_add, and default are mutually exclusive
# options. The use of more than one of these options together
# will trigger an Error
mutually_exclusive_options = [self.auto_now_add, self.auto_now, self.has_default()]
enabled_options = [option not in (None, False) for option in mutually_exclusive_options].count(True)
if enabled_options > 1:
return [
checks.Error(
"The options auto_now, auto_now_add, and default "
"are mutually exclusive. Only one of these options "
"may be present.",
obj=self,
id='fields.E160',
)
]
else:
return []
def _check_fix_default_value(self):
return []
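# Editor's sketch of the mutual-exclusion check above: auto_now, auto_now_add
# and default may not be combined. The private helper is called directly here
# purely for illustration (assumes configured settings).
def _demo_mutually_exclusive_options():
    f = DateField(auto_now=True, default=datetime.date.today)
    errors = f._check_mutually_exclusive_options()
    assert len(errors) == 1 and errors[0].id == 'fields.E160'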
class DateField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid date format. It must be "
"in YYYY-MM-DD format."),
'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super(DateField, self).__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
        Adds a warning to the checks framework stating that using an actual
        date or datetime value is probably wrong; it is only evaluated once,
        at server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
if not timezone.is_naive(value):
value = timezone.make_naive(value, timezone.utc)
value = value.date()
elif isinstance(value, datetime.date):
# Nothing to do, as dates don't have tz information
pass
else:
# No explicit date / datetime value -- no checks necessary
return []
offset = datetime.timedelta(days=1)
lower = (now - offset).date()
upper = (now + offset).date()
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(DateField, self).deconstruct()
if self.auto_now:
kwargs['auto_now'] = True
if self.auto_now_add:
kwargs['auto_now_add'] = True
if self.auto_now or self.auto_now_add:
del kwargs['editable']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
if settings.USE_TZ and timezone.is_aware(value):
# Convert aware datetimes to the default time zone
# before casting them to dates (#17742).
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
return value.date()
if isinstance(value, datetime.date):
return value
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name, **kwargs):
super(DateField, self).contribute_to_class(cls, name, **kwargs)
if not self.null:
setattr(
cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=True)
)
setattr(
cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=False)
)
def get_prep_value(self, value):
value = super(DateField, self).get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_datefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
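# Editor's sketch of _check_fix_default_value() above: a literal date default
# is evaluated once at import time and gets flagged, while a callable default
# does not (assumes configured settings, since the check calls timezone.now()).
def _demo_fixed_default_check():
    flagged = DateField(default=datetime.date.today())  # fixed value
    ok = DateField(default=datetime.date.today)         # callable
    assert flagged._check_fix_default_value()[0].id == 'fields.W161'
    assert ok._check_fix_default_value() == []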
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def _check_fix_default_value(self):
"""
        Adds a warning to the checks framework stating that using an actual
        date or datetime value is probably wrong; it is only evaluated once,
        at server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.date):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
lower = datetime.datetime(lower.year, lower.month, lower.day)
upper = now + second_offset
upper = datetime.datetime(upper.year, upper.month, upper.day)
value = datetime.datetime(value.year, value.month, value.day)
else:
# No explicit date / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
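# Illustrative sketch (the model below is a hypothetical example, not part of
# Django itself): a default evaluated once at server start-up is exactly what
# this check flags as fields.W161:
#
#   class Event(models.Model):
#       created = models.DateTimeField(default=timezone.now())  # flagged
#       # pass the callable instead -- default=timezone.now -- to get the
#       # current time for each new row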
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn("DateTimeField %s.%s received a naive datetime "
"(%s) while time zone support is active." %
(self.model.__name__, self.name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_datetime'],
code='invalid_datetime',
params={'value': value},
)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
def get_prep_value(self, value):
value = super(DateTimeField, self).get_prep_value(value)
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
try:
name = '%s.%s' % (self.model.__name__, self.name)
except AttributeError:
name = '(unbound)'
warnings.warn("DateTimeField %s received a naive datetime (%s)"
" while time zone support is active." %
(name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_datetimefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super(DecimalField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(DecimalField, self).check(**kwargs)
digits_errors = self._check_decimal_places()
digits_errors.extend(self._check_max_digits())
if not digits_errors:
errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
else:
errors.extend(digits_errors)
return errors
def _check_decimal_places(self):
try:
decimal_places = int(self.decimal_places)
if decimal_places < 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'decimal_places' attribute.",
obj=self,
id='fields.E130',
)
]
except ValueError:
return [
checks.Error(
"'decimal_places' must be a non-negative integer.",
obj=self,
id='fields.E131',
)
]
else:
return []
def _check_max_digits(self):
try:
max_digits = int(self.max_digits)
if max_digits <= 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'max_digits' attribute.",
obj=self,
id='fields.E132',
)
]
except ValueError:
return [
checks.Error(
"'max_digits' must be a positive integer.",
obj=self,
id='fields.E133',
)
]
else:
return []
def _check_decimal_places_and_max_digits(self, **kwargs):
if int(self.decimal_places) > int(self.max_digits):
return [
checks.Error(
"'max_digits' must be greater or equal to 'decimal_places'.",
obj=self,
id='fields.E134',
)
]
return []
@cached_property
def validators(self):
return super(DecimalField, self).validators + [
validators.DecimalValidator(self.max_digits, self.decimal_places)
]
def deconstruct(self):
name, path, args, kwargs = super(DecimalField, self).deconstruct()
if self.max_digits is not None:
kwargs['max_digits'] = self.max_digits
if self.decimal_places is not None:
kwargs['decimal_places'] = self.decimal_places
return name, path, args, kwargs
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def _format(self, value):
if isinstance(value, six.string_types):
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.utils.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import utils
return utils.format_number(value, self.max_digits, self.decimal_places)
def get_db_prep_save(self, value, connection):
return connection.ops.adapt_decimalfield_value(self.to_python(value), self.max_digits, self.decimal_places)
def get_prep_value(self, value):
value = super(DecimalField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
class DurationField(Field):
"""Stores timedelta objects.
Uses interval on postgres, INTERVAL DAY TO SECOND on Oracle, and bigint of
microseconds on other databases.
"""
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"[DD] [HH:[MM:]]ss[.uuuuuu] format.")
}
description = _("Duration")
def get_internal_type(self):
return "DurationField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.timedelta):
return value
try:
parsed = parse_duration(value)
except ValueError:
pass
else:
if parsed is not None:
return parsed
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_db_prep_value(self, value, connection, prepared=False):
if connection.features.has_native_duration_field:
return value
if value is None:
return None
# Discard any fractional microseconds due to floating point arithmetic.
return int(round(value.total_seconds() * 1000000))
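# Illustrative arithmetic (an assumption for backends without a native
# interval type, per the branch above):
#
#   datetime.timedelta(days=1, seconds=1).total_seconds() * 1000000
#   # -> 86401000000, stored as a bigint of microseconds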
def get_db_converters(self, connection):
converters = []
if not connection.features.has_native_duration_field:
converters.append(connection.ops.convert_durationfield_value)
return converters + super(DurationField, self).get_db_converters(connection)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else duration_string(val)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.DurationField,
}
defaults.update(kwargs)
return super(DurationField, self).formfield(**defaults)
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
# max_length=254 to be compliant with RFCs 3696 and 5321
kwargs['max_length'] = kwargs.get('max_length', 254)
super(EmailField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(EmailField, self).deconstruct()
# We do not exclude max_length if it matches the default, as we want to
# change the default in the future.
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, allow_files=True, allow_folders=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FilePathField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(FilePathField, self).check(**kwargs)
errors.extend(self._check_allowing_files_or_folders(**kwargs))
return errors
def _check_allowing_files_or_folders(self, **kwargs):
if not self.allow_files and not self.allow_folders:
return [
checks.Error(
"FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
obj=self,
id='fields.E140',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(FilePathField, self).deconstruct()
if self.path != '':
kwargs['path'] = self.path
if self.match is not None:
kwargs['match'] = self.match
if self.recursive is not False:
kwargs['recursive'] = self.recursive
if self.allow_files is not True:
kwargs['allow_files'] = self.allow_files
if self.allow_folders is not False:
kwargs['allow_folders'] = self.allow_folders
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
return name, path, args, kwargs
def get_prep_value(self, value):
value = super(FilePathField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
'allow_files': self.allow_files,
'allow_folders': self.allow_folders,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
value = super(FloatField, self).get_prep_value(value)
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
description = _("Integer")
def check(self, **kwargs):
errors = super(IntegerField, self).check(**kwargs)
errors.extend(self._check_max_length_warning())
return errors
def _check_max_length_warning(self):
if self.max_length is not None:
return [
checks.Warning(
"'max_length' is ignored when used with IntegerField",
hint="Remove 'max_length' from field",
obj=self,
id='fields.W122',
)
]
return []
@cached_property
def validators(self):
# These validators can't be added at field initialization time since
# they're based on values retrieved from `connection`.
validators_ = super(IntegerField, self).validators
internal_type = self.get_internal_type()
min_value, max_value = connection.ops.integer_field_range(internal_type)
if min_value is not None:
for validator in validators_:
if isinstance(validator, validators.MinValueValidator) and validator.limit_value >= min_value:
break
else:
validators_.append(validators.MinValueValidator(min_value))
if max_value is not None:
for validator in validators_:
if isinstance(validator, validators.MaxValueValidator) and validator.limit_value <= max_value:
break
else:
validators_.append(validators.MaxValueValidator(max_value))
return validators_
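# Illustrative sketch (the range values below are an assumption for a 32-bit
# integer column; the real limits come from the active database backend):
#
#   # connection.ops.integer_field_range('IntegerField') might return
#   # (-2147483648, 2147483647), in which case IntegerField().validators
#   # gains a MinValueValidator and a MaxValueValidator with those limits.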
def get_prep_value(self, value):
value = super(IntegerField, self).get_prep_value(value)
if value is None:
return None
return int(value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
system_check_removed_details = {
'msg': (
'IPAddressField has been removed except for support in '
'historical migrations.'
),
'hint': 'Use GenericIPAddressField instead.',
'id': 'fields.E900',
}
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
super(IPAddressField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(IPAddressField, self).deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_prep_value(self, value):
value = super(IPAddressField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def get_internal_type(self):
return "IPAddressField"
class GenericIPAddressField(Field):
empty_strings_allowed = False
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.protocol = protocol
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
super(GenericIPAddressField, self).__init__(verbose_name, name, *args,
**kwargs)
def check(self, **kwargs):
errors = super(GenericIPAddressField, self).check(**kwargs)
errors.extend(self._check_blank_and_null_values(**kwargs))
return errors
def _check_blank_and_null_values(self, **kwargs):
if not getattr(self, 'null', False) and getattr(self, 'blank', False):
return [
checks.Error(
'GenericIPAddressFields cannot have blank=True if null=False, '
'as blank values are stored as nulls.',
obj=self,
id='fields.E150',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(GenericIPAddressField, self).deconstruct()
if self.unpack_ipv4 is not False:
kwargs['unpack_ipv4'] = self.unpack_ipv4
if self.protocol != "both":
kwargs['protocol'] = self.protocol
if kwargs.get("max_length") == 39:
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value is None:
return None
if not isinstance(value, six.string_types):
value = force_text(value)
value = value.strip()
if ':' in value:
return clean_ipv6_address(value, self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_ipaddressfield_value(value)
def get_prep_value(self, value):
value = super(GenericIPAddressField, self).get_prep_value(value)
if value is None:
return None
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'protocol': self.protocol,
'form_class': forms.GenericIPAddressField,
}
defaults.update(kwargs)
return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
super(NullBooleanField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(NullBooleanField, self).deconstruct()
del kwargs['null']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_prep_value(self, value):
value = super(NullBooleanField, self).get_prep_value(value)
if value is None:
return None
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerRelDbTypeMixin(object):
def rel_db_type(self, connection):
"""
Return the data type that a related field pointing to this field should
use. In most cases, a foreign key pointing to a positive integer
primary key will have an integer column data type but some databases
(e.g. MySQL) have an unsigned integer type. In that case
(related_fields_match_type=True), the primary key should return its
db_type.
"""
if connection.features.related_fields_match_type:
return self.db_type(connection)
else:
return IntegerField().db_type(connection=connection)
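# Illustrative sketch (backend names and column types are assumptions): on
# MySQL, where related_fields_match_type is True, a ForeignKey to a
# PositiveIntegerField primary key reuses the unsigned column type, e.g.
#
#   PositiveIntegerField().rel_db_type(connection)
#   # -> 'integer UNSIGNED' on MySQL, but 'integer' on backends that fall
#   # back to IntegerField().db_type(connection)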
class PositiveIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
default_validators = [validators.validate_slug]
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
self.allow_unicode = kwargs.pop('allow_unicode', False)
if self.allow_unicode:
self.default_validators = [validators.validate_unicode_slug]
super(SlugField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(SlugField, self).deconstruct()
if kwargs.get("max_length") == 50:
del kwargs['max_length']
if self.db_index is False:
kwargs['db_index'] = False
else:
del kwargs['db_index']
if self.allow_unicode is not False:
kwargs['allow_unicode'] = self.allow_unicode
return name, path, args, kwargs
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField, 'allow_unicode': self.allow_unicode}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def get_prep_value(self, value):
value = super(TextField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length, 'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _("'%(value)s' value has the correct format "
"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super(TimeField, self).__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
Adds a warning to the checks framework stating that using an actual
time or datetime value is probably wrong; it's only being evaluated on
server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.time):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
value = datetime.datetime.combine(now.date(), value)
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc).time()
else:
# No explicit time / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(TimeField, self).deconstruct()
if self.auto_now is not False:
kwargs["auto_now"] = self.auto_now
if self.auto_now_add is not False:
kwargs["auto_now_add"] = self.auto_now_add
if self.auto_now or self.auto_now_add:
del kwargs['blank']
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_time'],
code='invalid_time',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
value = super(TimeField, self).get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_timefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
default_validators = [validators.URLValidator()]
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
super(URLField, self).__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(URLField, self).deconstruct()
if kwargs.get("max_length") == 200:
del kwargs['max_length']
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
class BinaryField(Field):
description = _("Raw binary data")
empty_values = [None, b'']
def __init__(self, *args, **kwargs):
kwargs['editable'] = False
super(BinaryField, self).__init__(*args, **kwargs)
if self.max_length is not None:
self.validators.append(validators.MaxLengthValidator(self.max_length))
def deconstruct(self):
name, path, args, kwargs = super(BinaryField, self).deconstruct()
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "BinaryField"
def get_placeholder(self, value, compiler, connection):
return connection.ops.binary_placeholder_sql(value)
def get_default(self):
if self.has_default() and not callable(self.default):
return self.default
default = super(BinaryField, self).get_default()
if default == '':
return b''
return default
def get_db_prep_value(self, value, connection, prepared=False):
value = super(BinaryField, self).get_db_prep_value(value, connection, prepared)
if value is not None:
return connection.Database.Binary(value)
return value
def value_to_string(self, obj):
"""Binary data is serialized as base64"""
return b64encode(force_bytes(self.value_from_object(obj))).decode('ascii')
def to_python(self, value):
# If it's a string, it should be base64-encoded data
if isinstance(value, six.text_type):
return six.memoryview(b64decode(force_bytes(value)))
return value
class UUIDField(Field):
default_error_messages = {
'invalid': _("'%(value)s' is not a valid UUID."),
}
description = 'Universally unique identifier'
empty_strings_allowed = False
def __init__(self, verbose_name=None, **kwargs):
kwargs['max_length'] = 32
super(UUIDField, self).__init__(verbose_name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(UUIDField, self).deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "UUIDField"
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
if not isinstance(value, uuid.UUID):
try:
value = uuid.UUID(value)
except AttributeError:
raise TypeError(self.error_messages['invalid'] % {'value': value})
if connection.features.has_native_uuid_field:
return value
return value.hex
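# Illustrative values (an assumption for backends without a native UUID
# column type):
#
#   uuid.UUID('12345678-1234-5678-1234-567812345678').hex
#   # -> '12345678123456781234567812345678' (32 chars, hence max_length=32)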
def to_python(self, value):
if value and not isinstance(value, uuid.UUID):
try:
return uuid.UUID(value)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
return value
def formfield(self, **kwargs):
defaults = {
'form_class': forms.UUIDField,
}
defaults.update(kwargs)
return super(UUIDField, self).formfield(**defaults)
| gpl-3.0 |
hyperized/ansible | lib/ansible/modules/storage/netapp/na_ontap_cluster_peer.py | 37 | 12647 | #!/usr/bin/python
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create/Delete cluster peer relations on ONTAP
extends_documentation_fragment:
- netapp.na_ontap
module: na_ontap_cluster_peer
options:
state:
choices: ['present', 'absent']
description:
- Whether the specified cluster peer should exist or not.
default: present
source_intercluster_lifs:
description:
- List of intercluster addresses of the source cluster.
- Used as peer-addresses in destination cluster.
- All these intercluster lifs should belong to the source cluster.
version_added: "2.8"
aliases:
- source_intercluster_lif
dest_intercluster_lifs:
description:
- List of intercluster addresses of the destination cluster.
- Used as peer-addresses in source cluster.
- All these intercluster lifs should belong to the destination cluster.
version_added: "2.8"
aliases:
- dest_intercluster_lif
passphrase:
description:
- The arbitrary passphrase that matches the one given to the peer cluster.
source_cluster_name:
description:
- The name of the source cluster in the peer relation to be deleted.
dest_cluster_name:
description:
- The name of the destination cluster in the peer relation to be deleted.
- Required for delete
dest_hostname:
description:
- Destination cluster IP or hostname which needs to be peered
- Required to complete the peering process at destination cluster.
required: True
dest_username:
description:
- Destination username.
- Optional if this is the same as the source username.
dest_password:
description:
- Destination password.
- Optional if this is same as source password.
short_description: Manage NetApp ONTAP cluster peering
version_added: "2.7"
'''
EXAMPLES = """
- name: Create cluster peer
na_ontap_cluster_peer:
state: present
source_intercluster_lifs: 1.2.3.4,1.2.3.5
dest_intercluster_lifs: 1.2.3.6,1.2.3.7
passphrase: XXXX
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
dest_hostname: "{{ dest_netapp_hostname }}"
- name: Delete cluster peer
na_ontap_cluster_peer:
state: absent
source_cluster_name: test-source-cluster
dest_cluster_name: test-dest-cluster
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
dest_hostname: "{{ dest_netapp_hostname }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPClusterPeer(object):
"""
Class with cluster peer methods
"""
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
source_intercluster_lifs=dict(required=False, type='list', aliases=['source_intercluster_lif']),
dest_intercluster_lifs=dict(required=False, type='list', aliases=['dest_intercluster_lif']),
passphrase=dict(required=False, type='str', no_log=True),
dest_hostname=dict(required=True, type='str'),
dest_username=dict(required=False, type='str'),
dest_password=dict(required=False, type='str', no_log=True),
source_cluster_name=dict(required=False, type='str'),
dest_cluster_name=dict(required=False, type='str')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_together=[['source_intercluster_lifs', 'dest_intercluster_lifs']],
required_if=[('state', 'absent', ['source_cluster_name', 'dest_cluster_name'])],
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
# set destination server connection
self.module.params['hostname'] = self.parameters['dest_hostname']
if self.parameters.get('dest_username'):
self.module.params['username'] = self.parameters['dest_username']
if self.parameters.get('dest_password'):
self.module.params['password'] = self.parameters['dest_password']
self.dest_server = netapp_utils.setup_na_ontap_zapi(module=self.module)
# reset to source host connection for asup logs
self.module.params['hostname'] = self.parameters['hostname']
def cluster_peer_get_iter(self, cluster):
"""
Compose NaElement object to query current source cluster using peer-cluster-name and peer-addresses parameters
:param cluster: type of cluster (source or destination)
:return: NaElement object for cluster-get-iter with query
"""
cluster_peer_get = netapp_utils.zapi.NaElement('cluster-peer-get-iter')
query = netapp_utils.zapi.NaElement('query')
cluster_peer_info = netapp_utils.zapi.NaElement('cluster-peer-info')
if cluster == 'source':
peer_lifs, peer_cluster = 'dest_intercluster_lifs', 'dest_cluster_name'
else:
peer_lifs, peer_cluster = 'source_intercluster_lifs', 'source_cluster_name'
if self.parameters.get(peer_lifs):
peer_addresses = netapp_utils.zapi.NaElement('peer-addresses')
for peer in self.parameters.get(peer_lifs):
peer_addresses.add_new_child('remote-inet-address', peer)
cluster_peer_info.add_child_elem(peer_addresses)
if self.parameters.get(peer_cluster):
cluster_peer_info.add_new_child('cluster-name', self.parameters[peer_cluster])
query.add_child_elem(cluster_peer_info)
cluster_peer_get.add_child_elem(query)
return cluster_peer_get
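# Illustrative sketch of the query built above, rendered as ZAPI-style XML
# (the address and cluster name shown are assumptions borrowed from the
# EXAMPLES block, not values this module hard-codes):
#
#   <cluster-peer-get-iter>
#     <query>
#       <cluster-peer-info>
#         <peer-addresses>
#           <remote-inet-address>1.2.3.6</remote-inet-address>
#         </peer-addresses>
#         <cluster-name>test-dest-cluster</cluster-name>
#       </cluster-peer-info>
#     </query>
#   </cluster-peer-get-iter>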
def cluster_peer_get(self, cluster):
"""
Get current cluster peer info
:param cluster: type of cluster (source or destination)
:return: Dictionary of current cluster peer details if query successful, else return None
"""
cluster_peer_get_iter = self.cluster_peer_get_iter(cluster)
result, cluster_info = None, dict()
if cluster == 'source':
server = self.server
else:
server = self.dest_server
try:
result = server.invoke_successfully(cluster_peer_get_iter, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching cluster peer %s: %s'
% (self.parameters['dest_cluster_name'], to_native(error)),
exception=traceback.format_exc())
# return cluster peer details
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) >= 1:
cluster_peer_info = result.get_child_by_name('attributes-list').get_child_by_name('cluster-peer-info')
cluster_info['cluster_name'] = cluster_peer_info.get_child_content('cluster-name')
peers = cluster_peer_info.get_child_by_name('peer-addresses')
cluster_info['peer-addresses'] = [peer.get_content() for peer in peers.get_children()]
return cluster_info
return None
def cluster_peer_delete(self, cluster):
"""
Delete a cluster peer on source or destination
For source cluster, peer cluster-name = destination cluster name and vice-versa
:param cluster: type of cluster (source or destination)
:return:
"""
if cluster == 'source':
server, peer_cluster_name = self.server, self.parameters['dest_cluster_name']
else:
server, peer_cluster_name = self.dest_server, self.parameters['source_cluster_name']
cluster_peer_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'cluster-peer-delete', **{'cluster-name': peer_cluster_name})
try:
server.invoke_successfully(cluster_peer_delete, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error deleting cluster peer %s: %s'
% (peer_cluster_name, to_native(error)),
exception=traceback.format_exc())
def cluster_peer_create(self, cluster):
"""
Create a cluster peer on source or destination
For source cluster, peer addresses = destination inter-cluster LIFs and vice-versa
:param cluster: type of cluster (source or destination)
:return: None
"""
cluster_peer_create = netapp_utils.zapi.NaElement.create_node_with_children('cluster-peer-create')
if self.parameters.get('passphrase') is not None:
cluster_peer_create.add_new_child('passphrase', self.parameters['passphrase'])
peer_addresses = netapp_utils.zapi.NaElement('peer-addresses')
if cluster == 'source':
server, peer_address = self.server, self.parameters['dest_intercluster_lifs']
else:
server, peer_address = self.dest_server, self.parameters['source_intercluster_lifs']
for each in peer_address:
peer_addresses.add_new_child('remote-inet-address', each)
cluster_peer_create.add_child_elem(peer_addresses)
try:
server.invoke_successfully(cluster_peer_create, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating cluster peer %s: %s'
% (peer_address, to_native(error)),
exception=traceback.format_exc())
def apply(self):
"""
Apply action to cluster peer
:return: None
"""
self.asup_log_for_cserver("na_ontap_cluster_peer")
source = self.cluster_peer_get('source')
destination = self.cluster_peer_get('destination')
source_action = self.na_helper.get_cd_action(source, self.parameters)
destination_action = self.na_helper.get_cd_action(destination, self.parameters)
self.na_helper.changed = False
# create only if expected cluster peer relation is not present on both source and destination clusters
if source_action == 'create' and destination_action == 'create':
self.cluster_peer_create('source')
self.cluster_peer_create('destination')
self.na_helper.changed = True
# delete peer relation in cluster where relation is present
else:
if source_action == 'delete':
self.cluster_peer_delete('source')
self.na_helper.changed = True
if destination_action == 'delete':
self.cluster_peer_delete('destination')
self.na_helper.changed = True
self.module.exit_json(changed=self.na_helper.changed)
def asup_log_for_cserver(self, event_name):
"""
Fetch admin vserver for the given cluster
Create an Autosupport log event with the given module name
:param event_name: Name of the event log
:return: None
"""
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event(event_name, cserver)
def main():
"""
Execute action
:return: None
"""
community_obj = NetAppONTAPClusterPeer()
community_obj.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
gohin/django | tests/builtin_server/tests.py | 368 | 5230 | from __future__ import unicode_literals
import sys
import traceback
from io import BytesIO
from unittest import TestCase
from wsgiref import simple_server
# If data is too large, socket will choke, so write chunks no larger than 32MB
# at a time. The rationale behind the 32MB can be found on Django's Trac:
# https://code.djangoproject.com/ticket/5596#comment:4
MAX_SOCKET_CHUNK_SIZE = 32 * 1024 * 1024 # 32 MB
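# Illustrative arithmetic (not part of the original module): a 48 MB response
# body is written in two chunks, since
#
#   data_len = 48 * 1024 * 1024
#   -(-data_len // MAX_SOCKET_CHUNK_SIZE)  # ceiling division -> 2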
class ServerHandler(simple_server.ServerHandler, object):
error_status = str("500 INTERNAL SERVER ERROR")
def write(self, data):
"""'write()' callable as specified by PEP 3333"""
assert isinstance(data, bytes), "write() argument must be bytestring"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
data = BytesIO(data)
for chunk in iter(lambda: data.read(MAX_SOCKET_CHUNK_SIZE), b''):
self._write(chunk)
self._flush()
def error_output(self, environ, start_response):
super(ServerHandler, self).error_output(environ, start_response)
return ['\n'.join(traceback.format_exception(*sys.exc_info()))]
# Backport of http://hg.python.org/cpython/rev/d5af1b235dab. See #16241.
# This can be removed when support for Python <= 2.7.3 is deprecated.
def finish_response(self):
try:
if not self.result_is_file() or not self.sendfile():
for data in self.result:
self.write(data)
self.finish_content()
finally:
self.close()
class DummyHandler(object):
def log_request(self, *args, **kwargs):
pass
class FileWrapperHandler(ServerHandler):
def __init__(self, *args, **kwargs):
super(FileWrapperHandler, self).__init__(*args, **kwargs)
self.request_handler = DummyHandler()
self._used_sendfile = False
def sendfile(self):
self._used_sendfile = True
return True
def wsgi_app(environ, start_response):
start_response(str('200 OK'), [(str('Content-Type'), str('text/plain'))])
return [b'Hello World!']
def wsgi_app_file_wrapper(environ, start_response):
start_response(str('200 OK'), [(str('Content-Type'), str('text/plain'))])
return environ['wsgi.file_wrapper'](BytesIO(b'foo'))
class WSGIFileWrapperTests(TestCase):
"""
Test that the wsgi.file_wrapper works for the builtin server.
Tests for #9659: wsgi.file_wrapper in the builtin server.
We need to mock a couple of handlers and keep track of what
gets called when using a couple kinds of WSGI apps.
"""
def test_file_wrapper_uses_sendfile(self):
env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
handler = FileWrapperHandler(None, BytesIO(), BytesIO(), env)
handler.run(wsgi_app_file_wrapper)
self.assertTrue(handler._used_sendfile)
self.assertEqual(handler.stdout.getvalue(), b'')
self.assertEqual(handler.stderr.getvalue(), b'')
def test_file_wrapper_no_sendfile(self):
env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
handler = FileWrapperHandler(None, BytesIO(), BytesIO(), env)
handler.run(wsgi_app)
self.assertFalse(handler._used_sendfile)
self.assertEqual(handler.stdout.getvalue().splitlines()[-1], b'Hello World!')
self.assertEqual(handler.stderr.getvalue(), b'')
class WriteChunkCounterHandler(ServerHandler):
"""
Server handler that counts the number of chunks written after headers were
sent. Used to make sure large response body chunking works properly.
"""
def __init__(self, *args, **kwargs):
super(WriteChunkCounterHandler, self).__init__(*args, **kwargs)
self.request_handler = DummyHandler()
self.headers_written = False
self.write_chunk_counter = 0
def send_headers(self):
super(WriteChunkCounterHandler, self).send_headers()
self.headers_written = True
def _write(self, data):
if self.headers_written:
self.write_chunk_counter += 1
self.stdout.write(data)
def send_big_data_app(environ, start_response):
start_response(str('200 OK'), [(str('Content-Type'), str('text/plain'))])
# Return a blob of data that is 1.5 times the maximum chunk size.
return [b'x' * (MAX_SOCKET_CHUNK_SIZE + MAX_SOCKET_CHUNK_SIZE // 2)]
class ServerHandlerChunksProperly(TestCase):
"""
Test that the ServerHandler chunks data properly.
Tests for #18972: The logic that performs the math to break data into
32MB (MAX_SOCKET_CHUNK_SIZE) chunks was flawed, BUT it didn't actually
cause any problems.
"""
def test_chunked_data(self):
env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
handler = WriteChunkCounterHandler(None, BytesIO(), BytesIO(), env)
handler.run(send_big_data_app)
self.assertEqual(handler.write_chunk_counter, 2)
| bsd-3-clause |
lgarren/spack | lib/spack/external/py/_apipkg.py | 210 | 5855 | """
apipkg: control the exported namespace of a python package.
see http://pypi.python.org/pypi/apipkg
(c) holger krekel, 2009 - MIT license
"""
import os
import sys
from types import ModuleType
__version__ = '1.3.dev'
def _py_abspath(path):
"""
special version of abspath
that will leave paths from jython jars alone
"""
if path.startswith('__pyclasspath__'):
return path
else:
return os.path.abspath(path)
def initpkg(pkgname, exportdefs, attr=dict()):
""" initialize given package from the export definitions. """
oldmod = sys.modules.get(pkgname)
d = {}
f = getattr(oldmod, '__file__', None)
if f:
f = _py_abspath(f)
d['__file__'] = f
if hasattr(oldmod, '__version__'):
d['__version__'] = oldmod.__version__
if hasattr(oldmod, '__loader__'):
d['__loader__'] = oldmod.__loader__
if hasattr(oldmod, '__path__'):
d['__path__'] = [_py_abspath(p) for p in oldmod.__path__]
if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None):
d['__doc__'] = oldmod.__doc__
d.update(attr)
if hasattr(oldmod, "__dict__"):
oldmod.__dict__.update(d)
mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d)
sys.modules[pkgname] = mod
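# Hedged usage sketch (the package and attribute names below are illustrative
# assumptions, not part of this module):
#
#   # in mypkg/__init__.py
#   import apipkg
#   apipkg.initpkg(__name__, {
#       'path': {'local': '.impl:LocalPath'},
#   })
#   # mypkg.path.local is then resolved lazily from mypkg/impl.py on first
#   # attribute access.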
def importobj(modpath, attrname):
module = __import__(modpath, None, None, ['__doc__'])
if not attrname:
return module
retval = module
names = attrname.split(".")
for x in names:
retval = getattr(retval, x)
return retval
class ApiModule(ModuleType):
def __docget(self):
try:
return self.__doc
except AttributeError:
if '__doc__' in self.__map__:
return self.__makeattr('__doc__')
def __docset(self, value):
self.__doc = value
__doc__ = property(__docget, __docset)
def __init__(self, name, importspec, implprefix=None, attr=None):
self.__name__ = name
self.__all__ = [x for x in importspec if x != '__onfirstaccess__']
self.__map__ = {}
self.__implprefix__ = implprefix or name
if attr:
for name, val in attr.items():
# print "setting", self.__name__, name, val
setattr(self, name, val)
for name, importspec in importspec.items():
if isinstance(importspec, dict):
subname = '%s.%s' % (self.__name__, name)
apimod = ApiModule(subname, importspec, implprefix)
sys.modules[subname] = apimod
setattr(self, name, apimod)
else:
parts = importspec.split(':')
modpath = parts.pop(0)
attrname = parts and parts[0] or ""
if modpath[0] == '.':
modpath = implprefix + modpath
if not attrname:
subname = '%s.%s' % (self.__name__, name)
apimod = AliasModule(subname, modpath)
sys.modules[subname] = apimod
if '.' not in name:
setattr(self, name, apimod)
else:
self.__map__[name] = (modpath, attrname)
def __repr__(self):
l = []
if hasattr(self, '__version__'):
l.append("version=" + repr(self.__version__))
if hasattr(self, '__file__'):
l.append('from ' + repr(self.__file__))
if l:
return '<ApiModule %r %s>' % (self.__name__, " ".join(l))
return '<ApiModule %r>' % (self.__name__,)
def __makeattr(self, name):
"""lazily compute value for name or raise AttributeError if unknown."""
# print "makeattr", self.__name__, name
target = None
if '__onfirstaccess__' in self.__map__:
target = self.__map__.pop('__onfirstaccess__')
importobj(*target)()
try:
modpath, attrname = self.__map__[name]
except KeyError:
if target is not None and name != '__onfirstaccess__':
# retry, onfirstaccess might have set attrs
return getattr(self, name)
raise AttributeError(name)
else:
result = importobj(modpath, attrname)
setattr(self, name, result)
try:
del self.__map__[name]
except KeyError:
pass # in a recursive-import situation a double-del can happen
return result
__getattr__ = __makeattr
def __dict__(self):
# force all the content of the module to be loaded when __dict__ is read
dictdescr = ModuleType.__dict__['__dict__']
dict = dictdescr.__get__(self)
if dict is not None:
hasattr(self, 'some')
for name in self.__all__:
try:
self.__makeattr(name)
except AttributeError:
pass
return dict
__dict__ = property(__dict__)
def AliasModule(modname, modpath, attrname=None):
mod = []
def getmod():
if not mod:
x = importobj(modpath, None)
if attrname is not None:
x = getattr(x, attrname)
mod.append(x)
return mod[0]
class AliasModule(ModuleType):
def __repr__(self):
x = modpath
if attrname:
x += "." + attrname
return '<AliasModule %r for %r>' % (modname, x)
def __getattribute__(self, name):
try:
return getattr(getmod(), name)
except ImportError:
return None
def __setattr__(self, name, value):
setattr(getmod(), name, value)
def __delattr__(self, name):
delattr(getmod(), name)
return AliasModule(str(modname))
| lgpl-2.1 |
Fshem/micro_bank_system | bank/migrations/0001_initial.py | 3 | 4092 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-02-27 20:21
from __future__ import unicode_literals
import django.db.models.deletion
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Addresse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('postal_address', models.TextField(max_length=1000)),
('email', models.EmailField(max_length=254)),
('phone_no', models.IntegerField()),
('tel_no', models.IntegerField(blank=True)),
('resident_town', models.CharField(max_length=20)),
('estate', models.CharField(max_length=20)),
('house_no', models.IntegerField(blank=True)),
],
),
migrations.CreateModel(
name='ATMCard',
fields=[
('number', models.IntegerField(auto_created=True, editable=False, primary_key=True, serialize=False)),
('expiry_date', models.DateTimeField()),
('csv', models.IntegerField()),
('reg_date', models.DateTimeField(default=django.utils.timezone.now)),
],
),
migrations.CreateModel(
name='CurrentAccount',
fields=[
('account_number',
models.IntegerField(auto_created=True, editable=False, primary_key=True, serialize=False)),
('amount', models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=7)),
('reg_date', models.DateTimeField(default=django.utils.timezone.now)),
],
),
migrations.CreateModel(
name='Personal_Information',
fields=[
('first_name', models.CharField(max_length=50)),
('surname', models.CharField(max_length=50)),
('other_names', models.CharField(max_length=50)),
('id_number', models.IntegerField(primary_key=True, serialize=False)),
('passport', models.BooleanField(default=False)),
('passport_no', models.IntegerField(blank=True)),
('pin_no', models.IntegerField()),
('register_atm', models.BooleanField(default=True)),
('date_registered', models.DateTimeField(default=django.utils.timezone.now)),
('address', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bank.Addresse')),
('atm_card', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bank.ATMCard')),
(
'current_ac', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bank.CurrentAccount')),
],
),
migrations.CreateModel(
name='SavingAccount',
fields=[
('account_number',
models.IntegerField(auto_created=True, editable=False, primary_key=True, serialize=False)),
('amount', models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=7)),
('reg_date', models.DateTimeField(default=django.utils.timezone.now)),
],
),
migrations.AddField(
model_name='personal_information',
name='saving_ac',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='bank.SavingAccount'),
),
migrations.AddField(
model_name='atmcard',
name='atm_current',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bank.CurrentAccount'),
),
migrations.AddField(
model_name='atmcard',
name='atm_saving',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='bank.SavingAccount'),
),
]
| agpl-3.0 |
edxzw/edx-platform | openedx/core/djangoapps/profile_images/images.py | 26 | 5907 | """
Image file manipulation functions related to profile images.
"""
from cStringIO import StringIO
from collections import namedtuple
from django.conf import settings
from django.core.files.base import ContentFile
from django.utils.translation import ugettext as _, ugettext_noop as _noop
from PIL import Image
from openedx.core.djangoapps.user_api.accounts.image_helpers import get_profile_image_storage
ImageType = namedtuple('ImageType', ('extensions', 'mimetypes', 'magic'))
IMAGE_TYPES = {
'jpeg': ImageType(
extensions=['.jpeg', '.jpg'],
mimetypes=['image/jpeg', 'image/pjpeg'],
magic=['ffd8'],
),
'png': ImageType(
extensions=[".png"],
mimetypes=['image/png'],
magic=["89504e470d0a1a0a"],
),
'gif': ImageType(
extensions=[".gif"],
mimetypes=['image/gif'],
magic=["474946383961", "474946383761"],
),
}
def user_friendly_size(size):
"""
Convert size in bytes to user friendly size.
Arguments:
size (int): size in bytes
Returns:
user friendly size
"""
units = [_('bytes'), _('KB'), _('MB')]
i = 0
while size >= 1024:
size /= 1024
i += 1
return u'{} {}'.format(size, units[i])
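# For example (illustrative values, not part of the original module):
#
#   user_friendly_size(512)      # -> u'512 bytes'
#   user_friendly_size(2048)     # -> u'2 KB'
#   user_friendly_size(5242880)  # -> u'5 MB'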
def get_valid_file_types():
"""
Return comma separated string of valid file types.
"""
return ', '.join([', '.join(IMAGE_TYPES[ft].extensions) for ft in IMAGE_TYPES.keys()])
FILE_UPLOAD_TOO_LARGE = _noop(u'The file must be smaller than {image_max_size} in size.'.format(image_max_size=user_friendly_size(settings.PROFILE_IMAGE_MAX_BYTES))) # pylint: disable=line-too-long
FILE_UPLOAD_TOO_SMALL = _noop(u'The file must be at least {image_min_size} in size.'.format(image_min_size=user_friendly_size(settings.PROFILE_IMAGE_MIN_BYTES))) # pylint: disable=line-too-long
FILE_UPLOAD_BAD_TYPE = _noop(u'The file must be one of the following types: {valid_file_types}.'.format(valid_file_types=get_valid_file_types())) # pylint: disable=line-too-long
FILE_UPLOAD_BAD_EXT = _noop(u'The file name extension for this file does not match the file data. The file may be corrupted.') # pylint: disable=line-too-long
FILE_UPLOAD_BAD_MIMETYPE = _noop(u'The Content-Type header for this file does not match the file data. The file may be corrupted.') # pylint: disable=line-too-long
class ImageValidationError(Exception):
"""
Exception to use when the system rejects a user-supplied source image.
"""
@property
def user_message(self):
"""
Translate the developer-facing exception message for API clients.
"""
# pylint: disable=translation-of-non-string
return _(self.message)
def validate_uploaded_image(uploaded_file):
"""
Raises ImageValidationError if the server should refuse to use this
uploaded file as the source image for a user's profile image. Otherwise,
returns nothing.
"""
# validation code by @pmitros,
# adapted from https://github.com/pmitros/ProfileXBlock
# see also: http://en.wikipedia.org/wiki/Magic_number_%28programming%29
if uploaded_file.size > settings.PROFILE_IMAGE_MAX_BYTES:
raise ImageValidationError(FILE_UPLOAD_TOO_LARGE)
elif uploaded_file.size < settings.PROFILE_IMAGE_MIN_BYTES:
raise ImageValidationError(FILE_UPLOAD_TOO_SMALL)
# check the file extension looks acceptable
filename = unicode(uploaded_file.name).lower()
filetype = [ft for ft in IMAGE_TYPES if any(filename.endswith(ext) for ext in IMAGE_TYPES[ft].extensions)]
if not filetype:
raise ImageValidationError(FILE_UPLOAD_BAD_TYPE)
filetype = filetype[0]
# check mimetype matches expected file type
if uploaded_file.content_type not in IMAGE_TYPES[filetype].mimetypes:
raise ImageValidationError(FILE_UPLOAD_BAD_MIMETYPE)
# check magic number matches expected file type
headers = IMAGE_TYPES[filetype].magic
if uploaded_file.read(len(headers[0]) / 2).encode('hex') not in headers:
raise ImageValidationError(FILE_UPLOAD_BAD_EXT)
# avoid unexpected errors from subsequent modules expecting the fp to be at 0
uploaded_file.seek(0)
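# Hedged usage sketch (the request/response plumbing below is a hypothetical
# assumption, not part of this module):
#
#   try:
#       validate_uploaded_image(request.FILES['profile_image'])
#   except ImageValidationError as error:
#       return JsonResponse({'developer_message': error.message,
#                            'user_message': error.user_message}, status=400)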
def _get_scaled_image_file(image_obj, size):
"""
Given a PIL.Image object, get a resized copy using `size` (square) and
return a file-like object containing the data saved as a JPEG.
Note that the file object returned is a django ContentFile which holds
data in memory (not on disk).
"""
if image_obj.mode != "RGB":
image_obj = image_obj.convert("RGB")
scaled = image_obj.resize((size, size), Image.ANTIALIAS)
string_io = StringIO()
scaled.save(string_io, format='JPEG')
image_file = ContentFile(string_io.getvalue())
return image_file
def create_profile_images(image_file, profile_image_names):
"""
Generates a set of image files based on image_file and
stores them according to the sizes and filenames specified
in `profile_image_names`.
"""
image_obj = Image.open(image_file)
# first center-crop the image if needed (but no scaling yet).
width, height = image_obj.size
if width != height:
side = width if width < height else height
image_obj = image_obj.crop(((width - side) / 2, (height - side) / 2, (width + side) / 2, (height + side) / 2))
storage = get_profile_image_storage()
for size, name in profile_image_names.items():
scaled_image_file = _get_scaled_image_file(image_obj, size)
# Store the file.
try:
storage.save(name, scaled_image_file)
finally:
scaled_image_file.close()
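# Hedged usage sketch (the sizes and filenames are illustrative assumptions):
#
#   names = {30: 'me_30.jpg', 50: 'me_50.jpg', 500: 'me_500.jpg'}
#   with open('avatar.png', 'rb') as image_file:
#       create_profile_images(image_file, names)
#   # ...and to delete the generated files later:
#   # remove_profile_images(names)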
def remove_profile_images(profile_image_names):
"""
Physically remove the image files specified in `profile_image_names`
"""
storage = get_profile_image_storage()
for name in profile_image_names.values():
storage.delete(name)
| agpl-3.0 |
ibm-messaging/iot-python | samples/customMessageFormat/myCustomCodec.py | 2 | 1936 | # *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from datetime import datetime
import pytz
try:
from wiotp.sdk import Message, MessageCodec
except ImportError:
# This part is only required to run the sample from within the samples
# directory when the module itself is not installed.
import sys
import os
import inspect
cmd_subfolder = os.path.realpath(
os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "../../src"))
)
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
    from wiotp.sdk import Message, MessageCodec
class MyCodec(MessageCodec):
"""
Dedicated encoder for supporting a very specific dataset, serialises a dictionary object
of the following format:
{
'hello' : 'world',
'x' : 10
}
    into a simple comma-separated message:
world,10
"""
@staticmethod
def encode(data=None, timestamp=None):
return data["hello"] + "," + str(data["x"])
@staticmethod
def decode(message):
"""
        The decoder understands the comma-separated format produced by the encoder and
allocates the two values to the correct keys:
data['hello'] = 'world'
data['x'] = 10
"""
(hello, x) = message.payload.split(",")
data = {}
data["hello"] = hello
data["x"] = x
timestamp = datetime.now(pytz.timezone("UTC"))
return Message(data, timestamp)
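# Illustrative usage sketch (hedged: the method names follow the wiotp.sdk
# convention for custom message formats, and "custom" is an example format id):
#
#     client.setMessageCodec("custom", MyCodec)
#     client.publishEvent(eventId="status", msgFormat="custom",
#                         data={"hello": "world", "x": 10})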
| epl-1.0 |
waytai/odoo | addons/l10n_ar/__openerp__.py | 260 | 1695 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Argentina Localization Chart of Accounts',
'version': '1.0',
'description': """
Argentinian accounting chart and tax localization.
==================================================
Argentine chart of accounts and taxes according to current regulations.
""",
'author': ['Cubic ERP'],
'website': 'http://cubicERP.com',
'category': 'Localization/Account Charts',
'depends': ['account_chart'],
'data':[
'account_tax_code.xml',
'l10n_ar_chart.xml',
'account_tax.xml',
'l10n_ar_wizard.xml',
],
'demo': [],
'active': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
connorimes/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/test_handshake_hybi00.py | 466 | 17345 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for handshake.hybi00 module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake.hybi00 import Handshaker
from mod_pywebsocket.handshake.hybi00 import _validate_subprotocol
from test import mock
_TEST_KEY1 = '4 @1 46546xW%0l 1 5'
_TEST_KEY2 = '12998 5 Y3 1 .P00'
_TEST_KEY3 = '^n:ds[4U'
_TEST_CHALLENGE_RESPONSE = '8jKS\'y:G*Co,Wxa-'
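# Illustrative note (draft-76 / hybi-00 background): the expected challenge
# response above is derived by concatenating the digits of each key, dividing
# by the number of spaces in that key, packing the two results as big-endian
# 32-bit integers, appending the 8-byte key3 payload, and taking the MD5
# digest of those 16 bytes. A minimal sketch of that computation (using
# hashlib rather than the deprecated md5 module):
#
#     import hashlib, re, struct
#     def _challenge(key1, key2, key3):
#         def key_number(key):
#             return int(''.join(re.findall(r'\d', key))) // key.count(' ')
#         return hashlib.md5(
#             struct.pack('!II', key_number(key1), key_number(key2)) + key3
#         ).digest()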
_GOOD_REQUEST = (
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3)
_GOOD_REQUEST_CAPITALIZED_HEADER_VALUES = (
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'UPGRADE',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WEBSOCKET',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3)
_GOOD_REQUEST_CASE_MIXED_HEADER_NAMES = (
80,
'GET',
'/demo',
{
'hOsT': 'example.com',
'cOnNeCtIoN': 'Upgrade',
'sEc-wEbsOcKeT-kEy2': _TEST_KEY2,
'sEc-wEbsOcKeT-pRoToCoL': 'sample',
'uPgRaDe': 'WebSocket',
'sEc-wEbsOcKeT-kEy1': _TEST_KEY1,
'oRiGiN': 'http://example.com',
},
_TEST_KEY3)
_GOOD_RESPONSE_DEFAULT_PORT = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: ws://example.com/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n' +
_TEST_CHALLENGE_RESPONSE)
_GOOD_RESPONSE_SECURE = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: wss://example.com/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n' +
_TEST_CHALLENGE_RESPONSE)
_GOOD_REQUEST_NONDEFAULT_PORT = (
8081,
'GET',
'/demo',
{
'Host': 'example.com:8081',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3)
_GOOD_RESPONSE_NONDEFAULT_PORT = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: ws://example.com:8081/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n' +
_TEST_CHALLENGE_RESPONSE)
_GOOD_RESPONSE_SECURE_NONDEF = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: wss://example.com:8081/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n' +
_TEST_CHALLENGE_RESPONSE)
_GOOD_REQUEST_NO_PROTOCOL = (
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3)
_GOOD_RESPONSE_NO_PROTOCOL = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: ws://example.com/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'\r\n' +
_TEST_CHALLENGE_RESPONSE)
_GOOD_REQUEST_WITH_OPTIONAL_HEADERS = (
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'EmptyValue': '',
'Sec-WebSocket-Protocol': 'sample',
'AKey': 'AValue',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3)
# TODO(tyoshino): Include \r \n in key3, challenge response.
_GOOD_REQUEST_WITH_NONPRINTABLE_KEY = (
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': 'y R2 48 Q1O4 e|BV3 i5 1 u- 65',
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': '36 7 74 i 92 2\'m 9 0G',
'Origin': 'http://example.com',
},
''.join(map(chr, [0x01, 0xd1, 0xdd, 0x3b, 0xd1, 0x56, 0x63, 0xff])))
_GOOD_RESPONSE_WITH_NONPRINTABLE_KEY = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: ws://example.com/demo\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n' +
''.join(map(chr, [0x0b, 0x99, 0xfa, 0x55, 0xbd, 0x01, 0x23, 0x7b,
0x45, 0xa2, 0xf1, 0xd0, 0x87, 0x8a, 0xee, 0xeb])))
_GOOD_REQUEST_WITH_QUERY_PART = (
80,
'GET',
'/demo?e=mc2',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3)
_GOOD_RESPONSE_WITH_QUERY_PART = (
'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Location: ws://example.com/demo?e=mc2\r\n'
'Sec-WebSocket-Origin: http://example.com\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'\r\n' +
_TEST_CHALLENGE_RESPONSE)
_BAD_REQUESTS = (
( # HTTP request
80,
'GET',
'/demo',
{
'Host': 'www.google.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5;'
' en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3'
' GTB6 GTBA',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,'
'*/*;q=0.8',
'Accept-Language': 'en-us,en;q=0.5',
'Accept-Encoding': 'gzip,deflate',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Keep-Alive': '300',
'Connection': 'keep-alive',
}),
( # Wrong method
80,
'POST',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3),
( # Missing Upgrade
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3),
( # Wrong Upgrade
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'NonWebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3),
( # Empty WebSocket-Protocol
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': '',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3),
( # Wrong port number format
80,
'GET',
'/demo',
{
'Host': 'example.com:0x50',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3),
( # Header/connection port mismatch
8080,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'sample',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3),
( # Illegal WebSocket-Protocol
80,
'GET',
'/demo',
{
'Host': 'example.com',
'Connection': 'Upgrade',
'Sec-WebSocket-Key2': _TEST_KEY2,
'Sec-WebSocket-Protocol': 'illegal\x09protocol',
'Upgrade': 'WebSocket',
'Sec-WebSocket-Key1': _TEST_KEY1,
'Origin': 'http://example.com',
},
_TEST_KEY3),
)
def _create_request(request_def):
data = ''
if len(request_def) > 4:
data = request_def[4]
conn = mock.MockConn(data)
conn.local_addr = ('0.0.0.0', request_def[0])
return mock.MockRequest(
method=request_def[1],
uri=request_def[2],
headers_in=request_def[3],
connection=conn)
def _create_get_memorized_lines(lines):
"""Creates a function that returns the given string."""
def get_memorized_lines():
return lines
return get_memorized_lines
def _create_requests_with_lines(request_lines_set):
requests = []
for lines in request_lines_set:
request = _create_request(_GOOD_REQUEST)
request.connection.get_memorized_lines = _create_get_memorized_lines(
lines)
requests.append(request)
return requests
class HyBi00HandshakerTest(unittest.TestCase):
def test_good_request_default_port(self):
request = _create_request(_GOOD_REQUEST)
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_DEFAULT_PORT,
request.connection.written_data())
self.assertEqual('/demo', request.ws_resource)
self.assertEqual('http://example.com', request.ws_origin)
self.assertEqual('ws://example.com/demo', request.ws_location)
self.assertEqual('sample', request.ws_protocol)
def test_good_request_capitalized_header_values(self):
request = _create_request(_GOOD_REQUEST_CAPITALIZED_HEADER_VALUES)
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_DEFAULT_PORT,
request.connection.written_data())
def test_good_request_case_mixed_header_names(self):
request = _create_request(_GOOD_REQUEST_CASE_MIXED_HEADER_NAMES)
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_DEFAULT_PORT,
request.connection.written_data())
def test_good_request_secure_default_port(self):
request = _create_request(_GOOD_REQUEST)
request.connection.local_addr = ('0.0.0.0', 443)
request.is_https_ = True
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_SECURE,
request.connection.written_data())
self.assertEqual('sample', request.ws_protocol)
def test_good_request_nondefault_port(self):
request = _create_request(_GOOD_REQUEST_NONDEFAULT_PORT)
handshaker = Handshaker(request,
mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_NONDEFAULT_PORT,
request.connection.written_data())
self.assertEqual('sample', request.ws_protocol)
def test_good_request_secure_non_default_port(self):
request = _create_request(_GOOD_REQUEST_NONDEFAULT_PORT)
request.is_https_ = True
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_SECURE_NONDEF,
request.connection.written_data())
self.assertEqual('sample', request.ws_protocol)
def test_good_request_default_no_protocol(self):
request = _create_request(_GOOD_REQUEST_NO_PROTOCOL)
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_NO_PROTOCOL,
request.connection.written_data())
self.assertEqual(None, request.ws_protocol)
def test_good_request_optional_headers(self):
request = _create_request(_GOOD_REQUEST_WITH_OPTIONAL_HEADERS)
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual('AValue',
request.headers_in['AKey'])
self.assertEqual('',
request.headers_in['EmptyValue'])
def test_good_request_with_nonprintable_key(self):
request = _create_request(_GOOD_REQUEST_WITH_NONPRINTABLE_KEY)
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_WITH_NONPRINTABLE_KEY,
request.connection.written_data())
self.assertEqual('sample', request.ws_protocol)
def test_good_request_with_query_part(self):
request = _create_request(_GOOD_REQUEST_WITH_QUERY_PART)
handshaker = Handshaker(request, mock.MockDispatcher())
handshaker.do_handshake()
self.assertEqual(_GOOD_RESPONSE_WITH_QUERY_PART,
request.connection.written_data())
self.assertEqual('ws://example.com/demo?e=mc2', request.ws_location)
def test_bad_requests(self):
for request in map(_create_request, _BAD_REQUESTS):
handshaker = Handshaker(request, mock.MockDispatcher())
self.assertRaises(HandshakeException, handshaker.do_handshake)
class HyBi00ValidateSubprotocolTest(unittest.TestCase):
def test_validate_subprotocol(self):
# should succeed.
_validate_subprotocol('sample')
_validate_subprotocol('Sample')
_validate_subprotocol('sample\x7eprotocol')
_validate_subprotocol('sample\x20protocol')
# should fail.
self.assertRaises(HandshakeException,
_validate_subprotocol,
'')
self.assertRaises(HandshakeException,
_validate_subprotocol,
'sample\x19protocol')
self.assertRaises(HandshakeException,
_validate_subprotocol,
'sample\x7fprotocol')
self.assertRaises(HandshakeException,
_validate_subprotocol,
# "Japan" in Japanese
u'\u65e5\u672c')
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
robinro/ansible-modules-extras | infrastructure/foreman/katello.py | 18 | 17104 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Eric D Helms <ericdhelms@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: katello
short_description: Manage Katello Resources
description:
- Allows the management of Katello resources inside your Foreman server
version_added: "2.3"
author: "Eric D Helms (@ehelms)"
requirements:
- "nailgun >= 0.28.0"
- "python >= 2.6"
- datetime
options:
server_url:
description:
- URL of Foreman server
required: true
username:
description:
- Username on Foreman server
required: true
password:
description:
- Password for user accessing Foreman server
required: true
entity:
description:
- The Foreman resource that the action will be performed on (e.g. organization, host)
required: true
params:
description:
- Parameters associated to the entity resource to set or edit in dictionary format (e.g. name, description)
required: true
'''
EXAMPLES = '''
Simple Example:
- name: "Create Product"
local_action:
module: katello
username: "admin"
password: "admin"
server_url: "https://fakeserver.com"
entity: "product"
params:
name: "Centos 7"
Abstraction Example:
katello.yml
---
- name: "{{ name }}"
local_action:
module: katello
username: "admin"
password: "admin"
server_url: "https://fakeserver.com"
entity: "{{ entity }}"
params: "{{ params }}"
tasks.yml
---
- include: katello.yml
vars:
name: "Create Dev Environment"
entity: "lifecycle_environment"
params:
name: "Dev"
prior: "Library"
organization: "Default Organization"
- include: katello.yml
vars:
name: "Create Centos Product"
entity: "product"
params:
name: "Centos 7"
organization: "Default Organization"
- include: katello.yml
vars:
name: "Create 7.2 Repository"
entity: "repository"
params:
name: "Centos 7.2"
product: "Centos 7"
organization: "Default Organization"
content_type: "yum"
url: "http://mirror.centos.org/centos/7/os/x86_64/"
- include: katello.yml
vars:
name: "Create Centos 7 View"
entity: "content_view"
params:
name: "Centos 7 View"
organization: "Default Organization"
repositories:
- name: "Centos 7.2"
product: "Centos 7"
- include: katello.yml
vars:
name: "Enable RHEL Product"
entity: "repository_set"
params:
name: "Red Hat Enterprise Linux 7 Server (RPMs)"
product: "Red Hat Enterprise Linux Server"
organization: "Default Organization"
basearch: "x86_64"
releasever: "7"
'''
RETURN = '''# '''
import datetime
import os
try:
from nailgun import entities, entity_fields, entity_mixins
from nailgun.config import ServerConfig
HAS_NAILGUN_PACKAGE = True
except:
HAS_NAILGUN_PACKAGE = False
class NailGun(object):
def __init__(self, server, entities, module):
self._server = server
self._entities = entities
self._module = module
entity_mixins.TASK_TIMEOUT = 1000
def find_organization(self, name, **params):
org = self._entities.Organization(self._server, name=name, **params)
response = org.search(set(), {'search': 'name={}'.format(name)})
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No organization found for %s" % name)
def find_lifecycle_environment(self, name, organization):
org = self.find_organization(organization)
lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org)
response = lifecycle_env.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Lifecycle Found found for %s" % name)
def find_product(self, name, organization):
org = self.find_organization(organization)
product = self._entities.Product(self._server, name=name, organization=org)
response = product.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Product found for %s" % name)
def find_repository(self, name, product, organization):
product = self.find_product(product, organization)
repository = self._entities.Repository(self._server, name=name, product=product)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
response = repository.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Repository found for %s" % name)
def find_content_view(self, name, organization):
org = self.find_organization(organization)
content_view = self._entities.ContentView(self._server, name=name, organization=org)
response = content_view.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Content View found for %s" % name)
def organization(self, params):
name = params['name']
del params['name']
org = self.find_organization(name, **params)
if org:
org = self._entities.Organization(self._server, name=name, id=org.id, **params)
org.update()
else:
org = self._entities.Organization(self._server, name=name, **params)
org.create()
return True
def manifest(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
        # Open the manifest relative to the working directory; avoid shadowing
        # the `file` builtin and only close the handle once open() has succeeded.
        manifest_file = open(os.getcwd() + params['content'], 'r')
        try:
            content = manifest_file.read()
        finally:
            manifest_file.close()
manifest = self._entities.Subscription(self._server)
try:
manifest.upload(
data={'organization_id': org.id},
files={'content': content}
)
return True
except Exception:
e = get_exception()
if "Import is the same as existing data" in e.message:
return True
else:
self._module.fail_json(msg="Manifest import failed with %s" % e)
def product(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
product = self._entities.Product(self._server, **params)
response = product.search()
if len(response) == 1:
product.id = response[0].id
product.update()
else:
product.create()
return True
def sync_product(self, params):
org = self.find_organization(params['organization'])
product = self.find_product(params['name'], org.name)
return product.sync()
def repository(self, params):
product = self.find_product(params['product'], params['organization'])
params['product'] = product.id
del params['organization']
repository = self._entities.Repository(self._server, **params)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
response = repository.search()
if len(response) == 1:
repository.id = response[0].id
repository.update()
else:
repository.create()
return True
def sync_repository(self, params):
org = self.find_organization(params['organization'])
repository = self.find_repository(params['name'], params['product'], org.name)
return repository.sync()
def repository_set(self, params):
product = self.find_product(params['product'], params['organization'])
del params['product']
del params['organization']
if not product:
return False
else:
reposet = self._entities.RepositorySet(self._server, product=product, name=params['name'])
reposet = reposet.search()[0]
formatted_name = [params['name'].replace('(', '').replace(')', '')]
formatted_name.append(params['basearch'])
if params['releasever']:
formatted_name.append(params['releasever'])
formatted_name = ' '.join(formatted_name)
repository = self._entities.Repository(self._server, product=product, name=formatted_name)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
repository = repository.search()
if len(repository) == 0:
reposet.enable(data={'basearch': params['basearch'], 'releasever': params['releasever']})
return True
def sync_plan(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
params['sync_date'] = datetime.datetime.strptime(params['sync_date'], "%H:%M")
products = params['products']
del params['products']
sync_plan = self._entities.SyncPlan(
self._server,
name=params['name'],
organization=org
)
response = sync_plan.search()
sync_plan.sync_date = params['sync_date']
sync_plan.interval = params['interval']
if len(response) == 1:
sync_plan.id = response[0].id
sync_plan.update()
else:
response = sync_plan.create()
            sync_plan.id = response.id  # create() returns the new entity, not a list
if products:
ids = []
for name in products:
product = self.find_product(name, org.name)
ids.append(product.id)
sync_plan.add_products(data={'product_ids': ids})
return True
def content_view(self, params):
org = self.find_organization(params['organization'])
content_view = self._entities.ContentView(self._server, name=params['name'], organization=org)
response = content_view.search()
if len(response) == 1:
content_view.id = response[0].id
content_view.update()
else:
content_view = content_view.create()
if params['repositories']:
repos = []
for repository in params['repositories']:
repository = self.find_repository(repository['name'], repository['product'], org.name)
repos.append(repository)
content_view.repository = repos
content_view.update(['repository'])
def find_content_view_version(self, name, organization, environment):
env = self.find_lifecycle_environment(environment, organization)
content_view = self.find_content_view(name, organization)
content_view_version = self._entities.ContentViewVersion(self._server, content_view=content_view)
response = content_view_version.search(['content_view'], {'environment_id': env.id})
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Content View version found for %s" % response)
def publish(self, params):
content_view = self.find_content_view(params['name'], params['organization'])
return content_view.publish()
def promote(self, params):
to_environment = self.find_lifecycle_environment(params['to_environment'], params['organization'])
version = self.find_content_view_version(params['name'], params['organization'], params['from_environment'])
data = {'environment_id': to_environment.id}
return version.promote(data=data)
def lifecycle_environment(self, params):
org = self.find_organization(params['organization'])
prior_env = self.find_lifecycle_environment(params['prior'], params['organization'])
lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=params['name'], organization=org, prior=prior_env)
response = lifecycle_env.search()
if len(response) == 1:
lifecycle_env.id = response[0].id
lifecycle_env.update()
else:
lifecycle_env.create()
return True
def activation_key(self, params):
org = self.find_organization(params['organization'])
activation_key = self._entities.ActivationKey(self._server, name=params['name'], organization=org)
response = activation_key.search()
if len(response) == 1:
activation_key.id = response[0].id
activation_key.update()
else:
activation_key.create()
if params['content_view']:
content_view = self.find_content_view(params['content_view'], params['organization'])
lifecycle_environment = self.find_lifecycle_environment(params['lifecycle_environment'], params['organization'])
activation_key.content_view = content_view
activation_key.environment = lifecycle_environment
activation_key.update()
return True
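# Illustrative usage sketch outside of a playbook run (the URL and credentials
# are hypothetical; `module` must still be an AnsibleModule instance because
# the helpers above report errors through module.fail_json):
#
#     server = ServerConfig(url='https://fakeserver.com',
#                           auth=('admin', 'admin'), verify=False)
#     ng = NailGun(server, entities, module)
#     ng.product({'name': 'Centos 7', 'organization': 'Default Organization'})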
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(required=True),
username=dict(required=True, no_log=True),
password=dict(required=True, no_log=True),
entity=dict(required=True, no_log=False),
action=dict(required=False, no_log=False),
verify_ssl=dict(required=False, type='bool', default=False),
params=dict(required=True, no_log=True, type='dict'),
),
supports_check_mode=True
)
if not HAS_NAILGUN_PACKAGE:
module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun")
server_url = module.params['server_url']
username = module.params['username']
password = module.params['password']
entity = module.params['entity']
action = module.params['action']
params = module.params['params']
verify_ssl = module.params['verify_ssl']
server = ServerConfig(
url=server_url,
auth=(username, password),
verify=verify_ssl
)
ng = NailGun(server, entities, module)
    # Let's make a connection to the server with the given username and password
try:
org = entities.Organization(server)
org.search()
except Exception as e:
module.fail_json(msg="Failed to connect to Foreman server: %s " % e)
result = False
if entity == 'product':
if action == 'sync':
result = ng.sync_product(params)
else:
result = ng.product(params)
elif entity == 'repository':
if action == 'sync':
result = ng.sync_repository(params)
else:
result = ng.repository(params)
elif entity == 'manifest':
result = ng.manifest(params)
elif entity == 'repository_set':
result = ng.repository_set(params)
elif entity == 'sync_plan':
result = ng.sync_plan(params)
elif entity == 'content_view':
if action == 'publish':
result = ng.publish(params)
elif action == 'promote':
result = ng.promote(params)
else:
result = ng.content_view(params)
elif entity == 'lifecycle_environment':
result = ng.lifecycle_environment(params)
elif entity == 'activation_key':
result = ng.activation_key(params)
else:
        module.fail_json(changed=False, msg="Unsupported entity supplied")
module.exit_json(changed=result, result="%s updated" % entity)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
binaryage/drydrop | dryapp/pygments/formatter.py | 27 | 2607 | # -*- coding: utf-8 -*-
"""
pygments.formatter
~~~~~~~~~~~~~~~~~~
Base formatter class.
:copyright: 2006-2007 by Georg Brandl, Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from pygments.util import get_bool_opt
from pygments.styles import get_style_by_name
__all__ = ['Formatter']
def _lookup_style(style):
if isinstance(style, basestring):
return get_style_by_name(style)
return style
class Formatter(object):
"""
Converts a token stream to text.
Options accepted:
``style``
The style to use, can be a string or a Style subclass
(default: "default"). Not used by e.g. the
TerminalFormatter.
``full``
Tells the formatter to output a "full" document, i.e.
a complete self-contained document. This doesn't have
any effect for some formatters (default: false).
``title``
If ``full`` is true, the title that should be used to
caption the document (default: '').
``encoding``
If given, must be an encoding name. This will be used to
convert the Unicode token strings to byte strings in the
output. If it is "" or None, Unicode strings will be written
to the output file, which most file-like objects do not
support (default: None).
``outencoding``
Overrides ``encoding`` if given.
"""
#: Name of the formatter
name = None
#: Shortcuts for the formatter
aliases = []
    #: Filename patterns (fnmatch-style) that this formatter applies to
filenames = []
#: If True, this formatter outputs Unicode strings when no encoding
#: option is given.
unicodeoutput = True
def __init__(self, **options):
self.style = _lookup_style(options.get('style', 'default'))
self.full = get_bool_opt(options, 'full', False)
self.title = options.get('title', '')
self.encoding = options.get('encoding', None) or None
self.encoding = options.get('outencoding', None) or self.encoding
self.options = options
def get_style_defs(self, arg=''):
"""
Return the style definitions for the current style as a string.
``arg`` is an additional argument whose meaning depends on the
formatter used. Note that ``arg`` can also be a list or tuple
for some formatters like the html formatter.
"""
return ''
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
"""
raise NotImplementedError()
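    # Illustrative sketch of a minimal subclass (not part of this module):
    #
    #     class NullFormatter(Formatter):
    #         """Write token text verbatim, discarding token types."""
    #         name = 'Null'
    #         aliases = ['null']
    #         def format(self, tokensource, outfile):
    #             enc = self.encoding
    #             for ttype, value in tokensource:
    #                 outfile.write(value.encode(enc) if enc else value)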
| bsd-3-clause |
Ropes/PDX-Council-Minutes-Data | orm/tables.py | 1 | 1641 | # coding: utf-8
from sqlalchemy import BigInteger, Column, Date, ForeignKey, Index, Integer, String
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class Meetingdate(Base):
__tablename__ = 'MeetingDate'
__table_args__ = (
Index('MeetingDate_id_date_key', 'id', 'date'),
)
id = Column(BigInteger, primary_key=True)
date = Column(Date, nullable=False, unique=True)
class Token(Base):
__tablename__ = 'Token'
__table_args__ = (
Index('Token_token_dateid_key', 'token', 'dateid'),
)
tokenid = Column(BigInteger, primary_key=True)
count = Column(Integer, nullable=False, server_default=u'0')
dateid = Column(ForeignKey('MeetingDate.id'))
token = Column(String(50))
MeetingDate = relationship(u'Meetingdate')
class Tokenlink(Base):
__tablename__ = 'TokenLinks'
__table_args__ = (
Index('TokenLinks_source_target_index_distance_dateid_key', 'source', 'target', 'index', 'distance', 'dateid'),
)
dateid = Column(ForeignKey('MeetingDate.id'), nullable=False)
source = Column(ForeignKey('Token.tokenid'), nullable=False)
target = Column(ForeignKey('Token.tokenid'), nullable=False)
distance = Column(Integer, nullable=False)
index = Column(Integer, nullable=False)
linkid = Column(BigInteger, primary_key=True)
MeetingDate = relationship(u'Meetingdate')
Token = relationship(u'Token', primaryjoin='Tokenlink.source == Token.tokenid')
Token1 = relationship(u'Token', primaryjoin='Tokenlink.target == Token.tokenid')
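# Illustrative usage sketch (the connection string is hypothetical):
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker
#
#     engine = create_engine('postgresql://localhost/pdx_minutes')
#     session = sessionmaker(bind=engine)()
#     dates = session.query(Meetingdate).order_by(Meetingdate.date).all()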
| mit |
hajgato/easybuild-easyblocks | easybuild/easyblocks/h/hypre.py | 4 | 2250 | ##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Hypre, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
from easybuild.easyblocks.generic.configuremake import ConfigureMake
class EB_Hypre(ConfigureMake):
"""Support for building Hypre."""
def configure_step(self):
"""Configure Hypre build after setting extra configure options."""
self.cfg.update('configopts', '--with-MPI-include=%s' % os.getenv('MPI_INC_DIR'))
for dep in ["BLAS", "LAPACK"]:
libs = ' '.join(os.getenv('%s_STATIC_LIBS' % dep).split(','))
self.cfg.update('configopts', '--with-%s-libs="%s"' % (dep.lower(), libs))
self.cfg.update('configopts', '--with-%s-lib-dirs="%s"' % (dep.lower(),
os.getenv('%s_LIB_DIR' % dep)))
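        # For illustration (the values below are assumptions; the real ones
        # come from the toolchain environment): with OpenBLAS this might add
        # --with-blas-libs="libopenblas.a" --with-blas-lib-dirs="/opt/openblas/lib"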
super(EB_Hypre, self).configure_step()
def sanity_check_step(self):
"""Custom sanity check for Hypre."""
custom_paths = {
'files':['lib/libHYPRE.a'],
'dirs':['include']
}
super(EB_Hypre, self).sanity_check_step(custom_paths=custom_paths)
| gpl-2.0 |
vitan/django | django/contrib/gis/db/backends/oracle/introspection.py | 539 | 1977 | import sys
import cx_Oracle
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils import six
class OracleIntrospection(DatabaseIntrospection):
    # Associate any OBJECTVAR instances with GeometryField. This won't work
    # right on Oracle objects that aren't MDSYS.SDO_GEOMETRY, but it is the
    # only object type supported within Django anyway.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[cx_Oracle.OBJECT] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying USER_SDO_GEOM_METADATA to get the SRID and dimension information.
try:
cursor.execute(
'SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" '
'WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s',
(table_name.upper(), geo_col.upper())
)
row = cursor.fetchone()
except Exception as msg:
new_msg = (
'Could not find entry in USER_SDO_GEOM_METADATA '
'corresponding to "%s"."%s"\n'
'Error message: %s.') % (table_name, geo_col, msg)
six.reraise(Exception, Exception(new_msg), sys.exc_info()[2])
            # TODO: Research a way to find a more specific geometry field type
            # for the column's contents.
field_type = 'GeometryField'
# Getting the field parameters.
field_params = {}
dim, srid = row
if srid != 4326:
field_params['srid'] = srid
# Length of object array ( SDO_DIM_ARRAY ) is number of dimensions.
dim = len(dim)
if dim != 2:
field_params['dim'] = dim
finally:
cursor.close()
return field_type, field_params
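    # Illustrative sketch: for a geometry column registered in
    # USER_SDO_GEOM_METADATA with SRID 900913 and a 3-element DIMINFO array,
    # this method would return ('GeometryField', {'srid': 900913, 'dim': 3}).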
| bsd-3-clause |
AlexHill/django | django/utils/dates.py | 115 | 2296 | "Commonly-used date structures"
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
WEEKDAYS = {
0: _('Monday'), 1: _('Tuesday'), 2: _('Wednesday'), 3: _('Thursday'), 4: _('Friday'),
5: _('Saturday'), 6: _('Sunday')
}
WEEKDAYS_ABBR = {
0: _('Mon'), 1: _('Tue'), 2: _('Wed'), 3: _('Thu'), 4: _('Fri'),
5: _('Sat'), 6: _('Sun')
}
WEEKDAYS_REV = {
'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4,
'saturday': 5, 'sunday': 6
}
MONTHS = {
1: _('January'), 2: _('February'), 3: _('March'), 4: _('April'), 5: _('May'), 6: _('June'),
7: _('July'), 8: _('August'), 9: _('September'), 10: _('October'), 11: _('November'),
12: _('December')
}
MONTHS_3 = {
1: _('jan'), 2: _('feb'), 3: _('mar'), 4: _('apr'), 5: _('may'), 6: _('jun'),
7: _('jul'), 8: _('aug'), 9: _('sep'), 10: _('oct'), 11: _('nov'), 12: _('dec')
}
MONTHS_3_REV = {
'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'aug': 8,
'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12
}
MONTHS_AP = { # month names in Associated Press style
1: pgettext_lazy('abbrev. month', 'Jan.'),
2: pgettext_lazy('abbrev. month', 'Feb.'),
3: pgettext_lazy('abbrev. month', 'March'),
4: pgettext_lazy('abbrev. month', 'April'),
5: pgettext_lazy('abbrev. month', 'May'),
6: pgettext_lazy('abbrev. month', 'June'),
7: pgettext_lazy('abbrev. month', 'July'),
8: pgettext_lazy('abbrev. month', 'Aug.'),
9: pgettext_lazy('abbrev. month', 'Sept.'),
10: pgettext_lazy('abbrev. month', 'Oct.'),
11: pgettext_lazy('abbrev. month', 'Nov.'),
12: pgettext_lazy('abbrev. month', 'Dec.')
}
MONTHS_ALT = { # required for long date representation by some locales
1: pgettext_lazy('alt. month', 'January'),
2: pgettext_lazy('alt. month', 'February'),
3: pgettext_lazy('alt. month', 'March'),
4: pgettext_lazy('alt. month', 'April'),
5: pgettext_lazy('alt. month', 'May'),
6: pgettext_lazy('alt. month', 'June'),
7: pgettext_lazy('alt. month', 'July'),
8: pgettext_lazy('alt. month', 'August'),
9: pgettext_lazy('alt. month', 'September'),
10: pgettext_lazy('alt. month', 'October'),
11: pgettext_lazy('alt. month', 'November'),
12: pgettext_lazy('alt. month', 'December')
}
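# Illustrative usage sketch: these lazy strings resolve when coerced, using
# the active translation, e.g. under the default English locale:
#
#     from django.utils.dates import WEEKDAYS, MONTHS_AP
#     assert unicode(WEEKDAYS[0]) == u'Monday'
#     assert unicode(MONTHS_AP[9]) == u'Sept.'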
| bsd-3-clause |
julen/translate | translate/convert/ini2po.py | 25 | 5481 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert .ini files to Gettext PO localization files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/ini2po.html
for examples and usage instructions.
"""
import logging
from translate.storage import po
logger = logging.getLogger(__name__)
class ini2po:
"""Convert a .ini file to a .po file for handling the translation..."""
def convert_store(self, input_store, duplicatestyle="msgctxt"):
"""Convert a .ini file to a .po file..."""
output_store = po.pofile()
output_header = output_store.header()
output_header.addnote("extracted from %s" % input_store.filename,
"developer")
for input_unit in input_store.units:
output_unit = self.convert_unit(input_unit, "developer")
if output_unit is not None:
output_store.addunit(output_unit)
output_store.removeduplicates(duplicatestyle)
return output_store
def merge_store(self, template_store, input_store, blankmsgstr=False,
duplicatestyle="msgctxt"):
"""Convert two .ini files to a .po file..."""
output_store = po.pofile()
output_header = output_store.header()
note = "extracted from %s, %s" % (template_store.filename,
input_store.filename)
output_header.addnote(note, "developer")
input_store.makeindex()
for template_unit in template_store.units:
origpo = self.convert_unit(template_unit, "developer")
# Try and find a translation of the same name...
template_unit_name = "".join(template_unit.getlocations())
if template_unit_name in input_store.locationindex:
translatedini = input_store.locationindex[template_unit_name]
translatedpo = self.convert_unit(translatedini, "translator")
else:
translatedpo = None
# If we have a valid po unit, get the translation and add it...
if origpo is not None:
if translatedpo is not None and not blankmsgstr:
origpo.target = translatedpo.source
output_store.addunit(origpo)
elif translatedpo is not None:
logger.error("error converting original ini definition %s",
origpo.name)
output_store.removeduplicates(duplicatestyle)
return output_store
def convert_unit(self, input_unit, commenttype):
"""Convert a .ini unit to a .po unit. Returns None if empty or not for
translation.
"""
if input_unit is None:
return None
        # Build the PO unit with UTF-8 encoding.
output_unit = po.pounit(encoding="UTF-8")
output_unit.addlocation("".join(input_unit.getlocations()))
output_unit.source = input_unit.source
output_unit.target = ""
return output_unit
def convertini(input_file, output_file, template_file, pot=False,
duplicatestyle="msgctxt", dialect="default"):
"""Read in *input_file* using ini, converts using :class:`ini2po`, writes
to *output_file*.
"""
from translate.storage import ini
input_store = ini.inifile(input_file, dialect=dialect)
convertor = ini2po()
if template_file is None:
output_store = convertor.convert_store(input_store,
duplicatestyle=duplicatestyle)
else:
template_store = ini.inifile(template_file, dialect=dialect)
output_store = convertor.merge_store(template_store, input_store,
blankmsgstr=pot,
duplicatestyle=duplicatestyle)
if output_store.isempty():
return 0
output_file.write(str(output_store))
return 1
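# Illustrative usage sketch (file names are hypothetical): besides the
# command-line entry point below, the convertor can be driven directly with
# file objects:
#
#     with open('messages.ini', 'rb') as inp, open('messages.po', 'wb') as out:
#         convertini(inp, out, template_file=None)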
def convertisl(input_file, output_file, template_file, pot=False,
duplicatestyle="msgctxt", dialect="inno"):
    return convertini(input_file, output_file, template_file, pot=pot,
                      duplicatestyle=duplicatestyle, dialect=dialect)
def main(argv=None):
from translate.convert import convert
formats = {
"ini": ("po", convertini), ("ini", "ini"): ("po", convertini),
"isl": ("po", convertisl), ("isl", "isl"): ("po", convertisl),
"iss": ("po", convertisl), ("iss", "iss"): ("po", convertisl),
}
parser = convert.ConvertOptionParser(formats, usetemplates=True,
usepots=True, description=__doc__)
parser.add_duplicates_option()
parser.passthrough.append("pot")
parser.run(argv)
if __name__ == '__main__':
main()
| gpl-2.0 |
KohlsTechnology/ansible | lib/ansible/modules/network/nxos/nxos_ntp_auth.py | 18 | 9573 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_ntp_auth
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages NTP authentication.
description:
- Manages NTP authentication.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- If C(state=absent), the module will remove the given key configuration if it exists.
- If C(state=absent) and C(authentication=on), authentication will be turned off.
options:
key_id:
description:
- Authentication key identifier (numeric).
md5string:
description:
- MD5 String.
auth_type:
description:
- Whether the given md5string is in cleartext or
has been encrypted. If in cleartext, the device
will encrypt it before storing it.
default: text
choices: ['text', 'encrypt']
trusted_key:
description:
- Whether the given key is required to be supplied by a time source
for the device to synchronize to the time source.
choices: [ 'false', 'true' ]
default: 'false'
authentication:
description:
- Turns NTP authentication on or off.
choices: ['on', 'off']
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Basic NTP authentication configuration
- nxos_ntp_auth:
key_id: 32
md5string: hello
auth_type: text
'''
RETURN = '''
commands:
description: command sent to the device
returned: always
type: list
sample: ["ntp authentication-key 32 md5 helloWorld 0", "ntp trusted-key 32"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
if 'show run' not in command:
command = {
'command': command,
'output': 'json',
}
else:
command = {
'command': command,
'output': 'text',
}
return run_commands(module, [command])
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_ntp_auth(module):
command = 'show ntp authentication-status'
body = execute_show_command(command, module)[0]
ntp_auth_str = body['authentication']
if 'enabled' in ntp_auth_str:
ntp_auth = True
else:
ntp_auth = False
return ntp_auth
def get_ntp_trusted_key(module):
trusted_key_list = []
command = 'show run | inc ntp.trusted-key'
trusted_key_str = execute_show_command(command, module)[0]
if trusted_key_str:
trusted_keys = trusted_key_str.splitlines()
else:
trusted_keys = []
for line in trusted_keys:
if line:
trusted_key_list.append(str(line.split()[2]))
return trusted_key_list
def get_ntp_auth_key(key_id, module):
authentication_key = {}
command = 'show run | inc ntp.authentication-key.{0}'.format(key_id)
auth_regex = (r".*ntp\sauthentication-key\s(?P<key_id>\d+)\s"
r"md5\s(?P<md5string>\S+)\s(?P<atype>\S+).*")
body = execute_show_command(command, module)[0]
try:
match_authentication = re.match(auth_regex, body, re.DOTALL)
group_authentication = match_authentication.groupdict()
authentication_key['key_id'] = group_authentication['key_id']
authentication_key['md5string'] = group_authentication['md5string']
if group_authentication['atype'] == '7':
authentication_key['auth_type'] = 'encrypt'
else:
authentication_key['auth_type'] = 'text'
except (AttributeError, TypeError):
authentication_key = {}
return authentication_key
def get_ntp_auth_info(key_id, module):
auth_info = get_ntp_auth_key(key_id, module)
trusted_key_list = get_ntp_trusted_key(module)
auth_power = get_ntp_auth(module)
if key_id in trusted_key_list:
auth_info['trusted_key'] = 'true'
else:
auth_info['trusted_key'] = 'false'
if auth_power:
auth_info['authentication'] = 'on'
else:
auth_info['authentication'] = 'off'
return auth_info
def auth_type_to_num(auth_type):
if auth_type == 'encrypt':
return '7'
else:
return '0'
def set_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
ntp_auth_cmds = []
if key_id and md5string:
auth_type_num = auth_type_to_num(auth_type)
ntp_auth_cmds.append(
'ntp authentication-key {0} md5 {1} {2}'.format(
key_id, md5string, auth_type_num))
if trusted_key == 'true':
ntp_auth_cmds.append(
'ntp trusted-key {0}'.format(key_id))
elif trusted_key == 'false':
ntp_auth_cmds.append(
'no ntp trusted-key {0}'.format(key_id))
if authentication == 'on':
ntp_auth_cmds.append(
'ntp authenticate')
elif authentication == 'off':
ntp_auth_cmds.append(
'no ntp authenticate')
return ntp_auth_cmds
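# For example (illustrative values): set_ntp_auth_key('32', 'hello', 'text',
# 'true', 'on') returns ['ntp authentication-key 32 md5 hello 0',
# 'ntp trusted-key 32', 'ntp authenticate'].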
def remove_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
auth_remove_cmds = []
if key_id:
auth_type_num = auth_type_to_num(auth_type)
auth_remove_cmds.append(
'no ntp authentication-key {0} md5 {1} {2}'.format(
key_id, md5string, auth_type_num))
if authentication:
auth_remove_cmds.append(
'no ntp authenticate')
return auth_remove_cmds
def main():
argument_spec = dict(
key_id=dict(type='str'),
md5string=dict(type='str'),
auth_type=dict(choices=['text', 'encrypt'], default='text'),
trusted_key=dict(choices=['true', 'false'], default='false'),
authentication=dict(choices=['on', 'off']),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
key_id = module.params['key_id']
md5string = module.params['md5string']
auth_type = module.params['auth_type']
trusted_key = module.params['trusted_key']
authentication = module.params['authentication']
state = module.params['state']
if key_id:
if not trusted_key and not md5string:
module.fail_json(msg='trusted_key or md5string MUST be specified')
args = dict(key_id=key_id, md5string=md5string,
auth_type=auth_type, trusted_key=trusted_key,
authentication=authentication)
changed = False
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_ntp_auth_info(key_id, module)
end_state = existing
delta = dict(set(proposed.items()).difference(existing.items()))
commands = []
if state == 'present':
if delta:
command = set_ntp_auth_key(
key_id, md5string, delta.get('auth_type'),
delta.get('trusted_key'), delta.get('authentication'))
if command:
commands.append(command)
elif state == 'absent':
auth_toggle = None
if existing.get('authentication') == 'on':
auth_toggle = True
if not existing.get('key_id'):
key_id = None
command = remove_ntp_auth_key(
key_id, md5string, auth_type, trusted_key, auth_toggle)
if command:
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
end_state = get_ntp_auth_info(key_id, module)
delta = dict(set(end_state.items()).difference(existing.items()))
if delta or (len(existing) != len(end_state)):
changed = True
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
results['end_state'] = end_state
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
rahushen/ansible | test/runner/lib/powershell_import_analysis.py | 102 | 2276 | """Analyze powershell import statements."""
from __future__ import absolute_import, print_function
import os
import re
from lib.util import (
display,
)
def get_powershell_module_utils_imports(powershell_targets):
"""Return a dictionary of module_utils names mapped to sets of powershell file paths.
:type powershell_targets: list[TestTarget]
:rtype: dict[str, set[str]]
"""
module_utils = enumerate_module_utils()
imports_by_target_path = {}
for target in powershell_targets:
imports_by_target_path[target.path] = extract_powershell_module_utils_imports(target.path, module_utils)
imports = dict([(module_util, set()) for module_util in module_utils])
for target_path in imports_by_target_path:
for module_util in imports_by_target_path[target_path]:
imports[module_util].add(target_path)
for module_util in sorted(imports):
if not imports[module_util]:
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def enumerate_module_utils():
"""Return a list of available module_utils imports.
:rtype: set[str]
"""
return set(os.path.splitext(p)[0] for p in os.listdir('lib/ansible/module_utils/powershell') if os.path.splitext(p)[1] == '.psm1')
def extract_powershell_module_utils_imports(path, module_utils):
"""Return a list of module_utils imports found in the specified source file.
:type path: str
:type module_utils: set[str]
:rtype: set[str]
"""
imports = set()
with open(path, 'r') as module_fd:
code = module_fd.read()
if '# POWERSHELL_COMMON' in code:
imports.add('Ansible.ModuleUtils.Legacy')
lines = code.splitlines()
line_number = 0
for line in lines:
line_number += 1
match = re.search(r'(?i)^#\s*requires\s+-module(?:s?)\s*(Ansible\.ModuleUtils\..+)', line)
if not match:
continue
import_name = match.group(1)
if import_name in module_utils:
imports.add(import_name)
else:
display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
return imports
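# Illustrative sketch: for a module containing the line
#     #Requires -Module Ansible.ModuleUtils.Legacy
# (with that name present in `module_utils`), this function returns
# {'Ansible.ModuleUtils.Legacy'}; the legacy '# POWERSHELL_COMMON' marker is
# treated as an implicit import of the same module_util.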
| gpl-3.0 |
PopCap/GameIdea | Engine/Source/ThirdParty/HTML5/emsdk/Win64/python/2.7.5.3_64bit/Lib/idlelib/Bindings.py | 130 | 3295 | """Define the menu contents, hotkeys, and event bindings.
There is additional configuration information in the EditorWindow class (and
subclasses): the menus are created there based on the menu_specs (class)
variable, and menus not created are silently skipped in the code here. This
makes it possible, for example, to define a Debug menu which is only present in
the PythonShell window, and a Format menu which is only present in the Editor
windows.
"""
import sys
from idlelib.configHandler import idleConf
from idlelib import macosxSupport
menudefs = [
# underscore prefixes character to underscore
('file', [
('_New Window', '<<open-new-window>>'),
('_Open...', '<<open-window-from-file>>'),
('Open _Module...', '<<open-module>>'),
('Class _Browser', '<<open-class-browser>>'),
('_Path Browser', '<<open-path-browser>>'),
None,
('_Save', '<<save-window>>'),
('Save _As...', '<<save-window-as-file>>'),
('Save Cop_y As...', '<<save-copy-of-window-as-file>>'),
None,
('Prin_t Window', '<<print-window>>'),
None,
('_Close', '<<close-window>>'),
('E_xit', '<<close-all-windows>>'),
]),
('edit', [
('_Undo', '<<undo>>'),
('_Redo', '<<redo>>'),
None,
('Cu_t', '<<cut>>'),
('_Copy', '<<copy>>'),
('_Paste', '<<paste>>'),
('Select _All', '<<select-all>>'),
None,
('_Find...', '<<find>>'),
('Find A_gain', '<<find-again>>'),
('Find _Selection', '<<find-selection>>'),
('Find in Files...', '<<find-in-files>>'),
('R_eplace...', '<<replace>>'),
('Go to _Line', '<<goto-line>>'),
]),
('format', [
('_Indent Region', '<<indent-region>>'),
('_Dedent Region', '<<dedent-region>>'),
('Comment _Out Region', '<<comment-region>>'),
('U_ncomment Region', '<<uncomment-region>>'),
('Tabify Region', '<<tabify-region>>'),
('Untabify Region', '<<untabify-region>>'),
('Toggle Tabs', '<<toggle-tabs>>'),
('New Indent Width', '<<change-indentwidth>>'),
]),
('run', [
('Python Shell', '<<open-python-shell>>'),
]),
('shell', [
('_View Last Restart', '<<view-restart>>'),
('_Restart Shell', '<<restart-shell>>'),
]),
('debug', [
('_Go to File/Line', '<<goto-file-line>>'),
('!_Debugger', '<<toggle-debugger>>'),
('_Stack Viewer', '<<open-stack-viewer>>'),
('!_Auto-open Stack Viewer', '<<toggle-jit-stack-viewer>>'),
]),
('options', [
('_Configure IDLE...', '<<open-config-dialog>>'),
None,
]),
('help', [
('_About IDLE', '<<about-idle>>'),
None,
('_IDLE Help', '<<help>>'),
('Python _Docs', '<<python-docs>>'),
]),
]
if macosxSupport.runningAsOSXApp():
# Running as a proper MacOS application bundle. This block restructures
# the menus a little to make them conform better to the HIG.
quitItem = menudefs[0][1][-1]
closeItem = menudefs[0][1][-2]
# Remove the last 3 items of the file menu: a separator, close window and
# quit. Close window will be reinserted just above the save item, where
# it should be according to the HIG. Quit is in the application menu.
del menudefs[0][1][-3:]
menudefs[0][1].insert(6, closeItem)
# Remove the 'About' entry from the help menu, it is in the application
# menu
del menudefs[-1][1][0:2]
default_keydefs = idleConf.GetCurrentKeySet()
del sys
| bsd-2-clause |
Qalthos/ansible | lib/ansible/modules/commands/command.py | 14 | 11529 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: command
short_description: Execute commands on targets
version_added: historical
description:
- The C(command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes.
- The command(s) will not be
processed through the shell, so variables like C($HOME) and operations
like C("<"), C(">"), C("|"), C(";") and C("&") will not work.
Use the M(shell) module if you need these features.
- To create C(command) tasks that are easier to read,
pass parameters using the C(args) L(task keyword,../reference_appendices/playbooks_keywords.html#task).
- For Windows targets, use the M(win_command) module instead.
options:
free_form:
description:
- The command module takes a free form command to run.
- There is no actual parameter named 'free form'.
- See the examples on how to use this module.
required: yes
argv:
description:
- Passes the command as a list rather than a string.
- Use C(argv) to avoid quoting values that would otherwise be interpreted incorrectly (for example "user name").
- Only the string or the list form can be
provided, not both. One or the other must be provided.
version_added: "2.6"
creates:
description:
- A filename or (since 2.0) glob pattern. If it already exists, this step B(won't) be run.
removes:
description:
- A filename or (since 2.0) glob pattern. If it already exists, this step B(will) be run.
version_added: "0.8"
chdir:
description:
- Change into this directory before running the command.
version_added: "0.6"
warn:
description:
- Enable or disable task warnings.
type: bool
default: yes
version_added: "1.8"
stdin:
description:
- Set the stdin of the command directly to the specified value.
version_added: "2.4"
stdin_add_newline:
type: bool
default: yes
description:
- If set to C(yes), append a newline to stdin data.
version_added: "2.8"
strip_empty_ends:
description:
- Strip empty lines from the end of stdout/stderr in result.
version_added: "2.8"
type: bool
default: yes
notes:
- If you want to run a command through the shell (say you are using C(<), C(>), C(|), etc), you actually want the M(shell) module instead.
Parsing shell metacharacters can lead to unexpected commands being executed if quoting is not done correctly so it is more secure to
use the C(command) module when possible.
- " C(creates), C(removes), and C(chdir) can be specified after the command.
For instance, if you only want to run a command if a certain file does not exist, use this."
- Check mode is supported when passing C(creates) or C(removes). If running in check mode and either of these are specified, the module will
check for the existence of the file and report the correct changed status. If these are not supplied, the task will be skipped.
- The C(executable) parameter is removed since version 2.4. If you have a need for this parameter, use the M(shell) module instead.
- For Windows targets, use the M(win_command) module instead.
- For rebooting systems, use the M(reboot) or M(win_reboot) module.
seealso:
- module: raw
- module: script
- module: shell
- module: win_command
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = r'''
- name: return motd to registered var
command: cat /etc/motd
register: mymotd
- name: Run command if /path/to/database does not exist (without 'args').
command: /usr/bin/make_database.sh db_user db_name creates=/path/to/database
# 'args' is a task keyword, passed at the same level as the module
- name: Run command if /path/to/database does not exist (with 'args').
command: /usr/bin/make_database.sh db_user db_name
args:
creates: /path/to/database
- name: Change the working directory to somedir/ and run the command as db_owner if /path/to/database does not exist.
command: /usr/bin/make_database.sh db_user db_name
become: yes
become_user: db_owner
args:
chdir: somedir/
creates: /path/to/database
# 'argv' is a parameter, indented one level from the module
- name: Use 'argv' to send a command as a list - leave 'command' empty
command:
argv:
- /usr/bin/make_database.sh
- Username with whitespace
- dbname with whitespace
- name: safely use templated variable to run command. Always use the quote filter to avoid injection issues.
command: cat {{ myfile|quote }}
register: myoutput
'''
RETURN = r'''
cmd:
description: the cmd that was run on the remote machine
returned: always
type: list
sample:
- echo
- hello
delta:
description: cmd end time - cmd start time
returned: always
type: str
sample: 0:00:00.001529
end:
description: cmd end time
returned: always
type: str
sample: '2017-09-29 22:03:48.084657'
start:
description: cmd start time
returned: always
type: str
sample: '2017-09-29 22:03:48.083128'
'''
import datetime
import glob
import os
import shlex
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.common.collections import is_iterable
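# check_command() inspects a raw command line and warns when an Ansible module or become directive would do the job instead.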
def check_command(module, commandline):
arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
'ln': 'state=link', 'mkdir': 'state=directory',
'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch'}
commands = {'curl': 'get_url or uri', 'wget': 'get_url or uri',
'svn': 'subversion', 'service': 'service',
'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt',
'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'replace, lineinfile or template',
'dnf': 'dnf', 'zypper': 'zypper'}
become = ['sudo', 'su', 'pbrun', 'pfexec', 'runas', 'pmrun', 'machinectl']
if isinstance(commandline, list):
command = commandline[0]
else:
command = commandline.split()[0]
command = os.path.basename(command)
disable_suffix = "If you need to use command because {mod} is insufficient you can add" \
" 'warn: false' to this command task or set 'command_warnings=False' in" \
" ansible.cfg to get rid of this message."
substitutions = {'mod': None, 'cmd': command}
if command in arguments:
msg = "Consider using the {mod} module with {subcmd} rather than running '{cmd}'. " + disable_suffix
substitutions['mod'] = 'file'
substitutions['subcmd'] = arguments[command]
module.warn(msg.format(**substitutions))
if command in commands:
msg = "Consider using the {mod} module rather than running '{cmd}'. " + disable_suffix
substitutions['mod'] = commands[command]
module.warn(msg.format(**substitutions))
if command in become:
module.warn("Consider using 'become', 'become_method', and 'become_user' rather than running %s" % (command,))
def main():
# the command module is the one ansible module that does not take key=value args
# hence don't copy this one if you are looking to build others!
module = AnsibleModule(
argument_spec=dict(
_raw_params=dict(),
_uses_shell=dict(type='bool', default=False),
argv=dict(type='list'),
chdir=dict(type='path'),
executable=dict(),
creates=dict(type='path'),
removes=dict(type='path'),
# The default for this really comes from the action plugin
warn=dict(type='bool', default=True),
stdin=dict(required=False),
stdin_add_newline=dict(type='bool', default=True),
strip_empty_ends=dict(type='bool', default=True),
),
supports_check_mode=True,
)
shell = module.params['_uses_shell']
chdir = module.params['chdir']
executable = module.params['executable']
args = module.params['_raw_params']
argv = module.params['argv']
creates = module.params['creates']
removes = module.params['removes']
warn = module.params['warn']
stdin = module.params['stdin']
stdin_add_newline = module.params['stdin_add_newline']
strip = module.params['strip_empty_ends']
if not shell and executable:
module.warn("As of Ansible 2.4, the parameter 'executable' is no longer supported with the 'command' module. Not using '%s'." % executable)
executable = None
if (not args or args.strip() == '') and not argv:
module.fail_json(rc=256, msg="no command given")
if args and argv:
module.fail_json(rc=256, msg="only command or argv can be given, not both")
if not shell and args:
args = shlex.split(args)
args = args or argv
# All args must be strings
if is_iterable(args, include_strings=False):
args = [to_native(arg, errors='surrogate_or_strict', nonstring='simplerepr') for arg in args]
if chdir:
chdir = os.path.abspath(chdir)
os.chdir(chdir)
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
if glob.glob(creates):
module.exit_json(
cmd=args,
stdout="skipped, since %s exists" % creates,
changed=False,
rc=0
)
if removes:
# do not run the command if the line contains removes=filename
# and the filename does not exist. This allows idempotence
# of command executions.
if not glob.glob(removes):
module.exit_json(
cmd=args,
stdout="skipped, since %s does not exist" % removes,
changed=False,
rc=0
)
if warn:
check_command(module, args)
startd = datetime.datetime.now()
if not module.check_mode:
rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None, data=stdin, binary_data=(not stdin_add_newline))
elif creates or removes:
rc = 0
out = err = b'Command would have run if not in check mode'
else:
module.exit_json(msg="skipped, running in check mode", skipped=True)
endd = datetime.datetime.now()
delta = endd - startd
if strip:
out = out.rstrip(b"\r\n")
err = err.rstrip(b"\r\n")
result = dict(
cmd=args,
stdout=out,
stderr=err,
rc=rc,
start=str(startd),
end=str(endd),
delta=str(delta),
changed=True,
)
if rc != 0:
module.fail_json(msg='non-zero return code', **result)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pip-9.0.1-py2.7.egg/pip/_vendor/requests/packages/chardet/sjisprober.py | 1777 | 3764 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return self._mContextAnalyzer.get_charset_name()
def feed(self, aBuf):
aLen = len(aBuf)
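        # Run every byte through the Shift_JIS state machine, feeding whole characters to the context and distribution analysers.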
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
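        # Overall confidence is whichever analyser is more certain.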
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
| gpl-3.0 |
RuiNascimento/krepo | script.module.lambdascrapers/lib/lambdascrapers/sources_placenta/en_placenta-1.7.8/to_be_fixed/needsfixing/streamlord.py | 2 | 6515 | # -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import re,traceback,urllib,urlparse
from resources.lib.modules import control
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import jsunpack
from resources.lib.modules import log_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['streamlord.com']
self.base_link = 'http://www.streamlord.com'
self.search_link = '/searchtest.php'
self.user = control.setting('streamlord.user')
self.password = control.setting('streamlord.pass')
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('StreamLord - Exception: \n' + str(failure))
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('StreamLord - Exception: \n' + str(failure))
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('StreamLord - Exception: \n' + str(failure))
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
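            # When credentials are configured, log in first and carry the session cookie into later requests.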
if (self.user != '' and self.password != ''): #raise Exception()
login = urlparse.urljoin(self.base_link, '/login.html')
post = urllib.urlencode({'username': self.user, 'password': self.password, 'submit': 'Login'})
cookie = client.request(login, post=post, output='cookie', close=False)
r = client.request(login, post=post, cookie=cookie, output='extended')
headers = {'User-Agent': r[3]['User-Agent'], 'Cookie': r[4]}
else:
headers = {}
if not str(url).startswith('http'):
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
year = data['year']
query = urlparse.urljoin(self.base_link, self.search_link)
post = urllib.urlencode({'searchapi2': title})
r = client.request(query, post=post, headers=headers)
if 'tvshowtitle' in data:
r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
else:
r = re.findall('(watch-movie-.+?-\d+\.html)', r)
r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in r]
r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
r = [i[0] for i in r][0]
u = urlparse.urljoin(self.base_link, r)
for i in range(3):
r = client.request(u, headers=headers)
if not 'failed' in r: break
if 'season' in data and 'episode' in data:
r = re.findall('(episode-.+?-.+?\d+.+?\d+-\d+.html)', r)
r = [i for i in r if '-s%02de%02d-' % (int(data['season']), int(data['episode'])) in i.lower()][0]
r = urlparse.urljoin(self.base_link, r)
r = client.request(r, headers=headers)
else:
r = urlparse.urljoin(self.base_link, url)
                r = client.request(r, headers=headers)
quality = 'HD' if '-movie-' in r else 'SD'
try:
f = re.findall('''["']sources['"]\s*:\s*\[(.*?)\]''', r)[0]
f = re.findall('''['"]*file['"]*\s*:\s*([^\(]+)''', f)[0]
u = re.findall('function\s+%s[^{]+{\s*([^}]+)' % f, r)[0]
u = re.findall('\[([^\]]+)[^+]+\+\s*([^.]+).*?getElementById\("([^"]+)', u)[0]
a = re.findall('var\s+%s\s*=\s*\[([^\]]+)' % u[1], r)[0]
b = client.parseDOM(r, 'span', {'id': u[2]})[0]
url = u[0] + a + b
url = url.replace('"', '').replace(',', '').replace('\/', '/')
url += '|' + urllib.urlencode(headers)
except:
try:
url = r = jsunpack.unpack(r)
url = url.replace('"', '')
except:
url = re.findall(r'sources[\'"]\s*:\s*\[.*?file[\'"]\s*:\s*(\w+)\(\).*function\s+\1\(\)\s*\{\s*return\([\'"]([^\'"]+)',r,re.DOTALL)[0][1]
sources.append({'source': 'cdn', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False, 'autoplay': True})
return sources
except:
failure = traceback.format_exc()
log_utils.log('StreamLord - Exception: \n' + str(failure))
return sources
def resolve(self, url):
return url | gpl-2.0 |
nonnib/eve-metrics | web2py/applications/evemetrics/controllers/servicemonitor.py | 2 | 4397 | # -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
import types, locale, datetime, collections, pyodbc
import time
from pprint import pprint
import sys
INFO_BY_STATUS = {
-1 : {"background": "row_none", "icon": "monitor_notrun.png"}, # Pending
0 : {"background": "row_none", "icon": "monitor_notrun.png"}, # Unknown
1 : {"background": "row_progress", "icon": "monitor_running.png"}, # In progress
2 : {"background": "row_good", "icon": "monitor_good.png"}, # Finished
3 : {"background": "row_warning", "icon": "monitor_warning.png"}, # Warning
4 : {"background": "row_bad", "icon": "monitor_bad.png"}, # Error
}
def index():
if "index" not in request.url:
redirect(request.url + "/index")
return {}
def ProcessRows(rows):
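    # Flatten each status row into a dict and attach the row background class and icon for its statusID.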
ret = {}
for r in rows:
d = {
"statusDetail" : r.statusDetail,
"statusID" : r.statusID,
"status" : r.status,
"duration" : r.duration,
"avgDuration" : r.avgDuration,
"percentComplete" : r.percentComplete,
"startTime" : r.startTime,
"inOvertime" : r.inOvertime,
"percentComplete" : r.percentComplete,
}
info = INFO_BY_STATUS.get(d["statusID"], {})
d = dict(d.items() + info.items())
if r.percentComplete > 1.5 and r.statusID == 1:
pct = " (overdue)"
if int(r.duration.split(":")[0]) >= 1:
d["background"] = "row_danger"
d["icon"] = "monitor_running_error.png"
else:
pct = " (%.0f%%)" % (100*r.percentComplete)
d["percentCompleteString"] = pct
ret[r.name] = d
return ret
def JobStatus():
sql = "SELECT * FROM ebs_FACTORY.monitor.jobExecutionStatus"
rows = dbmetrics.executesql(sql)
ret = ProcessRows(rows)
return {"rows" : ret, "title": "Job Status"}
def SSISStatus():
sql = "SELECT * FROM ebs_FACTORY.monitor.ssisExecutionStatus"
rows = dbmetrics.executesql(sql)
ret = ProcessRows(rows)
return {"rows" : ret, "title": "SSIS Package Status"}
def TasksInProgress():
factorySql = "EXEC ebs_FACTORY.factory.Events_InProgress"
factoryRows = dbmetrics.executesql(factorySql)
metricsSql = "EXEC ebs_METRICS.metric.Events_InProgress"
metricsRows = dbmetrics.executesql(metricsSql)
taskManagerSQL = "EXEC rdb_TASKMANAGER.task.Task_TasksInProgress"
taskManagerRows = dbmetrics.executesql(taskManagerSQL)
taskManagerIdleSQL = "EXEC rdb_TASKMANAGER.task.Task_IdleTasks"
taskManagerIdleRows = dbmetrics.executesql(taskManagerIdleSQL)
taskManagerFailedSQL = "EXEC rdb_TASKMANAGER.task.Task_FailedTasks"
taskManagerFailedRows = dbmetrics.executesql(taskManagerFailedSQL)
results = {
"results": [("EVE_Metrics tasks in progress", metricsRows),
("Factory tasks in progress", factoryRows),
("Task Manager tasks in progress", taskManagerRows)],
"idleFailed": [("Task Manager idle tasks", taskManagerIdleRows),
("Task Manager failed tasks", taskManagerFailedRows)]
}
return results
def TaskReport():
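    # Look up the available reports, then run the selected one for the requested date (defaults to today).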
rows = dbmetrics.executesql("EXEC metric.TaskReports_Select")
report = request.vars.report or "nightly.process"
dt = request.vars.dt
if dt:
dt = datetime.datetime.strptime(dt, "%Y-%m-%d")
else:
dt = datetime.datetime.today()
if dt > datetime.datetime.now():
session.flash = "Date is in the future, you silly billy"
redirect(request.env.http_referer)
dtString = dt.strftime("%Y-%m-%d")
reports = collections.OrderedDict()
for r in rows:
reports[r.report] = r.title
sql = "EXEC metric.TaskReports_Report '%s', '%s'" % (report, dtString)
#sql = "EXEC metric.TaskReports_Report '%s'" % (report)
rows = dbmetrics.executesql(sql)
currDate = datetime.datetime.today()
for r in rows:
print r.startTime, type(r.startTime)
return {
"rows" : rows,
"currDate" : currDate,
"reports" : reports,
"report" : report,
"dt" : dt,
"dtString" : dtString,
}
| mit |
etzhou/edx-platform | common/djangoapps/dark_lang/migrations/0001_initial.py | 114 | 4801 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'DarkLangConfig'
db.create_table('dark_lang_darklangconfig', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('change_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.PROTECT)),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
('released_languages', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('dark_lang', ['DarkLangConfig'])
def backwards(self, orm):
# Deleting model 'DarkLangConfig'
db.delete_table('dark_lang_darklangconfig')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dark_lang.darklangconfig': {
'Meta': {'object_name': 'DarkLangConfig'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'released_languages': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['dark_lang']
| agpl-3.0 |
stanonyime/pynojo | pynojo/view/navbar.py | 3 | 1516 | # $File: navbar.py
# $Date: Fri Feb 17 00:18:20 2012 +0800
#
# Copyright (C) 2012 the pynojo development team <see AUTHORS file>
#
# Contributors to this file:
# Kai Jia <jia.kai66@gmail.com>
#
# This file is part of pynojo
#
# pynojo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pynojo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pynojo. If not, see <http://www.gnu.org/licenses/>.
"""The content of the navigation bar. See the source of this file for
details."""
class PageDesc:
"""describe a page that should appear in the navigation bar"""
route_name = None
_title = None
def __init__(self, route_name, title):
"""
:param title: a callable to generate the page title which is
displayed in the navigation bar.
"""
self.route_name = route_name
self._title = title
@property
def title(self):
"""the title of this page"""
return self._title()
PAGES = [
PageDesc('home', lambda: _('Home')),
PageDesc('about', lambda: _('About')),
]
| gpl-3.0 |