repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
rickerc/nova_audit | nova/tests/api/ec2/test_middleware.py | 11 | 7918 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet.green import httplib
from lxml import etree
import mox
from oslo.config import cfg
import webob
import webob.dec
import webob.exc
from nova.api import ec2
from nova import context
from nova import exception
from nova.openstack.common import timeutils
from nova import test
from nova import wsgi
CONF = cfg.CONF
@webob.dec.wsgify
def conditional_forbid(req):
"""Helper wsgi app returns 403 if param 'die' is 1."""
if 'die' in req.params and req.params['die'] == '1':
raise webob.exc.HTTPForbidden()
return 'OK'
class LockoutTestCase(test.NoDBTestCase):
"""Test case for the Lockout middleware."""
def setUp(self): # pylint: disable=C0103
super(LockoutTestCase, self).setUp()
timeutils.set_time_override()
self.lockout = ec2.Lockout(conditional_forbid)
def tearDown(self): # pylint: disable=C0103
timeutils.clear_time_override()
super(LockoutTestCase, self).tearDown()
def _send_bad_attempts(self, access_key, num_attempts=1):
"""Fail x."""
for i in xrange(num_attempts):
req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key)
self.assertEqual(req.get_response(self.lockout).status_int, 403)
def _is_locked_out(self, access_key):
"""Sends a test request to see if key is locked out."""
req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key)
return (req.get_response(self.lockout).status_int == 403)
def test_lockout(self):
self._send_bad_attempts('test', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test'))
def test_timeout(self):
self._send_bad_attempts('test', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test'))
timeutils.advance_time_seconds(CONF.lockout_minutes * 60)
self.assertFalse(self._is_locked_out('test'))
def test_multiple_keys(self):
self._send_bad_attempts('test1', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test1'))
self.assertFalse(self._is_locked_out('test2'))
timeutils.advance_time_seconds(CONF.lockout_minutes * 60)
self.assertFalse(self._is_locked_out('test1'))
self.assertFalse(self._is_locked_out('test2'))
def test_window_timeout(self):
self._send_bad_attempts('test', CONF.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))
timeutils.advance_time_seconds(CONF.lockout_window * 60)
self._send_bad_attempts('test', CONF.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))
class ExecutorTestCase(test.NoDBTestCase):
def setUp(self):
super(ExecutorTestCase, self).setUp()
self.executor = ec2.Executor()
def _execute(self, invoke):
class Fake(object):
pass
fake_ec2_request = Fake()
fake_ec2_request.invoke = invoke
fake_wsgi_request = Fake()
fake_wsgi_request.environ = {
'nova.context': context.get_admin_context(),
'ec2.request': fake_ec2_request,
}
return self.executor(fake_wsgi_request)
def _extract_message(self, result):
tree = etree.fromstring(result.body)
return tree.findall('./Errors')[0].find('Error/Message').text
def test_instance_not_found(self):
def not_found(context):
raise exception.InstanceNotFound(instance_id=5)
result = self._execute(not_found)
self.assertIn('i-00000005', self._extract_message(result))
def test_instance_not_found_none(self):
def not_found(context):
raise exception.InstanceNotFound(instance_id=None)
# NOTE(mikal): we want no exception to be raised here, which was what
# was happening in bug/1080406
result = self._execute(not_found)
self.assertIn('None', self._extract_message(result))
def test_snapshot_not_found(self):
def not_found(context):
raise exception.SnapshotNotFound(snapshot_id=5)
result = self._execute(not_found)
self.assertIn('snap-00000005', self._extract_message(result))
def test_volume_not_found(self):
def not_found(context):
raise exception.VolumeNotFound(volume_id=5)
result = self._execute(not_found)
self.assertIn('vol-00000005', self._extract_message(result))
class FakeResponse(object):
reason = "Test Reason"
def __init__(self, status=400):
self.status = status
def read(self):
return '{}'
class KeystoneAuthTestCase(test.NoDBTestCase):
def setUp(self):
super(KeystoneAuthTestCase, self).setUp()
self.kauth = ec2.EC2KeystoneAuth(conditional_forbid)
def _validate_ec2_error(self, response, http_status, ec2_code):
self.assertEqual(response.status_code, http_status,
'Expected HTTP status %s' % http_status)
root_e = etree.XML(response.body)
self.assertEqual(root_e.tag, 'Response',
"Top element must be Response.")
errors_e = root_e.find('Errors')
error_e = errors_e[0]
code_e = error_e.find('Code')
self.assertIsNotNone(code_e, "Code element must be present.")
self.assertEqual(code_e.text, ec2_code)
def test_no_signature(self):
req = wsgi.Request.blank('/test')
resp = self.kauth(req)
self._validate_ec2_error(resp, 400, 'AuthFailure')
def test_no_key_id(self):
req = wsgi.Request.blank('/test')
req.GET['Signature'] = 'test-signature'
resp = self.kauth(req)
self._validate_ec2_error(resp, 400, 'AuthFailure')
def test_communication_failure(self):
req = wsgi.Request.blank('/test')
req.GET['Signature'] = 'test-signature'
req.GET['AWSAccessKeyId'] = 'test-key-id'
conn = httplib.HTTPConnection('/mock')
self.mox.StubOutWithMock(httplib.HTTPConnection, 'request')
self.mox.StubOutWithMock(httplib.HTTPConnection, 'getresponse')
conn.request('POST', mox.IgnoreArg(), body=mox.IgnoreArg(),
headers=mox.IgnoreArg())
resp = FakeResponse()
conn.getresponse().AndReturn(resp)
self.mox.ReplayAll()
resp = self.kauth(req)
self._validate_ec2_error(resp, 400, 'AuthFailure')
def test_no_result_data(self):
req = wsgi.Request.blank('/test')
req.GET['Signature'] = 'test-signature'
req.GET['AWSAccessKeyId'] = 'test-key-id'
conn = httplib.HTTPConnection('/mock')
self.mox.StubOutWithMock(httplib.HTTPConnection, 'request')
self.mox.StubOutWithMock(httplib.HTTPConnection, 'getresponse')
self.mox.StubOutWithMock(httplib.HTTPConnection, 'close')
conn.request('POST', mox.IgnoreArg(), body=mox.IgnoreArg(),
headers=mox.IgnoreArg())
resp = FakeResponse(200)
conn.getresponse().AndReturn(resp)
conn.close()
self.mox.ReplayAll()
resp = self.kauth(req)
self._validate_ec2_error(resp, 400, 'AuthFailure')
| apache-2.0 |
slohse/ansible | test/units/test_constants.py | 187 | 3203 | # -*- coding: utf-8 -*-
# (c) 2017 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import pwd
import os
import pytest
from ansible import constants
from ansible.module_utils.six import StringIO
from ansible.module_utils.six.moves import configparser
from ansible.module_utils._text import to_text
@pytest.fixture
def cfgparser():
CFGDATA = StringIO("""
[defaults]
defaults_one = 'data_defaults_one'
[level1]
level1_one = 'data_level1_one'
""")
p = configparser.ConfigParser()
p.readfp(CFGDATA)
return p
@pytest.fixture
def user():
user = {}
user['uid'] = os.geteuid()
pwd_entry = pwd.getpwuid(user['uid'])
user['username'] = pwd_entry.pw_name
user['home'] = pwd_entry.pw_dir
return user
@pytest.fixture
def cfg_file():
data = '/ansible/test/cfg/path'
old_cfg_file = constants.CONFIG_FILE
constants.CONFIG_FILE = os.path.join(data, 'ansible.cfg')
yield data
constants.CONFIG_FILE = old_cfg_file
@pytest.fixture
def null_cfg_file():
old_cfg_file = constants.CONFIG_FILE
del constants.CONFIG_FILE
yield
constants.CONFIG_FILE = old_cfg_file
@pytest.fixture
def cwd():
data = '/ansible/test/cwd/'
old_cwd = os.getcwd
os.getcwd = lambda: data
old_cwdu = None
if hasattr(os, 'getcwdu'):
old_cwdu = os.getcwdu
os.getcwdu = lambda: to_text(data)
yield data
os.getcwd = old_cwd
if hasattr(os, 'getcwdu'):
os.getcwdu = old_cwdu
class TestMkBoolean:
def test_bools(self):
assert constants.mk_boolean(True) is True
assert constants.mk_boolean(False) is False
def test_none(self):
assert constants.mk_boolean(None) is False
def test_numbers(self):
assert constants.mk_boolean(1) is True
assert constants.mk_boolean(0) is False
assert constants.mk_boolean(0.0) is False
# Current mk_boolean doesn't consider these to be true values
# def test_other_numbers(self):
# assert constants.mk_boolean(2) is True
# assert constants.mk_boolean(-1) is True
# assert constants.mk_boolean(0.1) is True
def test_strings(self):
assert constants.mk_boolean("true") is True
assert constants.mk_boolean("TRUE") is True
assert constants.mk_boolean("t") is True
assert constants.mk_boolean("yes") is True
assert constants.mk_boolean("y") is True
assert constants.mk_boolean("on") is True
| gpl-3.0 |
flwh/KK_mt6589_iq451 | prebuilts/python/linux-x86/2.7.5/lib/python2.7/test/test_socketserver.py | 45 | 10832 | """
Test suite for SocketServer.py.
"""
import contextlib
import imp
import os
import select
import signal
import socket
import select
import errno
import tempfile
import unittest
import SocketServer
import test.test_support
from test.test_support import reap_children, reap_threads, verbose
try:
import threading
except ImportError:
threading = None
test.test_support.requires("network")
TEST_STR = "hello world\n"
HOST = test.test_support.HOST
HAVE_UNIX_SOCKETS = hasattr(socket, "AF_UNIX")
HAVE_FORKING = hasattr(os, "fork") and os.name != "os2"
def signal_alarm(n):
"""Call signal.alarm when it exists (i.e. not on Windows)."""
if hasattr(signal, 'alarm'):
signal.alarm(n)
# Remember real select() to avoid interferences with mocking
_real_select = select.select
def receive(sock, n, timeout=20):
r, w, x = _real_select([sock], [], [], timeout)
if sock in r:
return sock.recv(n)
else:
raise RuntimeError, "timed out on %r" % (sock,)
if HAVE_UNIX_SOCKETS:
class ForkingUnixStreamServer(SocketServer.ForkingMixIn,
SocketServer.UnixStreamServer):
pass
class ForkingUnixDatagramServer(SocketServer.ForkingMixIn,
SocketServer.UnixDatagramServer):
pass
@contextlib.contextmanager
def simple_subprocess(testcase):
pid = os.fork()
if pid == 0:
# Don't raise an exception; it would be caught by the test harness.
os._exit(72)
yield None
pid2, status = os.waitpid(pid, 0)
testcase.assertEqual(pid2, pid)
testcase.assertEqual(72 << 8, status)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketServerTest(unittest.TestCase):
"""Test all socket servers."""
def setUp(self):
signal_alarm(60) # Kill deadlocks after 60 seconds.
self.port_seed = 0
self.test_files = []
def tearDown(self):
signal_alarm(0) # Didn't deadlock.
reap_children()
for fn in self.test_files:
try:
os.remove(fn)
except os.error:
pass
self.test_files[:] = []
def pickaddr(self, proto):
if proto == socket.AF_INET:
return (HOST, 0)
else:
# XXX: We need a way to tell AF_UNIX to pick its own name
# like AF_INET provides port==0.
dir = None
if os.name == 'os2':
dir = '\socket'
fn = tempfile.mktemp(prefix='unix_socket.', dir=dir)
if os.name == 'os2':
# AF_UNIX socket names on OS/2 require a specific prefix
# which can't include a drive letter and must also use
# backslashes as directory separators
if fn[1] == ':':
fn = fn[2:]
if fn[0] in (os.sep, os.altsep):
fn = fn[1:]
if os.sep == '/':
fn = fn.replace(os.sep, os.altsep)
else:
fn = fn.replace(os.altsep, os.sep)
self.test_files.append(fn)
return fn
def make_server(self, addr, svrcls, hdlrbase):
class MyServer(svrcls):
def handle_error(self, request, client_address):
self.close_request(request)
self.server_close()
raise
class MyHandler(hdlrbase):
def handle(self):
line = self.rfile.readline()
self.wfile.write(line)
if verbose: print "creating server"
server = MyServer(addr, MyHandler)
self.assertEqual(server.server_address, server.socket.getsockname())
return server
@reap_threads
def run_server(self, svrcls, hdlrbase, testfunc):
server = self.make_server(self.pickaddr(svrcls.address_family),
svrcls, hdlrbase)
# We had the OS pick a port, so pull the real address out of
# the server.
addr = server.server_address
if verbose:
print "server created"
print "ADDR =", addr
print "CLASS =", svrcls
t = threading.Thread(
name='%s serving' % svrcls,
target=server.serve_forever,
# Short poll interval to make the test finish quickly.
# Time between requests is short enough that we won't wake
# up spuriously too many times.
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
t.start()
if verbose: print "server running"
for i in range(3):
if verbose: print "test client", i
testfunc(svrcls.address_family, addr)
if verbose: print "waiting for server"
server.shutdown()
t.join()
if verbose: print "done"
def stream_examine(self, proto, addr):
s = socket.socket(proto, socket.SOCK_STREAM)
s.connect(addr)
s.sendall(TEST_STR)
buf = data = receive(s, 100)
while data and '\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
s.close()
def dgram_examine(self, proto, addr):
s = socket.socket(proto, socket.SOCK_DGRAM)
s.sendto(TEST_STR, addr)
buf = data = receive(s, 100)
while data and '\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
s.close()
def test_TCPServer(self):
self.run_server(SocketServer.TCPServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
def test_ThreadingTCPServer(self):
self.run_server(SocketServer.ThreadingTCPServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
if HAVE_FORKING:
def test_ForkingTCPServer(self):
with simple_subprocess(self):
self.run_server(SocketServer.ForkingTCPServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
if HAVE_UNIX_SOCKETS:
def test_UnixStreamServer(self):
self.run_server(SocketServer.UnixStreamServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
def test_ThreadingUnixStreamServer(self):
self.run_server(SocketServer.ThreadingUnixStreamServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
if HAVE_FORKING:
def test_ForkingUnixStreamServer(self):
with simple_subprocess(self):
self.run_server(ForkingUnixStreamServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
def test_UDPServer(self):
self.run_server(SocketServer.UDPServer,
SocketServer.DatagramRequestHandler,
self.dgram_examine)
def test_ThreadingUDPServer(self):
self.run_server(SocketServer.ThreadingUDPServer,
SocketServer.DatagramRequestHandler,
self.dgram_examine)
if HAVE_FORKING:
def test_ForkingUDPServer(self):
with simple_subprocess(self):
self.run_server(SocketServer.ForkingUDPServer,
SocketServer.DatagramRequestHandler,
self.dgram_examine)
@contextlib.contextmanager
def mocked_select_module(self):
"""Mocks the select.select() call to raise EINTR for first call"""
old_select = select.select
class MockSelect:
def __init__(self):
self.called = 0
def __call__(self, *args):
self.called += 1
if self.called == 1:
# raise the exception on first call
raise select.error(errno.EINTR, os.strerror(errno.EINTR))
else:
# Return real select value for consecutive calls
return old_select(*args)
select.select = MockSelect()
try:
yield select.select
finally:
select.select = old_select
def test_InterruptServerSelectCall(self):
with self.mocked_select_module() as mock_select:
pid = self.run_server(SocketServer.TCPServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
# Make sure select was called again:
self.assertGreater(mock_select.called, 1)
# Alas, on Linux (at least) recvfrom() doesn't return a meaningful
# client address so this cannot work:
# if HAVE_UNIX_SOCKETS:
# def test_UnixDatagramServer(self):
# self.run_server(SocketServer.UnixDatagramServer,
# SocketServer.DatagramRequestHandler,
# self.dgram_examine)
#
# def test_ThreadingUnixDatagramServer(self):
# self.run_server(SocketServer.ThreadingUnixDatagramServer,
# SocketServer.DatagramRequestHandler,
# self.dgram_examine)
#
# if HAVE_FORKING:
# def test_ForkingUnixDatagramServer(self):
# self.run_server(SocketServer.ForkingUnixDatagramServer,
# SocketServer.DatagramRequestHandler,
# self.dgram_examine)
@reap_threads
def test_shutdown(self):
# Issue #2302: shutdown() should always succeed in making an
# other thread leave serve_forever().
class MyServer(SocketServer.TCPServer):
pass
class MyHandler(SocketServer.StreamRequestHandler):
pass
threads = []
for i in range(20):
s = MyServer((HOST, 0), MyHandler)
t = threading.Thread(
name='MyServer serving',
target=s.serve_forever,
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
threads.append((t, s))
for t, s in threads:
t.start()
s.shutdown()
for t, s in threads:
t.join()
def test_main():
if imp.lock_held():
# If the import lock is held, the threads will hang
raise unittest.SkipTest("can't run when import lock is held")
test.test_support.run_unittest(SocketServerTest)
if __name__ == "__main__":
test_main()
| gpl-2.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/rest_framework/utils/breadcrumbs.py | 46 | 2004 | from __future__ import unicode_literals
from django.core.urlresolvers import resolve, get_script_prefix
def get_breadcrumbs(url):
"""
Given a url returns a list of breadcrumbs, which are each a
tuple of (name, url).
"""
from rest_framework.settings import api_settings
from rest_framework.views import APIView
view_name_func = api_settings.VIEW_NAME_FUNCTION
def breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen):
"""
Add tuples of (name, url) to the breadcrumbs list,
progressively chomping off parts of the url.
"""
try:
(view, unused_args, unused_kwargs) = resolve(url)
except Exception:
pass
else:
# Check if this is a REST framework view,
# and if so add it to the breadcrumbs
cls = getattr(view, 'cls', None)
if cls is not None and issubclass(cls, APIView):
# Don't list the same view twice in a row.
# Probably an optional trailing slash.
if not seen or seen[-1] != view:
suffix = getattr(view, 'suffix', None)
name = view_name_func(cls, suffix)
breadcrumbs_list.insert(0, (name, prefix + url))
seen.append(view)
if url == '':
# All done
return breadcrumbs_list
elif url.endswith('/'):
# Drop trailing slash off the end and continue to try to
# resolve more breadcrumbs
url = url.rstrip('/')
return breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen)
# Drop trailing non-slash off the end and continue to try to
# resolve more breadcrumbs
url = url[:url.rfind('/') + 1]
return breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen)
prefix = get_script_prefix().rstrip('/')
url = url[len(prefix):]
return breadcrumbs_recursive(url, [], prefix, [])
| agpl-3.0 |
unicri/edx-platform | common/djangoapps/student/migrations/0045_add_trk_partner_to_linkedin_config.py | 102 | 14314 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'LinkedInAddToProfileConfiguration.trk_partner_name'
db.add_column('student_linkedinaddtoprofileconfiguration', 'trk_partner_name',
self.gf('django.db.models.fields.CharField')(default='', max_length=10, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'LinkedInAddToProfileConfiguration.trk_partner_name'
db.delete_column('student_linkedinaddtoprofileconfiguration', 'trk_partner_name')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.anonymoususerid': {
'Meta': {'object_name': 'AnonymousUserId'},
'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseaccessrole': {
'Meta': {'unique_together': "(('user', 'org', 'course_id', 'role'),)", 'object_name': 'CourseAccessRole'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'org': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.dashboardconfiguration': {
'Meta': {'object_name': 'DashboardConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recent_enrollment_time_delta': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'student.linkedinaddtoprofileconfiguration': {
'Meta': {'object_name': 'LinkedInAddToProfileConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'company_identifier': ('django.db.models.fields.TextField', [], {}),
'dashboard_tracking_code': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'trk_partner_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'})
},
'student.loginfailures': {
'Meta': {'object_name': 'LoginFailures'},
'failure_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lockout_until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.passwordhistory': {
'Meta': {'object_name': 'PasswordHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'time_set': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.usersignupsource': {
'Meta': {'object_name': 'UserSignupSource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.userstanding': {
'Meta': {'object_name': 'UserStanding'},
'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
| agpl-3.0 |
pschmitt/home-assistant | homeassistant/components/aruba/device_tracker.py | 19 | 4001 | """Support for Aruba Access Points."""
import logging
import re
import pexpect
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
_DEVICES_REGEX = re.compile(
r"(?P<name>([^\s]+)?)\s+"
+ r"(?P<ip>([0-9]{1,3}[\.]){3}[0-9]{1,3})\s+"
+ r"(?P<mac>([0-9a-f]{2}[:-]){5}([0-9a-f]{2}))\s+"
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
}
)
def get_scanner(hass, config):
    """Validate the configuration and return a Aruba scanner."""
    device_scanner = ArubaDeviceScanner(config[DOMAIN])
    if not device_scanner.success_init:
        # The access point could not be reached during the probe.
        return None
    return device_scanner
class ArubaDeviceScanner(DeviceScanner):
    """This class queries a Aruba Access Point for connected devices."""
    def __init__(self, config):
        """Initialize the scanner from the validated platform config."""
        self.host = config[CONF_HOST]
        self.username = config[CONF_USERNAME]
        self.password = config[CONF_PASSWORD]
        # Collection of per-client dicts ({'ip', 'mac', 'name'}); replaced by
        # the values of the parsed result on every successful refresh.
        self.last_results = {}
        # Test the router is accessible.
        data = self.get_aruba_data()
        self.success_init = data is not None
    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()
        return [client["mac"] for client in self.last_results]
    def get_device_name(self, device):
        """Return the name of the given device or None if we don't know."""
        if not self.last_results:
            return None
        for client in self.last_results:
            if client["mac"] == device:
                return client["name"]
        return None
    def _update_info(self):
        """Ensure the information from the Aruba Access Point is up to date.
        Return boolean if scanning successful.
        """
        if not self.success_init:
            return False
        data = self.get_aruba_data()
        if not data:
            return False
        # Keep only the per-client records; scan_devices/get_device_name
        # iterate over these dicts directly.
        self.last_results = data.values()
        return True
    def get_aruba_data(self):
        """Retrieve data from Aruba Access Point and return parsed result.

        Returns a dict keyed by client IP, or None (implicit) when the SSH
        session could not be established.
        """
        connect = f"ssh {self.username}@{self.host}"
        ssh = pexpect.spawn(connect)
        # The index of the matched pattern tells us how the SSH handshake
        # went: 0 password prompt, 1 timeout, 2 EOF, 3 unknown host key,
        # 4 changed host key, 5 connection refused, 6 connection timed out.
        query = ssh.expect(
            [
                "password:",
                pexpect.TIMEOUT,
                pexpect.EOF,
                "continue connecting (yes/no)?",
                "Host key verification failed.",
                "Connection refused",
                "Connection timed out",
            ],
            timeout=120,
        )
        if query == 1:
            _LOGGER.error("Timeout")
            return
        if query == 2:
            _LOGGER.error("Unexpected response from router")
            return
        if query == 3:
            # First connection to this host: accept the key, then expect the
            # password prompt that follows.
            ssh.sendline("yes")
            ssh.expect("password:")
        elif query == 4:
            _LOGGER.error("Host key changed")
            return
        elif query == 5:
            _LOGGER.error("Connection refused by server")
            return
        elif query == 6:
            _LOGGER.error("Connection timed out")
            return
        # query == 0 (or after accepting the host key): log in and run the
        # client listing command, capturing output up to the next prompt.
        ssh.sendline(self.password)
        ssh.expect("#")
        ssh.sendline("show clients")
        ssh.expect("#")
        devices_result = ssh.before.split(b"\r\n")
        ssh.sendline("exit")
        devices = {}
        for device in devices_result:
            match = _DEVICES_REGEX.search(device.decode("utf-8"))
            if match:
                devices[match.group("ip")] = {
                    "ip": match.group("ip"),
                    "mac": match.group("mac").upper(),
                    "name": match.group("name"),
                }
        return devices
| apache-2.0 |
danielharbor/openerp | addons/fleet/__openerp__.py | 53 | 2482 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Addon manifest dictionary read by the OpenERP module loader.
{
    'name' : 'Fleet Management',
    'version' : '0.1',
    'author' : 'OpenERP S.A.',
    'sequence': 110,  # ordering weight in the apps list
    'category': 'Managing vehicles and contracts',
    'website' : 'https://www.odoo.com/page/fleet',
    'summary' : 'Vehicle, leasing, insurances, costs',
    'description' : """
Vehicle, leasing, insurances, cost
==================================
With this module, OpenERP helps you managing all your vehicles, the
contracts associated to those vehicle as well as services, fuel log
entries, costs and many other features necessary to the management
of your fleet of vehicle(s)
Main Features
-------------
* Add vehicles to your fleet
* Manage contracts for vehicles
* Reminder when a contract reach its expiration date
* Add services, fuel log entry, odometer values for all vehicles
* Show all costs associated to a vehicle or to a type of service
* Analysis graph for costs
""",
    # Modules that must be installed before this one.
    'depends' : [
        'base',
        'mail',
        'board'
    ],
    # XML/CSV data files loaded at install/update time, in order.
    'data' : [
        'security/fleet_security.xml',
        'security/ir.model.access.csv',
        'fleet_view.xml',
        'fleet_cars.xml',
        'fleet_data.xml',
        'fleet_board_view.xml',
    ],
    'images': ['images/costs_analysis.jpeg','images/indicative_costs_analysis.jpeg','images/vehicles.jpeg','images/vehicles_contracts.jpeg','images/vehicles_fuel.jpeg','images/vehicles_odometer.jpeg','images/vehicles_services.jpeg'],
    'demo': ['fleet_demo.xml'],
    'installable' : True,
    'application' : True,  # shown as a full application in the apps menu
}
| agpl-3.0 |
takeshineshiro/horizon | openstack_dashboard/dashboards/settings/user/tests.py | 73 | 1493 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.urlresolvers import reverse
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse("horizon:settings:user:index")
class UserSettingsTest(test.TestCase):
    """Rendering tests for the Horizon user settings panel."""
    def test_timezone_offset_is_displayed(self):
        # Timezone choices should be rendered together with their UTC offsets.
        res = self.client.get(INDEX_URL)
        self.assertContains(res, "UTC +11:00: Australia (Melbourne) Time")
        self.assertContains(res, "UTC -03:00: Falkland Islands Time")
        self.assertContains(res, "UTC -10:00: United States (Honolulu) Time")
    def test_display_language(self):
        # Add an unknown language to LANGUAGES list
        settings.LANGUAGES += (('unknown', 'Unknown Language'),)
        res = self.client.get(INDEX_URL)
        # Known language
        self.assertContains(res, 'English')
        # Unknown language
        self.assertContains(res, 'Unknown Language')
| apache-2.0 |
fabioz/Pydev | plugins/org.python.pydev.jython/Lib/encodings/iso2022_jp_ext.py | 816 | 1069 | #
# iso2022_jp_ext.py: Python Unicode Codec for ISO2022_JP_EXT
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
# Codec object from the C implementation; every class below delegates to it.
codec = _codecs_iso2022.getcodec('iso2022_jp_ext')
class Codec(codecs.Codec):
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec
def getregentry():
    # Entry point looked up by the encodings package search function.
    return codecs.CodecInfo(
        name='iso2022_jp_ext',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| epl-1.0 |
jaggu303619/asylum | openerp/addons/survey/wizard/survey_print_answer.py | 54 | 3127 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class survey_print_answer(osv.osv_memory):
    """Transient wizard collecting layout options for printing survey
    answers as a PDF report."""
    _name = 'survey.print.answer'
    _columns = {
        'response_ids': fields.many2many('survey.response','survey_print_response',\
                                 'response_id','print_id', "Answer", required="1"),
        'orientation': fields.selection([('vertical','Portrait(Vertical)'),\
                                 ('horizontal','Landscape(Horizontal)')], 'Orientation'),
        'paper_size': fields.selection([('letter','Letter (8.5" x 11")'),\
                                ('legal','Legal (8.5" x 14")'),\
                                ('a4','A4 (210mm x 297mm)')], 'Paper Size'),
        'page_number': fields.boolean('Include Page Number'),
        'without_pagebreak': fields.boolean('Print Without Page Breaks')
    }
    # Default layout: portrait letter, no page numbers, page breaks enabled.
    _defaults = {
        'orientation': lambda *a:'vertical',
        'paper_size': lambda *a:'letter',
        'page_number': lambda *a: 0,
        'without_pagebreak': lambda *a: 0
    }
    def action_next(self, cr, uid, ids, context=None):
        """
        Print Survey Answer in pdf format.
        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user’s ID for security checks,
        @param ids: List of print answer IDs
        @param context: A standard dictionary for contextual values
        @return : Dictionary value for created survey answer report
        """
        if context is None:
            context = {}
        datas = {'ids': context.get('active_ids', [])}
        # Feed the wizard's selections into the report engine payload.
        res = self.read(cr, uid, ids, ['response_ids', 'orientation', 'paper_size',\
                         'page_number', 'without_pagebreak'], context=context)
        res = res and res[0] or {}
        datas['form'] = res
        datas['model'] = 'survey.print.answer'
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'survey.browse.response',
            'datas': datas,
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
citrix-openstack/build-ryu | ryu/app/wsgi.py | 2 | 4084 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from oslo.config import cfg
import webob.dec
from ryu.lib import hub
from routes import Mapper
from routes.util import URLGenerator
CONF = cfg.CONF
CONF.register_cli_opts([
cfg.StrOpt('wsapi-host', default='', help='webapp listen host'),
cfg.IntOpt('wsapi-port', default=8080, help='webapp listen port')
])
HEX_PATTERN = r'0x[0-9a-z]+'
DIGIT_PATTERN = r'[1-9][0-9]*'
def route(name, path, methods=None, requirements=None):
    """Decorator factory that tags a controller method with routing metadata.

    The attached ``routing_info`` dict is later read by
    ``WSGIApplication.register`` to wire the method into the URL mapper.
    """
    def _decorate(controller_method):
        # Annotate in place and hand the original callable back unchanged.
        controller_method.routing_info = dict(
            name=name,
            path=path,
            methods=methods,
            requirements=requirements,
        )
        return controller_method
    return _decorate
class ControllerBase(object):
    """Base class for controllers dispatched by WSGIApplication.

    The concrete action method is chosen from the 'action' entry stored in
    ``req.urlvars`` by the URL mapper.
    """
    # Routing bookkeeping keys that must not be passed to action methods.
    special_vars = ['action', 'controller']
    def __init__(self, req, link, data, **config):
        # NOTE(review): 'data' is accepted but not stored by the base class;
        # presumably subclasses consume it in their own __init__.
        self.req = req
        self.link = link
        # Expose every config entry as an instance attribute.
        for name, value in config.items():
            setattr(self, name, value)
    def __call__(self, req):
        # Default to the 'index' action when the route named none.
        action = self.req.urlvars.get('action', 'index')
        if hasattr(self, '__before__'):
            self.__before__()
        kwargs = self.req.urlvars.copy()
        # Strip routing bookkeeping; the remaining urlvars become keyword
        # arguments of the action method.
        for attr in self.special_vars:
            if attr in kwargs:
                del kwargs[attr]
        return getattr(self, action)(req, **kwargs)
class WSGIApplication(object):
    """Routes-based WSGI dispatcher mapping URLs to ControllerBase classes."""
    def __init__(self, **config):
        self.config = config
        self.mapper = Mapper()
        # Per-controller data, keyed by controller class name, handed to the
        # controller instance on every request.
        self.registory = {}
        super(WSGIApplication, self).__init__()
    @webob.dec.wsgify
    def __call__(self, req):
        match = self.mapper.match(environ=req.environ)
        if not match:
            # NOTE(review): relies on webob.exc being reachable through the
            # 'import webob.dec' above -- confirm with the webob version used.
            return webob.exc.HTTPNotFound()
        req.urlvars = match
        link = URLGenerator(self.mapper, req.environ)
        data = None
        name = match['controller'].__name__
        if name in self.registory:
            data = self.registory[name]
        # Instantiate the controller class chosen by the route and delegate.
        controller = match['controller'](req, link, data, **self.config)
        return controller(req)
    def register(self, controller):
        # Collect every method tagged by the @route decorator.
        methods = inspect.getmembers(controller,
                                     lambda v: inspect.ismethod(v) and
                                     hasattr(v, 'routing_info'))
        for method_name, method in methods:
            routing_info = getattr(method, 'routing_info')
            name = routing_info['name']
            path = routing_info['path']
            conditions = {}
            if routing_info.get('methods'):
                conditions['method'] = routing_info['methods']
            requirements = routing_info.get('requirements') or {}
            # Connect the route so dispatch can find the class and action.
            self.mapper.connect(name,
                                path,
                                controller=controller,
                                requirements=requirements,
                                action=method_name,
                                conditions=conditions)
class WSGIServer(hub.WSGIServer):
    """hub-based WSGI server bound to the --wsapi-host/--wsapi-port options."""
    def __init__(self, application, **config):
        super(WSGIServer, self).__init__((CONF.wsapi_host, CONF.wsapi_port),
                                         application, **config)
    def __call__(self):
        # Calling the server instance blocks, serving requests forever.
        self.serve_forever()
def start_service(app_mgr):
    """Return a WSGIServer wrapping the app manager's WSGIApplication context.

    Scans the registered context instances and wraps the first
    WSGIApplication found; returns None when no such context exists.
    """
    for instance in app_mgr.contexts.values():
        # isinstance (rather than an exact __class__ comparison) also
        # accepts WSGIApplication subclasses registered as contexts.
        if isinstance(instance, WSGIApplication):
            return WSGIServer(instance)
    return None
| apache-2.0 |
cntnboys/410Lab5 | env-lab4/lib/python2.7/site-packages/pip-1.1-py2.7.egg/pip/commands/uninstall.py | 80 | 1626 | from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.basecommand import Command
from pip.exceptions import InstallationError
class UninstallCommand(Command):
    """pip 'uninstall': remove packages named on the command line and/or
    listed in requirements files."""
    name = 'uninstall'
    usage = '%prog [OPTIONS] PACKAGE_NAMES ...'
    summary = 'Uninstall packages'
    def __init__(self):
        super(UninstallCommand, self).__init__()
        self.parser.add_option(
            '-r', '--requirement',
            dest='requirements',
            action='append',
            default=[],
            metavar='FILENAME',
            help='Uninstall all the packages listed in the given requirements file. '
            'This option can be used multiple times.')
        self.parser.add_option(
            '-y', '--yes',
            dest='yes',
            action='store_true',
            help="Don't ask for confirmation of uninstall deletions.")
    def run(self, options, args):
        """Collect requirements from args and -r files, then uninstall them."""
        # No build/src/download dirs are needed for an uninstall.
        requirement_set = RequirementSet(
            build_dir=None,
            src_dir=None,
            download_dir=None)
        for name in args:
            requirement_set.add_requirement(
                InstallRequirement.from_line(name))
        for filename in options.requirements:
            for req in parse_requirements(filename, options=options):
                requirement_set.add_requirement(req)
        if not requirement_set.has_requirements:
            raise InstallationError('You must give at least one requirement '
                'to %(name)s (see "pip help %(name)s")' % dict(name=self.name))
        # --yes skips the interactive confirmation prompt.
        requirement_set.uninstall(auto_confirm=options.yes)
| apache-2.0 |
dd00/commandergenius | project/jni/python/src/Lib/test/test_multifile.py | 56 | 1650 | from test import test_support
import mimetools
multifile = test_support.import_module('multifile', deprecated=True)
import cStringIO
msg = """Mime-Version: 1.0
Content-Type: multipart/mixed;
boundary="=====================_590453667==_"
X-OriginalArrivalTime: 05 Feb 2002 03:43:23.0310 (UTC) FILETIME=[42D88CE0:01C1ADF7]
--=====================_590453667==_
Content-Type: multipart/alternative;
boundary="=====================_590453677==_.ALT"
--=====================_590453677==_.ALT
Content-Type: text/plain; charset="us-ascii"; format=flowed
test A
--=====================_590453677==_.ALT
Content-Type: text/html; charset="us-ascii"
<html>
<b>test B</font></b></html>
--=====================_590453677==_.ALT--
--=====================_590453667==_
Content-Type: text/plain; charset="us-ascii"
Content-Disposition: attachment; filename="att.txt"
Attached Content.
Attached Content.
Attached Content.
Attached Content.
--=====================_590453667==_--
"""
def getMIMEMsg(mf):
    """Recursively walk a MIME message via multifile, counting multipart
    boundaries and non-multipart body lines into the module globals."""
    global boundaries, linecount
    msg = mimetools.Message(mf)
    #print "TYPE: %s" % msg.gettype()
    if msg.getmaintype() == 'multipart':
        boundary = msg.getparam("boundary")
        boundaries += 1
        mf.push(boundary)
        # Recurse into every part delimited by this boundary.
        while mf.next():
            getMIMEMsg(mf)
        mf.pop()
    else:
        # Leaf part: tally the number of body lines.
        lines = mf.readlines()
        linecount += len(lines)
def test_main():
    """Parse the sample MIME message and check the collected totals."""
    global boundaries, linecount
    boundaries, linecount = 0, 0
    source = cStringIO.StringIO(msg)
    getMIMEMsg(multifile.MultiFile(source))
    # Two multipart sections and nine body lines in the fixture above.
    assert boundaries == 2
    assert linecount == 9
if __name__ == '__main__':
test_main()
| lgpl-2.1 |
shinyChen/browserscope | categories/security/test_set.py | 9 | 8893 | #!/usr/bin/python2.4
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark Tests Definitions."""
import logging
from categories import test_set_base
_CATEGORY = 'security'
class SecurityTest(test_set_base.TestBase):
    """A single boolean security-feature test (scored 0 or 1)."""
    TESTS_URL_PATH = '/%s/test' % _CATEGORY
    def __init__(self, key, name, doc):
        """Initialize a security test.
        Args:
          key: key for this in dict's
          name: a human readable label for display
          doc: a description of the test
        """
        # All security tests share the category test URL and are boolean,
        # hence the fixed 0..1 value range.
        test_set_base.TestBase.__init__(
            self,
            key=key,
            name=name,
            url=self.TESTS_URL_PATH,
            doc=doc,
            min_value=0,
            max_value=1)
_TESTS = (
# key, name, doc
SecurityTest('postMessage API', 'postMessage',
'''Checks whether the browser supports the
<a href="http://www.whatwg.org/specs/web-apps/current-work/multipage/comms.html#crossDocumentMessages">HTML 5
cross-document messaging</a> API that enables secure communication between origins.'''),
SecurityTest('JSON.parse API', 'JSON.parse',
'''Checks whether the browser natively supports the <a href="http://json.org/js.html">JSON.parse</a> API.
Native JSON parsing is safer than using eval.'''),
SecurityTest('toStaticHTML API', 'toStaticHTML',
'''Checks whether the browser supports the
<a href="http://msdn.microsoft.com/en-us/library/cc848922%28VS.85%29.aspx">toStaticHTML API</a>
for sanitizing untrusted inputs.'''),
SecurityTest('httpOnly cookie API', 'httpOnly cookies',
'''Checks whether the browser supports the
<a href="http://tools.ietf.org/html/draft-abarth-cookie-02#section-5.1.6">httpOnly cookie attribute</a>,
which is a mitigation for cross-site scripting attacks.'''),
SecurityTest('X-Frame-Options',
'X-Frame-Options',
'''Checks whether the browser supports the
<a href="http://blogs.msdn.com/ie/archive/2009/01/27/ie8-security-part-vii-clickjacking-defenses.aspx">X-Frame-Options API</a>,
which prevents clickjacking attacks by restricting how pages may be framed.'''),
SecurityTest('X-Content-Type-Options',
'X-Content-Type-Options',
'''Checks whether the browser supports the <a href="http://blogs.msdn.com/ie/archive/2008/07/02/ie8-security-part-v-comprehensive-protection.aspx">X-Content-Type-Options API</a>,
which <a href="http://www.adambarth.com/papers/2009/barth-caballero-song.pdf">prevents MIME sniffing</a>.'''),
SecurityTest('Block reflected XSS', 'Block reflected XSS',
'''Checks whether the browser blocks execution of JavaScript code that appears in the request
URL. Browser-based XSS filters mitigate some classes of cross-site scripting attacks.'''),
SecurityTest('Block location spoofing', 'Block location spoofing',
'''The global "location" object can be used by JavaScript to determine what page it is
executing on. It is used by Flash Player, Google AJAX API, and many bookmarklets.
Browsers should block
<a href="http://www.adambarth.com/papers/2009/adida-barth-jackson.pdf">JavaScript rootkits</a>
that try to overwrite the location object.'''),
SecurityTest('Block JSON hijacking', 'Block JSON hijacking',
'''Documents encoded in JSON format can be read across domains if the browser
supports a
<a href="http://www.fortify.com/advisory.jsp">mutable Array constructor</a>
that is called when array literals are encountered. JSON hijacking is also possible if the
browser supports a
<a href="http://haacked.com/archive/2009/06/25/json-hijacking.aspx">mutable setter function</a>
for the Object prototype that is called when object literals are encountered.'''),
SecurityTest('Block XSS in CSS', 'Block XSS in CSS',
'''Script in stylesheets can be used by attackers to evade server-side XSS filters.
Support for CSS expressions has been
<a href="http://blogs.msdn.com/ie/archive/2008/10/16/ending-expressions.aspx">discontinued
in IE8 standards mode</a> and XBL in stylesheets has been
<a href="http://www.mozilla.org/security/announce/2009/mfsa2009-18.html">restricted
to same-origin code in separate files</a> in Firefox. We check to make sure that script injected
into a site via stylesheet does not execute.'''),
SecurityTest('Sandbox attribute',
'Sandbox attribute',
'''Checks whether the browser supports the
<a href="http://www.whatwg.org/specs/web-apps/current-work/#attr-iframe-sandbox">sandbox attribute</a>,
which enables a set of extra restrictions on any content hosted by the iframe.'''),
SecurityTest('Origin header',
'Origin header',
'''Checks whether the browser supports the
<a href="http://tools.ietf.org/html/draft-abarth-origin">Origin header</a>, which is a mitigation for
<a href="http://en.wikipedia.org/wiki/Cross-site_request_forgery">cross-site request forgery</a> (CSRF) attacks.'''),
SecurityTest('Strict Transport Security',
'Strict Transport Security',
'''Checks whether the browser supports
<a href="http://lists.w3.org/Archives/Public/www-archive/2009Sep/att-0051/draft-hodges-strict-transport-sec-05.plain.html">Strict Transport Security</a>,
which enables web sites to declare themselves accessible only via secure connections.'''),
SecurityTest('Block cross-origin CSS attacks',
'Block cross-origin CSS attacks',
'''By injecting CSS selectors into the target site,
attackers can steal confidential data across domains using style sheet import, even
<a href="http://websec.sv.cmu.edu/css/css.pdf">without JavaScript</a>.
Browsers should correctly determine the content type when loading cross-origin CSS resources.'''),
SecurityTest('Cross Origin Resource Sharing',
'Cross Origin Resource Sharing',
'''Checks whether the browser supports the APIs for making
<a href="http://www.w3.org/TR/cors/">cross origin requests</a>.'''),
SecurityTest('Block visited link sniffing',
'Block visited link sniffing',
'''Most browsers display visited links with a :visited CSS pseudo class.
A user's browsing history <a href="http://whattheinternetknowsaboutyou.com/">can be sniffed</a> by
<a href="http://jeremiahgrossman.blogspot.com/2006/08/i-know-where-youve-been.html">testing the visited links</a> by
checking this CSS class. We test whether browsers restrict access to the
:visited pseudo class.'''),
SecurityTest('Content Security Policy',
'Content Security Policy',
'''Checks whether the browser supports
<a href="http://research.sidstamm.com/papers/csp-www2010.pdf">Content Security Policy</a>,
which reduces the XSS attack surfaces for websites that wish to opt-in.'''),
)
class SecurityTestSet(test_set_base.TestSet):
    """Test set aggregating the boolean security tests into one score."""
    def GetRowScoreAndDisplayValue(self, results):
        """Get the overall score for this row of results data.
        Args:
          results: {
            'test_key_1': {'score': score_1, 'raw_score': raw_score_1, ...},
            'test_key_2': {'score': score_2, 'raw_score': raw_score_2, ...},
            ...
          }
        Returns:
          score, display_value
              # score is from 0 to 100.
              # display_value is the text for the cell.
        """
        #logging.info('security getrowscore results: %s' % results)
        total_tests = 0
        total_valid_tests = 0
        total_score = 0
        tests = self.tests
        for test in tests:
            total_tests += 1
            if test.key in results and results[test.key]['raw_score'] is not None:
                score = results[test.key]['score']
                logging.info('test: %s, score: %s' % (test.key, score))
                total_valid_tests += 1
                # For booleans, when "score" is 100 that's test_type true.
                if score == 100:
                    total_score += 1
            else:
                logging.info('test: %s has no median' % test.key)
        #logging.info('%s, %s, %s' % (total_score, total_tests, total_valid_tests))
        if total_valid_tests:
            # NOTE(review): the percentage divides by total_tests while the
            # display counts only tests that reported a result -- presumably
            # intentional (missing results count as failures); confirm.
            score = int(round(100 * total_score / total_tests))
            display = '%s/%s' % (total_score, total_valid_tests)
        else:
            score = 0
            display = ''
        return score, display
TEST_SET = SecurityTestSet(
category=_CATEGORY,
category_name='Security',
summary_doc='Tests JavaScript APIs for ability to block harmful interactions between sites',
tests=_TESTS,
test_page='/%s/test_tpl' % _CATEGORY
)
| apache-2.0 |
gmacchi93/serverInfoParaguay | apps/venv/lib/python2.7/site-packages/setuptools/tests/test_bdist_egg.py | 286 | 1954 | """develop tests
"""
import sys
import os, re, shutil, tempfile, unittest
import tempfile
import site
from distutils.errors import DistutilsError
from setuptools.compat import StringIO
from setuptools.command.bdist_egg import bdist_egg
from setuptools.command import easy_install as easy_install_pkg
from setuptools.dist import Distribution
SETUP_PY = """\
from setuptools import setup
setup(name='foo', py_modules=['hi'])
"""
class TestDevelopTest(unittest.TestCase):
    """End-to-end test of 'setup.py bdist_egg' in a throwaway project."""
    def setUp(self):
        # Build a minimal project (setup.py + one module) in a temp dir and
        # chdir into it for the duration of the test.
        self.dir = tempfile.mkdtemp()
        self.old_cwd = os.getcwd()
        os.chdir(self.dir)
        f = open('setup.py', 'w')
        f.write(SETUP_PY)
        f.close()
        f = open('hi.py', 'w')
        f.write('1\n')
        f.close()
        if sys.version >= "2.6":
            # Redirect the per-user install locations so the test cannot
            # touch the real user site-packages.
            self.old_base = site.USER_BASE
            site.USER_BASE = tempfile.mkdtemp()
            self.old_site = site.USER_SITE
            site.USER_SITE = tempfile.mkdtemp()
    def tearDown(self):
        # Restore cwd and remove every temp dir created in setUp.
        os.chdir(self.old_cwd)
        shutil.rmtree(self.dir)
        if sys.version >= "2.6":
            shutil.rmtree(site.USER_BASE)
            shutil.rmtree(site.USER_SITE)
            site.USER_BASE = self.old_base
            site.USER_SITE = self.old_site
    def test_bdist_egg(self):
        dist = Distribution(dict(
            script_name='setup.py',
            script_args=['bdist_egg'],
            name='foo',
            py_modules=['hi']
            ))
        os.makedirs(os.path.join('build', 'src'))
        # Silence the command's chatter while it runs.
        old_stdout = sys.stdout
        sys.stdout = o = StringIO()
        try:
            dist.parse_command_line()
            dist.run_commands()
        finally:
            sys.stdout = old_stdout
        # let's see if we got our egg link at the right place
        # NOTE(review): the dots in the pattern are unescaped (match any
        # character); harmless here but imprecise.
        [content] = os.listdir('dist')
        self.assertTrue(re.match('foo-0.0.0-py[23].\d.egg$', content))
def test_suite():
    """Build the unittest suite containing every bdist_egg test case."""
    suite = unittest.makeSuite(TestDevelopTest)
    return suite
| apache-2.0 |
smithfarm/s3-tests | s3tests/functional/__init__.py | 3 | 12312 | import ConfigParser
import boto.exception
import boto.s3.connection
import bunch
import itertools
import os
import random
import string
from .utils import region_sync_meta
s3 = bunch.Bunch()
config = bunch.Bunch()
targets = bunch.Bunch()
# this will be assigned by setup()
prefix = None
calling_formats = dict(
ordinary=boto.s3.connection.OrdinaryCallingFormat(),
subdomain=boto.s3.connection.SubdomainCallingFormat(),
vhost=boto.s3.connection.VHostCallingFormat(),
)
def get_prefix():
    """Return the bucket-name prefix chosen by setup().

    setup() must have run first; otherwise the assertion fires.
    """
    assert prefix is not None
    return prefix
def is_slow_backend():
    """Report whether the config flagged the backend as slow (setup() sets it)."""
    return slow_backend
def choose_bucket_prefix(template, max_len=30):
    """
    Choose a prefix for our test buckets, so they're easy to identify.
    Use template and feed it more and more random filler, until it's
    as long as possible but still below max_len.
    """
    filler = ''.join(
        random.choice(string.ascii_lowercase + string.digits)
        for _ in range(255)
    )
    # Trim the filler one character at a time until the rendered template
    # fits; an empty filler means the template can never fit.
    while filler:
        candidate = template.format(random=filler)
        if len(candidate) <= max_len:
            return candidate
        filler = filler[:-1]
    raise RuntimeError(
        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
            template=template,
            ),
        )
def nuke_prefixed_buckets_on_conn(prefix, name, conn):
    """Delete every bucket (keys, versions, then the bucket itself) whose
    name starts with *prefix* on the given connection."""
    print 'Cleaning buckets from connection {name} prefix {prefix!r}.'.format(
        name=name,
        prefix=prefix,
        )
    for bucket in conn.get_all_buckets():
        print 'prefix=',prefix
        if bucket.name.startswith(prefix):
            print 'Cleaning bucket {bucket}'.format(bucket=bucket)
            success = False
            # Up to two attempts: if the first hits AccessDenied we force the
            # bucket ACL to private (bottom of the loop) and retry once.
            for i in xrange(2):
                try:
                    try:
                        iterator = iter(bucket.list_versions())
                        # peek into iterator to issue list operation
                        try:
                            keys = itertools.chain([next(iterator)], iterator)
                        except StopIteration:
                            keys = []  # empty iterator
                    except boto.exception.S3ResponseError as e:
                        # some S3 implementations do not support object
                        # versioning - fall back to listing without versions
                        if e.error_code != 'NotImplemented':
                            raise e
                        keys = bucket.list();
                    for key in keys:
                        print 'Cleaning bucket {bucket} key {key}'.format(
                            bucket=bucket,
                            key=key,
                            )
                        # key.set_canned_acl('private')
                        bucket.delete_key(key.name, version_id = key.version_id)
                    bucket.delete()
                    success = True
                except boto.exception.S3ResponseError as e:
                    if e.error_code != 'AccessDenied':
                        print 'GOT UNWANTED ERROR', e.error_code
                        raise
                    # seems like we don't have permissions set appropriately, we'll
                    # modify permissions and retry
                    pass
                if success:
                    return
                bucket.set_canned_acl('private')
def nuke_prefixed_buckets(prefix):
    """Remove prefixed test buckets from every connection, syncing region
    metadata between master and secondaries when regions are configured."""
    # If no regions are specified, use the simple method
    if targets.main.master == None:
        for name, conn in s3.items():
            print 'Deleting buckets on {name}'.format(name=name)
            nuke_prefixed_buckets_on_conn(prefix, name, conn)
    else:
        # First, delete all buckets on the master connection
        for name, conn in s3.items():
            if conn == targets.main.master.connection:
                print 'Deleting buckets on {name} (master)'.format(name=name)
                nuke_prefixed_buckets_on_conn(prefix, name, conn)
        # Then sync to propagate deletes to secondaries
        region_sync_meta(targets.main, targets.main.master.connection)
        print 'region-sync in nuke_prefixed_buckets'
        # Now delete remaining buckets on any other connection
        for name, conn in s3.items():
            if conn != targets.main.master.connection:
                print 'Deleting buckets on {name} (non-master)'.format(name=name)
                nuke_prefixed_buckets_on_conn(prefix, name, conn)
    print 'Done with cleanup of test buckets.'
class TargetConfig:
    """Per-region settings parsed from one [region ...] config section.

    Missing options keep their defaults; only 'host' is mandatory.
    """
    def __init__(self, cfg, section):
        # Defaults used when the corresponding option is absent.
        self.port = None
        self.api_name = ''
        self.is_master = False
        self.is_secure = False
        self.sync_agent_addr = None
        self.sync_agent_port = 0
        self.sync_meta_wait = 0
        try:
            self.api_name = cfg.get(section, 'api_name')
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            pass
        try:
            self.port = cfg.getint(section, 'port')
        except ConfigParser.NoOptionError:
            pass
        try:
            self.host=cfg.get(section, 'host')
        except ConfigParser.NoOptionError:
            # host is the only required option for a region section.
            raise RuntimeError(
                'host not specified for section {s}'.format(s=section)
                )
        try:
            self.is_master=cfg.getboolean(section, 'is_master')
        except ConfigParser.NoOptionError:
            pass
        try:
            self.is_secure=cfg.getboolean(section, 'is_secure')
        except ConfigParser.NoOptionError:
            pass
        try:
            raw_calling_format = cfg.get(section, 'calling_format')
        except ConfigParser.NoOptionError:
            raw_calling_format = 'ordinary'
        try:
            self.sync_agent_addr = cfg.get(section, 'sync_agent_addr')
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            pass
        try:
            self.sync_agent_port = cfg.getint(section, 'sync_agent_port')
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            pass
        try:
            self.sync_meta_wait = cfg.getint(section, 'sync_meta_wait')
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            pass
        # Translate the textual calling format into a boto calling format
        # object (see module-level calling_formats).
        try:
            self.calling_format = calling_formats[raw_calling_format]
        except KeyError:
            raise RuntimeError(
                'calling_format unknown: %r' % raw_calling_format
                )
class TargetConnection:
    """Pair a region's TargetConfig with its established boto connection."""
    def __init__(self, conf, conn):
        """Store the target configuration and the live connection object."""
        self.conf = conf
        self.connection = conn
class RegionsInfo:
    """Registry of region configurations, tracking the single master and
    the list of secondaries."""
    def __init__(self):
        self.m = bunch.Bunch()          # region name -> TargetConfig
        self.master = None              # the one region flagged is_master
        self.secondaries = []           # every non-master region, in add order
    def add(self, name, region_config):
        """Register *region_config* under *name*; enforce a single master."""
        self.m[name] = region_config
        if (region_config.is_master):
            if not self.master is None:
                raise RuntimeError(
                    'multiple regions defined as master'
                    )
            self.master = region_config
        else:
            self.secondaries.append(region_config)
    def get(self, name=None):
        """Return the config registered under *name*, or the whole mapping
        when *name* is omitted.

        The original class defined two methods both named ``get``; the
        zero-argument variant silently shadowed the one-argument lookup.
        Merging them keeps the surviving no-argument behaviour and restores
        the intended by-name lookup.
        """
        if name is None:
            return self.m
        return self.m[name]
    def iteritems(self):
        """Iterate (name, config) pairs (Python 2 idiom, kept for callers)."""
        return self.m.iteritems()
# Module-level registry of all configured regions; populated by setup().
regions = RegionsInfo()
class RegionsConn:
    """Per-account collection of live S3 connections, one per region.

    Mirrors RegionsInfo but holds TargetConnection objects, tracking a
    default connection plus the master/secondary split.
    """

    def __init__(self):
        # name -> TargetConnection (attribute-accessible mapping)
        self.m = bunch.Bunch()
        self.default = None
        self.master = None
        self.secondaries = []

    def iteritems(self):
        """Iterate over (name, connection) pairs (Python 2 dict idiom)."""
        return self.m.iteritems()

    def set_default(self, conn):
        """Explicitly choose which connection is returned as default."""
        self.default = conn

    def add(self, name, conn):
        """Register *conn* under *name* and classify it as master or
        secondary.  The first connection added becomes the default
        unless one was already chosen."""
        self.m[name] = conn
        if conn.conf.is_master:
            self.master = conn
        else:
            self.secondaries.append(conn)
        if not self.default:
            self.default = conn
# nosetests --processes=N with N>1 is safe
# (nose multiprocess-plugin flag: tests in this module may be split
# across worker processes.)
_multiprocess_can_split_ = True
def setup():
    """Module-level setup: build the global fixtures (prefix, regions,
    config, targets, s3) from the config file named by $S3TEST_CONF,
    then delete any leftover buckets under our prefix."""
    cfg = ConfigParser.RawConfigParser()
    try:
        path = os.environ['S3TEST_CONF']
    except KeyError:
        raise RuntimeError(
            'To run tests, point environment '
            + 'variable S3TEST_CONF to a config file.',
            )
    # NOTE: py2 builtin file(); would need open() if ported to py3.
    with file(path) as f:
        cfg.readfp(f)
    global prefix
    global targets
    global slow_backend
    # Bucket-name prefix template; '{random}' is filled in by
    # choose_bucket_prefix (defined elsewhere in this file).
    try:
        template = cfg.get('fixtures', 'bucket prefix')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        template = 'test-{random}-'
    prefix = choose_bucket_prefix(template=template)
    try:
        slow_backend = cfg.getboolean('fixtures', 'slow backend')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        slow_backend = False
    # pull the default_region out, if it exists
    try:
        default_region = cfg.get('fixtures', 'default_region')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        default_region = None
    s3.clear()
    config.clear()
    # First pass: register every '[region <name>]' section.
    for section in cfg.sections():
        try:
            (type_, name) = section.split(None, 1)
        except ValueError:
            continue
        if type_ != 'region':
            continue
        regions.add(name, TargetConfig(cfg, section))
    # Second pass: for every '[s3 <name>]' account section, build one
    # connection per region using that account's credentials.
    for section in cfg.sections():
        try:
            (type_, name) = section.split(None, 1)
        except ValueError:
            continue
        if type_ != 's3':
            continue
        # Back-compat: with no '[region ...]' sections at all, treat the
        # s3 section itself as the sole ("default") region.
        if len(regions.get()) == 0:
            regions.add("default", TargetConfig(cfg, section))
        config[name] = bunch.Bunch()
        for var in [
            'user_id',
            'display_name',
            'email',
            ]:
            try:
                config[name][var] = cfg.get(section, var)
            except ConfigParser.NoOptionError:
                pass
        targets[name] = RegionsConn()
        for (k, conf) in regions.iteritems():
            conn = boto.s3.connection.S3Connection(
                aws_access_key_id=cfg.get(section, 'access_key'),
                aws_secret_access_key=cfg.get(section, 'secret_key'),
                is_secure=conf.is_secure,
                port=conf.port,
                host=conf.host,
                # TODO test vhost calling format
                calling_format=conf.calling_format,
                )
            temp_targetConn = TargetConnection(conf, conn)
            targets[name].add(k, temp_targetConn)
            # Explicitly test for and set the default region, if specified.
            # If it was not specified, use the 'is_master' flag to set it.
            # NOTE(review): 'name' here is the s3 *account* section name,
            # not the region key 'k' -- confirm default_region is really
            # meant to match the account name.
            if default_region:
                if default_region == name:
                    targets[name].set_default(temp_targetConn)
            elif conf.is_master:
                targets[name].set_default(temp_targetConn)
        s3[name] = targets[name].default.connection
    # WARNING! we actively delete all buckets we see with the prefix
    # we've chosen! Choose your prefix with care, and don't reuse
    # credentials!
    # We also assume nobody else is going to use buckets with that
    # prefix. This is racy but given enough randomness, should not
    # really fail.
    nuke_prefixed_buckets(prefix=prefix)
def teardown():
    """Module-level teardown: delete every bucket created under our
    randomized prefix so test runs don't leak buckets."""
    # remove our buckets here also, to avoid littering
    nuke_prefixed_buckets(prefix=prefix)
# Monotonic counter making each generated bucket name unique within a
# single test run.
bucket_counter = itertools.count(1)


def get_new_bucket_name():
    """Return a bucket name that is unlikely to exist: the run's random
    prefix followed by a strictly increasing counter value.

    If a bucket by this name happens to exist anyway, tests may give
    false negatives; that is acceptable.
    """
    return '{prefix}{num}'.format(
        prefix=prefix,
        num=next(bucket_counter),
        )
def get_new_bucket(target=None, name=None, headers=None):
    """Create (or recreate) an empty bucket and return it.

    Recreating from scratch is useful because it also resets ACLs and
    other bucket state.  Defaults: the main target's default connection
    and a freshly generated unique name.
    """
    if target is None:
        target = targets.main.default
    if name is None:
        name = get_new_bucket_name()
    # A clash with a pre-existing bucket would mean someone raced us
    # between setup's nuke_prefixed_buckets and here; treat that as
    # astronomically unlikely and ignore it.
    return target.connection.create_bucket(
        name,
        location=target.conf.api_name,
        headers=headers,
        )
| mit |
jacquesqiao/Paddle | python/paddle/fluid/tests/unittests/test_l1_norm_op.py | 5 | 1266 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
from op_test import OpTest
class TestL1NormOp(OpTest):
    """Forward and gradient checks for the l1_norm operator."""

    def setUp(self):
        self.op_type = "l1_norm"
        self.max_relative_error = 0.005
        data = np.random.uniform(-1, 1, (13, 19)).astype("float32")
        # Keep every element away from zero so the gradient of |x| is
        # well defined at the sampled points.
        data[np.abs(data) < self.max_relative_error] = 0.1
        self.inputs = {'X': data}
        self.outputs = {'Out': np.sum(np.abs(data))}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=self.max_relative_error)
# Standard unittest entry point: run all tests in this module when the
# file is executed directly.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
nitin-cherian/LifeLongLearning | Web_Development_Python/RealPython/flask-hello-world/env/lib/python3.5/site-packages/werkzeug/contrib/securecookie.py | 91 | 12174 | # -*- coding: utf-8 -*-
r"""
werkzeug.contrib.securecookie
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements a cookie that is not alterable from the client
because it adds a checksum the server checks for. You can use it as
session replacement if all you have is a user id or something to mark
a logged in user.
Keep in mind that the data is still readable from the client as a
normal cookie is. However you don't have to store and flush the
sessions you have at the server.
Example usage:
>>> from werkzeug.contrib.securecookie import SecureCookie
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
Dumping into a string so that one can store it in a cookie:
>>> value = x.serialize()
Loading from that string again:
>>> x = SecureCookie.unserialize(value, "deadbeef")
>>> x["baz"]
(1, 2, 3)
If someone modifies the cookie and the checksum is wrong the unserialize
method will fail silently and return a new empty `SecureCookie` object.
Keep in mind that the values will be visible in the cookie so do not
store data in a cookie you don't want the user to see.
Application Integration
=======================
If you are using the werkzeug request objects you could integrate the
secure cookie into your application like this::
from werkzeug.utils import cached_property
from werkzeug.wrappers import BaseRequest
from werkzeug.contrib.securecookie import SecureCookie
# don't use this key but a different one; you could just use
# os.urandom(20) to get something random
SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'
class Request(BaseRequest):
@cached_property
def client_session(self):
data = self.cookies.get('session_data')
if not data:
return SecureCookie(secret_key=SECRET_KEY)
return SecureCookie.unserialize(data, SECRET_KEY)
def application(environ, start_response):
request = Request(environ)
# get a response object here
response = ...
if request.client_session.should_save:
session_data = request.client_session.serialize()
response.set_cookie('session_data', session_data,
httponly=True)
return response(environ, start_response)
A less verbose integration can be achieved by using shorthand methods::
class Request(BaseRequest):
@cached_property
def client_session(self):
return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)
def application(environ, start_response):
request = Request(environ)
# get a response object here
response = ...
request.client_session.save_cookie(response)
return response(environ, start_response)
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import pickle
import base64
from hmac import new as hmac
from time import time
from hashlib import sha1 as _default_hash
from werkzeug._compat import iteritems, text_type
from werkzeug.urls import url_quote_plus, url_unquote_plus
from werkzeug._internal import _date_to_unix
from werkzeug.contrib.sessions import ModificationTrackingDict
from werkzeug.security import safe_str_cmp
from werkzeug._compat import to_native
class UnquoteError(Exception):
    """Raised internally when unquoting/deserializing a cookie value
    fails; callers treat it as "this cookie is invalid"."""
class SecureCookie(ModificationTrackingDict):
    """Represents a secure cookie. You can subclass this class and provide
    an alternative mac method. The important thing is that the mac method
    is a function with a similar interface to the hashlib. Required
    methods are update() and digest().
    Example usage:
    >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
    >>> x["foo"]
    42
    >>> x["baz"]
    (1, 2, 3)
    >>> x["blafasel"] = 23
    >>> x.should_save
    True
    :param data: the initial data. Either a dict, list of tuples or `None`.
    :param secret_key: the secret key. If not set `None` or not specified
                       it has to be set before :meth:`serialize` is called.
    :param new: The initial value of the `new` flag.
    """

    #: The hash method to use. This has to be a module with a new function
    #: or a function that creates a hashlib object. Such as `hashlib.md5`
    #: Subclasses can override this attribute. The default hash is sha1.
    #: Make sure to wrap this in staticmethod() if you store an arbitrary
    #: function there such as hashlib.sha1 which might be implemented
    #: as a function.
    hash_method = staticmethod(_default_hash)

    #: the module used for serialization. Unless overridden by subclasses
    #: the standard pickle module is used.
    #: NOTE(review): pickle is only safe here because :meth:`unserialize`
    #: verifies the HMAC before unpickling; never unpickle data that has
    #: not been authenticated.
    serialization_method = pickle

    #: if the contents should be base64 quoted. This can be disabled if the
    #: serialization process returns cookie safe strings only.
    quote_base64 = True

    def __init__(self, data=None, secret_key=None, new=True):
        ModificationTrackingDict.__init__(self, data or ())
        # explicitly convert it into a bytestring because python 2.6
        # no longer performs an implicit string conversion on hmac
        if secret_key is not None:
            secret_key = bytes(secret_key)
        self.secret_key = secret_key
        self.new = new

    def __repr__(self):
        # Trailing '*' marks a cookie with unsaved modifications.
        return '<%s %s%s>' % (
            self.__class__.__name__,
            dict.__repr__(self),
            self.should_save and '*' or ''
        )

    @property
    def should_save(self):
        """True if the session should be saved. By default this is only true
        for :attr:`modified` cookies, not :attr:`new`.
        """
        return self.modified

    @classmethod
    def quote(cls, value):
        """Quote the value for the cookie. This can be any object supported
        by :attr:`serialization_method`.
        :param value: the value to quote.
        """
        if cls.serialization_method is not None:
            value = cls.serialization_method.dumps(value)
        if cls.quote_base64:
            # Strip newlines that base64 may insert so the value is
            # cookie-safe.
            value = b''.join(base64.b64encode(value).splitlines()).strip()
        return value

    @classmethod
    def unquote(cls, value):
        """Unquote the value for the cookie. If unquoting does not work a
        :exc:`UnquoteError` is raised.
        :param value: the value to unquote.
        """
        try:
            if cls.quote_base64:
                value = base64.b64decode(value)
            if cls.serialization_method is not None:
                value = cls.serialization_method.loads(value)
            return value
        except Exception:
            # unfortunately pickle and other serialization modules can
            # cause pretty every error here. if we get one we catch it
            # and convert it into an UnquoteError
            raise UnquoteError()

    def serialize(self, expires=None):
        """Serialize the secure cookie into a string.
        If expires is provided, the session will be automatically invalidated
        after expiration when you unserialize it. This provides better
        protection against session cookie theft.
        :param expires: an optional expiration date for the cookie (a
                        :class:`datetime.datetime` object)
        """
        if self.secret_key is None:
            raise RuntimeError('no secret key defined')
        if expires:
            self['_expires'] = _date_to_unix(expires)
        result = []
        mac = hmac(self.secret_key, None, self.hash_method)
        # Sort keys so the MAC is deterministic regardless of insertion
        # order; the MAC covers every '|key=value' chunk.
        for key, value in sorted(self.items()):
            result.append(('%s=%s' % (
                url_quote_plus(key),
                self.quote(value).decode('ascii')
            )).encode('ascii'))
            mac.update(b'|' + result[-1])
        # Wire layout: '<base64 MAC>?<key=value>&<key=value>...'
        return b'?'.join([
            base64.b64encode(mac.digest()).strip(),
            b'&'.join(result)
        ])

    @classmethod
    def unserialize(cls, string, secret_key):
        """Load the secure cookie from a serialized string.
        Any parsing or authentication failure silently yields an empty
        cookie rather than raising.
        :param string: the cookie value to unserialize.
        :param secret_key: the secret key used to serialize the cookie.
        :return: a new :class:`SecureCookie`.
        """
        if isinstance(string, text_type):
            string = string.encode('utf-8', 'replace')
        if isinstance(secret_key, text_type):
            secret_key = secret_key.encode('utf-8', 'replace')
        try:
            base64_hash, data = string.split(b'?', 1)
        except (ValueError, IndexError):
            items = ()
        else:
            items = {}
            mac = hmac(secret_key, None, cls.hash_method)
            # Recompute the MAC over the raw chunks while parsing them.
            for item in data.split(b'&'):
                mac.update(b'|' + item)
                if b'=' not in item:
                    items = None
                    break
                key, value = item.split(b'=', 1)
                # try to make the key a string
                key = url_unquote_plus(key.decode('ascii'))
                try:
                    key = to_native(key)
                except UnicodeError:
                    pass
                items[key] = value
            # no parsing error and the mac looks okay, we can now
            # securely unpickle our cookie.
            try:
                client_hash = base64.b64decode(base64_hash)
            except TypeError:
                items = client_hash = None
            if items is not None and safe_str_cmp(client_hash, mac.digest()):
                try:
                    for key, value in iteritems(items):
                        items[key] = cls.unquote(value)
                except UnquoteError:
                    items = ()
                else:
                    # Honor the expiry timestamp embedded by serialize().
                    if '_expires' in items:
                        if time() > items['_expires']:
                            items = ()
                        else:
                            del items['_expires']
            else:
                items = ()
        return cls(items, secret_key, False)

    @classmethod
    def load_cookie(cls, request, key='session', secret_key=None):
        """Loads a :class:`SecureCookie` from a cookie in request. If the
        cookie is not set, a new :class:`SecureCookie` instanced is
        returned.
        :param request: a request object that has a `cookies` attribute
                        which is a dict of all cookie values.
        :param key: the name of the cookie.
        :param secret_key: the secret key used to unquote the cookie.
                           Always provide the value even though it has
                           no default!
        """
        data = request.cookies.get(key)
        if not data:
            return cls(secret_key=secret_key)
        return cls.unserialize(data, secret_key)

    def save_cookie(self, response, key='session', expires=None,
                    session_expires=None, max_age=None, path='/', domain=None,
                    secure=None, httponly=False, force=False):
        """Saves the SecureCookie in a cookie on response object. All
        parameters that are not described here are forwarded directly
        to :meth:`~BaseResponse.set_cookie`.
        :param response: a response object that has a
                         :meth:`~BaseResponse.set_cookie` method.
        :param key: the name of the cookie.
        :param session_expires: the expiration date of the secure cookie
                                stored information. If this is not provided
                                the cookie `expires` date is used instead.
        """
        # Only write the cookie when modified (or forced) to avoid
        # re-sending an unchanged Set-Cookie header.
        if force or self.should_save:
            data = self.serialize(session_expires or expires)
            response.set_cookie(key, data, expires=expires, max_age=max_age,
                                path=path, domain=domain, secure=secure,
                                httponly=httponly)
| mit |
Dude-X/selenium | py/selenium/webdriver/common/by.py | 61 | 1111 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The By implementation.
"""
class By(object):
    """
    Enumeration of the supported element-locator strategies.
    """
    # NOTE(review): these string values are presumably the strategy
    # names sent over the wire -- confirm with the protocol spec before
    # changing any of them.
    ID = "id"
    XPATH = "xpath"
    LINK_TEXT = "link text"
    PARTIAL_LINK_TEXT = "partial link text"
    NAME = "name"
    TAG_NAME = "tag name"
    CLASS_NAME = "class name"
    CSS_SELECTOR = "css selector"
| apache-2.0 |
luogangyi/bcec-nova | nova/cells/manager.py | 9 | 24581 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells Service Manager
"""
import datetime
import time
from oslo.config import cfg
from oslo import messaging as oslo_messaging
from nova.cells import messaging
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
from nova import context
from nova import exception
from nova import manager
from nova.objects import instance as instance_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import periodic_task
from nova.openstack.common import timeutils
# Configuration options specific to the nova-cells service.
cell_manager_opts = [
    cfg.StrOpt('driver',
               default='nova.cells.rpc_driver.CellsRPCDriver',
               help='Cells communication driver to use'),
    cfg.IntOpt("instance_updated_at_threshold",
               default=3600,
               help="Number of seconds after an instance was updated "
                    "or deleted to continue to update cells"),
    cfg.IntOpt("instance_update_num_instances",
               default=1,
               help="Number of instances to update per periodic task run")
]


CONF = cfg.CONF
# 'name' (this cell's own name) is declared in nova.cells.opts.
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.register_opts(cell_manager_opts, group='cells')

LOG = logging.getLogger(__name__)
class CellsManager(manager.Manager):
    """The nova-cells manager class. This class defines RPC
    methods that the local cell may call. This class is NOT used for
    messages coming from other cells. That communication is
    driver-specific.
    Communication to other cells happens via the nova.cells.messaging module.
    The MessageRunner from that module will handle routing the message to
    the correct cell via the communications driver. Most methods below
    create 'targeted' (where we want to route a message to a specific cell)
    or 'broadcast' (where we want a message to go to multiple cells)
    messages.
    Scheduling requests get passed to the scheduler class.
    """

    # RPC API version exposed by this manager.
    target = oslo_messaging.Target(version='1.27')
    def __init__(self, *args, **kwargs):
        # Warn operators up front: cells is an experimental feature.
        LOG.warn(_('The cells feature of Nova is considered experimental '
                   'by the OpenStack project because it receives much '
                   'less testing than the rest of Nova. This may change '
                   'in the future, but current deployers should be aware '
                   'that the use of it in production right now may be '
                   'risky.'))
        # Mostly for tests.
        cell_state_manager = kwargs.pop('cell_state_manager', None)
        super(CellsManager, self).__init__(service_name='cells',
                                           *args, **kwargs)
        if cell_state_manager is None:
            cell_state_manager = cells_state.CellStateManager
        self.state_manager = cell_state_manager()
        self.msg_runner = messaging.MessageRunner(self.state_manager)
        # The inter-cell communications driver is configurable
        # (CONF.cells.driver) and loaded dynamically.
        cells_driver_cls = importutils.import_class(
            CONF.cells.driver)
        self.driver = cells_driver_cls()
        # Refilled lazily by the _heal_instances periodic task.
        self.instances_to_heal = iter([])
    def post_start_hook(self):
        """Have the driver start its servers for inter-cell communication.
        Also ask our child cells for their capacities and capabilities so
        we get them more quickly than just waiting for the next periodic
        update. Receiving the updates from the children will cause us to
        update our parents. If we don't have any children, just update
        our parents immediately.
        """
        # FIXME(comstud): There's currently no hooks when services are
        # stopping, so we have no way to stop servers cleanly.
        self.driver.start_servers(self.msg_runner)
        ctxt = context.get_admin_context()
        if self.state_manager.get_child_cells():
            self.msg_runner.ask_children_for_capabilities(ctxt)
            self.msg_runner.ask_children_for_capacities(ctxt)
        else:
            # Leaf cell: nothing below us to ask, so push upward now.
            self._update_our_parents(ctxt)
    @periodic_task.periodic_task
    def _update_our_parents(self, ctxt):
        """Update our parent cells with our capabilities and capacity
        if we're at the bottom of the tree.

        Runs periodically via the periodic_task decorator and is also
        invoked directly from post_start_hook for leaf cells.
        """
        self.msg_runner.tell_parents_our_capabilities(ctxt)
        self.msg_runner.tell_parents_our_capacities(ctxt)
    @periodic_task.periodic_task
    def _heal_instances(self, ctxt):
        """Periodic task to send updates for a number of instances to
        parent cells.
        On every run of the periodic task, we will attempt to sync
        'CONF.cells.instance_update_num_instances' number of instances.
        When we get the list of instances, we shuffle them so that multiple
        nova-cells services aren't attempting to sync the same instances
        in lockstep.
        If CONF.cells.instance_update_at_threshold is set, only attempt
        to sync instances that have been updated recently. The CONF
        setting defines the maximum number of seconds old the updated_at
        can be. Ie, a threshold of 3600 means to only update instances
        that have modified in the last hour.
        """
        if not self.state_manager.get_parent_cells():
            # No need to sync up if we have no parents.
            return
        # Mutable flag shared with the closure below; marks that the
        # candidate list was refreshed once during this run.
        info = {'updated_list': False}

        def _next_instance():
            # NOTE: despite the local name 'instance', the iterator
            # yields instance *uuids* (uuids_only=True below).
            try:
                instance = self.instances_to_heal.next()
            except StopIteration:
                if info['updated_list']:
                    return
                threshold = CONF.cells.instance_updated_at_threshold
                updated_since = None
                if threshold > 0:
                    updated_since = timeutils.utcnow() - datetime.timedelta(
                        seconds=threshold)
                self.instances_to_heal = cells_utils.get_instances_to_sync(
                    ctxt, updated_since=updated_since, shuffle=True,
                    uuids_only=True)
                info['updated_list'] = True
                try:
                    instance = self.instances_to_heal.next()
                except StopIteration:
                    return
            return instance

        # read_deleted so deleted instances can still be broadcast as
        # destroys by _sync_instance.
        rd_context = ctxt.elevated(read_deleted='yes')
        for i in xrange(CONF.cells.instance_update_num_instances):
            while True:
                # Yield to other greenthreads
                time.sleep(0)
                instance_uuid = _next_instance()
                if not instance_uuid:
                    return
                try:
                    instance = self.db.instance_get_by_uuid(rd_context,
                                                            instance_uuid)
                except exception.InstanceNotFound:
                    continue
                self._sync_instance(ctxt, instance)
                break
def _sync_instance(self, ctxt, instance):
"""Broadcast an instance_update or instance_destroy message up to
parent cells.
"""
if instance['deleted']:
self.instance_destroy_at_top(ctxt, instance)
else:
self.instance_update_at_top(ctxt, instance)
def schedule_run_instance(self, ctxt, host_sched_kwargs):
"""Pick a cell (possibly ourselves) to build new instance(s)
and forward the request accordingly.
"""
# Target is ourselves first.
our_cell = self.state_manager.get_my_state()
self.msg_runner.schedule_run_instance(ctxt, our_cell,
host_sched_kwargs)
def build_instances(self, ctxt, build_inst_kwargs):
"""Pick a cell (possibly ourselves) to build new instance(s) and
forward the request accordingly.
"""
# Target is ourselves first.
our_cell = self.state_manager.get_my_state()
self.msg_runner.build_instances(ctxt, our_cell, build_inst_kwargs)
    def get_cell_info_for_neighbors(self, _ctxt):
        """Return cell information for our neighbor cells.

        The request context is unused (hence the leading underscore).
        """
        return self.state_manager.get_cell_info_for_neighbors()
def run_compute_api_method(self, ctxt, cell_name, method_info, call):
"""Call a compute API method in a specific cell."""
response = self.msg_runner.run_compute_api_method(ctxt,
cell_name,
method_info,
call)
if call:
return response.value_or_raise()
    # One-way delegations toward the top (API) cell; no reply is
    # awaited.
    def instance_update_at_top(self, ctxt, instance):
        """Update an instance at the top level cell."""
        self.msg_runner.instance_update_at_top(ctxt, instance)

    def instance_destroy_at_top(self, ctxt, instance):
        """Destroy an instance at the top level cell."""
        self.msg_runner.instance_destroy_at_top(ctxt, instance)
def instance_delete_everywhere(self, ctxt, instance, delete_type):
"""This is used by API cell when it didn't know what cell
an instance was in, but the instance was requested to be
deleted or soft_deleted. So, we'll broadcast this everywhere.
"""
if isinstance(instance, dict):
instance = instance_obj.Instance._from_db_object(ctxt,
instance_obj.Instance(), instance)
self.msg_runner.instance_delete_everywhere(ctxt, instance,
delete_type)
    # One-way delegations toward the top (API) cell; no reply is
    # awaited.
    def instance_fault_create_at_top(self, ctxt, instance_fault):
        """Create an instance fault at the top level cell."""
        self.msg_runner.instance_fault_create_at_top(ctxt, instance_fault)

    def bw_usage_update_at_top(self, ctxt, bw_update_info):
        """Update bandwidth usage at top level cell."""
        self.msg_runner.bw_usage_update_at_top(ctxt, bw_update_info)
    def sync_instances(self, ctxt, project_id, updated_since, deleted):
        """Force a sync of all instances, potentially by project_id,
        and potentially since a certain date/time.

        :param project_id: restrict the sync to one project when set
        :param updated_since: only sync instances modified after this
        :param deleted: whether to include deleted instances
        """
        self.msg_runner.sync_instances(ctxt, project_id, updated_since,
                                       deleted)
def service_get_all(self, ctxt, filters):
"""Return services in this cell and in all child cells."""
responses = self.msg_runner.service_get_all(ctxt, filters)
ret_services = []
# 1 response per cell. Each response is a list of services.
for response in responses:
services = response.value_or_raise()
for service in services:
cells_utils.add_cell_to_service(service, response.cell_name)
ret_services.append(service)
return ret_services
def service_get_by_compute_host(self, ctxt, host_name):
"""Return a service entry for a compute host in a certain cell."""
cell_name, host_name = cells_utils.split_cell_and_item(host_name)
response = self.msg_runner.service_get_by_compute_host(ctxt,
cell_name,
host_name)
service = response.value_or_raise()
cells_utils.add_cell_to_service(service, response.cell_name)
return service
    def get_host_uptime(self, ctxt, host_name):
        """Return host uptime for a compute host in a certain cell
        :param host_name: fully qualified hostname. It should be in format of
         parent!child@host_id
        """
        cell_name, host_name = cells_utils.split_cell_and_item(host_name)
        response = self.msg_runner.get_host_uptime(ctxt, cell_name,
                                                   host_name)
        # Blocking call: propagate the remote result (or exception).
        return response.value_or_raise()
    def service_update(self, ctxt, host_name, binary, params_to_update):
        """Used to enable/disable a service. For compute services, setting to
        disabled stops new builds arriving on that host.
        :param host_name: the name of the host machine that the service is
                          running
        :param binary: The name of the executable that the service runs as
        :param params_to_update: eg. {'disabled': True}
        :returns: the service reference
        """
        cell_name, host_name = cells_utils.split_cell_and_item(host_name)
        response = self.msg_runner.service_update(
            ctxt, cell_name, host_name, binary, params_to_update)
        service = response.value_or_raise()
        # Tag the result with its source cell, matching service_get_*.
        cells_utils.add_cell_to_service(service, response.cell_name)
        return service
def service_delete(self, ctxt, cell_service_id):
"""Deletes the specified service."""
cell_name, service_id = cells_utils.split_cell_and_item(
cell_service_id)
self.msg_runner.service_delete(ctxt, cell_name, service_id)
    def proxy_rpc_to_manager(self, ctxt, topic, rpc_message, call, timeout):
        """Proxy an RPC message as-is to a manager."""
        compute_topic = CONF.compute_topic
        # Strip the leading compute topic (the +1 skips the separator
        # character) leaving the 'cell!...@host' part.
        cell_and_host = topic[len(compute_topic) + 1:]
        cell_name, host_name = cells_utils.split_cell_and_item(cell_and_host)
        response = self.msg_runner.proxy_rpc_to_manager(ctxt, cell_name,
                host_name, topic, rpc_message, call, timeout)
        return response.value_or_raise()
    def task_log_get_all(self, ctxt, task_name, period_beginning,
                         period_ending, host=None, state=None):
        """Get task logs from the DB from all cells or a particular
        cell.
        If 'host' is not None, host will be of the format 'cell!name@host',
        with '@host' being optional. The query will be directed to the
        appropriate cell and return all task logs, or task logs matching
        the host if specified.
        'state' also may be None. If it's not, filter by the state as well.
        """
        if host is None:
            cell_name = None
        else:
            cell_name, host = cells_utils.split_cell_and_item(host)
            # If no cell name was given, assume that the host name is the
            # cell_name and that the target is all hosts
            if cell_name is None:
                cell_name, host = host, cell_name
        responses = self.msg_runner.task_log_get_all(ctxt, cell_name,
                task_name, period_beginning, period_ending,
                host=host, state=state)
        # 1 response per cell. Each response is a list of task log
        # entries.
        ret_task_logs = []
        for response in responses:
            task_logs = response.value_or_raise()
            for task_log in task_logs:
                # Tag each entry with the cell it came from.
                cells_utils.add_cell_to_task_log(task_log,
                                                 response.cell_name)
                ret_task_logs.append(task_log)
        return ret_task_logs
def compute_node_get(self, ctxt, compute_id):
"""Get a compute node by ID in a specific cell."""
cell_name, compute_id = cells_utils.split_cell_and_item(
compute_id)
response = self.msg_runner.compute_node_get(ctxt, cell_name,
compute_id)
node = response.value_or_raise()
cells_utils.add_cell_to_compute_node(node, cell_name)
return node
def compute_node_get_all(self, ctxt, hypervisor_match=None):
"""Return list of compute nodes in all cells."""
responses = self.msg_runner.compute_node_get_all(ctxt,
hypervisor_match=hypervisor_match)
# 1 response per cell. Each response is a list of compute_node
# entries.
ret_nodes = []
for response in responses:
nodes = response.value_or_raise()
for node in nodes:
cells_utils.add_cell_to_compute_node(node,
response.cell_name)
ret_nodes.append(node)
return ret_nodes
def compute_node_stats(self, ctxt):
"""Return compute node stats totals from all cells."""
responses = self.msg_runner.compute_node_stats(ctxt)
totals = {}
for response in responses:
data = response.value_or_raise()
for key, val in data.iteritems():
totals.setdefault(key, 0)
totals[key] += val
return totals
    def actions_get(self, ctxt, cell_name, instance_uuid):
        """Return the recorded actions for an instance in *cell_name*."""
        response = self.msg_runner.actions_get(ctxt, cell_name, instance_uuid)
        return response.value_or_raise()

    def action_get_by_request_id(self, ctxt, cell_name, instance_uuid,
                                 request_id):
        """Return a single instance action, looked up by request id."""
        response = self.msg_runner.action_get_by_request_id(ctxt, cell_name,
                                                            instance_uuid,
                                                            request_id)
        return response.value_or_raise()

    def action_events_get(self, ctxt, cell_name, action_id):
        """Return the events recorded for an instance action."""
        response = self.msg_runner.action_events_get(ctxt, cell_name,
                                                     action_id)
        return response.value_or_raise()
    def consoleauth_delete_tokens(self, ctxt, instance_uuid):
        """Delete consoleauth tokens for an instance in API cells."""
        # Fire-and-forget: the runner's return value is ignored.
        self.msg_runner.consoleauth_delete_tokens(ctxt, instance_uuid)
    def validate_console_port(self, ctxt, instance_uuid, console_port,
                              console_type):
        """Validate console port with child cell compute node.

        :raises exception.InstanceUnknownCell: when the instance record
            has no cell_name, so the request cannot be routed.
        """
        instance = self.db.instance_get_by_uuid(ctxt, instance_uuid)
        if not instance['cell_name']:
            raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
        response = self.msg_runner.validate_console_port(ctxt,
                instance['cell_name'], instance_uuid, console_port,
                console_type)
        return response.value_or_raise()
    def get_capacities(self, ctxt, cell_name):
        """Return capacity info for *cell_name* from the local state
        manager."""
        return self.state_manager.get_capacities(cell_name)

    def bdm_update_or_create_at_top(self, ctxt, bdm, create=None):
        """BDM was created/updated in this cell. Tell the API cells."""
        self.msg_runner.bdm_update_or_create_at_top(ctxt, bdm, create=create)

    def bdm_destroy_at_top(self, ctxt, instance_uuid, device_name=None,
                           volume_id=None):
        """BDM was destroyed for instance in this cell. Tell the API cells."""
        self.msg_runner.bdm_destroy_at_top(ctxt, instance_uuid,
                                           device_name=device_name,
                                           volume_id=volume_id)
def get_migrations(self, ctxt, filters):
"""Fetch migrations applying the filters."""
target_cell = None
if "cell_name" in filters:
_path_cell_sep = cells_utils.PATH_CELL_SEP
target_cell = '%s%s%s' % (CONF.cells.name, _path_cell_sep,
filters['cell_name'])
responses = self.msg_runner.get_migrations(ctxt, target_cell,
False, filters)
migrations = []
for response in responses:
migrations += response.value_or_raise()
return migrations
    def instance_update_from_api(self, ctxt, instance, expected_vm_state,
                                 expected_task_state, admin_state_reset):
        """Update an instance in its cell."""
        # Expected states are passed along so the target cell can detect
        # racing updates; no result is returned.
        self.msg_runner.instance_update_from_api(ctxt, instance,
                                                 expected_vm_state,
                                                 expected_task_state,
                                                 admin_state_reset)
    def start_instance(self, ctxt, instance):
        """Start an instance in its cell."""
        # Delegated to the message runner; no result is returned.
        self.msg_runner.start_instance(ctxt, instance)
    def stop_instance(self, ctxt, instance, do_cast=True):
        """Stop an instance in its cell."""
        # With do_cast=False the call blocks and the cell's result (or
        # exception) is propagated back to the caller.
        response = self.msg_runner.stop_instance(ctxt, instance,
                                                 do_cast=do_cast)
        if not do_cast:
            return response.value_or_raise()
    def cell_create(self, ctxt, values):
        """Create a child cell entry from the given values dict."""
        return self.state_manager.cell_create(ctxt, values)
    def cell_update(self, ctxt, cell_name, values):
        """Update the named child cell entry with the given values."""
        return self.state_manager.cell_update(ctxt, cell_name, values)
    def cell_delete(self, ctxt, cell_name):
        """Delete the named child cell entry."""
        return self.state_manager.cell_delete(ctxt, cell_name)
    def cell_get(self, ctxt, cell_name):
        """Return the named child cell entry."""
        return self.state_manager.cell_get(ctxt, cell_name)
    def reboot_instance(self, ctxt, instance, reboot_type):
        """Reboot an instance in its cell."""
        # Delegated to the message runner; no result is returned.
        self.msg_runner.reboot_instance(ctxt, instance, reboot_type)
    def pause_instance(self, ctxt, instance):
        """Pause an instance in its cell."""
        # Delegated to the message runner; no result is returned.
        self.msg_runner.pause_instance(ctxt, instance)
    def unpause_instance(self, ctxt, instance):
        """Unpause an instance in its cell."""
        # Delegated to the message runner; no result is returned.
        self.msg_runner.unpause_instance(ctxt, instance)
    def suspend_instance(self, ctxt, instance):
        """Suspend an instance in its cell."""
        # Delegated to the message runner; no result is returned.
        self.msg_runner.suspend_instance(ctxt, instance)
    def resume_instance(self, ctxt, instance):
        """Resume an instance in its cell."""
        # Delegated to the message runner; no result is returned.
        self.msg_runner.resume_instance(ctxt, instance)
    def terminate_instance(self, ctxt, instance):
        """Delete an instance in its cell."""
        # Delegated to the message runner; no result is returned.
        self.msg_runner.terminate_instance(ctxt, instance)
    def soft_delete_instance(self, ctxt, instance):
        """Soft-delete an instance in its cell."""
        # Delegated to the message runner; no result is returned.
        self.msg_runner.soft_delete_instance(ctxt, instance)
    def resize_instance(self, ctxt, instance, flavor,
                        extra_instance_updates):
        """Resize an instance in its cell."""
        # Delegated to the message runner; no result is returned.
        self.msg_runner.resize_instance(ctxt, instance,
                                        flavor, extra_instance_updates)
    def live_migrate_instance(self, ctxt, instance, block_migration,
                              disk_over_commit, host_name):
        """Live migrate an instance in its cell."""
        # Delegated to the message runner; no result is returned.
        self.msg_runner.live_migrate_instance(ctxt, instance,
                                              block_migration,
                                              disk_over_commit,
                                              host_name)
    def revert_resize(self, ctxt, instance):
        """Revert a resize for an instance in its cell."""
        # Delegated to the message runner; no result is returned.
        self.msg_runner.revert_resize(ctxt, instance)
    def confirm_resize(self, ctxt, instance):
        """Confirm a resize for an instance in its cell."""
        # Delegated to the message runner; no result is returned.
        self.msg_runner.confirm_resize(ctxt, instance)
    def reset_network(self, ctxt, instance):
        """Reset networking for an instance in its cell."""
        # Delegated to the message runner; no result is returned.
        self.msg_runner.reset_network(ctxt, instance)
    def inject_network_info(self, ctxt, instance):
        """Inject networking for an instance in its cell."""
        # Delegated to the message runner; no result is returned.
        self.msg_runner.inject_network_info(ctxt, instance)
    def snapshot_instance(self, ctxt, instance, image_id):
        """Snapshot an instance in its cell."""
        # Delegated to the message runner; no result is returned.
        self.msg_runner.snapshot_instance(ctxt, instance, image_id)
    def backup_instance(self, ctxt, instance, image_id, backup_type, rotation):
        """Backup an instance in its cell."""
        # Delegated to the message runner; no result is returned.
        self.msg_runner.backup_instance(ctxt, instance, image_id,
                                        backup_type, rotation)
    def rebuild_instance(self, ctxt, instance, image_href, admin_password,
                         files_to_inject, preserve_ephemeral, kwargs):
        """Rebuild an instance in its cell."""
        self.msg_runner.rebuild_instance(ctxt, instance, image_href,
                                         admin_password, files_to_inject,
                                         preserve_ephemeral, kwargs)
| apache-2.0 |
bunnyitvn/webptn | tests/modeltests/one_to_one/models.py | 60 | 1578 | """
10. One-to-one relationships
To define a one-to-one relationship, use ``OneToOneField()``.
In this example, a ``Place`` optionally can be a ``Restaurant``.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Place(models.Model):
    # A place that may optionally also be a Restaurant (see Restaurant's
    # OneToOneField below).
    name = models.CharField(max_length=50)
    address = models.CharField(max_length=80)

    def __str__(self):
        return "%s the place" % self.name
@python_2_unicode_compatible
class Restaurant(models.Model):
    # The one-to-one link doubles as this table's primary key, so every
    # Restaurant shares its pk with exactly one Place.
    place = models.OneToOneField(Place, primary_key=True)
    serves_hot_dogs = models.BooleanField()
    serves_pizza = models.BooleanField()

    def __str__(self):
        return "%s the restaurant" % self.place.name
@python_2_unicode_compatible
class Waiter(models.Model):
    # Many waiters per restaurant (plain foreign key, not one-to-one).
    restaurant = models.ForeignKey(Restaurant)
    name = models.CharField(max_length=50)

    def __str__(self):
        return "%s the waiter at %s" % (self.name, self.restaurant)
class ManualPrimaryKey(models.Model):
    # Explicit character primary key instead of the auto-generated id.
    primary_key = models.CharField(max_length=10, primary_key=True)
    name = models.CharField(max_length = 50)
class RelatedModel(models.Model):
    # One-to-one against a model that uses a manual (non-auto) primary key.
    link = models.OneToOneField(ManualPrimaryKey)
    name = models.CharField(max_length = 50)
@python_2_unicode_compatible
class MultiModel(models.Model):
    # Two independent one-to-one links on a single model.
    link1 = models.OneToOneField(Place)
    link2 = models.OneToOneField(ManualPrimaryKey)
    name = models.CharField(max_length=50)

    def __str__(self):
        return "Multimodel %s" % self.name
| bsd-3-clause |
flimshaw/three.js | utils/exporters/blender/addons/io_three/__init__.py | 60 | 31178 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import os
import json
import logging
import bpy
from bpy_extras.io_utils import ExportHelper
from bpy.props import (
EnumProperty,
BoolProperty,
FloatProperty,
IntProperty,
StringProperty
)
from . import constants
# Add-on wide logging configuration; the exporter's own "logging" option
# selects verbosity at export time.
logging.basicConfig(
    format='%(levelname)s:THREE:%(message)s',
    level=logging.DEBUG)
# Blender add-on registration metadata (read by Blender's add-on manager).
bl_info = {
    'name': "Three.js Format",
    'author': "repsac, mrdoob, yomotsu, mpk, jpweeks, rkusa, tschw, jackcaron, bhouston",
    'version': (1, 5, 0),
    'blender': (2, 74, 0),
    'location': "File > Export",
    'description': "Export Three.js formatted JSON files.",
    'warning': "Importer not included.",
    'wiki_url': "https://github.com/mrdoob/three.js/tree/"\
                "master/utils/exporters/blender",
    'tracker_url': "https://github.com/mrdoob/three.js/issues",
    'category': 'Import-Export'
}
def _geometry_types():
    """The valid geometry types that are supported by Three.js

    :return: list of (identifier, label, description) tuples suitable
             for an EnumProperty
    """
    valid = (constants.GLOBAL,
             constants.GEOMETRY,
             constants.BUFFER_GEOMETRY)
    return [(key, key.title(), key) for key in valid]
# Per-mesh override for the exported geometry type; GLOBAL defers to the
# exporter-level setting chosen in the export dialog.
bpy.types.Mesh.THREE_geometry_type = EnumProperty(
    name="Geometry type",
    description="Geometry type",
    items=_geometry_types(),
    default=constants.GLOBAL)
class ThreeMesh(bpy.types.Panel):
    """Creates custom properties on a mesh node"""
    bl_label = 'THREE'
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = 'data'

    def draw(self, context):
        """Draw the geometry-type selector in the mesh data tab.

        :param context: Blender draw context
        """
        row = self.layout.row()
        if context.mesh:
            row.prop(context.mesh,
                     'THREE_geometry_type',
                     text="Type")
def _blending_types(index):
    """Supported blending types for Three.js

    :param index:
    :type index: int
    :returns: tuple of types (str, str, str)
    """
    modes = (constants.BLENDING_TYPES.NONE,
             constants.BLENDING_TYPES.NORMAL,
             constants.BLENDING_TYPES.ADDITIVE,
             constants.BLENDING_TYPES.SUBTRACTIVE,
             constants.BLENDING_TYPES.MULTIPLY,
             constants.BLENDING_TYPES.CUSTOM)
    # EnumProperty items are (identifier, name, description) triples.
    return (modes[index],) * 3
# Custom material properties surfaced in the ThreeMaterial panel.
# NOTE(review): range(5) exposes only the first five blending modes, so
# BLENDING_TYPES.CUSTOM is not selectable -- confirm this is intentional.
bpy.types.Material.THREE_blending_type = EnumProperty(
    name="Blending type",
    description="Blending type",
    items=[_blending_types(x) for x in range(5)],
    default=constants.BLENDING_TYPES.NORMAL)
bpy.types.Material.THREE_depth_write = BoolProperty(default=True)
bpy.types.Material.THREE_depth_test = BoolProperty(default=True)
bpy.types.Material.THREE_double_sided = BoolProperty(default=False)
class ThreeMaterial(bpy.types.Panel):
    """Adds custom properties to the Materials of an object"""
    bl_label = 'THREE'
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = 'material'

    def draw(self, context):
        """Draw the Three.js material options in the material tab.

        :param context: Blender draw context
        """
        layout = self.layout
        mat = context.material
        # No material selected on the object -> draw nothing.
        if mat is not None:
            row = layout.row()
            row.label(text="Selected material: %s" % mat.name)
            row = layout.row()
            row.prop(mat, 'THREE_blending_type',
                     text="Blending type")
            row = layout.row()
            row.prop(mat, 'THREE_depth_write',
                     text="Enable depth writing")
            row = layout.row()
            row.prop(mat, 'THREE_depth_test',
                     text="Enable depth testing")
            row = layout.row()
            row.prop(mat, 'THREE_double_sided',
                     text="Double-sided")
def _mag_filters(index):
    """Three.js mag filters

    :param index:
    :type index: int
    :returns: tuple with the filter values
    """
    filters = (constants.LINEAR_FILTERS.LINEAR,
               constants.NEAREST_FILTERS.NEAREST)
    # EnumProperty items are (identifier, name, description) triples.
    return (filters[index],) * 3
# Magnification filter choice attached to every texture datablock.
bpy.types.Texture.THREE_mag_filter = EnumProperty(
    name="Mag Filter",
    items=[_mag_filters(x) for x in range(2)],
    default=constants.LINEAR_FILTERS.LINEAR)
def _min_filters(index):
    """Three.js min filters

    :param index:
    :type index: int
    :returns: tuple with the filter values
    """
    filters = (constants.LINEAR_FILTERS.LINEAR,
               constants.LINEAR_FILTERS.MIP_MAP_NEAREST,
               constants.LINEAR_FILTERS.MIP_MAP_LINEAR,
               constants.NEAREST_FILTERS.NEAREST,
               constants.NEAREST_FILTERS.MIP_MAP_NEAREST,
               constants.NEAREST_FILTERS.MIP_MAP_LINEAR)
    # EnumProperty items are (identifier, name, description) triples.
    return (filters[index],) * 3
# Minification filter choice attached to every texture datablock.
bpy.types.Texture.THREE_min_filter = EnumProperty(
    name="Min Filter",
    items=[_min_filters(x) for x in range(6)],
    default=constants.LINEAR_FILTERS.MIP_MAP_LINEAR)
def _mapping(index):
    """Three.js texture mappings types

    :param index:
    :type index: int
    :returns: tuple with the mapping values
    """
    mappings = (constants.MAPPING_TYPES.UV,
                constants.MAPPING_TYPES.CUBE_REFLECTION,
                constants.MAPPING_TYPES.CUBE_REFRACTION,
                constants.MAPPING_TYPES.SPHERICAL_REFLECTION)
    # EnumProperty items are (identifier, name, description) triples.
    return (mappings[index],) * 3
# Texture mapping mode attached to every texture datablock.
bpy.types.Texture.THREE_mapping = EnumProperty(
    name="Mapping",
    items=[_mapping(x) for x in range(4)],
    default=constants.MAPPING_TYPES.UV)
class ThreeTexture(bpy.types.Panel):
    """Adds custom properties to a texture"""
    bl_label = 'THREE'
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = 'texture'

    #@TODO: possible to make cycles compatible?
    def draw(self, context):
        """Draw the Three.js texture options in the texture tab.

        :param context: Blender draw context
        """
        layout = self.layout
        tex = context.texture
        if tex is not None:
            row = layout.row()
            row.prop(tex, 'THREE_mapping', text="Mapping")
            row = layout.row()
            row.prop(tex, 'THREE_mag_filter', text="Mag Filter")
            row = layout.row()
            row.prop(tex, 'THREE_min_filter', text="Min Filter")
# Per-object opt-out flag; objects with this disabled are skipped on export.
bpy.types.Object.THREE_export = bpy.props.BoolProperty(default=True)
class ThreeObject(bpy.types.Panel):
    """Adds custom properties to an object"""
    bl_label = 'THREE'
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = 'object'

    def draw(self, context):
        """Draw the per-object export toggle in the object tab.

        :param context: Blender draw context
        """
        layout = self.layout
        obj = context.object
        row = layout.row()
        row.prop(obj, 'THREE_export', text='Export')
class ThreeExportSettings(bpy.types.Operator):
    """Save the current export settings (gets saved in .blend)"""
    bl_label = "Save Settings"
    bl_idname = "scene.three_export_settings_set"

    def execute(self, context):
        """Persist the active export operator's options onto the scene."""
        # NOTE(review): forcing cycles.use_samples_final looks unrelated to
        # saving exporter settings (and raises when the Cycles engine is not
        # available) -- confirm whether this is leftover debugging code.
        cycles = context.scene.cycles
        cycles.use_samples_final = True
        # Stored as a custom scene property, so it ships inside the .blend.
        context.scene[constants.EXPORT_SETTINGS_KEY] = set_settings(context.active_operator.properties)
        self.report({"INFO"}, "Three Export Settings Saved")
        return {"FINISHED"}
def restore_export_settings(properties, settings):
    """Restore previously saved export settings onto the operator.

    Each operator attribute is restored from the dict produced by
    :func:`set_settings`; keys missing from the dict fall back to the
    add-on defaults in ``constants.EXPORT_OPTIONS``.  This replaces ~120
    lines of copy-pasted ``settings.get`` calls with one table.

    :param properties: export operator properties, updated in place
    :param settings: dict of saved settings (may be partial)
    """
    # (operator attribute, settings key) pairs.  The saved value wins;
    # otherwise the default from constants.EXPORT_OPTIONS is used.
    option_map = (
        # Geometry
        ('option_vertices', constants.VERTICES),
        ('option_faces', constants.FACES),
        ('option_normals', constants.NORMALS),
        ('option_skinning', constants.SKINNING),
        ('option_bones', constants.BONES),
        ('option_influences', constants.INFLUENCES_PER_VERTEX),
        ('option_apply_modifiers', constants.APPLY_MODIFIERS),
        ('option_extra_vgroups', constants.EXTRA_VGROUPS),
        ('option_geometry_type', constants.GEOMETRY_TYPE),
        ('option_index_type', constants.INDEX_TYPE),
        # Materials
        ('option_materials', constants.MATERIALS),
        ('option_uv_coords', constants.UVS),
        ('option_face_materials', constants.FACE_MATERIALS),
        ('option_maps', constants.MAPS),
        ('option_colors', constants.COLORS),
        ('option_mix_colors', constants.MIX_COLORS),
        # Settings
        ('option_scale', constants.SCALE),
        ('option_round_off', constants.ENABLE_PRECISION),
        ('option_round_value', constants.PRECISION),
        ('option_custom_properties', constants.CUSTOM_PROPERTIES),
        ('option_logging', constants.LOGGING),
        ('option_indent', constants.INDENT),
        ('option_export_textures', constants.EXPORT_TEXTURES),
        ('option_embed_textures', constants.EMBED_TEXTURES),
        ('option_texture_folder', constants.TEXTURE_FOLDER),
        ('option_embed_animation', constants.EMBED_ANIMATION),
        # Scene
        ('option_export_scene', constants.SCENE),
        ('option_lights', constants.LIGHTS),
        ('option_cameras', constants.CAMERAS),
        ('option_hierarchy', constants.HIERARCHY),
        # Animation
        ('option_animation_morph', constants.MORPH_TARGETS),
        ('option_blend_shape', constants.BLEND_SHAPES),
        ('option_animation_skeletal', constants.ANIMATION),
        ('option_keyframes', constants.KEYFRAMES),
        ('option_frame_step', constants.FRAME_STEP),
        ('option_frame_index_as_time', constants.FRAME_INDEX_AS_TIME),
    )
    for attribute, key in option_map:
        setattr(properties, attribute,
                settings.get(key, constants.EXPORT_OPTIONS[key]))
    # Compression historically defaults to NONE rather than an
    # EXPORT_OPTIONS entry, so it is handled outside the table.
    properties.option_compression = settings.get(
        constants.COMPRESSION, constants.NONE)
    # option_embed_geometry is intentionally omitted -- the EMBED_GEOMETRY
    # option is disabled elsewhere in this module.
def set_settings(properties):
    """Set the export settings to the correct keys.

    Inverse of ``restore_export_settings``: serializes the operator's
    options into a plain dict keyed by the ``constants`` names.

    :param properties: export operator properties
    :returns: settings
    :rtype: dict
    """
    settings = {
        constants.VERTICES: properties.option_vertices,
        constants.FACES: properties.option_faces,
        constants.NORMALS: properties.option_normals,
        constants.SKINNING: properties.option_skinning,
        constants.BONES: properties.option_bones,
        constants.EXTRA_VGROUPS: properties.option_extra_vgroups,
        constants.APPLY_MODIFIERS: properties.option_apply_modifiers,
        constants.GEOMETRY_TYPE: properties.option_geometry_type,
        constants.INDEX_TYPE: properties.option_index_type,
        constants.MATERIALS: properties.option_materials,
        constants.UVS: properties.option_uv_coords,
        constants.FACE_MATERIALS: properties.option_face_materials,
        constants.MAPS: properties.option_maps,
        constants.COLORS: properties.option_colors,
        constants.MIX_COLORS: properties.option_mix_colors,
        constants.SCALE: properties.option_scale,
        constants.ENABLE_PRECISION: properties.option_round_off,
        constants.PRECISION: properties.option_round_value,
        constants.CUSTOM_PROPERTIES: properties.option_custom_properties,
        constants.LOGGING: properties.option_logging,
        constants.COMPRESSION: properties.option_compression,
        constants.INDENT: properties.option_indent,
        constants.EXPORT_TEXTURES: properties.option_export_textures,
        constants.EMBED_TEXTURES: properties.option_embed_textures,
        constants.TEXTURE_FOLDER: properties.option_texture_folder,
        constants.SCENE: properties.option_export_scene,
        #constants.EMBED_GEOMETRY: properties.option_embed_geometry,
        constants.EMBED_ANIMATION: properties.option_embed_animation,
        constants.LIGHTS: properties.option_lights,
        constants.CAMERAS: properties.option_cameras,
        constants.HIERARCHY: properties.option_hierarchy,
        constants.MORPH_TARGETS: properties.option_animation_morph,
        constants.BLEND_SHAPES: properties.option_blend_shape,
        constants.ANIMATION: properties.option_animation_skeletal,
        constants.KEYFRAMES: properties.option_keyframes,
        constants.FRAME_STEP: properties.option_frame_step,
        constants.FRAME_INDEX_AS_TIME: properties.option_frame_index_as_time,
        constants.INFLUENCES_PER_VERTEX: properties.option_influences
    }
    return settings
def compression_types():
    """Supported compression formats

    msgpack is only offered when the module can actually be imported.

    :rtype: list of (identifier, name, description) tuples
    """
    formats = [(constants.NONE,) * 3]
    try:
        import msgpack  # noqa: imported only to probe availability
    except ImportError:
        pass
    else:
        formats.append((constants.MSGPACK,) * 3)
    return formats
def animation_options():
    """The supported skeletal animation types

    :returns: list of (identifier, label, description) tuples
    """
    modes = (constants.OFF, constants.POSE, constants.REST)
    return [(mode, mode.title(), mode) for mode in modes]
def resolve_conflicts(self, context):
    """Keep dependent options consistent (EnumProperty update callback).

    Embedding textures only makes sense when textures are exported, so
    turning texture export off also clears the embed flag.
    """
    if not self.option_export_textures:
        self.option_embed_textures = False
class ExportThree(bpy.types.Operator, ExportHelper):
    """Class that handles the export properties"""
    bl_idname = 'export.three'
    bl_label = 'Export THREE'
    bl_options = {'PRESET'}
    filename_ext = constants.EXTENSION
    ## Geometry options {
    option_vertices = BoolProperty(
        name="Vertices",
        description="Export vertices",
        default=constants.EXPORT_OPTIONS[constants.VERTICES])
    option_faces = BoolProperty(
        name="Faces",
        description="Export faces",
        default=constants.EXPORT_OPTIONS[constants.FACES])
    option_normals = BoolProperty(
        name="Normals",
        description="Export normals",
        default=constants.EXPORT_OPTIONS[constants.NORMALS])
    option_colors = BoolProperty(
        name="Vertex Colors",
        description="Export vertex colors",
        default=constants.EXPORT_OPTIONS[constants.COLORS])
    option_mix_colors = BoolProperty(
        name="Mix Colors",
        description="Mix material and vertex colors",
        default=constants.EXPORT_OPTIONS[constants.MIX_COLORS])
    option_uv_coords = BoolProperty(
        name="UVs",
        description="Export texture coordinates",
        default=constants.EXPORT_OPTIONS[constants.UVS])
    option_materials = BoolProperty(
        name="Materials",
        description="Export materials",
        default=constants.EXPORT_OPTIONS[constants.MATERIALS])
    option_face_materials = BoolProperty(
        name="Face Materials",
        description="Face mapping materials",
        default=constants.EXPORT_OPTIONS[constants.FACE_MATERIALS])
    option_maps = BoolProperty(
        name="Textures",
        description="Include texture maps",
        default=constants.EXPORT_OPTIONS[constants.MAPS])
    option_skinning = BoolProperty(
        name="Skinning",
        description="Export skin data",
        default=constants.EXPORT_OPTIONS[constants.SKINNING])
    option_bones = BoolProperty(
        name="Bones",
        description="Export bones",
        default=constants.EXPORT_OPTIONS[constants.BONES])
    option_extra_vgroups = StringProperty(
        name="Extra Vertex Groups",
        description="Non-skinning vertex groups to export (comma-separated, w/ star wildcard, BufferGeometry only).",
        default=constants.EXPORT_OPTIONS[constants.EXTRA_VGROUPS])
    option_apply_modifiers = BoolProperty(
        name="Apply Modifiers",
        description="Apply Modifiers to mesh objects",
        default=constants.EXPORT_OPTIONS[constants.APPLY_MODIFIERS]
    )
    index_buffer_types = [
        (constants.NONE,) * 3,
        (constants.UINT_16,) * 3,
        (constants.UINT_32,) * 3]
    option_index_type = EnumProperty(
        name="Index Buffer",
        description="Index buffer type that will be used for BufferGeometry objects.",
        items=index_buffer_types,
        default=constants.EXPORT_OPTIONS[constants.INDEX_TYPE])
    ## }
    ## Settings options {
    option_scale = FloatProperty(
        name="Scale",
        description="Scale vertices",
        min=0.01,
        max=1000.0,
        soft_min=0.01,
        soft_max=1000.0,
        default=constants.EXPORT_OPTIONS[constants.SCALE])
    option_round_off = BoolProperty(
        name="Enable Precision",
        description="Round off floating point values",
        default=constants.EXPORT_OPTIONS[constants.ENABLE_PRECISION])
    option_round_value = IntProperty(
        name="Precision",
        min=0,
        max=16,
        description="Floating point precision",
        default=constants.EXPORT_OPTIONS[constants.PRECISION])
    option_custom_properties = BoolProperty(
        name="Custom Props",
        description="Export custom properties as userData",
        default=False)
    logging_types = [
        (constants.DISABLED, constants.DISABLED, constants.DISABLED),
        (constants.DEBUG, constants.DEBUG, constants.DEBUG),
        (constants.INFO, constants.INFO, constants.INFO),
        (constants.WARNING, constants.WARNING, constants.WARNING),
        (constants.ERROR, constants.ERROR, constants.ERROR),
        (constants.CRITICAL, constants.CRITICAL, constants.CRITICAL)]
    option_logging = EnumProperty(
        name="",
        description="Logging verbosity level",
        items=logging_types,
        default=constants.DISABLED)
    option_geometry_type = EnumProperty(
        name="Type",
        description="Geometry type",
        # [1:] drops GLOBAL, which is only meaningful as a per-mesh override.
        items=_geometry_types()[1:],
        default=constants.EXPORT_OPTIONS[constants.GEOMETRY_TYPE])
    option_export_scene = BoolProperty(
        name="Scene",
        description="Export scene",
        default=constants.EXPORT_OPTIONS[constants.SCENE])
    #@TODO: removing this option since the ObjectLoader doesn't have
    #       support for handling external geometry data
    #option_embed_geometry = BoolProperty(
    #    name="Embed geometry",
    #    description="Embed geometry",
    #    default=constants.EXPORT_OPTIONS[constants.EMBED_GEOMETRY])
    option_embed_animation = BoolProperty(
        name="Embed animation",
        description="Embed animation data with the geometry data",
        default=constants.EXPORT_OPTIONS[constants.EMBED_ANIMATION])
    option_export_textures = BoolProperty(
        name="Export textures",
        description="Export textures",
        default=constants.EXPORT_OPTIONS[constants.EXPORT_TEXTURES],
        # Clears option_embed_textures when disabled (see resolve_conflicts).
        update=resolve_conflicts)
    option_embed_textures = BoolProperty(
        name="Embed textures",
        description="Embed base64 textures in .json",
        default=constants.EXPORT_OPTIONS[constants.EMBED_TEXTURES])
    option_texture_folder = StringProperty(
        name="Texture folder",
        description="add this folder to textures path",
        default=constants.EXPORT_OPTIONS[constants.TEXTURE_FOLDER])
    ## }
    ## Scene options {
    option_lights = BoolProperty(
        name="Lights",
        description="Export default scene lights",
        default=False)
    option_cameras = BoolProperty(
        name="Cameras",
        description="Export default scene cameras",
        default=False)
    option_hierarchy = BoolProperty(
        name="Hierarchy",
        description="Export object hierarchy",
        default=False)
    ## }
    ## Animation options {
    option_animation_morph = BoolProperty(
        name="Morph animation",
        description="Export animation (morphs)",
        default=constants.EXPORT_OPTIONS[constants.MORPH_TARGETS])
    option_blend_shape = BoolProperty(
        name="Blend Shape animation",
        description="Export Blend Shapes",
        default=constants.EXPORT_OPTIONS[constants.BLEND_SHAPES])
    option_animation_skeletal = EnumProperty(
        name="",
        description="Export animation (skeletal)",
        items=animation_options(),
        default=constants.OFF)
    option_keyframes = BoolProperty(
        name="Keyframe animation",
        description="Export animation (keyframes)",
        default=constants.EXPORT_OPTIONS[constants.KEYFRAMES])
    option_frame_index_as_time = BoolProperty(
        name="Frame index as time",
        description="Use (original) frame index as frame time",
        default=constants.EXPORT_OPTIONS[constants.FRAME_INDEX_AS_TIME])
    option_frame_step = IntProperty(
        name="Frame step",
        description="Animation frame step",
        min=1,
        max=1000,
        soft_min=1,
        soft_max=1000,
        default=1)
    ## }
    option_indent = BoolProperty(
        name="Indent JSON",
        description="Disable this to reduce the file size",
        default=constants.EXPORT_OPTIONS[constants.INDENT])
    option_compression = EnumProperty(
        name="",
        description="Compression options",
        items=compression_types(),
        default=constants.NONE)
    option_influences = IntProperty(
        name="Influences",
        description="Maximum number of bone influences",
        min=1,
        max=4,
        default=2)

    def invoke(self, context, event):
        """Restore any settings saved on the scene, then open the dialog."""
        settings = context.scene.get(constants.EXPORT_SETTINGS_KEY)
        if settings:
            try:
                restore_export_settings(self.properties, settings)
            except AttributeError as e:
                # Saved settings from an older/newer add-on version may not
                # match the current property set; drop them and continue.
                logging.error("Loading export settings failed:")
                logging.exception(e)
                logging.debug("Removed corrupted settings")
                del context.scene[constants.EXPORT_SETTINGS_KEY]
        return ExportHelper.invoke(self, context, event)

    @classmethod
    def poll(cls, context):
        """Only allow the operator when an object is active.

        :param context:
        """
        return context.active_object is not None

    def execute(self, context):
        """Run the export with the currently selected options.

        :param context:
        """
        if not self.properties.filepath:
            raise Exception("filename not set")
        settings = set_settings(self.properties)
        settings['addon_version'] = bl_info['version']
        filepath = self.filepath
        if settings[constants.COMPRESSION] == constants.MSGPACK:
            # Swap the extension for the msgpack one.
            # NOTE(review): [:-4] assumes a four-character suffix -- confirm
            # against constants.EXTENSION/constants.PACK.
            filepath = "%s%s" % (filepath[:-4], constants.PACK)
        # Imported lazily so the add-on can register even if the exporter
        # package has import-time problems.
        from io_three import exporter
        if settings[constants.SCENE]:
            exporter.export_scene(filepath, settings)
        else:
            exporter.export_geometry(filepath, settings)
        return {'FINISHED'}

    def draw(self, context):
        """Lay out the export options in the file-browser sidebar.

        :param context:
        """
        layout = self.layout
        ## Geometry {
        row = layout.row()
        row.label(text="GEOMETRY:")
        row = layout.row()
        row.prop(self.properties, 'option_vertices')
        row.prop(self.properties, 'option_faces')
        row = layout.row()
        row.prop(self.properties, 'option_normals')
        row.prop(self.properties, 'option_uv_coords')
        row = layout.row()
        row.prop(self.properties, 'option_bones')
        row.prop(self.properties, 'option_skinning')
        row = layout.row()
        row.prop(self.properties, 'option_extra_vgroups')
        row = layout.row()
        row.prop(self.properties, 'option_apply_modifiers')
        row = layout.row()
        row.prop(self.properties, 'option_geometry_type')
        row = layout.row()
        row.prop(self.properties, 'option_index_type')
        ## }
        layout.separator()
        ## Materials {
        row = layout.row()
        row.label(text="- Shading:")
        row = layout.row()
        row.prop(self.properties, 'option_face_materials')
        row = layout.row()
        row.prop(self.properties, 'option_colors')
        row = layout.row()
        row.prop(self.properties, 'option_mix_colors')
        ## }
        layout.separator()
        ## Animation {
        row = layout.row()
        row.label(text="- Animation:")
        row = layout.row()
        row.prop(self.properties, 'option_animation_morph')
        row = layout.row()
        row.prop(self.properties, 'option_blend_shape')
        row = layout.row()
        row.label(text="Skeletal animations:")
        row = layout.row()
        row.prop(self.properties, 'option_animation_skeletal')
        row = layout.row()
        row.label(text="Keyframe animations:")
        row = layout.row()
        row.prop(self.properties, 'option_keyframes')
        layout.row()
        row = layout.row()
        row.prop(self.properties, 'option_influences')
        row = layout.row()
        row.prop(self.properties, 'option_frame_step')
        row = layout.row()
        row.prop(self.properties, 'option_frame_index_as_time')
        row = layout.row()
        row.prop(self.properties, 'option_embed_animation')
        ## }
        layout.separator()
        ## Scene {
        row = layout.row()
        row.label(text="SCENE:")
        row = layout.row()
        row.prop(self.properties, 'option_export_scene')
        row.prop(self.properties, 'option_materials')
        #row = layout.row()
        #row.prop(self.properties, 'option_embed_geometry')
        row = layout.row()
        row.prop(self.properties, 'option_lights')
        row.prop(self.properties, 'option_cameras')
        ## }
        row = layout.row()
        row.prop(self.properties, 'option_hierarchy')
        layout.separator()
        ## Settings {
        row = layout.row()
        row.label(text="SETTINGS:")
        row = layout.row()
        row.prop(self.properties, 'option_maps')
        row = layout.row()
        row.prop(self.properties, 'option_export_textures')
        row = layout.row()
        row.prop(self.properties, 'option_embed_textures')
        # Embedding is only meaningful when textures are exported at all.
        row.enabled = self.properties.option_export_textures
        row = layout.row()
        row.prop(self.properties, 'option_texture_folder')
        row = layout.row()
        row.prop(self.properties, 'option_scale')
        layout.row()
        row = layout.row()
        row.prop(self.properties, 'option_round_off')
        row = layout.row()
        row.prop(self.properties, 'option_round_value')
        layout.row()
        row = layout.row()
        row.label(text="Custom Properties")
        row = layout.row()
        row.prop(self.properties, 'option_custom_properties')
        layout.row()
        row = layout.row()
        row.label(text="Logging verbosity:")
        row = layout.row()
        row.prop(self.properties, 'option_logging')
        row = layout.row()
        row.label(text="File compression format:")
        row = layout.row()
        row.prop(self.properties, 'option_compression')
        row = layout.row()
        row.prop(self.properties, 'option_indent')
        ## }
        ## Operators {
        has_settings = context.scene.get(constants.EXPORT_SETTINGS_KEY, False)
        row = layout.row()
        row.operator(
            ThreeExportSettings.bl_idname,
            ThreeExportSettings.bl_label,
            icon="%s" % "PINNED" if has_settings else "UNPINNED")
        ## }
def menu_func_export(self, context):
    """Add the Three.js entry to the File > Export menu.

    :param self:
    :param context:
    """
    label = "Three.js (%s)" % constants.EXTENSION
    operator = self.layout.operator(ExportThree.bl_idname, text=label)
    # Default the output path to the .blend's path with the swapped extension.
    operator.filepath = bpy.data.filepath.replace('.blend', constants.EXTENSION)
def register():
    """Registers the addon (Blender boilerplate)"""
    # Register every class defined in this module, then hook the export menu.
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
    """Unregisters the addon (Blender boilerplate)"""
    # Mirror of register(): drop classes and the export-menu entry.
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)
# Allow running the script directly (e.g. from Blender's text editor).
if __name__ == '__main__':
    register()
| mit |
apanju/GMIO_Odoo | openerp/workflow/instance.py | 314 | 5594 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import workitem
from openerp.workflow.helpers import Session
from openerp.workflow.helpers import Record
from openerp.workflow.workitem import WorkflowItem
class WorkflowInstance(object):
    """A running workflow instance (one row of the ``wkf_instance`` table).

    Wraps a (session, record) pair plus the raw row values and exposes the
    instance lifecycle: create, delete, validate (signal), update.
    """

    def __init__(self, session, record, values):
        assert isinstance(session, Session)
        assert isinstance(record, Record)
        self.session = session
        self.record = record
        if not values:
            values = {}
        assert isinstance(values, dict)
        # Raw wkf_instance row values; at minimum must contain 'id' for the
        # methods below that query by instance id.
        self.instance = values

    @classmethod
    def create(cls, session, record, workflow_id):
        """Insert an 'active' instance of `workflow_id` for `record`, start
        all flow_start activities, then process it once.

        :return: the freshly created WorkflowInstance
        """
        assert isinstance(session, Session)
        assert isinstance(record, Record)
        assert isinstance(workflow_id, (int, long))

        cr = session.cr
        cr.execute('insert into wkf_instance (res_type,res_id,uid,wkf_id,state) values (%s,%s,%s,%s,%s) RETURNING id', (record.model, record.id, session.uid, workflow_id, 'active'))
        instance_id = cr.fetchone()[0]

        cr.execute('select * from wkf_activity where flow_start=True and wkf_id=%s', (workflow_id,))
        stack = []
        activities = cr.dictfetchall()
        for activity in activities:
            WorkflowItem.create(session, record, activity, instance_id, stack)

        cr.execute('SELECT * FROM wkf_instance WHERE id = %s', (instance_id,))
        values = cr.dictfetchone()
        wi = WorkflowInstance(session, record, values)
        wi.update()
        return wi

    def delete(self):
        """Delete every workflow instance attached to this record."""
        self.session.cr.execute('delete from wkf_instance where res_id=%s and res_type=%s', (self.record.id, self.record.model))

    def validate(self, signal, force_running=False):
        """Send `signal` to every workitem of this instance.

        :return: the first action pushed by a processed workitem, or False
        """
        assert isinstance(signal, basestring)
        assert isinstance(force_running, bool)

        cr = self.session.cr
        cr.execute("select * from wkf_workitem where inst_id=%s", (self.instance['id'],))
        stack = []
        for work_item_values in cr.dictfetchall():
            wi = WorkflowItem(self.session, self.record, work_item_values)
            wi.process(signal=signal, force_running=force_running, stack=stack)
        # An action is returned
        self._update_end()
        return stack and stack[0] or False

    def update(self):
        """Re-process every workitem of this instance without a signal."""
        cr = self.session.cr

        cr.execute("select * from wkf_workitem where inst_id=%s", (self.instance['id'],))
        for work_item_values in cr.dictfetchall():
            stack = []
            WorkflowItem(self.session, self.record, work_item_values).process(stack=stack)
        return self._update_end()

    def _update_end(self):
        """Mark this instance 'complete' once every workitem sits on a
        flow_stop activity in state 'complete', then propagate
        'subflow.<activity>' signals to parent instances waiting on it.

        :return: True when the instance completed, False otherwise
        """
        cr = self.session.cr
        instance_id = self.instance['id']

        # NOTE: wkf_id is fetched but never used; kept for behavioral parity.
        cr.execute('select wkf_id from wkf_instance where id=%s', (instance_id,))
        wkf_id = cr.fetchone()[0]

        cr.execute('select state,flow_stop from wkf_workitem w left join wkf_activity a on (a.id=w.act_id) where w.inst_id=%s', (instance_id,))
        ok = True
        for r in cr.fetchall():
            # Fixed: replaced the '<>' operator (removed in Python 3,
            # discouraged by PEP 8) with the standard '!='.
            if (r[0] != 'complete') or not r[1]:
                ok = False
                break

        if ok:
            cr.execute('select distinct a.name from wkf_activity a left join wkf_workitem w on (a.id=w.act_id) where w.inst_id=%s', (instance_id,))
            act_names = cr.fetchall()
            cr.execute("update wkf_instance set state='complete' where id=%s", (instance_id,))
            cr.execute("update wkf_workitem set state='complete' where subflow_id=%s", (instance_id,))
            cr.execute("select i.id,w.osv,i.res_id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where i.id IN (select inst_id from wkf_workitem where subflow_id=%s)", (instance_id,))
            for cur_instance_id, cur_model_name, cur_record_id in cr.fetchall():
                cur_record = Record(cur_model_name, cur_record_id)
                for act_name in act_names:
                    WorkflowInstance(self.session, cur_record, {'id': cur_instance_id}).validate('subflow.%s' % act_name[0])

        return ok
def create(session, record, workflow_id):
    """Module-level wrapper: create and start an instance of `workflow_id`.

    Fixed: previously called ``WorkflowInstance(session, record)`` (missing
    the required `values` argument -> TypeError) and then invoked create()
    with the wrong signature.
    """
    return WorkflowInstance.create(session, record, workflow_id)


def delete(session, record):
    """Module-level wrapper: delete all workflow instances of `record`.

    Fixed: supplies the required `values` argument ({}), which delete()
    does not read.
    """
    return WorkflowInstance(session, record, {}).delete()


def validate(session, record, instance_id, signal, force_running=False):
    """Module-level wrapper: send `signal` to instance `instance_id`.

    Fixed: builds the instance from its id (matching the in-file usage in
    WorkflowInstance._update_end) instead of passing instance_id into
    validate(), whose signature is (signal, force_running).
    """
    return WorkflowInstance(session, record, {'id': instance_id}).validate(signal, force_running=force_running)


def update(session, record, instance_id):
    """Module-level wrapper: re-process the workitems of `instance_id`."""
    return WorkflowInstance(session, record, {'id': instance_id}).update()


def _update_end(session, record, instance_id):
    """Module-level wrapper: recompute completion state of `instance_id`."""
    return WorkflowInstance(session, record, {'id': instance_id})._update_end()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dbentley/pants | src/python/pants/backend/python/pants_requirement.py | 14 | 1653 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.python.python_requirement import PythonRequirement
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.base.build_environment import pants_version
class PantsRequirement(object):
  """Exports a `python_requirement_library` pointing at the active pants' corresponding sdist.

  This requirement is useful for custom plugin authors who want to build and test their plugin
  with pants itself. Using the resulting target as a dependency of their plugin target ensures
  the dependency stays true to the surrounding repo's version of pants.

  NB: The requirement generated is for official pants releases on pypi; so may not be appropriate
  for use in a repo that tracks `pantsbuild/pants` or otherwise uses custom pants sdists.

  :API: public
  """

  def __init__(self, parse_context):
    self._parse_context = parse_context

  def __call__(self, name=None):
    """
    :param string name: The name to use for the target, defaults to the parent dir name.
    """
    target_name = name or os.path.basename(self._parse_context.rel_path)
    pants_dist = PythonRequirement(
      requirement='pantsbuild.pants=={}'.format(pants_version()))
    self._parse_context.create_object(
      PythonRequirementLibrary, name=target_name, requirements=[pants_dist])
| apache-2.0 |
printedheart/h2o-3 | py2/testdir_single_jvm/test_parse_covtype.py | 20 | 3618 | import unittest, sys
sys.path.extend(['.','..','../..','py'])
import os
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i, h2o_browse as h2b
from h2o_test import find_file, dump_json, verboseprint
# Per-column zero counts for a single copy of covtype.data (55 columns).
# Concatenated files (e.g. covtype20x) multiply these counts by the number
# of copies; the check that uses them is currently commented out below.
expectedZeros = [0, 4914, 656, 24603, 38665, 124, 13, 5, 1338, 51, 320216, 551128, 327648, 544044, 577981,
    573487, 576189, 568616, 579415, 574437, 580907, 580833, 579865, 548378, 568602, 551041,
    563581, 580413, 581009, 578167, 577590, 579113, 576991, 571753, 580174, 547639, 523260,
    559734, 580538, 578423, 579926, 580066, 465765, 550842, 555346, 528493, 535858, 579401,
    579121, 580893, 580714, 565439, 567206, 572262, 0]

# Debug switch: when True the test deliberately references an unbound name
# to force a failure (see test_parse_covtype).
CAUSE_FAIL = False
def assertEqualMsg(a, b):
    """Assert that a == b, reporting both values in the failure message."""
    assert a == b, "%s %s" % (a, b)
def parseKeyIndexedCheck(frames_result, multiplyExpected):
    """Validate column metadata of a parsed covtype frame.

    Asserts every column is a plain int named C1..C55 with no enum domain.
    multiplyExpected scales the per-column zero counts for concatenated
    files, but that check is currently disabled below.
    """
    # get the name of the frame?
    print ""
    frame = frames_result['frames'][0]
    rows = frame['rows']
    columns = frame['columns']
    for i,c in enumerate(columns):
        print "i:", i, "c:", c
        label = c['label']
        stype = c['type']
        # information is no longer valid
        missing = c['missing_count']
        zeros = c['zero_count']
        domain = c['domain']
        print "column: %s label: %s type: %s missing: %s zeros: %s domain: %s" %\
            (i,label,stype,missing,zeros,domain)

        # files are concats of covtype. so multiply expected
        # assertEqualMsg(zeros, expectedZeros[i] * multiplyExpected)

        # Columns are 1-based C1..C55, all integer, no categorical domain.
        assertEqualMsg(label,"C%s" % (i+1))
        assertEqualMsg(stype,"int")
        # assertEqualMsg(missing, 0)
        assertEqualMsg(domain, None)
class Basic(unittest.TestCase):
    """Parse the covtype dataset(s) on a local h2o cloud and validate the
    resulting frame's shape and column metadata."""

    def tearDown(self):
        # Fail the test if h2o wrote errors into its sandbox logs.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        h2o.init()

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_parse_covtype(self):
        # (filename, number of covtype copies it contains, timeout seconds)
        tryList = [
            ('covtype.data', 1, 30),
            ('covtype20x.data', 20, 120),
        ]

        for (csvFilename, multiplyExpected, timeoutSecs) in tryList:
            # h2o-dev doesn't take ../.. type paths? make find_file return absolute pathj
            a_node = h2o.nodes[0]

            importFolderPath = os.path.expanduser("~/home-0xdiag-datasets/standard")
            csvPathname = importFolderPath + "/" + csvFilename
            importResult = a_node.import_files(path=csvPathname)
            # print "importResult:", dump_json(importResult)
            hex_key = importResult['destination_frames'][0]

            if CAUSE_FAIL:
                # NOTE(review): 'k' is only bound further below, so this
                # branch deliberately raises NameError when CAUSE_FAIL is
                # flipped on — it exists to force a failure for debugging.
                frames_result = a_node.frames(key=k, row_count=5, timeoutSecs=timeoutSecs)
            # print "frames_result from the first importResult key", dump_json(frames_result)

            parseResult = a_node.parse(key=hex_key, timeoutSecs=timeoutSecs, chunk_size=4194304*4)

            pA = h2o_cmd.ParseObj(parseResult)
            iA = h2o_cmd.InspectObj(pA.parse_key, expectedNumRows=581012*multiplyExpected,
                expectedNumCols=55, expectedMissinglist=[])
            print iA.missingList, iA.labelList, iA.numRows, iA.numCols

            # range(0) is empty: per-column summaries are currently disabled.
            for i in range(0):
                print "Summary on column", i
                co = h2o_cmd.runSummary(key=hex_key, column=i)

            k = parseResult['frames'][0]['frame_id']['name']
            # print "parseResult:", dump_json(parseResult)
            frames_result = a_node.frames(key=k, row_count=5)
            # print "frames_result from the first parseResult key", dump_json(frames_result)
            parseKeyIndexedCheck(frames_result, multiplyExpected)
if __name__ == '__main__':
    # Build the cloud, run the unittest suite, then tear down.
    h2o.unit_main()
| apache-2.0 |
SaptakS/pune.pycon.org | fabfile.py | 4 | 5076 | import json
import os
import re
from fabric.api import cd, env, get, hide, local, put, require, run, settings, sudo, task
from fabric.colors import red
from fabric.contrib import files, project
from fabric.utils import abort, error
# Directory structure
PROJECT_ROOT = os.path.dirname(__file__)

# Fabric environment shared by every task below; environment-specific
# values (hosts, root, branch) are filled in by staging()/production().
env.project = 'pycon'
env.project_user = os.environ['LOGNAME']  # deploy as the invoking local user
env.shell = '/bin/bash -c'
env.settings = 'symposion.settings'
env.use_ssh_config = True
@task
def staging():
    """Target the staging environment (staging-pycon.python.org)."""
    env.environment = 'staging'
    env.hosts = ['pycon-staging.iad1.psf.io']
    env.site_hostname = 'staging-pycon.python.org'
    env.root = '/srv/pycon'
    env.branch = 'staging'
    setup_path()


@task
def production():
    """Target the production environment (us.pycon.org)."""
    env.environment = 'production'
    env.hosts = ['pycon-prod.iad1.psf.io']
    env.site_hostname = 'us.pycon.org'
    env.root = '/srv/pycon'
    env.branch = 'production'
    setup_path()
def setup_path():
    """Derive per-environment paths; must run after env.root is set
    (called from staging()/production())."""
    env.home = '/home/psf-users/%(project_user)s/' % env
    env.code_root = os.path.join(env.root, 'pycon')
    env.virtualenv_root = os.path.join(env.root, 'env')
    env.media_root = os.path.join(env.root, 'media')
@task
def manage_run(command):
    """Run a Django management command on the remote server.

    `dbshell` is special-cased because it needs an interactive psql
    session (see dbshell()).
    """
    if command == 'dbshell':
        # Need custom code for dbshell to work
        dbshell()
        return
    require('environment')
    manage_cmd = ("{env.virtualenv_root}/bin/python "
                  "manage.py {command}").format(env=env, command=command)
    # Fixed: removed the unused local `dotenv_path` (computed, never read).
    with cd(env.code_root):
        sudo(manage_cmd)
@task
def manage_shell():
    """Drop into the remote Django shell."""
    manage_run("shell")


@task
def deploy():
    """Deploy to a given environment."""
    # NOTE: salt will check every 15 minutes whether the
    # repo has changed, and if so, redeploy. Or you can use this
    # to make it run immediately.
    require('environment')
    sudo('salt-call state.highstate')


@task
def ssh():
    """Ssh to a given server"""
    require('environment')
    local("ssh %s" % env.hosts[0])
@task
def dbshell():
    """Open an interactive psql session against the remote database."""
    require('environment')
    # Ask the remote Django project for its DSN, then run psql over ssh -t
    # so the session is interactive.
    dsn = sudo('/srv/pycon/env/bin/python /srv/pycon/pycon/manage.py sqldsn -q -R default 2>/dev/null', user='pycon').stdout
    host = '%s@%s' % (env.user, env.hosts[0])
    psql = 'psql "%s"' % dsn
    local("ssh -t %s \'%s\'" % (host, psql))
@task
def get_db_dump(dbname, clean=True):
    """Overwrite your local `dbname` database with the data from the server.
    The name of your local db is required as an argument, e.g.:

        fab staging get_db_dump:dbname=mydbname
    """
    require('environment')
    # Write a ~/.pgpass on the server so pg_dump can authenticate.
    run('sudo -u pycon /srv/pycon/env/bin/python /srv/pycon/pycon/manage.py sqldsn -q -s pgpass -R default 2>/dev/null > ~/.pgpass')
    run('chmod 600 ~/.pgpass')
    dump_file = '%(project)s-%(environment)s.sql' % env
    flags = '-Ox'
    dsn = sudo('/srv/pycon/env/bin/python /srv/pycon/pycon/manage.py sqldsn -q -R default 2>/dev/null', user='pycon').stdout
    if clean:
        # -c adds DROP statements so the restore replaces existing objects.
        flags += 'c'
    pg_dump = 'pg_dump "%s" %s' % (dsn, flags)
    host = '%s@%s' % (env.user, env.hosts[0])
    # save pg_dump output to file in local home directory
    local('ssh -C %s \'%s\' > ~/%s' % (host, pg_dump, dump_file))
    local('dropdb %s; createdb %s' % (dbname, dbname))
    local('psql %s -f ~/%s' % (dbname, dump_file))
@task
def get_media(root='site_media/media'):
    """Syncs media files from server to a local dir.

    Defaults to ./site_media/media; you can override by passing
    a different relative path as root:

        fab server get_media:root=my_dir/media/foo

    Local dir ought to exist already.
    """
    rsync = 'rsync -rvaz %(user)s@%(host)s:%(media_root)s/' % env
    cmd = '%s ./%s' % (rsync, root)
    local(cmd)
@task
def load_db_dump(dump_file):
    """Given a dump on your home dir on the server, load it to the server's
    database, overwriting any existing data. BE CAREFUL!"""
    require('environment')
    # Provision server-side ~/.pgpass so psql can authenticate.
    run('sudo -u pycon /srv/pycon/env/bin/python /srv/pycon/pycon/manage.py sqldsn -q -s pgpass -R default 2>/dev/null > ~/.pgpass')
    run('chmod 600 ~/.pgpass')
    # Upload the local dump, then replay it against the remote DSN.
    temp_file = os.path.join(env.home, '%(project)s-%(environment)s.sql' % env)
    put(dump_file, temp_file)
    dsn = sudo('/srv/pycon/env/bin/python /srv/pycon/pycon/manage.py sqldsn -q -R default 2>/dev/null', user='pycon').stdout
    run('psql "%s" -f %s' % (dsn, temp_file))
@task
def make_messages():
    """Extract English text from code and templates, and update the
    .po files for translators to translate"""
    # Make sure gettext is installed
    local("gettext --help >/dev/null 2>&1")
    # -a updates all existing locales; -l fr bootstraps the first one.
    if os.path.exists("locale/fr/LC_MESSAGES/django.po"):
        local("python manage.py makemessages -a")
    else:
        local("python manage.py makemessages -l fr")


@task
def compile_messages():
    """Compile the translated .po files into more efficient .mo
    files for runtime use"""
    # Make sure gettext is installed
    local("gettext --help >/dev/null 2>&1")
    local("python manage.py compilemessages")
| bsd-3-clause |
azumimuo/family-xbmc-addon | plugin.video.specto/resources/lib/sources/primewire_mv_tv.py | 2 | 9486 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,base64
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
from resources.lib.libraries import control
from resources.lib import resolvers
class source:
    """Primewire scraper: resolves movie/episode pages to host stream links.

    All public methods return None (via the bare except) on any failure,
    which callers treat as "no result from this provider".
    """

    def __init__(self):
        # Primary site plus known mirror domains.
        self.base_link = 'http://www.primewire.ag'
        self.key_link = '/index.php?search'
        self.link_1 = 'http://www.primewire.ag'
        self.link_2 = 'http://www.primewire.org'
        self.link_3 = 'http://www.primewire.is'
        # search_section=1 -> movies, search_section=2 -> TV shows.
        self.moviesearch_link = '/index.php?search_keywords=%s&key=%s&search_section=1'
        self.tvsearch_link = '/index.php?search_keywords=%s&key=%s&search_section=2'
        self.headers = {'Connection' : 'keep-alive'}

    def get_movie(self, imdb, title, year):
        """Return the site-relative URL of the movie page, or None."""
        try:
            # The search endpoint requires a per-session 'key' token taken
            # from the search form.
            key = urlparse.urljoin(self.base_link, self.key_link)
            key = client.request(key, 'searchform')
            key = client.parseDOM(key, 'input', ret='value', attrs = {'name': 'key'})[0]

            query = self.moviesearch_link % (urllib.quote_plus(cleantitle.query(title)), key)
            query = urlparse.urljoin(self.base_link, query)

            # Fetch up to two result pages.
            result = str(client.request(query, 'index_item'))
            if 'page=2' in result or 'page%3D2' in result: result += str(client.request(query + '&page=2', 'index_item'))

            result = client.parseDOM(result, 'div', attrs = {'class': 'index_item.+?'})

            # Result titles look like "Watch <Title> (<year>)".
            title = 'watch' + cleantitle.get(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]

            # Unwrap redirect links ('u'/'q' query params hold the target).
            r = []
            for i in result:
                u = i[0]
                try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['u'][0]
                except: pass
                try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['q'][0]
                except: pass
                r += [(u, i[1])]

            # Exact title+year match first; otherwise probe the first five
            # de-duplicated candidates for the matching imdb id.
            match = [i[0] for i in r if title == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]

            match2 = [i[0] for i in r]
            match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if len(match) > 0: url = match[0] ; break
                    r = client.request(urlparse.urljoin(self.base_link, i), 'choose_tabs')
                    if imdb in str(r): url = i ; break
                except:
                    pass

            # Strip scheme/host: keep only the site-relative path.
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def get_show(self, imdb, tvdb, tvshowtitle, year):
        """Return the site-relative URL of the show page, or None.

        Mirrors get_movie() but hits the TV search section and verifies
        candidates against 'tv_episode_item' markup.
        """
        try:
            key = urlparse.urljoin(self.base_link, self.key_link)
            key = client.request(key, 'searchform')
            key = client.parseDOM(key, 'input', ret='value', attrs = {'name': 'key'})[0]

            query = self.tvsearch_link % (urllib.quote_plus(cleantitle.query(tvshowtitle)), key)
            query = urlparse.urljoin(self.base_link, query)

            result = str(client.request(query, 'index_item'))
            if 'page=2' in result or 'page%3D2' in result: result += str(client.request(query + '&page=2', 'index_item'))

            result = client.parseDOM(result, 'div', attrs = {'class': 'index_item.+?'})

            tvshowtitle = 'watch' + cleantitle.get(tvshowtitle)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]

            r = []
            for i in result:
                u = i[0]
                try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['u'][0]
                except: pass
                try: u = urlparse.parse_qs(urlparse.urlparse(u).query)['q'][0]
                except: pass
                r += [(u, i[1])]

            match = [i[0] for i in r if tvshowtitle == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]

            match2 = [i[0] for i in r]
            match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if len(match) > 0: url = match[0] ; break
                    r = client.request(urlparse.urljoin(self.base_link, i), 'tv_episode_item')
                    if imdb in str(r): url = i ; break
                except:
                    pass

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        """Resolve a show page URL to a specific episode page URL, or None.

        Matches by episode title + air date first, then date alone, then
        the season-N-episode-M slug as a last resort.
        """
        try:
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url, 'tv_episode_item')
            result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})

            title = cleantitle.get(title)

            # Normalize each item to (href, episode_name_or_None, date_or_None).
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in result]
            result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

            url = [i for i in result if title == cleantitle.get(i[1]) and date == i[2]][:1]
            if len(url) == 0: url = [i for i in result if date == i[2]]
            if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

            url = client.replaceHTMLCodes(url[0][0])
            # Unwrap redirect links as in get_movie().
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
            except: pass
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect host links from a movie/episode page.

        Returns a list of {'source', 'quality', 'provider', 'url'} dicts;
        only CAM/TS and DVD (SD) qualities are accepted.
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url, 'choose_tabs')

            links = client.parseDOM(result, 'tbody')

            for i in links:
                try:
                    # The real target is base64-encoded in the 'url' param.
                    url = client.parseDOM(i, 'a', ret='href')[0]
                    try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
                    except: pass
                    try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
                    except: pass
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
                    url = base64.b64decode(url)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    # Keep only hosts the resolver layer knows about.
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    quality = client.parseDOM(i, 'span', ret='class')[0]
                    if quality == 'quality_cam' or quality == 'quality_ts': quality = 'CAM'
                    elif quality == 'quality_dvd': quality = 'SD'
                    else: raise Exception()

                    sources.append({'source': host, 'quality': quality, 'provider': 'Primewire', 'url': url})
                except:
                    pass

            return sources
        except Exception as e:
            control.log('ERROR PRIME %s' % e)
            return sources

    def resolve(self, url):
        """Resolve a host page URL to a playable stream URL via resolvers."""
        try:
            url = resolvers.request(url)
            return url
        except:
            return
| gpl-2.0 |
elliotthill/django-oscar | runtests.py | 2 | 3121 | #!/usr/bin/env python
"""
Custom test runner
If args or options, we run the testsuite as quickly as possible.
If args but no options, we default to using the spec plugin and aborting on
first error/failure.
If options, we ignore defaults and pass options onto Nose.
Examples:
Run all tests (as fast as possible)
$ ./runtests.py
Run all unit tests (using spec output)
$ ./runtests.py tests/unit
Run all checkout unit tests (using spec output)
$ ./runtests.py tests/unit/checkout
Run all tests relating to shipping
$ ./runtests.py --attr=shipping
Re-run failing tests (needs to be run twice to first build the index)
$ ./runtests.py ... --failed
Drop into pdb when a test fails
$ ./runtests.py ... --pdb-failures
"""
import sys
import logging
import warnings
from tests.config import configure
from six.moves import map
# No logging: silence every log record at CRITICAL and below for the
# duration of the test run.
logging.disable(logging.CRITICAL)
def run_tests(verbosity, *test_args):
    """Run the suite under django-nose, exiting nonzero on failures.

    Defaults to the 'tests' package when no labels are given.
    """
    # Imported lazily so the module can be loaded before Django is set up.
    from django_nose import NoseTestSuiteRunner

    labels = test_args
    if not labels:
        labels = ['tests']
    failures = NoseTestSuiteRunner(verbosity=verbosity).run_tests(labels)
    if failures:
        sys.exit(failures)
if __name__ == '__main__':
    args = sys.argv[1:]
    verbosity = 1
    if not args:
        # If run with no args, try and run the testsuite as fast as possible.
        # That means across all cores and with no high-falutin' plugins.
        import multiprocessing
        try:
            num_cores = multiprocessing.cpu_count()
        except NotImplementedError:
            num_cores = 4  # Guess
        args = ['--nocapture', '--stop', '--processes=%s' % num_cores]
    else:
        # Some args/options specified. Check to see if any nose options have
        # been specified. If they have, then don't set any
        has_options = any(map(lambda x: x.startswith('--'), args))
        if not has_options:
            # Default options:
            # --stop Abort on first error/failure
            # --nocapture Don't capture STDOUT
            args.extend(['--nocapture', '--stop', '--with-specplugin'])
        else:
            # Remove options as nose will pick these up from sys.argv
            for arg in args:
                if arg.startswith('--verbosity'):
                    # NOTE(review): only the last character is read, so a
                    # multi-digit value like --verbosity=10 is mis-parsed —
                    # confirm callers never pass that.
                    verbosity = int(arg[-1])
            args = [arg for arg in args if not arg.startswith('-')]

    configure()

    with warnings.catch_warnings():
        # The warnings module in default configuration will never cause tests to
        # fail, as it never raises an exception.
        # We alter that behaviour by turning DeprecationWarnings into
        # exceptions, but exclude warnings triggered by third-party libs
        # Note: The context manager is not thread safe. Behaviour with multiple
        # threads is undefined.
        warnings.filterwarnings('error', category=DeprecationWarning)
        warnings.filterwarnings('ignore',
                                r'django.utils.simplejson is deprecated.*',
                                DeprecationWarning, r'sorl\.thumbnail\.helpers')

        run_tests(verbosity, *args)
| bsd-3-clause |
40223104/test_lego | static/Brython3.1.1-20150328-091302/Lib/importlib/basehook.py | 608 | 1396 | from javascript import JSObject
from browser import window
import urllib.request
class TempMod:
    """Minimal stand-in for a module object; carries only its name."""

    def __init__(self, name):
        self.name = name
# Custom import hook: a PEP-302-style finder/loader in one class that
# fetches Brython modules over HTTP from the Brython distribution.
class BaseHook:
    def __init__(self, fullname=None, path=None):
        self._fullname=fullname
        self._path=path # path argument is accepted but intentionally ignored
        self._modpath=''   # URL the module source was fetched from
        self._module=''    # fetched module source text

    def find_module(self, name=None, path=None):
        """Probe the Brython distribution for `name`.

        Tries, in order: a JS library, a plain Python module, then a
        package __init__. Returns self (acting as the loader) on success;
        raises ImportError when nothing is found.
        """
        if name is None:
            name=self._fullname
        for _i in ('libs/%s.js' % name, 'Lib/%s.py' % name,
                   'Lib/%s/__init__.py' % name):
            _path="%s%s" % (__BRYTHON__.brython_path, _i)
            try:
                # NOTE(review): this unpacks urlopen() into (fp, _, headers),
                # which matches Brython's patched urllib, not CPython's
                # single-object return — confirm against the Brython runtime.
                _fp,_,_headers=urllib.request.urlopen(_path)
                if _headers['status'] != 200:
                    continue
                self._module=_fp.read()
                self._modpath=_path
                return self
            except urllib.error.HTTPError as e:
                print(str(e))

        self._modpath=''
        self._module=''
        raise ImportError

    def is_package(self):
        # NOTE(review): this reports True for any dotted name, i.e. "has a
        # parent package", not "is a package" — confirm intent.
        return '.' in self._fullname

    def load_module(self, name):
        """Register `name` in Brython's module table and execute the
        previously fetched source via __BRYTHON__.run_py."""
        if name is None:
            name=self._fullname
        window.eval('__BRYTHON__.imported["%s"]={}' % name)
        return JSObject(__BRYTHON__.run_py)(TempMod(name),
                                            self._modpath, self._module)
| gpl-3.0 |
AlexGrig/GPy | GPy/models/mrd.py | 8 | 14617 | # ## Copyright (c) 2013, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import itertools, logging
from ..kern import Kern
from ..core.parameterization.variational import NormalPosterior, NormalPrior
from ..core.parameterization import Param, Parameterized
from ..core.parameterization.observable_array import ObsAr
from ..inference.latent_function_inference.var_dtc import VarDTC
from ..inference.latent_function_inference import InferenceMethodList
from ..likelihoods import Gaussian
from ..util.initialization import initialize_latent
from ..core.sparse_gp import SparseGP, GP
from GPy.core.parameterization.variational import VariationalPosterior
from GPy.models.bayesian_gplvm_minibatch import BayesianGPLVMMiniBatch
from GPy.models.sparse_gp_minibatch import SparseGPMiniBatch
class MRD(BayesianGPLVMMiniBatch):
"""
!WARNING: This is bleeding edge code and still in development.
Functionality may change fundamentally during development!
Apply MRD to all given datasets Y in Ylist.
Y_i in [n x p_i]
If Ylist is a dictionary, the keys of the dictionary are the names, and the
values are the different datasets to compare.
The samples n in the datasets need
to match up, whereas the dimensionality p_d can differ.
:param [array-like] Ylist: List of datasets to apply MRD on
:param input_dim: latent dimensionality
:type input_dim: int
:param array-like X: mean of starting latent space q in [n x q]
:param array-like X_variance: variance of starting latent space q in [n x q]
:param initx: initialisation method for the latent space :
* 'concat' - PCA on concatenation of all datasets
* 'single' - Concatenation of PCA on datasets, respectively
* 'random' - Random draw from a Normal(0,1)
:type initx: ['concat'|'single'|'random']
:param initz: initialisation method for inducing inputs
:type initz: 'permute'|'random'
:param num_inducing: number of inducing inputs to use
:param Z: initial inducing inputs
:param kernel: list of kernels or kernel to copy for each output
:type kernel: [GPy.kernels.kernels] | GPy.kernels.kernels | None (default)
:param :class:`~GPy.inference.latent_function_inference inference_method:
InferenceMethodList of inferences, or one inference method for all
:param :class:`~GPy.likelihoodss.likelihoods.likelihoods` likelihoods: the likelihoods to use
:param str name: the name of this model
:param [str] Ynames: the names for the datasets given, must be of equal length as Ylist or None
:param bool|Norm normalizer: How to normalize the data?
:param bool stochastic: Should this model be using stochastic gradient descent over the dimensions?
:param bool|[bool] batchsize: either one batchsize for all, or one batchsize per dataset.
"""
def __init__(self, Ylist, input_dim, X=None, X_variance=None,
             initx = 'PCA', initz = 'permute',
             num_inducing=10, Z=None, kernel=None,
             inference_method=None, likelihoods=None, name='mrd',
             Ynames=None, normalizer=False, stochastic=False, batchsize=10):
    """Build one BayesianGPLVMMiniBatch per dataset, all sharing this
    model's latent X and inducing inputs Z (see class docstring for the
    parameter descriptions)."""
    self.logger = logging.getLogger(self.__class__.__name__)
    self.input_dim = input_dim
    self.num_inducing = num_inducing

    if isinstance(Ylist, dict):
        # Dict input: keys become the dataset names.
        Ynames, Ylist = zip(*Ylist.items())

    self.logger.debug("creating observable arrays")
    self.Ylist = [ObsAr(Y) for Y in Ylist]
    #The next line is a fix for Python 3. It replicates the python 2 behaviour from the above comprehension
    Y = Ylist[-1]

    if Ynames is None:
        self.logger.debug("creating Ynames")
        Ynames = ['Y{}'.format(i) for i in range(len(Ylist))]
    self.names = Ynames
    assert len(self.names) == len(self.Ylist), "one name per dataset, or None if Ylist is a dict"

    # One inference method per dataset.
    if inference_method is None:
        self.inference_method = InferenceMethodList([VarDTC() for _ in range(len(self.Ylist))])
    else:
        assert isinstance(inference_method, InferenceMethodList), "please provide one inference method per Y in the list and provide it as InferenceMethodList, inference_method given: {}".format(inference_method)
        self.inference_method = inference_method

    # Latent space init; fracs seeds the ARD lengthscales below.
    if X is None:
        X, fracs = self._init_X(initx, Ylist)
    else:
        fracs = [X.var(0)]*len(Ylist)

    Z = self._init_Z(initz, X)
    self.Z = Param('inducing inputs', Z)
    self.num_inducing = self.Z.shape[0] # ensure M==N if M>N

    # sort out the kernels
    self.logger.info("building kernels")
    if kernel is None:
        from ..kern import RBF
        kernels = [RBF(input_dim, ARD=1, lengthscale=1./fracs[i]) for i in range(len(Ylist))]
    elif isinstance(kernel, Kern):
        # A single kernel is copied once per dataset.
        kernels = []
        for i in range(len(Ylist)):
            k = kernel.copy()
            kernels.append(k)
    else:
        assert len(kernel) == len(Ylist), "need one kernel per output"
        assert all([isinstance(k, Kern) for k in kernel]), "invalid kernel object detected!"
        kernels = kernel

    self.variational_prior = NormalPrior()
    #self.X = NormalPosterior(X, X_variance)

    if likelihoods is None:
        # NOTE(review): 'Gaussian_noise'.format(i) has no placeholder, so
        # every likelihood gets the same name — presumably
        # 'Gaussian_noise_{}' was intended; confirm.
        likelihoods = [Gaussian(name='Gaussian_noise'.format(i)) for i in range(len(Ylist))]
    else: likelihoods = likelihoods

    self.logger.info("adding X and Z")
    super(MRD, self).__init__(Y, input_dim, X=X, X_variance=X_variance, num_inducing=num_inducing,
                              Z=self.Z, kernel=None, inference_method=self.inference_method, likelihood=Gaussian(),
                              name='manifold relevance determination', normalizer=None,
                              missing_data=False, stochastic=False, batchsize=1)

    self._log_marginal_likelihood = 0

    # The super-model's own kernel/likelihood are unused; the per-dataset
    # sub-models below carry their own.
    self.unlink_parameter(self.likelihood)
    self.unlink_parameter(self.kern)
    del self.kern
    del self.likelihood

    self.num_data = Ylist[0].shape[0]

    if isinstance(batchsize, int):
        batchsize = itertools.repeat(batchsize)

    self.bgplvms = []
    for i, n, k, l, Y, im, bs in zip(itertools.count(), Ynames, kernels, likelihoods, Ylist, self.inference_method, batchsize):
        assert Y.shape[0] == self.num_data, "All datasets need to share the number of datapoints, and those have to correspond to one another"
        md = np.isnan(Y).any()
        spgp = BayesianGPLVMMiniBatch(Y, input_dim, X, X_variance,
                                      Z=Z, kernel=k, likelihood=l,
                                      inference_method=im, name=n,
                                      normalizer=normalizer,
                                      missing_data=md,
                                      stochastic=stochastic,
                                      batchsize=bs)
        # Each sub-model contributes an equal share of the KL term and is
        # rewired to share this model's X and Z parameters.
        spgp.kl_factr = 1./len(Ynames)
        spgp.unlink_parameter(spgp.Z)
        spgp.unlink_parameter(spgp.X)
        del spgp.Z
        del spgp.X
        spgp.Z = self.Z
        spgp.X = self.X
        self.link_parameter(spgp, i+2)
        self.bgplvms.append(spgp)

    self.posterior = None
    self.logger.info("init done")
def parameters_changed(self):
    """Accumulate the marginal likelihood and X/Z gradients over all
    per-dataset sub-models (called by the parameterization framework)."""
    self._log_marginal_likelihood = 0
    self.Z.gradient[:] = 0.
    self.X.gradient[:] = 0.
    for b, i in zip(self.bgplvms, self.inference_method):
        self._log_marginal_likelihood += b._log_marginal_likelihood
        self.logger.info('working on im <{}>'.format(hex(id(i))))
        self.Z.gradient[:] += b.Z.gradient#full_values['Zgrad']
        #grad_dict = b.full_values

        # NOTE(review): both branches are identical; presumably the
        # uncertain-inputs case was meant to differ — confirm.
        if self.has_uncertain_inputs():
            self.X.gradient += b._Xgrad
        else:
            self.X.gradient += b._Xgrad

    #if self.has_uncertain_inputs():
    #    # update for the KL divergence
    #    self.variational_prior.update_gradients_KL(self.X)
    #    self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)
    #    pass
def log_likelihood(self):
    """Return the log marginal likelihood accumulated over all datasets."""
    return self._log_marginal_likelihood
    def _init_X(self, init='PCA', Ylist=None):
        """Initialize the shared latent space X.

        :param str init: 'PCA_concat' runs PCA on the horizontally stacked
            views; 'PCA_single' runs a separate PCA per view on a disjoint
            slice of latent dimensions; anything else falls back to random
            initialization.
        :param Ylist: list of observed output matrices (defaults to
            ``self.Ylist``).
        :returns: tuple ``(X, fracs)`` where ``fracs`` holds one
            explained-variance entry per view.
        """
        if Ylist is None:
            Ylist = self.Ylist
        # NOTE(review): `init in "PCA_concat"` is a *substring* test, so
        # e.g. init='PCA' or init='concat' also selects this branch --
        # presumably intentional shorthand, but worth confirming.
        if init in "PCA_concat":
            X, fracs = initialize_latent('PCA', self.input_dim, np.hstack(Ylist))
            fracs = [fracs]*len(Ylist)
        elif init in "PCA_single":
            X = np.zeros((Ylist[0].shape[0], self.input_dim))
            fracs = []
            # Split the latent dimensions evenly across views; each view's
            # PCA fills only its own slice of columns.
            for qs, Y in zip(np.array_split(np.arange(self.input_dim), len(Ylist)), Ylist):
                x,frcs = initialize_latent('PCA', len(qs), Y)
                X[:, qs] = x
                fracs.append(frcs)
        else: # init == 'random':
            X = np.random.randn(Ylist[0].shape[0], self.input_dim)
            fracs = X.var(0)
            fracs = [fracs]*len(Ylist)
        # Standardize the latent coordinates (global mean/std, not per-dim).
        X -= X.mean()
        X /= X.std()
        return X, fracs
def _init_Z(self, init="permute", X=None):
if X is None:
X = self.X
if init in "permute":
Z = np.random.permutation(X.copy())[:self.num_inducing]
elif init in "random":
Z = np.random.randn(self.num_inducing, self.input_dim) * X.var()
return Z
    def _handle_plotting(self, fignum, axes, plotf, sharex=False, sharey=False):
        """Run ``plotf(i, bgplvm, ax)`` once per sub-model, managing figure
        creation, subplot layout and axis sharing.

        :param fignum: matplotlib figure number, used only when ``axes`` is None.
        :param axes: None to create one subplot per sub-model, or a sequence
            holding one pre-made axes per sub-model.
        :param plotf: callable ``(index, bgplvm, ax) -> plot handle``.
        :param sharex: share the x axis of the first subplot with the rest.
        :param sharey: share the y axis of the first subplot with the rest.
        :returns: list of the values returned by ``plotf``.
        """
        import matplotlib.pyplot as plt
        if axes is None:
            fig = plt.figure(num=fignum)
        sharex_ax = None
        sharey_ax = None
        plots = []
        for i, g in enumerate(self.bgplvms):
            try:
                # NOTE(review): on the first iteration `ax` is not bound yet,
                # so the NameError is swallowed by the bare except below and
                # sharing only takes effect from the second subplot onwards.
                # Fragile, but apparently intentional.
                if sharex:
                    sharex_ax = ax # @UndefinedVariable
                    sharex = False # don't set twice
                if sharey:
                    sharey_ax = ax # @UndefinedVariable
                    sharey = False # don't set twice
            except:
                pass
            if axes is None:
                ax = fig.add_subplot(1, len(self.bgplvms), i + 1, sharex=sharex_ax, sharey=sharey_ax)
            elif isinstance(axes, (tuple, list, np.ndarray)):
                ax = axes[i]
            else:
                raise ValueError("Need one axes per latent dimension input_dim")
            plots.append(plotf(i, g, ax))
            if sharey_ax is not None:
                # Only the first (shared) subplot keeps its y tick labels.
                plt.setp(ax.get_yticklabels(), visible=False)
        plt.draw()
        if axes is None:
            try:
                fig.tight_layout()
            except:
                pass
        return plots
    def predict(self, Xnew, full_cov=False, Y_metadata=None, kern=None, Yindex=0):
        """
        Prediction for data set Yindex[default=0].
        This predicts the output mean and variance for the dataset given in Ylist[Yindex]

        .. note:: Side effect: the selected sub-model's posterior, kernel
            and likelihood are attached to ``self`` so the parent class
            prediction operates on that view, and they remain attached
            afterwards.
        """
        b = self.bgplvms[Yindex]
        self.posterior = b.posterior
        self.kern = b.kern
        self.likelihood = b.likelihood
        return super(MRD, self).predict(Xnew, full_cov, Y_metadata, kern)
#===============================================================================
# TODO: Predict! Maybe even change to several bgplvms, which share an X?
#===============================================================================
# def plot_predict(self, fignum=None, ax=None, sharex=False, sharey=False, **kwargs):
# fig = self._handle_plotting(fignum,
# ax,
# lambda i, g, ax: ax.imshow(g.predict(g.X)[0], **kwargs),
# sharex=sharex, sharey=sharey)
# return fig
    def plot_scales(self, fignum=None, ax=None, titles=None, sharex=False, sharey=True, *args, **kwargs):
        """
        Plot the ARD (relevance) scales of every view's kernel, one
        subplot per dataset.

        TODO: Explain other parameters

        :param titles: titles for axes of datasets
        """
        if titles is None:
            titles = [r'${}$'.format(name) for name in self.names]
        # NOTE(review): `reduce` relies on the py2 builtin (or a functools
        # import outside this view) -- confirm on py3.  ymax is currently
        # unused apart from the commented-out set_ylim below.
        ymax = reduce(max, [np.ceil(max(g.kern.input_sensitivity())) for g in self.bgplvms])
        def plotf(i, g, ax):
            #ax.set_ylim([0,ymax])
            return g.kern.plot_ARD(ax=ax, title=titles[i], *args, **kwargs)
        fig = self._handle_plotting(fignum, ax, plotf, sharex=sharex, sharey=sharey)
        return fig
    def plot_latent(self, labels=None, which_indices=None,
                resolution=50, ax=None, marker='o', s=40,
                fignum=None, plot_inducing=True, legend=True,
                plot_limits=None,
                aspect='auto', updates=False, predict_kwargs={}, imshow_kwargs={}):
        """
        see plotting.matplot_dep.dim_reduction_plots.plot_latent
        if predict_kwargs is None, will plot latent spaces for 0th dataset (and kernel), otherwise give
        predict_kwargs=dict(Yindex='index') for plotting only the latent space of dataset with 'index'.

        .. note:: ``predict_kwargs``/``imshow_kwargs`` use mutable default
            arguments; the default dict is mutated below (Yindex insertion)
            and therefore shared across calls.  Side effect: ``self.kern``
            and ``self.likelihood`` are re-pointed at the selected
            sub-model's objects.
        """
        import sys
        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
        from matplotlib import pyplot as plt
        from ..plotting.matplot_dep import dim_reduction_plots
        # Default to the first dataset's latent view.
        if "Yindex" not in predict_kwargs:
            predict_kwargs['Yindex'] = 0
        Yindex = predict_kwargs['Yindex']
        if ax is None:
            fig = plt.figure(num=fignum)
            ax = fig.add_subplot(111)
        else:
            fig = ax.figure
        # Expose the selected view's kernel/likelihood for the plot routine.
        self.kern = self.bgplvms[Yindex].kern
        self.likelihood = self.bgplvms[Yindex].likelihood
        plot = dim_reduction_plots.plot_latent(self, labels, which_indices,
                resolution, ax, marker, s,
                fignum, plot_inducing, legend,
                plot_limits, aspect, updates, predict_kwargs, imshow_kwargs)
        ax.set_title(self.bgplvms[Yindex].name)
        try:
            fig.tight_layout()
        except:
            pass
        return plot
def __getstate__(self):
state = super(MRD, self).__getstate__()
if 'kern' in state:
del state['kern']
if 'likelihood' in state:
del state['likelihood']
return state
    def __setstate__(self, state):
        # TODO:
        super(MRD, self).__setstate__(state)
        # Re-attach the first sub-model's kernel and likelihood, which
        # __getstate__ removed before pickling, then refresh gradients.
        self.kern = self.bgplvms[0].kern
        self.likelihood = self.bgplvms[0].likelihood
        self.parameters_changed()
| bsd-3-clause |
calebjordan/klayout-macros | pymacros/cpw_design.py | 1 | 4069 | from scipy.constants import c, epsilon_0, mu_0
from scipy.special import ellipk, ellipkm1
from numpy import sqrt, sinh, log, pi
class cpw:
def __init__(self, w=10., s=6., t=.1, h=500., l=1000., e1=11.6, material="nb", tgdelta=1e-8):
self.w = w*1e-6
self.s = s*1e-6
self.t = t*1e-6
self.h = h*1e-6
self.l = l*1e-6
self.e1 = e1
self.tgdelta = tgdelta
self.material = material
if material == "al":
self.Tc = 1.23
self.rho = 4e-9
else: #Assume Nb
self.Tc = 8
self.rho = 4e-9
self.l0 = 1.05e-3*sqrt(self.rho/self.Tc)
# Effective Dielectric Constant from Silicon-Air Interface
def k0(self):
return self.w/(self.w+2*self.s)
def kp0(self):
return sqrt(1-self.k0()**2)
def k1(self):
return sinh(pi*self.w/(4*self.h))/sinh(pi*(2*self.s+self.w)/(4*self.h))
def kp1(self):
return sqrt(1-self.k1()**2)
def Eeff(self):
return 1 + ((self.e1-1)*ellipk(self.k1())*ellipk(self.kp0()))/(2*ellipk(self.kp1())*ellipk(self.k0()))
# Kinetic Inductance Calculation
def g(self):
a = -log(self.t/(4*self.w))
b = -self.w/(self.w+2*self.s)*log(self.t/(4*(self.w+2*self.s)))
c = 2*(self.w+self.s)/(self.w+2*self.s)*log(self.s/(self.w+self.s))
return 1/(1*self.k0()**2*ellipk(self.k0())**2) * (a+b+c)
def Llk(self):
return mu_0*self.l0**2/(self.w*self.t)*self.g()
# Circuit Parameters
def Ll(self):
return mu_0*ellipk(self.kp0())/(4*ellipk(self.k0())) + self.Llk()
def Cl(self):
return 4*epsilon_0*self.Eeff()*ellipk(self.k0())/ellipk(self.kp0())
def vph(self):
return 1/sqrt(self.Ll()*self.Cl())
def f0(self):
return c/(sqrt(self.Eeff())*2*self.l)
def z0(self):
return sqrt(self.Ll()/self.Cl())
# Loss
def k(self):
return 2*pi*self.f0()*sqrt(self.Eeff())/c
def alpha_d(self):
return self.e1/sqrt(self.Eeff())*(self.Eeff()-1)/(self.e1-1)*self.tgdelta*self.k()/2
# Circuit Parameters with Loss
def L(self):
return 2*self.Ll()*self.l/(pi**2)
def C(self):
return self.Cl()*self.l/2
def R(self):
return self.z0()/(self.alpha_d()*self.l)
def Qint(self):
return self.R()*self.C()/sqrt(self.L()*self.C())
def wn(self):
return self.Qint()/(self.R()*self.C())
def fn(self):
return self.wn()/(2*pi)
class resonator:
    """A CPW resonator capacitively coupled to 50-ohm feedlines.

    ``cpw`` is the underlying transmission-line model; ``cin``/``cout``
    are the input/output coupling capacitances in farads.  The coupling
    capacitors are Norton-transformed into effective shunt R/C elements.
    """

    def __init__(self, cpw, cin, cout):
        self.cpw = cpw
        self.cki = cin
        self.cko = cout

    def _q2(self, ck):
        # (omega * C_k * Z_feed)^2 -- the factor shared by every Norton
        # transformation of a series coupling capacitor into 50 ohms.
        return (self.cpw.wn() * ck * 50.) ** 2

    def Rin(self):
        """Effective input resistance to ground."""
        return (1. + self._q2(self.cki)) / self._q2(self.cki)

    def Rout(self):
        """Effective output resistance to ground."""
        return (1. + self._q2(self.cko)) / self._q2(self.cko)

    def Cin(self):
        """Effective input capacitance to ground."""
        return self.cki / (1. + self._q2(self.cki))

    def Cout(self):
        """Effective output capacitance to ground."""
        return self.cko / (1. + self._q2(self.cko))

    def wl(self):
        """Loaded angular resonance frequency in rad/s."""
        c_total = self.cpw.C() + self.Cin() + self.Cout()
        return 1. / sqrt(self.cpw.L() * (c_total))

    def fl(self):
        """Loaded resonance frequency in GHz."""
        return self.wl() / 2 / pi / 1e9

    def Qc(self):
        """Total coupling quality factor."""
        c_total = self.cpw.C() + self.Cin() + self.Cout()
        g_total = 1. / self.cpw.R() + 1. / self.Rin() + 1. / self.Rout()
        return self.cpw.wn() * c_total / g_total

    def Ql(self):
        """Loaded quality factor (internal and coupling in parallel)."""
        return 1 / (1 / self.cpw.Qint() + 1 / self.Qc())

    def kappa(self):
        """Photon loss rate in rad/s."""
        return self.wl() / self.Ql()

    def __str__(self):
        return "l = {} um\nf = {} GHz\nQ = {}\nk = {} MHz".format(self.cpw.l*1e6,self.fl(), self.Ql(), self.kappa()/2e6/pi)
presidentielcoin/presidentielcoin | qa/rpc-tests/rpcbind_test.py | 1 | 4663 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Presidentielcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test for -rpcbind, as well as -rpcallowip and -rpcconnect
import tempfile
import traceback
from test_framework.test_framework import PresidentielcoinTestFramework
from test_framework.util import *
from test_framework.netutil import *
class RPCBindTest(PresidentielcoinTestFramework):
    """Functional test for -rpcbind, -rpcallowip and -rpcconnect.

    Starts the daemon with various bind/allow configurations and checks,
    via OS-level socket inspection of the daemon process, which addresses
    the RPC server actually bound, and that rpcallowip filtering is
    enforced.  Linux-only (asserted in run_test).
    """

    def __init__(self):
        super().__init__()
        # Fresh chain, one node; nodes are (re)started inside each sub-test.
        self.setup_clean_chain = True
        self.num_nodes = 1

    def setup_network(self):
        # Intentionally empty: each sub-test starts/stops its own nodes.
        pass

    def setup_nodes(self):
        # See setup_network.
        pass

    def run_bind_test(self, allow_ips, connect_to, addresses, expected):
        '''
        Start a node with requested rpcallowip and rpcbind parameters,
        then try to connect, and check if the set of bound addresses
        matches the expected set.
        '''
        # Normalize expectations to (hex-address, port) as reported by netutil.
        expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
        base_args = ['-disablewallet', '-nolisten']
        if allow_ips:
            base_args += ['-rpcallowip=' + x for x in allow_ips]
        binds = ['-rpcbind='+addr for addr in addresses]
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [base_args + binds], connect_to)
        try:
            pid = presidentielcoind_processes[0].pid
            # Compare the daemon's actually bound sockets with expectations.
            assert_equal(set(get_bind_addrs(pid)), set(expected))
        finally:
            stop_nodes(self.nodes)

    def run_allowip_test(self, allow_ips, rpchost, rpcport):
        '''
        Start a node with rpcallow IP, and request getinfo
        at a non-localhost IP.
        '''
        base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [base_args])
        try:
            # connect to node through non-loopback interface
            node = get_rpc_proxy(rpc_url(0, "%s:%d" % (rpchost, rpcport)), 0)
            node.getinfo()
        finally:
            node = None # make sure connection will be garbage collected and closed
            stop_nodes(self.nodes)

    def run_test(self):
        # due to OS-specific network stats queries, this test works only on Linux
        assert(sys.platform.startswith('linux'))
        # find the first non-loopback interface for testing
        non_loopback_ip = None
        for name,ip in all_interfaces():
            if ip != '127.0.0.1':
                non_loopback_ip = ip
                break
        if non_loopback_ip is None:
            # assert(not <non-empty string>) is always False: abort with a
            # message when no usable interface exists.
            assert(not 'This test requires at least one non-loopback IPv4 interface')
        print("Using interface %s for testing" % non_loopback_ip)
        defaultport = rpc_port(0)
        # check default without rpcallowip (IPv4 and IPv6 localhost)
        self.run_bind_test(None, '127.0.0.1', [],
            [('127.0.0.1', defaultport), ('::1', defaultport)])
        # check default with rpcallowip (IPv6 any)
        self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
            [('::0', defaultport)])
        # check only IPv4 localhost (explicit)
        self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
            [('127.0.0.1', defaultport)])
        # check only IPv4 localhost (explicit) with alternative port
        self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
            [('127.0.0.1', 32171)])
        # check only IPv4 localhost (explicit) with multiple alternative ports on same host
        self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
            [('127.0.0.1', 32171), ('127.0.0.1', 32172)])
        # check only IPv6 localhost (explicit)
        self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
            [('::1', defaultport)])
        # check both IPv4 and IPv6 localhost (explicit)
        self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
            [('127.0.0.1', defaultport), ('::1', defaultport)])
        # check only non-loopback interface
        self.run_bind_test([non_loopback_ip], non_loopback_ip, [non_loopback_ip],
            [(non_loopback_ip, defaultport)])
        # Check that with invalid rpcallowip, we are denied
        self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport)
        try:
            self.run_allowip_test(['1.1.1.1'], non_loopback_ip, defaultport)
            # Reaching this line means the RPC call was NOT rejected.
            assert(not 'Connection not denied by rpcallowip as expected')
        except JSONRPCException:
            pass
# Standard entry point: run the functional test when invoked directly.
if __name__ == '__main__':
    RPCBindTest ().main ()
| mit |
sourcefabric/Booktype | lib/booktype/apps/edit/migrations/0004_auto_20170816_1539.py | 6 | 1498 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-08-16 15:39
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 1.9 makemigrations): introduces the
    # ChatMessage and ChatThread models that back the editor chat.
    # The message->thread ForeignKey is added in a separate operation
    # because ChatThread is created after ChatMessage.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('editor', '0009_chapter_content_json'),
        ('edit', '0003_auto_20170420_0144'),
    ]

    operations = [
        migrations.CreateModel(
            name='ChatMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                ('datetime', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='ChatThread',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='editor.Book')),
            ],
        ),
        migrations.AddField(
            model_name='chatmessage',
            name='thread',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='edit.ChatThread'),
        ),
    ]
| agpl-3.0 |
fubecka/f5-dashboard | flask/lib/python2.6/site-packages/sqlalchemy/ext/instrumentation.py | 56 | 14646 | """Extensible class instrumentation.
The :mod:`sqlalchemy.ext.instrumentation` package provides for alternate
systems of class instrumentation within the ORM. Class instrumentation
refers to how the ORM places attributes on the class which maintain
data and track changes to that data, as well as event hooks installed
on the class.
.. note::
The extension package is provided for the benefit of integration
with other object management packages, which already perform
their own instrumentation. It is not intended for general use.
For examples of how the instrumentation extension is used,
see the example :ref:`examples_instrumentation`.
.. versionchanged:: 0.8
The :mod:`sqlalchemy.orm.instrumentation` was split out so
that all functionality having to do with non-standard
instrumentation was moved out to :mod:`sqlalchemy.ext.instrumentation`.
When imported, the module installs itself within
:mod:`sqlalchemy.orm.instrumentation` so that it
takes effect, including recognition of
``__sa_instrumentation_manager__`` on mapped classes, as
well :data:`.instrumentation_finders`
being used to determine class instrumentation resolution.
"""
from ..orm import instrumentation as orm_instrumentation
from ..orm.instrumentation import (
ClassManager, InstrumentationFactory, _default_state_getter,
_default_dict_getter, _default_manager_getter
)
from ..orm import attributes, collections, base as orm_base
from .. import util
from ..orm import exc as orm_exc
import weakref
INSTRUMENTATION_MANAGER = '__sa_instrumentation_manager__'
"""Attribute, elects custom instrumentation when present on a mapped class.
Allows a class to specify a slightly or wildly different technique for
tracking changes made to mapped attributes and collections.
Only one instrumentation implementation is allowed in a given object
inheritance hierarchy.
The value of this attribute must be a callable and will be passed a class
object. The callable must return one of:
- An instance of an InstrumentationManager or subclass
- An object implementing all or some of InstrumentationManager (TODO)
- A dictionary of callables, implementing all or some of the above (TODO)
- An instance of a ClassManager or subclass
This attribute is consulted by SQLAlchemy instrumentation
resolution, once the :mod:`sqlalchemy.ext.instrumentation` module
has been imported. If custom finders are installed in the global
instrumentation_finders list, they may or may not choose to honor this
attribute.
"""
def find_native_user_instrumentation_hook(cls):
    """Return the user-specified instrumentation factory for *cls*, if any.

    Looks up the ``INSTRUMENTATION_MANAGER`` attribute
    (``__sa_instrumentation_manager__``); ``None`` means the class uses
    the default instrumentation.
    """
    hook = getattr(cls, INSTRUMENTATION_MANAGER, None)
    return hook
instrumentation_finders = [find_native_user_instrumentation_hook]
"""An extensible sequence of callables which return instrumentation
implementations
When a class is registered, each callable will be passed a class object.
If None is returned, the
next finder in the sequence is consulted. Otherwise the return must be an
instrumentation factory that follows the same guidelines as
sqlalchemy.ext.instrumentation.INSTRUMENTATION_MANAGER.
By default, the only finder is find_native_user_instrumentation_hook, which
searches for INSTRUMENTATION_MANAGER. If all finders return None, standard
ClassManager instrumentation is used.
"""
class ExtendedInstrumentationRegistry(InstrumentationFactory):
    """Extends :class:`.InstrumentationFactory` with additional
    bookkeeping, to accommodate multiple types of
    class managers.

    """

    # Per-class tables mapping a mapped class to the callables that
    # retrieve its manager / InstanceState / instance dict.  Weak keys so
    # registration does not keep mapped classes alive.
    _manager_finders = weakref.WeakKeyDictionary()
    _state_finders = weakref.WeakKeyDictionary()
    _dict_finders = weakref.WeakKeyDictionary()
    # Flips to True once any non-default manager is installed; triggers
    # the switch to the slower per-class lookup functions.
    _extended = False

    def _locate_extended_factory(self, class_):
        # Consult each registered finder; the first non-None factory wins.
        for finder in instrumentation_finders:
            factory = finder(class_)
            if factory is not None:
                manager = self._extended_class_manager(class_, factory)
                return manager, factory
        else:
            # No finder claimed the class: use default instrumentation.
            return None, None

    def _check_conflicts(self, class_, factory):
        # Only one instrumentation implementation is allowed per
        # inheritance hierarchy.
        existing_factories = self._collect_management_factories_for(class_).\
            difference([factory])
        if existing_factories:
            raise TypeError(
                "multiple instrumentation implementations specified "
                "in %s inheritance hierarchy: %r" % (
                    class_.__name__, list(existing_factories)))

    def _extended_class_manager(self, class_, factory):
        manager = factory(class_)
        if not isinstance(manager, ClassManager):
            # Wrap duck-typed InstrumentationManager-style objects.
            manager = _ClassInstrumentationAdapter(class_, manager)

        if factory != ClassManager and not self._extended:
            # somebody invoked a custom ClassManager.
            # reinstall global "getter" functions with the more
            # expensive ones.
            self._extended = True
            _install_instrumented_lookups()

        self._manager_finders[class_] = manager.manager_getter()
        self._state_finders[class_] = manager.state_getter()
        self._dict_finders[class_] = manager.dict_getter()
        return manager

    def _collect_management_factories_for(self, cls):
        """Return a collection of factories in play or specified for a
        hierarchy.

        Traverses the entire inheritance graph of a cls and returns a
        collection of instrumentation factories for those classes. Factories
        are extracted from active ClassManagers, if available, otherwise
        instrumentation_finders is consulted.

        """
        hierarchy = util.class_hierarchy(cls)
        factories = set()
        for member in hierarchy:
            manager = self.manager_of_class(member)
            if manager is not None:
                factories.add(manager.factory)
            else:
                for finder in instrumentation_finders:
                    factory = finder(member)
                    if factory is not None:
                        break
                else:
                    factory = None
                factories.add(factory)
        factories.discard(None)
        return factories

    def unregister(self, class_):
        # Drop the per-class lookup entries before delegating teardown.
        if class_ in self._manager_finders:
            del self._manager_finders[class_]
            del self._state_finders[class_]
            del self._dict_finders[class_]
        super(ExtendedInstrumentationRegistry, self).unregister(class_)

    def manager_of_class(self, cls):
        if cls is None:
            return None
        # Fall back to the default getter for classes never registered here.
        return self._manager_finders.get(cls, _default_manager_getter)(cls)

    def state_of(self, instance):
        if instance is None:
            raise AttributeError("None has no persistent state.")
        return self._state_finders.get(
            instance.__class__, _default_state_getter)(instance)

    def dict_of(self, instance):
        if instance is None:
            raise AttributeError("None has no persistent state.")
        return self._dict_finders.get(
            instance.__class__, _default_dict_getter)(instance)
# Install the extended registry as the ORM-wide instrumentation factory
# and export the finder list, so the custom hooks take effect inside
# sqlalchemy.orm.instrumentation as soon as this module is imported.
orm_instrumentation._instrumentation_factory = \
    _instrumentation_factory = ExtendedInstrumentationRegistry()
orm_instrumentation.instrumentation_finders = instrumentation_finders
class InstrumentationManager(object):
    """User-defined class instrumentation extension.

    Subclass this to replace how SQLAlchemy instruments mapped classes.
    It exists for integration with external object-management frameworks
    that wish to fully control instrumentation, and is not intended for
    regular usage.  For mere interception of instrumentation events, use
    :class:`.InstrumentationEvents` instead.

    The API should be considered semi-stable and may change slightly
    between releases.

    .. versionchanged:: 0.8
        :class:`.InstrumentationManager` was moved from
        :mod:`sqlalchemy.orm.instrumentation` to
        :mod:`sqlalchemy.ext.instrumentation`.

    """

    # r4361 added a mandatory (cls) constructor to this interface; given
    # that, the class_ argument on the remaining methods is arguably
    # redundant but kept for compatibility.

    def __init__(self, class_):
        pass

    def manage(self, class_, manager):
        # Stash the manager directly on the class.
        class_._default_class_manager = manager

    def dispose(self, class_, manager):
        del class_._default_class_manager

    def manager_getter(self, class_):
        # Return a callable resolving a class to its stored manager.
        return lambda cls: cls._default_class_manager

    def instrument_attribute(self, class_, key, inst):
        pass

    def post_configure_attribute(self, class_, key, inst):
        pass

    def install_descriptor(self, class_, key, inst):
        # key is dynamic, so setattr is required here.
        setattr(class_, key, inst)

    def uninstall_descriptor(self, class_, key):
        delattr(class_, key)

    def install_member(self, class_, key, implementation):
        setattr(class_, key, implementation)

    def uninstall_member(self, class_, key):
        delattr(class_, key)

    def instrument_collection_class(self, class_, key, collection_class):
        return collections.prepare_instrumentation(collection_class)

    def get_instance_dict(self, class_, instance):
        # vars() is equivalent to instance.__dict__.
        return vars(instance)

    def initialize_instance_dict(self, class_, instance):
        pass

    def install_state(self, class_, instance, state):
        instance._default_state = state

    def remove_state(self, class_, instance):
        del instance._default_state

    def state_getter(self, class_):
        def get(instance):
            return instance._default_state
        return get

    def dict_getter(self, class_):
        def get(inst):
            return self.get_instance_dict(class_, inst)
        return get
class _ClassInstrumentationAdapter(ClassManager):
    """Adapts a user-defined InstrumentationManager to a ClassManager."""

    def __init__(self, class_, override):
        # override: the user-supplied InstrumentationManager instance.
        # Resolve the state/dict getters before ClassManager.__init__,
        # which may invoke instrumentation hooks that rely on them.
        self._adapted = override
        self._get_state = self._adapted.state_getter(class_)
        self._get_dict = self._adapted.dict_getter(class_)
        ClassManager.__init__(self, class_)

    def manage(self):
        self._adapted.manage(self.class_, self)

    def dispose(self):
        self._adapted.dispose(self.class_)

    def manager_getter(self):
        return self._adapted.manager_getter(self.class_)

    def instrument_attribute(self, key, inst, propagated=False):
        ClassManager.instrument_attribute(self, key, inst, propagated)
        if not propagated:
            # Only notify the adapted manager for directly-declared
            # attributes, not ones propagated from a superclass.
            self._adapted.instrument_attribute(self.class_, key, inst)

    def post_configure_attribute(self, key):
        super(_ClassInstrumentationAdapter, self).post_configure_attribute(key)
        self._adapted.post_configure_attribute(self.class_, key, self[key])

    def install_descriptor(self, key, inst):
        self._adapted.install_descriptor(self.class_, key, inst)

    def uninstall_descriptor(self, key):
        self._adapted.uninstall_descriptor(self.class_, key)

    def install_member(self, key, implementation):
        self._adapted.install_member(self.class_, key, implementation)

    def uninstall_member(self, key):
        self._adapted.uninstall_member(self.class_, key)

    def instrument_collection_class(self, key, collection_class):
        return self._adapted.instrument_collection_class(
            self.class_, key, collection_class)

    def initialize_collection(self, key, state, factory):
        # initialize_collection is optional on the adapted manager; fall
        # back to the stock ClassManager implementation when absent.
        delegate = getattr(self._adapted, 'initialize_collection', None)
        if delegate:
            return delegate(key, state, factory)
        else:
            return ClassManager.initialize_collection(self, key,
                                                      state, factory)

    def new_instance(self, state=None):
        instance = self.class_.__new__(self.class_)
        self.setup_instance(instance, state)
        return instance

    def _new_state_if_none(self, instance):
        """Install a default InstanceState if none is present.

        A private convenience method used by the __init__ decorator.
        """
        if self.has_state(instance):
            return False
        else:
            return self.setup_instance(instance)

    def setup_instance(self, instance, state=None):
        self._adapted.initialize_instance_dict(self.class_, instance)

        if state is None:
            state = self._state_constructor(instance, self)

        # the given instance is assumed to have no state
        self._adapted.install_state(self.class_, instance, state)
        return state

    def teardown_instance(self, instance):
        self._adapted.remove_state(self.class_, instance)

    def has_state(self, instance):
        # EAFP: the adapted state getter raises NO_STATE-type exceptions
        # for uninstrumented instances.
        try:
            self._get_state(instance)
        except orm_exc.NO_STATE:
            return False
        else:
            return True

    def state_getter(self):
        return self._get_state

    def dict_getter(self):
        return self._get_dict
def _install_instrumented_lookups():
    """Replace global class/object management functions
    with ExtendedInstrumentationRegistry implementations, which
    allow multiple types of class managers to be present,
    at the cost of performance.

    This function is called only by ExtendedInstrumentationRegistry
    and unit tests specific to this behavior.

    The _reinstall_default_lookups() function can be called
    after this one to re-establish the default functions.

    """
    _install_lookups({
        'instance_state': _instrumentation_factory.state_of,
        'instance_dict': _instrumentation_factory.dict_of,
        'manager_of_class': _instrumentation_factory.manager_of_class,
    })
def _reinstall_default_lookups():
    """Restore simplified lookups."""
    _install_lookups({
        'instance_state': _default_state_getter,
        'instance_dict': _default_dict_getter,
        'manager_of_class': _default_manager_getter,
    })
def _install_lookups(lookups):
    """Point the shared accessor functions at the given implementations.

    Rebinds ``instance_state``/``instance_dict``/``manager_of_class`` in
    this module and mirrors them into ``orm.base``, ``orm.attributes``
    and ``orm.instrumentation`` so every consumer sees the same set.
    """
    global instance_state, instance_dict, manager_of_class
    instance_state = lookups['instance_state']
    instance_dict = lookups['instance_dict']
    manager_of_class = lookups['manager_of_class']
    orm_base.instance_state = attributes.instance_state = \
        orm_instrumentation.instance_state = instance_state
    orm_base.instance_dict = attributes.instance_dict = \
        orm_instrumentation.instance_dict = instance_dict
    orm_base.manager_of_class = attributes.manager_of_class = \
        orm_instrumentation.manager_of_class = manager_of_class
| apache-2.0 |
rherault-insa/numpy | numpy/core/tests/test_numeric.py | 1 | 93637 | from __future__ import division, absolute_import, print_function
import sys
import warnings
import itertools
import platform
from decimal import Decimal
import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_raises,
assert_raises_regex, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, dec
)
class TestResize(TestCase):
    """np.resize: copy/tile semantics and zero-size edge cases."""

    def test_copies(self):
        # resize tiles the flattened input until the target shape is full
        src = np.array([[1, 2], [3, 4]])
        assert_equal(np.resize(src, (2, 4)),
                     np.array([[1, 2, 3, 4], [1, 2, 3, 4]]))
        assert_equal(np.resize(src, (4, 2)),
                     np.array([[1, 2], [3, 4], [1, 2], [3, 4]]))
        assert_equal(np.resize(src, (4, 3)),
                     np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]]))

    def test_zeroresize(self):
        # resizing down to zero elements keeps the dtype
        src = np.array([[1, 2], [3, 4]])
        shrunk = np.resize(src, (0,))
        assert_array_equal(shrunk, np.array([]))
        assert_equal(src.dtype, shrunk.dtype)

    def test_reshape_from_zero(self):
        # See also gh-6740: growing from a zero-length structured array
        src = np.zeros(0, dtype=[('a', np.float32, 1)])
        grown = np.resize(src, (2, 1))
        assert_array_equal(grown, np.zeros((2, 1), grown.dtype))
        assert_equal(src.dtype, grown.dtype)
class TestNonarrayArgs(TestCase):
    # check that non-array arguments to functions wrap them in arrays
    def test_choose(self):
        choices = [[0, 1, 2],
                   [3, 4, 5],
                   [5, 6, 7]]
        tgt = [5, 1, 5]
        a = [2, 0, 1]
        out = np.choose(a, choices)
        assert_equal(out, tgt)

    def test_clip(self):
        arr = [-1, 5, 2, 3, 10, -4, -9]
        out = np.clip(arr, 2, 7)
        tgt = [2, 5, 2, 3, 7, 2, 2]
        assert_equal(out, tgt)

    def test_compress(self):
        arr = [[0, 1, 2, 3, 4],
               [5, 6, 7, 8, 9]]
        tgt = [[5, 6, 7, 8, 9]]
        out = np.compress([0, 1], arr, axis=0)
        assert_equal(out, tgt)

    def test_cumproduct(self):
        # NOTE(review): np.cumproduct is a legacy alias of np.cumprod and
        # has been removed in NumPy 2.0 -- fine for the NumPy version this
        # test suite ships with.
        A = [[1, 2, 3], [4, 5, 6]]
        assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))

    def test_diagonal(self):
        a = [[0, 1, 2, 3],
             [4, 5, 6, 7],
             [8, 9, 10, 11]]
        out = np.diagonal(a)
        tgt = [0, 5, 10]
        assert_equal(out, tgt)

    def test_mean(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_(np.mean(A) == 3.5)
        assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
        assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))

        # mean of an empty sequence is NaN and warns, not raises
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', RuntimeWarning)
            assert_(np.isnan(np.mean([])))
            assert_(w[0].category is RuntimeWarning)

    def test_ptp(self):
        a = [3, 4, 5, 10, -3, -5, 6.0]
        assert_equal(np.ptp(a, axis=0), 15.0)

    def test_prod(self):
        arr = [[1, 2, 3, 4],
               [5, 6, 7, 9],
               [10, 3, 4, 5]]
        tgt = [24, 1890, 600]
        assert_equal(np.prod(arr, axis=-1), tgt)

    def test_ravel(self):
        a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
        tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
        assert_equal(np.ravel(a), tgt)

    def test_repeat(self):
        a = [1, 2, 3]
        tgt = [1, 1, 2, 2, 3, 3]
        out = np.repeat(a, 2)
        assert_equal(out, tgt)

    def test_reshape(self):
        arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
        tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
        assert_equal(np.reshape(arr, (2, 6)), tgt)

    def test_round(self):
        arr = [1.56, 72.54, 6.35, 3.25]
        tgt = [1.6, 72.5, 6.4, 3.2]
        assert_equal(np.around(arr, decimals=1), tgt)

    def test_searchsorted(self):
        arr = [-8, -5, -1, 3, 6, 10]
        out = np.searchsorted(arr, 0)
        assert_equal(out, 3)

    def test_size(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_(np.size(A) == 6)
        assert_(np.size(A, 0) == 2)
        assert_(np.size(A, 1) == 3)

    def test_squeeze(self):
        A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
        assert_(np.squeeze(A).shape == (3, 3))

    def test_std(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_almost_equal(np.std(A), 1.707825127659933)
        assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
        assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))

        # std of an empty sequence is NaN and warns, not raises
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', RuntimeWarning)
            assert_(np.isnan(np.std([])))
            assert_(w[0].category is RuntimeWarning)

    def test_swapaxes(self):
        tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]
        a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
        out = np.swapaxes(a, 0, 2)
        assert_equal(out, tgt)

    def test_sum(self):
        m = [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]
        tgt = [[6], [15], [24]]
        out = np.sum(m, axis=1, keepdims=True)

        assert_equal(tgt, out)

    def test_take(self):
        tgt = [2, 3, 5]
        indices = [1, 2, 4]
        a = [1, 2, 3, 4, 5]

        out = np.take(a, indices)
        assert_equal(out, tgt)

    def test_trace(self):
        c = [[1, 2], [3, 4], [5, 6]]
        assert_equal(np.trace(c), 5)

    def test_transpose(self):
        arr = [[1, 2], [3, 4], [5, 6]]
        tgt = [[1, 3, 5], [2, 4, 6]]
        assert_equal(np.transpose(arr, (1, 0)), tgt)

    def test_var(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_almost_equal(np.var(A), 2.9166666666666665)
        assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
        assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))

        # var of an empty sequence is NaN and warns, not raises
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', RuntimeWarning)
            assert_(np.isnan(np.var([])))
            assert_(w[0].category is RuntimeWarning)
class TestBoolScalar(TestCase):
    """Identity behavior of the np.True_ / np.False_ singletons."""

    def test_logical(self):
        f = np.False_
        t = np.True_
        s = "xyz"
        # `and` short-circuits and returns one of its operands unchanged.
        self.assertTrue((t and s) is s)
        self.assertTrue((f and s) is f)

    def test_bitwise_or(self):
        f = np.False_
        t = np.True_
        # `|` must return the singleton objects themselves.
        for lhs, rhs, res in [(t, t, t), (f, t, t), (t, f, t), (f, f, f)]:
            self.assertTrue((lhs | rhs) is res)

    def test_bitwise_and(self):
        f = np.False_
        t = np.True_
        for lhs, rhs, res in [(t, t, t), (f, t, f), (t, f, f), (f, f, f)]:
            self.assertTrue((lhs & rhs) is res)

    def test_bitwise_xor(self):
        f = np.False_
        t = np.True_
        for lhs, rhs, res in [(t, t, f), (f, t, t), (t, f, t), (f, f, f)]:
            self.assertTrue((lhs ^ rhs) is res)
class TestBoolArray(TestCase):
    """Reductions (all/any) and logical ufuncs on bool arrays, including the
    vectorised (SIMD) code paths via deliberately misaligned views."""

    def setUp(self):
        # offset for simd tests
        # The [1::] / [2::] slices drop leading elements so the buffers are
        # misaligned, exercising the unaligned prologue of the bool loops.
        self.t = np.array([True] * 41, dtype=np.bool)[1::]
        self.f = np.array([False] * 41, dtype=np.bool)[1::]
        self.o = np.array([False] * 42, dtype=np.bool)[2::]
        # nm: mostly-False with two True entries; im: mostly-True with two
        # False entries -- lets any()/all() detect isolated elements.
        self.nm = self.f.copy()
        self.im = self.t.copy()
        self.nm[3] = True
        self.nm[-2] = True
        self.im[3] = False
        self.im[-2] = False

    def test_all_any(self):
        """all()/any() on uniform and almost-uniform bool arrays."""
        self.assertTrue(self.t.all())
        self.assertTrue(self.t.any())
        self.assertFalse(self.f.all())
        self.assertFalse(self.f.any())
        self.assertTrue(self.nm.any())
        self.assertTrue(self.im.any())
        self.assertFalse(self.nm.all())
        self.assertFalse(self.im.all())
        # check bad element in all positions
        for i in range(256 - 7):
            d = np.array([False] * 256, dtype=np.bool)[7::]
            d[i] = True
            self.assertTrue(np.any(d))
            e = np.array([True] * 256, dtype=np.bool)[7::]
            e[i] = False
            self.assertFalse(np.all(e))
            assert_array_equal(e, ~d)
        # big array test for blocked libc loops
        for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
            d = np.array([False] * 100043, dtype=np.bool)
            d[i] = True
            self.assertTrue(np.any(d), msg="%r" % i)
            e = np.array([True] * 100043, dtype=np.bool)
            e[i] = False
            self.assertFalse(np.all(e), msg="%r" % i)

    def test_logical_not_abs(self):
        """~ (invert) and abs on bool arrays, with and without an out array."""
        assert_array_equal(~self.t, self.f)
        assert_array_equal(np.abs(~self.t), self.f)
        assert_array_equal(np.abs(~self.f), self.t)
        assert_array_equal(np.abs(self.f), self.f)
        assert_array_equal(~np.abs(self.f), self.t)
        assert_array_equal(~np.abs(self.t), self.f)
        assert_array_equal(np.abs(~self.nm), self.im)
        np.logical_not(self.t, out=self.o)
        assert_array_equal(self.o, self.f)
        np.abs(self.t, out=self.o)
        assert_array_equal(self.o, self.t)

    def test_logical_and_or_xor(self):
        """&, |, ^ operators, their ufunc forms with out=, and scalar operands."""
        assert_array_equal(self.t | self.t, self.t)
        assert_array_equal(self.f | self.f, self.f)
        assert_array_equal(self.t | self.f, self.t)
        assert_array_equal(self.f | self.t, self.t)
        np.logical_or(self.t, self.t, out=self.o)
        assert_array_equal(self.o, self.t)
        assert_array_equal(self.t & self.t, self.t)
        assert_array_equal(self.f & self.f, self.f)
        assert_array_equal(self.t & self.f, self.f)
        assert_array_equal(self.f & self.t, self.f)
        np.logical_and(self.t, self.t, out=self.o)
        assert_array_equal(self.o, self.t)
        assert_array_equal(self.t ^ self.t, self.f)
        assert_array_equal(self.f ^ self.f, self.f)
        assert_array_equal(self.t ^ self.f, self.t)
        assert_array_equal(self.f ^ self.t, self.t)
        np.logical_xor(self.t, self.t, out=self.o)
        assert_array_equal(self.o, self.f)
        # mixed array/array and array/python-scalar operands
        assert_array_equal(self.nm & self.t, self.nm)
        assert_array_equal(self.im & self.f, False)
        assert_array_equal(self.nm & True, self.nm)
        assert_array_equal(self.im & False, self.f)
        assert_array_equal(self.nm | self.t, self.t)
        assert_array_equal(self.im | self.f, self.im)
        assert_array_equal(self.nm | True, self.t)
        assert_array_equal(self.im | False, self.im)
        assert_array_equal(self.nm ^ self.t, self.im)
        assert_array_equal(self.im ^ self.f, self.im)
        assert_array_equal(self.nm ^ True, self.im)
        assert_array_equal(self.im ^ False, self.im)
class TestBoolCmp(TestCase):
    """Float/double comparisons producing bool arrays, covering the SIMD
    comparison loops for every lane pattern plus nan/inf/signbit handling."""

    def setUp(self):
        # f/d: input values; ef/ed: expected truth of (value != 0).
        self.f = np.ones(256, dtype=np.float32)
        self.ef = np.ones(self.f.size, dtype=np.bool)
        self.d = np.ones(128, dtype=np.float64)
        self.ed = np.ones(self.d.size, dtype=np.bool)
        # generate values for all permutation of 256bit simd vectors
        s = 0
        for i in range(32):
            self.f[s:s+8] = [i & 2**x for x in range(8)]
            self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
            s += 8
        s = 0
        for i in range(16):
            self.d[s:s+4] = [i & 2**x for x in range(4)]
            self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
            s += 4
        # nf/nd: NaN where the expected mask is True (for isnan tests).
        self.nf = self.f.copy()
        self.nd = self.d.copy()
        self.nf[self.ef] = np.nan
        self.nd[self.ed] = np.nan
        # inff/infd: interleave +inf, -inf and NaN (for isinf/isfinite tests).
        self.inff = self.f.copy()
        self.infd = self.d.copy()
        self.inff[::3][self.ef[::3]] = np.inf
        self.infd[::3][self.ed[::3]] = np.inf
        self.inff[1::3][self.ef[1::3]] = -np.inf
        self.infd[1::3][self.ed[1::3]] = -np.inf
        self.inff[2::3][self.ef[2::3]] = np.nan
        self.infd[2::3][self.ed[2::3]] = np.nan
        self.efnonan = self.ef.copy()
        self.efnonan[2::3] = False
        self.ednonan = self.ed.copy()
        self.ednonan[2::3] = False
        # signf/signd: negative values incl. -inf, -nan, -0 (for signbit tests).
        self.signf = self.f.copy()
        self.signd = self.d.copy()
        self.signf[self.ef] *= -1.
        self.signd[self.ed] *= -1.
        self.signf[1::6][self.ef[1::6]] = -np.inf
        self.signd[1::6][self.ed[1::6]] = -np.inf
        self.signf[3::6][self.ef[3::6]] = -np.nan
        self.signd[3::6][self.ed[3::6]] = -np.nan
        self.signf[4::6][self.ef[4::6]] = -0.
        self.signd[4::6][self.ed[4::6]] = -0.

    def test_float(self):
        """float32 comparisons; the [i:] slices vary buffer alignment."""
        # offset for alignment test
        for i in range(4):
            assert_array_equal(self.f[i:] > 0, self.ef[i:])
            assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
            assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
            assert_array_equal(-self.f[i:] < 0, self.ef[i:])
            assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
            r = self.f[i:] != 0
            assert_array_equal(r, self.ef[i:])
            r2 = self.f[i:] != np.zeros_like(self.f[i:])
            r3 = 0 != self.f[i:]
            assert_array_equal(r, r2)
            assert_array_equal(r, r3)
            # check bool == 0x1
            assert_array_equal(r.view(np.int8), r.astype(np.int8))
            assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
            assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
            # isnan on amd64 takes the same code path
            assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
            assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
            assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
            assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
            assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])

    def test_double(self):
        """float64 comparisons; mirror of test_float for doubles."""
        # offset for alignment test
        for i in range(2):
            assert_array_equal(self.d[i:] > 0, self.ed[i:])
            assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
            assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
            assert_array_equal(-self.d[i:] < 0, self.ed[i:])
            assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
            r = self.d[i:] != 0
            assert_array_equal(r, self.ed[i:])
            r2 = self.d[i:] != np.zeros_like(self.d[i:])
            r3 = 0 != self.d[i:]
            assert_array_equal(r, r2)
            assert_array_equal(r, r3)
            # check bool == 0x1
            assert_array_equal(r.view(np.int8), r.astype(np.int8))
            assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
            assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
            # isnan on amd64 takes the same code path
            assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
            assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
            assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
            assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
            assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
class TestSeterr(TestCase):
    """np.seterr / np.geterr / np.seterrobj floating-point error-state control.

    These tests mutate process-global FP error state, so they restore it
    (via np.errstate contexts or explicit seterr/seterrobj) before exiting.
    """

    def test_default(self):
        """The out-of-the-box error state warns everywhere except underflow."""
        err = np.geterr()
        self.assertEqual(err, dict(
            divide='warn',
            invalid='warn',
            over='warn',
            under='ignore',
        ))

    def test_set(self):
        """seterr returns the previous state and geterr reflects updates."""
        with np.errstate():
            err = np.seterr()
            old = np.seterr(divide='print')
            # seterr with no args reports, with args returns the prior state.
            self.assertTrue(err == old)
            new = np.seterr()
            self.assertTrue(new['divide'] == 'print')
            np.seterr(over='raise')
            self.assertTrue(np.geterr()['over'] == 'raise')
            self.assertTrue(new['divide'] == 'print')
            np.seterr(**old)
            self.assertTrue(np.geterr() == old)

    @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
    def test_divide_err(self):
        """divide='raise' raises FloatingPointError; 'ignore' suppresses it."""
        with np.errstate(divide='raise'):
            try:
                np.array([1.]) / np.array([0.])
            except FloatingPointError:
                pass
            else:
                self.fail()
            np.seterr(divide='ignore')
            np.array([1.]) / np.array([0.])

    def test_errobj(self):
        """seterrobj error masks and callback objects are honoured."""
        olderrobj = np.geterrobj()
        self.called = 0
        try:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                with np.errstate(divide='warn'):
                    # errmask 1 -> warn on divide-by-zero.
                    np.seterrobj([20000, 1, None])
                    np.array([1.]) / np.array([0.])
                    self.assertEqual(len(w), 1)

            def log_err(*args):
                # Callback invoked by the 'call' error mode; records the call.
                self.called += 1
                extobj_err = args
                assert_(len(extobj_err) == 2)
                assert_("divide" in extobj_err[0])

            with np.errstate(divide='ignore'):
                # errmask 3 -> call mode: log_err must fire exactly once.
                np.seterrobj([20000, 3, log_err])
                np.array([1.]) / np.array([0.])
                self.assertEqual(self.called, 1)

            np.seterrobj(olderrobj)
            with np.errstate(divide='ignore'):
                # extobj= passes the error object per-call instead of globally.
                np.divide(1., 0., extobj=[20000, 3, log_err])
                self.assertEqual(self.called, 2)
        finally:
            np.seterrobj(olderrobj)
            del self.called

    def test_errobj_noerrmask(self):
        """errmask = 0 has a special code path for the default."""
        # errmask = 0 has a special code path for the default
        olderrobj = np.geterrobj()
        try:
            # set errobj to something non default
            np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT,
                         umath.ERR_DEFAULT + 1, None])
            # call a ufunc
            np.isnan(np.array([6]))
            # same with the default, lots of times to get rid of possible
            # pre-existing stack in the code
            for i in range(10000):
                np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT,
                             None])
                np.isnan(np.array([6]))
        finally:
            np.seterrobj(olderrobj)
class TestFloatExceptions(TestCase):
    """Floating-point exceptions (underflow/overflow/divide/invalid) raised
    as FloatingPointError under np.errstate(all='raise')."""

    def assert_raises_fpe(self, fpeerr, flop, x, y):
        """Assert flop(x, y) raises a FloatingPointError mentioning *fpeerr*."""
        ftype = type(x)
        try:
            flop(x, y)
            assert_(False,
                    "Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
        except FloatingPointError as exc:
            assert_(str(exc).find(fpeerr) >= 0,
                    "Type %s raised wrong fpe error '%s'." % (ftype, exc))

    def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
        # Check that fpe exception is raised.
        #
        # Given a floating operation `flop` and two scalar values, check that
        # the operation raises the floating point exception specified by
        # `fpeerr`. Tests all variants with 0-d array scalars as well.
        self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
        self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])

    @dec.knownfailureif(True, "See ticket #2350")
    def test_floating_exceptions(self):
        """Each arithmetic FP exception fires for every real/complex dtype."""
        # Test basic arithmetic function errors
        with np.errstate(all='raise'):
            # Test for all real and complex float types
            for typecode in np.typecodes['AllFloat']:
                ftype = np.obj2sctype(typecode)
                if np.dtype(ftype).kind == 'f':
                    # Get some extreme values for the type
                    fi = np.finfo(ftype)
                    ft_tiny = fi.tiny
                    ft_max = fi.max
                    ft_eps = fi.eps
                    underflow = 'underflow'
                    divbyzero = 'divide by zero'
                else:
                    # 'c', complex, corresponding real dtype
                    rtype = type(ftype(0).real)
                    fi = np.finfo(rtype)
                    ft_tiny = ftype(fi.tiny)
                    ft_max = ftype(fi.max)
                    ft_eps = ftype(fi.eps)
                    # The complex types raise different exceptions
                    underflow = ''
                    divbyzero = ''
                overflow = 'overflow'
                invalid = 'invalid'
                self.assert_raises_fpe(underflow,
                        lambda a, b: a/b, ft_tiny, ft_max)
                self.assert_raises_fpe(underflow,
                        lambda a, b: a*b, ft_tiny, ft_tiny)
                self.assert_raises_fpe(overflow,
                        lambda a, b: a*b, ft_max, ftype(2))
                self.assert_raises_fpe(overflow,
                        lambda a, b: a/b, ft_max, ftype(0.5))
                self.assert_raises_fpe(overflow,
                        lambda a, b: a+b, ft_max, ft_max*ft_eps)
                self.assert_raises_fpe(overflow,
                        lambda a, b: a-b, -ft_max, ft_max*ft_eps)
                self.assert_raises_fpe(overflow,
                        np.power, ftype(2), ftype(2**fi.nexp))
                self.assert_raises_fpe(divbyzero,
                        lambda a, b: a/b, ftype(1), ftype(0))
                self.assert_raises_fpe(invalid,
                        lambda a, b: a/b, ftype(np.inf), ftype(np.inf))
                self.assert_raises_fpe(invalid,
                        lambda a, b: a/b, ftype(0), ftype(0))
                self.assert_raises_fpe(invalid,
                        lambda a, b: a-b, ftype(np.inf), ftype(np.inf))
                self.assert_raises_fpe(invalid,
                        lambda a, b: a+b, ftype(np.inf), ftype(-np.inf))
                self.assert_raises_fpe(invalid,
                        lambda a, b: a*b, ftype(0), ftype(np.inf))

    def test_warnings(self):
        """With all='warn', each FP condition emits one RuntimeWarning."""
        # test warning code path
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            with np.errstate(all="warn"):
                np.divide(1, 0.)
                self.assertEqual(len(w), 1)
                self.assertTrue("divide by zero" in str(w[0].message))
                np.array(1e300) * np.array(1e300)
                self.assertEqual(len(w), 2)
                self.assertTrue("overflow" in str(w[-1].message))
                np.array(np.inf) - np.array(np.inf)
                self.assertEqual(len(w), 3)
                self.assertTrue("invalid value" in str(w[-1].message))
                np.array(1e-300) * np.array(1e-300)
                self.assertEqual(len(w), 4)
                self.assertTrue("underflow" in str(w[-1].message))
class TestTypes(TestCase):
    """Type-promotion rules: scalar/scalar, scalar/array, string widths,
    endianness normalisation and the can_cast hierarchy."""

    def check_promotion_cases(self, promote_func):
        """Run the shared promotion table against *promote_func*.

        promote_func(a, b) must return the dtype the pair promotes to;
        used both for np.result_type and for actual ufunc results.
        """
        # tests that the scalars get coerced correctly.
        b = np.bool_(0)
        i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0)
        u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0)
        f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0)
        c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0)
        # coercion within the same kind
        assert_equal(promote_func(i8, i16), np.dtype(np.int16))
        assert_equal(promote_func(i32, i8), np.dtype(np.int32))
        assert_equal(promote_func(i16, i64), np.dtype(np.int64))
        assert_equal(promote_func(u8, u32), np.dtype(np.uint32))
        assert_equal(promote_func(f32, f64), np.dtype(np.float64))
        assert_equal(promote_func(fld, f32), np.dtype(np.longdouble))
        assert_equal(promote_func(f64, fld), np.dtype(np.longdouble))
        assert_equal(promote_func(c128, c64), np.dtype(np.complex128))
        assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble))
        assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble))
        # coercion between kinds
        assert_equal(promote_func(b, i32), np.dtype(np.int32))
        assert_equal(promote_func(b, u8), np.dtype(np.uint8))
        assert_equal(promote_func(i8, u8), np.dtype(np.int16))
        assert_equal(promote_func(u8, i32), np.dtype(np.int32))
        assert_equal(promote_func(i64, u32), np.dtype(np.int64))
        assert_equal(promote_func(u64, i32), np.dtype(np.float64))
        assert_equal(promote_func(i32, f32), np.dtype(np.float64))
        assert_equal(promote_func(i64, f32), np.dtype(np.float64))
        assert_equal(promote_func(f32, i16), np.dtype(np.float32))
        assert_equal(promote_func(f32, u32), np.dtype(np.float64))
        assert_equal(promote_func(f32, c64), np.dtype(np.complex64))
        assert_equal(promote_func(c128, f32), np.dtype(np.complex128))
        assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble))
        # coercion between scalars and 1-D arrays
        assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8))
        assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8))
        assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32))
        assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32))
        assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8))
        assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32))
        assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32))
        assert_equal(promote_func(np.int32(-1), np.array([u64])),
                     np.dtype(np.float64))
        assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32))
        assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32))
        assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64))
        assert_equal(promote_func(fld, np.array([c64])),
                     np.dtype(np.complex64))
        assert_equal(promote_func(c64, np.array([f64])),
                     np.dtype(np.complex128))
        assert_equal(promote_func(np.complex64(3j), np.array([f64])),
                     np.dtype(np.complex128))
        # coercion between scalars and 1-D arrays, where
        # the scalar has greater kind than the array
        assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64))
        assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64))
        assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64))
        assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64))
        assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64))
        # uint and int are treated as the same "kind" for
        # the purposes of array-scalar promotion.
        assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16))
        # float and complex are treated as the same "kind" for
        # the purposes of array-scalar promotion, so that you can do
        # (0j + float32array) to get a complex64 array instead of
        # a complex128 array.
        assert_equal(promote_func(np.array([f32]), c128),
                     np.dtype(np.complex64))

    def test_coercion(self):
        """Actual ufunc results follow the promotion table; scalars must not
        narrow float/complex arrays below the scalar's own precision."""
        def res_type(a, b):
            return np.add(a, b).dtype
        self.check_promotion_cases(res_type)
        # Use-case: float/complex scalar * bool/int8 array
        # shouldn't narrow the float/complex type
        for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:
            b = 1.234 * a
            assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
            b = np.longdouble(1.234) * a
            assert_equal(b.dtype, np.dtype(np.longdouble),
                         "array type %s" % a.dtype)
            b = np.float64(1.234) * a
            assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
            b = np.float32(1.234) * a
            assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
            b = np.float16(1.234) * a
            assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
            b = 1.234j * a
            assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
            b = np.clongdouble(1.234j) * a
            assert_equal(b.dtype, np.dtype(np.clongdouble),
                         "array type %s" % a.dtype)
            b = np.complex128(1.234j) * a
            assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
            b = np.complex64(1.234j) * a
            assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
        # The following use-case is problematic, and to resolve its
        # tricky side-effects requires more changes.
        #
        # Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
        # a float32, shouldn't promote to float64
        #
        # a = np.array([1.0, 1.5], dtype=np.float32)
        # t = np.array([True, False])
        # b = t*a
        # assert_equal(b, [1.0, 0.0])
        # assert_equal(b.dtype, np.dtype('f4'))
        # b = (1-t)*a
        # assert_equal(b, [0.0, 1.5])
        # assert_equal(b.dtype, np.dtype('f4'))
        #
        # Probably ~t (bitwise negation) is more proper to use here,
        # but this is arguably less intuitive to understand at a glance, and
        # would fail if 't' is actually an integer array instead of boolean:
        #
        # b = (~t)*a
        # assert_equal(b, [0.0, 1.5])
        # assert_equal(b.dtype, np.dtype('f4'))

    def test_result_type(self):
        """np.result_type follows the shared table; None maps to dtype(None)."""
        self.check_promotion_cases(np.result_type)
        assert_(np.result_type(None) == np.dtype(None))

    def test_promote_types_endian(self):
        # promote_types should always return native-endian types
        assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
        assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
        assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
        assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
        assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
        assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
        assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
        assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
        assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8'))
        assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8'))
        assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))
        assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))
        assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))
        assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))
        assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
        assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))

    def test_promote_types_strings(self):
        """Numeric + unsized S/U promotes to a string wide enough for any
        value of the numeric type (e.g. i8 -> 'S21': 19 digits + sign + NUL)."""
        assert_equal(np.promote_types('bool', 'S'), np.dtype('S5'))
        assert_equal(np.promote_types('b', 'S'), np.dtype('S4'))
        assert_equal(np.promote_types('u1', 'S'), np.dtype('S3'))
        assert_equal(np.promote_types('u2', 'S'), np.dtype('S5'))
        assert_equal(np.promote_types('u4', 'S'), np.dtype('S10'))
        assert_equal(np.promote_types('u8', 'S'), np.dtype('S20'))
        assert_equal(np.promote_types('i1', 'S'), np.dtype('S4'))
        assert_equal(np.promote_types('i2', 'S'), np.dtype('S6'))
        assert_equal(np.promote_types('i4', 'S'), np.dtype('S11'))
        assert_equal(np.promote_types('i8', 'S'), np.dtype('S21'))
        assert_equal(np.promote_types('bool', 'U'), np.dtype('U5'))
        assert_equal(np.promote_types('b', 'U'), np.dtype('U4'))
        assert_equal(np.promote_types('u1', 'U'), np.dtype('U3'))
        assert_equal(np.promote_types('u2', 'U'), np.dtype('U5'))
        assert_equal(np.promote_types('u4', 'U'), np.dtype('U10'))
        assert_equal(np.promote_types('u8', 'U'), np.dtype('U20'))
        assert_equal(np.promote_types('i1', 'U'), np.dtype('U4'))
        assert_equal(np.promote_types('i2', 'U'), np.dtype('U6'))
        assert_equal(np.promote_types('i4', 'U'), np.dtype('U11'))
        assert_equal(np.promote_types('i8', 'U'), np.dtype('U21'))
        assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5'))
        assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30'))
        assert_equal(np.promote_types('b', 'S1'), np.dtype('S4'))
        assert_equal(np.promote_types('b', 'S30'), np.dtype('S30'))
        assert_equal(np.promote_types('u1', 'S1'), np.dtype('S3'))
        assert_equal(np.promote_types('u1', 'S30'), np.dtype('S30'))
        assert_equal(np.promote_types('u2', 'S1'), np.dtype('S5'))
        assert_equal(np.promote_types('u2', 'S30'), np.dtype('S30'))
        assert_equal(np.promote_types('u4', 'S1'), np.dtype('S10'))
        assert_equal(np.promote_types('u4', 'S30'), np.dtype('S30'))
        assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20'))
        assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30'))

    def test_can_cast(self):
        """np.can_cast across the no/equiv/safe/same_kind/unsafe hierarchy,
        plus numeric -> string width requirements."""
        assert_(np.can_cast(np.int32, np.int64))
        assert_(np.can_cast(np.float64, np.complex))
        assert_(not np.can_cast(np.complex, np.float))
        assert_(np.can_cast('i8', 'f8'))
        assert_(not np.can_cast('i8', 'f4'))
        assert_(np.can_cast('i4', 'S11'))
        assert_(np.can_cast('i8', 'i8', 'no'))
        assert_(not np.can_cast('<i8', '>i8', 'no'))
        assert_(np.can_cast('<i8', '>i8', 'equiv'))
        assert_(not np.can_cast('<i4', '>i8', 'equiv'))
        assert_(np.can_cast('<i4', '>i8', 'safe'))
        assert_(not np.can_cast('<i8', '>i4', 'safe'))
        assert_(np.can_cast('<i8', '>i4', 'same_kind'))
        assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
        assert_(np.can_cast('<i8', '>u4', 'unsafe'))
        assert_(np.can_cast('bool', 'S5'))
        assert_(not np.can_cast('bool', 'S4'))
        assert_(np.can_cast('b', 'S4'))
        assert_(not np.can_cast('b', 'S3'))
        assert_(np.can_cast('u1', 'S3'))
        assert_(not np.can_cast('u1', 'S2'))
        assert_(np.can_cast('u2', 'S5'))
        assert_(not np.can_cast('u2', 'S4'))
        assert_(np.can_cast('u4', 'S10'))
        assert_(not np.can_cast('u4', 'S9'))
        assert_(np.can_cast('u8', 'S20'))
        assert_(not np.can_cast('u8', 'S19'))
        assert_(np.can_cast('i1', 'S4'))
        assert_(not np.can_cast('i1', 'S3'))
        assert_(np.can_cast('i2', 'S6'))
        assert_(not np.can_cast('i2', 'S5'))
        assert_(np.can_cast('i4', 'S11'))
        assert_(not np.can_cast('i4', 'S10'))
        assert_(np.can_cast('i8', 'S21'))
        assert_(not np.can_cast('i8', 'S20'))
        assert_(np.can_cast('bool', 'S5'))
        assert_(not np.can_cast('bool', 'S4'))
        assert_(np.can_cast('b', 'U4'))
        assert_(not np.can_cast('b', 'U3'))
        assert_(np.can_cast('u1', 'U3'))
        assert_(not np.can_cast('u1', 'U2'))
        assert_(np.can_cast('u2', 'U5'))
        assert_(not np.can_cast('u2', 'U4'))
        assert_(np.can_cast('u4', 'U10'))
        assert_(not np.can_cast('u4', 'U9'))
        assert_(np.can_cast('u8', 'U20'))
        assert_(not np.can_cast('u8', 'U19'))
        assert_(np.can_cast('i1', 'U4'))
        assert_(not np.can_cast('i1', 'U3'))
        assert_(np.can_cast('i2', 'U6'))
        assert_(not np.can_cast('i2', 'U5'))
        assert_(np.can_cast('i4', 'U11'))
        assert_(not np.can_cast('i4', 'U10'))
        assert_(np.can_cast('i8', 'U21'))
        assert_(not np.can_cast('i8', 'U20'))
        # None is not a valid dtype argument in either position.
        assert_raises(TypeError, np.can_cast, 'i4', None)
        assert_raises(TypeError, np.can_cast, None, 'i4')
# Custom exception class to test exception propagation in fromiter
class NIterError(Exception):
    """Raised by test iterators to verify exception propagation in fromiter."""
    pass
class TestFromiter(TestCase):
def makegen(self):
for x in range(24):
yield x**2
def test_types(self):
ai32 = np.fromiter(self.makegen(), np.int32)
ai64 = np.fromiter(self.makegen(), np.int64)
af = np.fromiter(self.makegen(), float)
self.assertTrue(ai32.dtype == np.dtype(np.int32))
self.assertTrue(ai64.dtype == np.dtype(np.int64))
self.assertTrue(af.dtype == np.dtype(float))
def test_lengths(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
self.assertTrue(len(a) == len(expected))
self.assertTrue(len(a20) == 20)
self.assertRaises(ValueError, np.fromiter,
self.makegen(), int, len(expected) + 10)
def test_values(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
self.assertTrue(np.alltrue(a == expected, axis=0))
self.assertTrue(np.alltrue(a20 == expected[:20], axis=0))
def load_data(self, n, eindex):
# Utility method for the issue 2592 tests.
# Raise an exception at the desired index in the iterator.
for e in range(n):
if e == eindex:
raise NIterError('error at index %s' % eindex)
yield e
def test_2592(self):
# Test iteration exceptions are correctly raised.
count, eindex = 10, 5
self.assertRaises(NIterError, np.fromiter,
self.load_data(count, eindex), dtype=int, count=count)
def test_2592_edge(self):
# Test iter. exceptions, edge case (exception at end of iterator).
count = 10
eindex = count-1
self.assertRaises(NIterError, np.fromiter,
self.load_data(count, eindex), dtype=int, count=count)
class TestNonzero(TestCase):
    """np.nonzero / np.count_nonzero on 0-d, 1-D, 2-D, structured and sparse
    boolean inputs, plus subclass return-type guarantees."""

    def test_nonzero_trivial(self):
        """Empty arrays and 0-d scalars: count and index tuple shapes."""
        assert_equal(np.count_nonzero(np.array([])), 0)
        assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)
        assert_equal(np.nonzero(np.array([])), ([],))
        assert_equal(np.count_nonzero(np.array(0)), 0)
        assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0)
        assert_equal(np.nonzero(np.array(0)), ([],))
        assert_equal(np.count_nonzero(np.array(1)), 1)
        assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1)
        assert_equal(np.nonzero(np.array(1)), ([0],))

    def test_nonzero_onedim(self):
        """1-D plain and structured (per-field) nonzero."""
        x = np.array([1, 0, 2, -1, 0, 0, 8])
        assert_equal(np.count_nonzero(x), 4)
        assert_equal(np.count_nonzero(x), 4)
        assert_equal(np.nonzero(x), ([0, 2, 3, 6],))
        x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)],
                     dtype=[('a', 'i4'), ('b', 'i2')])
        assert_equal(np.count_nonzero(x['a']), 3)
        assert_equal(np.count_nonzero(x['b']), 4)
        assert_equal(np.nonzero(x['a']), ([0, 2, 3],))
        assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],))

    def test_nonzero_twodim(self):
        """2-D nonzero returns per-axis index arrays; also checks the
        unaligned path via the transposed structured fields."""
        x = np.array([[0, 1, 0], [2, 0, 3]])
        assert_equal(np.count_nonzero(x), 3)
        assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))
        x = np.eye(3)
        assert_equal(np.count_nonzero(x), 3)
        assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))
        x = np.array([[(0, 1), (0, 0), (1, 11)],
                   [(1, 1), (1, 0), (0, 0)],
                   [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')])
        assert_equal(np.count_nonzero(x['a']), 4)
        assert_equal(np.count_nonzero(x['b']), 5)
        assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1]))
        assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2]))
        assert_(not x['a'].T.flags.aligned)
        assert_equal(np.count_nonzero(x['a'].T), 4)
        assert_equal(np.count_nonzero(x['b'].T), 5)
        assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0]))
        assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2]))

    def test_sparse(self):
        # test special sparse condition boolean code path
        for i in range(20):
            c = np.zeros(200, dtype=np.bool)
            c[i::20] = True
            assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20))
            c = np.zeros(400, dtype=np.bool)
            c[10 + i:20 + i] = True
            c[20 + i*2] = True
            assert_equal(np.nonzero(c)[0],
                         np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2])))

    def test_return_type(self):
        """nonzero always returns writeable base-class ndarrays, even for
        ndarray subclasses and for every dimensionality."""
        class C(np.ndarray):
            pass
        for view in (C, np.ndarray):
            for nd in range(1, 4):
                shape = tuple(range(2, 2+nd))
                x = np.arange(np.prod(shape)).reshape(shape).view(view)
                for nzx in (np.nonzero(x), x.nonzero()):
                    for nzx_i in nzx:
                        assert_(type(nzx_i) is np.ndarray)
                        assert_(nzx_i.flags.writeable)
        # Tests that the array method
        # call works
    def test_array_method(self):
        """ndarray.nonzero (method form) matches the expected index tuple."""
        m = np.array([[1, 0, 0], [4, 0, 6]])
        tgt = [[0, 1, 1], [0, 0, 2]]
        assert_equal(m.nonzero(), tgt)
class TestIndex(TestCase):
    """Boolean fancy-indexing behaviour."""

    def test_boolean(self):
        """Masking axes 1,2 of a 3-D array with a 2-D bool mask equals
        masking each leading slice individually."""
        arr = rand(3, 5, 8)
        vals = rand(5, 8)
        rows = randint(0, 5, size=15)
        cols = randint(0, 8, size=15)
        vals[rows, cols] = -vals[rows, cols]
        mask = vals > 0
        per_slice = np.array([arr[0][mask], arr[1][mask], arr[2][mask]])
        assert_((per_slice == arr[:, mask]).all())

    def test_boolean_edgecase(self):
        """Indexing an empty int32 array with an empty mask preserves dtype."""
        src = np.array([], dtype='int32')
        mask = np.array([], dtype='bool')
        picked = src[mask]
        assert_equal(picked, [])
        assert_equal(picked.dtype, np.dtype('int32'))
class TestBinaryRepr(TestCase):
    """np.binary_repr for zero, signed values and explicit widths."""

    def test_zero(self):
        assert_equal(np.binary_repr(0), '0')

    def test_positive(self):
        cases = ((10, '1010'),
                 (12522, '11000011101010'),
                 (10736848, '101000111101010011010000'))
        for value, expected in cases:
            assert_equal(np.binary_repr(value), expected)

    def test_negative(self):
        # Without a width, negatives keep a leading minus sign.
        cases = ((-1, '-1'),
                 (-10, '-1010'),
                 (-12522, '-11000011101010'),
                 (-10736848, '-101000111101010011010000'))
        for value, expected in cases:
            assert_equal(np.binary_repr(value), expected)

    def test_sufficient_width(self):
        # width pads non-negatives and two's-complements negatives.
        cases = ((0, 5, '00000'),
                 (10, 7, '0001010'),
                 (-5, 7, '1111011'))
        for value, width, expected in cases:
            assert_equal(np.binary_repr(value, width=width), expected)
class TestBaseRepr(TestCase):
    """np.base_repr across bases, zero-padding and the valid base range."""

    def test_base3(self):
        assert_equal(np.base_repr(3**5, 3), '100000')

    def test_positive(self):
        for args, expected in (((12, 10), '12'),
                               ((12, 10, 4), '000012'),
                               ((12, 4), '30'),
                               ((3731624803700888, 36), '10QR0ROFCEW')):
            assert_equal(np.base_repr(*args), expected)

    def test_negative(self):
        for args, expected in (((-12, 10), '-12'),
                               ((-12, 10, 4), '-000012'),
                               ((-12, 4), '-30')):
            assert_equal(np.base_repr(*args), expected)

    def test_base_range(self):
        # Only bases 2..36 are representable with digits+letters.
        for bad_base in (1, 37):
            with self.assertRaises(ValueError):
                np.base_repr(1, bad_base)
class TestArrayComparisons(TestCase):
    """np.array_equal / np.array_equiv must return plain Python bools."""

    def test_array_equal(self):
        """array_equal: shape AND values must match exactly."""
        cases = (
            (np.array([1, 2]), np.array([1, 2]), True),
            (np.array([1, 2]), np.array([1, 2, 3]), False),
            (np.array([1, 2]), np.array([3, 4]), False),
            (np.array([1, 2]), np.array([1, 3]), False),
            (np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1'), True),
            (np.array([('a', 1)], dtype='S1,u4'),
             np.array([('a', 1)], dtype='S1,u4'), True),
        )
        for lhs, rhs, expected in cases:
            res = np.array_equal(lhs, rhs)
            assert_(res == expected)
            assert_(type(res) is bool)

    def test_array_equiv(self):
        """array_equiv: values must match after broadcasting the shapes."""
        cases = (
            (np.array([1, 2]), np.array([1, 2]), True),
            (np.array([1, 2]), np.array([1, 2, 3]), False),
            (np.array([1, 2]), np.array([3, 4]), False),
            (np.array([1, 2]), np.array([1, 3]), False),
            (np.array([1, 1]), np.array([1]), True),
            (np.array([1, 1]), np.array([[1], [1]]), True),
            (np.array([1, 2]), np.array([2]), False),
            (np.array([1, 2]), np.array([[1], [2]]), False),
            (np.array([1, 2]),
             np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), False),
        )
        for lhs, rhs, expected in cases:
            res = np.array_equiv(lhs, rhs)
            assert_(res == expected)
            assert_(type(res) is bool)
def assert_array_strict_equal(x, y):
    """Assert x and y are equal in values AND memory layout.

    Stricter than assert_array_equal: also requires matching ndarray flags
    (ownership, writeability, contiguity) and matching dtype byte order.
    On platforms where full flag equality is unreliable (32-bit alignment,
    win32), only the individual flags are compared.
    """
    assert_array_equal(x, y)
    # Check flags, 32 bit arches typically don't provide 16 byte alignment
    if ((x.dtype.alignment <= 8 or
            np.intp().dtype.itemsize != 4) and
            sys.platform != 'win32'):
        assert_(x.flags == y.flags)
    else:
        assert_(x.flags.owndata == y.flags.owndata)
        assert_(x.flags.writeable == y.flags.writeable)
        assert_(x.flags.c_contiguous == y.flags.c_contiguous)
        assert_(x.flags.f_contiguous == y.flags.f_contiguous)
        assert_(x.flags.updateifcopy == y.flags.updateifcopy)
    # check endianness
    assert_(x.dtype.isnative == y.dtype.isnative)
class TestClip(TestCase):
def setUp(self):
self.nr = 5
self.nc = 3
def fastclip(self, a, m, M, out=None):
if out is None:
return a.clip(m, M)
else:
return a.clip(m, M, out)
def clip(self, a, m, M, out=None):
# use slow-clip
selector = np.less(a, m) + 2*np.greater(a, M)
return selector.choose((a, m, M), out=out)
# Handy functions
    def _generate_data(self, n, m):
        """Return an (n, m) array of standard-normal doubles."""
        return randn(n, m)
    def _generate_data_complex(self, n, m):
        """Return an (n, m) complex array with random real and imag parts."""
        return randn(n, m) + 1.j * rand(n, m)
    def _generate_flt_data(self, n, m):
        """Return an (n, m) array of standard-normal float32 values."""
        return (randn(n, m)).astype(np.float32)
def _neg_byteorder(self, a):
a = np.asarray(a)
if sys.byteorder == 'little':
a = a.astype(a.dtype.newbyteorder('>'))
else:
a = a.astype(a.dtype.newbyteorder('<'))
return a
    def _generate_non_native_data(self, n, m):
        """Return an (n, m) normal array stored in non-native byte order."""
        data = randn(n, m)
        data = self._neg_byteorder(data)
        assert_(not data.dtype.isnative)
        return data
    def _generate_int_data(self, n, m):
        """Return an (n, m) int64 array with values in [0, 10)."""
        return (10 * rand(n, m)).astype(np.int64)
    def _generate_int32_data(self, n, m):
        """Return an (n, m) int32 array with values in [0, 10)."""
        return (10 * rand(n, m)).astype(np.int32)
# Now the real test cases
    def test_simple_double(self):
        """Fast clip matches reference clip: doubles, scalar bounds."""
        # Test native double input with scalar min/max.
        a = self._generate_data(self.nr, self.nc)
        m = 0.1
        M = 0.6
        ac = self.fastclip(a, m, M)
        act = self.clip(a, m, M)
        assert_array_strict_equal(ac, act)
    def test_simple_int(self):
        """Fast clip matches reference clip: native ints, scalar bounds."""
        # Test native int input with scalar min/max.
        a = self._generate_int_data(self.nr, self.nc)
        a = a.astype(int)
        m = -2
        M = 4
        ac = self.fastclip(a, m, M)
        act = self.clip(a, m, M)
        assert_array_strict_equal(ac, act)
def test_array_double(self):
# Test native double input with array min/max.
a = self._generate_data(self.nr, self.nc)
m = np.zeros(a.shape)
M = m + 0.5
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_nonnative(self):
# Test non native double input with scalar min/max.
# Test native double input with non native double scalar min/max.
a = self._generate_non_native_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
# Test native double input with non native double scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = self._neg_byteorder(0.6)
assert_(not M.dtype.isnative)
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
def test_simple_complex(self):
# Test native complex input with native double scalar min/max.
# Test native input with complex double scalar min/max.
a = 3 * self._generate_data_complex(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
# Test native input with complex double scalar min/max.
a = 3 * self._generate_data(self.nr, self.nc)
m = -0.5 + 1.j
M = 1. + 2.j
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_clip_complex(self):
# Address Issue gh-5354 for clipping complex arrays
# Test native complex input without explicit min/max
# ie, either min=None or max=None
a = np.ones(10, dtype=np.complex)
m = a.min()
M = a.max()
am = self.fastclip(a, m, None)
aM = self.fastclip(a, None, M)
assert_array_strict_equal(am, a)
assert_array_strict_equal(aM, a)
def test_clip_non_contig(self):
# Test clip for non contiguous native input and native scalar min/max.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = self.fastclip(a, -1.6, 1.7)
act = self.clip(a, -1.6, 1.7)
assert_array_strict_equal(ac, act)
def test_simple_out(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = np.zeros(a.shape)
act = np.zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int32_inout(self):
# Test native int32 input with double min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.float64(0)
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int64_out(self):
# Test native int32 input with int32 scalar min/max and int64 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.int32(-1)
M = np.int32(1)
ac = np.zeros(a.shape, dtype=np.int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int64_inout(self):
# Test native int32 input with double array min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.zeros(a.shape, np.float64)
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int32_out(self):
# Test native double input with scalar min/max and int out.
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_inplace_01(self):
# Test native double input with array min/max in-place.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = np.zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_simple_inplace_02(self):
# Test native double input with scalar min/max in-place.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_noncontig_inplace(self):
# Test non contiguous double input with double scalar min/max in-place.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_equal(a, ac)
def test_type_cast_01(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_02(self):
# Test native int32 input with int32 scalar min/max.
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(np.int32)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_03(self):
# Test native int32 input with float64 scalar min/max.
a = self._generate_int32_data(self.nr, self.nc)
m = -2
M = 4
ac = self.fastclip(a, np.float64(m), np.float64(M))
act = self.clip(a, np.float64(m), np.float64(M))
assert_array_strict_equal(ac, act)
def test_type_cast_04(self):
# Test native int32 input with float32 scalar min/max.
a = self._generate_int32_data(self.nr, self.nc)
m = np.float32(-2)
M = np.float32(4)
act = self.fastclip(a, m, M)
ac = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_05(self):
# Test native int32 with double arrays min/max.
a = self._generate_int_data(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m * np.zeros(a.shape), M)
act = self.clip(a, m * np.zeros(a.shape), M)
assert_array_strict_equal(ac, act)
def test_type_cast_06(self):
# Test native with NON native scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = 0.5
m_s = self._neg_byteorder(m)
M = 1.
act = self.clip(a, m_s, M)
ac = self.fastclip(a, m_s, M)
assert_array_strict_equal(ac, act)
def test_type_cast_07(self):
# Test NON native with native array min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5 * np.ones(a.shape)
M = 1.
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
act = a_s.clip(m, M)
ac = self.fastclip(a_s, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_08(self):
# Test NON native with native scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 1.
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
ac = self.fastclip(a_s, m, M)
act = a_s.clip(m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_09(self):
# Test native with NON native array min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5 * np.ones(a.shape)
M = 1.
m_s = self._neg_byteorder(m)
assert_(not m_s.dtype.isnative)
ac = self.fastclip(a, m_s, M)
act = self.clip(a, m_s, M)
assert_array_strict_equal(ac, act)
def test_type_cast_10(self):
# Test native int32 with float min/max and float out for output argument.
a = self._generate_int_data(self.nr, self.nc)
b = np.zeros(a.shape, dtype=np.float32)
m = np.float32(-0.5)
M = np.float32(1)
act = self.clip(a, m, M, out=b)
ac = self.fastclip(a, m, M, out=b)
assert_array_strict_equal(ac, act)
def test_type_cast_11(self):
# Test non native with native scalar, min/max, out non native
a = self._generate_non_native_data(self.nr, self.nc)
b = a.copy()
b = b.astype(b.dtype.newbyteorder('>'))
bt = b.copy()
m = -0.5
M = 1.
self.fastclip(a, m, M, out=b)
self.clip(a, m, M, out=bt)
assert_array_strict_equal(b, bt)
def test_type_cast_12(self):
# Test native int32 input and min/max and float out
a = self._generate_int_data(self.nr, self.nc)
b = np.zeros(a.shape, dtype=np.float32)
m = np.int32(0)
M = np.int32(1)
act = self.clip(a, m, M, out=b)
ac = self.fastclip(a, m, M, out=b)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple(self):
# Test native double input with scalar min/max
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = np.zeros(a.shape)
act = np.zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple2(self):
# Test native int32 input with double min/max and int32 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.float64(0)
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple_int32(self):
# Test native int32 input with int32 scalar min/max and int64 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.int32(-1)
M = np.int32(1)
ac = np.zeros(a.shape, dtype=np.int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_array_int32(self):
# Test native int32 input with double array min/max and int32 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.zeros(a.shape, np.float64)
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_array_outint32(self):
# Test native double input with scalar min/max and int out
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_inplace_array(self):
# Test native double input with array min/max
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = np.zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_clip_inplace_simple(self):
# Test native double input with scalar min/max
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_clip_func_takes_out(self):
# Ensure that the clip() function takes an out=argument.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
a2 = np.clip(a, m, M, out=a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a2, ac)
self.assertTrue(a2 is a)
def test_clip_nan(self):
d = np.arange(7.)
assert_equal(d.clip(min=np.nan), d)
assert_equal(d.clip(max=np.nan), d)
assert_equal(d.clip(min=np.nan, max=np.nan), d)
assert_equal(d.clip(min=-2, max=np.nan), d)
assert_equal(d.clip(min=np.nan, max=10), d)
class TestAllclose(object):
    """Tests for np.allclose.

    The ``test_ip_*`` methods are nose-style parametric test factories:
    they yield ``(check_function, x, y)`` tuples, one per data pair.
    setUp/tearDown silence 'invalid' floating-point warnings for the
    NaN/inf comparisons and restore the previous error state afterwards.
    """
    rtol = 1e-5
    atol = 1e-8
    def setUp(self):
        self.olderr = np.seterr(invalid='ignore')
    def tearDown(self):
        np.seterr(**self.olderr)
    def tst_allclose(self, x, y):
        assert_(np.allclose(x, y), "%s and %s not close" % (x, y))
    def tst_not_allclose(self, x, y):
        assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y))
    def test_ip_allclose(self):
        # Parametric test factory.
        arr = np.array([100, 1000])
        aran = np.arange(125).reshape((5, 5, 5))
        atol = self.atol
        rtol = self.rtol
        data = [([1, 0], [1, 0]),
                ([atol], [0]),
                ([1], [1+rtol+atol]),
                (arr, arr + arr*rtol),
                (arr, arr + arr*rtol + atol*2),
                (aran, aran + aran*rtol),
                (np.inf, np.inf),
                (np.inf, [np.inf])]
        for (x, y) in data:
            yield (self.tst_allclose, x, y)
    def test_ip_not_allclose(self):
        # Parametric test factory.
        aran = np.arange(125).reshape((5, 5, 5))
        atol = self.atol
        rtol = self.rtol
        data = [([np.inf, 0], [1, np.inf]),
                ([np.inf, 0], [1, 0]),
                ([np.inf, np.inf], [1, np.inf]),
                ([np.inf, np.inf], [1, 0]),
                ([-np.inf, 0], [np.inf, 0]),
                ([np.nan, 0], [np.nan, 0]),
                ([atol*2], [0]),
                ([1], [1+rtol+atol*2]),
                (aran, aran + aran*atol + atol*2),
                (np.array([np.inf, 1]), np.array([0, np.inf]))]
        for (x, y) in data:
            yield (self.tst_not_allclose, x, y)
    def test_no_parameter_modification(self):
        # allclose must not mutate its arguments.
        x = np.array([np.inf, 1])
        y = np.array([0, np.inf])
        np.allclose(x, y)
        assert_array_equal(x, np.array([np.inf, 1]))
        assert_array_equal(y, np.array([0, np.inf]))
    def test_min_int(self):
        # Could make problems because of abs(min_int) == min_int
        min_int = np.iinfo(np.int_).min
        a = np.array([min_int], dtype=np.int_)
        assert_(np.allclose(a, a))
    def test_equalnan(self):
        x = np.array([1.0, np.nan])
        assert_(np.allclose(x, x, equal_nan=True))
    def test_return_class_is_ndarray(self):
        # Issue gh-6475
        # Check that allclose does not preserve subtypes
        class Foo(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)
        a = Foo([1])
        assert_(type(np.allclose(a, a)) is bool)
class TestIsclose(object):
    """Tests for np.isclose.

    ``setup`` (called explicitly by the ``test_ip_*`` factories, not by a
    runner) builds three fixture lists: pairs where every element is close,
    pairs where none are, and pairs where only some are, together with the
    expected boolean results for the partial cases.
    """
    rtol = 1e-5
    atol = 1e-8
    def setup(self):
        atol = self.atol
        rtol = self.rtol
        arr = np.array([100, 1000])
        aran = np.arange(125).reshape((5, 5, 5))
        self.all_close_tests = [
                ([1, 0], [1, 0]),
                ([atol], [0]),
                ([1], [1 + rtol + atol]),
                (arr, arr + arr*rtol),
                (arr, arr + arr*rtol + atol),
                (aran, aran + aran*rtol),
                (np.inf, np.inf),
                (np.inf, [np.inf]),
                ([np.inf, -np.inf], [np.inf, -np.inf]),
                ]
        self.none_close_tests = [
                ([np.inf, 0], [1, np.inf]),
                ([np.inf, -np.inf], [1, 0]),
                ([np.inf, np.inf], [1, -np.inf]),
                ([np.inf, np.inf], [1, 0]),
                ([np.nan, 0], [np.nan, -np.inf]),
                ([atol*2], [0]),
                ([1], [1 + rtol + atol*2]),
                (aran, aran + rtol*1.1*aran + atol*1.1),
                (np.array([np.inf, 1]), np.array([0, np.inf])),
                ]
        self.some_close_tests = [
                ([np.inf, 0], [np.inf, atol*2]),
                ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]),
                (np.arange(3), [0, 1, 2.1]),
                (np.nan, [np.nan, np.nan, np.nan]),
                ([0], [atol, np.inf, -np.inf, np.nan]),
                (0, [atol, np.inf, -np.inf, np.nan]),
                ]
        self.some_close_results = [
                [True, False],
                [True, False, False],
                [True, True, False],
                [False, False, False],
                [True, False, False, False],
                [True, False, False, False],
                ]
    def test_ip_isclose(self):
        self.setup()
        tests = self.some_close_tests
        results = self.some_close_results
        for (x, y), result in zip(tests, results):
            yield (assert_array_equal, np.isclose(x, y), result)
    def tst_all_isclose(self, x, y):
        assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y))
    def tst_none_isclose(self, x, y):
        msg = "%s and %s shouldn't be close"
        assert_(not np.any(np.isclose(x, y)), msg % (x, y))
    def tst_isclose_allclose(self, x, y):
        # isclose reduced with all() must agree with allclose.
        msg = "isclose.all() and allclose aren't same for %s and %s"
        msg2 = "isclose and allclose aren't same for %s and %s"
        if np.isscalar(x) and np.isscalar(y):
            assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y))
        else:
            assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y))
    def test_ip_all_isclose(self):
        self.setup()
        for (x, y) in self.all_close_tests:
            yield (self.tst_all_isclose, x, y)
    def test_ip_none_isclose(self):
        self.setup()
        for (x, y) in self.none_close_tests:
            yield (self.tst_none_isclose, x, y)
    def test_ip_isclose_allclose(self):
        self.setup()
        tests = (self.all_close_tests + self.none_close_tests +
                 self.some_close_tests)
        for (x, y) in tests:
            yield (self.tst_isclose_allclose, x, y)
    def test_equal_nan(self):
        assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True])
        arr = np.array([1.0, np.nan])
        assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True])
    def test_masked_arrays(self):
        # Make sure to test the output type when arguments are interchanged.
        x = np.ma.masked_where([True, True, False], np.arange(3))
        assert_(type(x) is type(np.isclose(2, x)))
        assert_(type(x) is type(np.isclose(x, 2)))
        x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan])
        assert_(type(x) is type(np.isclose(np.inf, x)))
        assert_(type(x) is type(np.isclose(x, np.inf)))
        x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
        y = np.isclose(np.nan, x, equal_nan=True)
        assert_(type(x) is type(y))
        # Ensure that the mask isn't modified...
        assert_array_equal([True, True, False], y.mask)
        y = np.isclose(x, np.nan, equal_nan=True)
        assert_(type(x) is type(y))
        # Ensure that the mask isn't modified...
        assert_array_equal([True, True, False], y.mask)
        x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
        y = np.isclose(x, x, equal_nan=True)
        assert_(type(x) is type(y))
        # Ensure that the mask isn't modified...
        assert_array_equal([True, True, False], y.mask)
    def test_scalar_return(self):
        assert_(np.isscalar(np.isclose(1, 1)))
    def test_no_parameter_modification(self):
        # isclose must not mutate its arguments.
        x = np.array([np.inf, 1])
        y = np.array([0, np.inf])
        np.isclose(x, y)
        assert_array_equal(x, np.array([np.inf, 1]))
        assert_array_equal(y, np.array([0, np.inf]))
    def test_non_finite_scalar(self):
        # GH7014, when two scalars are compared the output should also be a
        # scalar
        assert_(np.isclose(np.inf, -np.inf) is False)
        assert_(np.isclose(0, np.inf) is False)
        assert_(type(np.isclose(0, np.inf)) is bool)
class TestStdVar(TestCase):
    """Sanity checks for np.var/np.std, including ddof and out= handling."""

    def setUp(self):
        # Alternating +/-1: mean is 0, population variance is exactly 1.
        self.A = np.array([1, -1, 1, -1])
        self.real_var = 1

    def test_basic(self):
        # std() squared must agree with var().
        assert_almost_equal(np.var(self.A), self.real_var)
        assert_almost_equal(np.std(self.A)**2, self.real_var)

    def test_scalars(self):
        # A 0-d input has no spread at all.
        assert_equal(np.var(1), 0)
        assert_equal(np.std(1), 0)

    def test_ddof1(self):
        # ddof=1 rescales by n/(n-1) (Bessel's correction).
        n = len(self.A)
        expected = self.real_var * n / float(n - 1)
        assert_almost_equal(np.var(self.A, ddof=1), expected)
        assert_almost_equal(np.std(self.A, ddof=1)**2, expected)

    def test_ddof2(self):
        # ddof=2 rescales by n/(n-2).
        n = len(self.A)
        expected = self.real_var * n / float(n - 2)
        assert_almost_equal(np.var(self.A, ddof=2), expected)
        assert_almost_equal(np.std(self.A, ddof=2)**2, expected)

    def test_out_scalar(self):
        # std/var/mean must all write into and return a 0-d out array.
        d = np.arange(10)
        out = np.array(0.)
        for reducer in (np.std, np.var, np.mean):
            r = reducer(d, out=out)
            assert_(r is out)
            assert_array_equal(r, out)
class TestStdVarComplex(TestCase):
    """np.var/np.std on complex-valued input."""

    def test_basic(self):
        # Fourth roots of unity: zero mean, unit variance.
        data = np.array([1, 1.j, -1, -1.j])
        expected = 1
        assert_almost_equal(np.var(data), expected)
        assert_almost_equal(np.std(data)**2, expected)

    def test_scalars(self):
        # A complex 0-d input also has zero spread.
        assert_equal(np.var(1j), 0)
        assert_equal(np.std(1j), 0)
class TestCreationFuncs(TestCase):
    """Test np.ones, np.zeros, np.empty and np.full.

    ``check_function`` sweeps sizes, ranks, memory orders, dtype kinds and
    itemsizes, checking the resulting array's dtype, contiguity flag and
    (for np.full) fill value.
    """
    def setUp(self):
        # dtype kind characters; combined below with itemsizes 1..256.
        self.dtypes = ('b', 'i', 'u', 'f', 'c', 'S', 'a', 'U', 'V')
        # order char -> name of the flags attribute it must set.
        self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'}
        self.ndims = 10
    def check_function(self, func, fill_value=None):
        par = (
            (0, 1, 2),
            range(self.ndims),
            self.orders,
            self.dtypes,
            2**np.arange(9)
        )
        fill_kwarg = {}
        if fill_value is not None:
            fill_kwarg = {'fill_value': fill_value}
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            # `dt_char`/`nbytes` (not `type`/`bytes`) so builtins aren't shadowed.
            for size, ndims, order, dt_char, nbytes in itertools.product(*par):
                shape = ndims * [size]
                try:
                    dtype = np.dtype('{0}{1}'.format(dt_char, nbytes))
                except TypeError:  # dtype combination does not exist
                    continue
                else:
                    # do not fill void type
                    if fill_value is not None and dt_char in 'V':
                        continue
                    arr = func(shape, order=order, dtype=dtype,
                               **fill_kwarg)
                    assert_(arr.dtype == dtype)
                    assert_(getattr(arr.flags, self.orders[order]))
                    if fill_value is not None:
                        if dtype.str.startswith('|S'):
                            val = str(fill_value)
                        else:
                            val = fill_value
                        assert_equal(arr, dtype.type(val))
    def test_zeros(self):
        self.check_function(np.zeros)
    def test_ones(self):
        # BUG FIX: this previously checked np.zeros a second time, leaving
        # np.ones completely untested.
        self.check_function(np.ones)
    def test_empty(self):
        self.check_function(np.empty)
    def test_filled(self):
        self.check_function(np.full, 0)
        self.check_function(np.full, 1)
    def test_for_reference_leak(self):
        # Make sure we have an object for reference
        dim = 1
        beg = sys.getrefcount(dim)
        np.zeros([dim]*10)
        assert_(sys.getrefcount(dim) == beg)
        np.ones([dim]*10)
        assert_(sys.getrefcount(dim) == beg)
        np.empty([dim]*10)
        assert_(sys.getrefcount(dim) == beg)
        np.full([dim]*10, 0)
        assert_(sys.getrefcount(dim) == beg)
class TestLikeFuncs(TestCase):
    '''Test ones_like, zeros_like, empty_like and full_like.

    ``self.data`` pairs prototype arrays of various ranks and memory
    layouts with an optional override dtype; ``check_like_function``
    verifies shape, strides, contiguity, dtype, fill value and the
    ``subok`` parameter for each order ('K' default, 'C', 'F', 'A').
    '''
    def setUp(self):
        self.data = [
                # Array scalars
                (np.array(3.), None),
                (np.array(3), 'f8'),
                # 1D arrays
                (np.arange(6, dtype='f4'), None),
                (np.arange(6), 'c16'),
                # 2D C-layout arrays
                (np.arange(6).reshape(2, 3), None),
                (np.arange(6).reshape(3, 2), 'i1'),
                # 2D F-layout arrays
                (np.arange(6).reshape((2, 3), order='F'), None),
                (np.arange(6).reshape((3, 2), order='F'), 'i1'),
                # 3D C-layout arrays
                (np.arange(24).reshape(2, 3, 4), None),
                (np.arange(24).reshape(4, 3, 2), 'f4'),
                # 3D F-layout arrays
                (np.arange(24).reshape((2, 3, 4), order='F'), None),
                (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'),
                # 3D non-C/F-layout arrays
                (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
                (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
                     ]
    def compare_array_value(self, dz, value, fill_value):
        # Check every element of dz equals `value` (cast to dz's dtype when
        # fill_value is True); casts that overflow are skipped.
        if value is not None:
            if fill_value:
                try:
                    z = dz.dtype.type(value)
                except OverflowError:
                    pass
                else:
                    assert_(np.all(dz == z))
            else:
                assert_(np.all(dz == value))
    def check_like_function(self, like_function, value, fill_value=False):
        if fill_value:
            fill_kwarg = {'fill_value': value}
        else:
            fill_kwarg = {}
        for d, dtype in self.data:
            # default (K) order, dtype
            dz = like_function(d, dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            # Element strides (bytes / itemsize) must match the prototype's.
            assert_equal(np.array(dz.strides)*d.dtype.itemsize,
                         np.array(d.strides)*dz.dtype.itemsize)
            assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous)
            assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)
            # C order, default dtype
            dz = like_function(d, order='C', dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            assert_(dz.flags.c_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)
            # F order, default dtype
            dz = like_function(d, order='F', dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            assert_(dz.flags.f_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)
            # A order
            dz = like_function(d, order='A', dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            # 'A' picks F order only when the prototype is F-contiguous.
            if d.flags.f_contiguous:
                assert_(dz.flags.f_contiguous)
            else:
                assert_(dz.flags.c_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)
        # Test the 'subok' parameter
        a = np.matrix([[1, 2], [3, 4]])
        b = like_function(a, **fill_kwarg)
        assert_(type(b) is np.matrix)
        b = like_function(a, subok=False, **fill_kwarg)
        assert_(type(b) is not np.matrix)
    def test_ones_like(self):
        self.check_like_function(np.ones_like, 1)
    def test_zeros_like(self):
        self.check_like_function(np.zeros_like, 0)
    def test_empty_like(self):
        self.check_like_function(np.empty_like, None)
    def test_filled_like(self):
        self.check_like_function(np.full_like, 0, True)
        self.check_like_function(np.full_like, 1, True)
        self.check_like_function(np.full_like, 1000, True)
        self.check_like_function(np.full_like, 123.456, True)
        self.check_like_function(np.full_like, np.inf, True)
class TestCorrelate(TestCase):
    """Tests for np.correlate.

    ``_setup(dt)`` builds the input vectors (x, xs, y) and the expected
    'full'-mode correlations (z1, z1_4, z1r, z2, z2r, zs) in dtype ``dt``,
    covering both argument orders and reversed inputs.
    """
    def _setup(self, dt):
        self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
        self.xs = np.arange(1, 20)[::3]
        self.y = np.array([-1, -2, -3], dtype=dt)
        self.z1 = np.array([ -3., -8., -14., -20., -26., -14., -5.], dtype=dt)
        self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt)
        self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt)
        self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt)
        self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt)
        self.zs = np.array([-3., -14., -30., -48., -66., -84.,
                            -102., -54., -19.], dtype=dt)
    def test_float(self):
        self._setup(np.float)
        z = np.correlate(self.x, self.y, 'full')
        assert_array_almost_equal(z, self.z1)
        z = np.correlate(self.x, self.y[:-1], 'full')
        assert_array_almost_equal(z, self.z1_4)
        z = np.correlate(self.y, self.x, 'full')
        assert_array_almost_equal(z, self.z2)
        z = np.correlate(self.x[::-1], self.y, 'full')
        assert_array_almost_equal(z, self.z1r)
        z = np.correlate(self.y, self.x[::-1], 'full')
        assert_array_almost_equal(z, self.z2r)
        z = np.correlate(self.xs, self.y, 'full')
        assert_array_almost_equal(z, self.zs)
    def test_object(self):
        # Object-dtype path via Decimal values.
        self._setup(Decimal)
        z = np.correlate(self.x, self.y, 'full')
        assert_array_almost_equal(z, self.z1)
        z = np.correlate(self.y, self.x, 'full')
        assert_array_almost_equal(z, self.z2)
    def test_no_overwrite(self):
        # correlate must not modify its inputs.
        d = np.ones(100)
        k = np.ones(3)
        np.correlate(d, k)
        assert_array_equal(d, np.ones(100))
        assert_array_equal(k, np.ones(3))
    def test_complex(self):
        # The second argument is conjugated, so correlate(y, x) equals the
        # reversed conjugate of correlate(x, y).
        x = np.array([1, 2, 3, 4+1j], dtype=np.complex)
        y = np.array([-1, -2j, 3+1j], dtype=np.complex)
        r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=np.complex)
        r_z = r_z[::-1].conjugate()
        z = np.correlate(y, x, mode='full')
        assert_array_almost_equal(z, r_z)
class TestConvolve(TestCase):
    """Basic checks for np.convolve."""

    def test_object(self):
        # Convolving a long boxcar of ones with a width-3 kernel of ones
        # gives a flat interior of 3s (edges excluded).
        signal = [1.] * 100
        kernel = [1.] * 3
        interior = np.convolve(signal, kernel)[2:-2]
        assert_array_almost_equal(interior, np.full(98, 3))

    def test_no_overwrite(self):
        # convolve must not modify its inputs.
        signal = np.ones(100)
        kernel = np.ones(3)
        np.convolve(signal, kernel)
        assert_array_equal(signal, np.ones(100))
        assert_array_equal(kernel, np.ones(3))
class TestArgwhere(object):
    """Checks for np.argwhere."""

    def test_2D(self):
        # Each row of the result is the (row, col) index of a True entry.
        grid = np.arange(6).reshape((2, 3))
        expected = [[0, 2],
                    [1, 0],
                    [1, 1],
                    [1, 2]]
        assert_array_equal(np.argwhere(grid > 1), expected)

    def test_list(self):
        # Plain Python lists are accepted; nonzero truthy entries reported.
        assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
class TestStringFunction(object):
    """np.set_string_function must override repr()/str() and reset on None."""

    def test_set_string_function(self):
        a = np.array([1])
        # Exercise the repr hook first, then the str hook, restoring the
        # default (None) after each override.
        for as_repr, default in ((True, "array([1])"), (False, "[1]")):
            np.set_string_function(lambda x: "FOO", repr=as_repr)
            shown = repr(a) if as_repr else str(a)
            assert_equal(shown, "FOO")
            np.set_string_function(None, repr=as_repr)
            shown = repr(a) if as_repr else str(a)
            assert_equal(shown, default)
class TestRoll(TestCase):
    """Checks for np.roll on 1-D, 2-D and empty arrays."""

    def test_roll1d(self):
        rolled = np.roll(np.arange(10), 2)
        assert_equal(rolled, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]))

    def test_roll2d(self):
        mat = np.reshape(np.arange(10), (2, 5))
        # axis=None rolls the flattened array; axis 0/1 roll rows/columns.
        cases = [
            (None, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]])),
            (0, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])),
            (1, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])),
        ]
        for axis, expected in cases:
            assert_equal(np.roll(mat, 1, axis=axis), expected)

    def test_roll_empty(self):
        empty = np.array([])
        assert_equal(np.roll(empty, 1), np.array([]))
class TestRollaxis(TestCase):
    """Tests for np.rollaxis on a (1, 2, 3, 4) array.

    ``tgtshape`` maps every (axis, start) pair to the expected result
    shape; ``test_results`` also checks each positive/negative spelling of
    axis and start against the same table, and that the result is a view
    (does not own its data) with all original elements reachable.
    """
    # expected shape indexed by (axis, start) for array of
    # shape (1, 2, 3, 4)
    tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4),
                (0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4),
                (0, 4): (2, 3, 4, 1),
                (1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4),
                (1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4),
                (1, 4): (1, 3, 4, 2),
                (2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4),
                (2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4),
                (2, 4): (1, 2, 4, 3),
                (3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3),
                (3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4),
                (3, 4): (1, 2, 3, 4)}
    def test_exceptions(self):
        # Out-of-bounds axis or start must raise ValueError.
        a = np.arange(1*2*3*4).reshape(1, 2, 3, 4)
        assert_raises(ValueError, np.rollaxis, a, -5, 0)
        assert_raises(ValueError, np.rollaxis, a, 0, -5)
        assert_raises(ValueError, np.rollaxis, a, 4, 0)
        assert_raises(ValueError, np.rollaxis, a, 0, 5)
    def test_results(self):
        a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
        aind = np.indices(a.shape)
        assert_(a.flags['OWNDATA'])
        for (i, j) in self.tgtshape:
            # positive axis, positive start
            res = np.rollaxis(a, axis=i, start=j)
            # Recover each original element through the rolled view.
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(i, j)], str((i,j)))
            assert_(not res.flags['OWNDATA'])
            # negative axis, positive start
            ip = i + 1
            res = np.rollaxis(a, axis=-ip, start=j)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(4 - ip, j)])
            assert_(not res.flags['OWNDATA'])
            # positive axis, negative start
            jp = j + 1 if j < 4 else j
            res = np.rollaxis(a, axis=i, start=-jp)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(i, 4 - jp)])
            assert_(not res.flags['OWNDATA'])
            # negative axis, negative start
            ip = i + 1
            jp = j + 1 if j < 4 else j
            res = np.rollaxis(a, axis=-ip, start=-jp)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)])
            assert_(not res.flags['OWNDATA'])
class TestMoveaxis(TestCase):
def test_move_to_end(self):
x = np.random.randn(5, 6, 7)
for source, expected in [(0, (6, 7, 5)),
(1, (5, 7, 6)),
(2, (5, 6, 7)),
(-1, (5, 6, 7))]:
actual = np.moveaxis(x, source, -1).shape
assert_(actual, expected)
def test_move_new_position(self):
x = np.random.randn(1, 2, 3, 4)
for source, destination, expected in [
(0, 1, (2, 1, 3, 4)),
(1, 2, (1, 3, 2, 4)),
(1, -1, (1, 3, 4, 2)),
]:
actual = np.moveaxis(x, source, destination).shape
assert_(actual, expected)
def test_preserve_order(self):
x = np.zeros((1, 2, 3, 4))
for source, destination in [
(0, 0),
(3, -1),
(-1, 3),
([0, -1], [0, -1]),
([2, 0], [2, 0]),
(range(4), range(4)),
]:
actual = np.moveaxis(x, source, destination).shape
assert_(actual, (1, 2, 3, 4))
def test_move_multiples(self):
x = np.zeros((0, 1, 2, 3))
for source, destination, expected in [
([0, 1], [2, 3], (2, 3, 0, 1)),
([2, 3], [0, 1], (2, 3, 0, 1)),
([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)),
([3, 0], [1, 0], (0, 3, 1, 2)),
([0, 3], [0, 1], (0, 3, 1, 2)),
]:
actual = np.moveaxis(x, source, destination).shape
assert_(actual, expected)
def test_errors(self):
x = np.random.randn(1, 2, 3)
assert_raises_regex(ValueError, 'invalid axis .* `source`',
np.moveaxis, x, 3, 0)
assert_raises_regex(ValueError, 'invalid axis .* `source`',
np.moveaxis, x, -4, 0)
assert_raises_regex(ValueError, 'invalid axis .* `destination`',
np.moveaxis, x, 0, 5)
assert_raises_regex(ValueError, 'repeated axis in `source`',
np.moveaxis, x, [0, 0], [0, 1])
assert_raises_regex(ValueError, 'repeated axis in `destination`',
np.moveaxis, x, [0, 1], [1, 1])
assert_raises_regex(ValueError, 'must have the same number',
np.moveaxis, x, 0, [0, 1])
assert_raises_regex(ValueError, 'must have the same number',
np.moveaxis, x, [0, 1], [0])
def test_array_likes(self):
x = np.ma.zeros((1, 2, 3))
result = np.moveaxis(x, 0, 0)
assert_(x.shape, result.shape)
assert_(isinstance(result, np.ma.MaskedArray))
x = [1, 2, 3]
result = np.moveaxis(x, 0, 0)
assert_(x, list(result))
assert_(isinstance(result, np.ndarray))
class TestCross(TestCase):
    """Tests for np.cross with 2- and 3-element vectors, broadcasting and
    the axisa/axisb/axisc arguments.  The cross product is antisymmetric,
    so every case also checks ``cross(v, u) == -cross(u, v)``.
    """
    def test_2x2(self):
        # 2-vectors: result is the scalar z-component.
        u = [1, 2]
        v = [3, 4]
        z = -2
        cp = np.cross(u, v)
        assert_equal(cp, z)
        cp = np.cross(v, u)
        assert_equal(cp, -z)
    def test_2x3(self):
        # Mixed 2- and 3-vectors: the 2-vector's z-component is taken as 0.
        u = [1, 2]
        v = [3, 4, 5]
        z = np.array([10, -5, -2])
        cp = np.cross(u, v)
        assert_equal(cp, z)
        cp = np.cross(v, u)
        assert_equal(cp, -z)
    def test_3x3(self):
        u = [1, 2, 3]
        v = [4, 5, 6]
        z = np.array([-3, 6, -3])
        cp = np.cross(u, v)
        assert_equal(cp, z)
        cp = np.cross(v, u)
        assert_equal(cp, -z)
    def test_broadcasting(self):
        # Ticket #2624 (Trac #2032)
        u = np.tile([1, 2], (11, 1))
        v = np.tile([3, 4], (11, 1))
        z = -2
        assert_equal(np.cross(u, v), z)
        assert_equal(np.cross(v, u), -z)
        assert_equal(np.cross(u, u), 0)
        u = np.tile([1, 2], (11, 1)).T
        v = np.tile([3, 4, 5], (11, 1))
        z = np.tile([10, -5, -2], (11, 1))
        assert_equal(np.cross(u, v, axisa=0), z)
        assert_equal(np.cross(v, u.T), -z)
        assert_equal(np.cross(v, v), 0)
        u = np.tile([1, 2, 3], (11, 1)).T
        v = np.tile([3, 4], (11, 1)).T
        z = np.tile([-12, 9, -2], (11, 1))
        assert_equal(np.cross(u, v, axisa=0, axisb=0), z)
        assert_equal(np.cross(v.T, u.T), -z)
        assert_equal(np.cross(u.T, u.T), 0)
        u = np.tile([1, 2, 3], (5, 1))
        v = np.tile([4, 5, 6], (5, 1)).T
        z = np.tile([-3, 6, -3], (5, 1))
        assert_equal(np.cross(u, v, axisb=0), z)
        assert_equal(np.cross(v.T, u), -z)
        assert_equal(np.cross(u, u), 0)
    def test_broadcasting_shapes(self):
        # Result shapes under broadcasting, and out-of-bounds axis errors.
        u = np.ones((2, 1, 3))
        v = np.ones((5, 3))
        assert_equal(np.cross(u, v).shape, (2, 5, 3))
        u = np.ones((10, 3, 5))
        v = np.ones((2, 5))
        assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3))
        assert_raises(ValueError, np.cross, u, v, axisa=1, axisb=2)
        assert_raises(ValueError, np.cross, u, v, axisa=3, axisb=0)
        u = np.ones((10, 3, 5, 7))
        v = np.ones((5, 7, 2))
        assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7))
        assert_raises(ValueError, np.cross, u, v, axisa=-5, axisb=2)
        assert_raises(ValueError, np.cross, u, v, axisa=1, axisb=-4)
        # gh-5885
        u = np.ones((3, 4, 2))
        for axisc in range(-2, 2):
            assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4))
def test_outer_out_param():
    """np.outer must write into the supplied ``out`` array and return it."""
    ones5 = np.ones((5,))
    ones2 = np.ones((2,))
    grid = np.linspace(-2, 2, 5)
    out_a = np.ndarray(shape=(5, 5))
    out_b = np.ndarray(shape=(2, 5))
    returned = np.outer(ones5, grid, out_a)
    assert_equal(returned, out_a)
    assert_equal(np.outer(ones2, grid, out_b), out_b)
class TestRequire(object):
    """Tests for np.require, exercising every accepted flag spelling."""
    flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS',
                  'F', 'F_CONTIGUOUS', 'FORTRAN',
                  'A', 'ALIGNED',
                  'W', 'WRITEABLE',
                  'O', 'OWNDATA']
    def generate_all_false(self, dtype):
        # Build an array whose C/F/O/W/A flags are all False: a read-only
        # field view into a structured array (the 'junk' byte makes the
        # view non-contiguous).
        arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)])
        arr.setflags(write=False)
        a = arr['a']
        assert_(not a.flags['C'])
        assert_(not a.flags['F'])
        assert_(not a.flags['O'])
        assert_(not a.flags['W'])
        assert_(not a.flags['A'])
        return a
    def set_and_check_flag(self, flag, dtype, arr):
        # Request a single flag and verify the result honors it and the
        # requested dtype (defaulting to the input's dtype).
        if dtype is None:
            dtype = arr.dtype
        b = np.require(arr, dtype, [flag])
        assert_(b.flags[flag])
        assert_(b.dtype == dtype)
        # a further call to np.require ought to return the same array
        # unless OWNDATA is specified.
        c = np.require(b, None, [flag])
        if flag[0] != 'O':
            assert_(c is b)
        else:
            assert_(c.flags[flag])
    def test_require_each(self):
        # Nose-style generator test: yields one check per combination of
        # input dtype, requested dtype, and flag spelling.
        id = ['f8', 'i4']
        fd = [None, 'f8', 'c16']
        for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names):
            a = self.generate_all_false(idtype)
            yield self.set_and_check_flag, flag, fdtype, a
    def test_unknown_requirement(self):
        # An unrecognized flag name must raise KeyError.
        a = self.generate_all_false('f8')
        assert_raises(KeyError, np.require, a, None, 'Q')
    def test_non_array_input(self):
        # A plain list is converted and must satisfy all requested flags.
        a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O'])
        assert_(a.flags['O'])
        assert_(a.flags['C'])
        assert_(a.flags['A'])
        assert_(a.dtype == 'i4')
        assert_equal(a, [1, 2, 3, 4])
    def test_C_and_F_simul(self):
        # Requesting C and F contiguity together is contradictory.
        a = self.generate_all_false('f8')
        assert_raises(ValueError, np.require, a, None, ['C', 'F'])
    def test_ensure_array(self):
        # 'E' (ENSUREARRAY) must downcast ndarray subclasses to base ndarray.
        class ArraySubclass(np.ndarray):
            pass
        a = ArraySubclass((2, 2))
        b = np.require(a, None, ['E'])
        assert_(type(b) is np.ndarray)
    def test_preserve_subtype(self):
        # Without 'E', the ndarray subclass must be preserved for every flag.
        class ArraySubclass(np.ndarray):
            pass
        for flag in self.flag_names:
            a = ArraySubclass((2, 2))
            yield self.set_and_check_flag, flag, None, a
class TestBroadcast(TestCase):
    def test_broadcast_in_args(self):
        # gh-5881
        # np.broadcast arguments may themselves be broadcast objects; all
        # three constructions must flatten to the same four underlying
        # iterators and broadcast shape.
        arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
                np.empty((5, 1, 7))]
        mits = [np.broadcast(*arrs),
                np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])),
                np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])]
        for mit in mits:
            assert_equal(mit.shape, (5, 6, 7))
            assert_equal(mit.nd, 3)
            assert_equal(mit.numiter, 4)
            # Each iterator must be backed by the original input array.
            for a, ia in zip(arrs, mit.iters):
                assert_(a is ia.base)
    def test_broadcast_single_arg(self):
        # gh-6899
        # A single argument is accepted and broadcasts to its own shape.
        arrs = [np.empty((5, 6, 7))]
        mit = np.broadcast(*arrs)
        assert_equal(mit.shape, (5, 6, 7))
        assert_equal(mit.nd, 3)
        assert_equal(mit.numiter, 1)
        assert_(arrs[0] is mit.iters[0].base)
    def test_number_of_arguments(self):
        # np.broadcast accepts between 1 and 32 arguments inclusive;
        # anything outside that range raises ValueError.
        arr = np.empty((5,))
        for j in range(35):
            arrs = [arr] * j
            if j < 1 or j > 32:
                assert_raises(ValueError, np.broadcast, *arrs)
            else:
                mit = np.broadcast(*arrs)
                assert_equal(mit.numiter, j)
class TestKeepdims(TestCase):
    # Subclass whose sum() signature does not accept a keepdims keyword;
    # np.sum must propagate keepdims to it and therefore raise TypeError.
    class sub_array(np.ndarray):
        def sum(self, axis=None, dtype=None, out=None):
            return np.ndarray.sum(self, axis, dtype, out, keepdims=True)
    def test_raise(self):
        sub_class = self.sub_array
        x = np.arange(30).view(sub_class)
        assert_raises(TypeError, np.sum, x, keepdims=True)
if __name__ == "__main__":
    # Legacy nose-style entry point: allow running this test module directly.
    run_module_suite()
| bsd-3-clause |
devanlai/pyOCD | pyOCD/gdbserver/syscall.py | 5 | 4191 | """
mbed CMSIS-DAP debugger
Copyright (c) 2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import logging
from ..target.semihost import SemihostIOHandler
# Open mode flags
# NOTE(review): these values appear to follow the GDB File-I/O remote
# protocol's flag encoding (O_CREAT=0x200, O_TRUNC=0x400, ...) rather than
# the host's <fcntl.h> values -- confirm against the GDB remote spec.
O_RDONLY = 0x0
O_WRONLY = 0x1
O_RDWR = 0x2
O_APPEND = 0x8
O_CREAT = 0x200
O_TRUNC = 0x400
O_EXCL = 0x800
# Offset added to file descriptor numbers returned from gdb. This offset is to make
# sure we don't overlap with the standard I/O file descriptors 1, 2, and 3 (fds must be
# non-zero for semihosting).
FD_OFFSET = 4
##
# @brief Semihosting file I/O handler that performs GDB syscalls.
#
# Forwards semihosting file operations from the target to the connected gdb
# client using gdb's File-I/O remote protocol. File descriptors returned by
# gdb are shifted by FD_OFFSET so they never collide with the semihosting
# standard I/O descriptors (which must be non-zero), and shifted back before
# each syscall.
class GDBSyscallIOHandler(SemihostIOHandler):
    def __init__(self, server):
        """@param server The gdbserver instance used to issue File-I/O syscalls."""
        super(GDBSyscallIOHandler, self).__init__()
        self._server = server

    def _mode_to_flags(self, mode):
        """Translate an fopen()-style mode string into O_* flag bits."""
        hasplus = '+' in mode
        modeval = 0
        if 'r' in mode:
            modeval |= O_RDWR if hasplus else O_RDONLY
        elif 'w' in mode:
            modeval |= (O_RDWR if hasplus else O_WRONLY) | O_CREAT | O_TRUNC
        elif 'a' in mode:
            modeval |= (O_RDWR if hasplus else O_WRONLY) | O_APPEND | O_CREAT
        return modeval

    def open(self, fnptr, fnlen, mode):
        """Open a file on the gdb host.

        The filename is read by gdb directly from target memory at @a fnptr.
        @return The FD_OFFSET-shifted descriptor, or -1 on error.
        """
        # Standard I/O streams are handled locally without a syscall.
        fd, _ = self._std_open(fnptr, fnlen, mode)
        if fd is not None:
            return fd
        modeval = self._mode_to_flags(mode)
        # fnlen+1 includes the filename's terminating NULL byte.
        # Bug fix: the permissions literal was the Python 2-only octal 0777;
        # 0o777 is the same value and valid on Python 2.6+ and 3.
        result, self._errno = self._server.syscall(
            'open,%x/%x,%x,%x' % (fnptr, fnlen + 1, modeval, 0o777))
        if result != -1:
            result += FD_OFFSET
        return result

    def close(self, fd):
        """Close a host file descriptor; returns the raw syscall result."""
        fd -= FD_OFFSET
        result, self._errno = self._server.syscall('close,%x' % (fd))
        return result

    def write(self, fd, ptr, length):
        """Write @a length bytes of target memory at @a ptr to a host file.

        syscall return: number of bytes written
        semihost return: 0 on success, else the number of bytes not written
        """
        fd -= FD_OFFSET
        result, self._errno = self._server.syscall('write,%x,%x,%x' % (fd, ptr, length))
        return length - result

    def read(self, fd, ptr, length):
        """Read up to @a length bytes from a host file into target memory.

        syscall return: number of bytes read
        semihost return: 0 on success, length on EOF, else bytes not read
        """
        fd -= FD_OFFSET
        result, self._errno = self._server.syscall('read,%x,%x,%x' % (fd, ptr, length))
        return length - result

    def readc(self):
        """Read one character from host stdin (descriptor 0).

        Borrows the word just below the target's stack pointer as a scratch
        buffer for the transfer, then reads the byte back out of it.
        """
        ptr = self.agent.target.readCoreRegister('sp') - 4
        result, self._errno = self._server.syscall('read,0,%x,1' % (ptr))
        if result != -1:
            result = self.agent.target.read8(ptr)
        return result

    def istty(self, fd):
        """Return whether the descriptor refers to an interactive terminal."""
        fd -= FD_OFFSET
        result, self._errno = self._server.syscall('isatty,%x' % (fd))
        return result

    def seek(self, fd, pos):
        """Seek to absolute position @a pos; return 0 on success, -1 on error."""
        fd -= FD_OFFSET
        result, self._errno = self._server.syscall('lseek,%x,%x,0' % (fd, pos))
        # Bug fix: originally "result is not -1" -- an identity test against
        # an int literal that only works thanks to CPython's small-int cache.
        return 0 if result != -1 else -1

    def flen(self, fd):
        """Return the length of the open file, or -1 on error.

        Issues an fstat syscall with gdb writing the stat struct into a
        scratch area below the target stack pointer, then extracts a
        big-endian 64-bit value from the first 8 bytes -- presumably the
        size field; verify the offset against gdb's File-I/O struct stat
        layout.
        """
        fd -= FD_OFFSET
        ptr = self.agent.target.readCoreRegister('sp') - 64
        result, self._errno = self._server.syscall('fstat,%x,%x' % (fd, ptr))
        if result != -1:
            # Fields in stat struct are big endian as written by gdb.
            size = self.agent.target.readBlockMemoryUnaligned8(ptr, 8)
            result = (size[0] << 56) \
                    | (size[1] << 48) \
                    | (size[2] << 40) \
                    | (size[3] << 32) \
                    | (size[4] << 24) \
                    | (size[5] << 16) \
                    | (size[6] << 8) \
                    | (size[7])
        return result
| apache-2.0 |
nagyistoce/koalacloud | boto/route53/record.py | 4 | 10525 | # Copyright (c) 2010 Chris Moyer http://coredumped.org/
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
RECORD_TYPES = ['A', 'AAAA', 'TXT', 'CNAME', 'MX', 'PTR', 'SRV', 'SPF']
from boto.resultset import ResultSet
class ResourceRecordSets(ResultSet):
    """
    A list of resource records.
    :ivar hosted_zone_id: The ID of the hosted zone.
    :ivar comment: A comment that will be stored with the change.
    :ivar changes: A list of changes.
    """
    # Request envelope for ChangeResourceRecordSets; filled by to_xml()
    # with the accumulated <Change> elements.
    ChangeResourceRecordSetsBody = """<?xml version="1.0" encoding="UTF-8"?>
    <ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2012-02-29/">
            <ChangeBatch>
                <Comment>%(comment)s</Comment>
                <Changes>%(changes)s</Changes>
            </ChangeBatch>
        </ChangeResourceRecordSetsRequest>"""
    # One CREATE/DELETE action wrapping a serialized Record.
    ChangeXML = """<Change>
            <Action>%(action)s</Action>
            %(record)s
        </Change>"""
    def __init__(self, connection=None, hosted_zone_id=None, comment=None):
        self.connection = connection
        self.hosted_zone_id = hosted_zone_id
        self.comment = comment
        self.changes = []
        # Paging markers from a truncated listing; set by endElement() and
        # consumed by __iter__() to fetch the next page.
        self.next_record_name = None
        self.next_record_type = None
        ResultSet.__init__(self, [('ResourceRecordSet', Record)])
    def __repr__(self):
        return '<ResourceRecordSets: %s>' % self.hosted_zone_id
    def add_change(self, action, name, type, ttl=600,
            alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
            weight=None, region=None):
        """
        Add a change request to the set.
        :type action: str
        :param action: The action to perform ('CREATE'|'DELETE')
        :type name: str
        :param name: The name of the domain you want to perform the action on.
        :type type: str
        :param type: The DNS record type. Valid values are:
            * A
            * AAAA
            * CNAME
            * MX
            * NS
            * PTR
            * SOA
            * SPF
            * SRV
            * TXT
        :type ttl: int
        :param ttl: The resource record cache time to live (TTL), in seconds.
        :type alias_hosted_zone_id: str
        :param alias_hosted_zone_id: *Alias resource record sets only* The
            value of the hosted zone ID, CanonicalHostedZoneNameId, for
            the LoadBalancer.
        :type alias_dns_name: str
        :param alias_dns_name: *Alias resource record sets only*
            Information about the domain to which you are redirecting traffic.
        :type identifier: str
        :param identifier: *Weighted and latency-based resource record sets
            only* An identifier that differentiates among multiple resource
            record sets that have the same combination of DNS name and type.
        :type weight: int
        :param weight: *Weighted resource record sets only* Among resource
            record sets that have the same combination of DNS name and type,
            a value that determines what portion of traffic for the current
            resource record set is routed to the associated location
        :type region: str
        :param region: *Latency-based resource record sets only* Among resource
            record sets that have the same combination of DNS name and type,
            a value that determines which region this should be associated with
            for the latency-based routing
        """
        # NOTE: the original docstring attached the alias_dns_name text to
        # alias_hosted_zone_id and vice versa; the pairing above is corrected.
        change = Record(name, type, ttl,
                alias_hosted_zone_id=alias_hosted_zone_id,
                alias_dns_name=alias_dns_name, identifier=identifier,
                weight=weight, region=region)
        self.changes.append([action, change])
        return change
    def to_xml(self):
        """Convert this ResourceRecordSet into XML
        to be saved via the ChangeResourceRecordSetsRequest"""
        changesXML = ""
        for change in self.changes:
            changeParams = {"action": change[0], "record": change[1].to_xml()}
            changesXML += self.ChangeXML % changeParams
        params = {"comment": self.comment, "changes": changesXML}
        return self.ChangeResourceRecordSetsBody % params
    def commit(self):
        """Commit this change"""
        # Lazily create a default Route53 connection if none was supplied.
        if not self.connection:
            import boto
            self.connection = boto.connect_route53()
        return self.connection.change_rrsets(self.hosted_zone_id, self.to_xml())
    def endElement(self, name, value, connection):
        """Overwritten to also add the NextRecordName and
        NextRecordType to the base object"""
        if name == 'NextRecordName':
            self.next_record_name = value
        elif name == 'NextRecordType':
            self.next_record_type = value
        else:
            return ResultSet.endElement(self, name, value, connection)
    def __iter__(self):
        """Override the next function to support paging"""
        results = ResultSet.__iter__(self)
        while results:
            for obj in results:
                yield obj
            # Transparently fetch the next page when Route53 truncated the
            # previous response.
            if self.is_truncated:
                self.is_truncated = False
                results = self.connection.get_all_rrsets(self.hosted_zone_id, name=self.next_record_name, type=self.next_record_type)
            else:
                results = None
class Record(object):
    """An individual ResourceRecordSet.

    A record is either a plain resource record set (one or more values plus
    a TTL) or an alias record set (a hosted zone ID / DNS name target). The
    to_xml()/to_print() output depends on which of the two is configured,
    and on whether a weighted (identifier+weight) or latency-based
    (identifier+region) routing policy is present.
    """
    XMLBody = """<ResourceRecordSet>
        <Name>%(name)s</Name>
        <Type>%(type)s</Type>
        %(weight)s
        %(body)s
    </ResourceRecordSet>"""
    WRRBody = """
        <SetIdentifier>%(identifier)s</SetIdentifier>
        <Weight>%(weight)s</Weight>
    """
    RRRBody = """
        <SetIdentifier>%(identifier)s</SetIdentifier>
        <Region>%(region)s</Region>
    """
    ResourceRecordsBody = """
        <TTL>%(ttl)s</TTL>
        <ResourceRecords>
            %(records)s
        </ResourceRecords>"""
    ResourceRecordBody = """<ResourceRecord>
        <Value>%s</Value>
    </ResourceRecord>"""
    AliasBody = """<AliasTarget>
        <HostedZoneId>%s</HostedZoneId>
        <DNSName>%s</DNSName>
    </AliasTarget>"""
    def __init__(self, name=None, type=None, ttl=600, resource_records=None,
                 alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
                 weight=None, region=None):
        self.name = name
        self.type = type
        self.ttl = ttl
        # Create a fresh list per instance (a shared default list would be
        # mutated across records by add_value()). Idiom fix: compare to
        # None with "is", not "==", throughout this class (PEP 8).
        if resource_records is None:
            resource_records = []
        self.resource_records = resource_records
        self.alias_hosted_zone_id = alias_hosted_zone_id
        self.alias_dns_name = alias_dns_name
        self.identifier = identifier
        self.weight = weight
        self.region = region
    def add_value(self, value):
        """Add a resource record value"""
        self.resource_records.append(value)
    def set_alias(self, alias_hosted_zone_id, alias_dns_name):
        """Make this an alias resource record set"""
        self.alias_hosted_zone_id = alias_hosted_zone_id
        self.alias_dns_name = alias_dns_name
    def to_xml(self):
        """Spit this resource record set out as XML"""
        if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
            # Use alias
            body = self.AliasBody % (self.alias_hosted_zone_id, self.alias_dns_name)
        else:
            # Use resource record(s)
            records = ""
            for r in self.resource_records:
                records += self.ResourceRecordBody % r
            body = self.ResourceRecordsBody % {
                "ttl": self.ttl,
                "records": records,
            }
        # Optional routing-policy element: weighted takes precedence over
        # latency-based when both weight and region are somehow set.
        weight = ""
        if self.identifier is not None and self.weight is not None:
            weight = self.WRRBody % {"identifier": self.identifier, "weight":
                                     self.weight}
        elif self.identifier is not None and self.region is not None:
            weight = self.RRRBody % {"identifier": self.identifier, "region":
                                     self.region}
        params = {
            "name": self.name,
            "type": self.type,
            "weight": weight,
            "body": body,
        }
        return self.XMLBody % params
    def to_print(self):
        """Return a short human-readable summary of this record set."""
        rr = ""
        if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
            # Show alias
            rr = 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name
        else:
            # Show resource record(s)
            rr = ",".join(self.resource_records)
        if self.identifier is not None and self.weight is not None:
            rr += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight)
        return rr
    def endElement(self, name, value, connection):
        """SAX parsing hook: store a parsed element in the matching attribute."""
        if name == 'Name':
            self.name = value
        elif name == 'Type':
            self.type = value
        elif name == 'TTL':
            self.ttl = value
        elif name == 'Value':
            self.resource_records.append(value)
        elif name == 'HostedZoneId':
            self.alias_hosted_zone_id = value
        elif name == 'DNSName':
            self.alias_dns_name = value
        elif name == 'SetIdentifier':
            self.identifier = value
        elif name == 'Weight':
            self.weight = value
        elif name == 'Region':
            self.region = value
    def startElement(self, name, attrs, connection):
        """SAX parsing hook: no nested elements require special handling."""
        return None
| apache-2.0 |
maljac/odoomrp-wip | quality_control/models/qc_trigger_line.py | 11 | 4288 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields
def _filter_trigger_lines(trigger_lines):
filtered_trigger_lines = []
unique_tests = []
for trigger_line in trigger_lines:
if trigger_line.test not in unique_tests:
filtered_trigger_lines.append(trigger_line)
unique_tests.append(trigger_line.test)
return filtered_trigger_lines
class QcTriggerLine(models.AbstractModel):
    """Abstract link between a quality trigger and the test to launch.

    Concrete subclasses attach the line to a product, a product template or
    a product category, and override get_trigger_line_for_product().
    """
    _name = "qc.trigger.line"
    _description = "Abstract line for defining triggers"
    trigger = fields.Many2one(comodel_name="qc.trigger", required=True)
    test = fields.Many2one(comodel_name="qc.test", required=True)
    # Responsible defaults to the user creating the line.
    user = fields.Many2one(
        comodel_name='res.users', string='Responsible',
        track_visibility='always', default=lambda self: self.env.user)
    # Only company-level partners are selectable (parent_id must be unset).
    partners = fields.Many2many(
        comodel_name='res.partner', string='Partners',
        help='If filled, the test will only be created when the action is done'
        ' for one of the specified partners. If empty, the test will be always'
        ' created.', domain="[('parent_id', '=', False)]")
    def get_trigger_line_for_product(self, trigger, product, partner=False):
        """Overridable method for getting trigger_line associated to a product.
        Each inherited model will complete this module to make the search by
        product, template or category.
        :param trigger: Trigger instance.
        :param product: Product instance.
        :return: Set of trigger_lines that matches to the given product and
          trigger.
        """
        return set()
class QcTriggerProductCategoryLine(models.Model):
    """Trigger line bound to a product category (and its ancestors)."""
    _inherit = "qc.trigger.line"
    _name = "qc.trigger.product_category_line"
    product_category = fields.Many2one(comodel_name="product.category")
    def get_trigger_line_for_product(self, trigger, product, partner=False):
        # Walk up the category tree so triggers on ancestor categories also
        # apply; partner-restricted lines only match when the partner's
        # commercial entity is listed (or no partner filter is set).
        trigger_lines = super(
            QcTriggerProductCategoryLine,
            self).get_trigger_line_for_product(trigger, product,
                                               partner=partner)
        category = product.categ_id
        while category:
            for trigger_line in category.qc_triggers.filtered(
                    lambda r: r.trigger == trigger and (
                        not r.partners or not partner or
                        partner.commercial_partner_id in r.partners)):
                trigger_lines.add(trigger_line)
            category = category.parent_id
        return trigger_lines
class QcTriggerProductTemplateLine(models.Model):
    """Trigger line bound to a product template."""
    _inherit = "qc.trigger.line"
    _name = "qc.trigger.product_template_line"
    product_template = fields.Many2one(comodel_name="product.template")
    def get_trigger_line_for_product(self, trigger, product, partner=False):
        # Add template-level trigger lines for this product's template,
        # honouring the optional partner restriction.
        trigger_lines = super(
            QcTriggerProductTemplateLine,
            self).get_trigger_line_for_product(trigger, product,
                                               partner=partner)
        for trigger_line in product.product_tmpl_id.qc_triggers.filtered(
                lambda r: r.trigger == trigger and (
                    not r.partners or not partner or
                    partner.commercial_partner_id in r.partners)):
            trigger_lines.add(trigger_line)
        return trigger_lines
class QcTriggerProductLine(models.Model):
    """Trigger line bound to a specific product variant."""
    _inherit = "qc.trigger.line"
    _name = "qc.trigger.product_line"
    product = fields.Many2one(comodel_name="product.product")
    def get_trigger_line_for_product(self, trigger, product, partner=False):
        # Add variant-level trigger lines, honouring the optional partner
        # restriction.
        trigger_lines = super(
            QcTriggerProductLine,
            self).get_trigger_line_for_product(trigger, product,
                                               partner=partner)
        for trigger_line in product.qc_triggers.filtered(
                lambda r: r.trigger == trigger and (
                    not r.partners or not partner or
                    partner.commercial_partner_id in r.partners)):
            trigger_lines.add(trigger_line)
        return trigger_lines
| agpl-3.0 |
MaximeGLegault/StrategyIA | RULEngine/Communication/protobuf/google/protobuf/internal/decoder.py | 10 | 30808 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#PY25 compatible for GAE.
#
# Copyright 2009 Google Inc. All Rights Reserved.
"""Code for decoding protocol buffer primitives.
This code is very similar to encoder.py -- read the docs for that module first.
A "decoder" is a function with the signature:
Decode(buffer, pos, end, message, field_dict)
The arguments are:
buffer: The string containing the encoded message.
pos: The current position in the string.
end: The position in the string where the current message ends. May be
less than len(buffer) if we're reading a sub-message.
message: The message object into which we're parsing.
field_dict: message._fields (avoids a hashtable lookup).
The decoder reads the field and stores it into field_dict, returning the new
buffer position. A decoder for a repeated field may proactively decode all of
the elements of that field, if they appear consecutively.
Note that decoders may throw any of the following:
IndexError: Indicates a truncated message.
struct.error: Unpacking of a fixed-width field failed.
message.DecodeError: Other errors.
Decoders are expected to raise an exception if they are called with pos > end.
This allows callers to be lax about bounds checking: it's fineto read past
"end" as long as you are sure that someone else will notice and throw an
exception later on.
Something up the call stack is expected to catch IndexError and struct.error
and convert them to message.DecodeError.
Decoders are constructed using decoder constructors with the signature:
MakeDecoder(field_number, is_repeated, is_packed, key, new_default)
The arguments are:
field_number: The field number of the field we want to decode.
is_repeated: Is the field a repeated field? (bool)
is_packed: Is the field a packed field? (bool)
key: The key to use when looking up the field within field_dict.
(This is actually the FieldDescriptor but nothing in this
file should depend on that.)
new_default: A function which takes a message object as a parameter and
returns a new instance of the default value for this field.
(This is called for repeated fields and sub-messages, when an
instance does not already exist.)
As with encoders, we define a decoder constructor for every type of field.
Then, for every field of every message class we construct an actual decoder.
That decoder goes into a dict indexed by tag, so when we decode a message
we repeatedly read a tag, look up the corresponding decoder, and invoke it.
"""
__author__ = 'kenton@google.com (Kenton Varda)'
import struct
import sys ##PY25
_PY2 = sys.version_info[0] < 3 ##PY25
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import message
# This will overflow and thus become IEEE-754 "infinity". We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
# inf * 0 yields NaN under IEEE-754.
_NAN = _POS_INF * 0
# This is not for optimization, but rather to avoid conflicts with local
# variables named "message".
_DecodeError = message.DecodeError
def _VarintDecoder(mask, result_type):
  """Return an encoder for a basic varint value (does not include tag).
  Decoded values will be bitwise-anded with the given mask before being
  returned, e.g. to limit them to 32 bits. The returned decoder does not
  take the usual "end" parameter -- the caller is expected to do bounds checking
  after the fact (often the caller can defer such checking until later). The
  decoder returns a (value, new_pos) pair.
  """
  local_ord = ord
  py2 = _PY2 ##PY25
  ##!PY25 py2 = str is bytes
  def DecodeVarint(buffer, pos):
    result = 0
    shift = 0
    while 1:
      # Each byte contributes its low 7 bits, least-significant group
      # first; the high bit marks continuation.
      b = local_ord(buffer[pos]) if py2 else buffer[pos]
      result |= ((b & 0x7f) << shift)
      pos += 1
      if not (b & 0x80):
        result &= mask
        result = result_type(result)
        return (result, pos)
      shift += 7
      if shift >= 64:
        raise _DecodeError('Too many bytes when decoding varint.')
  return DecodeVarint
def _SignedVarintDecoder(mask, result_type):
  """Like _VarintDecoder() but decodes signed values."""
  local_ord = ord
  py2 = _PY2 ##PY25
  ##!PY25 py2 = str is bytes
  def DecodeVarint(buffer, pos):
    result = 0
    shift = 0
    while 1:
      b = local_ord(buffer[pos]) if py2 else buffer[pos]
      result |= ((b & 0x7f) << shift)
      pos += 1
      if not (b & 0x80):
        # Raw values above 2**63-1 encode negatives in two's complement:
        # sign-extend them; otherwise just apply the width mask.
        if result > 0x7fffffffffffffff:
          result -= (1 << 64)
          result |= ~mask
        else:
          result &= mask
        result = result_type(result)
        return (result, pos)
      shift += 7
      if shift >= 64:
        raise _DecodeError('Too many bytes when decoding varint.')
  return DecodeVarint
# We force 32-bit values to int and 64-bit values to long to make
# alternate implementations where the distinction is more significant
# (e.g. the C++ implementation) simpler.
# (In this copy both widths are constructed with ``int``.)
_DecodeVarint = _VarintDecoder((1 << 64) - 1, int)
_DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1, int)
# Use these versions for values which must be limited to 32 bits.
_DecodeVarint32 = _VarintDecoder((1 << 32) - 1, int)
_DecodeSignedVarint32 = _SignedVarintDecoder((1 << 32) - 1, int)
def ReadTag(buffer, pos):
  """Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.
  We return the raw bytes of the tag rather than decoding them. The raw
  bytes can then be used to look up the proper decoder. This effectively allows
  us to trade some work that would be done in pure-python (decoding a varint)
  for work that is done in C (searching for a byte string in a hash table).
  In a low-level language it would be much cheaper to decode the varint and
  use that, but not in Python.
  """
  py2 = _PY2 ##PY25
  ##!PY25 py2 = str is bytes
  start = pos
  # Skip varint continuation bytes (high bit set), then the final byte.
  while (ord(buffer[pos]) if py2 else buffer[pos]) & 0x80:
    pos += 1
  pos += 1
  return (buffer[start:pos], pos)
# --------------------------------------------------------------------
def _SimpleDecoder(wire_type, decode_value):
  """Return a constructor for a decoder for fields of a particular type.
  Args:
      wire_type: The field's wire type.
      decode_value: A function which decodes an individual value, e.g.
        _DecodeVarint()
  """
  def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default):
    if is_packed:
      local_DecodeVarint = _DecodeVarint
      def DecodePackedField(buffer, pos, end, message, field_dict):
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        # Packed fields are length-delimited: read the byte length, then
        # decode values until exactly that many bytes are consumed.
        (endpoint, pos) = local_DecodeVarint(buffer, pos)
        endpoint += pos
        if endpoint > end:
          raise _DecodeError('Truncated message.')
        while pos < endpoint:
          (element, pos) = decode_value(buffer, pos)
          value.append(element)
        if pos > endpoint:
          del value[-1] # Discard corrupt value.
          raise _DecodeError('Packed element was truncated.')
        return pos
      return DecodePackedField
    elif is_repeated:
      tag_bytes = encoder.TagBytes(field_number, wire_type)
      tag_len = len(tag_bytes)
      def DecodeRepeatedField(buffer, pos, end, message, field_dict):
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        while 1:
          (element, new_pos) = decode_value(buffer, pos)
          value.append(element)
          # Predict that the next tag is another copy of the same repeated
          # field.
          pos = new_pos + tag_len
          if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
            # Prediction failed. Return.
            if new_pos > end:
              raise _DecodeError('Truncated message.')
            return new_pos
      return DecodeRepeatedField
    else:
      def DecodeField(buffer, pos, end, message, field_dict):
        (field_dict[key], pos) = decode_value(buffer, pos)
        if pos > end:
          del field_dict[key] # Discard corrupt value.
          raise _DecodeError('Truncated message.')
        return pos
      return DecodeField
  return SpecificDecoder
def _ModifiedDecoder(wire_type, decode_value, modify_value):
  """Like SimpleDecoder but additionally invokes modify_value on every value
  before storing it. Usually modify_value is ZigZagDecode.
  """
  # Delegate to _SimpleDecoder with a wrapped value decoder. Duplicating
  # its body here would be marginally faster, but not enough to matter.
  def _DecodeAndModify(buffer, pos):
    raw_value, next_pos = decode_value(buffer, pos)
    return (modify_value(raw_value), next_pos)
  return _SimpleDecoder(wire_type, _DecodeAndModify)
def _StructPackDecoder(wire_type, format):
  """Return a constructor for a decoder for a fixed-width field.
  Args:
      wire_type: The field's wire type.
      format: The format string to pass to struct.unpack().
  """
  value_size = struct.calcsize(format)
  local_unpack = struct.unpack
  # Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
  # not enough to make a significant difference.
  # Note that we expect someone up-stack to catch struct.error and convert
  # it to _DecodeError -- this way we don't have to set up exception-
  # handling blocks every time we parse one value.
  def InnerDecode(buffer, pos):
    # Fixed-width: consume exactly value_size bytes and unpack one value.
    new_pos = pos + value_size
    result = local_unpack(format, buffer[pos:new_pos])[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_type, InnerDecode)
def _FloatDecoder():
  """Returns a decoder for a float field.
  This code works around a bug in struct.unpack for non-finite 32-bit
  floating-point values.
  """
  local_unpack = struct.unpack
  # b(): identity on Python 2, latin-1 encode on Python 3, so the literals
  # below compare as bytes on both.
  b = (lambda x:x) if _PY2 else lambda x:x.encode('latin1') ##PY25
  def InnerDecode(buffer, pos):
    # We expect a 32-bit value in little-endian byte order. Bit 1 is the sign
    # bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
    new_pos = pos + 4
    float_bytes = buffer[pos:new_pos]
    # If this value has all its exponent bits set, then it's non-finite.
    # In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
    # To avoid that, we parse it specially.
    if ((float_bytes[3:4] in b('\x7F\xFF')) ##PY25
    ##!PY25 if ((float_bytes[3:4] in b'\x7F\xFF')
        and (float_bytes[2:3] >= b('\x80'))): ##PY25
    ##!PY25 and (float_bytes[2:3] >= b'\x80')):
      # If at least one significand bit is set...
      if float_bytes[0:3] != b('\x00\x00\x80'): ##PY25
      ##!PY25 if float_bytes[0:3] != b'\x00\x00\x80':
        return (_NAN, new_pos)
      # If sign bit is set...
      if float_bytes[3:4] == b('\xFF'): ##PY25
      ##!PY25 if float_bytes[3:4] == b'\xFF':
        return (_NEG_INF, new_pos)
      return (_POS_INF, new_pos)
    # Note that we expect someone up-stack to catch struct.error and convert
    # it to _DecodeError -- this way we don't have to set up exception-
    # handling blocks every time we parse one value.
    result = local_unpack('<f', float_bytes)[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode)
def _DoubleDecoder():
  """Returns a decoder for a double field.
  This code works around a bug in struct.unpack for not-a-number.
  """
  local_unpack = struct.unpack
  # b(): identity on Python 2, latin-1 encode on Python 3, so the literals
  # below compare as bytes on both.
  b = (lambda x:x) if _PY2 else lambda x:x.encode('latin1') ##PY25
  def InnerDecode(buffer, pos):
    # We expect a 64-bit value in little-endian byte order. Bit 1 is the sign
    # bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
    new_pos = pos + 8
    double_bytes = buffer[pos:new_pos]
    # If this value has all its exponent bits set and at least one significand
    # bit set, it's not a number. In Python 2.4, struct.unpack will treat it
    # as inf or -inf. To avoid that, we treat it specially.
    ##!PY25 if ((double_bytes[7:8] in b'\x7F\xFF')
    ##!PY25 and (double_bytes[6:7] >= b'\xF0')
    ##!PY25 and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')):
    if ((double_bytes[7:8] in b('\x7F\xFF')) ##PY25
        and (double_bytes[6:7] >= b('\xF0')) ##PY25
        and (double_bytes[0:7] != b('\x00\x00\x00\x00\x00\x00\xF0'))): ##PY25
      return (_NAN, new_pos)
    # Note that we expect someone up-stack to catch struct.error and convert
    # it to _DecodeError -- this way we don't have to set up exception-
    # handling blocks every time we parse one value.
    result = local_unpack('<d', double_bytes)[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
def EnumDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for an enum field.

  Values that are not members of the enum are preserved verbatim in
  message._unknown_fields rather than being stored on the message.
  """
  enum_type = key.enum_type
  if is_packed:
    local_DecodeVarint = _DecodeVarint
    def DecodePackedField(buffer, pos, end, message, field_dict):
      """Decode a length-delimited run of packed enum varints."""
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # First varint is the byte length of the packed payload.
      (endpoint, pos) = local_DecodeVarint(buffer, pos)
      endpoint += pos
      if endpoint > end:
        raise _DecodeError('Truncated message.')
      while pos < endpoint:
        value_start_pos = pos
        (element, pos) = _DecodeSignedVarint32(buffer, pos)
        if element in enum_type.values_by_number:
          value.append(element)
        else:
          # Unknown enum value: keep the raw bytes as an unknown field.
          if not message._unknown_fields:
            message._unknown_fields = []
          tag_bytes = encoder.TagBytes(field_number,
                                       wire_format.WIRETYPE_VARINT)
          message._unknown_fields.append(
              (tag_bytes, buffer[value_start_pos:pos]))
      if pos > endpoint:
        # Last varint overran the declared payload length; undo its append
        # (wherever it landed) before reporting the truncation.
        if element in enum_type.values_by_number:
          del value[-1]   # Discard corrupt value.
        else:
          del message._unknown_fields[-1]
        raise _DecodeError('Packed element was truncated.')
      return pos
    return DecodePackedField
  elif is_repeated:
    tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_VARINT)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      """Decode one or more consecutive non-packed repeated enum values."""
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        (element, new_pos) = _DecodeSignedVarint32(buffer, pos)
        if element in enum_type.values_by_number:
          value.append(element)
        else:
          if not message._unknown_fields:
            message._unknown_fields = []
          message._unknown_fields.append(
              (tag_bytes, buffer[pos:new_pos]))
        # Predict that the next tag is another copy of the same repeated
        # field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
          # Prediction failed.  Return.
          if new_pos > end:
            raise _DecodeError('Truncated message.')
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      """Decode a single (optional/required) enum value."""
      value_start_pos = pos
      (enum_value, pos) = _DecodeSignedVarint32(buffer, pos)
      if pos > end:
        raise _DecodeError('Truncated message.')
      if enum_value in enum_type.values_by_number:
        field_dict[key] = enum_value
      else:
        if not message._unknown_fields:
          message._unknown_fields = []
        tag_bytes = encoder.TagBytes(field_number,
                                     wire_format.WIRETYPE_VARINT)
        message._unknown_fields.append(
            (tag_bytes, buffer[value_start_pos:pos]))
      return pos
    return DecodeField
# --------------------------------------------------------------------

# Concrete decoders for every scalar wire type, built from the generic
# decoder factories above.

Int32Decoder = _SimpleDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32)
Int64Decoder = _SimpleDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeSignedVarint)

UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32)
UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint)

# sint32/sint64 are varints with an extra zig-zag decoding step.
SInt32Decoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode)
SInt64Decoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode)

# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatDecoder = _FloatDecoder()
DoubleDecoder = _DoubleDecoder()

# bool is a varint where any non-zero value decodes to True.
BoolDecoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint, bool)
def StringDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a string field.

  Values are decoded from UTF-8; a UnicodeDecodeError is annotated with the
  field's full name before being re-raised.
  """
  local_DecodeVarint = _DecodeVarint
  local_unicode = str

  def _ConvertToUnicode(byte_str):
    """Decode raw bytes as UTF-8, enriching any decode error with context."""
    try:
      return local_unicode(byte_str, 'utf-8')
    except UnicodeDecodeError as e:
      # add more information to the error message and re-raise it.
      e.reason = '%s in field: %s' % (e, key.full_name)
      raise

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      """Decode consecutive repeated string values."""
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated string.')
        value.append(_ConvertToUnicode(buffer[pos:new_pos]))
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      """Decode a single string value (length-delimited UTF-8)."""
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated string.')
      field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos])
      return new_pos
    return DecodeField
def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a bytes field.

  Identical to StringDecoder except values are stored raw, without UTF-8
  decoding.
  """
  local_DecodeVarint = _DecodeVarint

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      """Decode consecutive repeated bytes values."""
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated string.')
        value.append(buffer[pos:new_pos])
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      """Decode a single bytes value."""
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated string.')
      field_dict[key] = buffer[pos:new_pos]
      return new_pos
    return DecodeField
def GroupDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a group field.

  Groups are delimited by START_GROUP/END_GROUP tags rather than a length
  prefix.
  """
  end_tag_bytes = encoder.TagBytes(field_number,
                                   wire_format.WIRETYPE_END_GROUP)
  end_tag_len = len(end_tag_bytes)

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_START_GROUP)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      """Decode consecutive repeated group values."""
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        # NOTE(review): value is re-fetched every iteration even though it was
        # fetched above; looks redundant -- confirm before simplifying.
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        # Read sub-message.
        pos = value.add()._InternalParse(buffer, pos, end)
        # Read end tag.
        new_pos = pos+end_tag_len
        if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
          raise _DecodeError('Missing group end tag.')
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      """Decode a single group value."""
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # Read sub-message.
      pos = value._InternalParse(buffer, pos, end)
      # Read end tag.
      new_pos = pos+end_tag_len
      if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
        raise _DecodeError('Missing group end tag.')
      return new_pos
    return DecodeField
def MessageDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a message field (length-delimited sub-message)."""
  local_DecodeVarint = _DecodeVarint

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      """Decode consecutive repeated sub-message values."""
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        # NOTE(review): value is re-fetched every iteration even though it was
        # fetched above; looks redundant -- confirm before simplifying.
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        # Read length.
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated message.')
        # Read sub-message.
        if value.add()._InternalParse(buffer, pos, new_pos) != new_pos:
          # The only reason _InternalParse would return early is if it
          # encountered an end-group tag.
          raise _DecodeError('Unexpected end-group tag.')
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      """Decode a single sub-message value."""
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # Read length.
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated message.')
      # Read sub-message.
      if value._InternalParse(buffer, pos, new_pos) != new_pos:
        # The only reason _InternalParse would return early is if it encountered
        # an end-group tag.
        raise _DecodeError('Unexpected end-group tag.')
      return new_pos
    return DecodeField
# --------------------------------------------------------------------

# Tag bytes that open a MessageSet item group (field 1, START_GROUP).
MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP)
def MessageSetItemDecoder(extensions_by_number):
  """Returns a decoder for a MessageSet item.

  The parameter is the _extensions_by_number map for the message class.

  The message set message looks like this:
    message MessageSet {
      repeated group Item = 1 {
        required int32 type_id = 2;
        required string message = 3;
      }
    }

  Items whose type_id has no registered extension are preserved in
  message._unknown_fields.
  """
  type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
  message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
  item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)

  # Bind module-level helpers to locals for faster lookup in the hot loop.
  local_ReadTag = ReadTag
  local_DecodeVarint = _DecodeVarint
  local_SkipField = SkipField

  def DecodeItem(buffer, pos, end, message, field_dict):
    """Decode one MessageSet item group starting at `pos`."""
    message_set_item_start = pos
    type_id = -1
    message_start = -1
    message_end = -1

    # Technically, type_id and message can appear in any order, so we need
    # a little loop here.
    while 1:
      (tag_bytes, pos) = local_ReadTag(buffer, pos)
      if tag_bytes == type_id_tag_bytes:
        (type_id, pos) = local_DecodeVarint(buffer, pos)
      elif tag_bytes == message_tag_bytes:
        (size, message_start) = local_DecodeVarint(buffer, pos)
        pos = message_end = message_start + size
      elif tag_bytes == item_end_tag_bytes:
        break
      else:
        # Fix: use the local alias bound above; the original called the
        # global SkipField, leaving local_SkipField unused.
        pos = local_SkipField(buffer, pos, end, tag_bytes)
        if pos == -1:
          raise _DecodeError('Missing group end tag.')

    if pos > end:
      raise _DecodeError('Truncated message.')
    if type_id == -1:
      raise _DecodeError('MessageSet item missing type_id.')
    if message_start == -1:
      raise _DecodeError('MessageSet item missing message.')

    extension = extensions_by_number.get(type_id)
    if extension is not None:
      value = field_dict.get(extension)
      if value is None:
        value = field_dict.setdefault(
            extension, extension.message_type._concrete_class())
      if value._InternalParse(buffer, message_start, message_end) != message_end:
        # The only reason _InternalParse would return early is if it
        # encountered an end-group tag.
        raise _DecodeError('Unexpected end-group tag.')
    else:
      # Unregistered type_id: keep the whole raw item as an unknown field.
      if not message._unknown_fields:
        message._unknown_fields = []
      message._unknown_fields.append((MESSAGE_SET_ITEM_TAG,
                                      buffer[message_set_item_start:pos]))

    return pos

  return DecodeItem
# --------------------------------------------------------------------
# Optimization is not as heavy here because calls to SkipField() are rare,
# except for handling end-group tags.
def _SkipVarint(buffer, pos, end):
"""Skip a varint value. Returns the new position."""
# Previously ord(buffer[pos]) raised IndexError when pos is out of range.
# With this code, ord(b'') raises TypeError. Both are handled in
# python_message.py to generate a 'Truncated message' error.
while ord(buffer[pos:pos+1]) & 0x80:
pos += 1
pos += 1
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipFixed64(buffer, pos, end):
"""Skip a fixed64 value. Returns the new position."""
pos += 8
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipLengthDelimited(buffer, pos, end):
  """Skip a length-delimited value.  Returns the new position."""
  # The payload length is encoded as a varint immediately before the data.
  (size, data_start) = _DecodeVarint(buffer, pos)
  new_pos = data_start + size
  if new_pos > end:
    raise _DecodeError('Truncated message.')
  return new_pos
def _SkipGroup(buffer, pos, end):
  """Skip sub-group.  Returns the new position."""
  while 1:
    (tag_bytes, pos) = ReadTag(buffer, pos)
    new_pos = SkipField(buffer, pos, end, tag_bytes)
    if new_pos == -1:
      # SkipField returns -1 on an END_GROUP tag; `pos` is already just past
      # that tag, so the whole group (including its end tag) is consumed.
      return pos
    pos = new_pos
def _EndGroup(buffer, pos, end):
"""Skipping an END_GROUP tag returns -1 to tell the parent loop to break."""
return -1
def _SkipFixed32(buffer, pos, end):
"""Skip a fixed32 value. Returns the new position."""
pos += 4
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _RaiseInvalidWireType(buffer, pos, end):
  """Skip function for unknown wire types.  Raises an exception."""
  # Wire types 6 and 7 are not defined by the wire format; reaching here
  # means the tag byte was malformed.
  raise _DecodeError('Tag had invalid wire type.')
def _FieldSkipper():
  """Constructs the SkipField function."""

  # Indexed by wire type (the low 3 bits of the tag byte):
  WIRETYPE_TO_SKIPPER = [
      _SkipVarint,             # 0: varint
      _SkipFixed64,            # 1: 64-bit
      _SkipLengthDelimited,    # 2: length-delimited
      _SkipGroup,              # 3: start-group
      _EndGroup,               # 4: end-group
      _SkipFixed32,            # 5: 32-bit
      _RaiseInvalidWireType,   # 6: unused
      _RaiseInvalidWireType,   # 7: unused
      ]

  wiretype_mask = wire_format.TAG_TYPE_MASK

  def SkipField(buffer, pos, end, tag_bytes):
    """Skips a field with the specified tag.

    |pos| should point to the byte immediately after the tag.

    Returns:
        The new position (after the tag value), or -1 if the tag is an end-group
        tag (in which case the calling loop should break).
    """
    # The wire type is always in the first byte since varints are little-endian.
    wire_type = ord(tag_bytes[0:1]) & wiretype_mask
    return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)

  return SkipField
| mit |
yencarnacion/jaikuengine | .google_appengine/lib/django-1.5/django/contrib/gis/admin/widgets.py | 96 | 5761 | import logging
from django.forms.widgets import Textarea
from django.template import loader, Context
from django.templatetags.static import static
from django.utils import six
from django.utils import translation
from django.contrib.gis.gdal import OGRException
from django.contrib.gis.geos import GEOSGeometry, GEOSException, fromstr
# Creating a template context that contains Django settings
# values needed by admin map templates.
geo_context = Context({'LANGUAGE_BIDI' : translation.get_language_bidi()})
logger = logging.getLogger('django.contrib.gis')
class OpenLayersWidget(Textarea):
    """
    Renders an OpenLayers map using the WKT of the geometry.
    """
    def render(self, name, value, attrs=None):
        """Render the widget: normalize `value` to WKT in the map projection
        and render the OpenLayers template with the accumulated params."""
        # Update the template parameters with any attributes passed in.
        if attrs: self.params.update(attrs)

        # Defaulting the WKT value to a blank string -- this
        # will be tested in the JavaScript and the appropriate
        # interface will be constructed.
        self.params['wkt'] = ''

        # If a string reaches here (via a validation error on another
        # field) then just reconstruct the Geometry.
        if isinstance(value, six.string_types):
            try:
                value = GEOSGeometry(value)
            except (GEOSException, ValueError) as err:
                logger.error(
                    "Error creating geometry from value '%s' (%s)" % (
                        value, err)
                )
                value = None

        # Discard geometries of the wrong type for this field.
        if value and value.geom_type.upper() != self.geom_type:
            value = None

        # Constructing the dictionary of the map options.
        self.params['map_options'] = self.map_options()

        # Constructing the JavaScript module name using the name of
        # the GeometryField (passed in via the `attrs` keyword).
        # Use the 'name' attr for the field name (rather than 'field')
        self.params['name'] = name
        # note: we must switch out dashes for underscores since js
        # functions are created using the module variable
        js_safe_name = self.params['name'].replace('-','_')
        self.params['module'] = 'geodjango_%s' % js_safe_name

        if value:
            # Transforming the geometry to the projection used on the
            # OpenLayers map.
            srid = self.params['srid']
            if value.srid != srid:
                try:
                    ogr = value.ogr
                    ogr.transform(srid)
                    wkt = ogr.wkt
                except OGRException as err:
                    logger.error(
                        "Error transforming geometry from srid '%s' to srid '%s' (%s)" % (
                            value.srid, srid, err)
                    )
                    wkt = ''
            else:
                wkt = value.wkt

            # Setting the parameter WKT with that of the transformed
            # geometry.
            self.params['wkt'] = wkt

        return loader.render_to_string(self.template, self.params,
                                       context_instance=geo_context)

    def map_options(self):
        "Builds the map options hash for the OpenLayers template."
        # JavaScript construction utilities for the Bounds and Projection.
        def ol_bounds(extent):
            return 'new OpenLayers.Bounds(%s)' % str(extent)
        def ol_projection(srid):
            return 'new OpenLayers.Projection("EPSG:%s")' % srid

        # An array of the parameter name, the name of their OpenLayers
        # counterpart, and the type of variable they are.
        map_types = [('srid', 'projection', 'srid'),
                     ('display_srid', 'displayProjection', 'srid'),
                     ('units', 'units', str),
                     ('max_resolution', 'maxResolution', float),
                     ('max_extent', 'maxExtent', 'bounds'),
                     ('num_zoom', 'numZoomLevels', int),
                     ('max_zoom', 'maxZoomLevels', int),
                     ('min_zoom', 'minZoomLevel', int),
                     ]

        # Building the map options hash.
        map_options = {}
        for param_name, js_name, option_type in map_types:
            if self.params.get(param_name, False):
                if option_type == 'srid':
                    value = ol_projection(self.params[param_name])
                elif option_type == 'bounds':
                    value = ol_bounds(self.params[param_name])
                elif option_type in (float, int):
                    value = self.params[param_name]
                elif option_type in (str,):
                    value = '"%s"' % self.params[param_name]
                else:
                    # Fix: previously a bare `raise TypeError` with no message,
                    # which made misconfigured map_types hard to debug.
                    raise TypeError('Unknown option type: %r' % (option_type,))
                map_options[js_name] = value
        return map_options

    def _has_changed(self, initial, data):
        """ Compare geographic value of data with its initial value. """
        # Ensure we are dealing with a geographic object
        if isinstance(initial, six.string_types):
            try:
                initial = GEOSGeometry(initial)
            except (GEOSException, ValueError):
                initial = None

        # Only do a geographic comparison if both values are available
        if initial and data:
            data = fromstr(data)
            data.transform(initial.srid)
            # If the initial value was not added by the browser, the geometry
            # provided may be slightly different, the first time it is saved.
            # The comparison is done with a very low tolerance.
            return not initial.equals_exact(data, tolerance=0.000001)
        else:
            # Check for change of state of existence
            return bool(initial) != bool(data)
| apache-2.0 |
maartenq/ansible | lib/ansible/module_utils/network/asa/asa.py | 86 | 5729 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network.common.utils import to_list, EntityCollection
from ansible.module_utils.connection import exec_command
from ansible.module_utils.connection import Connection, ConnectionError
# Module-level caches: device config output keyed by command, and a single
# shared Connection reused across calls.
_DEVICE_CONFIGS = {}
_CONNECTION = None

# Connection/authentication options accepted under the `provider` key.
asa_provider_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
    'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
    'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
    'timeout': dict(type='int'),
    'context': dict(),
    'passwords': dict()
}
asa_argument_spec = {
    'provider': dict(type='dict', options=asa_provider_spec),
}

# Legacy top-level options, deprecated in favour of `provider`.
asa_top_spec = {
    'host': dict(removed_in_version=2.9),
    'port': dict(removed_in_version=2.9, type='int'),
    'username': dict(removed_in_version=2.9),
    'password': dict(removed_in_version=2.9, no_log=True),
    'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
    'authorize': dict(type='bool'),
    'auth_pass': dict(removed_in_version=2.9, no_log=True),
    'timeout': dict(removed_in_version=2.9, type='int'),
    'context': dict(),
    'passwords': dict()
}
asa_argument_spec.update(asa_top_spec)

# Shape of a single CLI command entry (see to_commands()).
command_spec = {
    'command': dict(key=True),
    'prompt': dict(),
    'answer': dict()
}
def get_provider_argspec():
    """Return the provider argument spec for the asa platform."""
    return asa_provider_spec


def check_args(module):
    """Placeholder argument check; no extra validation is required."""
    pass
def get_connection(module):
    """Return the shared persistent Connection, creating it on first use.

    On first connect, switches to the configured ASA context (if any).
    """
    global _CONNECTION
    if _CONNECTION:
        return _CONNECTION
    _CONNECTION = Connection(module._socket_path)

    context = module.params['context']
    if context:
        if context == 'system':
            command = 'changeto system'
        else:
            command = 'changeto context %s' % context
        _CONNECTION.get(command)

    return _CONNECTION
def to_commands(module, commands):
    """Normalize `commands` into a list of {command, prompt, answer} dicts.

    In check mode, warns about (but still returns) non-`show` commands.
    """
    if not isinstance(commands, list):
        raise AssertionError('argument must be of type <list>')

    transform = EntityCollection(module, command_spec)
    commands = transform(commands)

    # NOTE(review): `index` is unused; enumerate looks unnecessary here.
    for index, item in enumerate(commands):
        if module.check_mode and not item['command'].startswith('show'):
            module.warn('only show commands are supported when using check '
                        'mode, not executing `%s`' % item['command'])

    return commands
def run_commands(module, commands, check_rc=True):
    """Run one or more CLI commands on the device and return their output.

    `check_rc` is accepted for interface compatibility but not used here.
    """
    connection = get_connection(module)
    prepared = to_commands(module, to_list(commands))
    return [
        to_text(connection.get(**cmd), errors='surrogate_then_replace')
        for cmd in prepared
    ]
def get_config(module, flags=None):
    """Fetch the device running-config, caching results per command string.

    When `passwords` is set, uses `more system:running-config` so passwords
    are shown unmasked; note that `flags` are ignored on that path.
    """
    flags = [] if flags is None else flags

    passwords = module.params['passwords']
    if passwords:
        cmd = 'more system:running-config'
    else:
        cmd = 'show running-config '
        cmd += ' '.join(flags)
        cmd = cmd.strip()

    try:
        # Cache hit: reuse previously fetched output for this exact command.
        return _DEVICE_CONFIGS[cmd]
    except KeyError:
        conn = get_connection(module)
        out = conn.get(cmd)
        cfg = to_text(out, errors='surrogate_then_replace').strip()
        _DEVICE_CONFIGS[cmd] = cfg
        return cfg
def load_config(module, config):
    """Push configuration lines to the device, failing the module on error."""
    try:
        conn = get_connection(module)
        conn.edit_config(config)
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))
def get_defaults_flag(module):
    """Probe `show running-config ?` to learn which defaults flag the
    device supports: 'all' if available, otherwise 'full'."""
    rc, out, err = exec_command(module, 'show running-config ?')
    text = to_text(out, errors='surrogate_then_replace')

    # First token of every non-empty help line is a candidate flag.
    commands = {
        line.strip().split()[0]
        for line in text.splitlines()
        if line
    }

    return 'all' if 'all' in commands else 'full'
| gpl-3.0 |
sdague/home-assistant | homeassistant/components/w800rf32/__init__.py | 24 | 1619 | """Support for w800rf32 devices."""
import logging
import W800rf32 as w800
import voluptuous as vol
from homeassistant.const import (
CONF_DEVICE,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
DATA_W800RF32 = "data_w800rf32"
DOMAIN = "w800rf32"
W800RF32_DEVICE = "w800rf32_{}"
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Required(CONF_DEVICE): cv.string})}, extra=vol.ALLOW_EXTRA
)
def setup(hass, config):
    """Set up the w800rf32 component."""
    # Declare the Handle event
    def handle_receive(event):
        """Handle received messages from w800rf32 gateway."""
        # Log event
        if not event.device:
            return
        _LOGGER.debug("Receive W800rf32 event in handle_receive")

        # Get device_type from device_id in hass.data
        device_id = event.device.lower()
        signal = W800RF32_DEVICE.format(device_id)
        # Fan the event out to any entity listening on this device's signal.
        dispatcher_send(hass, signal, event)

    # device --> /dev/ttyUSB0
    device = config[DOMAIN][CONF_DEVICE]
    w800_object = w800.Connect(device, None)

    def _start_w800rf32(event):
        # Attach the callback only once Home Assistant has fully started.
        w800_object.event_callback = handle_receive

    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_w800rf32)

    def _shutdown_w800rf32(event):
        """Close connection with w800rf32."""
        w800_object.close_connection()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown_w800rf32)

    hass.data[DATA_W800RF32] = w800_object
    return True
| apache-2.0 |
cloudera/hue | desktop/core/ext-py/Django-1.11.29/tests/foreign_object/tests.py | 9 | 20137 | import datetime
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db import models
from django.db.models.fields.related import ForeignObject
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import isolate_apps
from django.utils import translation
from .models import (
Article, ArticleIdea, ArticleTag, ArticleTranslation, Country, Friendship,
Group, Membership, NewsArticle, Person,
)
# Note that these tests are testing internal implementation details.
# ForeignObject is not part of public API.
class MultiColumnFKTests(TestCase):
def setUp(self):
    """Create the countries, people and groups shared by these tests."""
    # Creating countries
    self.usa = Country.objects.create(name="United States of America")
    self.soviet_union = Country.objects.create(name="Soviet Union")
    # Creating People.  (A stray bare `Person()` instantiation that was
    # never saved or referenced has been removed as dead code.)
    self.bob = Person()
    self.bob.name = 'Bob'
    self.bob.person_country = self.usa
    self.bob.save()

    self.jim = Person.objects.create(name='Jim', person_country=self.usa)
    self.george = Person.objects.create(name='George', person_country=self.usa)

    self.jane = Person.objects.create(name='Jane', person_country=self.soviet_union)
    self.mark = Person.objects.create(name='Mark', person_country=self.soviet_union)
    self.sam = Person.objects.create(name='Sam', person_country=self.soviet_union)

    # Creating Groups
    self.kgb = Group.objects.create(name='KGB', group_country=self.soviet_union)
    self.cia = Group.objects.create(name='CIA', group_country=self.usa)
    self.republican = Group.objects.create(name='Republican', group_country=self.usa)
    self.democrat = Group.objects.create(name='Democrat', group_country=self.usa)
def test_get_succeeds_on_multicolumn_match(self):
    """The forward FK resolves when both id and country_id match."""
    # Membership objects have access to their related Person if both
    # country_ids match between them
    membership = Membership.objects.create(
        membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)

    person = membership.person
    self.assertEqual((person.id, person.name), (self.bob.id, "Bob"))

def test_get_fails_on_multicolumn_mismatch(self):
    """The forward FK raises DoesNotExist when country_ids differ."""
    # Membership objects returns DoesNotExist error when the there is no
    # Person with the same id and country_id
    membership = Membership.objects.create(
        membership_country_id=self.usa.id, person_id=self.jane.id, group_id=self.cia.id)

    with self.assertRaises(Person.DoesNotExist):
        getattr(membership, 'person')

def test_reverse_query_returns_correct_result(self):
    """The reverse relation only returns rows whose country_ids match."""
    # Creating a valid membership because it has the same country has the person
    Membership.objects.create(
        membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)

    # Creating an invalid membership because it has a different country has the person
    Membership.objects.create(
        membership_country_id=self.soviet_union.id, person_id=self.bob.id,
        group_id=self.republican.id)

    self.assertQuerysetEqual(
        self.bob.membership_set.all(), [
            self.cia.id
        ],
        attrgetter("group_id")
    )
def test_query_filters_correctly(self):
    """Filtering across the multi-column FK honors the composite join."""
    # Creating two valid memberships
    Membership.objects.create(
        membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
    Membership.objects.create(
        membership_country_id=self.usa.id, person_id=self.jim.id,
        group_id=self.cia.id)

    # Creating an invalid membership
    Membership.objects.create(membership_country_id=self.soviet_union.id,
                              person_id=self.george.id, group_id=self.cia.id)

    self.assertQuerysetEqual(
        Membership.objects.filter(person__name__contains='o'), [
            self.bob.id
        ],
        attrgetter("person_id")
    )

def test_reverse_query_filters_correctly(self):
    """Reverse filtering across the multi-column FK honors the join."""
    timemark = datetime.datetime.utcnow()
    timedelta = datetime.timedelta(days=1)

    # Creating two valid memberships
    Membership.objects.create(
        membership_country_id=self.usa.id, person_id=self.bob.id,
        group_id=self.cia.id, date_joined=timemark - timedelta)
    Membership.objects.create(
        membership_country_id=self.usa.id, person_id=self.jim.id,
        group_id=self.cia.id, date_joined=timemark + timedelta)

    # Creating an invalid membership
    Membership.objects.create(
        membership_country_id=self.soviet_union.id, person_id=self.george.id,
        group_id=self.cia.id, date_joined=timemark + timedelta)

    self.assertQuerysetEqual(
        Person.objects.filter(membership__date_joined__gte=timemark), [
            'Jim'
        ],
        attrgetter('name')
    )

def test_forward_in_lookup_filters_correctly(self):
    """`__in` lookups against lists and querysets respect the composite FK."""
    Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
                              group_id=self.cia.id)
    Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
                              group_id=self.cia.id)

    # Creating an invalid membership
    Membership.objects.create(
        membership_country_id=self.soviet_union.id, person_id=self.george.id,
        group_id=self.cia.id)

    self.assertQuerysetEqual(
        Membership.objects.filter(person__in=[self.george, self.jim]), [
            self.jim.id,
        ],
        attrgetter('person_id')
    )
    self.assertQuerysetEqual(
        Membership.objects.filter(person__in=Person.objects.filter(name='Jim')), [
            self.jim.id,
        ],
        attrgetter('person_id')
    )
def test_double_nested_query(self):
    """Nested `__in` subqueries traverse the composite FK both ways."""
    m1 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
                                   group_id=self.cia.id)
    m2 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
                                   group_id=self.cia.id)
    Friendship.objects.create(from_friend_country_id=self.usa.id, from_friend_id=self.bob.id,
                              to_friend_country_id=self.usa.id, to_friend_id=self.jim.id)
    self.assertSequenceEqual(
        Membership.objects.filter(
            person__in=Person.objects.filter(
                from_friend__in=Friendship.objects.filter(to_friend__in=Person.objects.all())
            )
        ),
        [m1]
    )
    self.assertSequenceEqual(
        Membership.objects.exclude(
            person__in=Person.objects.filter(
                from_friend__in=Friendship.objects.filter(to_friend__in=Person.objects.all())
            )
        ),
        [m2]
    )

def test_select_related_foreignkey_forward_works(self):
    """select_related over the composite FK loads people in one query."""
    Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
    Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)

    with self.assertNumQueries(1):
        people = [m.person for m in Membership.objects.select_related('person').order_by('pk')]

    normal_people = [m.person for m in Membership.objects.all().order_by('pk')]
    self.assertEqual(people, normal_people)

def test_prefetch_foreignkey_forward_works(self):
    """prefetch_related over the composite FK loads people in two queries."""
    Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
    Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)

    with self.assertNumQueries(2):
        people = [
            m.person for m in Membership.objects.prefetch_related('person').order_by('pk')]

    normal_people = [m.person for m in Membership.objects.order_by('pk')]
    self.assertEqual(people, normal_people)

def test_prefetch_foreignkey_reverse_works(self):
    """prefetch_related on the reverse relation loads sets in two queries."""
    Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
    Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
    with self.assertNumQueries(2):
        membership_sets = [
            list(p.membership_set.all())
            for p in Person.objects.prefetch_related('membership_set').order_by('pk')]

    normal_membership_sets = [list(p.membership_set.all())
                              for p in Person.objects.order_by('pk')]
    self.assertEqual(membership_sets, normal_membership_sets)
# M2M-through tests: the through model (Membership) carries a country column,
# so rows whose country mismatches the endpoints must not surface as members.
def test_m2m_through_forward_returns_valid_members(self):
    # We start out by making sure that the Group 'CIA' has no members.
    self.assertQuerysetEqual(
        self.cia.members.all(),
        []
    )
    Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
    Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.cia)
    # Let's check to make sure that it worked. Bob and Jim should be members of the CIA.
    self.assertQuerysetEqual(
        self.cia.members.all(), [
            'Bob',
            'Jim'
        ], attrgetter("name")
    )
def test_m2m_through_reverse_returns_valid_members(self):
    # We start out by making sure that Bob is in no groups.
    self.assertQuerysetEqual(
        self.bob.groups.all(),
        []
    )
    Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
    Membership.objects.create(membership_country=self.usa, person=self.bob,
                              group=self.republican)
    # Bob should be in the CIA and a Republican
    self.assertQuerysetEqual(
        self.bob.groups.all(), [
            'CIA',
            'Republican'
        ], attrgetter("name")
    )
def test_m2m_through_forward_ignores_invalid_members(self):
    # We start out by making sure that the Group 'CIA' has no members.
    self.assertQuerysetEqual(
        self.cia.members.all(),
        []
    )
    # Something adds jane to group CIA but Jane is in Soviet Union which isn't CIA's country
    Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)
    # There should still be no members in CIA
    self.assertQuerysetEqual(
        self.cia.members.all(),
        []
    )
def test_m2m_through_reverse_ignores_invalid_members(self):
    # We start out by making sure that Jane has no groups.
    self.assertQuerysetEqual(
        self.jane.groups.all(),
        []
    )
    # Something adds jane to group CIA but Jane is in Soviet Union which isn't CIA's country
    Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)
    # Jane should still not be in any groups
    self.assertQuerysetEqual(
        self.jane.groups.all(),
        []
    )
def test_m2m_through_on_self_works(self):
    """Self-referential m2m (Friendship) through a composite FK."""
    self.assertQuerysetEqual(
        self.jane.friends.all(),
        []
    )
    Friendship.objects.create(
        from_friend_country=self.jane.person_country, from_friend=self.jane,
        to_friend_country=self.george.person_country, to_friend=self.george)
    self.assertQuerysetEqual(
        self.jane.friends.all(),
        ['George'], attrgetter("name")
    )
def test_m2m_through_on_self_ignores_mismatch_columns(self):
    self.assertQuerysetEqual(self.jane.friends.all(), [])
    # Note that we use ids instead of instances. This is because instances on ForeignObject
    # properties will set all related field off of the given instance
    Friendship.objects.create(
        from_friend_id=self.jane.id, to_friend_id=self.george.id,
        to_friend_country_id=self.jane.person_country_id,
        from_friend_country_id=self.george.person_country_id)
    # Country columns are crossed on purpose, so the join must not match.
    self.assertQuerysetEqual(self.jane.friends.all(), [])
def test_prefetch_related_m2m_forward_works(self):
    Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
    Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
    with self.assertNumQueries(2):
        members_lists = [list(g.members.all())
                         for g in Group.objects.prefetch_related('members')]
    normal_members_lists = [list(g.members.all()) for g in Group.objects.all()]
    self.assertEqual(members_lists, normal_members_lists)
def test_prefetch_related_m2m_reverse_works(self):
    Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
    Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
    with self.assertNumQueries(2):
        groups_lists = [list(p.groups.all()) for p in Person.objects.prefetch_related('groups')]
    normal_groups_lists = [list(p.groups.all()) for p in Person.objects.all()]
    self.assertEqual(groups_lists, normal_groups_lists)
@translation.override('fi')
def test_translations(self):
    """The language-dependent ``active_translation`` relation filters by the
    active translation (here forced to 'fi' by the decorator)."""
    a1 = Article.objects.create(pub_date=datetime.date.today())
    at1_fi = ArticleTranslation(article=a1, lang='fi', title='Otsikko', body='Diipadaapa')
    at1_fi.save()
    at2_en = ArticleTranslation(article=a1, lang='en', title='Title', body='Lalalalala')
    at2_en.save()
    self.assertEqual(Article.objects.get(pk=a1.pk).active_translation, at1_fi)
    with self.assertNumQueries(1):
        fetched = Article.objects.select_related('active_translation').get(
            active_translation__title='Otsikko')
        self.assertEqual(fetched.active_translation.title, 'Otsikko')
    a2 = Article.objects.create(pub_date=datetime.date.today())
    at2_fi = ArticleTranslation(article=a2, lang='fi', title='Atsikko', body='Diipadaapa',
                                abstract='dipad')
    at2_fi.save()
    a3 = Article.objects.create(pub_date=datetime.date.today())
    at3_en = ArticleTranslation(article=a3, lang='en', title='A title', body='lalalalala',
                                abstract='lala')
    at3_en.save()
    # Test model initialization with active_translation field.
    a3 = Article(id=a3.id, pub_date=a3.pub_date, active_translation=at3_en)
    a3.save()
    # a2's 'fi' translation has an abstract; a1 and a3 (no 'fi' abstract) match.
    self.assertEqual(
        list(Article.objects.filter(active_translation__abstract=None)),
        [a1, a3])
    self.assertEqual(
        list(Article.objects.filter(active_translation__abstract=None,
                                    active_translation__pk__isnull=False)),
        [a1])
    with translation.override('en'):
        self.assertEqual(
            list(Article.objects.filter(active_translation__abstract=None)),
            [a1, a2])
def test_foreign_key_raises_informative_does_not_exist(self):
    """Accessing an unset FK raises DoesNotExist with a helpful message."""
    referrer = ArticleTranslation()
    with self.assertRaisesMessage(Article.DoesNotExist, 'ArticleTranslation has no article'):
        referrer.article
def test_foreign_key_related_query_name(self):
    # related_query_name 'tag' is usable; the default 'tags' is not.
    a1 = Article.objects.create(pub_date=datetime.date.today())
    ArticleTag.objects.create(article=a1, name="foo")
    self.assertEqual(Article.objects.filter(tag__name="foo").count(), 1)
    self.assertEqual(Article.objects.filter(tag__name="bar").count(), 0)
    with self.assertRaises(FieldError):
        Article.objects.filter(tags__name="foo")
def test_many_to_many_related_query_name(self):
    # related_query_name 'idea_things' is usable; the default 'ideas' is not.
    a1 = Article.objects.create(pub_date=datetime.date.today())
    i1 = ArticleIdea.objects.create(name="idea1")
    a1.ideas.add(i1)
    self.assertEqual(Article.objects.filter(idea_things__name="idea1").count(), 1)
    self.assertEqual(Article.objects.filter(idea_things__name="idea2").count(), 0)
    with self.assertRaises(FieldError):
        Article.objects.filter(ideas__name="idea1")
@translation.override('fi')
def test_inheritance(self):
    """active_translation also works through multi-table inheritance."""
    na = NewsArticle.objects.create(pub_date=datetime.date.today())
    ArticleTranslation.objects.create(
        article=na, lang="fi", title="foo", body="bar")
    self.assertSequenceEqual(
        NewsArticle.objects.select_related('active_translation'),
        [na]
    )
    with self.assertNumQueries(1):
        self.assertEqual(
            NewsArticle.objects.select_related(
                'active_translation')[0].active_translation.title,
            "foo")
@skipUnlessDBFeature('has_bulk_insert')
def test_batch_create_foreign_object(self):
    """bulk_create() works for models related via ForeignObject."""
    objs = [Person(name="abcd_%s" % i, person_country=self.usa) for i in range(0, 5)]
    Person.objects.bulk_create(objs, 10)
def test_isnull_lookup(self):
    Membership.objects.create(membership_country=self.usa, person=self.bob, group_id=None)
    Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
    self.assertQuerysetEqual(
        Membership.objects.filter(group__isnull=True),
        ['<Membership: Bob is a member of NULL>']
    )
    self.assertQuerysetEqual(
        Membership.objects.filter(group__isnull=False),
        ['<Membership: Bob is a member of CIA>']
    )
class TestModelCheckTests(SimpleTestCase):
    """System checks for composite ForeignObject declarations."""
    @isolate_apps('foreign_object')
    def test_check_composite_foreign_object(self):
        class Parent(models.Model):
            a = models.PositiveIntegerField()
            b = models.PositiveIntegerField()
            class Meta:
                unique_together = (('a', 'b'),)
        class Child(models.Model):
            a = models.PositiveIntegerField()
            b = models.PositiveIntegerField()
            value = models.CharField(max_length=255)
            parent = ForeignObject(
                Parent,
                on_delete=models.SET_NULL,
                from_fields=('a', 'b'),
                to_fields=('a', 'b'),
                related_name='children',
            )
        # to_fields exactly match Parent's unique_together -> no check errors.
        self.assertEqual(Child._meta.get_field('parent').check(from_model=Child), [])
    @isolate_apps('foreign_object')
    def test_check_subset_composite_foreign_object(self):
        class Parent(models.Model):
            a = models.PositiveIntegerField()
            b = models.PositiveIntegerField()
            c = models.PositiveIntegerField()
            class Meta:
                unique_together = (('a', 'b'),)
        class Child(models.Model):
            a = models.PositiveIntegerField()
            b = models.PositiveIntegerField()
            c = models.PositiveIntegerField()
            d = models.CharField(max_length=255)
            parent = ForeignObject(
                Parent,
                on_delete=models.SET_NULL,
                from_fields=('a', 'b', 'c'),
                to_fields=('a', 'b', 'c'),
                related_name='children',
            )
        # A unique_together SUBSET ('a', 'b') of to_fields also satisfies the check.
        self.assertEqual(Child._meta.get_field('parent').check(from_model=Child), [])
class TestExtraJoinFilterQ(TestCase):
    """The Q-based extra join condition on ``active_translation_q``."""
    @translation.override('fi')
    def test_extra_join_filter_q(self):
        a = Article.objects.create(pub_date=datetime.datetime.today())
        ArticleTranslation.objects.create(article=a, lang='fi', title='title', body='body')
        qs = Article.objects.all()
        # Without select_related the translation costs an extra query.
        with self.assertNumQueries(2):
            self.assertEqual(qs[0].active_translation_q.title, 'title')
        qs = qs.select_related('active_translation_q')
        with self.assertNumQueries(1):
            self.assertEqual(qs[0].active_translation_q.title, 'title')
| apache-2.0 |
almarklein/visvis.dev | functions/solidCylinder.py | 5 | 4868 | # -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
import visvis as vv
import numpy as np
from visvis.pypoints import Pointset
def solidCylinder(translation=None, scaling=None, direction=None, rotation=None,
                  N=16, M=16, axesAdjust=True, axes=None):
    """ solidCylinder(
            translation=None, scaling=None, direction=None, rotation=None,
            N=16, M=16, axesAdjust=True, axes=None)

    Creates a cylinder object with quad faces and its base at the origin.
    Returns an OrientableMesh instance.

    Parameters
    ----------
    Note that translation, scaling, and direction can also be given
    using a Point instance.
    translation : (dx, dy, dz), optional
        The translation in world units of the created world object.
    scaling: (sx, sy, sz), optional
        The scaling in world units of the created world object.
    direction: (nx, ny, nz), optional
        Normal vector that indicates the direction of the created world object.
    rotation: scalar, optional
        The angle (in degrees) to rotate the created world object around its
        direction vector.
    N : int
        The number of subdivisions around its axis. If smaller
        than 8, flat shading is used instead of smooth shading.
    M : int
        The number of subdivisions along its axis. If smaller
        than 8, flat shading is used instead of smooth shading.
    axesAdjust : bool
        If True, this function will call axes.SetLimits(), and set
        the camera type to 3D. If daspectAuto has not been set yet,
        it is set to False.
    axes : Axes instance
        Display the bars in the given axes, or the current axes if not given.
    """
    # Note that the number of vertices around the axis is N+1. This
    # would not be necessary per se, but it helps create a nice closed
    # texture when it is mapped. There are N number of faces though.
    # Similarly, to obtain M faces along the axis, we need M+1
    # vertices.
    # Quick access
    pi2 = np.pi*2
    cos = np.cos
    sin = np.sin
    sl = N+1  # stride: vertices per ring (one duplicated seam vertex)
    # Calculate vertices, normals and texcords
    vertices = Pointset(3)
    normals = Pointset(3)
    texcords = Pointset(2)
    # Round part: M+1 rings of N+1 vertices, z running from 1 down to 0.
    for m in range(M+1):
        z = 1.0 - float(m)/M  # between 0 and 1
        v = float(m)/M
        #
        for n in range(N+1):
            b = pi2 * float(n) / N
            u = float(n) / (N)
            x = cos(b)
            y = sin(b)
            vertices.append(x,y,z)
            normals.append(x,y,0)
            texcords.append(u,v)
    # Top cap: two rings (rim at m=1, degenerate center ring at m=0).
    for m in range(2):
        for n in range(N+1):
            b = pi2 * float(n) / N
            u = float(n) / (N)
            x = cos(b) * m  # todo: check which ones are frontfacing!
            y = sin(b) * m
            vertices.append(x,y,1)
            normals.append(0,0,1)
            texcords.append(u,0)
    # Bottom cap: same idea, rim first (m=0), center second.
    for m in range(2):
        for n in range(N+1):
            b = pi2 * float(n) / N
            u = float(n) / (N)
            x = cos(b) * (1-m)
            y = sin(b) * (1-m)
            vertices.append(x,y,0)
            normals.append(0,0,-1)
            texcords.append(u,1)
    # Normalize normals
    normals = normals.normalize()
    # Calculate indices: one quad per (ring, segment) pair on the tube,
    # then one extra quad strip for each cap (rows M+1..M+2 and M+3..M+4).
    indices = []
    for j in range(M):
        for i in range(N):
            #indices.extend([j*sl+i, j*sl+i+1, (j+1)*sl+i+1, (j+1)*sl+i])
            indices.extend([(j+1)*sl+i, (j+1)*sl+i+1, j*sl+i+1, j*sl+i])
    j = M+1
    for i in range(N):
        indices.extend([(j+1)*sl+i, (j+1)*sl+i+1, j*sl+i+1, j*sl+i])
    j = M+3
    for i in range(N):
        indices.extend([(j+1)*sl+i, (j+1)*sl+i+1, j*sl+i+1, j*sl+i])
    # Make indices a numpy array
    indices = np.array(indices, dtype=np.uint32)
    ## Visualization
    # Create axes
    if axes is None:
        axes = vv.gca()
    # Create mesh
    m = vv.OrientableMesh(axes, vertices, indices, normals, values=texcords,
                          verticesPerFace=4)
    #
    if translation is not None:
        m.translation = translation
    if scaling is not None:
        m.scaling = scaling
    if direction is not None:
        m.direction = direction
    if rotation is not None:
        m.rotation = rotation
    # Set flat shading?
    if N<8 or M<8:
        m.faceShading = 'flat'
    # Adjust axes
    if axesAdjust:
        if axes.daspectAuto is None:
            axes.daspectAuto = False
        axes.cameraType = '3d'
        axes.SetLimits()
    # Done
    axes.Draw()
    return m
if __name__ == '__main__':
    # Quick visual check: a flat-shaded hexagonal prism next to a
    # translated, tall smooth cylinder.
    vv.figure()
    m1 = solidCylinder(N=6)
    m2 = solidCylinder(translation=(0,0,0.1), scaling=(0.5,0.5,2.5))
| bsd-3-clause |
camlorn/Unspoken | libaudioverse/bindings/python/examples/sim3d.py | 1 | 1046 | #demonstrates how to use the 3d simulation.
# Demonstrates the Libaudioverse 3D simulation: an HRTF-panned looping source
# playing a user-chosen file, repositioned interactively from stdin.
import libaudioverse
# `collections.Sized` was removed from the `collections` namespace in
# Python 3.10; it lives in `collections.abc` since 3.3. Import with a
# fallback so the demo keeps working on old interpreters too.
try:
    from collections.abc import Sized
except ImportError:  # pragma: no cover - very old Pythons
    from collections import Sized
libaudioverse.initialize()
sim = libaudioverse.Simulation()
sim.set_output_device()
world = libaudioverse.EnvironmentNode(sim, "default")
world.panning_strategy = libaudioverse.PanningStrategies.hrtf
source = libaudioverse.SourceNode(sim, world)
print("Enter a path to a sound file.")
filepath = input()
# Decode the file into a buffer and loop it through the 3D source.
n = libaudioverse.BufferNode(sim)
b = libaudioverse.Buffer(sim)
b.load_from_file(filepath)
n.buffer = b
n.connect(0, source, 0)
n.looping.value = True
world.connect_simulation(0)
print("""Enter python expressions that evaluate to 3-tuples (x, y, z).
Positive x is to your right, positive y is above you, and positive z is behind you.
Enter quit to quit.""")
while True:
    command = input()
    if command == 'quit':
        break
    # SECURITY: eval() executes arbitrary user input. Acceptable for a local
    # interactive demo, but ast.literal_eval would be the safe choice.
    vect = eval(command)
    if not isinstance(vect, Sized) or len(vect) != 3:
        print("Must evaluate to a 3-tuple. Try again")
        continue
    source.position.value = vect
libaudioverse.shutdown() | gpl-2.0 |
taniwha/io_object_mu | quickhull/testharness.py | 2 | 1639 | # vim:ts=4:et
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import sys
import os
import getopt
import gzip
import time
from binary import BinaryReader
from rawmesh import RawMesh
from quickhull import QuickHull

# Test harness: run QuickHull over one or more dumped meshes (optionally
# gzip-compressed) given on the command line, timing each hull; exits
# non-zero if any hull computation reported an error.
shortopts = ''
longopts = [
    'dump',
]
options, datafiles = getopt.getopt(sys.argv[1:], shortopts, longopts)
for opt, arg in options:
    if opt == "--dump":
        # Class-level flag: make QuickHull dump its faces while running.
        QuickHull.dump_faces = True
error = False
for df in datafiles:
    # Transparently accept gzip-compressed dump files by extension.
    if df[-3:] == ".gz":
        f = gzip.open(df, "rb")
    else:
        f = open(df, "rb")
    br = BinaryReader(f)
    mesh = RawMesh()
    mesh.read(br)
    print(f"{df} - {len(mesh.verts)} points")
    # NOTE(review): assumes br.close() also closes the underlying file
    # object `f` — confirm in BinaryReader, otherwise the handle leaks.
    br.close()
    qh = QuickHull(mesh)
    start = time.perf_counter()
    hull = qh.GetHull()
    error |= qh.error
    end = time.perf_counter()
    print(f" - {len(hull)} faces {(end - start) * 1000}ms")
sys.exit(1 if error else 0)
| gpl-2.0 |
hainm/rope | ropetest/refactor/inlinetest.py | 5 | 30374 | try:
import unittest2 as unittest
except ImportError:
import unittest
import rope.base.exceptions
from rope.refactor import inline
from ropetest import testutils
class InlineTest(unittest.TestCase):
    """Tests for rope's inline refactoring of variables, functions and methods."""
    def setUp(self):  # pylint: disable=C0103
        super(InlineTest, self).setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore
        # Two scratch modules most tests write source into.
        self.mod = testutils.create_module(self.project, 'mod')
        self.mod2 = testutils.create_module(self.project, 'mod2')
    def tearDown(self):  # pylint: disable=C0103
        testutils.remove_project(self.project)
        super(InlineTest, self).tearDown()
    def _inline(self, code, offset, **kwds):
        # Write `code` into mod, inline the name at `offset`, return new source.
        self.mod.write(code)
        self._inline2(self.mod, offset, **kwds)
        return self.mod.read()
    def _inline2(self, resource, offset, **kwds):
        # Run the inline refactoring on `resource` at `offset` and apply it.
        inliner = inline.create_inline(self.project, resource, offset)
        changes = inliner.get_changes(**kwds)
        self.project.do(changes)
        return self.mod.read()
# --- Inlining local variables, attributes, and unused functions ---
def test_simple_case(self):
    code = 'a_var = 10\nanother_var = a_var\n'
    refactored = self._inline(code, code.index('a_var') + 1)
    self.assertEquals('another_var = 10\n', refactored)
def test_empty_case(self):
    code = 'a_var = 10\n'
    refactored = self._inline(code, code.index('a_var') + 1)
    self.assertEquals('', refactored)
def test_long_definition(self):
    code = 'a_var = 10 + (10 + 10)\nanother_var = a_var\n'
    refactored = self._inline(code, code.index('a_var') + 1)
    self.assertEquals('another_var = 10 + (10 + 10)\n', refactored)
def test_explicit_continuation(self):
    code = 'a_var = (10 +\n 10)\nanother_var = a_var\n'
    refactored = self._inline(code, code.index('a_var') + 1)
    self.assertEquals('another_var = (10 + 10)\n', refactored)
def test_implicit_continuation(self):
    code = 'a_var = 10 +\\\n 10\nanother_var = a_var\n'
    refactored = self._inline(code, code.index('a_var') + 1)
    self.assertEquals('another_var = 10 + 10\n', refactored)
def test_inlining_at_the_end_of_input(self):
    code = 'a = 1\nb = a'
    refactored = self._inline(code, code.index('a') + 1)
    self.assertEquals('b = 1', refactored)
def test_on_classes(self):
    # Classes cannot be inlined.
    code = 'class AClass(object):\n pass\n'
    with self.assertRaises(rope.base.exceptions.RefactoringError):
        self._inline(code, code.index('AClass') + 1)
def test_multiple_assignments(self):
    code = 'a_var = 10\na_var = 20\n'
    with self.assertRaises(rope.base.exceptions.RefactoringError):
        self._inline(code, code.index('a_var') + 1)
def test_tuple_assignments(self):
    code = 'a_var, another_var = (20, 30)\n'
    with self.assertRaises(rope.base.exceptions.RefactoringError):
        self._inline(code, code.index('a_var') + 1)
def test_on_unknown_vars(self):
    code = 'a_var = another_var\n'
    with self.assertRaises(rope.base.exceptions.RefactoringError):
        self._inline(code, code.index('another_var') + 1)
def test_attribute_inlining(self):
    code = 'class A(object):\n def __init__(self):\n' \
        ' self.an_attr = 3\n range(self.an_attr)\n'
    refactored = self._inline(code, code.index('an_attr') + 1)
    expected = 'class A(object):\n def __init__(self):\n' \
        ' range(3)\n'
    self.assertEquals(expected, refactored)
def test_attribute_inlining2(self):
    code = 'class A(object):\n def __init__(self):\n' \
        ' self.an_attr = 3\n range(self.an_attr)\n' \
        'a = A()\nrange(a.an_attr)'
    refactored = self._inline(code, code.index('an_attr') + 1)
    expected = 'class A(object):\n def __init__(self):\n' \
        ' range(3)\n' \
        'a = A()\nrange(3)'
    self.assertEquals(expected, refactored)
def test_a_function_with_no_occurance(self):
    self.mod.write('def a_func():\n pass\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('', self.mod.read())
def test_a_function_with_no_occurance2(self):
    self.mod.write('a_var = 10\ndef a_func():\n pass\nprint(a_var)\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('a_var = 10\nprint(a_var)\n', self.mod.read())
# --- Replacing call sites with the function/method body ---
def test_replacing_calls_with_function_definition_in_other_modules(self):
    self.mod.write('def a_func():\n print(1)\n')
    mod1 = testutils.create_module(self.project, 'mod1')
    mod1.write('import mod\nmod.a_func()\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('import mod\nprint(1)\n', mod1.read())
def test_replacing_calls_with_function_definition_in_other_modules2(self):
    self.mod.write('def a_func():\n print(1)\n')
    mod1 = testutils.create_module(self.project, 'mod1')
    mod1.write('import mod\nif True:\n mod.a_func()\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('import mod\nif True:\n print(1)\n', mod1.read())
def test_replacing_calls_with_method_definition_in_other_modules(self):
    self.mod.write('class A(object):\n var = 10\n'
                   ' def a_func(self):\n print(1)\n')
    mod1 = testutils.create_module(self.project, 'mod1')
    mod1.write('import mod\nmod.A().a_func()\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('import mod\nprint(1)\n', mod1.read())
    self.assertEquals('class A(object):\n var = 10\n', self.mod.read())
def test_replacing_calls_with_function_definition_in_defining_module(self):
    self.mod.write('def a_func():\n print(1)\na_func()\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('print(1)\n', self.mod.read())
def test_replac_calls_with_function_definition_in_defining_module2(self):
    self.mod.write('def a_func():\n '
                   'for i in range(10):\n print(1)\na_func()\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('for i in range(10):\n print(1)\n',
                      self.mod.read())
def test_replacing_calls_with_method_definition_in_defining_modules(self):
    self.mod.write('class A(object):\n var = 10\n'
                   ' def a_func(self):\n print(1)\nA().a_func()')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('class A(object):\n var = 10\nprint(1)\n',
                      self.mod.read())
def test_parameters_with_the_same_name_as_passed(self):
    self.mod.write('def a_func(var):\n '
                   'print(var)\nvar = 1\na_func(var)\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('var = 1\nprint(var)\n', self.mod.read())
def test_parameters_with_the_same_name_as_passed2(self):
    self.mod.write('def a_func(var):\n '
                   'print(var)\nvar = 1\na_func(var=var)\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('var = 1\nprint(var)\n', self.mod.read())
# --- Parameter substitution: positional, keyword, default arguments ---
def test_simple_parameters_renaming(self):
    self.mod.write('def a_func(param):\n '
                   'print(param)\nvar = 1\na_func(var)\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('var = 1\nprint(var)\n', self.mod.read())
def test_simple_parameters_renaming_for_multiple_params(self):
    self.mod.write('def a_func(param1, param2):\n p = param1 + param2\n'
                   'var1 = 1\nvar2 = 1\na_func(var1, var2)\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('var1 = 1\nvar2 = 1\np = var1 + var2\n',
                      self.mod.read())
def test_parameters_renaming_for_passed_constants(self):
    self.mod.write('def a_func(param):\n print(param)\na_func(1)\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('print(1)\n', self.mod.read())
def test_parameters_renaming_for_passed_statements(self):
    self.mod.write('def a_func(param):\n '
                   'print(param)\na_func((1 + 2) / 3)\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('print((1 + 2) / 3)\n', self.mod.read())
def test_simple_parameters_renam_for_multiple_params_using_keywords(self):
    self.mod.write('def a_func(param1, param2):\n '
                   'p = param1 + param2\n'
                   'var1 = 1\nvar2 = 1\n'
                   'a_func(param2=var1, param1=var2)\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('var1 = 1\nvar2 = 1\np = var2 + var1\n',
                      self.mod.read())
def test_simple_params_renam_for_multi_params_using_mixed_keywords(self):
    self.mod.write('def a_func(param1, param2):\n p = param1 + param2\n'
                   'var1 = 1\nvar2 = 1\na_func(var2, param2=var1)\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('var1 = 1\nvar2 = 1\np = var2 + var1\n',
                      self.mod.read())
def test_simple_putting_in_default_arguments(self):
    self.mod.write('def a_func(param=None):\n print(param)\n'
                   'a_func()\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('print(None)\n', self.mod.read())
def test_overriding_default_arguments(self):
    self.mod.write('def a_func(param1=1, param2=2):'
                   '\n print(param1, param2)\n'
                   'a_func(param2=3)\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('print(1, 3)\n', self.mod.read())
def test_badly_formatted_text(self):
    # Extra whitespace and line breaks in the call must not confuse inlining.
    self.mod.write('def a_func ( param1 = 1 ,param2 = 2 ) :'
                   '\n print(param1, param2)\n'
                   'a_func ( param2 \n = 3 ) \n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('print(1, 3)\n', self.mod.read())
# --- `self` handling when inlining bound and unbound method calls ---
def test_passing_first_arguments_for_methods(self):
    a_class = 'class A(object):\n' \
        ' def __init__(self):\n' \
        ' self.var = 1\n' \
        ' self.a_func(self.var)\n' \
        ' def a_func(self, param):\n' \
        ' print(param)\n'
    self.mod.write(a_class)
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    expected = 'class A(object):\n' \
        ' def __init__(self):\n' \
        ' self.var = 1\n' \
        ' print(self.var)\n'
    self.assertEquals(expected, self.mod.read())
def test_passing_first_arguments_for_methods2(self):
    a_class = 'class A(object):\n' \
        ' def __init__(self):\n' \
        ' self.var = 1\n' \
        ' def a_func(self, param):\n' \
        ' print(param, self.var)\n' \
        'an_a = A()\n' \
        'an_a.a_func(1)\n'
    self.mod.write(a_class)
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    expected = 'class A(object):\n' \
        ' def __init__(self):\n' \
        ' self.var = 1\n' \
        'an_a = A()\n' \
        'print(1, an_a.var)\n'
    self.assertEquals(expected, self.mod.read())
def test_passing_first_arguments_for_methods3(self):
    # Unbound form A.a_func(an_a, 1) must bind `self` to `an_a`.
    a_class = 'class A(object):\n' \
        ' def __init__(self):\n' \
        ' self.var = 1\n' \
        ' def a_func(self, param):\n' \
        ' print(param, self.var)\n' \
        'an_a = A()\n' \
        'A.a_func(an_a, 1)\n'
    self.mod.write(a_class)
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    expected = 'class A(object):\n' \
        ' def __init__(self):\n' \
        ' self.var = 1\n' \
        'an_a = A()\n' \
        'print(1, an_a.var)\n'
    self.assertEquals(expected, self.mod.read())
# --- staticmethod / classmethod inlining ---
def test_inlining_staticmethods(self):
    a_class = 'class A(object):\n' \
        ' @staticmethod\n' \
        ' def a_func(param):\n' \
        ' print(param)\n' \
        'A.a_func(1)\n'
    self.mod.write(a_class)
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    expected = 'class A(object):\n' \
        ' pass\n' \
        'print(1)\n'
    self.assertEquals(expected, self.mod.read())
def test_static_methods2(self):
    a_class = 'class A(object):\n' \
        ' var = 10\n' \
        ' @staticmethod\n' \
        ' def a_func(param):\n' \
        ' print(param)\n' \
        'an_a = A()\n' \
        'an_a.a_func(1)\n' \
        'A.a_func(2)\n'
    self.mod.write(a_class)
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    expected = 'class A(object):\n' \
        ' var = 10\n' \
        'an_a = A()\n' \
        'print(1)\n' \
        'print(2)\n'
    self.assertEquals(expected, self.mod.read())
def test_inlining_classmethods(self):
    a_class = 'class A(object):\n' \
        ' @classmethod\n' \
        ' def a_func(cls, param):\n' \
        ' print(param)\n' \
        'A.a_func(1)\n'
    self.mod.write(a_class)
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    expected = 'class A(object):\n' \
        ' pass\n' \
        'print(1)\n'
    self.assertEquals(expected, self.mod.read())
def test_inlining_classmethods2(self):
    # `cls` in the body is replaced by the class the call was made on.
    a_class = 'class A(object):\n' \
        ' @classmethod\n' \
        ' def a_func(cls, param):\n' \
        ' return cls\n' \
        'print(A.a_func(1))\n'
    self.mod.write(a_class)
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    expected = 'class A(object):\n' \
        ' pass\n' \
        'print(A)\n'
    self.assertEquals(expected, self.mod.read())
# --- Return-value substitution and unsupported signatures ---
def test_simple_return_values_and_inlining_functions(self):
    self.mod.write('def a_func():\n return 1\na = a_func()\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('a = 1\n',
                      self.mod.read())
def test_simple_return_values_and_inlining_lonely_functions(self):
    self.mod.write('def a_func():\n return 1\na_func()\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('1\n', self.mod.read())
def test_empty_returns_and_inlining_lonely_functions(self):
    self.mod.write('def a_func():\n '
                   'if True:\n return\na_func()\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('if True:\n pass\n', self.mod.read())
def test_multiple_returns(self):
    # Multiple returns whose value is used cannot be inlined.
    self.mod.write('def less_than_five(var):\n if var < 5:\n'
                   ' return True\n return False\n'
                   'a = less_than_five(2)\n')
    with self.assertRaises(rope.base.exceptions.RefactoringError):
        self._inline2(self.mod, self.mod.read().index('less') + 1)
def test_multiple_returns_and_not_using_the_value(self):
    self.mod.write('def less_than_five(var):\n if var < 5:\n'
                   ' return True\n '
                   'return False\nless_than_five(2)\n')
    self._inline2(self.mod, self.mod.read().index('less') + 1)
    self.assertEquals('if 2 < 5:\n True\nFalse\n', self.mod.read())
def test_raising_exception_for_list_arguments(self):
    # *args / **kwargs signatures are not inlinable.
    self.mod.write('def a_func(*args):\n print(args)\na_func(1)\n')
    with self.assertRaises(rope.base.exceptions.RefactoringError):
        self._inline2(self.mod, self.mod.read().index('a_func') + 1)
def test_raising_exception_for_list_keywods(self):
    self.mod.write('def a_func(**kwds):\n print(kwds)\na_func(n=1)\n')
    with self.assertRaises(rope.base.exceptions.RefactoringError):
        self._inline2(self.mod, self.mod.read().index('a_func') + 1)
# --- Misc: nested call sites, self-references, formatting edge cases ---
def test_function_parameters_and_returns_in_other_functions(self):
    code = 'def a_func(param1, param2):\n' \
        ' return param1 + param2\n' \
        'range(a_func(20, param2=abs(10)))\n'
    self.mod.write(code)
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('range(20 + abs(10))\n', self.mod.read())
def test_function_references_other_than_call(self):
    # A non-call reference (aliasing) makes the function non-inlinable.
    self.mod.write('def a_func(param):\n print(param)\nf = a_func\n')
    with self.assertRaises(rope.base.exceptions.RefactoringError):
        self._inline2(self.mod, self.mod.read().index('a_func') + 1)
def test_function_referencing_itself(self):
    self.mod.write('def a_func(var):\n func = a_func\n')
    with self.assertRaises(rope.base.exceptions.RefactoringError):
        self._inline2(self.mod, self.mod.read().index('a_func') + 1)
def test_recursive_functions(self):
    self.mod.write('def a_func(var):\n a_func(var)\n')
    with self.assertRaises(rope.base.exceptions.RefactoringError):
        self._inline2(self.mod, self.mod.read().index('a_func') + 1)
# TODO: inlining on function parameters
def xxx_test_inlining_function_default_parameters(self):
    # Disabled (xxx_ prefix): inlining a default parameter should be a no-op.
    self.mod.write('def a_func(p1=1):\n pass\na_func()\n')
    self._inline2(self.mod, self.mod.read().index('p1') + 1)
    self.assertEquals('def a_func(p1=1):\n pass\na_func()\n',
                      self.mod.read())
def test_simple_inlining_after_extra_indented_lines(self):
    self.mod.write('def a_func():\n for i in range(10):\n pass\n'
                   'if True:\n pass\na_func()\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('if True:\n pass\nfor i in range(10):'
                      '\n pass\n',
                      self.mod.read())
def test_inlining_a_function_with_pydoc(self):
    # The docstring is dropped when the body is inlined.
    self.mod.write('def a_func():\n """docs"""\n a = 1\na_func()')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('a = 1\n', self.mod.read())
def test_inlining_methods(self):
    self.mod.write("class A(object):\n name = 'hey'\n"
                   " def get_name(self):\n return self.name\n"
                   "a = A()\nname = a.get_name()\n")
    self._inline2(self.mod, self.mod.read().rindex('get_name') + 1)
    self.assertEquals("class A(object):\n name = 'hey'\n"
                      "a = A()\nname = a.name\n", self.mod.read())
def test_simple_returns_with_backslashes(self):
    self.mod.write('def a_func():\n return 1'
                   '\\\n + 2\na = a_func()\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('a = 1 + 2\n', self.mod.read())
def test_a_function_with_pass_body(self):
    # A call used as a value with no return inlines to None.
    self.mod.write('def a_func():\n print(1)\na = a_func()\n')
    self._inline2(self.mod, self.mod.read().index('a_func') + 1)
    self.assertEquals('print(1)\na = None\n', self.mod.read())
def test_inlining_the_last_method_of_a_class(self):
    # Removing the only method leaves `pass` so the class stays valid.
    self.mod.write('class A(object):\n'
                   ' def a_func(self):\n pass\n')
    self._inline2(self.mod, self.mod.read().rindex('a_func') + 1)
    self.assertEquals('class A(object):\n pass\n',
                      self.mod.read())
def test_adding_needed_imports_in_the_dest_module(self):
    # Imports used by the inlined body are added to the destination.
    self.mod.write('import sys\n\ndef ver():\n    print(sys.version)\n')
    self.mod2.write('import mod\n\nmod.ver()')
    self._inline2(self.mod, self.mod.read().index('ver') + 1)
    self.assertEqual('import mod\nimport sys\n\nprint(sys.version)\n',
                     self.mod2.read())

def test_adding_needed_imports_in_the_dest_module_removing_selfs(self):
    # References back into the destination module are resolved locally.
    self.mod.write('import mod2\n\ndef f():\n    print(mod2.var)\n')
    self.mod2.write('import mod\n\nvar = 1\nmod.f()\n')
    self._inline2(self.mod, self.mod.read().index('f(') + 1)
    self.assertEqual('import mod\n\nvar = 1\nprint(var)\n',
                     self.mod2.read())

def test_handling_relative_imports_when_inlining(self):
    pkg = testutils.create_package(self.project, 'pkg')
    mod3 = testutils.create_module(self.project, 'mod3', pkg)
    mod4 = testutils.create_module(self.project, 'mod4', pkg)
    mod4.write('var = 1\n')
    mod3.write('from . import mod4\n\ndef f():\n    print(mod4.var)\n')
    self.mod.write('import pkg.mod3\n\npkg.mod3.f()\n')
    self._inline2(self.mod, self.mod.read().index('f(') + 1)
    # Cannot determine the exact import
    self.assertTrue('\n\nprint(mod4.var)\n' in self.mod.read())

def test_adding_needed_imports_for_elements_in_source(self):
    # Helpers referenced by the inlined body get a `from` import.
    self.mod.write('def f1():\n    return f2()\ndef f2():\n    return 1\n')
    self.mod2.write('import mod\n\nprint(mod.f1())\n')
    self._inline2(self.mod, self.mod.read().index('f1') + 1)
    self.assertEqual('import mod\nfrom mod import f2\n\nprint(f2())\n',
                     self.mod2.read())

def test_relative_imports_and_changing_inlining_body(self):
    pkg = testutils.create_package(self.project, 'pkg')
    mod3 = testutils.create_module(self.project, 'mod3', pkg)
    mod4 = testutils.create_module(self.project, 'mod4', pkg)
    mod4.write('var = 1\n')
    mod3.write('import mod4\n\ndef f():\n    print(mod4.var)\n')
    self.mod.write('import pkg.mod3\n\npkg.mod3.f()\n')
    self._inline2(self.mod, self.mod.read().index('f(') + 1)
    self.assertEqual(
        'import pkg.mod3\nimport pkg.mod4\n\nprint(pkg.mod4.var)\n',
        self.mod.read())
def test_inlining_with_different_returns(self):
    # Each call site gets its own substituted argument.
    self.mod.write('def f(p):\n    return p\n'
                   'print(f(1))\nprint(f(2))\nprint(f(1))\n')
    self._inline2(self.mod, self.mod.read().index('f(') + 1)
    self.assertEqual('print(1)\nprint(2)\nprint(1)\n',
                     self.mod.read())

def test_not_removing_definition_for_variables(self):
    code = 'a_var = 10\nanother_var = a_var\n'
    refactored = self._inline(code, code.index('a_var') + 1,
                              remove=False)
    self.assertEqual('a_var = 10\nanother_var = 10\n', refactored)

def test_not_removing_definition_for_methods(self):
    code = 'def func():\n    print(1)\n\nfunc()\n'
    refactored = self._inline(code, code.index('func') + 1,
                              remove=False)
    self.assertEqual('def func():\n    print(1)\n\nprint(1)\n',
                     refactored)

def test_only_current_for_methods(self):
    # only_current=True inlines just the occurrence at the offset.
    code = 'def func():\n    print(1)\n\nfunc()\nfunc()\n'
    refactored = self._inline(code, code.rindex('func') + 1,
                              remove=False, only_current=True)
    self.assertEqual('def func():\n    print(1)\n\nfunc()\nprint(1)\n',
                     refactored)

def test_only_current_for_variables(self):
    code = 'one = 1\n\na = one\nb = one\n'
    refactored = self._inline(code, code.rindex('one') + 1,
                              remove=False, only_current=True)
    self.assertEqual('one = 1\n\na = one\nb = 1\n', refactored)

def test_inlining_one_line_functions(self):
    code = 'def f(): return 1\nvar = f()\n'
    refactored = self._inline(code, code.rindex('f'))
    self.assertEqual('var = 1\n', refactored)

def test_inlining_one_line_functions_with_breaks(self):
    code = 'def f(\np): return p\nvar = f(1)\n'
    refactored = self._inline(code, code.rindex('f'))
    self.assertEqual('var = 1\n', refactored)

def test_inlining_one_line_functions_with_breaks2(self):
    code = 'def f(\n): return 1\nvar = f()\n'
    refactored = self._inline(code, code.rindex('f'))
    self.assertEqual('var = 1\n', refactored)

def test_resources_parameter(self):
    # Only resources listed in `resources` are touched; mod1 keeps its call.
    self.mod.write('def a_func():\n    print(1)\n')
    mod1 = testutils.create_module(self.project, 'mod1')
    mod1.write('import mod\nmod.a_func()\n')
    self._inline2(self.mod, self.mod.read().index('a_func'),
                  resources=[self.mod])
    self.assertEqual('', self.mod.read())
    self.assertEqual('import mod\nmod.a_func()\n', mod1.read())
def test_inlining_parameters(self):
    # Inlining a parameter writes its default into the call sites.
    code = 'def f(p=1):\n    pass\nf()\n'
    result = self._inline(code, code.index('p'))
    self.assertEqual('def f(p=1):\n    pass\nf(1)\n', result)

def test_inlining_function_with_line_breaks_in_args(self):
    code = 'def f(p): return p\nvar = f(1 +\n1)\n'
    refactored = self._inline(code, code.rindex('f'))
    self.assertEqual('var = 1 + 1\n', refactored)

def test_inlining_variables_before_comparison(self):
    code = 'start = 1\nprint(start <= 2)\n'
    refactored = self._inline(code, code.index('start'))
    self.assertEqual('print(1 <= 2)\n', refactored)

def test_inlining_variables_in_other_modules(self):
    self.mod.write('myvar = 1\n')
    self.mod2.write('import mod\nprint(mod.myvar)\n')
    self._inline2(self.mod, 2)
    self.assertEqual('import mod\nprint(1)\n', self.mod2.read())

def test_inlining_variables_and_back_importing(self):
    # The replacement references mainvar, so it must be imported.
    self.mod.write('mainvar = 1\nmyvar = mainvar\n')
    self.mod2.write('import mod\nprint(mod.myvar)\n')
    self._inline2(self.mod, self.mod.read().index('myvar'))
    expected = 'import mod\n' \
               'from mod import mainvar\n' \
               'print(mainvar)\n'
    self.assertEqual(expected, self.mod2.read())

def test_inlining_variables_and_importing_used_imports(self):
    self.mod.write('import sys\nmyvar = sys.argv\n')
    self.mod2.write('import mod\nprint(mod.myvar)\n')
    self._inline2(self.mod, self.mod.read().index('myvar'))
    expected = 'import mod\n' \
               'import sys\n' \
               'print(sys.argv)\n'
    self.assertEqual(expected, self.mod2.read())

def test_inlining_variables_and_removing_old_froms(self):
    # A `from` import made unnecessary by the inline is removed.
    self.mod.write('var = 1\n')
    self.mod2.write('from mod import var\nprint(var)\n')
    self._inline2(self.mod2, self.mod2.read().rindex('var'))
    self.assertEqual('print(1)\n', self.mod2.read())
def test_inlining_method_and_removing_old_froms(self):
    self.mod.write('def f(): return 1\n')
    self.mod2.write('from mod import f\nprint(f())\n')
    self._inline2(self.mod2, self.mod2.read().rindex('f'))
    self.assertEqual('print(1)\n', self.mod2.read())

def test_inlining_functions_in_other_modules_and_only_current(self):
    # Only the occurrence at the offset changes; the source module and
    # the other call site stay intact.
    code1 = 'def f():\n' \
            '    return 1\n' \
            'print(f())\n'
    code2 = 'import mod\n' \
            'print(mod.f())\n' \
            'print(mod.f())\n'
    self.mod.write(code1)
    self.mod2.write(code2)
    self._inline2(self.mod2, self.mod2.read().rindex('f'),
                  remove=False, only_current=True)
    expected2 = 'import mod\n' \
                'print(mod.f())\n' \
                'print(1)\n'
    self.assertEqual(code1, self.mod.read())
    self.assertEqual(expected2, self.mod2.read())

def test_inlining_variables_in_other_modules_and_only_current(self):
    code1 = 'var = 1\n' \
            'print(var)\n'
    code2 = 'import mod\n' \
            'print(mod.var)\n' \
            'print(mod.var)\n'
    self.mod.write(code1)
    self.mod2.write(code2)
    self._inline2(self.mod2, self.mod2.read().rindex('var'),
                  remove=False, only_current=True)
    expected2 = 'import mod\n' \
                'print(mod.var)\n' \
                'print(1)\n'
    self.assertEqual(code1, self.mod.read())
    self.assertEqual(expected2, self.mod2.read())

def test_inlining_does_not_change_string_constants(self):
    # With docs=False an occurrence inside a string literal is ignored.
    code = 'var = 1\n' \
           'print("var\\\n' \
           '")\n'
    expected = 'var = 1\n' \
               'print("var\\\n' \
               '")\n'
    refactored = self._inline(code, code.rindex('var'),
                              remove=False, only_current=True, docs=False)
    self.assertEqual(expected, refactored)

def test_inlining_does_change_string_constants_if_docs_is_set(self):
    # With docs=True string/doc occurrences are rewritten as well.
    code = 'var = 1\n' \
           'print("var\\\n' \
           '")\n'
    expected = 'var = 1\n' \
               'print("1\\\n' \
               '")\n'
    refactored = self._inline(code, code.rindex('var'),
                              remove=False, only_current=True, docs=True)
    self.assertEqual(expected, refactored)
def suite():
    """Collect the inline-refactoring tests into a single test suite."""
    tests = unittest.TestSuite()
    tests.addTests(unittest.makeSuite(InlineTest))
    return tests


if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
alexpilotti/python-glanceclient | glanceclient/__init__.py | 3 | 1132 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(bcwaldon): this try/except block is needed to run setup.py due to
# its need to import local code before installing required dependencies
try:
    import glanceclient.client
    Client = glanceclient.client.Client
except ImportError:
    import warnings
    warnings.warn("Could not import glanceclient.client", ImportWarning)

import pbr.version

# Expose the package version; fall back to None when pbr cannot
# determine a version string (e.g. outside an installed environment).
version_info = pbr.version.VersionInfo('python-glanceclient')
try:
    __version__ = version_info.version_string()
except AttributeError:
    __version__ = None
| apache-2.0 |
linan7788626/arrayfire-python | arrayfire/signal.py | 1 | 6320 | #######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
from .library import *
from .array import *
def approx1(signal, pos0, method=AF_INTERP_LINEAR, off_grid=0.0):
    """Interpolate `signal` at the 1D positions in `pos0`."""
    out = array()
    safe_call(clib.af_approx1(ct.pointer(out.arr), signal.arr, pos0.arr,
                              method, ct.c_double(off_grid)))
    return out


def approx2(signal, pos0, pos1, method=AF_INTERP_LINEAR, off_grid=0.0):
    """Interpolate `signal` at the 2D positions (`pos0`, `pos1`)."""
    out = array()
    safe_call(clib.af_approx2(ct.pointer(out.arr), signal.arr,
                              pos0.arr, pos1.arr, method,
                              ct.c_double(off_grid)))
    return out
def fft(signal, dim0=None, scale=None):
    """Forward 1D FFT.

    `dim0` defaults to 0 and `scale` to 1.0; presumably 0 tells the
    backend to keep the input size -- confirm against the ArrayFire docs.
    """
    if dim0 is None:
        dim0 = 0
    if scale is None:
        scale = 1.0
    out = array()
    safe_call(clib.af_fft(ct.pointer(out.arr), signal.arr,
                          ct.c_double(scale), ct.c_longlong(dim0)))
    return out


def fft2(signal, dim0=None, dim1=None, scale=None):
    """Forward 2D FFT; dimensions default to 0, scale to 1.0."""
    if dim0 is None:
        dim0 = 0
    if dim1 is None:
        dim1 = 0
    if scale is None:
        scale = 1.0
    out = array()
    safe_call(clib.af_fft2(ct.pointer(out.arr), signal.arr,
                           ct.c_double(scale),
                           ct.c_longlong(dim0), ct.c_longlong(dim1)))
    return out


def fft3(signal, dim0=None, dim1=None, dim2=None, scale=None):
    """Forward 3D FFT; dimensions default to 0, scale to 1.0."""
    if dim0 is None:
        dim0 = 0
    if dim1 is None:
        dim1 = 0
    if dim2 is None:
        dim2 = 0
    if scale is None:
        scale = 1.0
    out = array()
    safe_call(clib.af_fft3(ct.pointer(out.arr), signal.arr,
                           ct.c_double(scale),
                           ct.c_longlong(dim0), ct.c_longlong(dim1),
                           ct.c_longlong(dim2)))
    return out
def ifft(signal, dim0=None, scale=None):
    """Inverse 1D FFT; scale defaults to 1/dim0 so a round trip is unscaled."""
    if dim0 is None:
        dim0 = signal.dims()[0]
    if scale is None:
        scale = 1.0 / float(dim0)
    out = array()
    safe_call(clib.af_ifft(ct.pointer(out.arr), signal.arr,
                           ct.c_double(scale), ct.c_longlong(dim0)))
    return out


def ifft2(signal, dim0=None, dim1=None, scale=None):
    """Inverse 2D FFT; falls back to ifft() for rank-1 input."""
    dims = signal.dims()
    if len(dims) < 2:
        return ifft(signal)
    if dim0 is None:
        dim0 = dims[0]
    if dim1 is None:
        dim1 = dims[1]
    if scale is None:
        scale = 1.0 / float(dim0 * dim1)
    out = array()
    safe_call(clib.af_ifft2(ct.pointer(out.arr), signal.arr,
                            ct.c_double(scale),
                            ct.c_longlong(dim0), ct.c_longlong(dim1)))
    return out


def ifft3(signal, dim0=None, dim1=None, dim2=None, scale=None):
    """Inverse 3D FFT; falls back to ifft2() for rank < 3 input."""
    dims = signal.dims()
    if len(dims) < 3:
        return ifft2(signal)
    if dim0 is None:
        dim0 = dims[0]
    if dim1 is None:
        dim1 = dims[1]
    if dim2 is None:
        dim2 = dims[2]
    if scale is None:
        scale = 1.0 / float(dim0 * dim1 * dim2)
    out = array()
    safe_call(clib.af_ifft3(ct.pointer(out.arr), signal.arr,
                            ct.c_double(scale),
                            ct.c_longlong(dim0), ct.c_longlong(dim1),
                            ct.c_longlong(dim2)))
    return out
def dft(signal, scale=None, odims=(None, None, None, None)):
    """Forward DFT dispatching to fft/fft2/fft3 by the signal's rank.

    Bug fix: the fftN helpers take the dimension sizes *before* the
    scale argument, but `scale` used to be passed as the first dimension
    and dims[0] as the scale.
    """
    odims4 = dim4_tuple(odims, default=None)  # validates odims; result unused
    dims = signal.dims()
    ndims = len(dims)
    if ndims == 1:
        return fft(signal, dims[0], scale)
    elif ndims == 2:
        return fft2(signal, dims[0], dims[1], scale)
    else:
        return fft3(signal, dims[0], dims[1], dims[2], scale)
def idft(signal, scale=None, odims=(None, None, None, None)):
    """Inverse DFT dispatching to ifft/ifft2/ifft3 by the signal's rank.

    Bug fix: the ifftN helpers take the dimension sizes *before* the
    scale argument, but `scale` used to be passed as the first dimension
    and dims[0] as the scale.
    """
    odims4 = dim4_tuple(odims, default=None)  # validates odims; result unused
    dims = signal.dims()
    ndims = len(dims)
    if ndims == 1:
        return ifft(signal, dims[0], scale)
    elif ndims == 2:
        return ifft2(signal, dims[0], dims[1], scale)
    else:
        return ifft3(signal, dims[0], dims[1], dims[2], scale)
def convolve1(signal, kernel, conv_mode=AF_CONV_DEFAULT, conv_domain=AF_CONV_AUTO):
    """1D convolution of `signal` with `kernel`."""
    out = array()
    safe_call(clib.af_convolve1(ct.pointer(out.arr), signal.arr, kernel.arr,
                                conv_mode, conv_domain))
    return out


def convolve2(signal, kernel, conv_mode=AF_CONV_DEFAULT, conv_domain=AF_CONV_AUTO):
    """2D convolution of `signal` with `kernel`."""
    out = array()
    safe_call(clib.af_convolve2(ct.pointer(out.arr), signal.arr, kernel.arr,
                                conv_mode, conv_domain))
    return out


def convolve3(signal, kernel, conv_mode=AF_CONV_DEFAULT, conv_domain=AF_CONV_AUTO):
    """3D convolution of `signal` with `kernel`."""
    out = array()
    safe_call(clib.af_convolve3(ct.pointer(out.arr), signal.arr, kernel.arr,
                                conv_mode, conv_domain))
    return out


def convolve(signal, kernel, conv_mode=AF_CONV_DEFAULT, conv_domain=AF_CONV_AUTO):
    """Dispatch to convolve1/2/3 based on the signal's rank."""
    ndims = len(signal.dims())
    if ndims == 1:
        return convolve1(signal, kernel, conv_mode, conv_domain)
    if ndims == 2:
        return convolve2(signal, kernel, conv_mode, conv_domain)
    return convolve3(signal, kernel, conv_mode, conv_domain)
def fft_convolve1(signal, kernel, conv_mode=AF_CONV_DEFAULT):
    """FFT-based 1D convolution of `signal` with `kernel`."""
    out = array()
    safe_call(clib.af_fft_convolve1(ct.pointer(out.arr), signal.arr,
                                    kernel.arr, conv_mode))
    return out


def fft_convolve2(signal, kernel, conv_mode=AF_CONV_DEFAULT):
    """FFT-based 2D convolution of `signal` with `kernel`."""
    out = array()
    safe_call(clib.af_fft_convolve2(ct.pointer(out.arr), signal.arr,
                                    kernel.arr, conv_mode))
    return out


def fft_convolve3(signal, kernel, conv_mode=AF_CONV_DEFAULT):
    """FFT-based 3D convolution of `signal` with `kernel`."""
    out = array()
    safe_call(clib.af_fft_convolve3(ct.pointer(out.arr), signal.arr,
                                    kernel.arr, conv_mode))
    return out


def fft_convolve(signal, kernel, conv_mode=AF_CONV_DEFAULT):
    """Dispatch to fft_convolve1/2/3 based on the signal's rank."""
    ndims = len(signal.dims())
    if ndims == 1:
        return fft_convolve1(signal, kernel, conv_mode)
    if ndims == 2:
        return fft_convolve2(signal, kernel, conv_mode)
    return fft_convolve3(signal, kernel, conv_mode)
def fir(B, X):
    """Finite impulse response filter: apply coefficients B to signal X."""
    out = array()
    safe_call(clib.af_fir(ct.pointer(out.arr), B.arr, X.arr))
    return out


def iir(B, A, X):
    """Infinite impulse response filter with feedforward B and feedback A."""
    out = array()
    safe_call(clib.af_iir(ct.pointer(out.arr), B.arr, A.arr, X.arr))
    return out
| bsd-3-clause |
jostep/tensorflow | tensorflow/python/profiler/profiler.py | 50 | 1944 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""profiler python module provides APIs to profile TensorFlow models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.core.profiler.tfprof_log_pb2 import OpLogProto
from tensorflow.core.profiler.tfprof_output_pb2 import AdviceProto
from tensorflow.core.profiler.tfprof_output_pb2 import GraphNodeProto
from tensorflow.core.profiler.tfprof_output_pb2 import MultiGraphNodeProto
from tensorflow.python.profiler.model_analyzer import advise
from tensorflow.python.profiler.model_analyzer import profile
from tensorflow.python.profiler.model_analyzer import Profiler
from tensorflow.python.profiler.option_builder import ProfileOptionBuilder
from tensorflow.python.profiler.tfprof_logger import write_op_log
from tensorflow.python.util.all_util import remove_undocumented
# Public API of this module; everything else is stripped below.
_allowed_symbols = [
    'Profiler',
    'profile',
    'ProfileOptionBuilder',
    'advise',
    'write_op_log',
    'GraphNodeProto',
    'MultiGraphNodeProto',
    'AdviceProto',
    'OpLogProto',
]

remove_undocumented(__name__, _allowed_symbols, [
    Profiler,
    profile,
    ProfileOptionBuilder,
    advise,
    write_op_log,
])
| apache-2.0 |
doismellburning/edx-platform | common/djangoapps/student/tests/test_linkedin.py | 150 | 2265 | # -*- coding: utf-8 -*-
"""Tests for LinkedIn Add to Profile configuration. """
import ddt
from urllib import urlencode
from django.test import TestCase
from opaque_keys.edx.locator import CourseLocator
from student.models import LinkedInAddToProfileConfiguration
@ddt.ddt
class LinkedInAddToProfileUrlTests(TestCase):
    """Tests for URL generation of LinkedInAddToProfileConfig. """

    COURSE_KEY = CourseLocator(org="edx", course="DemoX", run="Demo_Course")
    COURSE_NAME = u"Test Course ☃"
    CERT_URL = u"http://s3.edx/cert"

    @ddt.data(
        ('honor', u'edX+Honor+Code+Certificate+for+Test+Course+%E2%98%83'),
        ('verified', u'edX+Verified+Certificate+for+Test+Course+%E2%98%83'),
        ('professional', u'edX+Professional+Certificate+for+Test+Course+%E2%98%83'),
        ('default_mode', u'edX+Certificate+for+Test+Course+%E2%98%83')
    )
    @ddt.unpack
    def test_linked_in_url(self, cert_mode, expected_cert_name):
        # Each certificate mode maps to a distinct certification name.
        profile_config = LinkedInAddToProfileConfiguration(
            company_identifier='0_mC_o2MizqdtZEmkVXjH4eYwMj4DnkCWrZP_D9',
            enabled=True
        )
        expected_url = (
            'http://www.linkedin.com/profile/add'
            '?_ed=0_mC_o2MizqdtZEmkVXjH4eYwMj4DnkCWrZP_D9&'
            'pfCertificationName={expected_cert_name}&'
            'pfCertificationUrl=http%3A%2F%2Fs3.edx%2Fcert&'
            'source=o'
        ).format(expected_cert_name=expected_cert_name)
        actual_url = profile_config.add_to_profile_url(
            self.COURSE_KEY,
            self.COURSE_NAME,
            cert_mode,
            self.CERT_URL
        )
        self.assertEqual(actual_url, expected_url)

    def test_linked_in_url_tracking_code(self):
        # When a partner name is configured a `trk` parameter is appended.
        profile_config = LinkedInAddToProfileConfiguration(
            company_identifier="abcd123",
            trk_partner_name="edx",
            enabled=True
        )
        expected_param = urlencode({
            'trk': u'edx-{course_key}_honor-dashboard'.format(
                course_key=self.COURSE_KEY
            )
        })
        actual_url = profile_config.add_to_profile_url(
            self.COURSE_KEY,
            self.COURSE_NAME,
            'honor',
            self.CERT_URL
        )
        self.assertIn(expected_param, actual_url)
| agpl-3.0 |
davduran/thumbor | tests/test_console.py | 7 | 1563 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from unittest import TestCase
from preggy import expect
from thumbor.console import get_server_parameters
class ConsoleTestCase(TestCase):
    """Covers parsing of thumbor's command-line server parameters."""

    def test_can_get_default_server_parameters(self):
        # No arguments: every option falls back to its documented default.
        server_params = get_server_parameters()
        expect(server_params.port).to_equal(8888)
        expect(server_params.ip).to_equal('0.0.0.0')
        expect(server_params.config_path).to_be_null()
        expect(server_params.keyfile).to_be_null()
        expect(server_params.log_level).to_equal('warning')
        expect(server_params.app_class).to_equal('thumbor.app.ThumborServiceApp')
        expect(server_params.fd).to_be_null()

    def test_can_get_custom_server_parameters(self):
        # Every supported flag is parsed into its corresponding attribute.
        server_params = get_server_parameters([
            '--port=9999',
            '--ip=127.0.0.1',
            '--conf=/tmp/conf.conf',
            '--keyfile=./tests/fixtures/thumbor.key',
            '--log-level=debug',
            '--app=custom.app',
            '--fd=/tmp/fd',
        ])
        expect(server_params.port).to_equal(9999)
        expect(server_params.ip).to_equal('127.0.0.1')
        expect(server_params.config_path).to_equal('/tmp/conf.conf')
        expect(server_params.keyfile).to_equal('./tests/fixtures/thumbor.key')
        expect(server_params.log_level).to_equal('debug')
        expect(server_params.app_class).to_equal('custom.app')
        expect(server_params.fd).to_equal('/tmp/fd')
| mit |
florianjacob/pelican-plugins | better_figures_and_images/better_figures_and_images.py | 12 | 4332 | """
Better Figures & Images
------------------------
This plugin:
- Adds a style="width: ???px; height: auto;" to each image in the content
- Also adds the width of the contained image to any parent div.figures.
- If RESPONSIVE_IMAGES == True, also adds style="max-width: 100%;"
- Corrects alt text: if alt == image filename, set alt = ''
TODO: Need to add a test.py for this plugin.
"""
from __future__ import unicode_literals
from os import path, access, R_OK
import os
from pelican import signals
from bs4 import BeautifulSoup
from PIL import Image
import pysvg.parser
import logging
logger = logging.getLogger(__name__)
def content_object_init(instance):
    """Pelican signal handler: add explicit dimensions to images.

    For every ``<img>``/``<object>`` in the rendered content this:
    - adds ``style="width: <N>px; height: auto;"`` from the image file,
    - mirrors that style onto any parent ``div.figure``,
    - appends ``max-width: 100%;`` when RESPONSIVE_IMAGES is enabled,
    - blanks alt text that merely repeats the image path.
    """
    if instance._content is None:
        return
    soup = BeautifulSoup(instance._content, 'html.parser')
    for img in soup(['img', 'object']):
        logger.debug('Better Fig. PATH: %s', instance.settings['PATH'])
        if img.name == 'img':
            logger.debug('Better Fig. img.src: %s', img['src'])
            img_path, img_filename = path.split(img['src'])
        else:
            logger.debug('Better Fig. img.data: %s', img['data'])
            img_path, img_filename = path.split(img['data'])
        logger.debug('Better Fig. img_path: %s', img_path)
        logger.debug('Better Fig. img_fname: %s', img_filename)

        # Strip off (unknown), |filename| or /static.
        # Bug fix: the old code sliced a fixed 10 chars for both
        # '(unknown)' (9 chars) and '|filename|' (10 chars).
        if img_path.startswith('(unknown)'):
            img_path = img_path[len('(unknown)'):]
        elif img_path.startswith('|filename|'):
            img_path = img_path[len('|filename|'):]
        elif img_path.startswith('/static'):
            img_path = img_path[7:]
        elif img_path.startswith('data:image'):
            # Image is encoded in-line (not a file).
            continue
        else:
            logger.warning('Better Fig. Error: img_path should start with either (unknown), |filename| or /static')

        # Normalize to an OS path, dropping empty segments (leading '/').
        img_path = os.path.sep.join([el for el in img_path.split("/") if len(el) > 0])

        # Candidate source files: under PATH, then under each STATIC_PATHS.
        src = os.path.join(instance.settings['PATH'], img_path, img_filename)
        src_candidates = [src]
        src_candidates += [os.path.join(instance.settings['PATH'], static_path,
                                        img_path, img_filename)
                           for static_path in instance.settings['STATIC_PATHS']]
        src_candidates = [f for f in src_candidates if path.isfile(f) and access(f, R_OK)]
        if not src_candidates:
            logger.error('Better Fig. Error: image not found: %s', src)
            logger.debug('Better Fig. Skip src: %s', img_path + '/' + img_filename)
            continue
        src = src_candidates[0]
        logger.debug('Better Fig. src: %s', src)

        # Open the source image and query dimensions; build style string.
        try:
            if img.name == 'img':
                im = Image.open(src)
                extra_style = 'width: {}px; height: auto;'.format(im.size[0])
            else:
                svg = pysvg.parser.parse(src)
                extra_style = 'width: {}px; height: auto;'.format(svg.get_width())
        except IOError:
            logger.debug('Better Fig. Failed to open: %s', src)
            extra_style = 'width: 100%; height: auto;'

        if 'RESPONSIVE_IMAGES' in instance.settings and instance.settings['RESPONSIVE_IMAGES']:
            extra_style += ' max-width: 100%;'

        if img.get('style'):
            img['style'] += extra_style
        else:
            img['style'] = extra_style

        # Bug fix: use .get() -- an <img> without an alt attribute used
        # to raise KeyError here.
        if img.name == 'img' and img.get('alt') == img['src']:
            img['alt'] = ''

        fig = img.find_parent('div', 'figure')
        if fig:
            if fig.get('style'):
                fig['style'] += extra_style
            else:
                fig['style'] = extra_style

    instance._content = soup.decode()
def register():
    """Connect this plugin to Pelican's content_object_init signal."""
    signals.content_object_init.connect(content_object_init)
| agpl-3.0 |
Mausy5043/lnxdiagd | daemons/lnxsvc98d.py | 1 | 5211 | #!/usr/bin/env python3
# daemon98.py file post-processor.
# - graphs
# - MySQL queries
# - upload
import configparser
import os
import subprocess
import sys
import syslog
import time
import traceback
from mausy5043libs.libdaemon3 import Daemon
from mausy5043libs.libgraph3 import Graph
from mausy5043libs.libsqldata3 import SqlDataFetch
import mausy5043funcs.fileops3 as mf
# constants
DEBUG = False
IS_JOURNALD = os.path.isfile('/bin/journalctl')
MYID = "".join(list(filter(str.isdigit, os.path.realpath(__file__).split('/')[-1])))
MYAPP = os.path.realpath(__file__).split('/')[-3]
NODE = os.uname()[1]
HOME = os.environ['HOME']
GRAPH_UPDATE = 10 # in minutes
SQL_UPDATE_HOUR = GRAPH_UPDATE # in minutes (shouldn't be shorter than GRAPH_UPDATE)
SQL_UPDATE_DAY = 27 # in minutes
SQL_UPDATE_WEEK = 4 # in hours
SQL_UPDATE_YEAR = 8 # in hours
# initialise logging
syslog.openlog(ident=MYAPP, facility=syslog.LOG_LOCAL0)
class MyDaemon(Daemon):
    """Daemon that periodically fetches SQL data, graphs it and uploads it."""

    @staticmethod
    def run():
        # Read this daemon's section of the shared config file.
        iniconf = configparser.ConfigParser()
        inisection = MYID
        read_files = iniconf.read(HOME + '/' + MYAPP + '/config.ini')
        mf.syslog_trace("Config file : {0}".format(read_files), False, DEBUG)
        mf.syslog_trace("Options : {0}".format(iniconf.items(inisection)), False, DEBUG)
        reporttime = iniconf.getint(inisection, "reporttime")
        samplespercycle = iniconf.getint(inisection, "samplespercycle")
        flock = iniconf.get(inisection, "lockfile")
        scriptname = iniconf.get(inisection, "lftpscript")
        sampletime = reporttime / samplespercycle  # time [s] between samples

        # Prime the data/graphs and upload once before the main loop.
        sqldata.fetch()
        if trendgraph.make() == 0:
            upload_page(scriptname)

        while True:
            try:
                do_stuff(flock, HOME, scriptname)
                # Not syncing to the top of the minute; just sleep a cycle.
                waittime = sampletime
                if waittime > 0:
                    mf.syslog_trace("Waiting : {0}s".format(waittime), False, DEBUG)
                    mf.syslog_trace("................................", False, DEBUG)
                    time.sleep(waittime)
            except Exception:
                # Log the full traceback before letting the daemon die.
                mf.syslog_trace("Unexpected error in run()", syslog.LOG_CRIT, DEBUG)
                mf.syslog_trace(traceback.format_exc(), syslog.LOG_CRIT, DEBUG)
                raise
def do_stuff(flock, homedir, script):
    """Fetch fresh data, redraw the graphs and upload the site."""
    # Wait a few seconds for concurrently running processes to finish.
    time.sleep(4)
    # Retrieve data from the MySQL database.
    fetch_result = sqldata.fetch()
    mf.syslog_trace("...datafetch: {0}".format(fetch_result), False, DEBUG)
    # Create the graphs based on the MySQL data; upload only on success.
    graph_result = trendgraph.make()
    mf.syslog_trace("...trendgrph: {0}".format(graph_result), False, DEBUG)
    if graph_result == 0:
        upload_page(script)
def upload_page(script):
    """Upload the generated webpage and graphs via lftp.

    On timeout or a non-zero exit the error is logged and the function
    sleeps 17 minutes so a rebooting router can come back before the
    next attempt.
    """
    cmnd = []
    try:
        # Only upload when the site has actually been generated.
        if os.path.isfile('/tmp/' + MYAPP + '/site/text.md'):
            write_lftp(script)
            cmnd = ['lftp', '-f', script]
            mf.syslog_trace("...: {0}".format(cmnd), False, DEBUG)
            # Keep command and output in separate variables (the old code
            # reused `cmnd` for both, which made the logs confusing).
            output = subprocess.check_output(cmnd, timeout=20)
            mf.syslog_trace("...uploadpag: {0}".format(output), False, DEBUG)
    except subprocess.TimeoutExpired:
        mf.syslog_trace("***TIMEOUT***: {0}".format(cmnd), syslog.LOG_ERR, DEBUG)
        time.sleep(17 * 60)  # wait 17 minutes for the router to restart.
    except subprocess.CalledProcessError:
        mf.syslog_trace("***ERROR***: {0}".format(cmnd), syslog.LOG_ERR, DEBUG)
        time.sleep(17 * 60)  # wait 17 minutes for the router to restart.
def write_lftp(script):
    """(Re)write the lftp command script consumed by upload_page()."""
    lines = [
        '# DO NOT EDIT\n',
        '# This file is created automatically by ' + MYAPP + '\n\n',
        '# lftp script\n\n',
        'set cmd:fail-exit yes;\n',
        'open hendrixnet.nl;\n',
        'cd 04.status/;\n',
        'set cmd:fail-exit no;\n',
        'mkdir -p -f _' + NODE + ' ;\n',
        'set cmd:fail-exit yes;\n',
        'cd _' + NODE + ' ;\n',
        'mirror --reverse --delete --verbose=3 -c /tmp/' + MYAPP + '/site/ . ;\n',
        '\n',
    ]
    with open(script, 'w') as f:
        f.writelines(lines)
if __name__ == "__main__":
    # Enable debug output *before* the helpers below capture the flag.
    if len(sys.argv) == 2 and sys.argv[1] == 'debug':
        DEBUG = True

    daemon = MyDaemon('/tmp/' + MYAPP + '/' + MYID + '.pid')
    trendgraph = Graph(HOME + '/' + MYAPP + '/mkgraphs.sh', GRAPH_UPDATE, DEBUG)
    sqldata = SqlDataFetch(HOME + '/' + MYAPP + '/queries', '/srv/semaphores',
                           SQL_UPDATE_HOUR, SQL_UPDATE_DAY, SQL_UPDATE_WEEK,
                           SQL_UPDATE_YEAR, DEBUG)

    if len(sys.argv) != 2:
        print("usage: {0!s} start|stop|restart|debug".format(sys.argv[0]))
        sys.exit(2)

    command = sys.argv[1]
    if command == 'start':
        daemon.start()
    elif command == 'stop':
        daemon.stop()
    elif command == 'restart':
        daemon.restart()
    elif command == 'debug':
        # assist with debugging.
        print("Debug-mode started. Use <Ctrl>+C to stop.")
        DEBUG = True
        mf.syslog_trace("Daemon logging is ON", syslog.LOG_DEBUG, DEBUG)
        daemon.run()
    else:
        print("Unknown command")
        sys.exit(2)
    sys.exit(0)
| mit |
Bandito43/namebench | libnamebench/mocks.py | 175 | 2503 | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mocks for tests."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import time
import nameserver
# external dependencies (from third_party)
import dns.message
import dns.rdataclass
import dns.query
GOOD_IP = '127.0.0.1'
SLOW_IP = '9.9.9.9'
PERFECT_IP = '127.127.127.127'
NO_RESPONSE_IP = '10.0.0.1'
BROKEN_IP = '192.168.0.1'
class MockNameServer(nameserver.NameServer):
  """Act like Nameserver, but do not issue any actual queries!"""

  def FakeAnswer(self, request, no_answer=False):
    """Build a canned DNS response, borrowing the question from request."""
    if not request:
      request = self.CreateRequest('www.com', 'A', dns.rdataclass.IN)
    response_text = """id 999
opcode QUERY
rcode NOERROR
flags QR RD RA
;QUESTION
www.paypal.com. IN A
;ANSWER
www.paypal.com. 159 IN A 66.211.169.65
www.paypal.com. 159 IN A 66.211.169.2
;AUTHORITY
paypal.com. 3459 IN NS ppns1.den.paypal.com.
paypal.com. 3459 IN NS ppns1.phx.paypal.com.
paypal.com. 3459 IN NS ppns2.den.paypal.com.
paypal.com. 3459 IN NS ppns2.phx.paypal.com.
;ADDITIONAL
ppns1.den.paypal.com. 165480 IN A 216.113.188.121
ppns1.phx.paypal.com. 73170 IN A 66.211.168.226
ppns2.den.paypal.com. 73170 IN A 216.113.188.122
ppns2.phx.paypal.com. 73170 IN A 66.211.168.227"""
    msg = dns.message.from_text(response_text)
    msg.question = request.question
    if no_answer:
      msg.answer = None
    return msg

  def Query(self, request, timeout):
    """Return a falsified DNS response without touching the network."""
    if self.ip == BROKEN_IP:
      raise dns.query.BadResponse('This sucks.')
    question = str(request.question[0])
    # The no-response server and GOOD_IP-for-google both answer empty.
    suppress_answer = (
        self.ip == NO_RESPONSE_IP
        or (self.ip == GOOD_IP and 'www.google.com' in question))
    answer = self.FakeAnswer(request, no_answer=suppress_answer)
    # Simulate latency differences between the mock servers.
    delays = {GOOD_IP: 0.001, SLOW_IP: 0.03}
    delay = delays.get(self.ip)
    if delay:
      time.sleep(delay)
    return answer
| apache-2.0 |
allink/plata | examples/custom/models.py | 5 | 2202 | import sys
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
from plata.product.models import ProductBase
from plata.shop.models import PriceBase
class Product(ProductBase):
    """(Nearly) the simplest product model ever"""

    # Field order is preserved deliberately: Django derives form and
    # migration ordering from declaration order.
    is_active = models.BooleanField(_('is active'), default=True)
    name = models.CharField(_('name'), max_length=100)
    slug = models.SlugField(_('slug'), unique=True)
    ordering = models.PositiveIntegerField(_('ordering'), default=0)
    description = models.TextField(_('description'), blank=True)

    class Meta:
        ordering = ['ordering', 'name']
        verbose_name = _('product')
        verbose_name_plural = _('products')

    def __unicode__(self):
        return self.name

    @models.permalink
    def get_absolute_url(self):
        return ('plata_product_detail', (), {'object_id': self.pk})


class ProductPrice(PriceBase):
    """A price attached to a product; the newest row is the current price."""

    product = models.ForeignKey(Product, verbose_name=_('product'),
                                related_name='prices')

    class Meta:
        get_latest_by = 'id'
        ordering = ['-id']
        verbose_name = _('price')
        verbose_name_plural = _('prices')
class Contact(models.Model):
    """Billing contact bound one-to-one to a Django user.

    ``ADDRESS_FIELDS`` lists the attributes mirrored from an order's
    ``billing_*`` fields by :meth:`update_from_order`.
    """
    ADDRESS_FIELDS = ['company', 'first_name', 'last_name', 'address',
        'zip_code', 'city', 'country']
    user = models.OneToOneField(User, verbose_name=_('user'),
        related_name='contactuser')
    #currency = CurrencyField(help_text=_('Preferred currency.'))
    company = models.CharField(_('company'), max_length=100, blank=True)
    first_name = models.CharField(_('first name'), max_length=100)
    last_name = models.CharField(_('last name'), max_length=100)
    address = models.TextField(_('address'))
    zip_code = models.CharField(_('ZIP code'), max_length=50)
    city = models.CharField(_('city'), max_length=100)
    # max_length=3 suggests ISO alpha-2/alpha-3 country codes --
    # TODO confirm which convention callers use.
    country = models.CharField(_('country'), max_length=3, blank=True)
    def __unicode__(self):
        return unicode(self.user)
    def update_from_order(self, order, request=None):
        """Copy the order's ``billing_*`` fields onto this contact.

        ``request`` is accepted for signature compatibility but unused
        here.
        """
        for field in self.ADDRESS_FIELDS:
            f = 'billing_' + field
            setattr(self, field, getattr(order, f))
| bsd-3-clause |
cstipkovic/spidermonkey-research | testing/puppeteer/firefox/firefox_puppeteer/ui/browser/notifications.py | 1 | 3150 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from abc import ABCMeta
from marionette_driver import By
from firefox_puppeteer.ui_base_lib import UIBaseLib
class BaseNotification(UIBaseLib):
    """Abstract base class for any kind of notification."""
    # Python 2 style abstract base class declaration.
    __metaclass__ = ABCMeta
    @property
    def close_button(self):
        """Provide access to the close button.

        :returns: The close button.
        """
        # The button lives in the notification's anonymous (XBL) content,
        # hence the ANON_ATTRIBUTE locator.
        return self.element.find_element(By.ANON_ATTRIBUTE,
                                         {'anonid': 'closebutton'})
    @property
    def label(self):
        """Provide access to the notification label.

        :returns: The notification label.
        """
        return self.element.get_attribute('label')
    @property
    def origin(self):
        """Provide access to the notification origin.

        :returns: The notification origin.
        """
        return self.element.get_attribute('origin')
    def close(self, force=False):
        """Close the notification.

        :param force: Optional, if True force close the notification.
         Defaults to False.
        """
        if force:
            # A scripted click bypasses visibility/interactability checks
            # that a normal synthesized click performs.
            self.marionette.execute_script('arguments[0].click()',
                                           script_args=[self.close_button])
        else:
            self.close_button.click()
        # Block until the browser reports the notification as dismissed.
        self.window.wait_for_notification(None)
class AddOnInstallBlockedNotification(BaseNotification):
    """Add-on install blocked notification."""
    @property
    def allow_button(self):
        """Provide access to the allow button.

        :returns: The allow button.
        """
        # The real button is nested inside another anonymous 'button'
        # node, so two chained anonymous lookups are required.
        return self.element.find_element(
            By.ANON_ATTRIBUTE, {'anonid': 'button'}).find_element(
            By.ANON_ATTRIBUTE, {'anonid': 'button'})
class AddOnInstallConfirmationNotification(BaseNotification):
    """Add-on install confirmation notification."""

    @property
    def addon_name(self):
        """Provide access to the add-on name.

        :returns: The add-on name.
        """
        label = self.element.find_element(
            By.CSS_SELECTOR, '#addon-install-confirmation-content label')
        return label.get_attribute('value')

    # NOTE: cancel_button and install_button were missing the @property
    # decorator, unlike every other element accessor in this module
    # (close_button, label, origin, allow_button, addon_name); they are
    # restored to properties for a consistent API.
    @property
    def cancel_button(self):
        """Provide access to the cancel button.

        :returns: The cancel button.
        """
        return self.element.find_element(
            By.ID, 'addon-install-confirmation-cancel')

    @property
    def install_button(self):
        """Provide access to the install button.

        :returns: The install button.
        """
        return self.element.find_element(
            By.ID, 'addon-install-confirmation-accept')
class AddOnInstallCompleteNotification(BaseNotification):
    """Add-on install complete notification."""
    # No elements beyond what BaseNotification already exposes.
    pass
class AddOnInstallFailedNotification(BaseNotification):
    """Add-on install failed notification."""
    pass
class AddOnProgressNotification(BaseNotification):
    """Add-on progress notification."""
    pass
| mpl-2.0 |
mattuuh7/incubator-airflow | airflow/example_dags/example_http_operator.py | 10 | 2614 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
### Example HTTP operator and sensor
"""
import airflow
from airflow import DAG
from airflow.operators.http_operator import SimpleHttpOperator
from airflow.operators.sensors import HttpSensor
from datetime import timedelta
import json
# Default arguments shared by every task in this DAG.
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': airflow.utils.dates.days_ago(2),
    'email': ['airflow@example.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
}
dag = DAG('example_http_operator', default_args=default_args)
# Surface this module's docstring on the DAG details page.
dag.doc_md = __doc__
# t1, t2 and t3 are examples of tasks created by instantiating operators
t1 = SimpleHttpOperator(
    task_id='post_op',
    endpoint='api/v1.0/nodes',
    data=json.dumps({"priority": 5}),
    headers={"Content-Type": "application/json"},
    # Succeed only when the endpoint returns an empty JSON collection
    # (the original "True if ... else False" was redundant).
    response_check=lambda response: len(response.json()) == 0,
    dag=dag)
t5 = SimpleHttpOperator(
    task_id='post_op_formenc',
    endpoint='nodes/url',
    data="name=Joe",
    headers={"Content-Type": "application/x-www-form-urlencoded"},
    dag=dag)
t2 = SimpleHttpOperator(
    task_id='get_op',
    method='GET',
    endpoint='api/v1.0/nodes',
    data={"param1": "value1", "param2": "value2"},
    headers={},
    dag=dag)
t3 = SimpleHttpOperator(
    task_id='put_op',
    method='PUT',
    endpoint='api/v1.0/nodes',
    data=json.dumps({"priority": 5}),
    headers={"Content-Type": "application/json"},
    dag=dag)
t4 = SimpleHttpOperator(
    task_id='del_op',
    method='DELETE',
    endpoint='api/v1.0/nodes',
    data="some=data",
    headers={"Content-Type": "application/x-www-form-urlencoded"},
    dag=dag)
sensor = HttpSensor(
    task_id='http_sensor_check',
    http_conn_id='http_default',
    endpoint='',
    params={},
    # Keep poking until the response body contains "Google".
    response_check=lambda response: "Google" in response.content,
    poke_interval=5,
    dag=dag)
# Dependency chain: sensor -> t1 -> t2 -> t3 -> t4 -> t5.
t1.set_upstream(sensor)
t2.set_upstream(t1)
t3.set_upstream(t2)
t4.set_upstream(t3)
t5.set_upstream(t4)
| apache-2.0 |
srivassumit/servo | tests/wpt/css-tests/css21_dev/xhtml1print/support/fonts/makegsubfonts.py | 1616 | 14125 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
# First Private Use Area codepoint; test glyphs are mapped upward from here.
baseCodepoint = 0xe000
# -------
# Features
# -------
# Read the feature list; the first tab-separated column of each
# non-comment line is an OpenType feature tag.
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
    line = line.strip()
    if not line:
        continue
    # Skip comment lines.
    if line.startswith("#"):
        continue
    # parse
    values = line.split("\t")
    tag = values.pop(0)
    mapping.append(tag);
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
    """Register one new glyph in the font's CFF table.

    Builds a Type 2 charstring from *program*, appends it to the
    charstring index, then records the glyph's ID in the charstring
    mapping and appends the name to the charset.
    """
    new_charstring = T2CharString(program=program, private=private,
                                  globalSubrs=globalSubrs)
    charStringsIndex.append(new_charstring)
    # The new glyph's ID is its position at the end of the charset.
    new_gid = len(topDict.charset)
    charStrings.charStrings[glyphName] = new_gid
    topDict.charset.append(glyphName)
def makeLookup1():
    """Build the GSUB lookup type 1 (single substitution) test font.

    Each feature tag gets a pass/fail glyph pair; the lookup swaps the
    two, so a renderer that applies the feature shows the expected glyph.
    Writes gsubtest-lookup1.otf next to the parent directory.
    """
    # make a variation of the shell TTX data
    f = open(shellSourcePath)
    ttxData = f.read()
    f.close()
    ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
    tempShellSourcePath = shellSourcePath + ".temp"
    f = open(tempShellSourcePath, "wb")
    f.write(ttxData)
    f.close()
    # compile the shell
    shell = TTFont(sfntVersion="OTTO")
    shell.importXML(tempShellSourcePath)
    shell.save(shellTempPath)
    os.remove(tempShellSourcePath)
    # load the shell
    shell = TTFont(shellTempPath)
    # grab the PASS and FAIL data
    hmtx = shell["hmtx"]
    glyphSet = shell.getGlyphSet()
    failGlyph = glyphSet["F"]
    failGlyph.decompile()
    failGlyphProgram = list(failGlyph.program)
    failGlyphMetrics = hmtx["F"]
    passGlyph = glyphSet["P"]
    passGlyph.decompile()
    passGlyphProgram = list(passGlyph.program)
    passGlyphMetrics = hmtx["P"]
    # grab some tables
    hmtx = shell["hmtx"]
    cmap = shell["cmap"]
    # start the glyph order
    existingGlyphs = [".notdef", "space", "F", "P"]
    glyphOrder = list(existingGlyphs)
    # start the CFF
    cff = shell["CFF "].cff
    globalSubrs = cff.GlobalSubrs
    topDict = cff.topDictIndex[0]
    topDict.charset = existingGlyphs
    private = topDict.Private
    charStrings = topDict.CharStrings
    charStringsIndex = charStrings.charStringsIndex
    features = sorted(mapping)
    # build the outline, hmtx and cmap data
    cp = baseCodepoint
    for index, tag in enumerate(features):
        # tag.pass
        glyphName = "%s.pass" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        cp += 1
        # tag.fail
        glyphName = "%s.fail" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=failGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = failGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        # bump this up so that the sequence is the same as the lookup 3 font
        cp += 3
    # set the glyph order
    shell.setGlyphOrder(glyphOrder)
    # start the GSUB
    shell["GSUB"] = newTable("GSUB")
    gsub = shell["GSUB"].table = GSUB()
    gsub.Version = 1.0
    # make a list of all the features we will make
    featureCount = len(features)
    # set up the script list (single DFLT script enabling every feature)
    scriptList = gsub.ScriptList = ScriptList()
    scriptList.ScriptCount = 1
    scriptList.ScriptRecord = []
    scriptRecord = ScriptRecord()
    scriptList.ScriptRecord.append(scriptRecord)
    scriptRecord.ScriptTag = "DFLT"
    script = scriptRecord.Script = Script()
    defaultLangSys = script.DefaultLangSys = DefaultLangSys()
    defaultLangSys.FeatureCount = featureCount
    defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
    defaultLangSys.ReqFeatureIndex = 65535
    defaultLangSys.LookupOrder = None
    script.LangSysCount = 0
    script.LangSysRecord = []
    # set up the feature list: one feature per tag, one lookup per feature
    featureList = gsub.FeatureList = FeatureList()
    featureList.FeatureCount = featureCount
    featureList.FeatureRecord = []
    for index, tag in enumerate(features):
        # feature record
        featureRecord = FeatureRecord()
        featureRecord.FeatureTag = tag
        feature = featureRecord.Feature = Feature()
        featureList.FeatureRecord.append(featureRecord)
        # feature
        feature.FeatureParams = None
        feature.LookupCount = 1
        feature.LookupListIndex = [index]
    # write the lookups: each one swaps the tag's pass and fail glyphs
    lookupList = gsub.LookupList = LookupList()
    lookupList.LookupCount = featureCount
    lookupList.Lookup = []
    for tag in features:
        # lookup
        lookup = Lookup()
        lookup.LookupType = 1
        lookup.LookupFlag = 0
        lookup.SubTableCount = 1
        lookup.SubTable = []
        lookupList.Lookup.append(lookup)
        # subtable
        subtable = SingleSubst()
        subtable.Format = 2
        subtable.LookupType = 1
        subtable.mapping = {
            "%s.pass" % tag : "%s.fail" % tag,
            "%s.fail" % tag : "%s.pass" % tag,
        }
        lookup.SubTable.append(subtable)
    path = outputPath % 1 + ".otf"
    if os.path.exists(path):
        os.remove(path)
    shell.save(path)
    # get rid of the shell
    if os.path.exists(shellTempPath):
        os.remove(shellTempPath)
def makeLookup3():
    """Build the GSUB lookup type 3 (alternate substitution) test font.

    Each feature tag gets a default glyph plus three alternates; the
    alternate lists are arranged so exactly one alternate index renders
    the PASS glyph. Writes gsubtest-lookup3.otf.
    """
    # make a variation of the shell TTX data
    f = open(shellSourcePath)
    ttxData = f.read()
    f.close()
    ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
    tempShellSourcePath = shellSourcePath + ".temp"
    f = open(tempShellSourcePath, "wb")
    f.write(ttxData)
    f.close()
    # compile the shell
    shell = TTFont(sfntVersion="OTTO")
    shell.importXML(tempShellSourcePath)
    shell.save(shellTempPath)
    os.remove(tempShellSourcePath)
    # load the shell
    shell = TTFont(shellTempPath)
    # grab the PASS and FAIL data
    hmtx = shell["hmtx"]
    glyphSet = shell.getGlyphSet()
    failGlyph = glyphSet["F"]
    failGlyph.decompile()
    failGlyphProgram = list(failGlyph.program)
    failGlyphMetrics = hmtx["F"]
    passGlyph = glyphSet["P"]
    passGlyph.decompile()
    passGlyphProgram = list(passGlyph.program)
    passGlyphMetrics = hmtx["P"]
    # grab some tables
    hmtx = shell["hmtx"]
    cmap = shell["cmap"]
    # start the glyph order
    existingGlyphs = [".notdef", "space", "F", "P"]
    glyphOrder = list(existingGlyphs)
    # start the CFF
    cff = shell["CFF "].cff
    globalSubrs = cff.GlobalSubrs
    topDict = cff.topDictIndex[0]
    topDict.charset = existingGlyphs
    private = topDict.Private
    charStrings = topDict.CharStrings
    charStringsIndex = charStrings.charStringsIndex
    features = sorted(mapping)
    # build the outline, hmtx and cmap data
    cp = baseCodepoint
    for index, tag in enumerate(features):
        # tag.pass (not cmap-mapped; reached only via substitution)
        glyphName = "%s.pass" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        # tag.fail (not cmap-mapped; reached only via substitution)
        glyphName = "%s.fail" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=failGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = failGlyphMetrics
        # tag.default
        glyphName = "%s.default" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        cp += 1
        # tag.alt1,2,3
        for i in range(1,4):
            glyphName = "%s.alt%d" % (tag, i)
            glyphOrder.append(glyphName)
            addGlyphToCFF(
                glyphName=glyphName,
                program=failGlyphProgram,
                private=private,
                globalSubrs=globalSubrs,
                charStringsIndex=charStringsIndex,
                topDict=topDict,
                charStrings=charStrings
            )
            hmtx[glyphName] = failGlyphMetrics
            for table in cmap.tables:
                if table.format == 4:
                    table.cmap[cp] = glyphName
                else:
                    raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
            cp += 1
    # set the glyph order
    shell.setGlyphOrder(glyphOrder)
    # start the GSUB
    shell["GSUB"] = newTable("GSUB")
    gsub = shell["GSUB"].table = GSUB()
    gsub.Version = 1.0
    # make a list of all the features we will make
    featureCount = len(features)
    # set up the script list (single DFLT script enabling every feature)
    scriptList = gsub.ScriptList = ScriptList()
    scriptList.ScriptCount = 1
    scriptList.ScriptRecord = []
    scriptRecord = ScriptRecord()
    scriptList.ScriptRecord.append(scriptRecord)
    scriptRecord.ScriptTag = "DFLT"
    script = scriptRecord.Script = Script()
    defaultLangSys = script.DefaultLangSys = DefaultLangSys()
    defaultLangSys.FeatureCount = featureCount
    defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
    defaultLangSys.ReqFeatureIndex = 65535
    defaultLangSys.LookupOrder = None
    script.LangSysCount = 0
    script.LangSysRecord = []
    # set up the feature list: one feature per tag, one lookup per feature
    featureList = gsub.FeatureList = FeatureList()
    featureList.FeatureCount = featureCount
    featureList.FeatureRecord = []
    for index, tag in enumerate(features):
        # feature record
        featureRecord = FeatureRecord()
        featureRecord.FeatureTag = tag
        feature = featureRecord.Feature = Feature()
        featureList.FeatureRecord.append(featureRecord)
        # feature
        feature.FeatureParams = None
        feature.LookupCount = 1
        feature.LookupListIndex = [index]
    # write the lookups
    lookupList = gsub.LookupList = LookupList()
    lookupList.LookupCount = featureCount
    lookupList.Lookup = []
    for tag in features:
        # lookup
        lookup = Lookup()
        lookup.LookupType = 3
        lookup.LookupFlag = 0
        lookup.SubTableCount = 1
        lookup.SubTable = []
        lookupList.Lookup.append(lookup)
        # subtable: only alternate index N of .altN yields the PASS glyph
        subtable = AlternateSubst()
        subtable.Format = 1
        subtable.LookupType = 3
        subtable.alternates = {
            "%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
            "%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
            "%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
            "%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
        }
        lookup.SubTable.append(subtable)
    path = outputPath % 3 + ".otf"
    if os.path.exists(path):
        os.remove(path)
    shell.save(path)
    # get rid of the shell
    if os.path.exists(shellTempPath):
        os.remove(shellTempPath)
def makeJavascriptData():
    """Generate the javascript feature-data support file.

    Writes a ``gFeatures`` object literal mapping each feature tag to the
    base codepoint of its test glyphs (four codepoints are reserved per
    feature).
    """
    features = sorted(mapping)
    lines = []
    lines.append("")
    lines.append("/* This file is autogenerated by makegsubfonts.py */")
    lines.append("")
    lines.append("/* ")
    lines.append(" Features defined in gsubtest fonts with associated base")
    lines.append(" codepoints for each feature:")
    lines.append("")
    lines.append(" cp = codepoint for feature featX")
    lines.append("")
    lines.append(" cp default PASS")
    lines.append(" cp featX=1 FAIL")
    lines.append(" cp featX=2 FAIL")
    lines.append("")
    lines.append(" cp+1 default FAIL")
    lines.append(" cp+1 featX=1 PASS")
    lines.append(" cp+1 featX=2 FAIL")
    lines.append("")
    lines.append(" cp+2 default FAIL")
    lines.append(" cp+2 featX=1 FAIL")
    lines.append(" cp+2 featX=2 PASS")
    lines.append("")
    lines.append("*/")
    lines.append("")
    lines.append("var gFeatures = {")
    # Each feature owns a block of four consecutive codepoints.
    taglist = ["\"%s\": 0x%x" % (tag, baseCodepoint + index * 4)
               for index, tag in enumerate(features)]
    lines.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
    lines.append("};")
    lines.append("")
    # Replace any previous output file wholesale.
    if os.path.exists(javascriptData):
        os.remove(javascriptData)
    f = open(javascriptData, "wb")
    f.write("\n".join(lines))
    f.close()
# build fonts
# NOTE: Python 2 print statements -- this script predates Python 3.
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
| mpl-2.0 |
RafaelPalomar/girder | tests/mock_s3.py | 6 | 6611 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2014 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import argparse
import boto
import errno
import logging
import os
import socket
import threading
import time
import moto.server
import moto.s3
from girder.utility.s3_assetstore_adapter import makeBotoConnectParams, \
botoConnectS3, S3AssetstoreAdapter
from six.moves import range
# First port to try for the mock server, and how many consecutive ports
# to probe before giving up.
_startPort = 31100
_maxTries = 100
def createBucket(botoConnect, bucketName):
    """
    Create a bucket if it doesn't already exist.

    :param botoConnect: connection parameters to pass to use with boto.
    :type botoConnect: dict
    :param bucketName: the bucket name
    :type bucketName: str
    :returns: a boto bucket.
    """
    conn = botoConnectS3(botoConnect)
    bucket = conn.lookup(bucket_name=bucketName, validate=True)
    # If the bucket already exists, return it; the previous code returned
    # None here, contradicting the docstring.
    if bucket is not None:
        return bucket
    bucket = conn.create_bucket(bucketName)
    # I would have preferred to set the CORS for the bucket we created, but
    # moto doesn't support that.
    # setBucketCors(bucket)
    return bucket
def setBucketCors(bucket):
    """
    Set the cors access values on a boto bucket to allow general access to
    that bucket.

    :param bucket: a boto bucket to set.
    """
    cors = boto.s3.cors.CORSConfiguration()
    # Allow any origin to use the common REST verbs plus the custom
    # x-amz-* headers that uploads send; expose ETag so clients can read
    # it from upload responses.
    cors.add_rule(
        id='girder_cors_rule',
        allowed_method=['HEAD', 'GET', 'PUT', 'POST', 'DELETE'],
        allowed_origin=['*'],
        allowed_header=['Content-Disposition', 'Content-Type',
                        'x-amz-meta-authorized-length', 'x-amz-acl',
                        'x-amz-meta-uploader-ip', 'x-amz-meta-uploader-id'],
        expose_header=['ETag'],
        max_age_seconds=3000
    )
    bucket.set_cors(cors)
def startMockS3Server():
    """
    Start a server using the defaults and adding a configuration parameter to
    the system so that the s3 assetstore handler will know to use this
    server. Attempt to bind to any port within the range specified by
    _startPort and _maxTries. Bias it with the pid of the current process so
    as to reduce potential conflicts with parallel tests that are started
    nearly simultaneously.

    :returns: the started server.
    :raises RuntimeError: if no port in the probed range is free.
    """
    # Reduce the chunk size to allow faster testing.
    S3AssetstoreAdapter.CHUNK_LEN = 1024 * 256
    moto.s3.models.UPLOAD_PART_MIN_SIZE = 1024 * 256
    # turn off logging from the S3 server unless we've asked to keep it
    if 'mocks3' not in os.environ.get('EXTRADEBUG', '').split():
        logging.getLogger('werkzeug').setLevel(logging.CRITICAL)
    selectedPort = None
    for porttry in range(_maxTries):
        port = _startPort + ((porttry + os.getpid()) % _maxTries)
        test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # SO_REUSEADDR must be set before bind() to have any effect; the
        # previous code set it afterwards, which did nothing.
        test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            test_socket.bind(('0.0.0.0', port))
            selectedPort = port
        except socket.error as err:
            # Allow address in use errors to fail quietly
            if err.errno != errno.EADDRINUSE:
                raise
        finally:
            test_socket.close()
        if selectedPort is not None:
            break
    if selectedPort is None:
        # Previously this fell through and started the server with a None
        # port, failing in a confusing way.
        raise RuntimeError('Could not find a free port for the mock S3 server')
    server = MockS3Server(selectedPort)
    server.start()
    # add a bucket named 'bucketname' to simplify testing
    createBucket(server.botoConnect, 'bucketname')
    return server
class MockS3Server(threading.Thread):
    """Daemon thread running a moto-backed mock S3 server on localhost."""
    def __init__(self, port=_startPort):
        threading.Thread.__init__(self)
        self.port = port
        # Daemonize so the server dies along with the test process.
        self.daemon = True
        self.service = 'http://127.0.0.1:%d' % port
        # Dummy credentials; presumably moto accepts any key pair --
        # verify if authentication behavior matters for a test.
        self.botoConnect = makeBotoConnectParams('abc', '123', self.service)
    def run(self):
        """Start and run the mock S3 server."""
        app = moto.server.DomainDispatcherApplication(_create_app,
                                                      service='s3bucket_path')
        moto.server.run_simple('0.0.0.0', self.port, app, threaded=True)
def _create_app(service):
    """
    Create the S3 server using moto, altering the responses to allow CORS
    requests.

    :param service: the amazon service we wish to mimic. This should probably
                    be 's3bucket_path'.
    """
    app = moto.server.create_backend_app(service)
    # I would have preferred to set the CORS for the bucket we have, but moto
    # doesn't support that, so I have to add the values here.
    @app.after_request
    def after_request(response):
        # Attach permissive CORS headers to every response so that
        # browser-style requests against moto succeed.
        response.headers.add('Access-Control-Allow-Origin', '*')
        response.headers.add('Access-Control-Allow-Methods',
                             'HEAD, GET, PUT, POST, OPTIONS, DELETE')
        response.headers.add(
            'Access-Control-Allow-Headers',
            'Content-Disposition,Content-Type,'
            'x-amz-meta-authorized-length,x-amz-acl,x-amz-meta-uploader-ip,'
            'x-amz-meta-uploader-id'
        )
        response.headers.add('Access-Control-Expose-Headers', 'ETag')
        return response
    return app
if __name__ == '__main__':
    """
    Provide a simple stand-alone program so that developers can run Girder with
    a modified conf file to simulate an S3 store.
    """
    parser = argparse.ArgumentParser(
        description='Run a mock S3 server. All data will be lost when it is '
                    'stopped.')
    parser.add_argument('-p', '--port', type=int, help='The port to run on',
                        default=_startPort)
    parser.add_argument('-b', '--bucket', type=str,
                        help='The name of a bucket to create', default='')
    # NOTE(review): --verbose is parsed but never used below.
    parser.add_argument('-v', '--verbose', action='count',
                        help='Increase verbosity.', default=0)
    args = parser.parse_args()
    server = MockS3Server(args.port)
    server.start()
    if args.bucket:
        createBucket(server.botoConnect, args.bucket)
    # The server runs on a daemon thread; keep the main thread alive
    # until the process is interrupted.
    while True:
        time.sleep(10000)
| apache-2.0 |
gnieboer/tensorflow | tensorflow/python/tools/optimize_for_inference.py | 106 | 4714 | # pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Removes parts of a graph that are only needed for training.
There are several common transformations that can be applied to GraphDefs
created to train a model, that help reduce the amount of computation needed when
the network is used only for inference. These include:
- Removing training-only operations like checkpoint saving.
- Stripping out parts of the graph that are never reached.
- Removing debug operations like CheckNumerics.
- Folding batch normalization ops into the pre-calculated weights.
- Fusing common operations into unified versions.
This script takes either a frozen binary GraphDef file (where the weight
variables have been converted into constants by the freeze_graph script), or a
text GraphDef proto file (the weight variables are stored in a separate
checkpoint file), and outputs a new GraphDef with the optimizations applied.
If the input graph is a text graph file, make sure to include the node that
restores the variable weights in output_names. That node is usually named
"restore_all".
An example of command-line usage is:
bazel build tensorflow/python/tools:optimize_for_inference && \
bazel-bin/tensorflow/python/tools/optimize_for_inference \
--input=frozen_inception_graph.pb \
--output=optimized_inception_graph.pb \
--frozen_graph=True \
--input_names=Mul \
--output_names=softmax
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.platform import app
from tensorflow.python.platform import gfile
from tensorflow.python.tools import optimize_for_inference_lib
# Populated by parse_args() in the __main__ block below.
FLAGS = None
def main(unused_args):
  """Load the input graph, optimize it for inference, and write it out.

  Returns 0 on success, or -1 if the input file does not exist.
  """
  if not gfile.Exists(FLAGS.input):
    print("Input graph file '" + FLAGS.input + "' does not exist!")
    return -1

  input_graph_def = graph_pb2.GraphDef()
  with gfile.Open(FLAGS.input, "rb") as f:
    data = f.read()
    if FLAGS.frozen_graph:
      # Frozen graphs are serialized binary GraphDef protos.
      input_graph_def.ParseFromString(data)
    else:
      # Otherwise the file is a text-format GraphDef proto.
      text_format.Merge(data.decode("utf-8"), input_graph_def)

  output_graph_def = optimize_for_inference_lib.optimize_for_inference(
      input_graph_def,
      FLAGS.input_names.split(","),
      FLAGS.output_names.split(","), FLAGS.placeholder_type_enum)

  if FLAGS.frozen_graph:
    # Use a context manager so the output file is always closed; the
    # previous code leaked the FastGFile handle.
    with gfile.FastGFile(FLAGS.output, "w") as f:
      f.write(output_graph_def.SerializeToString())
  else:
    graph_io.write_graph(output_graph_def,
                         os.path.dirname(FLAGS.output),
                         os.path.basename(FLAGS.output))
  return 0
def parse_args():
  """Parses command line arguments.

  Returns the (FLAGS, unparsed) pair from argparse.parse_known_args().
  """
  parser = argparse.ArgumentParser()
  # Register a "bool" type so --frozen_graph accepts "true"/"false" strings.
  parser.register("type", "bool", lambda v: v.lower() == "true")
  parser.add_argument(
      "--input",
      type=str,
      default="",
      help="TensorFlow \'GraphDef\' file to load.")
  parser.add_argument(
      "--output",
      type=str,
      default="",
      help="File to save the output graph to.")
  parser.add_argument(
      "--input_names",
      type=str,
      default="",
      help="Input node names, comma separated.")
  parser.add_argument(
      "--output_names",
      type=str,
      default="",
      help="Output node names, comma separated.")
  parser.add_argument(
      "--frozen_graph",
      nargs="?",
      const=True,
      type="bool",
      default=True,
      help="""\
      If true, the input graph is a binary frozen GraphDef
      file; if false, it is a text GraphDef proto file.\
      """)
  parser.add_argument(
      "--placeholder_type_enum",
      type=int,
      default=dtypes.float32.as_datatype_enum,
      help="The AttrValue enum to use for placeholders.")
  return parser.parse_known_args()
if __name__ == "__main__":
  FLAGS, unparsed = parse_args()
  # Forward any unrecognized arguments through to app.run.
  app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
mdeemer/XlsxWriter | xlsxwriter/test/comparison/test_hyperlink17.py | 8 | 1236 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        self.maxDiff = None
        filename = 'hyperlink17.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'
        # Workbook written by the test vs. the reference created in Excel.
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename
        self.ignore_files = []
        self.ignore_elements = {}
    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with hyperlinks.

        This example doesn't have any link formatting and tests the
        relationship linkage code.
        """
        workbook = Workbook(self.got_filename)
        # Turn off default URL format for testing.
        workbook.default_url_format = None
        worksheet = workbook.add_worksheet()
        # URL containing a space -- presumably exercises escaping in the
        # relationship part; confirm against the reference file.
        worksheet.write_url('A1', 'http://google.com/some link')
        workbook.close()
        self.assertExcelEqual()
| bsd-2-clause |
michael-dev2rights/ansible | lib/ansible/modules/cloud/rackspace/rax_mon_entity.py | 29 | 5779 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_mon_entity
short_description: Create or delete a Rackspace Cloud Monitoring entity
description:
- Create or delete a Rackspace Cloud Monitoring entity, which represents a device
to monitor. Entities associate checks and alarms with a target system and
provide a convenient, centralized place to store IP addresses. Rackspace
monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm
version_added: "2.0"
options:
label:
description:
- Defines a name for this entity. Must be a non-empty string between 1 and
255 characters long.
required: true
state:
description:
- Ensure that an entity with this C(name) exists or does not exist.
choices: ["present", "absent"]
agent_id:
description:
- Rackspace monitoring agent on the target device to which this entity is
bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
named_ip_addresses:
description:
- Hash of IP addresses that may be referenced by name by rax_mon_checks
added to this entity. Must be a dictionary of with keys that are names
between 1 and 64 characters long, and values that are valid IPv4 or IPv6
addresses.
metadata:
description:
- Hash of arbitrary C(name), C(value) pairs that are passed to associated
rax_mon_alarms. Names and values must all be between 1 and 255 characters
long.
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Entity example
gather_facts: False
hosts: local
connection: local
tasks:
- name: Ensure an entity exists
rax_mon_entity:
credentials: ~/.rax_pub
state: present
label: my_entity
named_ip_addresses:
web_box: 192.0.2.4
db_box: 192.0.2.5
meta:
hurf: durf
register: the_entity
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
                     metadata):
    """Ensure a Rackspace Cloud Monitoring entity with *label* is present or absent.

    Terminates through module.exit_json / module.fail_json; never returns
    normally to the caller.

    :param module: AnsibleModule instance used for exit/fail reporting.
    :param state: 'present' to create/update the entity, anything else deletes.
    :param label: entity label; must be 1-255 characters long.
    :param agent_id: optional monitoring agent id to bind the entity to.
    :param named_ip_addresses: dict mapping names to IP addresses for the entity.
    :param metadata: dict of arbitrary name/value pairs passed to alarms.
    """
    if len(label) < 1 or len(label) > 255:
        module.fail_json(msg='label must be between 1 and 255 characters long')

    changed = False

    cm = pyrax.cloud_monitoring
    if not cm:
        module.fail_json(msg='Failed to instantiate client. This typically '
                             'indicates an invalid region or an incorrectly '
                             'capitalized region name.')

    # Collect every existing entity that shares this label.
    existing = []
    for entity in cm.list_entities():
        if label == entity.label:
            existing.append(entity)

    entity = None

    if existing:
        entity = existing[0]

    if state == 'present':
        should_update = False
        should_delete = False
        should_create = False

        # Labels are not unique server-side; refuse to guess which one is meant.
        if len(existing) > 1:
            module.fail_json(msg='%s existing entities have the label %s.' %
                                 (len(existing), label))

        if entity:
            # ip_addresses cannot be changed in place, so a difference forces
            # a delete-and-recreate cycle instead of an update.
            if named_ip_addresses and named_ip_addresses != entity.ip_addresses:
                should_delete = should_create = True

            # Change an existing Entity, unless there's nothing to do.
            should_update = agent_id and agent_id != entity.agent_id or \
                (metadata and metadata != entity.metadata)

            if should_update and not should_delete:
                entity.update(agent_id, metadata)
                changed = True

            if should_delete:
                entity.delete()
        else:
            should_create = True

        if should_create:
            # Create a new Entity.
            entity = cm.create_entity(label=label, agent=agent_id,
                                      ip_addresses=named_ip_addresses,
                                      metadata=metadata)
            changed = True
    else:
        # Delete the existing Entities.
        for e in existing:
            e.delete()
            changed = True

    if entity:
        entity_dict = {
            "id": entity.id,
            "name": entity.name,
            "agent_id": entity.agent_id,
        }
        module.exit_json(changed=changed, entity=entity_dict)
    else:
        module.exit_json(changed=changed)
def main():
    """Ansible entry point: build the module object and delegate to cloud_monitoring()."""
    arg_spec = rax_argument_spec()
    arg_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        label=dict(required=True),
        agent_id=dict(),
        named_ip_addresses=dict(type='dict', default={}),
        metadata=dict(type='dict', default={})
    ))

    module = AnsibleModule(argument_spec=arg_spec,
                           required_together=rax_required_together())

    # pyrax is imported lazily at module load; bail out early if it is absent.
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    params = module.params
    setup_rax_module(module, pyrax)
    cloud_monitoring(module,
                     params.get('state'),
                     params.get('label'),
                     params.get('agent_id'),
                     params.get('named_ip_addresses'),
                     params.get('metadata'))
# Invoke the module when executed directly by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
caphrim007/ansible | test/units/modules/network/voss/test_voss_command.py | 11 | 4645 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.voss import voss_command
from units.modules.utils import set_module_args
from .voss_module import TestVossModule, load_fixture
class TestVossCommandModule(TestVossModule):
    """Unit tests for the voss_command module.

    ``run_commands`` is patched so that device output is served from fixture
    files named after the command (spaces replaced with underscores).
    """

    module = voss_command

    def setUp(self):
        super(TestVossCommandModule, self).setUp()
        self.mock_run_commands = patch('ansible.modules.network.voss.voss_command.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestVossCommandModule, self).tearDown()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()
            for item in commands:
                try:
                    # Some callers wrap the command in a JSON object.
                    obj = json.loads(item['command'])
                    command = obj['command']
                except ValueError:
                    command = item['command']
                filename = str(command).replace(' ', '_')
                output.append(load_fixture(filename))
            return output
        self.run_commands.side_effect = load_from_file

    def test_voss_command_simple(self):
        set_module_args(dict(commands=['show sys-info']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue(result['stdout'][0].startswith('General Info'))

    def test_voss_command_multiple(self):
        set_module_args(dict(commands=['show sys-info', 'show sys-info']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 2)
        # Fix: verify the output of every command, not just the first one.
        self.assertTrue(result['stdout'][0].startswith('General Info'))
        self.assertTrue(result['stdout'][1].startswith('General Info'))

    def test_voss_command_wait_for(self):
        wait_for = 'result[0] contains "General Info"'
        set_module_args(dict(commands=['show sys-info'], wait_for=wait_for))
        self.execute_module()

    def test_voss_command_wait_for_fails(self):
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show sys-info'], wait_for=wait_for))
        self.execute_module(failed=True)
        # Default retry count is 10, so the command runs 10 times.
        self.assertEqual(self.run_commands.call_count, 10)

    def test_voss_command_retries(self):
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show sys-info'], wait_for=wait_for, retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 2)

    def test_voss_command_match_any(self):
        wait_for = ['result[0] contains "General Info"',
                    'result[0] contains "test string"']
        set_module_args(dict(commands=['show sys-info'], wait_for=wait_for, match='any'))
        self.execute_module()

    def test_voss_command_match_all(self):
        wait_for = ['result[0] contains "General Info"',
                    'result[0] contains "Chassis Info"']
        set_module_args(dict(commands=['show sys-info'], wait_for=wait_for, match='all'))
        self.execute_module()

    def test_voss_command_match_all_failure(self):
        wait_for = ['result[0] contains "General Info"',
                    'result[0] contains "test string"']
        commands = ['show sys-info', 'show sys-info']
        set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
        self.execute_module(failed=True)

    def test_voss_command_configure_error(self):
        commands = ['configure terminal']
        set_module_args({
            'commands': commands,
            '_ansible_check_mode': True,
        })
        result = self.execute_module(failed=True)
        self.assertEqual(
            result['msg'],
            'voss_command does not support running config mode commands. Please use voss_config instead'
        )
| gpl-3.0 |
samchrisinger/osf.io | api_tests/nodes/views/test_node_links_list.py | 6 | 43226 | # -*- coding: utf-8 -*-
from nose.tools import * # flake8: noqa
from urlparse import urlparse
from framework.auth.core import Auth
from website.models import NodeLog
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
RegistrationFactory,
AuthUserFactory
)
from tests.utils import assert_logs
def node_url_for(n_id):
    """Return the v2 API URL for the node with id *n_id* (PEP 8: def, not a named lambda)."""
    return '/{}nodes/{}/'.format(API_BASE, n_id)
class TestNodeLinksList(ApiTestCase):
    """Tests for listing node links (pointers) on public and private nodes."""

    def setUp(self):
        super(TestNodeLinksList, self).setUp()
        self.user = AuthUserFactory()

        # A private project pointing at another private project.
        self.project = ProjectFactory(is_public=False, creator=self.user)
        self.pointer_project = ProjectFactory(is_public=False, creator=self.user)
        self.project.add_pointer(self.pointer_project, auth=Auth(self.user))
        self.private_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.project._id)

        # A public project pointing at another public project.
        self.public_project = ProjectFactory(is_public=True, creator=self.user)
        self.public_pointer_project = ProjectFactory(is_public=True, creator=self.user)
        self.public_project.add_pointer(self.public_pointer_project, auth=Auth(self.user))
        self.public_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.public_project._id)

        # Second user with no permissions on the projects above.
        self.user_two = AuthUserFactory()

    def test_return_embedded_public_node_pointers_logged_out(self):
        response = self.app.get(self.public_url)
        data = response.json['data']
        assert_equal(response.status_code, 200)
        assert_equal(response.content_type, 'application/vnd.api+json')
        assert_equal(len(data), 1)
        assert_equal(data[0]['embeds']['target_node']['data']['id'],
                     self.public_pointer_project._id)

    def test_return_embedded_public_node_pointers_logged_in(self):
        response = self.app.get(self.public_url, auth=self.user_two.auth)
        data = response.json['data']
        assert_equal(response.status_code, 200)
        assert_equal(response.content_type, 'application/vnd.api+json')
        assert_equal(len(data), 1)
        assert_equal(data[0]['embeds']['target_node']['data']['id'],
                     self.public_pointer_project._id)

    def test_return_private_node_pointers_logged_out(self):
        response = self.app.get(self.private_url, expect_errors=True)
        assert_equal(response.status_code, 401)
        assert_in('detail', response.json['errors'][0])

    def test_return_private_node_pointers_logged_in_contributor(self):
        response = self.app.get(self.private_url, auth=self.user.auth)
        data = response.json['data']
        assert_equal(response.status_code, 200)
        assert_equal(response.content_type, 'application/vnd.api+json')
        assert_equal(len(data), 1)
        assert_equal(data[0]['embeds']['target_node']['data']['id'],
                     self.pointer_project._id)

    def test_return_private_node_pointers_logged_in_non_contributor(self):
        response = self.app.get(self.private_url, auth=self.user_two.auth, expect_errors=True)
        assert_equal(response.status_code, 403)
        assert_in('detail', response.json['errors'][0])

    def test_deleted_links_not_returned(self):
        response = self.app.get(self.public_url, expect_errors=True)
        original_length = len(response.json['data'])

        # Soft-delete the pointed-at project, then list again.
        self.public_pointer_project.is_deleted = True
        self.public_pointer_project.save()

        response = self.app.get(self.public_url)
        assert_equal(len(response.json['data']), original_length - 1)
class TestNodeLinkCreate(ApiTestCase):
    """Tests for creating a single node link via POST /nodes/<id>/node_links/.

    Covers payload validation (missing/incorrect type, relationships shape),
    permissions (logged out, contributor, non-contributor), self-pointers,
    duplicates, fake node ids, and registrations.
    """

    def setUp(self):
        super(TestNodeLinkCreate, self).setUp()
        self.user = AuthUserFactory()

        # Private project + payload pointing at another private project.
        self.project = ProjectFactory(is_public=False, creator=self.user)
        self.pointer_project = ProjectFactory(is_public=False, creator=self.user)
        self.private_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.project._id)
        self.private_payload = {
            'data': {
                "type": "node_links",
                "relationships": {
                    'nodes': {
                        'data': {
                            'id': self.pointer_project._id,
                            'type': 'nodes'
                        }
                    }
                }
            }
        }

        # Public project + payload pointing at another public project.
        self.public_project = ProjectFactory(is_public=True, creator=self.user)
        self.public_pointer_project = ProjectFactory(is_public=True, creator=self.user)
        self.public_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.public_project._id)
        self.public_payload = {
            'data': {
                "type": "node_links",
                "relationships": {
                    'nodes': {
                        'data': {
                            'id': self.public_pointer_project._id,
                            'type': 'nodes'
                        }
                    }
                }
            }
        }

        # 'fdxlq' is a node id that does not exist.
        self.fake_url = '/{}nodes/{}/node_links/'.format(API_BASE, 'fdxlq')
        self.fake_payload = {
            'data': {
                "type": "node_links",
                "relationships": {
                    'nodes': {
                        'data': {
                            'id': 'fdxlq',
                            'type': 'nodes'
                        }
                    }
                }
            }
        }

        # Payload that makes the public project point at itself.
        self.point_to_itself_payload = {
            'data': {
                "type": "node_links",
                "relationships": {
                    'nodes': {
                        'data': {
                            'id': self.public_project._id,
                            'type': 'nodes'
                        }
                    }
                }
            }
        }

        # Second user with a public project of their own.
        self.user_two = AuthUserFactory()
        self.user_two_project = ProjectFactory(is_public=True, creator=self.user_two)
        self.user_two_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.user_two_project._id)
        self.user_two_payload = {
            'data': {
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'data': {
                            'id': self.user_two_project._id,
                            'type': 'nodes'
                        }
                    }
                }
            }
        }

    def test_add_node_link_relationships_is_a_list(self):
        # 'relationships' must be an object, not a list.
        data = {
            'data': {
                'type': 'node_links',
                'relationships': [{'target_node_id': self.public_pointer_project._id}]
            }
        }
        res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], "Malformed request.")

    def test_create_node_link_invalid_data(self):
        # Body that is not a JSON-API document at all.
        res = self.app.post_json_api(self.public_url, "Incorrect data", auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], "Malformed request.")

    def test_add_node_link_no_relationships(self):
        data = {
            'data': {
                'type': 'node_links',
                'attributes': {
                    'id': self.public_pointer_project._id
                }
            }
        }
        res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['source']['pointer'], '/data/relationships')

    def test_add_node_links_empty_relationships(self):
        data = {
            'data': {
                'type': 'node_links',
                'relationships': {}
            }
        }
        res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.json['errors'][0]['source']['pointer'], '/data/relationships')

    def test_add_node_links_no_nodes_key_in_relationships(self):
        data = {
            'data': {
                'type': 'node_links',
                'relationships': {
                    'data': {
                        'id': self.public_pointer_project._id,
                        'type': 'nodes'
                    }
                }
            }
        }
        res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], 'Malformed request.')

    def test_add_node_links_no_data_in_relationships(self):
        data = {
            'data': {
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'id': self.public_pointer_project._id,
                        'type': 'nodes'
                    }
                }
            }
        }
        res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], 'Request must include /data.')

    def test_add_node_links_no_target_type_in_relationships(self):
        data = {
            'data': {
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'data': {
                            'id': self.public_pointer_project._id
                        }
                    }
                }
            }
        }
        res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], 'Request must include /type.')

    def test_add_node_links_no_target_id_in_relationships(self):
        data = {
            'data': {
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'data': {
                            'type': 'nodes'
                        }
                    }
                }
            }
        }
        res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['source']['pointer'], '/data/id')

    def test_add_node_links_incorrect_target_id_in_relationships(self):
        data = {
            'data': {
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'data': {
                            'type': 'nodes',
                            'id': '12345'
                        }
                    }
                }
            }
        }
        res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_add_node_links_incorrect_target_type_in_relationships(self):
        # Type mismatch yields 409 Conflict per JSON-API.
        data = {
            'data': {
                'type': 'nodes',
                'relationships': {
                    'nodes': {
                        'data': {
                            'type': 'Incorrect!',
                            'id': self.public_pointer_project._id
                        }
                    }
                }
            }
        }
        res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 409)

    def test_creates_node_link_target_not_nested(self):
        payload = {
            'data': {
                'type': 'node_links',
                'id': self.pointer_project._id
            }
        }
        res = self.app.post_json_api(self.public_url, payload, auth=self.user_two.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['source']['pointer'], '/data/relationships')
        assert_equal(res.json['errors'][0]['detail'], 'Request must include /data/relationships.')

    def test_creates_public_node_pointer_logged_out(self):
        res = self.app.post_json_api(self.public_url, self.public_payload, expect_errors=True)
        assert_equal(res.status_code, 401)
        assert_in('detail', res.json['errors'][0])

    @assert_logs(NodeLog.POINTER_CREATED, 'public_project')
    def test_creates_public_node_pointer_logged_in(self):
        # Non-contributor is forbidden even on a public project...
        res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user_two.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_in('detail', res.json['errors'][0])

        # ...while the contributor succeeds.
        res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user.auth)
        assert_equal(res.status_code, 201)
        assert_equal(res.content_type, 'application/vnd.api+json')
        res_json = res.json['data']
        embedded = res_json['embeds']['target_node']['data']['id']
        assert_equal(embedded, self.public_pointer_project._id)

    def test_creates_private_node_pointer_logged_out(self):
        res = self.app.post_json_api(self.private_url, self.private_payload, expect_errors=True)
        assert_equal(res.status_code, 401)
        assert_in('detail', res.json['errors'][0])

    def test_creates_private_node_pointer_logged_in_contributor(self):
        res = self.app.post_json_api(self.private_url, self.private_payload, auth=self.user.auth)
        assert_equal(res.status_code, 201)
        res_json = res.json['data']
        embedded = res_json['embeds']['target_node']['data']['id']
        assert_equal(embedded, self.pointer_project._id)
        assert_equal(res.content_type, 'application/vnd.api+json')

    def test_creates_private_node_pointer_logged_in_non_contributor(self):
        res = self.app.post_json_api(self.private_url, self.private_payload, auth=self.user_two.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_in('detail', res.json['errors'][0])

    def test_create_node_pointer_non_contributing_node_to_contributing_node(self):
        res = self.app.post_json_api(self.private_url, self.user_two_payload, auth=self.user_two.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_in('detail', res.json['errors'][0])

    @assert_logs(NodeLog.POINTER_CREATED, 'project')
    def test_create_node_pointer_contributing_node_to_non_contributing_node(self):
        # Pointing at a node you don't contribute to is allowed.
        res = self.app.post_json_api(self.private_url, self.user_two_payload, auth=self.user.auth)
        assert_equal(res.status_code, 201)
        assert_equal(res.content_type, 'application/vnd.api+json')
        res_json = res.json['data']
        embedded = res_json['embeds']['target_node']['data']['id']
        assert_equal(embedded, self.user_two_project._id)

    def test_create_pointer_non_contributing_node_to_fake_node(self):
        # Permission check (403) happens before target lookup.
        res = self.app.post_json_api(self.private_url, self.fake_payload, auth=self.user_two.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_in('detail', res.json['errors'][0])

    def test_create_pointer_contributing_node_to_fake_node(self):
        res = self.app.post_json_api(self.private_url, self.fake_payload, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_in('detail', res.json['errors'][0])

    def test_create_fake_node_pointing_to_contributing_node(self):
        res = self.app.post_json_api(self.fake_url, self.private_payload, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 404)
        assert_in('detail', res.json['errors'][0])

        res = self.app.post_json_api(self.fake_url, self.private_payload, auth=self.user_two.auth, expect_errors=True)
        assert_equal(res.status_code, 404)
        assert_in('detail', res.json['errors'][0])

    @assert_logs(NodeLog.POINTER_CREATED, 'public_project')
    def test_create_node_pointer_to_itself(self):
        # Self-pointers are allowed.
        res = self.app.post_json_api(self.public_url, self.point_to_itself_payload, auth=self.user.auth)
        res_json = res.json['data']
        assert_equal(res.status_code, 201)
        assert_equal(res.content_type, 'application/vnd.api+json')
        embedded = res_json['embeds']['target_node']['data']['id']
        assert_equal(embedded, self.public_project._id)

    def test_create_node_pointer_to_itself_unauthorized(self):
        res = self.app.post_json_api(self.public_url, self.point_to_itself_payload, auth=self.user_two.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
        assert_in('detail', res.json['errors'][0])

    @assert_logs(NodeLog.POINTER_CREATED, 'public_project')
    def test_create_node_pointer_already_connected(self):
        # First creation succeeds; the duplicate is rejected with 400.
        res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user.auth)
        assert_equal(res.status_code, 201)
        assert_equal(res.content_type, 'application/vnd.api+json')
        res_json = res.json['data']
        embedded = res_json['embeds']['target_node']['data']['id']
        assert_equal(embedded, self.public_pointer_project._id)

        res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_in('detail', res.json['errors'][0])

    def test_cannot_add_link_to_registration(self):
        # Registrations expose no node_links collection: 404.
        registration = RegistrationFactory(creator=self.user)
        url = '/{}nodes/{}/node_links/'.format(API_BASE, registration._id)
        payload = {
            'data': {
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'data': {
                            'id': self.public_pointer_project._id,
                            'type': 'nodes'
                        }
                    }
                }
            }
        }
        res = self.app.post_json_api(url, payload, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 404)

    def test_create_node_pointer_no_type(self):
        payload = {
            'data': {
                'relationships': {
                    'nodes': {
                        'data': {
                            'id': self.user_two_project._id,
                            'type': 'nodes'
                        }
                    }
                }
            }
        }
        res = self.app.post_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.')
        assert_equal(res.json['errors'][0]['source']['pointer'], '/data/type')

    def test_create_node_pointer_incorrect_type(self):
        payload = {
            'data': {
                'type': 'Wrong type.',
                'relationships': {
                    'nodes': {
                        'data': {
                            'id': self.user_two_project._id,
                            'type': 'nodes'
                        }
                    }
                }
            }
        }
        res = self.app.post_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 409)
        assert_equal(res.json['errors'][0]['detail'], 'This resource has a type of "node_links", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.')
class TestNodeLinksBulkCreate(ApiTestCase):
def setUp(self):
super(TestNodeLinksBulkCreate, self).setUp()
self.user = AuthUserFactory()
self.private_project = ProjectFactory(is_public=False, creator=self.user)
self.private_pointer_project = ProjectFactory(is_public=False, creator=self.user)
self.private_pointer_project_two = ProjectFactory(is_public=False, creator=self.user)
self.private_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.private_project._id)
self.private_payload = {
'data': [{
"type": "node_links",
"relationships": {
'nodes': {
'data': {
"id": self.private_pointer_project._id,
"type": 'nodes'
}
}
}
},
{
"type": "node_links",
"relationships": {
'nodes': {
'data': {
"id": self.private_pointer_project_two._id,
"type": 'nodes'
}
}
}
}]
}
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer_project_two = ProjectFactory(is_public=True, creator=self.user)
self.public_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.public_project._id)
self.public_payload = {
'data': [{
"type": "node_links",
"relationships": {
'nodes': {
'data': {
"id": self.public_pointer_project._id,
"type": 'nodes'
}
}
}
},
{
"type": "node_links",
"relationships": {
'nodes': {
'data': {
"id": self.public_pointer_project_two._id,
"type": 'nodes'
}
}
}
}]
}
self.user_two = AuthUserFactory()
self.user_two_project = ProjectFactory(is_public=True, creator=self.user_two)
self.user_two_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.user_two_project._id)
self.user_two_payload = {'data': [{
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'id': self.user_two_project._id,
'type': 'nodes'
}
}
}
}
]}
def test_bulk_create_node_links_blank_request(self):
res = self.app.post_json_api(self.public_url, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
def test_bulk_creates_pointers_limits(self):
payload = {'data': [self.public_payload['data'][0]] * 101}
res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Bulk operation limit is 100, got 101.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data')
res = self.app.get(self.public_url)
assert_equal(res.json['data'], [])
def test_bulk_creates_project_target_not_nested(self):
payload = {'data': [{'type': 'node_links', 'target_node_id': self.private_pointer_project._id}]}
res = self.app.post_json_api(self.public_url, payload, auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/relationships')
assert_equal(res.json['errors'][0]['detail'], 'Request must include /data/relationships.')
def test_bulk_creates_public_node_pointers_logged_out(self):
res = self.app.post_json_api(self.public_url, self.public_payload, expect_errors=True, bulk=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
res = self.app.get(self.public_url)
assert_equal(res.json['data'], [])
def test_bulk_creates_public_node_pointer_logged_in_non_contrib(self):
res = self.app.post_json_api(self.public_url, self.public_payload,
auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
@assert_logs(NodeLog.POINTER_CREATED, 'public_project')
def test_bulk_creates_public_node_pointer_logged_in_contrib(self):
res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 201)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project._id)
embedded = res_json[1]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project_two._id)
def test_bulk_creates_private_node_pointers_logged_out(self):
res = self.app.post_json_api(self.private_url, self.private_payload, expect_errors=True, bulk=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.json['data'], [])
@assert_logs(NodeLog.POINTER_CREATED, 'private_project', index=-1)
@assert_logs(NodeLog.POINTER_CREATED, 'private_project')
def test_bulk_creates_private_node_pointer_logged_in_contributor(self):
res = self.app.post_json_api(self.private_url, self.private_payload, auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 201)
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.private_pointer_project._id)
embedded = res_json[1]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.private_pointer_project_two._id)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_bulk_creates_private_node_pointers_logged_in_non_contributor(self):
res = self.app.post_json_api(self.private_url, self.private_payload,
auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.json['data'], [])
def test_bulk_creates_node_pointers_non_contributing_node_to_contributing_node(self):
res = self.app.post_json_api(self.private_url, self.user_two_payload,
auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_CREATED, 'private_project')
def test_bulk_creates_node_pointers_contributing_node_to_non_contributing_node(self):
res = self.app.post_json_api(self.private_url, self.user_two_payload, auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 201)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.user_two_project._id)
res = self.app.get(self.private_url, auth=self.user.auth)
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.user_two_project._id)
def test_bulk_creates_pointers_non_contributing_node_to_fake_node(self):
fake_payload = {'data': [{'type': 'node_links', 'relationships': {'nodes': {'data': {'id': 'fdxlq', 'type': 'nodes'}}}}]}
res = self.app.post_json_api(self.private_url, fake_payload,
auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_bulk_creates_pointers_contributing_node_to_fake_node(self):
fake_payload = {'data': [{'type': 'node_links', 'relationships': {'nodes': {'data': {'id': 'fdxlq', 'type': 'nodes'}}}}]}
res = self.app.post_json_api(self.private_url, fake_payload,
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_in('detail', res.json['errors'][0])
def test_bulk_creates_fake_nodes_pointing_to_contributing_node(self):
fake_url = '/{}nodes/{}/node_links/'.format(API_BASE, 'fdxlq')
res = self.app.post_json_api(fake_url, self.private_payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 404)
assert_in('detail', res.json['errors'][0])
res = self.app.post_json_api(fake_url, self.private_payload, auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 404)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_CREATED, 'public_project')
def test_bulk_creates_node_pointer_to_itself(self):
point_to_itself_payload = {'data': [{'type': 'node_links', 'relationships': {'nodes': {'data': {'type': 'nodes', 'id': self.public_project._id}}}}]}
res = self.app.post_json_api(self.public_url, point_to_itself_payload, auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 201)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_project._id)
def test_bulk_creates_node_pointer_to_itself_unauthorized(self):
point_to_itself_payload = {'data': [{'type': 'node_links', 'relationships': {'nodes': {'data': {'type': 'nodes', 'id': self.public_project._id}}}}]}
res = self.app.post_json_api(self.public_url, point_to_itself_payload, bulk=True, auth=self.user_two.auth,
expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_CREATED, 'public_project')
@assert_logs(NodeLog.POINTER_CREATED, 'public_project', index=-1)
def test_bulk_creates_node_pointer_already_connected(self):
res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 201)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project._id)
embedded_two = res_json[1]['embeds']['target_node']['data']['id']
assert_equal(embedded_two, self.public_pointer_project_two._id)
res = self.app.post_json_api(self.public_url, self.public_payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_in("Target Node '{}' already pointed to by '{}'.".format(self.public_pointer_project._id, self.public_project._id), res.json['errors'][0]['detail'])
def test_bulk_cannot_add_link_to_registration(self):
registration = RegistrationFactory(creator=self.user)
url = '/{}nodes/{}/node_links/'.format(API_BASE, registration._id)
payload = {'data': [{'type': 'node_links', 'relationships': {'nodes': {'data': {'type': 'nodes', 'id': self.public_pointer_project._id}}}}]}
res = self.app.post_json_api(url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 404)
def test_bulk_creates_node_pointer_no_type(self):
payload = {'data': [{'relationships': {'nodes': {'data': {'type': 'nodes', 'id': self.user_two_project._id}}}}]}
res = self.app.post_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/0/type')
    def test_bulk_creates_node_pointer_incorrect_type(self):
        # A `type` that does not match the endpoint's resource type
        # ("node_links") is rejected with 409 Conflict.
        payload = {'data': [{'type': 'Wrong type.', 'relationships': {'nodes': {'data': {'type': 'nodes', 'id': self.user_two_project._id}}}}]}
        res = self.app.post_json_api(self.private_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 409)
        assert_equal(res.json['errors'][0]['detail'], 'This resource has a type of "node_links", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.')
class TestBulkDeleteNodeLinks(ApiTestCase):
    """Bulk DELETE behaviour of the /nodes/<id>/node_links/ endpoint.

    Covers payload validation, permissions (owner / non-contributor /
    anonymous), registrations, and post-delete visibility of the links.
    """

    def setUp(self):
        # One private and one public project, both owned by self.user and
        # each holding two node links; self.user_two has no permissions
        # on either project.
        super(TestBulkDeleteNodeLinks, self).setUp()
        self.user = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user, is_public=False)
        self.pointer_project = ProjectFactory(creator=self.user, is_public=True)
        self.pointer_project_two = ProjectFactory(creator=self.user, is_public=True)
        self.pointer = self.project.add_pointer(self.pointer_project, auth=Auth(self.user), save=True)
        self.pointer_two = self.project.add_pointer(self.pointer_project_two, auth=Auth(self.user), save=True)
        self.private_payload = {
            "data": [
                {"type": "node_links", "id": self.pointer._id},
                {"type": "node_links", "id": self.pointer_two._id}
            ]
        }
        self.private_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.project._id)
        self.user_two = AuthUserFactory()
        self.public_project = ProjectFactory(is_public=True, creator=self.user)
        self.public_pointer_project = ProjectFactory(is_public=True, creator=self.user)
        self.public_pointer_project_two = ProjectFactory(is_public=True, creator=self.user)
        self.public_pointer = self.public_project.add_pointer(self.public_pointer_project,
                                                              auth=Auth(self.user),
                                                              save=True)
        self.public_pointer_two = self.public_project.add_pointer(self.public_pointer_project_two,
                                                                  auth=Auth(self.user),
                                                                  save=True)
        self.public_payload = {
            'data': [
                {'type': 'node_links', 'id': self.public_pointer._id},
                {'type': 'node_links', 'id': self.public_pointer_two._id}
            ]
        }
        self.public_url = '/{}nodes/{}/node_links/'.format(API_BASE, self.public_project._id)

    def test_bulk_delete_node_links_blank_request(self):
        # A DELETE with no body at all is a 400.
        res = self.app.delete_json_api(self.public_url, auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)

    def test_bulk_delete_pointer_limits(self):
        # Bulk operations are capped at 100 items per request.
        res = self.app.delete_json_api(self.public_url, {'data': [self.public_payload['data'][0]] * 101},
                                       auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], 'Bulk operation limit is 100, got 101.')
        assert_equal(res.json['errors'][0]['source']['pointer'], '/data')

    def test_bulk_delete_dict_inside_data(self):
        # `data` must be a list of resource identifiers, not a single dict.
        res = self.app.delete_json_api(self.public_url, {'data': {'id': self.public_project._id, 'type': 'node_links'}},
                                       auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], 'Expected a list of items but got type "dict".')

    def test_bulk_delete_pointers_no_type(self):
        # Every identifier object must carry a `type` member.
        payload = {'data': [
            {'id': self.public_pointer._id},
            {'id': self.public_pointer_two._id}
        ]}
        res = self.app.delete_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['source']['pointer'], "/data/type")

    def test_bulk_delete_pointers_incorrect_type(self):
        # A mismatched `type` yields 409 Conflict.
        payload = {'data': [
            {'id': self.public_pointer._id, 'type': 'Incorrect type.'},
            {'id': self.public_pointer_two._id, 'type': 'Incorrect type.'}
        ]}
        res = self.app.delete_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 409)

    def test_bulk_delete_pointers_no_id(self):
        # Every identifier object must carry an `id` member.
        payload = {'data': [
            {'type': 'node_links'},
            {'type': 'node_links'}
        ]}
        res = self.app.delete_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['source']['pointer'], "/data/id")

    def test_bulk_delete_pointers_no_data(self):
        # NOTE(review): same request as test_bulk_delete_node_links_blank_request;
        # the two differ only in the asserted error detail.
        res = self.app.delete_json_api(self.public_url, auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], 'Request must contain array of resource identifier objects.')

    def test_bulk_delete_pointers_payload_is_empty_dict(self):
        # An empty JSON object is missing the required /data member.
        res = self.app.delete_json_api(self.public_url, {}, auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['errors'][0]['detail'], 'Request must include /data.')

    def test_cannot_delete_if_registration(self):
        # Registrations are read-only: bulk delete is 405 Method Not Allowed.
        registration = RegistrationFactory(project=self.public_project)
        url = '/{}registrations/{}/node_links/'.format(API_BASE, registration._id)
        res = self.app.delete_json_api(url, self.public_payload, auth=self.user.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 405)

    def test_bulk_deletes_public_node_pointers_logged_out(self):
        # Anonymous users cannot delete links even on a public project.
        res = self.app.delete_json_api(self.public_url, self.public_payload, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 401)
        assert_in('detail', res.json['errors'][0])

    def test_bulk_deletes_public_node_pointers_fails_if_bad_auth(self):
        node_count_before = len(self.public_project.nodes_pointer)
        res = self.app.delete_json_api(self.public_url, self.public_payload,
                                       auth=self.user_two.auth, expect_errors=True, bulk=True)
        # This is could arguably be a 405, but we don't need to go crazy with status codes
        assert_equal(res.status_code, 403)
        assert_in('detail', res.json['errors'][0])
        self.public_project.reload()
        # The rejected request must not have removed any links.
        assert_equal(node_count_before, len(self.public_project.nodes_pointer))

    @assert_logs(NodeLog.POINTER_REMOVED, 'public_project')
    @assert_logs(NodeLog.POINTER_REMOVED, 'public_project', index=-1)
    def test_bulk_deletes_public_node_pointers_succeeds_as_owner(self):
        # The owner removes both links in one request (204, two fewer links).
        node_count_before = len(self.public_project.nodes_pointer)
        res = self.app.delete_json_api(self.public_url, self.public_payload, auth=self.user.auth, bulk=True)
        self.public_project.reload()
        assert_equal(res.status_code, 204)
        assert_equal(node_count_before - 2, len(self.public_project.nodes_pointer))
        self.public_project.reload()

    def test_bulk_deletes_private_node_pointers_logged_out(self):
        res = self.app.delete_json_api(self.private_url, self.private_payload, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 401)
        assert_in('detail', res.json['errors'][0])

    @assert_logs(NodeLog.POINTER_REMOVED, 'project', index=-1)
    @assert_logs(NodeLog.POINTER_REMOVED, 'project')
    def test_bulk_deletes_private_node_pointers_logged_in_contributor(self):
        res = self.app.delete_json_api(self.private_url, self.private_payload, auth=self.user.auth, bulk=True)
        self.project.reload()  # Update the model to reflect changes made by post request
        assert_equal(res.status_code, 204)
        assert_equal(len(self.project.nodes_pointer), 0)

    def test_bulk_deletes_private_node_pointers_logged_in_non_contributor(self):
        res = self.app.delete_json_api(self.private_url, self.private_payload,
                                       auth=self.user_two.auth, expect_errors=True, bulk=True)
        assert_equal(res.status_code, 403)
        assert_in('detail', res.json['errors'][0])

    @assert_logs(NodeLog.POINTER_REMOVED, 'public_project', index=-1)
    @assert_logs(NodeLog.POINTER_REMOVED, 'public_project')
    def test_return_bulk_deleted_public_node_pointer(self):
        res = self.app.delete_json_api(self.public_url, self.public_payload, auth=self.user.auth, bulk=True)
        self.public_project.reload()  # Update the model to reflect changes made by post request
        assert_equal(res.status_code, 204)
        pointer_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.public_project._id, self.public_pointer._id)
        #check that deleted pointer can not be returned
        res = self.app.get(pointer_url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 404)

    @assert_logs(NodeLog.POINTER_REMOVED, 'project', index=-1)
    @assert_logs(NodeLog.POINTER_REMOVED, 'project')
    def test_return_bulk_deleted_private_node_pointer(self):
        res = self.app.delete_json_api(self.private_url, self.private_payload, auth=self.user.auth, bulk=True)
        self.project.reload()  # Update the model to reflect changes made by post request
        assert_equal(res.status_code, 204)
        pointer_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.project._id, self.pointer._id)
        #check that deleted pointer can not be returned
        res = self.app.get(pointer_url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 404)

    # Regression test for https://openscience.atlassian.net/browse/OSF-4322
    def test_bulk_delete_link_that_is_not_linked_to_correct_node(self):
        project = ProjectFactory(creator=self.user)
        # The node link belongs to a different project
        # NOTE(review): `project` is unused — the mismatch is exercised by
        # sending the public payload's link ids to the private project's URL.
        # Confirm whether the fixture above was meant to be used instead.
        res = self.app.delete_json_api(
            self.private_url, self.public_payload,
            auth=self.user.auth,
            expect_errors=True,
            bulk=True
        )
        assert_equal(res.status_code, 400)
        errors = res.json['errors']
        assert_equal(len(errors), 1)
        assert_equal(errors[0]['detail'], 'Node link does not belong to the requested node.')
| apache-2.0 |
arthurfait/HMM-3 | Gen_Synthetic_Data/work/EvalProtTop.py | 1 | 1306 | import sys
def main():
if len(sys.argv)<2:
print "usage :",sys.argv[0],"ProtTop.results, ProtTopEval.results"
sys.exit(-1)
ifname = sys.argv[1]
ofname = sys.argv[2]
iPTfile = open(ifname,'r')
toplines = iPTfile.readlines()
iPTfile.close()
num_lines = 0
tot_num = 0.0
num_diff_seg = 0.0 # different segments, wrong topology and wrong topography
num_correct_top = 0.0 # correct topology
num_wrong_top = 0.0 # wrong topology and correct topography
for line in toplines:
elems = line.rstrip().split()
#print elems
num_lines += 1
if int(elems[0]) == 0 and int(elems[1]) == 1:
num_diff_seg += 1
elif int(elems[0]) == 1 and int(elems[1]) == 1:
num_correct_top += 1
else:
num_wrong_top += 1
tot_num = num_wrong_top + num_correct_top + num_diff_seg
print "Num Proteins: %s"%num_lines
out_str = "Total Num Proteins: %s, \nPercentage wrong topology: %s, \nPercentage different segments: %s, \nPercentage correct topology: %s \n"%(tot_num,num_wrong_top/tot_num,num_diff_seg/tot_num,num_correct_top/tot_num)
oPTfile = open(ofname,"w")
oPTfile.write(out_str)
oPTfile.close()
if __name__ == '__main__':
main() | gpl-3.0 |
dpgaspar/Flask-AppBuilder | examples/quickcharts2/config.py | 1 | 1726 | import os
# Directory containing this config file; anchors all relative paths below.
basedir = os.path.abspath(os.path.dirname(__file__))

# Enable CSRF protection on Flask-WTF forms.
CSRF_ENABLED = True
# NOTE(review): secret key committed to source — acceptable only for this
# example app; it must be overridden in any real deployment.
SECRET_KEY = "\2\1thisismyscretkey\1\2\e\y\y\h"

# OpenID providers offered on the login page.
OPENID_PROVIDERS = [
    {"name": "Google", "url": "https://www.google.com/accounts/o8/id"},
    {"name": "Yahoo", "url": "https://me.yahoo.com"},
    {"name": "AOL", "url": "http://openid.aol.com/<username>"},
    {"name": "Flickr", "url": "http://www.flickr.com/<username>"},
    {"name": "MyOpenID", "url": "https://www.myopenid.com"},
]

# SQLite database file next to this config; alternative URIs kept below.
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(basedir, "app.db")
# SQLALCHEMY_DATABASE_URI = 'mysql://root:password@localhost/quickhowto'
# SQLALCHEMY_DATABASE_URI = 'postgresql://scott:tiger@localhost:5432/myapp'
# SQLALCHEMY_ECHO = True

# Babel i18n defaults and the languages shown in the language switcher.
BABEL_DEFAULT_LOCALE = "en"
BABEL_DEFAULT_FOLDER = "translations"
LANGUAGES = {
    "en": {"flag": "gb", "name": "English"},
    "pt": {"flag": "pt", "name": "Portugal"},
    "es": {"flag": "es", "name": "Espanol"},
}

# ------------------------------
# GLOBALS FOR GENERAL APP's
# ------------------------------
# Filesystem locations for uploads and the URL prefix they are served from.
UPLOAD_FOLDER = basedir + "/app/static/uploads/"
IMG_UPLOAD_FOLDER = basedir + "/app/static/uploads/"
IMG_UPLOAD_URL = "/static/uploads/"
# Authentication backend selector; presumably 1 = database auth (AUTH_DB) —
# confirm against flask_appbuilder's auth-type constants.
AUTH_TYPE = 1
# AUTH_LDAP_SERVER = "ldap://dc.domain.net"
# AUTH_LDAP_USE_TLS = False
# Role names granted to administrators and to anonymous visitors.
AUTH_ROLE_ADMIN = "Admin"
AUTH_ROLE_PUBLIC = "Public"
APP_NAME = "F.A.B. Example"
# Bootswatch theme; the empty string selects the default theme.
APP_THEME = ""  # default
# APP_THEME = "cerulean.css" # COOL
# APP_THEME = "amelia.css"
# APP_THEME = "cosmo.css"
# APP_THEME = "cyborg.css" # COOL
# APP_THEME = "flatly.css"
# APP_THEME = "journal.css"
# APP_THEME = "readable.css"
# APP_THEME = "simplex.css"
# APP_THEME = "slate.css" # COOL
# APP_THEME = "spacelab.css" # NICE
# APP_THEME = "united.css"
| bsd-3-clause |
lifuzu/node-gyp | gyp/pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
  """Visual Studio XML user file (.vcproj.user) writer."""

  def __init__(self, user_file_path, version, name):
    """Initializes the user file.

    Args:
      user_file_path: Path to the user file.
      version: Version info.
      name: Name of the user file.
    """
    self.user_file_path = user_file_path
    self.version = version
    self.name = name
    # Maps configuration name -> easy_xml spec list for that config.
    self.configurations = {}

  def AddConfig(self, name):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
    """
    self.configurations[name] = ['Configuration', {'Name': name}]

  def AddDebugSettings(self, config_name, command, environment=None,
                       working_directory=""):
    """Adds a DebugSettings node to the user file for a particular config.

    Args:
      config_name: Name of the configuration the settings belong to.
      command: command line to run. First element in the list is the
          executable. All elements of the command will be quoted if
          necessary.
      environment: dict of environment variables to set for the
          debuggee. (optional)
      working_directory: working directory for the debuggee. (optional)
    """
    # Bug fix: `environment` previously defaulted to a shared mutable
    # dict ({}).  None is the safe sentinel; behavior is unchanged for
    # callers because the empty dict was only ever read, never mutated.
    command = _QuoteWin32CommandLineArgs(command)

    abs_command = _FindCommandInPath(command[0])

    if environment and isinstance(environment, dict):
      env_list = ['%s="%s"' % (key, val)
                  for (key, val) in environment.iteritems()]
      environment = ' '.join(env_list)
    else:
      environment = ''

    n_cmd = ['DebugSettings',
             {'Command': abs_command,
              'WorkingDirectory': working_directory,
              'CommandArguments': " ".join(command[1:]),
              'RemoteMachine': socket.gethostname(),
              'Environment': environment,
              'EnvironmentMerge': 'true',
              # Currently these are all "dummy" values that we're just setting
              # in the default manner that MSVS does it. We could use some of
              # these to add additional capabilities, I suppose, but they might
              # not have parity with other platforms then.
              'Attach': 'false',
              'DebuggerType': '3',  # 'auto' debugger
              'Remote': '1',
              'RemoteCommand': '',
              'HttpUrl': '',
              'PDBPath': '',
              'SQLDebugging': '',
              'DebuggerFlavor': '0',
              'MPIRunCommand': '',
              'MPIRunArguments': '',
              'MPIRunWorkingDirectory': '',
              'ApplicationCommand': '',
              'ApplicationArguments': '',
              'ShimCommand': '',
              'MPIAcceptMode': '',
              'MPIAcceptFilter': ''
             }]

    # Find the config, and add it if it doesn't exist.
    if config_name not in self.configurations:
      self.AddConfig(config_name)

    # Add the DebugSettings onto the appropriate config.
    self.configurations[config_name].append(n_cmd)

  def WriteIfChanged(self):
    """Writes the user file."""
    configs = ['Configurations']
    # Sorted for deterministic output; only the spec is emitted.
    for _config, spec in sorted(self.configurations.iteritems()):
      configs.append(spec)

    content = ['VisualStudioUserFile',
               {'Version': self.version.ProjectVersion(),
                'Name': self.name
               },
               configs]
    easy_xml.WriteXmlIfChanged(content, self.user_file_path,
                               encoding="Windows-1252")
| mit |
ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/contrib/signal/python/ops/spectral_ops.py | 27 | 12618 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Spectral operations (e.g. Short-time Fourier Transform)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.contrib.signal.python.ops import reconstruction_ops
from tensorflow.contrib.signal.python.ops import shape_ops
from tensorflow.contrib.signal.python.ops import window_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops
def stft(signals, frame_length, frame_step, fft_length=None,
         window_fn=functools.partial(window_ops.hann_window, periodic=True),
         pad_end=False, name=None):
  """Computes the [Short-time Fourier Transform][stft] of `signals`.

  Implemented with GPU-compatible ops and supports gradients.

  Args:
    signals: A `[..., samples]` `float32` `Tensor` of real-valued signals.
    frame_length: An integer scalar `Tensor`. The window length in samples.
    frame_step: An integer scalar `Tensor`. The number of samples to step.
    fft_length: An integer scalar `Tensor`. The size of the FFT to apply.
      If not provided, uses the smallest power of 2 enclosing `frame_length`.
    window_fn: A callable that takes a window length and a `dtype` keyword
      argument and returns a `[window_length]` `Tensor` of samples in the
      provided datatype. If set to `None`, no windowing is used.
    pad_end: Whether to pad the end of `signals` with zeros when the provided
      frame length and step produces a frame that lies partially past its end.
    name: An optional name for the operation.

  Returns:
    A `[..., frames, fft_unique_bins]` `Tensor` of `complex64` STFT values where
    `fft_unique_bins` is `fft_length // 2 + 1` (the unique components of the
    FFT).

  Raises:
    ValueError: If `signals` is not at least rank 1, `frame_length` is
      not scalar, or `frame_step` is not scalar.

  [stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
  """
  with ops.name_scope(name, 'stft', [signals, frame_length,
                                     frame_step]):
    # Validate ranks up front: signals must be at least rank 1, the frame
    # parameters must be scalars.
    signals = ops.convert_to_tensor(signals, name='signals')
    signals.shape.with_rank_at_least(1)
    frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
    frame_length.shape.assert_has_rank(0)
    frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
    frame_step.shape.assert_has_rank(0)

    # Default FFT size: smallest power of two >= frame_length.
    if fft_length is None:
      fft_length = _enclosing_power_of_two(frame_length)
    else:
      fft_length = ops.convert_to_tensor(fft_length, name='fft_length')

    # Slice the signals into overlapping frames of length frame_length,
    # advancing frame_step samples each time.
    framed_signals = shape_ops.frame(
        signals, frame_length, frame_step, pad_end=pad_end)

    # Optionally window the framed signals.
    if window_fn is not None:
      window = window_fn(frame_length, dtype=framed_signals.dtype)
      framed_signals *= window

    # spectral_ops.rfft produces the (fft_length/2 + 1) unique components of the
    # FFT of the real windowed signals in framed_signals.
    return spectral_ops.rfft(framed_signals, [fft_length])
def inverse_stft_window_fn(frame_step,
                           forward_window_fn=functools.partial(
                               window_ops.hann_window, periodic=True),
                           name=None):
  """Generates a window function that can be used in `inverse_stft`.

  Constructs a window that is equal to the forward window with a further
  pointwise amplitude correction. `inverse_stft_window_fn` is equivalent to
  `forward_window_fn` in the case where it would produce an exact inverse.

  See examples in `inverse_stft` documentation for usage.

  Args:
    frame_step: An integer scalar `Tensor`. The number of samples to step.
    forward_window_fn: window_fn used in the forward transform, `stft`.
    name: An optional name for the operation.

  Returns:
    A callable that takes a window length and a `dtype` keyword argument and
    returns a `[window_length]` `Tensor` of samples in the provided datatype.
    The returned window is suitable for reconstructing original waveform in
    inverse_stft.
  """
  # Validate frame_step eagerly; the closure below captures the converted
  # tensor.
  with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
    frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
    frame_step.shape.assert_has_rank(0)

  def inverse_stft_window_fn_inner(frame_length, dtype):
    """Computes a window that can be used in `inverse_stft`.

    Args:
      frame_length: An integer scalar `Tensor`. The window length in samples.
      dtype: Data type of waveform passed to `stft`.

    Returns:
      A window suitable for reconstructing original waveform in `inverse_stft`.

    Raises:
      ValueError: If `frame_length` is not scalar, `forward_window_fn` is not a
      callable that takes a window length and a `dtype` keyword argument and
      returns a `[window_length]` `Tensor` of samples in the provided datatype
      `frame_step` is not scalar, or `frame_step` is not scalar.
    """
    with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
      frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
      frame_length.shape.assert_has_rank(0)

      # Use equation 7 from Griffin + Lim.
      # The denominator is the sum of the squared forward window over all
      # overlapping frames, computed by folding the squared window into
      # [overlaps, frame_step] and summing across overlaps.
      forward_window = forward_window_fn(frame_length, dtype=dtype)
      denom = math_ops.square(forward_window)
      overlaps = -(-frame_length // frame_step)  # Ceiling division.
      denom = array_ops.pad(denom, [(0, overlaps * frame_step - frame_length)])
      denom = array_ops.reshape(denom, [overlaps, frame_step])
      denom = math_ops.reduce_sum(denom, 0, keepdims=True)
      denom = array_ops.tile(denom, [overlaps, 1])
      denom = array_ops.reshape(denom, [overlaps * frame_step])

      return forward_window / denom[:frame_length]
  return inverse_stft_window_fn_inner
def inverse_stft(stfts,
                 frame_length,
                 frame_step,
                 fft_length=None,
                 window_fn=functools.partial(window_ops.hann_window,
                                             periodic=True),
                 name=None):
  """Computes the inverse [Short-time Fourier Transform][stft] of `stfts`.

  To reconstruct an original waveform, a complimentary window function should
  be used in inverse_stft. Such a window function can be constructed with
  tf.contrib.signal.inverse_stft_window_fn.

  Example:

  ```python
  frame_length = 400
  frame_step = 160
  waveform = tf.placeholder(dtype=tf.float32, shape=[1000])
  stft = tf.contrib.signal.stft(waveform, frame_length, frame_step)
  inverse_stft = tf.contrib.signal.inverse_stft(
      stft, frame_length, frame_step,
      window_fn=tf.contrib.signal.inverse_stft_window_fn(frame_step))
  ```

  if a custom window_fn is used in stft, it must be passed to
  inverse_stft_window_fn:

  ```python
  frame_length = 400
  frame_step = 160
  window_fn = functools.partial(window_ops.hamming_window, periodic=True),
  waveform = tf.placeholder(dtype=tf.float32, shape=[1000])
  stft = tf.contrib.signal.stft(
      waveform, frame_length, frame_step, window_fn=window_fn)
  inverse_stft = tf.contrib.signal.inverse_stft(
      stft, frame_length, frame_step,
      window_fn=tf.contrib.signal.inverse_stft_window_fn(
         frame_step, forward_window_fn=window_fn))
  ```

  Implemented with GPU-compatible ops and supports gradients.

  Args:
    stfts: A `complex64` `[..., frames, fft_unique_bins]` `Tensor` of STFT bins
      representing a batch of `fft_length`-point STFTs where `fft_unique_bins`
      is `fft_length // 2 + 1`
    frame_length: An integer scalar `Tensor`. The window length in samples.
    frame_step: An integer scalar `Tensor`. The number of samples to step.
    fft_length: An integer scalar `Tensor`. The size of the FFT that produced
      `stfts`. If not provided, uses the smallest power of 2 enclosing
      `frame_length`.
    window_fn: A callable that takes a window length and a `dtype` keyword
      argument and returns a `[window_length]` `Tensor` of samples in the
      provided datatype. If set to `None`, no windowing is used.
    name: An optional name for the operation.

  Returns:
    A `[..., samples]` `Tensor` of `float32` signals representing the inverse
    STFT for each input STFT in `stfts`.

  Raises:
    ValueError: If `stfts` is not at least rank 2, `frame_length` is not scalar,
      `frame_step` is not scalar, or `fft_length` is not scalar.

  [stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
  """
  with ops.name_scope(name, 'inverse_stft', [stfts]):
    # Validate ranks: stfts is at least [frames, bins]; the frame/FFT
    # parameters are scalars.
    stfts = ops.convert_to_tensor(stfts, name='stfts')
    stfts.shape.with_rank_at_least(2)
    frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
    frame_length.shape.assert_has_rank(0)
    frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
    frame_step.shape.assert_has_rank(0)
    if fft_length is None:
      fft_length = _enclosing_power_of_two(frame_length)
    else:
      fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
      fft_length.shape.assert_has_rank(0)

    # irfft yields frames of length fft_length in the time domain.
    real_frames = spectral_ops.irfft(stfts, [fft_length])

    # frame_length may be larger or smaller than fft_length, so we pad or
    # truncate real_frames to frame_length.
    frame_length_static = tensor_util.constant_value(frame_length)
    # If we don't know the shape of real_frames's inner dimension, pad and
    # truncate to frame_length.
    if (frame_length_static is None or
        real_frames.shape.ndims is None or
        real_frames.shape[-1].value is None):
      real_frames = real_frames[..., :frame_length]
      real_frames_rank = array_ops.rank(real_frames)
      real_frames_shape = array_ops.shape(real_frames)
      # Pad only the last dimension, by max(0, frame_length - current).
      paddings = array_ops.concat(
          [array_ops.zeros([real_frames_rank - 1, 2],
                           dtype=frame_length.dtype),
           [[0, math_ops.maximum(0, frame_length - real_frames_shape[-1])]]], 0)
      real_frames = array_ops.pad(real_frames, paddings)
    # We know real_frames's last dimension and frame_length statically. If they
    # are different, then pad or truncate real_frames to frame_length.
    elif real_frames.shape[-1].value > frame_length_static:
      real_frames = real_frames[..., :frame_length_static]
    elif real_frames.shape[-1].value < frame_length_static:
      pad_amount = frame_length_static - real_frames.shape[-1].value
      real_frames = array_ops.pad(real_frames,
                                  [[0, 0]] * (real_frames.shape.ndims - 1) +
                                  [[0, pad_amount]])

    # The above code pads the inner dimension of real_frames to frame_length,
    # but it does so in a way that may not be shape-inference friendly.
    # Restore shape information if we are able to.
    if frame_length_static is not None and real_frames.shape.ndims is not None:
      real_frames.set_shape([None] * (real_frames.shape.ndims - 1) +
                            [frame_length_static])

    # Optionally window and overlap-add the inner 2 dimensions of real_frames
    # into a single [samples] dimension.
    if window_fn is not None:
      window = window_fn(frame_length, dtype=stfts.dtype.real_dtype)
      real_frames *= window
    return reconstruction_ops.overlap_and_add(real_frames, frame_step)
def _enclosing_power_of_two(value):
  """Return 2**N for integer N such that 2**N >= value."""
  static_value = tensor_util.constant_value(value)
  if static_value is not None:
    # Value known at graph-construction time: compute with numpy and
    # return a constant of the same dtype.
    exponent = np.ceil(np.log(static_value) / np.log(2.0))
    return constant_op.constant(int(2 ** exponent), value.dtype)
  # Dynamic value: compute 2**ceil(log2(value)) with TensorFlow ops.
  log2_value = math_ops.log(math_ops.to_float(value)) / math_ops.log(2.0)
  return math_ops.cast(
      math_ops.pow(2.0, math_ops.ceil(log2_value)), value.dtype)
| mit |
absoludity/servo | tests/wpt/harness/wptrunner/config.py | 196 | 1851 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import ConfigParser
import os
import sys
from collections import OrderedDict
here = os.path.split(__file__)[0]
class ConfigDict(dict):
    """A dict of config values whose path entries are resolved relative to
    the directory of the ini file they were read from (`base_path`)."""

    def __init__(self, base_path, *args, **kwargs):
        self.base_path = base_path
        dict.__init__(self, *args, **kwargs)

    def get_path(self, key, default=None):
        """Return self[key] as an absolute path resolved against base_path,
        or `default` when the key is missing.  A leading ~/~user is
        expanded first."""
        if key not in self:
            return default
        path = self[key]
        # Bug fix: os.path.expanduser returns the expanded path — the
        # previous code discarded its result, so ~ was never expanded.
        path = os.path.expanduser(path)
        return os.path.abspath(os.path.join(self.base_path, path))
def read(config_path):
    """Parse the ini file at `config_path` into an OrderedDict mapping
    section name -> ConfigDict (whose path values resolve against the
    ini file's directory).

    String interpolation in values gets a `pwd` substitution pointing at
    the current working directory.
    """
    config_path = os.path.abspath(config_path)
    config_root = os.path.split(config_path)[0]
    parser = ConfigParser.SafeConfigParser()
    success = parser.read(config_path)
    # parser.read returns the list of files successfully parsed; the
    # requested file must be among them.
    assert config_path in success, success

    subns = {"pwd": os.path.abspath(os.path.curdir)}

    rv = OrderedDict()
    for section in parser.sections():
        rv[section] = ConfigDict(config_root)
        for key in parser.options(section):
            rv[section][key] = parser.get(section, key, False, subns)

    return rv
def path(argv=None):
    """Return the absolute path of the wptrunner config file.

    A ``--config PATH`` or ``--config=PATH`` argument in *argv* wins
    (first occurrence); otherwise fall back to ./wptrunner.ini if it
    exists, else the default ini shipped next to this package.
    """
    if argv is None:
        argv = []

    config_path = None
    for i, arg in enumerate(argv):
        if arg == "--config" and i + 1 < len(argv):
            config_path = argv[i + 1]
        elif arg.startswith("--config="):
            config_path = arg.split("=", 1)[1]
        if config_path is not None:
            break

    if config_path is None:
        if os.path.exists("wptrunner.ini"):
            config_path = os.path.abspath("wptrunner.ini")
        else:
            config_path = os.path.join(here, "..", "wptrunner.default.ini")

    return os.path.abspath(config_path)
def load():
    """Read the active config file (as selected by sys.argv) into a dict."""
    return read(path(sys.argv))
| mpl-2.0 |
CompassionCH/l10n-switzerland | l10n_ch_base_bank/models/bank.py | 1 | 12111 | # -*- coding: utf-8 -*-
# Copyright 2012-2017 Camptocamp
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import re
from odoo import models, fields, api, _
from odoo.tools import mod10r
from odoo import exceptions
from odoo.addons.base_iban.models.res_partner_bank import normalize_iban
class BankCommon(object):
    """Mixin with helpers to validate Swiss postal account numbers and to
    derive postal (CCP) and clearing numbers from Swiss IBANs."""

    def is_swiss_postal_num(self, number):
        """Return True if `number` is a Swiss postal account number in
        either the 9-position (xx-xxxxxx-x) or the short (up to 5 digit)
        format."""
        return (self._check_9_pos_postal_num(number) or
                self._check_5_pos_postal_num(number))

    def _check_9_pos_postal_num(self, number):
        """
        Predicate that checks if a postal number
        in format xx-xxxxxx-x is correct,
        return true if it matches the pattern
        and if check sum mod10 is ok

        :param number: postal number to validate
        :returns: True if is it a 9 len postal account
        :rtype: bool
        """
        pattern = r'^[0-9]{2}-[0-9]{1,6}-[0-9]$'
        if not re.search(pattern, number):
            return False
        nums = number.split('-')
        prefix = nums[0]
        # The middle part is zero-padded to 6 digits before computing the
        # recursive modulo-10 checksum over prefix + number.
        num = nums[1].rjust(6, '0')
        checksum = nums[2]
        expected_checksum = mod10r(prefix + num)[-1]
        return expected_checksum == checksum

    def _check_5_pos_postal_num(self, number):
        """
        Predicate that checks if a postal number
        in format xxxxx is correct,
        return true if it matches the pattern

        :param number: postal number to validate
        :returns: True if is it a 5 len postal account
        :rtype: bool
        """
        return bool(re.search(r'^[0-9]{1,5}$', number))

    def _convert_iban_to_ccp(self, iban):
        """
        Convert a Postfinance IBAN into an old postal number
        (xx-xxxxxx-x), or return False when the IBAN is not Swiss or the
        derived number fails validation.
        """
        iban = normalize_iban(iban)
        if not iban[:2].upper() == 'CH':
            return False
        part1 = iban[-9:-7]
        part2 = iban[-7:-1].lstrip('0')
        # Bug fix: the check digit is a single character and '0' is a
        # legitimate mod10r check digit; the old lstrip('0') turned it
        # into an empty string and rejected valid IBANs.
        part3 = iban[-1:]
        ccp = '{}-{}-{}'.format(part1, part2, part3)
        if not self._check_9_pos_postal_num(ccp):
            return False
        return ccp

    def _convert_iban_to_clearing(self, iban):
        """
        Convert a Swiss IBAN to a clearing number (IBAN positions 5-9,
        zeros stripped), or return False for non-Swiss IBANs.
        """
        iban = normalize_iban(iban)
        if not iban[:2].upper() == 'CH':
            return False
        clearing = iban[4:9].lstrip('0')
        return clearing
class Bank(models.Model, BankCommon):
    """Inherit res.bank class in order to add swiss specific field"""
    _inherit = 'res.bank'

    # Internal reference code for the bank record.
    code = fields.Char(
        string='Code',
        help='Internal reference'
    )
    # Swiss clearing number, also embedded in CH IBANs.
    clearing = fields.Char(
        string='Clearing number',
        help='Swiss unique bank identifier also used in IBAN number'
    )
    city = fields.Char(
        string='City',
        help="City of the bank"
    )
    # Postal account number of the bank (xx-xxxxxx-x or up to 5 digits).
    ccp = fields.Char(
        string='CCP/CP-Konto',
        size=11,
        help="CCP/CP-Konto of the bank"
    )
    country_code = fields.Char(
        string="Country code",
        related="country.code",
        readonly=True,
    )

    @api.constrains('ccp')
    def _check_postal_num(self):
        """Validate postal number format"""
        for bank in self:
            # Empty CCP is allowed; only non-empty values are validated.
            if not bank.ccp:
                continue
            if not self.is_swiss_postal_num(bank.ccp):
                raise exceptions.ValidationError(
                    _('Please enter a correct postal number. '
                      '(01-23456-1 or 12345)')
                )
        return True

    @api.multi
    def is_swiss_post(self):
        # BIC used by the Swiss Post (PostFinance).
        return self.bic == 'POFICHBEXXX'

    @api.multi
    def name_get(self):
        """Format displayed name"""
        res = []
        cols = ('bic', 'name', 'street', 'city')
        for bank in self:
            # Join the non-empty columns with " - ".
            vals = (bank[x] for x in cols if bank[x])
            res.append((bank.id, ' - '.join(vals)))
        return res

    @api.model
    def name_search(self, name, args=None, operator='ilike', limit=80):
        """Extends to look on bank code, bic, name, street and city"""
        if args is None:
            args = []
        ids = []
        cols = ('code', 'bic', 'name', 'street', 'city')
        if name:
            # For each whitespace-separated token, take the matches of the
            # first column that yields any.
            for val in name.split(' '):
                for col in cols:
                    tmp_ids = self.search(
                        [(col, 'ilike', val)] + args,
                        limit=limit
                    )
                    if tmp_ids:
                        ids += tmp_ids.ids
                        break
        else:
            ids = self.search(
                args,
                limit=limit
            ).ids
        # we sort by occurence
        to_ret_ids = list(set(ids))
        to_ret_ids = sorted(
            to_ret_ids,
            key=lambda x: ids.count(x),
            reverse=True
        )
        return self.browse(to_ret_ids).name_get()
class ResPartnerBank(models.Model, BankCommon):
    """Inherit res.partner.bank class in order to add swiss specific fields
    and state controls
    """
    _inherit = 'res.partner.bank'
    # Pre-compiled pattern: BVR/ESR adherent numbers are digits only.
    _compile_check_bvr_add_num = re.compile('[0-9]*$')
    bvr_adherent_num = fields.Char(
        string='Bank BVR/ESR adherent number', size=11,
        help="Your Bank adherent number to be printed "
        "in references of your BVR/ESR. "
        "This is not a postal account number."
    )
    acc_number = fields.Char(
        string='Account/IBAN Number'
    )
    # Postal account number associated with this partner bank account.
    ccp = fields.Char(
        string='CCP/CP-Konto',
        store=True
    )
    @api.one
    @api.depends('acc_number')
    def _compute_acc_type(self):
        # Flag Swiss postal account numbers with a dedicated acc_type;
        # otherwise defer to the standard computation.
        if (self.acc_number and
                self.is_swiss_postal_num(self.acc_number)):
            self.acc_type = 'postal'
            return
        super(ResPartnerBank, self)._compute_acc_type()
    @api.multi
    def get_account_number(self):
        """Retrieve the correct bank number to used based on
        account type

        The postal account (ccp) takes precedence over acc_number.
        """
        if self.ccp:
            return self.ccp
        else:
            return self.acc_number
    @api.constrains('bvr_adherent_num')
    def _check_adherent_number(self):
        # Adherent numbers may contain digits only; empty is allowed.
        for p_bank in self:
            if not p_bank.bvr_adherent_num:
                continue
            valid = self._compile_check_bvr_add_num.match(
                p_bank.bvr_adherent_num
            )
            if not valid:
                raise exceptions.ValidationError(
                    _('Your bank BVR/ESR adherent number must contain only '
                    'digits!\nPlease check your company bank account.')
                )
        return True
    @api.constrains('ccp')
    def _check_postal_num(self):
        """Validate postal number format"""
        for bank in self:
            if not bank.ccp:
                continue
            if not self.is_swiss_postal_num(bank.ccp):
                raise exceptions.ValidationError(
                    _('Please enter a correct postal number. '
                    '(01-23456-1 or 12345)')
                )
        return True
    @api.multi
    def _get_acc_name(self):
        """ Return an account name for a bank account
        to use with a ccp for BVR.
        This method make sure to generate a unique name
        """
        part_name = self.partner_id.name
        if part_name:
            acc_name = _("Bank/CCP {}").format(self.partner_id.name)
        else:
            acc_name = _("Bank/CCP Undefined")
        # NOTE(review): '=like' does a pattern match but acc_name contains
        # no wildcard, so this behaves like '=' — confirm intent.
        exist_count = self.env['res.partner.bank'].search_count(
            [('acc_number', '=like', acc_name)])
        if exist_count:
            # Append " (N)" with increasing N until no account uses the name.
            name_exist = exist_count
            while name_exist:
                new_name = acc_name + " ({})".format(exist_count)
                name_exist = self.env['res.partner.bank'].search_count(
                    [('acc_number', '=', new_name)])
                exist_count += 1
            acc_name = new_name
        return acc_name
    @api.multi
    def _get_ch_bank_from_iban(self):
        """ Extract clearing number from iban to find the bank """
        if self.acc_type != 'iban':
            return False
        clearing = self._convert_iban_to_clearing(self.acc_number)
        # Empty clearing -> False; otherwise first bank with that clearing.
        return clearing and self.env['res.bank'].search(
            [('clearing', '=', clearing)], limit=1)
    @api.onchange('acc_number', 'acc_type')
    def onchange_acc_number_set_swiss_bank(self):
        """ Set the bank when possible
        and set ccp when undefined
        Bank is defined as:
        - Found bank with CCP matching Bank CCP
        - Swiss post when CCP is no matching a Bank CCP
        - Found bank by clearing when using iban
        For CCP it can be:
        - a postal account, we copy acc_number
        - a postal account in iban format, we transform acc_number
        - a bank account with CCP on the bank, we use ccp of the bank
        - otherwise there is no CCP to use
        """
        bank = self.bank_id
        ccp = False
        if self.acc_type == 'postal':
            ccp = self.acc_number
            # Try to find a matching bank to the ccp entered in acc_number
            # Overwrite existing bank if there is a match
            bank = (
                self.env['res.bank'].search([('ccp', '=', ccp)], limit=1) or
                bank or
                self.env['res.bank'].search([('bic', '=', 'POFICHBEXXX')],
                                            limit=1))
            if not bank.is_swiss_post():
                # A regular bank holds the postal account: keep the ccp but
                # replace acc_number with a generated unique name.
                self.acc_number = self._get_acc_name()
        elif self.acc_type == 'iban':
            if not bank:
                bank = self._get_ch_bank_from_iban()
            if bank:
                if bank.is_swiss_post():
                    # Postal account written as an IBAN: recover the CCP.
                    ccp = self._convert_iban_to_ccp(self.acc_number.strip())
                else:
                    ccp = bank.ccp
        elif self.bank_id.ccp:
            # Plain bank account: inherit the bank's own CCP if any.
            ccp = self.bank_id.ccp
        self.bank_id = bank
        if not self.ccp:
            # Never overwrite a ccp the user already entered.
            self.ccp = ccp
    @api.onchange('ccp')
    def onchange_ccp_set_empty_acc_number(self):
        """ If acc_number is empty and bank ccp is defined fill it """
        if self.bank_id:
            if not self.acc_number and self.ccp:
                if self.bank_id.is_swiss_post():
                    self.acc_number = self.ccp
                else:
                    self.acc_number = self._get_acc_name()
            return
        ccp = self.ccp
        if ccp and self.is_swiss_postal_num(ccp):
            # No bank selected yet: pick the bank owning this CCP, falling
            # back to PostFinance.
            bank = (
                self.env['res.bank'].search([('ccp', '=', ccp)], limit=1) or
                self.env['res.bank'].search([('bic', '=', 'POFICHBEXXX')],
                                            limit=1))
            if not self.acc_number:
                if not bank.is_swiss_post():
                    self.acc_number = self._get_acc_name()
                else:
                    self.acc_number = self.ccp
            self.bank_id = bank
    @api.onchange('bank_id')
    def onchange_bank_set_acc_number(self):
        """ If acc_number is empty and bank ccp is defined fill it """
        if not self.bank_id:
            return
        if self.bank_id.is_swiss_post():
            # PostFinance: acc_number and ccp mirror each other.
            if not self.acc_number:
                self.acc_number = self.ccp
            elif not self.ccp and self.is_swiss_postal_num(self.acc_number):
                self.ccp = self.acc_number
        else:
            # Regular bank: acc_number gets a generated name; a postal
            # number typed into acc_number is moved to ccp.
            if not self.acc_number and self.ccp:
                self.acc_number = self._get_acc_name()
            elif self.acc_number and self.is_swiss_postal_num(self.acc_number):
                self.ccp = self.acc_number
                self.acc_number = self._get_acc_name()
    @api.onchange('partner_id')
    def onchange_partner_set_acc_number(self):
        # Regenerate the synthetic "Bank/CCP <partner>" name when the
        # partner changes.
        # NOTE(review): assumes acc_number is set whenever acc_type is
        # 'bank' and ccp is set; if acc_number can be falsy here the
        # 'in' test raises TypeError — confirm.
        if self.acc_type == 'bank' and self.ccp:
            if 'Bank/CCP' in self.acc_number:
                self.acc_number = self._get_acc_name()
    _sql_constraints = [('bvr_adherent_uniq', 'unique (bvr_adherent_num, ccp)',
                        'The BVR adherent number/ccp pair must be unique !')]
| agpl-3.0 |
Lab305/django-registration | registration/forms.py | 20 | 4873 | """
Forms and validation code for user registration.
"""
from django.contrib.auth.models import User
from django import forms
from django.utils.translation import ugettext_lazy as _
# I put this on all required fields, because it's easier to pick up
# on them with CSS or JavaScript if they have a class of "required"
# in the HTML. Your mileage may vary. If/when Django ticket #3515
# lands in trunk, this will no longer be necessary.
attrs_dict = {'class': 'required'}
class RegistrationForm(forms.Form):
    """
    Form for registering a new user account.
    Validates that the requested username is not already in use, and
    requires the password to be entered twice to catch typos.
    Subclasses should feel free to add any additional validation they
    need, but should avoid defining a ``save()`` method -- the actual
    saving of collected user data is delegated to the active
    registration backend.
    """
    # Letters, digits and @/./+/-/_ only, matching Django's default
    # User.username validation.
    username = forms.RegexField(regex=r'^[\w.@+-]+$',
                                max_length=30,
                                widget=forms.TextInput(attrs=attrs_dict),
                                label=_("Username"),
                                error_messages={'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})
    # maxlength mirrors the 75-char limit of User.email at the HTML level.
    email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_dict,
                                                               maxlength=75)),
                             label=_("E-mail"))
    password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
                                label=_("Password"))
    password2 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
                                label=_("Password (again)"))
    def clean_username(self):
        """
        Validate that the username (already pattern-checked by the
        RegexField) is not already in use, case-insensitively.
        """
        existing = User.objects.filter(username__iexact=self.cleaned_data['username'])
        if existing.exists():
            raise forms.ValidationError(_("A user with that username already exists."))
        else:
            return self.cleaned_data['username']
    def clean(self):
        """
        Verify that the values entered into the two password fields
        match. Note that an error here will end up in
        ``non_field_errors()`` because it doesn't apply to a single
        field.
        """
        # Only compare when both fields passed their own validation.
        if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
            if self.cleaned_data['password1'] != self.cleaned_data['password2']:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return self.cleaned_data
class RegistrationFormTermsOfService(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which adds a required checkbox
    for agreeing to a site's Terms of Service.
    """
    # BooleanField is required by default, so the box must be checked;
    # the custom message explains the failure to the user.
    tos = forms.BooleanField(widget=forms.CheckboxInput(attrs=attrs_dict),
                             label=_(u'I have read and agree to the Terms of Service'),
                             error_messages={'required': _("You must agree to the terms to register")})
class RegistrationFormUniqueEmail(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which enforces uniqueness of
    email addresses.
    """
    def clean_email(self):
        """
        Validate that the supplied email address is unique for the
        site (case-insensitive comparison).

        Raises ``forms.ValidationError`` when another user already
        registered with this address.
        """
        # Use .exists() for consistency with clean_username() above and
        # to avoid fetching whole rows just to test for presence.
        if User.objects.filter(email__iexact=self.cleaned_data['email']).exists():
            raise forms.ValidationError(_("This email address is already in use. Please supply a different email address."))
        return self.cleaned_data['email']
class RegistrationFormNoFreeEmail(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which disallows registration with
    email addresses from popular free webmail services; moderately
    useful for preventing automated spam registrations.
    To change the list of banned domains, subclass this form and
    override the attribute ``bad_domains``.
    """
    # All entries must be lowercase; clean_email() lowercases the input
    # domain before comparing.
    bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com',
                   'googlemail.com', 'hotmail.com', 'hushmail.com',
                   'msn.com', 'mail.ru', 'mailinator.com', 'live.com',
                   'yahoo.com']
    def clean_email(self):
        """
        Check the supplied email address against a list of known free
        webmail domains.
        """
        # Domains are case-insensitive, so normalize before comparing;
        # previously "user@GMAIL.COM" bypassed the blacklist.
        email_domain = self.cleaned_data['email'].split('@')[1].lower()
        if email_domain in self.bad_domains:
            raise forms.ValidationError(_("Registration using free email addresses is prohibited. Please supply a different email address."))
        return self.cleaned_data['email']
| bsd-3-clause |
andim27/magiccamp | tests/regressiontests/comment_tests/tests/comment_view_tests.py | 10 | 10582 | import re
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.comments import signals
from django.contrib.comments.models import Comment
from regressiontests.comment_tests.models import Article, Book
from regressiontests.comment_tests.tests import CommentTestCase
post_redirect_re = re.compile(r'^http://testserver/posted/\?c=(?P<pk>\d+$)')
class CommentViewTests(CommentTestCase):
    """Tests for the django.contrib.comments posting view (/post/)."""
    def testPostCommentHTTPMethods(self):
        # The comment-post view only accepts POST.
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        response = self.client.get("/post/", data)
        self.assertEqual(response.status_code, 405)
        self.assertEqual(response["Allow"], "POST")
    def testPostCommentMissingCtype(self):
        # Missing content_type field -> 400.
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        del data["content_type"]
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
    def testPostCommentBadCtype(self):
        # Unresolvable content_type value -> 400.
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["content_type"] = "Nobody expects the Spanish Inquisition!"
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
    def testPostCommentMissingObjectPK(self):
        # Missing object_pk field -> 400.
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        del data["object_pk"]
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
    def testPostCommentBadObjectPK(self):
        # object_pk referencing a nonexistent object -> 400.
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["object_pk"] = "14"
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
    def testPostInvalidIntegerPK(self):
        # Non-integer object_pk for an integer-pk model -> 400.
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["comment"] = "This is another comment"
        data["object_pk"] = u'\ufffd'
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
    def testPostInvalidDecimalPK(self):
        # Non-decimal object_pk for a decimal-pk model -> 400.
        b = Book.objects.get(pk='12.34')
        data = self.getValidData(b)
        data["comment"] = "This is another comment"
        data["object_pk"] = 'cookies'
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
    def testCommentPreview(self):
        # A "preview" submission renders the preview template (200).
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["preview"] = "Preview"
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "comments/preview.html")
    def testHashTampering(self):
        # A forged security_hash is rejected with 400.
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["security_hash"] = "Nobody expects the Spanish Inquisition!"
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
    def testDebugCommentErrors(self):
        """The debug error template should be shown only if DEBUG is True"""
        # NOTE(review): DEBUG is restored without try/finally, so a failing
        # assertion in the middle leaks the modified setting to later tests.
        olddebug = settings.DEBUG
        settings.DEBUG = True
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["security_hash"] = "Nobody expects the Spanish Inquisition!"
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
        self.assertTemplateUsed(response, "comments/400-debug.html")
        settings.DEBUG = False
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
        self.assertTemplateNotUsed(response, "comments/400-debug.html")
        settings.DEBUG = olddebug
    def testCreateValidComment(self):
        # A valid post redirects (302) and stores the comment together
        # with the client's IP address.
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        self.response = self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
        self.assertEqual(self.response.status_code, 302)
        self.assertEqual(Comment.objects.count(), 1)
        c = Comment.objects.all()[0]
        self.assertEqual(c.ip_address, "1.2.3.4")
        self.assertEqual(c.comment, "This is my comment")
    def testPostAsAuthenticatedUser(self):
        # Blank name/email are filled from the authenticated user.
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data['name'] = data['email'] = ''
        self.client.login(username="normaluser", password="normaluser")
        self.response = self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
        self.assertEqual(self.response.status_code, 302)
        self.assertEqual(Comment.objects.count(), 1)
        c = Comment.objects.all()[0]
        self.assertEqual(c.ip_address, "1.2.3.4")
        u = User.objects.get(username='normaluser')
        self.assertEqual(c.user, u)
        self.assertEqual(c.user_name, u.get_full_name())
        self.assertEqual(c.user_email, u.email)
    def testPostAsAuthenticatedUserWithoutFullname(self):
        """
        Check that the user's name in the comment is populated for
        authenticated users without first_name and last_name.
        """
        user = User.objects.create_user(username='jane_other',
                email='jane@example.com', password='jane_other')
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data['name'] = data['email'] = ''
        self.client.login(username="jane_other", password="jane_other")
        self.response = self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
        c = Comment.objects.get(user=user)
        self.assertEqual(c.ip_address, "1.2.3.4")
        # Falls back to the username when no full name is available.
        self.assertEqual(c.user_name, 'jane_other')
        user.delete()
    def testPreventDuplicateComments(self):
        """Prevent posting the exact same comment twice"""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        self.client.post("/post/", data)
        self.client.post("/post/", data)
        self.assertEqual(Comment.objects.count(), 1)
        # This should not trigger the duplicate prevention
        self.client.post("/post/", dict(data, comment="My second comment."))
        self.assertEqual(Comment.objects.count(), 2)
    def testCommentSignals(self):
        """Test signals emitted by the comment posting view"""
        # callback
        def receive(sender, **kwargs):
            self.assertEqual(kwargs['comment'].comment, "This is my comment")
            self.assert_('request' in kwargs)
            received_signals.append(kwargs.get('signal'))
        # Connect signals and keep track of handled ones
        received_signals = []
        excepted_signals = [signals.comment_will_be_posted, signals.comment_was_posted]
        for signal in excepted_signals:
            signal.connect(receive)
        # Post a comment and check the signals
        self.testCreateValidComment()
        self.assertEqual(received_signals, excepted_signals)
    def testWillBePostedSignal(self):
        """
        Test that the comment_will_be_posted signal can prevent the comment from
        actually getting saved
        """
        # Returning False from the receiver vetoes the comment.
        def receive(sender, **kwargs): return False
        signals.comment_will_be_posted.connect(receive, dispatch_uid="comment-test")
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Comment.objects.count(), 0)
        signals.comment_will_be_posted.disconnect(dispatch_uid="comment-test")
    def testWillBePostedSignalModifyComment(self):
        """
        Test that the comment_will_be_posted signal can modify a comment before
        it gets posted
        """
        def receive(sender, **kwargs):
            # a bad but effective spam filter :)...
            kwargs['comment'].is_public = False
        signals.comment_will_be_posted.connect(receive)
        self.testCreateValidComment()
        c = Comment.objects.all()[0]
        self.failIf(c.is_public)
    def testCommentNext(self):
        """Test the different "next" actions the comment view can take"""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = post_redirect_re.match(location)
        self.failUnless(match != None, "Unexpected redirect location: %s" % location)
        # A custom "next" URL is honored, with ?c=<pk> appended.
        data["next"] = "/somewhere/else/"
        data["comment"] = "This is another comment"
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = re.search(r"^http://testserver/somewhere/else/\?c=\d+$", location)
        self.failUnless(match != None, "Unexpected redirect location: %s" % location)
    def testCommentDoneView(self):
        # The post-redirect target renders the "posted" template with the
        # newly created comment in context.
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = post_redirect_re.match(location)
        self.failUnless(match != None, "Unexpected redirect location: %s" % location)
        pk = int(match.group('pk'))
        response = self.client.get(location)
        self.assertTemplateUsed(response, "comments/posted.html")
        self.assertEqual(response.context[0]["comment"], Comment.objects.get(pk=pk))
    def testCommentNextWithQueryString(self):
        """
        The `next` key needs to handle already having a query string (#10585)
        """
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["next"] = "/somewhere/else/?foo=bar"
        data["comment"] = "This is another comment"
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = re.search(r"^http://testserver/somewhere/else/\?foo=bar&c=\d+$", location)
        self.failUnless(match != None, "Unexpected redirect location: %s" % location)
    def testCommentPostRedirectWithInvalidIntegerPK(self):
        """
        Tests that attempting to retrieve the location specified in the
        post redirect, after adding some invalid data to the expected
        querystring it ends with, doesn't cause a server error.
        """
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["comment"] = "This is another comment"
        response = self.client.post("/post/", data)
        location = response["Location"]
        broken_location = location + u"\ufffd"
        response = self.client.get(broken_location)
        self.assertEqual(response.status_code, 200)
| bsd-3-clause |
kyoren/https-github.com-h2oai-h2o-3 | h2o-py/tests/testdir_misc/pyunit_plot.py | 1 | 1106 | import sys
sys.path.insert(1, "../../")
import h2o, tests
def plot_test():
    """Smoke-test ROC plotting on a GBM trained on the airlines data.

    Runs with server-side plotting enabled so no display is required.
    """
    kwargs = {}
    # server=True keeps matplotlib from opening interactive windows.
    kwargs['server'] = True
    air = h2o.import_file(tests.locate("smalldata/airlines/AirlinesTrain.csv.zip"))
    # Constructing test and train sets by sampling (20/80)
    s = air[0].runif()
    air_train = air[s <= 0.8]
    air_valid = air[s > 0.8]
    myX = ["Origin", "Dest", "Distance", "UniqueCarrier", "fMonth", "fDayofMonth", "fDayOfWeek"]
    myY = "IsDepDelayed"
    air_gbm = h2o.gbm(x=air_train[myX], y=air_train[myY], validation_x=air_valid[myX], validation_y=air_valid[myY],
                      distribution="bernoulli", ntrees=100, max_depth=3, learn_rate=0.01)
    # Plot ROC for training and validation sets
    air_gbm.plot(type="roc", train=True, **kwargs)
    air_gbm.plot(type="roc", valid=True, **kwargs)
    air_test = h2o.import_file(tests.locate("smalldata/airlines/AirlinesTest.csv.zip"))
    perf = air_gbm.model_performance(air_test)
    #Plot ROC for test set
    perf.plot(type="roc", **kwargs)
if __name__ == "__main__":
    tests.run_test(sys.argv, plot_test)
| apache-2.0 |
rcos/Observatory | observatory/dashboard/fetch/fetch_repositories_continously.py | 2 | 1409 | #!/usr/bin/env python
# Copyright (c) 2010, individual contributors (see AUTHORS file)
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Minimum interval between consecutive fetch runs (5 minutes).
FETCH_EVERY_SECONDS = 5 * 60
import datetime, os, subprocess
from sys import executable as python
from time import time, sleep
# Locate the fetch script next to this file.
this_dir = os.path.abspath(os.path.dirname(__file__))
fetch_script = os.path.join(this_dir, "fetch_repositories.py")
# Run the fetcher forever: each run happens in a child interpreter so a
# crash there never kills this supervisor loop.
while True:
    start_time = time()
    process = subprocess.Popen([python, fetch_script])
    process.wait()
    delta = time() - start_time
    if delta < FETCH_EVERY_SECONDS:
        # Finished early: sleep off the remainder of the interval.
        print "Waiting for {0} seconds".format(FETCH_EVERY_SECONDS - delta)
        sleep(FETCH_EVERY_SECONDS - delta)
    else:
        print "That took a while, restarting immediately..."
anandsubbu/incubator-metron | metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/service_check.py | 9 | 2735 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from alerts_ui_commands import AlertsUICommands
from enrichment_commands import EnrichmentCommands
from indexing_commands import IndexingCommands
from management_ui_commands import ManagementUICommands
from parser_commands import ParserCommands
from profiler_commands import ProfilerCommands
from resource_management.core.logger import Logger
from resource_management.libraries.script import Script
from rest_master import RestCommands
class ServiceCheck(Script):
    """Ambari service check that smoke-tests every Metron component."""

    def service_check(self, env):
        """Run each component's own service check; exit 0 when all pass.

        Components are checked in dependency order; any failing check is
        expected to raise (or exit non-zero), aborting the remainder.

        :param env: Ambari execution environment passed to each check.
        """
        from params import params
        # (description, command class) pairs, in dependency order.
        # Replaces seven copy-pasted stanzas; log messages are unchanged.
        checks = [
            ("Parser", ParserCommands),
            ("Enrichment", EnrichmentCommands),
            ("Indexing", IndexingCommands),
            ("Profiler", ProfilerCommands),
            ("REST application", RestCommands),
            ("Management UI", ManagementUICommands),
            ("Alerts UI", AlertsUICommands),
        ]
        for description, commands_class in checks:
            Logger.info("Performing {0} service check".format(description))
            commands_class(params).service_check(env)
        Logger.info("Metron service check completed successfully")
        exit(0)
if __name__ == "__main__":
ServiceCheck().execute()
| apache-2.0 |
lamuguo/jsonnet | case_studies/micromanage/service.py | 6 | 4497 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import cmds as cmds_lib
import validate
class Service(object):
    """Base class for micromanage service definitions.

    Provides validation of service configuration (commands, images),
    expansion of ``${-}`` self-references to the full service name, and
    compilation of setup/boot commands into a bash startup script.
    """

    def validateCmds(self, root, path):
        """Validate the list of commands found at ``path`` in ``root``.

        Each command is either a plain shell string or an object whose
        ``kind`` is one of CopyFile, LiteralFile or EnsureDir, with the
        corresponding required string fields and nothing else.
        """
        cmds = validate.array(root, path, validate.is_any_type({'string', 'object'}), [])
        for i, cmd in enumerate(cmds):
            cmd_path = path + [i]
            if isinstance(cmd, basestring):
                # Any string will do for validation purposes.
                pass
            elif isinstance(cmd, dict):
                kinds = {'CopyFile', 'LiteralFile', 'EnsureDir'}
                kind = validate.path_val(root, cmd_path + ['kind'], validate.is_any_value(kinds))
                if kind == 'CopyFile':
                    fields = {'owner', 'group', 'dirPermissions', 'filePermissions', 'from', 'to'}
                    for f in fields:
                        validate.path_val(root, cmd_path + [f], 'string')
                    validate.obj_only(root, cmd_path, fields | {'kind'})
                elif kind == 'LiteralFile':
                    fields = {'owner', 'group', 'filePermissions', 'content', 'to'}
                    for f in fields:
                        validate.path_val(root, cmd_path + [f], 'string')
                    validate.obj_only(root, cmd_path, fields | {'kind'})
                # Consistency: test the validated `kind` variable like the
                # other branches (was cmd['kind']; same value either way).
                elif kind == 'EnsureDir':
                    fields = {'owner', 'group', 'dirPermissions', 'dir'}
                    for f in fields:
                        validate.path_val(root, cmd_path + [f], 'string')
                    validate.obj_only(root, cmd_path, fields | {'kind'})
                else:
                    raise RuntimeError('Internal error: %s' % kind)
            else:
                raise RuntimeError('Internal error: %s' % type(cmd))

    def validateImage(self, root, path):
        # Subclasses override this method and validate specific image
        # attributes. But here we can do the cmds.
        self.validateCmds(root, path + ['cmds'])

    def children(self, service):
        """Yield (name, config) for each child, skipping reserved keys."""
        for child_name, child in service.iteritems():
            if child_name in {'environment', 'infrastructure', 'outputs'}:
                continue
            yield child_name, child

    def validateService(self, root, path):
        """Validate the reserved top-level service keys."""
        validate.path_val(root, path + ['outputs'], validate.is_string_map, {})
        validate.path_val(root, path + ['infrastructure'], 'object', {})

    def fullName(self, ctx, service_name):
        """Return the fully qualified, dash-joined service name."""
        return '-'.join(ctx + [service_name])

    def preprocess(self, ctx, service_name, service):
        """Expand ``${-}`` self-references throughout the service config
        and normalize it to the three reserved keys.
        """
        def recursive_update(c):
            # Walk containers; rewrite every string leaf (and dict key).
            if isinstance(c, dict):
                return {
                    recursive_update(k): recursive_update(v)
                    for k, v in c.iteritems()
                }
            elif isinstance(c, list):
                return [recursive_update(v) for v in c]
            elif isinstance(c, basestring):
                return self.translateSelfName(self.fullName(ctx, service_name), c)
            else:
                return c
        return {
            'environment': service.get('environment', 'default'),
            'infrastructure': recursive_update(service.get('infrastructure', {})),
            'outputs': recursive_update(service.get('outputs', {})),
        }

    def compileStartupScript(self, cmds, bootCmds):
        """Compile commands into a single bash startup script.

        ``cmds`` run only on first boot (guarded by a marker file);
        ``bootCmds`` run on every boot.
        """
        lines = []
        lines.append('#!/bin/bash')
        lines.append('if [ ! -r /micromanage_instance_initialized ] ; then')
        for cmd in cmds:
            lines += cmds_lib.compile_command_to_bash(cmd)
        lines.append('touch /micromanage_instance_initialized')
        lines.append('fi')
        for cmd in bootCmds:
            # Bug fix: this previously called the unqualified name
            # compile_command_to_bash, raising NameError whenever any
            # boot command was present.
            lines += cmds_lib.compile_command_to_bash(cmd)
        return '\n'.join(lines)

    _selfNameRegex = re.compile(r'\$\{-\}')

    # Convert ${-} to the name of the service
    def translateSelfName(self, full_name, v):
        return self._selfNameRegex.sub(full_name, v)
| apache-2.0 |
cuttlefishh/emp | legacy/code/scripts/novel_samples.py | 1 | 2392 | #!/usr/bin/env python
from __future__ import division
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2013, The QIIME Project"
__credits__ = ["Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.7.0-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "jai.rideout@gmail.com"
__status__ = "Development"
from csv import writer
from qiime.util import (get_options_lookup, make_option,
parse_command_line_parameters, qiime_open)
from emp.novel_samples import compute_sample_novelty
options_lookup = get_options_lookup()
# QIIME script interface definition; consumed by
# parse_command_line_parameters(**script_info) in main().
script_info = {}
script_info['brief_description'] = ""
script_info['script_description'] = ""
script_info['script_usage'] = [('Sample novelty', 'The following command '
'computes the novelty of all samples in the input BIOM tables. A TSV file is '
'created with sample ID, number of novel OTUs, and percent novel sequences. '
'The samples are ordered by descending number of novel OTUs (second column).',
'%prog -i table1.biom,table2.biom -r rep_set.fna -o novel_samples_out.txt')]
script_info['output_description'] = ""
script_info['required_options'] = [
    make_option('-i','--otu_table_fps',type='existing_filepaths',
        help='paths to the input OTU tables (i.e., the output from '
        'make_otu_table.py). These can either be gzipped or uncompressed'),
    make_option('-r','--rep_set_fp',type='existing_filepath',
        help='path to representative set of sequences in the reference '
        'database used in open-reference OTU picking to create the tables '
        'provided via -i/--otu_table_fps. For example, this might be the '
        'Greengenes 97% rep set fasta file'),
    options_lookup['output_fp']
]
script_info['optional_options'] = []
script_info['version'] = __version__
def main():
    """Parse CLI options, compute sample novelty, and write a TSV report."""
    option_parser, opts, args = parse_command_line_parameters(**script_info)
    # NOTE(review): opts.verbose comes from QIIME's standard option
    # machinery (not declared in script_info above) — confirm.
    sample_novelty_data = compute_sample_novelty(
        [qiime_open(otu_table_fp) for otu_table_fp in opts.otu_table_fps],
        qiime_open(opts.rep_set_fp), opts.verbose)
    with open(opts.output_fp, 'w') as out_f:
        header = ['SampleID', 'Number of novel OTUs',
                  'Percent novel sequences']
        # Rows arrive pre-sorted by descending novel-OTU count.
        table_writer = writer(out_f, delimiter='\t', lineterminator='\n')
        table_writer.writerow(header)
        table_writer.writerows(sample_novelty_data)
if __name__ == "__main__":
main()
| bsd-3-clause |
davidzchen/tensorflow | tensorflow/python/keras/layers/preprocessing/normalization_distribution_test.py | 4 | 5270 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras.layers.preprocessing.normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations as ds_combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from tensorflow.python.framework import test_combinations as combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.layers.preprocessing import normalization
from tensorflow.python.keras.layers.preprocessing import normalization_v1
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.platform import test
def get_layer_class():
  """Return the Normalization layer variant for the active execution mode."""
  if not context.executing_eagerly():
    return normalization_v1.Normalization
  return normalization.Normalization
def _get_layer_computation_test_cases():
test_cases = ({
"adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32),
"axis": -1,
"test_data": np.array([[1.], [2.], [3.]], np.float32),
"expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
"testcase_name": "2d_single_element"
}, {
"adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32),
"axis": None,
"test_data": np.array([[1.], [2.], [3.]], np.float32),
"expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
"testcase_name": "2d_single_element_none_axis"
}, {
"adapt_data": np.array([[1., 2., 3., 4., 5.]], dtype=np.float32),
"axis": None,
"test_data": np.array([[1.], [2.], [3.]], np.float32),
"expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
"testcase_name": "2d_single_element_none_axis_flat_data"
}, {
"adapt_data":
np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]],
np.float32),
"axis":
1,
"test_data":
np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]],
np.float32),
"expected":
np.array([[[-1.549193, -0.774597, 0.], [-1.549193, -0.774597, 0.]],
[[0., 0.774597, 1.549193], [0., 0.774597, 1.549193]]],
np.float32),
"testcase_name":
"3d_internal_axis"
}, {
"adapt_data":
np.array(
[[[1., 0., 3.], [2., 3., 4.]], [[3., -1., 5.], [4., 5., 8.]]],
np.float32),
"axis": (1, 2),
"test_data":
np.array(
[[[3., 1., -1.], [2., 5., 4.]], [[3., 0., 5.], [2., 5., 8.]]],
np.float32),
"expected":
np.array(
[[[1., 3., -5.], [-1., 1., -1.]], [[1., 1., 1.], [-1., 1., 1.]]],
np.float32),
"testcase_name":
"3d_multiple_axis"
})
crossed_test_cases = []
# Cross above test cases with use_dataset in (True, False)
for use_dataset in (True, False):
for case in test_cases:
case = case.copy()
if use_dataset:
case["testcase_name"] = case["testcase_name"] + "_with_dataset"
case["use_dataset"] = use_dataset
crossed_test_cases.append(case)
return crossed_test_cases
@ds_combinations.generate(
    combinations.times(
        combinations.combine(
            distribution=strategy_combinations.all_strategies,
            mode=["eager", "graph"]), _get_layer_computation_test_cases()))
class NormalizationTest(keras_parameterized.TestCase,
                        preprocessing_test_utils.PreprocessingLayerTest):
  """Runs each Normalization computation case under every strategy."""

  def test_layer_computation(self, distribution, adapt_data, axis, test_data,
                             use_dataset, expected):
    # Only the batch dimension is dropped; feature dims stay unknown.
    input_shape = (None,) * (test_data.ndim - 1)
    if use_dataset:
      # Keras APIs expect batched datasets
      half_batch = test_data.shape[0] // 2
      adapt_data = dataset_ops.Dataset.from_tensor_slices(adapt_data).batch(
          half_batch)
      test_data = dataset_ops.Dataset.from_tensor_slices(test_data).batch(
          half_batch)

    with distribution.scope():
      feature_input = keras.Input(shape=input_shape)
      norm_layer = get_layer_class()(axis=axis)
      norm_layer.adapt(adapt_data)
      model = keras.Model(feature_input, norm_layer(feature_input))
      predictions = model.predict(test_data)
    self.assertAllClose(expected, predictions)
# Standard TF test entry point so the file can be executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
Distrotech/scons | test/packaging/tar/gz.py | 5 | 2021 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"

"""
This tests the SRC 'targz' packager, which does the following:
 - create a targz package containing the specified files.
"""

import TestSCons

python = TestSCons.python

test = TestSCons.TestSCons()

# Only exercise the packager when a 'tar' executable is available on PATH;
# otherwise the test quietly passes below.
tar = test.detect('TAR', 'tar')

if tar:
    test.subdir('src')

    # Minimal C source to give the package something real to archive.
    test.write( [ 'src', 'main.c' ], r"""
int main( int argc, char* argv[] )
{
    return 0;
}
""")

    # SConstruct builds the program and packages the sources as src.tar.gz.
    test.write('SConstruct', """
Program( 'src/main.c' )

env=Environment(tools=['default', 'packaging'])
env.Package( PACKAGETYPE = 'src_targz',
             target      = 'src.tar.gz',
             PACKAGEROOT = 'test',
             source      = [ 'src/main.c', 'SConstruct' ] )
""")

    test.run(arguments='', stderr = None)

    test.must_exist( 'src.tar.gz' )

test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
pde/torbrowser-launcher | lib/txsocksx-0.0.2/txsocksx/errors.py | 1 | 1224 | from twisted.internet import error
import txsocksx.constants as c
class ParsingError(Exception):
    """Raised when data received from the SOCKS server cannot be parsed."""
    pass
class InvalidServerReply(Exception):
    """Raised when the SOCKS server sends a malformed or unexpected reply."""
    pass
class SOCKSError(Exception):
    """Base class for errors reported by the SOCKS protocol layer."""
    pass
class MethodsNotAcceptedError(SOCKSError):
    """Raised when the server accepts none of the offered auth methods."""
    pass
# NOTE(review): this name shadows the Python 3 builtin ConnectionError;
# kept as-is because it is part of the module's public API.
class ConnectionError(SOCKSError):
    """Generic failure while establishing the SOCKS connection."""
    pass
class ConnectionLostEarly(SOCKSError, error.ConnectionLost):
    """Connection dropped before the SOCKS negotiation completed.

    Also subclasses twisted's ConnectionLost so existing twisted error
    handling keeps working.
    """
    pass
class StateError(Exception):
    """
    There was a problem with the State.
    """
    pass
class NoAcceptableMethods(SOCKSError):
    """
    No Acceptable Methods ( FF )

    The server answered the method negotiation with 0xFF, rejecting every
    authentication method the client offered.
    """
class ServerFailure(SOCKSError):
    """
    General SOCKS server failure ( 1 )

    SOCKS5 reply code 0x01.
    """
class ConnectionNotAllowed(SOCKSError):
    """
    Connection not allowed ( 2 )

    SOCKS5 reply code 0x02: ruleset forbids this connection.
    """
class NetworkUnreachable(SOCKSError):
    """
    Network unreachable ( 3 )

    SOCKS5 reply code 0x03.
    """
class HostUnreachable(SOCKSError):
    """
    Host unreachable ( 4 )

    SOCKS5 reply code 0x04.
    """
class ConnectionRefused(SOCKSError):
    """
    Connection refused ( 5 )

    SOCKS5 reply code 0x05: the destination refused the connection.
    """
class TTLExpired(SOCKSError):
    """
    TTL expired ( 6 )

    SOCKS5 reply code 0x06.
    """
class CommandNotSupported(SOCKSError):
    """
    Command Not Supported ( 7 )

    SOCKS5 reply code 0x07.
    """
class AddressNotSupported(SOCKSError):
    """
    Address type not supported ( 8 )

    SOCKS5 reply code 0x08.
    """
| mit |
kanteshraj/ansible | contrib/inventory/gce.py | 113 | 10593 | #!/usr/bin/env python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
GCE external inventory script
=================================
Generates inventory that Ansible can understand by making API requests
Google Compute Engine via the libcloud library. Full install/configuration
instructions for the gce* modules can be found in the comments of
ansible/test/gce_tests.py.
When run against a specific host, this script returns the following variables
based on the data obtained from the libcloud Node object:
- gce_uuid
- gce_id
- gce_image
- gce_machine_type
- gce_private_ip
- gce_public_ip
- gce_name
- gce_description
- gce_status
- gce_zone
- gce_tags
- gce_metadata
- gce_network
When run in --list mode, instances are grouped by the following categories:
- zone:
zone group name examples are us-central1-b, europe-west1-a, etc.
- instance tags:
An entry is created for each tag. For example, if you have two instances
with a common tag called 'foo', they will both be grouped together under
the 'tag_foo' name.
- network name:
the name of the network is appended to 'network_' (e.g. the 'default'
network will result in a group named 'network_default')
- machine type
types follow a pattern like n1-standard-4, g1-small, etc.
- running status:
group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
- image:
when using an ephemeral/scratch disk, this will be set to the image name
used when creating the instance (e.g. debian-7-wheezy-v20130816). when
your instance was created with a root persistent disk it will be set to
'persistent_disk' since there is no current way to determine the image.
Examples:
Execute uname on all instances in the us-central1-a zone
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
Use the GCE inventory script to print out instance specific information
$ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
Version: 0.0.1
'''
# Declared so pkg_resources can select the right pycrypto on multiversion
# installs (see the comment in the except block below).
__requires__ = ['pycrypto>=2.6']
try:
    import pkg_resources
except ImportError:
    # Use pkg_resources to find the correct versions of libraries and set
    # sys.path appropriately when there are multiversion installs. We don't
    # fail here as there is code that better expresses the errors where the
    # library is used.
    pass

# Sent to GCE so API traffic from this plugin is identifiable.
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
USER_AGENT_VERSION="v1"

import sys
import os
import argparse
import ConfigParser

try:
    import json
except ImportError:
    # Python < 2.6 fallback.
    import simplejson as json

try:
    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver
    # Probe for GCE support; older libcloud lacks the Provider.GCE constant.
    _ = Provider.GCE
except:
    print("GCE inventory script requires libcloud >= 0.13")
    sys.exit(1)
class GceInventory(object):
    """Queries Google Compute Engine via libcloud and prints Ansible
    dynamic-inventory JSON (either one host's vars or all groups)."""

    def __init__(self):
        # Read settings and parse CLI arguments
        self.parse_cli_args()
        self.driver = self.get_gce_driver()

        # Just display data for specific host
        if self.args.host:
            print(self.json_format_dict(self.node_to_dict(
                    self.get_instance(self.args.host)),
                    pretty=self.args.pretty))
            sys.exit(0)

        # Otherwise, assume user wants all instances grouped
        print(self.json_format_dict(self.group_instances(),
            pretty=self.args.pretty))
        sys.exit(0)

    def get_gce_driver(self):
        """Determine the GCE authorization settings and return a
        libcloud driver.

        Credential resolution order: importable ``secrets`` module, then a
        libcloud_secrets path from gce.ini, then gce.ini values, with
        GCE_* environment variables overriding whatever was found.
        """
        # gce.ini is looked up next to this script unless GCE_INI_PATH says
        # otherwise.
        gce_ini_default_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "gce.ini")
        gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)

        # Create a ConfigParser.
        # This provides empty defaults to each key, so that environment
        # variable configuration (as opposed to INI configuration) is able
        # to work.
        config = ConfigParser.SafeConfigParser(defaults={
            'gce_service_account_email_address': '',
            'gce_service_account_pem_file_path': '',
            'gce_project_id': '',
            'libcloud_secrets': '',
        })
        if 'gce' not in config.sections():
            config.add_section('gce')
        config.read(gce_ini_path)

        # Attempt to get GCE params from a configuration file, if one
        # exists.
        secrets_path = config.get('gce', 'libcloud_secrets')
        secrets_found = False
        try:
            # A 'secrets' module already on sys.path wins outright.
            import secrets
            args = list(getattr(secrets, 'GCE_PARAMS', []))
            kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
            secrets_found = True
        except:
            pass

        if not secrets_found and secrets_path:
            if not secrets_path.endswith('secrets.py'):
                err = "Must specify libcloud secrets file as "
                err += "/absolute/path/to/secrets.py"
                print(err)
                sys.exit(1)
            # Make the configured secrets.py importable, then retry.
            sys.path.append(os.path.dirname(secrets_path))
            try:
                import secrets
                args = list(getattr(secrets, 'GCE_PARAMS', []))
                kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
                secrets_found = True
            except:
                pass
        if not secrets_found:
            # Fall back to plain gce.ini values.
            args = [
                config.get('gce','gce_service_account_email_address'),
                config.get('gce','gce_service_account_pem_file_path')
            ]
            kwargs = {'project': config.get('gce', 'gce_project_id')}

        # If the appropriate environment variables are set, they override
        # other configuration; process those into our args and kwargs.
        args[0] = os.environ.get('GCE_EMAIL', args[0])
        args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
        kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])

        # Retrieve and return the GCE driver.
        gce = get_driver(Provider.GCE)(*args, **kwargs)
        gce.connection.user_agent_append(
            '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
        )
        return gce

    def parse_cli_args(self):
        ''' Command line argument processing '''

        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory file based on GCE')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List instances (default: True)')
        parser.add_argument('--host', action='store',
                            help='Get all information about an instance')
        parser.add_argument('--pretty', action='store_true', default=False,
                            help='Pretty format (default: False)')
        self.args = parser.parse_args()

    def node_to_dict(self, inst):
        """Flatten a libcloud Node into the gce_* hostvars dict
        (empty dict when inst is None)."""
        md = {}

        if inst is None:
            return {}

        # .has_key() keeps this script Python 2 only.
        if inst.extra['metadata'].has_key('items'):
            for entry in inst.extra['metadata']['items']:
                md[entry['key']] = entry['value']

        # Network URL looks like .../networks/<name>; keep only the name.
        net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
        return {
            'gce_uuid': inst.uuid,
            'gce_id': inst.id,
            'gce_image': inst.image,
            'gce_machine_type': inst.size,
            'gce_private_ip': inst.private_ips[0],
            'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
            'gce_name': inst.name,
            'gce_description': inst.extra['description'],
            'gce_status': inst.extra['status'],
            'gce_zone': inst.extra['zone'].name,
            'gce_tags': inst.extra['tags'],
            'gce_metadata': md,
            'gce_network': net,
            # Hosts don't have a public name, so we add an IP
            'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
        }

    def get_instance(self, instance_name):
        '''Gets details about a specific instance '''
        try:
            return self.driver.ex_get_node(instance_name)
        except Exception as e:
            # Unknown instance names simply yield no hostvars.
            return None

    def group_instances(self):
        '''Group all instances by zone, tag, network, machine type,
        image and status, and fill _meta.hostvars.'''
        groups = {}
        meta = {}
        meta["hostvars"] = {}

        for node in self.driver.list_nodes():
            name = node.name

            meta["hostvars"][name] = self.node_to_dict(node)

            zone = node.extra['zone'].name
            if groups.has_key(zone): groups[zone].append(name)
            else: groups[zone] = [name]

            tags = node.extra['tags']
            for t in tags:
                # 'group-<x>' tags become group '<x>'; others become 'tag_<x>'.
                if t.startswith('group-'):
                    tag = t[6:]
                else:
                    tag = 'tag_%s' % t
                if groups.has_key(tag): groups[tag].append(name)
                else: groups[tag] = [name]

            net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
            net = 'network_%s' % net
            if groups.has_key(net): groups[net].append(name)
            else: groups[net] = [name]

            machine_type = node.size
            if groups.has_key(machine_type): groups[machine_type].append(name)
            else: groups[machine_type] = [name]

            # Root-persistent-disk instances expose no image name.
            image = node.image and node.image or 'persistent_disk'
            if groups.has_key(image): groups[image].append(name)
            else: groups[image] = [name]

            status = node.extra['status']
            stat = 'status_%s' % status.lower()
            if groups.has_key(stat): groups[stat].append(name)
            else: groups[stat] = [name]

        groups["_meta"] = meta

        return groups

    def json_format_dict(self, data, pretty=False):
        ''' Converts a dict to a JSON object and dumps it as a formatted
        string '''

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)
# Run the script
# NOTE(review): executed at import time (no __main__ guard). Ansible always
# invokes this file as a standalone script, and the constructor prints the
# inventory and exits, so keep the module-level call as-is.
GceInventory()
| gpl-3.0 |
concentricsky/django-allauth | allauth/socialaccount/providers/instagram/provider.py | 75 | 1150 | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class InstagramAccount(ProviderAccount):
    """Instagram account wrapper exposing profile URL, avatar and name."""

    PROFILE_URL = 'http://instagram.com/'

    def get_profile_url(self):
        # Public profiles live at <PROFILE_URL><username>.
        username = self.account.extra_data.get('username')
        return self.PROFILE_URL + username

    def get_avatar_url(self):
        extra = self.account.extra_data
        return extra.get('profile_picture')

    def to_str(self):
        # Prefer the Instagram username, falling back to the generic label.
        fallback = super(InstagramAccount, self).to_str()
        return self.account.extra_data.get('username', fallback)
class InstagramProvider(OAuth2Provider):
    """OAuth2 provider definition for Instagram."""

    id = 'instagram'
    name = 'Instagram'
    package = 'allauth.socialaccount.providers.instagram'
    account_class = InstagramAccount

    def extract_extra_data(self, data):
        # Instagram nests the profile payload under a top-level 'data' key.
        return data.get('data', {})

    def get_default_scope(self):
        return ['basic']

    def extract_uid(self, data):
        return str(data['data']['id'])

    def extract_common_fields(self, data):
        username = data['data'].get('username')
        return dict(username=username)
# Register the provider so allauth discovers it when this module is imported.
providers.registry.register(InstagramProvider)
| mit |
pk-sam/crosswalk-test-suite | webapi/tct-messaging-mms-tizen-tests/inst.xpk.py | 6 | 7120 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
# Directory holding this script; the package name is derived from it.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
# Parsed command-line options; populated in main().
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
# Device-side staging paths; filled in per-user inside main().
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    """Run *cmd* in a shell, echoing output, and return (exit_code, lines)."""
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)

    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # poll() stays None while the child runs; stop once it has exited
        # and stdout yields an empty line (stream drained).
        if output_line == '' and cmd_return_code != None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)

    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap pkgcmd invocations to run as PARAMETERS.user with XW_ENV set."""
    if "pkgcmd" in cmd:
        # XW_ENV is assigned in main() once the device user id is known.
        cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
    return cmd
def getUSERID():
    """Return (exit_code, output) of `id -u <user>` on the target device."""
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell id -u %s" % (
            PARAMETERS.device, PARAMETERS.user)
    else:
        cmd = "ssh %s \"id -u %s\"" % (
            PARAMETERS.device, PARAMETERS.user )
    return doCMD(cmd)
def getPKGID(pkg_name=None):
    """Look up the installed pkgid for *pkg_name* via `pkgcmd -l`.

    Returns the pkgid string, or None when the listing fails or the
    package is not installed.
    """
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))

    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None

    test_pkg_id = None
    for line in output:
        # pkgcmd -l lines contain "... pkgid [<id>] ... [<name>] ...";
        # match on the bracketed package name, then read the pkgid token.
        if line.find("[" + pkg_name + "]") != -1:
            pkgidIndex = line.split().index("pkgid")
            test_pkg_id = line.split()[pkgidIndex+1].strip("[]")
            break
    return test_pkg_id
def doRemoteCMD(cmd=None):
    """Run *cmd* on the target device via sdb shell or ssh."""
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
    else:
        cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
    return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
    """Copy *src* to *dest* on the target device (sdb push or scp).

    Returns True on success and False on failure — the convention every
    caller assumes (``if not doRemoteCopy(...): action_status = False``).
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)

    (return_code, output) = doCMD(cmd)
    doRemoteCMD("sync")

    # BUG FIX: the original returned True on a NON-ZERO exit code and False
    # on success, the inverse of what callers check, so successful copies
    # were flagged as errors and failed copies went unnoticed.
    if return_code != 0:
        return False
    return True
def uninstPKGs():
    """Uninstall every .xpk found beside this script and remove staged data.

    Returns True only if every uninstall and cleanup step succeeded.
    """
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if file.endswith(".xpk"):
                pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
                if not pkg_id:
                    # Package not installed (or lookup failed): record and go on.
                    action_status = False
                    continue
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -u -t xpk -q -n %s" % pkg_id)
                # pkgcmd reports problems as "Failure" lines rather than a
                # non-zero exit code.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break

    # Remove the staged package directory and media fixtures.
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False

    (return_code, output) = doRemoteCMD("rm -rf %s/Images" % SRC_DIR)
    if return_code != 0:
        action_status = False

    (return_code, output) = doRemoteCMD("rm -rf %s/Sounds" % SRC_DIR)
    if return_code != 0:
        action_status = False

    return action_status
def instPKGs():
    """Push and install every .xpk beside this script plus media fixtures.

    Returns True only if every push/install step succeeded.
    """
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False

    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if file.endswith(".xpk"):
                # Stage the package, install it, then remove the staged copy.
                if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
                    action_status = False
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -i -t xpk -q -p %s/%s" % (SRC_DIR, file))
                doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
                # pkgcmd reports problems as "Failure" lines rather than a
                # non-zero exit code.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break

    # Push the media fixtures the messaging tests expect on the device.
    (return_code, output) = doRemoteCMD("mkdir -p %s/Images" % SRC_DIR)
    if return_code != 0:
        action_status = False
    if not doRemoteCopy("%s/webapi-tizen-messaging-test_image.jpg" % SCRIPT_DIR, "%s/Images" % SRC_DIR):
        action_status = False

    (return_code, output) = doRemoteCMD("mkdir -p %s/Sounds" % SRC_DIR)
    if return_code != 0:
        action_status = False
    if not doRemoteCopy("%s/webapi-tizen-messaging-test_noise.mp3" % SCRIPT_DIR, "%s/Sounds" % SRC_DIR):
        action_status = False

    return action_status
def main():
    """Parse CLI options, resolve the target device/user, then (un)install."""
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)

    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    # Device-side staging paths depend on the chosen user.
    global SRC_DIR, PKG_SRC_DIR
    SRC_DIR = "/home/%s/content" % PARAMETERS.user
    PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"

    if PARAMETERS.mode == "SDB":
        if not PARAMETERS.device:
            # Auto-pick the first device listed by `sdb devices`.
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"

    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)

    # Resolve the numeric uid so pkgcmd can talk to the user session bus.
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0 :
        global XW_ENV
        userid = user_info[1][0]
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
    else:
        print "[Error] cmd commands error : %s"%str(user_info[1])
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)

    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
# Script entry point: exit 0 explicitly when main() completes without exiting.
if __name__ == "__main__":
    main()
    sys.exit(0)
| bsd-3-clause |
xiao26/scrapy | scrapy/utils/sitemap.py | 146 | 1410 | """
Module for processing Sitemaps.
Note: The main purpose of this module is to provide support for the
SitemapSpider, its API is subject to change without notice.
"""
import lxml.etree
class Sitemap(object):
    """Class to parse Sitemap (type=urlset) and Sitemap Index
    (type=sitemapindex) files"""

    def __init__(self, xmltext):
        # recover=True tolerates the malformed XML common in real sitemaps;
        # resolve_entities=False avoids XXE-style entity expansion.
        xmlp = lxml.etree.XMLParser(recover=True, remove_comments=True, resolve_entities=False)
        self._root = lxml.etree.fromstring(xmltext, parser=xmlp)
        rt = self._root.tag
        # Tags are namespaced ('{ns}urlset'); keep only the local name.
        self.type = self._root.tag.split('}', 1)[1] if '}' in rt else rt

    def __iter__(self):
        """Yield one dict per <url>/<sitemap> entry, keyed by local tag name.

        Entries without a <loc> child are skipped; <link href=...> children
        are collected under the 'alternate' key.
        """
        # Iterate elements directly instead of the deprecated .getchildren()
        # (removed in modern lxml/ElementTree); iteration yields the same
        # child elements.
        for elem in self._root:
            d = {}
            for el in elem:
                tag = el.tag
                name = tag.split('}', 1)[1] if '}' in tag else tag

                if name == 'link':
                    if 'href' in el.attrib:
                        d.setdefault('alternate', []).append(el.get('href'))
                else:
                    d[name] = el.text.strip() if el.text else ''

            if 'loc' in d:
                yield d
def sitemap_urls_from_robots(robots_text):
    """Return an iterator over all sitemap urls contained in the given
    robots.txt file

    Robots Exclusion Protocol directives are case-insensitive, so
    'Sitemap:', 'sitemap:' and 'SITEMAP:' lines all match (the original
    only matched the exact 'Sitemap:' spelling).
    """
    for line in robots_text.splitlines():
        if line.lstrip().lower().startswith('sitemap:'):
            # Keep everything after the first ':' — the URL itself may
            # contain colons (scheme, port).
            yield line.split(':', 1)[1].strip()
| bsd-3-clause |
ttindell2/openshift-ansible | roles/openshift_health_checker/library/etcdkeysize.py | 59 | 3691 | #!/usr/bin/python
"""Ansible module that recursively determines if the size of a key in an etcd cluster exceeds a given limit."""
from ansible.module_utils.basic import AnsibleModule

try:
    import etcd
    IMPORT_EXCEPTION_MSG = None
except ImportError as err:
    # Remember why the import failed so main() can report a helpful message.
    IMPORT_EXCEPTION_MSG = str(err)

    from collections import namedtuple
    EtcdMock = namedtuple("etcd", ["EtcdKeyNotFound"])
    # Stand-in so `except etcd.EtcdKeyNotFound` below still resolves; the
    # real failure is surfaced later when etcd.Client is called.
    etcd = EtcdMock(KeyError)
# pylint: disable=too-many-arguments
def check_etcd_key_size(client, key, size_limit, total_size=0, depth=0, depth_limit=1000, visited=None):
    """Recursively measure the value bytes stored under *key*.

    Returns a tuple ``(size, limit_exceeded)``: ``size`` is the number of
    value bytes found under ``key``; ``limit_exceeded`` reports whether
    ``total_size + size`` went past ``size_limit``.  A falsy ``size_limit``
    (0/None) means "no limit".  (The original docstring claimed a
    ``(string, bool)`` return; the first element is an int.)
    """
    if visited is None:
        visited = set()

    # Guard against revisiting a key (cycle/alias safety).
    if key in visited:
        return 0, False

    visited.add(key)

    try:
        result = client.read(key, recursive=False)
    except etcd.EtcdKeyNotFound:
        return 0, False

    size = 0
    limit_exceeded = False

    for node in result.leaves:
        if depth >= depth_limit:
            raise Exception("Maximum recursive stack depth ({}) exceeded.".format(depth_limit))

        # Early out as soon as the running total passes a configured limit.
        if size_limit and total_size + size > size_limit:
            return size, True

        if not node.dir:
            size += len(node.value)
            continue

        key_size, limit_exceeded = check_etcd_key_size(client, node.key,
                                                       size_limit,
                                                       total_size + size,
                                                       depth + 1,
                                                       depth_limit, visited)
        size += key_size

    # Consistency fix: mirror the in-loop guard above — only report the
    # limit as exceeded when one was actually set.  The original compared
    # against a falsy/zero size_limit here, flagging "unlimited" runs as
    # exceeded.
    max_limit_exceeded = limit_exceeded or (bool(size_limit) and
                                            total_size + size > size_limit)

    return size, max_limit_exceeded
def main():  # pylint: disable=missing-docstring,too-many-branches
    """Ansible module entry point: connect to etcd and check key sizes."""
    module = AnsibleModule(
        argument_spec=dict(
            size_limit_bytes=dict(type="int", default=0),
            paths=dict(type="list", default=["/openshift.io/images"]),
            host=dict(type="str", default="127.0.0.1"),
            port=dict(type="int", default=4001),
            protocol=dict(type="str", default="http"),
            version_prefix=dict(type="str", default=""),
            allow_redirect=dict(type="bool", default=False),
            cert=dict(type="dict", default=""),
            ca_cert=dict(type="str", default=None),
        ),
        supports_check_mode=True
    )

    # python-etcd expects `cert` as a (cert, key) tuple, not a dict.
    # NOTE(review): the "" default for cert would make this subscript fail;
    # presumably callers always supply a dict — confirm against the role.
    module.params["cert"] = (
        module.params["cert"]["cert"],
        module.params["cert"]["key"],
    )

    # Pop our own options so the remainder can be passed straight to
    # etcd.Client as keyword arguments.
    size_limit = module.params.pop("size_limit_bytes")
    paths = module.params.pop("paths")

    limit_exceeded = False

    try:
        # pylint: disable=no-member
        client = etcd.Client(**module.params)
    except AttributeError as attrerr:
        # With the namedtuple mock in place, etcd.Client raises
        # AttributeError; report the original import failure instead.
        msg = str(attrerr)
        if IMPORT_EXCEPTION_MSG:
            msg = IMPORT_EXCEPTION_MSG
            if "No module named etcd" in IMPORT_EXCEPTION_MSG:
                # pylint: disable=redefined-variable-type
                msg = ('Unable to import the python "etcd" dependency. '
                       'Make sure python-etcd is installed on the host.')

        module.exit_json(
            failed=True,
            changed=False,
            size_limit_exceeded=limit_exceeded,
            msg=msg,
        )

        # exit_json normally terminates; this return is a safety net.
        return

    size = 0
    for path in paths:
        # Pass the remaining budget so the limit applies across all paths.
        path_size, limit_exceeded = check_etcd_key_size(client, path, size_limit - size)
        size += path_size

        if limit_exceeded:
            break

    module.exit_json(
        changed=False,
        size_limit_exceeded=limit_exceeded,
    )


if __name__ == '__main__':
    main()
| apache-2.0 |
crew/dds-client | Client/Classes/message.py | 1 | 4296 | import json
# TODO: See docstring below
class Message:
    """
    A message object to be passed between threads via Queue

    I{Sample Usage}:

    Making a new message that will send terminate from main to display:
        C{newMessage = Message("Main", "Display", "Display", "Terminate", {})}

    Making a message that will update Display with a new slide:
        C{newMessage = Message("Main", "Display", "Display", "Update", {})}
        C{newMessage.add_content("slide1", "http://google.com")}

    @attention: It is good practice to use dict accessors as opposed to object attributes
    @todo: Un-expose attributes
    @ivar src: Name of the thread who sent the message
    @type src: String
    @ivar dest: Intended message destination
    @type dest: String
    @ivar pluginDest: Client-side plugin to receive the message
    @type pluginDest: String
    @ivar action: Action for the client-side plugin to perform
    @type action: String
    @ivar content: Message payload (destination plugin should be able to interpret content)
    @type content: Dictionary
    @ivar datetime: Message timestamp
    @type datetime: String

    @copyright: Northeastern University Crew 2014
    """

    def __init__(self, src, dest, pluginDest, action, content, datetime=None):
        """
        Message Constructor

        @param src: Name of the thread who sent the message
        @type src: String
        @param dest: Intended message destination
        @type dest: String
        @param pluginDest: Client-side plugin to receive the message
        @type pluginDest: String
        @param action: Action for the client-side plugin to perform
        @type action: String
        @param content: Message payload (destination plugin should be able to interpret content)
        @type content: Dictionary
        @param datetime: Message timestamp
        @type datetime: String
        @return: Message with the given parameters
        @rtype: Message
        """
        self.src = src
        self.dest = dest
        self.pluginDest = pluginDest
        self.action = action
        self.content = content
        self.datetime = datetime

    def add_content(self, key, val):
        """
        Sets the given key-value pair in the message's content

        @param key: Key to write to in message content
        @param val: Value to write in message content
        @return: None
        """
        self.content[key] = val

    def toJSON(self):
        """
        Returns the message in JSON format

        @return: JSON representation of Message
        @rtype: String
        """
        text = json.dumps(self.__dict__)
        return text

    def __str__(self):
        """
        @return: String Representation of Message
        @rtype: String
        """
        # BUG FIX: content is a dict, so the original "+ self.content"
        # raised TypeError on every str(message); stringify it explicitly.
        return "Src = " + self.src + " dest = " + self.dest + \
            " pluginDest = " + self.pluginDest + \
            " action = " + self.action + " content = " + str(self.content)

    @staticmethod
    def fromJSON(jsonObj):
        """
        Creates a message from parsed JSON input

        @param jsonObj: A Parsed JSON Input
        @type jsonObj: Dictionary
        @return: Message represented by the given JSON
        @rtype: Message
        """
        src = jsonObj['src']
        dest = jsonObj['dest']
        pluginDest = jsonObj['pluginDest']
        action = jsonObj['action']
        content = jsonObj['content']
        # 'datetime' is optional in the wire format.
        datetime = None
        if "datetime" in jsonObj:
            datetime = jsonObj["datetime"]

        return Message(src, dest, pluginDest, action, content, datetime)

    def __getitem__(self, item):
        """
        Implements dict-like read access for Messages

        @param item: Message attribute name to fetch
        @return: The attribute associated with the given key
        @raise Exception: if *item* is not a Message attribute
        """
        if item in ("src", "dest", "pluginDest", "action",
                    "content", "datetime"):
            return getattr(self, item)
        raise Exception("Message does not have an attribute \"" + item + "\"")
| mit |
JesseLivezey/plankton | pylearn2/datasets/stl10.py | 44 | 5563 | """
.. todo::
WRITEME
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import numpy as np
from theano.compat.six.moves import xrange
from pylearn2.datasets import dense_design_matrix
from pylearn2.utils.serial import load
from pylearn2.utils import contains_nan
class STL10(dense_design_matrix.DenseDesignMatrix):
    """
    The STL-10 dataset
    Adam Coates, Honglak Lee, Andrew Y. Ng An Analysis of Single Layer
    Networks in Unsupervised Feature Learning AISTATS, 2011
    http://www.stanford.edu/~acoates//stl10/
    When reporting results on this dataset, you are meant to use a somewhat
    unusual evaluation procedure.
    Use STL10(which_set='train') to load the training set. Then restrict the
    training set to one of the ten folds using the restrict function below. You
    must then train only on the data from that fold.
    For the test set, report the average test set performance over the ten
    trials obtained by training on each of the ten folds.
    The folds here do not define the splits you should use for cross
    validation. You are free to make your own split within each fold.
    Parameters
    ----------
    which_set : str
        Which split to load: 'train', 'test' or 'unlabeled'.
    center : bool
        If True, pixel values are shifted from [0, 255] to [-127.5, 127.5].
    example_range : tuple or None
        Optional (start, stop) pair; only examples in that half-open range
        are kept.
    """
    def __init__(self, which_set, center=False, example_range=None):
        """
        Loads the requested split from the STL-10 matlab files under
        ${PYLEARN2_DATA_PATH}, converts it to a float32 design matrix and
        fixes up the column-major image layout produced by MATLAB.
        """
        if which_set == 'train':
            train = load('${PYLEARN2_DATA_PATH}/stl10/stl10_matlab/train.mat')
            # Load the class names
            self.class_names = [array[0].encode('utf-8')
                                for array in train['class_names'][0]]
            # Load the fold indices: stored as a (1, 10) cell array of
            # (1000, 1) uint16 columns; flatten into a (10, 1000) matrix.
            fold_indices = train['fold_indices']
            assert fold_indices.shape == (1, 10)
            self.fold_indices = np.zeros((10, 1000), dtype='uint16')
            for i in xrange(10):
                indices = fold_indices[0, i]
                assert indices.shape == (1000, 1)
                assert indices.dtype == 'uint16'
                self.fold_indices[i, :] = indices[:, 0]
            # The data is stored as uint8
            # If we leave it as uint8, it will cause the CAE to silently fail
            # since theano will treat derivatives wrt X as 0
            X = np.cast['float32'](train['X'])
            assert X.shape == (5000, 96 * 96 * 3)
            if example_range is not None:
                X = X[example_range[0]:example_range[1], :]
            y_labels = 10
            # labels are stored 1-based in the .mat file; shift to [0, 9]
            y = train['y'][:, 0] - 1
            assert y.shape == (5000,)
        elif which_set == 'test':
            test = load('${PYLEARN2_DATA_PATH}/stl10/stl10_matlab/test.mat')
            # Load the class names
            self.class_names = [array[0].encode('utf-8')
                                for array in test['class_names'][0]]
            # The data is stored as uint8
            # If we leave it as uint8, it will cause the CAE to silently fail
            # since theano will treat derivatives wrt X as 0
            X = np.cast['float32'](test['X'])
            assert X.shape == (8000, 96 * 96 * 3)
            if example_range is not None:
                X = X[example_range[0]:example_range[1], :]
            y_labels = 10
            # labels are stored 1-based in the .mat file; shift to [0, 9]
            y = test['y'][:, 0] - 1
            assert y.shape == (8000,)
        elif which_set == 'unlabeled':
            unlabeled = load('${PYLEARN2_DATA_PATH}/stl10/stl10_matlab/'
                             'unlabeled.mat')
            X = unlabeled['X']
            # this file is stored in HDF format, which transposes everything
            assert X.shape == (96 * 96 * 3, 100000)
            assert X.dtype == 'uint8'
            if example_range is None:
                X = X.value
            else:
                X = X.value[:, example_range[0]:example_range[1]]
            X = np.cast['float32'](X.T)
            unlabeled.close()
            y_labels = None
            y = None
        else:
            raise ValueError('"' + which_set + '" is not an STL10 dataset. '
                             'Recognized values are "train", "test", and '
                             '"unlabeled".')
        if center:
            X -= 127.5
        view_converter = dense_design_matrix.DefaultViewConverter((96, 96, 3))
        super(STL10, self).__init__(X=X, y=y, y_labels=y_labels,
                                    view_converter=view_converter)
        # MATLAB stores images column-major, so each 96x96 channel must be
        # transposed. NOTE(review): this loop writes through X in place and
        # the final assert reads self.X -- presumably self.X aliases the X
        # passed to the superclass constructor; confirm before reordering.
        for i in xrange(self.X.shape[0]):
            mat = X[i:i + 1, :]
            topo = self.get_topological_view(mat)
            for j in xrange(topo.shape[3]):
                temp = topo[0, :, :, j].T.copy()
                topo[0, :, :, j] = temp
            mat = self.get_design_matrix(topo)
            X[i:i + 1, :] = mat
        assert not contains_nan(self.X)
def restrict(dataset, fold):
    """
    Restricts the dataset to use the specified fold (0 to 9).
    dataset should be the training set, i.e. STL10(which_set='train').
    The example indices stored in the .mat file are 1-based (MATLAB), so
    they are shifted down by one before indexing the design matrix; the
    fold argument itself is a plain 0-based row into fold_indices.
    The dataset is modified in place and also returned for convenience.
    """
    fold_indices = dataset.fold_indices
    assert fold_indices.shape == (10, 1000)
    # convert the 1-based MATLAB example indices to 0-based numpy indices
    idxs = fold_indices[fold, :] - 1
    dataset.X = dataset.X[idxs, :].copy()
    assert dataset.X.shape[0] == 1000
    dataset.y = dataset.y[idxs, ...].copy()
    assert dataset.y.shape[0] == 1000
    return dataset
| bsd-3-clause |
agentr13/python-phonenumbers | python/phonenumbers/data/region_HR.py | 10 | 3499 | """Auto-generated file, do not edit by hand. HR metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_HR = PhoneMetadata(id='HR', country_code=385, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[1-7]\\d{5,8}|[89]\\d{6,11}', possible_number_pattern='\\d{6,12}'),
fixed_line=PhoneNumberDesc(national_number_pattern='1\\d{7}|(?:2[0-3]|3[1-5]|4[02-47-9]|5[1-3])\\d{6,7}', possible_number_pattern='\\d{6,9}', example_number='12345678'),
mobile=PhoneNumberDesc(national_number_pattern='9(?:[1-9]\\d{6,10}|01\\d{6,9})', possible_number_pattern='\\d{8,12}', example_number='912345678'),
toll_free=PhoneNumberDesc(national_number_pattern='80[01]\\d{4,7}', possible_number_pattern='\\d{7,10}', example_number='8001234567'),
premium_rate=PhoneNumberDesc(national_number_pattern='6(?:[01459]\\d{4,7})', possible_number_pattern='\\d{6,9}', example_number='611234'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='7[45]\\d{4,7}', possible_number_pattern='\\d{6,9}', example_number='741234567'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='[76]2\\d{6,7}', possible_number_pattern='\\d{8,9}', example_number='62123456'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(1)(\\d{4})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['1'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(6[09])(\\d{4})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['6[09]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='([67]2)(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['[67]2'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='([2-5]\\d)(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['[2-5]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(9\\d)(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['9'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(9\\d)(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['9'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(9\\d)(\\d{3,4})(\\d{3})(\\d{3})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['9'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2,3})', format='\\1 \\2 \\3', leading_digits_pattern=['6[0145]|7'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{3,4})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['6[0145]|7'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(80[01])(\\d{2})(\\d{2,3})', format='\\1 \\2 \\3', leading_digits_pattern=['8'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(80[01])(\\d{3,4})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['8'], national_prefix_formatting_rule='0\\1')],
mobile_number_portable_region=True)
| apache-2.0 |
MoKee/android_kernel_oppo_find7a | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Global bookkeeping shared by the tracepoint stubs below and the
# handle_*() routines that consume the time-sorted event stream.
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options (filled in from sys.argv by trace_begin())
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Convert the span between two nanosecond timestamps into milliseconds.
def diff_msec(src, dst):
        """Return dst - src (both in nsec) expressed in msec."""
        elapsed_nsec = dst - src
        return elapsed_nsec / 1000000.0
# Display the life cycle of one transmitted packet: time spent in the
# Qdisc and in the net device until the skb is finally freed.
def print_transmit(hunk):
        # honor the "dev=" option: skip packets that belong to other devices
        if dev != 0 and hunk['dev'].find(dev) < 0:
                return
        print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
                (hunk['dev'], hunk['len'],
                nsecs_secs(hunk['queue_t']),
                nsecs_nsecs(hunk['queue_t'])/1000,
                diff_msec(hunk['queue_t'], hunk['xmit_t']),
                diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
        """Render one receive hunk (irqs + NET_RX softirq + skb events)."""
        show_hunk = 0
        irq_list = hunk['irq_list']
        cpu = irq_list[0]['cpu']
        # all offsets below are printed relative to the first irq entry
        base_t = irq_list[0]['irq_ent_t']
        # check if this hunk should be showed (the "dev=" option filters
        # on the irq name; with no option every hunk is shown)
        if dev != 0:
                for i in range(len(irq_list)):
                        if irq_list[i]['name'].find(dev) >= 0:
                                show_hunk = 1
                                break
        else:
                show_hunk = 1
        if show_hunk == 0:
                return
        print "%d.%06dsec cpu=%d" % \
                (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
        # first the hard irqs (and any netif_rx calls made inside them) ...
        for i in range(len(irq_list)):
                print PF_IRQ_ENTRY % \
                        (diff_msec(base_t, irq_list[i]['irq_ent_t']),
                        irq_list[i]['irq'], irq_list[i]['name'])
                print PF_JOINT
                irq_event_list = irq_list[i]['event_list']
                for j in range(len(irq_event_list)):
                        irq_event = irq_event_list[j]
                        if irq_event['event'] == 'netif_rx':
                                print PF_NET_RX % \
                                        (diff_msec(base_t, irq_event['time']),
                                        irq_event['skbaddr'])
                                print PF_JOINT
        # ... then the NET_RX softirq and the per-skb events inside it
        print PF_SOFT_ENTRY % \
                diff_msec(base_t, hunk['sirq_ent_t'])
        print PF_JOINT
        event_list = hunk['event_list']
        for i in range(len(event_list)):
                event = event_list[i]
                if event['event_name'] == 'napi_poll':
                        print PF_NAPI_POLL % \
                                (diff_msec(base_t, event['event_t']), event['dev'])
                        if i == len(event_list) - 1:
                                print ""
                        else:
                                print PF_JOINT
                else:
                        print PF_NET_RECV % \
                                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                                event['len'])
                        # 'comm' means the skb was copied to a process;
                        # 'handle' means it was freed/consumed instead
                        if 'comm' in event.keys():
                                print PF_WJOINT
                                print PF_CPY_DGRAM % \
                                        (diff_msec(base_t, event['comm_t']),
                                        event['pid'], event['comm'])
                        elif 'handle' in event.keys():
                                print PF_WJOINT
                                if event['handle'] == "kfree_skb":
                                        print PF_KFREE_SKB % \
                                                (diff_msec(base_t,
                                                event['comm_t']),
                                                event['location'])
                                elif event['handle'] == "consume_skb":
                                        print PF_CONS_SKB % \
                                                diff_msec(base_t,
                                                event['comm_t'])
                        print PF_JOINT
def trace_begin():
        """Parse the script options (sys.argv) before any event is handled.
        Recognized options:
          tx     -- show only the transmit chart
          rx     -- show only the receive chart
          dev=X  -- only report hunks whose device name contains X
          debug  -- dump buffer status after the charts
        If neither 'tx' nor 'rx' is given, both charts are shown.
        """
        global show_tx
        global show_rx
        global dev
        global debug
        # sys.argv[0] is the script name; scan only the real options
        for arg in sys.argv[1:]:
                if arg == 'tx':
                        show_tx = 1
                elif arg == 'rx':
                        show_rx = 1
                elif arg.startswith('dev='):
                        dev = arg[4:]
                elif arg == 'debug':
                        debug = 1
        # default: show both directions
        if show_tx == 0 and show_rx == 0:
                show_tx = 1
                show_rx = 1
def trace_end():
        """Sort the collected events, reconstruct the rx/tx hunks, print."""
        # order all events in time
        # NOTE(review): cmp-based sort is a Python 2 idiom; a Python 3 port
        # would need key=lambda a: a[EINFO_IDX_TIME] instead.
        all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                                            b[EINFO_IDX_TIME]))
        # process all events: dispatch each raw tuple to its handler, which
        # threads it into the irq/softirq/skb tracking structures
        for i in range(len(all_event_list)):
                event_info = all_event_list[i]
                name = event_info[EINFO_IDX_NAME]
                if name == 'irq__softirq_exit':
                        handle_irq_softirq_exit(event_info)
                elif name == 'irq__softirq_entry':
                        handle_irq_softirq_entry(event_info)
                elif name == 'irq__softirq_raise':
                        handle_irq_softirq_raise(event_info)
                elif name == 'irq__irq_handler_entry':
                        handle_irq_handler_entry(event_info)
                elif name == 'irq__irq_handler_exit':
                        handle_irq_handler_exit(event_info)
                elif name == 'napi__napi_poll':
                        handle_napi_poll(event_info)
                elif name == 'net__netif_receive_skb':
                        handle_netif_receive_skb(event_info)
                elif name == 'net__netif_rx':
                        handle_netif_rx(event_info)
                elif name == 'skb__skb_copy_datagram_iovec':
                        handle_skb_copy_datagram_iovec(event_info)
                elif name == 'net__net_dev_queue':
                        handle_net_dev_queue(event_info)
                elif name == 'net__net_dev_xmit':
                        handle_net_dev_xmit(event_info)
                elif name == 'skb__kfree_skb':
                        handle_kfree_skb(event_info)
                elif name == 'skb__consume_skb':
                        handle_consume_skb(event_info)
        # display receive hunks
        if show_rx:
                for i in range(len(receive_hunk_list)):
                        print_receive(receive_hunk_list[i])
        # display transmit hunks
        if show_tx:
                print "   dev    len      Qdisc        " \
                        "       netdevice             free"
                for i in range(len(tx_free_list)):
                        print_transmit(tx_free_list[i])
        # with "debug": report how full the matching buffers got and how
        # many entries were evicted because of the buffer budget
        if debug:
                print "debug buffer status"
                print "----------------------------"
                print "xmit Qdisc:remain:%d overflow:%d" % \
                        (len(tx_queue_list), of_count_tx_queue_list)
                print "xmit netdevice:remain:%d overflow:%d" % \
                        (len(tx_xmit_list), of_count_tx_xmit_list)
                print "receive:remain:%d overflow:%d" % \
                        (len(rx_skb_list), of_count_rx_skb_list)
# Called from perf when it finds a corresponding event (one stub per
# tracepoint). Each stub only normalizes its arguments into an event_info
# tuple and appends it to all_event_list; real processing happens after
# sorting, in trace_end().
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
        # only NET_RX softirqs are relevant to this script
        if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
                return
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
        all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
        # NOTE(review): filters via the "irq__softirq_entry" symbol table --
        # presumably the vec symbols are shared by all softirq events; confirm.
        if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
                return
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
        all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
        if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
                return
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
        all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                        irq, irq_name):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                irq, irq_name)
        all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
        all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                napi, dev_name)
        all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                        skblen, dev_name):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                skbaddr, skblen, dev_name)
        all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                        skblen, dev_name):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                skbaddr, skblen, dev_name)
        all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                        skbaddr, skblen, dev_name):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                skbaddr, skblen, dev_name)
        all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                        skbaddr, skblen, rc, dev_name):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                skbaddr, skblen, rc ,dev_name)
        all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                        skbaddr, protocol, location):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                skbaddr, protocol, location)
        all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                skbaddr)
        all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
        skbaddr, skblen):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                skbaddr, skblen)
        all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
        """Push a record for a hard-irq entry onto its CPU's irq stack."""
        (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
        record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
        # create the per-cpu stack on first use, then push the new record
        irq_dic.setdefault(cpu, []).append(record)
def handle_irq_handler_exit(event_info):
        """Close the newest irq record on this CPU; keep it only if the irq
        raised a NET_RX softirq (i.e. it accumulated an event_list)."""
        (name, context, cpu, time, pid, comm, irq, ret) = event_info
        if cpu not in irq_dic.keys():
                return
        irq_record = irq_dic[cpu].pop()
        if irq != irq_record['irq']:
                return
        irq_record.update({'irq_ext_t':time})
        # if an irq doesn't include NET_RX softirq, drop.
        if 'event_list' in irq_record.keys():
                irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
        """Append a 'sirq_raise' marker to the newest irq record on the CPU."""
        (name, context, cpu, time, pid, comm, vec) = event_info
        if cpu not in irq_dic.keys() \
        or len(irq_dic[cpu]) == 0:
                return
        # pop/update/re-push so the marker lands on the top-of-stack record
        irq_record = irq_dic[cpu].pop()
        if 'event_list' in irq_record.keys():
                irq_event_list = irq_record['event_list']
        else:
                irq_event_list = []
        irq_event_list.append({'time':time, 'event':'sirq_raise'})
        irq_record.update({'event_list':irq_event_list})
        irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
        """Open a fresh NET_RX softirq context for this CPU."""
        (name, context, cpu, time, pid, comm, vec) = event_info
        net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
        """Merge the CPU's pending irq records and softirq events into one
        receive hunk; drop the data if either half is missing."""
        (name, context, cpu, time, pid, comm, vec) = event_info
        irq_list = []
        event_list = 0
        if cpu in irq_dic.keys():
                irq_list = irq_dic[cpu]
                del irq_dic[cpu]
        if cpu in net_rx_dic.keys():
                sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
                event_list = net_rx_dic[cpu]['event_list']
                del net_rx_dic[cpu]
        if irq_list == [] or event_list == 0:
                return
        rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
        # merge information related to a NET_RX softirq
        receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
        """Record a napi poll inside the CPU's open NET_RX softirq, if any."""
        (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
        if cpu in net_rx_dic.keys():
                event_list = net_rx_dic[cpu]['event_list']
                rec_data = {'event_name':'napi_poll',
                                'dev':dev_name, 'event_t':time}
                event_list.append(rec_data)
def handle_netif_rx(event_info):
        """Attach a netif_rx call to the newest irq record on this CPU."""
        (name, context, cpu, time, pid, comm,
                skbaddr, skblen, dev_name) = event_info
        if cpu not in irq_dic.keys() \
        or len(irq_dic[cpu]) == 0:
                return
        # pop/update/re-push the top-of-stack irq record
        irq_record = irq_dic[cpu].pop()
        if 'event_list' in irq_record.keys():
                irq_event_list = irq_record['event_list']
        else:
                irq_event_list = []
        irq_event_list.append({'time':time, 'event':'netif_rx',
                'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
        irq_record.update({'event_list':irq_event_list})
        irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
        """Record a received skb and queue it for later matching against
        skb_copy_datagram_iovec / kfree_skb, within the buffer budget."""
        global of_count_rx_skb_list
        (name, context, cpu, time, pid, comm,
                skbaddr, skblen, dev_name) = event_info
        if cpu in net_rx_dic.keys():
                rec_data = {'event_name':'netif_receive_skb',
                            'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
                event_list = net_rx_dic[cpu]['event_list']
                event_list.append(rec_data)
                # newest first; evict the oldest once over budget
                rx_skb_list.insert(0, rec_data)
                if len(rx_skb_list) > buffer_budget:
                        rx_skb_list.pop()
                        of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
        """Track a packet entering the Qdisc via dev_queue_xmit."""
        global of_count_tx_queue_list
        (name, context, cpu, time, pid, comm,
                skbaddr, skblen, dev_name) = event_info
        entry = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
        # newest first, so lookups for recently queued skbs stay cheap
        tx_queue_list.insert(0, entry)
        # enforce the buffer budget by evicting the oldest entry
        if len(tx_queue_list) > buffer_budget:
                tx_queue_list.pop()
                of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
        """On a successful transmit, move the skb from the Qdisc list to the
        netdevice (xmit) list, stamping its xmit time."""
        global of_count_tx_xmit_list
        (name, context, cpu, time, pid, comm,
                skbaddr, skblen, rc, dev_name) = event_info
        if rc == 0: # NETDEV_TX_OK
                for i in range(len(tx_queue_list)):
                        skb = tx_queue_list[i]
                        if skb['skbaddr'] == skbaddr:
                                skb['xmit_t'] = time
                                # newest first; evict the oldest once over budget
                                tx_xmit_list.insert(0, skb)
                                del tx_queue_list[i]
                                if len(tx_xmit_list) > buffer_budget:
                                        tx_xmit_list.pop()
                                        of_count_tx_xmit_list += 1
                                return
def handle_kfree_skb(event_info):
        """A skb was dropped/freed. Check, in order: packets still in the
        Qdisc (drop silently), packets already handed to the device (record
        the free time), and received packets (record the drop location)."""
        (name, context, cpu, time, pid, comm,
                skbaddr, protocol, location) = event_info
        # dropped before transmit: forget it
        for i in range(len(tx_queue_list)):
                skb = tx_queue_list[i]
                if skb['skbaddr'] == skbaddr:
                        del tx_queue_list[i]
                        return
        # freed after transmit: this completes the tx life cycle
        for i in range(len(tx_xmit_list)):
                skb = tx_xmit_list[i]
                if skb['skbaddr'] == skbaddr:
                        skb['free_t'] = time
                        tx_free_list.append(skb)
                        del tx_xmit_list[i]
                        return
        # received packet dropped instead of delivered: annotate its record
        # (the dict is shared with receive_hunk_list, so the update is seen
        # by print_receive even after removal from rx_skb_list)
        for i in range(len(rx_skb_list)):
                rec_data = rx_skb_list[i]
                if rec_data['skbaddr'] == skbaddr:
                        rec_data.update({'handle':"kfree_skb",
                                'comm':comm, 'pid':pid, 'comm_t':time})
                        del rx_skb_list[i]
                        return
def handle_consume_skb(event_info):
        """A transmitted skb was consumed: stamp its free time and retire it."""
        (name, context, cpu, time, pid, comm, skbaddr) = event_info
        for idx, pkt in enumerate(tx_xmit_list):
                if pkt['skbaddr'] == skbaddr:
                        pkt['free_t'] = time
                        tx_free_list.append(pkt)
                        del tx_xmit_list[idx]
                        return
def handle_skb_copy_datagram_iovec(event_info):
        """A received skb was copied to user space: close out its record."""
        (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
        for idx, rec in enumerate(rx_skb_list):
                if skbaddr == rec['skbaddr']:
                        # note which process consumed the packet and when;
                        # the dict is shared with receive_hunk_list, so the
                        # update stays visible after removal from this list
                        rec['handle'] = "skb_copy_datagram_iovec"
                        rec['comm'] = comm
                        rec['pid'] = pid
                        rec['comm_t'] = time
                        del rx_skb_list[idx]
                        return
| gpl-2.0 |
nostalgiaz/django-cms | cms/migrations_django/0001_initial.py | 19 | 10408 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from cms.models import ACCESS_CHOICES, Page
from cms.utils.conf import get_cms_setting
from django.conf import settings
from django.db import models, migrations
import django.utils.timezone
from django.utils.translation import ugettext_lazy as _
template_choices = [(x, _(y)) for x, y in get_cms_setting('TEMPLATES')]
class Migration(migrations.Migration):
    """Initial django CMS schema: the plugin tree (CMSPlugin and the alias
    plugin), the Page tree, and the page-permission models.

    NOTE(review): initial migrations are normally generated from the model
    definitions; keep manual edits minimal so the two stay in sync.
    """
    dependencies = [
        ('auth', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('sites', '0001_initial'),
    ]
    operations = [
        # Base table for every plugin instance; level/lft/rght/tree_id are
        # MPTT tree-bookkeeping columns.
        migrations.CreateModel(
            name='CMSPlugin',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name=_('ID'), auto_created=True, serialize=False)),
                ('position', models.PositiveSmallIntegerField(null=True, editable=False, blank=True, verbose_name=_('position'))),
                ('language', models.CharField(db_index=True, max_length=15, verbose_name=_("language"), editable=False)),
                ('plugin_type', models.CharField(db_index=True, max_length=50, verbose_name=_('plugin_name'), editable=False)),
                ('creation_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name=_('creation date'), editable=False)),
                ('changed_date', models.DateTimeField(auto_now=True)),
                ('level', models.PositiveIntegerField(db_index=True, editable=False)),
                ('lft', models.PositiveIntegerField(db_index=True, editable=False)),
                ('rght', models.PositiveIntegerField(db_index=True, editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Plugin that simply points at another plugin instance.
        migrations.CreateModel(
            name='AliasPluginModel',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(primary_key=True, to='cms.CMSPlugin', auto_created=True, parent_link=True, serialize=False)),
                ('plugin', models.ForeignKey(null=True, to='cms.CMSPlugin', related_name='alias_reference', editable=False)),
            ],
            options={
            },
            bases=('cms.cmsplugin',),
        ),
        # The self-referential parent FK is added once CMSPlugin exists.
        migrations.AddField(
            model_name='cmsplugin',
            name='parent',
            field=models.ForeignKey(null=True, to='cms.CMSPlugin', blank=True, editable=False),
            preserve_default=True,
        ),
        # Site-wide permissions granted to a user or group.
        migrations.CreateModel(
            name='GlobalPagePermission',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name=_('ID'), auto_created=True, serialize=False)),
                ('can_change', models.BooleanField(default=True, verbose_name=_('can edit'))),
                ('can_add', models.BooleanField(default=True, verbose_name=_('can add'))),
                ('can_delete', models.BooleanField(default=True, verbose_name=_('can delete'))),
                ('can_change_advanced_settings', models.BooleanField(default=False, verbose_name=_('can change advanced settings'))),
                ('can_publish', models.BooleanField(default=True, verbose_name=_('can publish'))),
                ('can_change_permissions', models.BooleanField(default=False, help_text='on page level', verbose_name=_('can change permissions'))),
                ('can_move_page', models.BooleanField(default=True, verbose_name=_('can move'))),
                ('can_view', models.BooleanField(default=False, help_text='frontend view restriction', verbose_name=_('view restricted'))),
                ('can_recover_page', models.BooleanField(default=True, help_text='can recover any deleted page', verbose_name=_('can recover pages'))),
                ('group', models.ForeignKey(null=True, to='auth.Group', verbose_name=_('group'), blank=True)),
                ('sites', models.ManyToManyField(null=True, help_text='If none selected, user haves granted permissions to all sites.', blank=True, to='sites.Site', verbose_name=_('sites'))),
                ('user', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL, verbose_name=_('user'), blank=True)),
            ],
            options={
                'verbose_name': 'Page global permission',
                'verbose_name_plural': 'Pages global permissions',
            },
            bases=(models.Model,),
        ),
        # The page tree itself (MPTT columns again: level/lft/rght/tree_id);
        # publisher_is_draft/publisher_public implement the draft/live split.
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name=_('ID'), auto_created=True, serialize=False)),
                ('created_by', models.CharField(max_length=70, verbose_name=_('created by'), editable=False)),
                ('changed_by', models.CharField(max_length=70, verbose_name=_('changed by'), editable=False)),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
                ('changed_date', models.DateTimeField(auto_now=True)),
                ('publication_date', models.DateTimeField(db_index=True, null=True, help_text='When the page should go live. Status must be "Published" for page to go live.', blank=True, verbose_name=_('publication date'))),
                ('publication_end_date', models.DateTimeField(db_index=True, null=True, help_text='When to expire the page. Leave empty to never expire.', blank=True, verbose_name=_('publication end date'))),
                ('in_navigation', models.BooleanField(db_index=True, default=True, verbose_name=_('in navigation'))),
                ('soft_root', models.BooleanField(db_index=True, default=False, help_text='All ancestors will not be displayed in the navigation', verbose_name=_('soft root'))),
                ('reverse_id', models.CharField(db_index=True, max_length=40, verbose_name=_('id'), null=True, help_text='A unique identifier that is used with the page_url templatetag for linking to this page', blank=True)),
                ('navigation_extenders', models.CharField(db_index=True, max_length=80, blank=True, verbose_name=_('attached menu'), null=True)),
                ('template', models.CharField(max_length=100, default='INHERIT', help_text='The template used to render the content.', verbose_name=_('template'), choices=template_choices)),
                ('login_required', models.BooleanField(default=False, verbose_name=_('login required'))),
                ('limit_visibility_in_menu', models.SmallIntegerField(db_index=True, default=None, verbose_name=_('menu visibility'), null=True, choices=Page.LIMIT_VISIBILITY_IN_MENU_CHOICES, help_text='limit when this page is visible in the menu', blank=True)),
                ('is_home', models.BooleanField(db_index=True, default=False, editable=False)),
                ('application_urls', models.CharField(db_index=True, max_length=200, blank=True, verbose_name=_('application'), null=True)),
                ('application_namespace', models.CharField(max_length=200, null=True, blank=True, verbose_name=_('application instance name'))),
                ('level', models.PositiveIntegerField(db_index=True, editable=False)),
                ('lft', models.PositiveIntegerField(db_index=True, editable=False)),
                ('rght', models.PositiveIntegerField(db_index=True, editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('publisher_is_draft', models.BooleanField(db_index=True, default=True, editable=False)),
                ('languages', models.CharField(max_length=255, null=True, blank=True, editable=False)),
                ('revision_id', models.PositiveIntegerField(default=0, editable=False)),
                ('xframe_options', models.IntegerField(default=0, choices=Page.X_FRAME_OPTIONS_CHOICES)),
                ('parent', models.ForeignKey(null=True, to='cms.Page', related_name='children', blank=True)),
                ('publisher_public', models.OneToOneField(null=True, to='cms.Page', related_name='publisher_draft', editable=False)),
                ('site', models.ForeignKey(to='sites.Site', verbose_name=_('site'), related_name='djangocms_pages', help_text='The site the page is accessible at.')),
            ],
            options={
                'ordering': ('tree_id', 'lft'),
                'permissions': (('view_page', 'Can view page'), ('publish_page', 'Can publish page'), ('edit_static_placeholder', 'Can edit static placeholders')),
                'verbose_name_plural': 'pages',
                'verbose_name': 'page',
            },
            bases=(models.Model,),
        ),
        # Per-page permissions; grant_on controls how far down the subtree
        # the grant applies (see ACCESS_CHOICES).
        migrations.CreateModel(
            name='PagePermission',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name=_('ID'), auto_created=True, serialize=False)),
                ('can_change', models.BooleanField(default=True, verbose_name=_('can edit'))),
                ('can_add', models.BooleanField(default=True, verbose_name=_('can add'))),
                ('can_delete', models.BooleanField(default=True, verbose_name=_('can delete'))),
                ('can_change_advanced_settings', models.BooleanField(default=False, verbose_name=_('can change advanced settings'))),
                ('can_publish', models.BooleanField(default=True, verbose_name=_('can publish'))),
                ('can_change_permissions', models.BooleanField(default=False, help_text='on page level', verbose_name=_('can change permissions'))),
                ('can_move_page', models.BooleanField(default=True, verbose_name=_('can move'))),
                ('can_view', models.BooleanField(default=False, help_text='frontend view restriction', verbose_name=_('view restricted'))),
                ('grant_on', models.IntegerField(default=5, verbose_name=_('Grant on'), choices=ACCESS_CHOICES)),
                ('group', models.ForeignKey(null=True, to='auth.Group', verbose_name=_('group'), blank=True)),
                ('page', models.ForeignKey(null=True, to='cms.Page', verbose_name=_('page'), blank=True)),
                ('user', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL, verbose_name=_('user'), blank=True)),
            ],
            options={
                'verbose_name': 'Page permission',
                'verbose_name_plural': 'Page permissions',
            },
            bases=(models.Model,),
        ),
    ]
| bsd-3-clause |
mgedmin/ansible | lib/ansible/template/vars.py | 20 | 3721 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.six import iteritems
from jinja2.utils import missing
from ansible.module_utils._text import to_native
__all__ = ['AnsibleJ2Vars']
class AnsibleJ2Vars:
    '''
    Helper class to template all variable content before jinja2 sees it. This is
    done by hijacking the variable storage that jinja2 uses, and overriding __contains__
    and __getitem__ to look like a dict. Added bonus is avoiding duplicating the large
    hashes that inject tends to be.
    To facilitate using builtin jinja2 things like range, globals are also handled here.
    '''

    def __init__(self, templar, globals, locals=None, *extras):
        '''
        Initializes this object with a valid Templar() object, as
        well as several dictionaries of variables representing
        different scopes (in jinja2 terminology).

        :arg templar: Templar instance used to lazily render variable content.
        :arg globals: mapping of jinja2 globals (builtins such as range).
        :arg locals: mapping of jinja2 locals; only keys carrying the jinja2
            'l_' prefix with a real (non-missing) value are kept, stored
            without the prefix.
        :arg extras: additional variable dicts, consulted after locals.
        Note: the 'globals'/'locals' parameter names shadow builtins, but
        they are part of the public signature and are kept for compatibility.
        '''
        self._templar = templar
        self._globals = globals
        self._extras = extras
        self._locals = dict()
        if isinstance(locals, dict):
            for key, val in iteritems(locals):
                if key[:2] == 'l_' and val is not missing:
                    self._locals[key[2:]] = val

    def __contains__(self, k):
        # lookup order mirrors __getitem__: templar variables first, then
        # locals, extras and finally jinja2 globals
        if k in self._templar._available_variables:
            return True
        if k in self._locals:
            return True
        for i in self._extras:
            if k in i:
                return True
        if k in self._globals:
            return True
        return False

    def __getitem__(self, varname):
        '''
        Fetch a variable, templating its content through the Templar unless
        it is one of the pass-through values (HostVars or the 'vars' dict).
        Raises KeyError for names not found in any scope.
        '''
        if varname not in self._templar._available_variables:
            if varname in self._locals:
                return self._locals[varname]
            for i in self._extras:
                if varname in i:
                    return i[varname]
            if varname in self._globals:
                return self._globals[varname]
            else:
                raise KeyError("undefined variable: %s" % varname)
        variable = self._templar._available_variables[varname]
        # HostVars is special, return it as-is, as is the special variable
        # 'vars', which contains the vars structure
        from ansible.vars.hostvars import HostVars
        if isinstance(variable, dict) and varname == "vars" or isinstance(variable, HostVars):
            return variable
        else:
            value = None
            try:
                value = self._templar.template(variable)
            except Exception as e:
                # BUGFIX: 'e.message' exists only on Python 2 exceptions
                # (deprecated by PEP 352, removed in Python 3); accessing it
                # on py3 raised AttributeError and masked the real templating
                # error. Use the exception's text via to_native() instead.
                raise type(e)(to_native(variable) + ': ' + to_native(e))
            return value

    def add_locals(self, locals):
        '''
        If locals are provided, create a copy of self containing those
        locals in addition to what is already in this variable proxy.
        '''
        if locals is None:
            return self
        return AnsibleJ2Vars(self._templar, self._globals, locals=locals, *self._extras)
| gpl-3.0 |
hydroshare/hydroshare-demo-auth | allaccess/management/commands/migrate_social_providers.py | 3 | 1785 | from __future__ import unicode_literals
from django.core.management.base import NoArgsCommand, CommandError
from django.test.client import RequestFactory
from allaccess.models import Provider
class Command(NoArgsCommand):
    "Convert existing providers from django-social-auth to django-all-access."

    def handle_noargs(self, **options):
        # 'verbosity' is injected by Django's management framework.
        # NOTE(review): int(None) would raise if 'verbosity' were ever absent;
        # assumed always present when invoked via manage.py — confirm.
        verbosity = int(options.get('verbosity'))
        try:
            from social_auth import version
            from social_auth.backends import get_backends, BaseOAuth
        except ImportError: # pragma: no cover
            raise CommandError("django-social-auth is not installed.")
        # Pre-0.7 social_auth backends must be instantiated with a request,
        # so build a throwaway one up front.
        request = RequestFactory().get('/')
        for name, backend in get_backends().items():
            # Only enabled OAuth-style backends carry key/secret pairs worth
            # migrating.
            if issubclass(backend, BaseOAuth) and backend.enabled():
                if version < (0, 7):
                    # Prior to 0.7 get_key_and_secret was an instance method
                    backend = backend(request, '/')
                # Create providers if they don't already exist
                key, secret = backend.get_key_and_secret()
                defaults = {
                    # 'or ""' normalizes backends that define the URL attribute
                    # as None; empty string means "not applicable" to all-access.
                    'request_token_url': getattr(backend, 'REQUEST_TOKEN_URL', '') or '',
                    'authorization_url': getattr(backend, 'AUTHORIZATION_URL', '') or '',
                    'access_token_url': getattr(backend, 'ACCESS_TOKEN_URL', '') or '',
                    'profile_url': '',
                    # Empty key/secret are stored as NULL rather than ''.
                    'consumer_key': key or None,
                    'consumer_secret': secret or None,
                }
                provider, created = Provider.objects.get_or_create(name=name, defaults=defaults)
                if created and verbosity > 0:
                    self.stdout.write('New provider created from "%s" backend.\n' % name)
| bsd-2-clause |
thomasem/nova | nova/tests/unit/console/test_type.py | 84 | 2048 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.console import type as ctype
from nova import test
class TypeTestCase(test.NoDBTestCase):
    """Unit tests for the console connection type classes."""

    def test_console(self):
        console = ctype.Console(host='127.0.0.1', port=8945)

        # The base class exposes host/port plus an internal access path.
        for attr in ('host', 'port', 'internal_access_path'):
            self.assertTrue(hasattr(console, attr))

        self.assertEqual('127.0.0.1', console.host)
        self.assertEqual(8945, console.port)
        self.assertIsNone(console.internal_access_path)

        expected = {
            'host': '127.0.0.1',
            'port': 8945,
            'internal_access_path': None,
            'token': 'a-token',
            'access_url': 'an-url',
        }
        self.assertEqual(expected,
                         console.get_connection_info('a-token', 'an-url'))

    def test_console_vnc(self):
        vnc_console = ctype.ConsoleVNC(host='127.0.0.1', port=8945)
        self.assertIsInstance(vnc_console, ctype.Console)

    def test_console_rdp(self):
        rdp_console = ctype.ConsoleRDP(host='127.0.0.1', port=8945)
        self.assertIsInstance(rdp_console, ctype.Console)

    def test_console_spice(self):
        spice_console = ctype.ConsoleSpice(host='127.0.0.1', port=8945,
                                           tlsPort=6547)
        self.assertIsInstance(spice_console, ctype.Console)
        # The TLS port must survive both as an attribute and in the
        # connection-info dict.
        self.assertEqual(6547, spice_console.tlsPort)
        info = spice_console.get_connection_info('a-token', 'an-url')
        self.assertEqual(6547, info['tlsPort'])

    def test_console_serial(self):
        serial_console = ctype.ConsoleSerial(host='127.0.0.1', port=8945)
        self.assertIsInstance(serial_console, ctype.Console)
| apache-2.0 |
yyamano/RESTx | src/python/restx/storageabstraction/file_storage.py | 1 | 4434 | """
RESTx: Sane, simple and effective data publishing and integration.
Copyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Base class from which all storage abstractions derive.
"""
# Python imports
import os
# RESTx imports
import restx.settings as settings
from org.mulesoft.restx.exception import *
from org.mulesoft.restx.component.api import FileStore
class FileStorage(FileStore):
    """
    File-system backed implementation of the FileStore interface.

    All files live in a single flat directory; an optional unique prefix
    ('<prefix>__<name>') namespaces file names within that directory.

    NOTE: this is Jython/Python-2 code ('except Exception, e' syntax,
    Java-side imports) — do not modernize without changing the runtime.
    """
    def __init__(self, storage_location, unique_prefix=""):
        """
        The unique prefix is used to create a namespace in a flat bucket.

        @param storage_location: Directory path, relative to the RESTx root dir.
        @param unique_prefix:    Optional namespace prefix for all file names.
        """
        self.storage_location = storage_location
        self.unique_prefix    = unique_prefix

    def _get_storage_location(self):
        # Resolve the storage directory against the configured root directory.
        return settings.get_root_dir()+self.storage_location

    def __make_filename(self, file_name):
        # Map an external file name to its on-disk path, applying the
        # '<prefix>__' namespace when a unique prefix is configured.
        if self.unique_prefix:
            name = "%s/%s__%s" % (self._get_storage_location(), self.unique_prefix, file_name)
        else:
            name = "%s/%s" % (self._get_storage_location(), file_name)
        return name

    def __remove_filename_prefix(self, file_name):
        # Inverse of __make_filename: strip '<prefix>__' from an on-disk name.
        # The '+ 2' accounts for the double-underscore separator.
        if self.unique_prefix:
            if file_name.startswith(self.unique_prefix):
                file_name = file_name[len(self.unique_prefix) + 2:]
        return file_name

    def loadFile(self, file_name):
        """
        Load the specified file from storage.

        @param file_name: Name of the selected file.
        @type file_name: string

        @return Buffer containing the file contents.
        @rtype string

        @raise RestxFileNotFoundException: if the file cannot be read.
        """
        try:
            f   = open(self.__make_filename(file_name), "r")
            buf = f.read()
            f.close()
        except Exception, e:
            # NOTE(review): ANY read error (permissions, IO) is reported as
            # 'not found' here, not only a missing file.
            raise RestxFileNotFoundException("File '%s' could not be found'" % (file_name))
        return buf

    def storeFile(self, file_name, data):
        """
        Store the specified file in storage.

        Overwrites any existing file of the same name.

        @param file_name: Name of the file.
        @type file_name: string

        @param data: Buffer containing the file contents.
        @type data: string
        """
        f = open(self.__make_filename(file_name), "w")
        f.write(data)
        f.close()

    def deleteFile(self, file_name):
        """
        Delete the specified file from storage.

        @param file_name: Name of the selected file.
        @type file_name: string

        @raise RestxFileNotFoundException:     if the file does not exist.
        @raise RestxPermissionDeniedException: if the file cannot be removed.
        @raise RestxException:                 for any other failure.
        """
        try:
            os.remove(self.__make_filename(file_name))
        except OSError, e:
            # errno 2 == ENOENT (missing), errno 13 == EACCES (permission).
            if e.errno == 2:
                raise RestxFileNotFoundException(file_name)
            elif e.errno == 13:
                raise RestxPermissionDeniedException(file_name)
            else:
                raise RestxException("Cannot delete file '%s (%s)'" % (file_name, str(e)))
        except Exception, e:
            raise RestxException("Cannot delete file '%s' (%s)" % (file_name, str(e)))

    def listFiles(self):
        """
        Return list of all files in the storage.

        @return: List of file names (unique prefix already stripped).
        @rtype: list
        """
        try:
            dir_list = os.listdir(self._get_storage_location())
            # Need to filter all those out, which are not part of our storage space
            if self.unique_prefix:
                our_files = [ name for name in dir_list if name.startswith(self.unique_prefix) ]
            else:
                our_files = dir_list
            no_prefix_dir_list = [ self.__remove_filename_prefix(name) for name in our_files ]
            return no_prefix_dir_list
        except Exception, e:
            raise RestxException("Problems getting file list from storage: " + str(e))
jollaman999/jolla-kernel_G_Gen2 | scripts/build-all.py | 1182 | 9486 | #! /usr/bin/env python
# Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = {}
def error(msg):
    """Print an error message to stderr without terminating the build."""
    text = "error: {0}\n".format(msg)
    sys.stderr.write(text)
def fail(msg):
    """Fail with a user-printed message"""
    # Report via error(), then terminate the whole script with a non-zero
    # exit status.
    error(msg)
    sys.exit(1)
def check_kernel():
    """Ensure that PWD is a kernel directory"""
    # A usable tree is identified by the top-level MAINTAINERS file plus the
    # MSM-specific Kconfig; bail out early when run from anywhere else.
    if (not os.path.isfile('MAINTAINERS') or
        not os.path.isfile('arch/arm/mach-msm/Kconfig')):
        fail("This doesn't seem to be an MSM kernel dir")
def check_build():
    """Ensure that the build directory is present, creating it if needed."""
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            # BUG FIX: 'errno' was referenced but never imported anywhere in
            # this script, so losing the isdir/makedirs race raised a
            # NameError instead of being tolerated. Import it locally.
            import errno
            if exc.errno == errno.EEXIST:
                # Another process created the directory first — that's fine.
                pass
            else:
                raise
def update_config(file, str):
    # Append a config option line (e.g. 'CONFIG_FOO=y') to a defconfig file.
    # NOTE(review): the parameters shadow the builtins 'file' and 'str';
    # kept as-is for signature compatibility.
    print 'Updating %s with \'%s\'\n' % (file, str)
    # Opened in append mode: the option is added at the end, not merged.
    defconfig = open(file, 'a')
    defconfig.write(str + '\n')
    defconfig.close()
def scan_configs():
    """Get the full list of defconfigs appropriate for this tree."""
    # Map short target name -> defconfig path; '[:-10]' strips the
    # '_defconfig' suffix from the basename.
    names = {}
    patterns = ('arch/arm/configs/[fm]sm[0-9-]*_defconfig',
                'arch/arm/configs/qsd*_defconfig',
                'arch/arm/configs/apq*_defconfig')
    for pattern in patterns:
        for path in glob.glob(pattern):
            names[os.path.basename(path)[:-10]] = path
    return names
class Builder:
    """Runs a build subprocess, teeing its output into a log file.

    Output is echoed to stdout verbatim in --verbose mode; otherwise one
    progress dot is printed per output line, 64 dots per row.
    """
    def __init__(self, logname):
        self.logname = logname
        self.fd = open(logname, 'w')

    def run(self, args):
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                env=make_env,
                bufsize=0,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
        count = 0
        # Read from the raw fd instead of iterating proc.stdout so that
        # partial output appears promptly rather than after full lines.
        rawfd = proc.stdout.fileno()
        while True:
            line = os.read(rawfd, 1024)
            if not line:
                break
            self.fd.write(line)
            self.fd.flush()
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                # One dot per newline in the chunk; wrap the row at 64 dots.
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print
                    sys.stdout.write('.')
                    sys.stdout.flush()
        print
        result = proc.wait()
        self.fd.close()
        # Returns the subprocess exit status (0 on success).
        return result
failed_targets = []
def build(target):
    """Configure and build one target in its own output directory.

    On failure, either aborts the script or (with --keep-going) records the
    target in the global 'failed_targets' list. With --configs or
    --updateconfigs, the resulting defconfig is copied back into the tree.
    """
    dest_dir = os.path.join(build_dir, target)
    log_name = '%s/log-%s.log' % (build_dir, target)
    print 'Building %s in %s log %s' % (target, dest_dir, log_name)
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    defconfig = 'arch/arm/configs/%s_defconfig' % target
    dotconfig = '%s/.config' % dest_dir
    savedefconfig = '%s/defconfig' % dest_dir
    shutil.copyfile(defconfig, dotconfig)
    # Generate the .config from the defconfig before building.
    devnull = open('/dev/null', 'r')
    subprocess.check_call(['make', 'O=%s' % dest_dir,
        '%s_defconfig' % target], env=make_env, stdin=devnull)
    devnull.close()
    if not all_options.updateconfigs:
        # Run the actual build, teeing output to the per-target log.
        build = Builder(log_name)
        result = build.run(['make', 'O=%s' % dest_dir] + make_command)
        if result != 0:
            if all_options.keep_going:
                failed_targets.append(target)
                fail_or_error = error
            else:
                fail_or_error = fail
            fail_or_error("Failed to build %s, see %s" % (target, build.logname))
    # Copy the defconfig back.
    if all_options.configs or all_options.updateconfigs:
        devnull = open('/dev/null', 'r')
        subprocess.check_call(['make', 'O=%s' % dest_dir,
            'savedefconfig'], env=make_env, stdin=devnull)
        devnull.close()
        shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
    """Build each requested target in turn; report --keep-going failures at the end."""
    print "Building %d target(s)" % len(targets)
    for target in targets:
        if all_options.updateconfigs:
            # Inject the requested config option into the tree's defconfig
            # before building this target.
            update_config(allconf[target], all_options.updateconfigs)
        build(target)
    # Non-empty only when --keep-going collected failures instead of aborting.
    if failed_targets:
        fail('\n '.join(["Failed targets:"] +
            [target for target in failed_targets]))
def main():
    """Parse command-line options and build the requested targets."""
    global make_command

    check_kernel()
    check_build()

    configs = scan_configs()

    usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
            dest='configs',
            help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
            dest='list',
            help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
            dest='verbose',
            help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
            dest='oldconfig',
            help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
            dest='updateconfigs',
            help="Update defconfigs with provided option setting, "
            "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
            help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
            dest='load_average',
            help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
            dest='keep_going', default=False,
            help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
            help='Build the indicated make target (default: %s)' %
            ' '.join(make_command))
    (options, args) = parser.parse_args()
    # Options are consulted globally, e.g. by build() and Builder.run().
    global all_options
    all_options = options

    if options.list:
        print "Available targets:"
        for target in configs.keys():
            print " %s" % target
        sys.exit(0)

    # --oldconfig overrides the make target entirely; otherwise any
    # explicit -m targets replace the default 'vmlinux modules'.
    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target

    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)

    # Positional arguments select which configs to build.
    if args == ['all']:
        build_many(configs, configs.keys())
    elif args == ['perf']:
        # Any config whose name mentions 'perf'.
        targets = []
        for t in configs.keys():
            if "perf" in t:
                targets.append(t)
        build_many(configs, targets)
    elif args == ['noperf']:
        targets = []
        for t in configs.keys():
            if "perf" not in t:
                targets.append(t)
        build_many(configs, targets)
    elif len(args) > 0:
        # Explicit list of targets; reject unknown names up front.
        targets = []
        for t in args:
            if t not in configs.keys():
                parser.error("Target '%s' not one of %s" % (t, configs.keys()))
            targets.append(t)
        build_many(configs, targets)
    else:
        parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 |
lahosken/pants | src/python/pants/backend/jvm/zinc/zinc_analysis_element_types.py | 9 | 5795 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.zinc.zinc_analysis_element import ZincAnalysisElement
class CompileSetup(ZincAnalysisElement):
    """Represents the 'compile setup' section of a zinc analysis file."""

    # NOTE(review): these headers are text strings while the sibling classes
    # below use bytes — verify against the parser's expectations.
    headers = ('output mode', 'output directories', 'classpath options', 'compile options', 'javac options',
               'compiler version', 'compile order', 'name hashing', 'skip Api storing', 'extra')

    # Output directories can obviously contain directories under pants_home. Compile/javac options may
    # refer to directories under pants_home.
    pants_home_anywhere = ('output directories', 'classpath options')

    def __init__(self, args):
        # Most sections in CompileSetup are arrays represented as maps from index to item:
        #   0 -> item0
        #   1 -> item1
        #   ...
        #
        # We ensure these are sorted, in case any reading code makes assumptions about the order.
        # These are very small sections, so there's no performance impact to sorting them.
        #
        # NOTE(review): 'headers' declares 10 sections but only 8 names are
        # unpacked below (no field for e.g. 'classpath options') — verify the
        # actual arity of args against the parser before relying on positions.
        super(CompileSetup, self).__init__(args, always_sort=True)
        (self.output_mode, self.output_dirs, self.compile_options, self.javac_options,
         self.compiler_version, self.compile_order, self.name_hashing, self.extra) = self.args

    def translate(self, token_translator):
        # Rewrite output-directory paths, then drop compiler plugin flags.
        self.translate_values(token_translator, self.output_dirs)
        for k, vs in list(self.compile_options.items()): # Make a copy, so we can del as we go.
            # Remove mentions of custom plugins.
            for v in vs:
                if v.startswith(b'-Xplugin') or v.startswith(b'-P'):
                    del self.compile_options[k]
class Relations(ZincAnalysisElement):
    """Represents the 'relations' sections of a zinc analysis file.

    Each relation maps a source file (or class name) to the entities it
    relates to: class files, jars, other sources, or fully-qualified names.
    """

    headers = (b'products', b'library dependencies', b'library class names',
               b'member reference internal dependencies', b'member reference external dependencies',
               b'inheritance internal dependencies', b'inheritance external dependencies',
               b'local internal inheritance dependencies', b'local external inheritance dependencies',
               b'class names', b'used names', b'product class names',)

    # Products are src->classfile, library dependencies are src->jarfile, source/internal dependencies are src->src,
    # TODO: Check if 'used names' really needs to be in pants_home_anywhere, or can it be in pants_home_prefix_only?
    pants_home_anywhere = (b'products', b'library dependencies',
                           b'inheritance internal dependencies')

    # External dependencies and class names are src->fqcn.
    pants_home_prefix_only = (b'library class names',
                              b'class names')

    # Library dependencies are src->jarfile, and that jarfile might be under the jvm home.
    java_home_anywhere = (b'library class names',
                          b'library dependencies',)

    def __init__(self, args):
        # NOTE(review): 'headers' declares 12 sections but 11 fields are
        # unpacked below — verify args arity against the parser before
        # relying on field positions.
        super(Relations, self).__init__(args)
        (self.src_prod, self.binary_dep,
         self.member_ref_internal_dep, self.member_ref_external_dep,
         self.inheritance_internal_dep, self.inheritance_external_dep,
         self.local_inheritance_internal_dep, self.local_inheritance_external_dep,
         self.classes, self.used, self.binary_classes) = self.args

    def translate(self, token_translator):
        # Both the keys (sources) and the values (targets) of every relation
        # may contain rebasable paths.
        for a in self.args:
            self.translate_values(token_translator, a)
            self.translate_keys(token_translator, a)
class Stamps(ZincAnalysisElement):
    """Represents the 'stamps' (file fingerprint) sections of a zinc analysis."""

    headers = (b'product stamps', b'source stamps', b'binary stamps')

    # All stamp sections are keyed on file paths that may live under pants_home.
    pants_home_anywhere = headers

    # Only these sections can reference jar files under the jvm home.
    # BUG FIX: this was '(b'binary stamps')' — a plain bytes object, not a
    # 1-tuple (missing trailing comma) — so membership tests against it were
    # bytes-substring tests. Every sibling class uses a tuple here.
    java_home_anywhere = (b'binary stamps',)

    def __init__(self, args):
        # NOTE(review): 'headers' lists 3 sections but 4 fields (including
        # 'classnames') are unpacked — verify against the parser's section
        # count before relying on positions.
        super(Stamps, self).__init__(args)
        (self.products, self.sources, self.binaries, self.classnames) = self.args

    def translate(self, token_translator):
        # All sections are keyed on paths; class-name values also need
        # translating.
        for a in self.args:
            self.translate_keys(token_translator, a)
        self.translate_values(token_translator, self.classnames)

    def __eq__(self, other):
        # Only the key set of classnames matters for equality, not its values.
        return (self.products, self.sources, self.binaries, set(self.classnames.keys())) == \
               (other.products, other.sources, other.binaries, set(other.classnames.keys()))

    def __hash__(self):
        # NOTE(review): this raises TypeError if ever called (dicts and key
        # views are unhashable); apparently unused in practice — confirm
        # before relying on hashability.
        return hash((self.products, self.sources, self.binaries, self.classnames.keys()))
class APIs(ZincAnalysisElement):
    """Represents the 'api' sections: serialized (base64) API blobs."""

    # Values are opaque base64 blobs, too large to inline when rendering.
    inline_vals = False

    headers = (b'internal apis', b'external apis')

    # Internal apis are src->blob, but external apis are fqcn->blob, so we don't need to rebase them.
    pants_home_prefix_only = (b'internal apis',)

    def __init__(self, args):
        super(APIs, self).__init__(args)
        (self.internal, self.external) = self.args

    def translate(self, token_translator):
        # Paths can appear inside the base64-encoded blobs as well as in the keys.
        for a in self.args:
            self.translate_base64_values(token_translator, a)
            self.translate_keys(token_translator, a)
class SourceInfos(ZincAnalysisElement):
    """Represents the 'source infos' section: per-source serialized metadata."""

    # Values are opaque base64 blobs, too large to inline when rendering.
    inline_vals = False

    headers = (b'source infos', )

    # Source infos are src->blob.
    pants_home_anywhere = headers

    def __init__(self, args):
        super(SourceInfos, self).__init__(args)
        (self.source_infos, ) = self.args

    def translate(self, token_translator):
        # Paths appear both in the source keys and inside the encoded blobs.
        for a in self.args:
            self.translate_base64_values(token_translator, a)
            self.translate_keys(token_translator, a)
class Compilations(ZincAnalysisElement):
    """Represents the 'compilations' section (records of past compile runs)."""

    headers = (b'compilations', )

    def __init__(self, args):
        super(Compilations, self).__init__(args)
        (self.compilations, ) = self.args
        # Compilations aren't useful and can accumulate to be huge and drag down parse times.
        # We clear them here to prevent them propagating through splits/merges.
        self.compilations.clear()

    def translate(self, token_translator):
        # Nothing to translate: the section is always emptied in __init__.
        pass
| apache-2.0 |
haf/puppet-dak | dak/dakdb/update45.py | 6 | 1821 | #!/usr/bin/env python
# coding=utf8
"""
Add tables for extra_src handling
@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2011 Mark Hymers <mhy@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
import psycopg2
from daklib.dak_exceptions import DBUpdateError
################################################################################
def do_update(self):
    """
    Add tables for extra_src handling
    """
    # Echo the module docstring so operators can see which update is running
    # (Python 2 print statement).
    print __doc__
    try:
        c = self.db.cursor()
        # bin_id -> src_id mapping: deleting a binary cascades away its
        # references, but a source that is still referenced cannot be
        # deleted (ON DELETE RESTRICT).
        c.execute("""
CREATE TABLE extra_src_references (
bin_id INT4 NOT NULL REFERENCES binaries(id) ON DELETE CASCADE,
src_id INT4 NOT NULL REFERENCES source(id) ON DELETE RESTRICT,
PRIMARY KEY (bin_id, src_id)
)""")
        # Bump the schema revision so this update is not re-applied.
        c.execute("UPDATE config SET value = '45' WHERE name = 'db_revision'")
        self.db.commit()
    except psycopg2.ProgrammingError as msg:
        # Roll back the partial transaction before reporting failure.
        self.db.rollback()
        raise DBUpdateError('Unable to apply update 45, rollback issued. Error message : %s' % (str(msg)))
| gpl-2.0 |
Nagalim/alp-collection | python/jsonrpc/modpywrapper.py | 68 | 1493 | import sys, os
from jsonrpc import ServiceHandler, ServiceException
class ServiceImplementaionNotFound(ServiceException):
    # Raised when no service module exists next to the requested file.
    # NOTE: the class name preserves the original misspelling
    # ("Implementaion"); renaming would break callers that catch it.
    pass
class ModPyServiceHandler(ServiceHandler):
    """JSON-RPC service handler adapter for mod_python.

    Locates the Python service module that sits next to the requested file
    and dispatches JSON-RPC calls from the request body to it.
    """

    def __init__(self, req):
        # The mod_python request object; the service itself is resolved
        # lazily in findServiceEndpoint().
        self.req = req
        ServiceHandler.__init__(self, None)

    def findServiceEndpoint(self, name):
        req = self.req

        # The service implementation is expected at '<moduleName>.py' in the
        # same directory as the requested file.
        (modulePath, fileName) = os.path.split(req.filename)
        (moduleName, ext) = os.path.splitext(fileName)

        if not os.path.exists(os.path.join(modulePath, moduleName + ".py")):
            raise ServiceImplementaionNotFound()
        else:
            if not modulePath in sys.path:
                sys.path.insert(0, modulePath)

            from mod_python import apache
            module = apache.import_module(moduleName, log=1)

            # The module may expose the service as a 'service' object, a
            # 'Service' class (instantiated fresh here), or be the service
            # itself.
            if hasattr(module, "service"):
                self.service = module.service
            elif hasattr(module, "Service"):
                self.service = module.Service()
            else:
                self.service = module

        return ServiceHandler.findServiceEndpoint(self, name)

    def handleRequest(self, data):
        # NOTE(review): the 'data' argument is ignored — the JSON-RPC payload
        # is always re-read from the request body below.
        self.req.content_type = "text/plain"
        data = self.req.read()
        resultData = ServiceHandler.handleRequest(self, data)
        self.req.write(resultData)
        self.req.flush()
def handler(req):
    # mod_python entry point: delegate the request to the JSON-RPC handler
    # and report success to Apache (errors surface as exceptions).
    from mod_python import apache
    ModPyServiceHandler(req).handleRequest(req)
    return apache.OK
| mit |
ravsa/python-for-android | src/jni/freetype/builds/mac/ascii2mpw.py | 830 | 1033 | #!/usr/bin/env python
import sys
import string
# Filter stdin -> stdout between ASCII C-style hex escapes and raw
# MPW (Mac OS Roman) bytes. Default: escapes -> raw bytes, LF -> CR.
# With '-r': the reverse mapping.
#
# PORTABILITY FIX: the original called string.replace(s, old, new), a
# function that was removed from the 'string' module in Python 3; the
# equivalent str.replace() method behaves identically on Python 2 and 3.
if len(sys.argv) == 1:
    for asc_line in sys.stdin.readlines():
        mpw_line = asc_line.replace("\\xA5", "\245")
        mpw_line = mpw_line.replace("\\xB6", "\266")
        mpw_line = mpw_line.replace("\\xC4", "\304")
        mpw_line = mpw_line.replace("\\xC5", "\305")
        mpw_line = mpw_line.replace("\\xFF", "\377")
        # Real newlines become CR first, then literal '\n' escapes become LF.
        mpw_line = mpw_line.replace("\n", "\r")
        mpw_line = mpw_line.replace("\\n", "\n")
        sys.stdout.write(mpw_line)
elif sys.argv[1] == "-r":
    for mpw_line in sys.stdin.readlines():
        # Inverse order of the forward pass: protect LF as '\n' before
        # turning CR back into a real newline.
        asc_line = mpw_line.replace("\n", "\\n")
        asc_line = asc_line.replace("\r", "\n")
        asc_line = asc_line.replace("\245", "\\xA5")
        asc_line = asc_line.replace("\266", "\\xB6")
        asc_line = asc_line.replace("\304", "\\xC4")
        asc_line = asc_line.replace("\305", "\\xC5")
        asc_line = asc_line.replace("\377", "\\xFF")
        sys.stdout.write(asc_line)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.