| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
iCarto/siga | extScripting/scripts/jython/Lib/encodings/cp1252.py | 4 | 2247 | """ Python Character Mapping Codec generated from 'CP1252.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x20ac, # EURO SIGN
0x0081: None, # UNDEFINED
0x0082: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x0083: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x2020, # DAGGER
0x0087: 0x2021, # DOUBLE DAGGER
0x0088: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT
0x0089: 0x2030, # PER MILLE SIGN
0x008a: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x008b: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x008c: 0x0152, # LATIN CAPITAL LIGATURE OE
0x008d: None, # UNDEFINED
0x008e: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x008f: None, # UNDEFINED
0x0090: None, # UNDEFINED
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: 0x02dc, # SMALL TILDE
0x0099: 0x2122, # TRADE MARK SIGN
0x009a: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x009b: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x009c: 0x0153, # LATIN SMALL LIGATURE OE
0x009d: None, # UNDEFINED
0x009e: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x009f: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
encoding_map[v] = k
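# Illustrative sketch (not part of the generated codec file): once this module
# is registered via the encodings package, the two maps invert each other for
# every defined code point, e.g.:
#
#   assert decoding_map[0x0080] == 0x20ac   # byte 0x80 decodes to EURO SIGN
#   assert encoding_map[0x20ac] == 0x0080   # ...and the euro encodes back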
| gpl-3.0 |
dgarros/ansible | test/units/module_utils/test_known_hosts.py | 63 | 5338 | # -*- coding: utf-8 -*-
# (c) 2015, Michael Scherer <mscherer@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import json
import os.path
import ansible.module_utils.basic
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock, patch
from ansible.module_utils import known_hosts
from units.mock.procenv import swap_stdin_and_argv
class TestAnsibleModuleKnownHosts(unittest.TestCase):
urls = {
'ssh://one.example.org/example.git': {
'is_ssh_url': True,
'get_fqdn': 'one.example.org',
'add_host_key_cmd': " -t rsa one.example.org",
'port': None,
},
'ssh+git://two.example.org/example.git': {
'is_ssh_url': True,
'get_fqdn': 'two.example.org',
'add_host_key_cmd': " -t rsa two.example.org",
'port': None,
},
'rsync://three.example.org/user/example.git': {
'is_ssh_url': False,
'get_fqdn': 'three.example.org',
'add_host_key_cmd': None, # not called for non-ssh urls
'port': None,
},
'git@four.example.org:user/example.git': {
'is_ssh_url': True,
'get_fqdn': 'four.example.org',
'add_host_key_cmd': " -t rsa four.example.org",
'port': None,
},
'git+ssh://five.example.org/example.git': {
'is_ssh_url': True,
'get_fqdn': 'five.example.org',
'add_host_key_cmd': " -t rsa five.example.org",
'port': None,
},
'ssh://six.example.org:21/example.org': {
# ssh on FTP Port?
'is_ssh_url': True,
'get_fqdn': 'six.example.org',
'add_host_key_cmd': " -t rsa -p 21 six.example.org",
'port': '21',
},
'ssh://[2001:DB8::abcd:abcd]/example.git': {
'is_ssh_url': True,
'get_fqdn': '[2001:DB8::abcd:abcd]',
'add_host_key_cmd': " -t rsa [2001:DB8::abcd:abcd]",
'port': None,
},
'ssh://[2001:DB8::abcd:abcd]:22/example.git': {
'is_ssh_url': True,
'get_fqdn': '[2001:DB8::abcd:abcd]',
'add_host_key_cmd': " -t rsa -p 22 [2001:DB8::abcd:abcd]",
'port': '22',
},
'username@[2001:DB8::abcd:abcd]/example.git': {
'is_ssh_url': True,
'get_fqdn': '[2001:DB8::abcd:abcd]',
'add_host_key_cmd': " -t rsa [2001:DB8::abcd:abcd]",
'port': None,
},
'username@[2001:DB8::abcd:abcd]:path/example.git': {
'is_ssh_url': True,
'get_fqdn': '[2001:DB8::abcd:abcd]',
'add_host_key_cmd': " -t rsa [2001:DB8::abcd:abcd]",
'port': None,
},
'ssh://internal.git.server:7999/repos/repo.git': {
'is_ssh_url': True,
'get_fqdn': 'internal.git.server',
'add_host_key_cmd': " -t rsa -p 7999 internal.git.server",
'port': '7999',
},
}
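# Note on the 'add_host_key_cmd' values above: test_add_host_key() below
# expects them appended verbatim to the mocked ssh-keyscan path, e.g.
# "/custom/path/ssh-keyscan -t rsa -p 7999 internal.git.server".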
def test_is_ssh_url(self):
for u in self.urls:
self.assertEqual(known_hosts.is_ssh_url(u), self.urls[u]['is_ssh_url'])
def test_get_fqdn_and_port(self):
for u in self.urls:
self.assertEqual(known_hosts.get_fqdn_and_port(u), (self.urls[u]['get_fqdn'], self.urls[u]['port']))
def test_add_host_key(self):
# Copied
args = json.dumps(dict(ANSIBLE_MODULE_ARGS={}))
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
with swap_stdin_and_argv(stdin_data=args):
ansible.module_utils.basic._ANSIBLE_ARGS = None
self.module = ansible.module_utils.basic.AnsibleModule(argument_spec=dict())
get_bin_path = Mock()
get_bin_path.return_value = keyscan_cmd = "/custom/path/ssh-keyscan"
self.module.get_bin_path = get_bin_path
run_command = Mock()
run_command.return_value = (0, "Needs output, otherwise thinks ssh-keyscan timed out'", "")
self.module.run_command = run_command
append_to_file = Mock()
append_to_file.return_value = (None,)
self.module.append_to_file = append_to_file
with patch('os.path.isdir', return_value=True):
with patch('os.path.exists', return_value=True):
for u in self.urls:
if self.urls[u]['is_ssh_url']:
known_hosts.add_host_key(self.module, self.urls[u]['get_fqdn'], port=self.urls[u]['port'])
run_command.assert_called_with(keyscan_cmd + self.urls[u]['add_host_key_cmd'])
| gpl-3.0 |
dentaku65/plugin.video.italyalacarta | lib/gdata/tlslite/FileObject.py | 359 | 6807 | """Class returned by TLSConnection.makefile()."""
class FileObject:
"""This class provides a file object interface to a
L{tlslite.TLSConnection.TLSConnection}.
Call makefile() on a TLSConnection to create a FileObject instance.
This class was copied, with minor modifications, from the
_fileobject class in socket.py. Note that fileno() is not
implemented."""
default_bufsize = 16384 #TREV: changed from 8192
def __init__(self, sock, mode='rb', bufsize=-1):
self._sock = sock
self.mode = mode # Not actually used in this version
if bufsize < 0:
bufsize = self.default_bufsize
self.bufsize = bufsize
self.softspace = False
if bufsize == 0:
self._rbufsize = 1
elif bufsize == 1:
self._rbufsize = self.default_bufsize
else:
self._rbufsize = bufsize
self._wbufsize = bufsize
self._rbuf = "" # A string
self._wbuf = [] # A list of strings
def _getclosed(self):
return self._sock is not None
closed = property(_getclosed, doc="True if the file is closed")
def close(self):
try:
if self._sock:
for result in self._sock._decrefAsync(): #TREV
pass
finally:
self._sock = None
def __del__(self):
try:
self.close()
except:
# close() may fail if __init__ didn't complete
pass
def flush(self):
if self._wbuf:
buffer = "".join(self._wbuf)
self._wbuf = []
self._sock.sendall(buffer)
#def fileno(self):
# raise NotImplementedError() #TREV
def write(self, data):
data = str(data) # XXX Should really reject non-string non-buffers
if not data:
return
self._wbuf.append(data)
if (self._wbufsize == 0 or
self._wbufsize == 1 and '\n' in data or
self._get_wbuf_len() >= self._wbufsize):
self.flush()
def writelines(self, list):
# XXX We could do better here for very long lists
# XXX Should really reject non-string non-buffers
self._wbuf.extend(filter(None, map(str, list)))
if (self._wbufsize <= 1 or
self._get_wbuf_len() >= self._wbufsize):
self.flush()
def _get_wbuf_len(self):
buf_len = 0
for x in self._wbuf:
buf_len += len(x)
return buf_len
def read(self, size=-1):
data = self._rbuf
if size < 0:
# Read until EOF
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
if self._rbufsize <= 1:
recv_size = self.default_bufsize
else:
recv_size = self._rbufsize
while True:
data = self._sock.recv(recv_size)
if not data:
break
buffers.append(data)
return "".join(buffers)
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
left = size - buf_len
recv_size = max(self._rbufsize, left)
data = self._sock.recv(recv_size)
if not data:
break
buffers.append(data)
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readline(self, size=-1):
data = self._rbuf
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
assert data == ""
buffers = []
recv = self._sock.recv
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self._sock.recv(self._rbufsize)
if not data:
break
buffers.append(data)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
return "".join(buffers)
else:
# Read until size bytes or \n or EOF seen, whichever comes first
nl = data.find('\n', 0, size)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self._sock.recv(self._rbufsize)
if not data:
break
buffers.append(data)
left = size - buf_len
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readlines(self, sizehint=0):
total = 0
list = []
while True:
line = self.readline()
if not line:
break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return list
# Iterator protocols
def __iter__(self):
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
| gpl-3.0 |
leeoniya/pdfquery | tests/tests.py | 1 | 6649 | # to run:
# pip install unittest2
# unit2 discover
#
# to debug:
# pip install nose
# nosetests --pdb
import StringIO
import sys
import pdfquery
import unittest2
from pdfquery.cache import FileCache
class TestPDFQuery(unittest2.TestCase):
"""
Various tests based on the IRS_1040A sample doc.
"""
@classmethod
def setUpClass(cls):
cls.pdf = pdfquery.PDFQuery(
"tests/samples/IRS_1040A.pdf",
parse_tree_cacher=FileCache("/tmp/") if sys.argv[1] == 'cache' else None,
)
cls.pdf.load()
def test_xml_conversion(self):
"""
Test that converted XML hasn't changed from saved version.
"""
# get current XML for sample file
tree_string = StringIO.StringIO()
self.pdf.tree.write(tree_string, pretty_print=True, encoding="utf-8")
tree_string = tree_string.getvalue()
# get previous XML
# this varies by Python version, because the float handling isn't quite
# the same
comparison_file = "tests/saved_output/IRS_1040A_output%s.xml" % (
"_python_2.6" if sys.version_info[0] == 2 and sys.version_info[1] < 7 else "")
with open(comparison_file, 'rb') as f:
saved_string = f.read()
# compare current to previous
if tree_string != saved_string:
with open("tests/failed_output.xml", "wb") as out:
out.write(tree_string)
self.fail("XML conversion of sample pdf has changed! Compare %s to "
"tests/failed_output.xml." % comparison_file)
def test_selectors(self):
"""
Test the :contains and :in_bbox selectors.
"""
label = self.pdf.pq('LTTextLineHorizontal:contains("Your first name '
'and initial")')
self.assertEqual(len(label), 1)
left_corner = float(label.attr('x0'))
self.assertEqual(left_corner, 143.651)
bottom_corner = float(label.attr('y0'))
self.assertEqual(bottom_corner, 714.694)
name = self.pdf.pq('LTTextLineHorizontal:in_bbox("%s, %s, %s, %s")' %
(left_corner,
bottom_corner - 30,
left_corner + 150,
bottom_corner)
).text()
self.assertEqual(name, "John E.")
def test_extract(self):
"""
Test the extract() function.
"""
values = self.pdf.extract([
('with_parent', 'LTPage[pageid="1"]'),
('with_formatter', 'text'),
('last_name', 'LTTextLineHorizontal:in_bbox("315,680,395,700")'),
('spouse', 'LTTextLineHorizontal:in_bbox("170,650,220,680")'),
('with_parent', 'LTPage[pageid="2"]'),
('oath', 'LTTextLineHorizontal:contains("perjury")',
lambda match: match.text()[:30] + "..."),
('year', 'LTTextLineHorizontal:contains("Form 1040A (")',
lambda match: int(match.text()[-5:-1]))
])
self.assertDictEqual(values, {
'last_name': 'Michaels',
'spouse': 'Susan R.',
'oath': u'Under penalties of perjury, I ...',
'year': 2007
})
def test_page_numbers(self):
self.assertEqual(self.pdf.tree.getroot()[0].get('page_label'), '1')
class TestDocInfo(unittest2.TestCase):
def test_docinfo(self):
doc_info_results = [
["tests/samples/bug11.pdf",
{'Producer': 'Mac OS X 10.9.3 Quartz PDFContext',
'Title': u'\u262d\U0001f61c\U0001f4a9Unicode is fun!',
'Author': 'Russkel', 'Creator': 'Firefox',
'ModDate': "D:20140528141914+08'00'",
'CreationDate': 'D:20140528061106Z', 'Subject': ''}],
["tests/samples/bug15.pdf",
{'Producer': 'Mac OS X 10.9.3 Quartz PDFContext',
'Author': 'Brepols Publishers',
'Creator': 'PDFsharp 1.2.1269-g (www.pdfsharp.com)',
'AAPL_Keywords': "[u'Brepols', u'Publishers', u'CTLO']",
'Title': 'Exporter',
'ModDate': "D:20140614192741Z00'00'",
'Keywords': 'Brepols, Publishers, CTLO',
'CreationDate': "D:20140614192741Z00'00'",
'Subject': 'Extrait de la Library of Latin Texts - Series A'}],
["tests/samples/bug17.pdf",
{'CreationDate': 'D:20140328164512Z',
'Creator': 'Adobe InDesign CC (Macintosh)',
'ModDate': 'D:20140328164513Z',
'Producer': 'Adobe PDF Library 10.0.1', 'Trapped': '/False'}]
]
for file_path, expected_results in doc_info_results:
pdf = pdfquery.PDFQuery(file_path)
pdf.load(None)
self.assertDictEqual(
dict(pdf.tree.getroot().attrib),
expected_results
)
class TestUnicode(unittest2.TestCase):
def test_unicode_text(self):
pdf = pdfquery.PDFQuery("tests/samples/bug18.pdf")
pdf.load()
self.assertEqual(
pdf.pq('LTTextLineHorizontal:contains("Hop Hing Oils")').text(),
(u'5 Hop Hing Oils and Fats (Hong Kong) Ltd \uf06c '
u'\u7279\u5bf6\u7cbe\u88fd\u8c6c\u6cb9')
)
class TestAnnotations(unittest2.TestCase):
"""
Ensure that annotations such as links are getting added to the PDFs
properly, as discussed in issue #28.
"""
@classmethod
def setUpClass(cls):
cls.pdf = pdfquery.PDFQuery(
"tests/samples/bug28.pdf",
parse_tree_cacher=FileCache("/tmp/") if sys.argv[1] == 'cache' else None,
)
cls.pdf.load()
def test_xml_conversion(self):
"""
Test that converted XML hasn't changed from saved version.
"""
# get current XML for sample file
tree_string = StringIO.StringIO()
self.pdf.tree.write(tree_string, pretty_print=True, encoding="utf-8")
tree_string = tree_string.getvalue()
# get previous XML
comparison_file = 'tests/saved_output/bug28.xml'
with open(comparison_file, 'rb') as f:
saved_string = f.read()
# compare current to previous
if tree_string != saved_string:
with open("tests/failed_output.xml", "wb") as out:
out.write(tree_string)
self.fail("XML conversion of sample pdf has changed! Compare %s "
"to tests/failed_output.xml." % comparison_file)
if __name__ == '__main__':
unittest2.main()
| mit |
vapor-ware/synse-server | tests/unit/cmd/test_write.py | 1 | 12864 | """Unit tests for the ``synse_server.cmd.write`` module."""
import asynctest
import pytest
from synse_grpc import api
from synse_server import cmd, errors
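# Pattern used by every case below (a summary comment, not from the source):
# the gRPC write call is stubbed out with mocker.patch, Synse's async cache
# helpers with asynctest.patch, and each test asserts both the returned
# payload and the exact arguments the mocks were called with.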
@pytest.mark.asyncio
async def test_write_async_plugin_not_found(mocker):
# Mock test data
mock_write_async = mocker.patch(
'synse_grpc.client.PluginClientV3.write_async',
)
# --- Test case -----------------------------
with asynctest.patch('synse_server.cache.get_plugin') as mock_get:
mock_get.return_value = None
with pytest.raises(errors.NotFound):
await cmd.write_async('123', {})
mock_get.assert_called_once()
mock_get.assert_called_with('123')
mock_write_async.assert_not_called()
@pytest.mark.asyncio
async def test_write_async_get_plugin_error(mocker):
# Mock test data
mock_write_async = mocker.patch(
'synse_grpc.client.PluginClientV3.write_async',
)
# --- Test case -----------------------------
with asynctest.patch('synse_server.cache.get_plugin') as mock_get:
mock_get.side_effect = ValueError()
with pytest.raises(ValueError):
await cmd.write_async('123', {})
mock_get.assert_called_once()
mock_get.assert_called_with('123')
mock_write_async.assert_not_called()
@pytest.mark.asyncio
async def test_write_async_error(mocker, simple_plugin):
# Mock test data
mock_write_async = mocker.patch(
'synse_grpc.client.PluginClientV3.write_async',
side_effect=ValueError(),
)
# --- Test case -----------------------------
with asynctest.patch('synse_server.cache.get_plugin') as mock_get:
with asynctest.patch('synse_server.cache.add_transaction') as mock_add:
mock_get.return_value = simple_plugin
with pytest.raises(errors.ServerError):
await cmd.write_async('123', {'action': 'foo'})
mock_get.assert_called_once()
mock_get.assert_called_with('123')
mock_write_async.assert_called_once()
mock_write_async.assert_called_with(device_id='123', data={'action': 'foo'})
mock_add.assert_not_called()
@pytest.mark.asyncio
async def test_write_async_add_transaction_error(mocker, simple_plugin):
# Mock test data
mock_write_async = mocker.patch(
'synse_grpc.client.PluginClientV3.write_async',
return_value=[
api.V3WriteTransaction(
id='txn-1',
device='abc',
context=api.V3WriteData(
action='foo',
),
timeout='5s',
),
],
)
# --- Test case -----------------------------
with asynctest.patch('synse_server.cache.get_plugin') as mock_get:
with asynctest.patch('synse_server.cache.add_transaction') as mock_add:
mock_get.return_value = simple_plugin
mock_add.side_effect = ValueError()
with pytest.raises(errors.ServerError):
await cmd.write_async('abc', {'action': 'foo'})
mock_get.assert_called_once()
mock_get.assert_called_with('abc')
mock_write_async.assert_called_once()
mock_write_async.assert_called_with(device_id='abc', data={'action': 'foo'})
mock_add.assert_called_once()
mock_add.assert_called_with('txn-1', 'abc', '123')
@pytest.mark.asyncio
async def test_write_async_ok(mocker, simple_plugin):
# Mock test data
mock_write_async = mocker.patch(
'synse_grpc.client.PluginClientV3.write_async',
return_value=[
api.V3WriteTransaction(
id='txn-1',
device='abc',
context=api.V3WriteData(
action='foo',
),
timeout='5s',
),
api.V3WriteTransaction(
id='txn-2',
device='abc',
context=api.V3WriteData(
action='bar',
),
timeout='5s',
),
api.V3WriteTransaction(
id='txn-3',
device='abc',
context=api.V3WriteData(
action='baz',
),
timeout='5s',
),
],
)
# --- Test case -----------------------------
with asynctest.patch('synse_server.cache.get_plugin') as mock_get:
with asynctest.patch('synse_server.cache.add_transaction') as mock_add:
mock_get.return_value = simple_plugin
resp = await cmd.write_async(
device_id='abc',
payload=[
{'action': 'foo'},
{'action': 'bar'},
{'action': 'baz'},
],
)
assert resp == [
{
'id': 'txn-1',
'device': 'abc',
'timeout': '5s',
'context': {
'action': 'foo',
'data': '',
},
},
{
'id': 'txn-2',
'device': 'abc',
'timeout': '5s',
'context': {
'action': 'bar',
'data': '',
},
},
{
'id': 'txn-3',
'device': 'abc',
'timeout': '5s',
'context': {
'action': 'baz',
'data': '',
},
},
]
mock_get.assert_called_once()
mock_get.assert_called_with('abc')
mock_write_async.assert_called_once()
mock_write_async.assert_called_with(
device_id='abc',
data=[
{'action': 'foo'},
{'action': 'bar'},
{'action': 'baz'},
])
mock_add.assert_called()
mock_add.assert_has_calls([
mocker.call('txn-1', 'abc', '123'),
mocker.call('txn-2', 'abc', '123'),
mocker.call('txn-3', 'abc', '123'),
])
@pytest.mark.asyncio
async def test_write_sync_plugin_not_found(mocker):
# Mock test data
mock_write_sync = mocker.patch(
'synse_grpc.client.PluginClientV3.write_sync',
)
# --- Test case -----------------------------
with asynctest.patch('synse_server.cache.get_plugin') as mock_get:
mock_get.return_value = None
with pytest.raises(errors.NotFound):
await cmd.write_sync('123', {})
mock_get.assert_called_once()
mock_get.assert_called_with('123')
mock_write_sync.assert_not_called()
@pytest.mark.asyncio
async def test_write_sync_get_plugin_error(mocker):
# Mock test data
mock_write_sync = mocker.patch(
'synse_grpc.client.PluginClientV3.write_sync',
)
# --- Test case -----------------------------
with asynctest.patch('synse_server.cache.get_plugin') as mock_get:
mock_get.side_effect = ValueError()
with pytest.raises(ValueError):
await cmd.write_sync('123', {})
mock_get.assert_called_once()
mock_get.assert_called_with('123')
mock_write_sync.assert_not_called()
@pytest.mark.asyncio
async def test_write_sync_error(mocker, simple_plugin):
# Mock test data
mock_write_sync = mocker.patch(
'synse_grpc.client.PluginClientV3.write_sync',
side_effect=ValueError(),
)
# --- Test case -----------------------------
with asynctest.patch('synse_server.cache.get_plugin') as mock_get:
with asynctest.patch('synse_server.cache.add_transaction') as mock_add:
mock_get.return_value = simple_plugin
with pytest.raises(errors.ServerError):
await cmd.write_sync('123', {'action': 'foo'})
mock_get.assert_called_once()
mock_get.assert_called_with('123')
mock_write_sync.assert_called_once()
mock_write_sync.assert_called_with(device_id='123', data={'action': 'foo'})
mock_add.assert_not_called()
@pytest.mark.asyncio
async def test_write_sync_add_transaction_error(mocker, simple_plugin):
# Mock test data
mock_write_sync = mocker.patch(
'synse_grpc.client.PluginClientV3.write_sync',
return_value=[
api.V3TransactionStatus(
id='txn-1',
created='2019-04-22T13:30:00Z',
updated='2019-04-22T13:31:00Z',
status=api.DONE,
context=api.V3WriteData(
action='foo',
)
),
],
)
# --- Test case -----------------------------
with asynctest.patch('synse_server.cache.get_plugin') as mock_get:
with asynctest.patch('synse_server.cache.add_transaction') as mock_add:
mock_get.return_value = simple_plugin
mock_add.side_effect = ValueError()
with pytest.raises(errors.ServerError):
await cmd.write_sync('abc', {'action': 'foo'})
mock_get.assert_called_once()
mock_get.assert_called_with('abc')
mock_write_sync.assert_called_once()
mock_write_sync.assert_called_with(device_id='abc', data={'action': 'foo'})
mock_add.assert_called_once()
mock_add.assert_called_with('txn-1', 'abc', '123')
@pytest.mark.asyncio
async def test_write_sync_ok(mocker, simple_plugin):
# Mock test data
mock_write_sync = mocker.patch(
'synse_grpc.client.PluginClientV3.write_sync',
return_value=[
api.V3TransactionStatus(
id='txn-1',
created='2019-04-22T13:30:00Z',
updated='2019-04-22T13:31:00Z',
status=api.DONE,
context=api.V3WriteData(
action='foo',
)
),
api.V3TransactionStatus(
id='txn-2',
created='2019-04-22T13:30:00Z',
updated='2019-04-22T13:31:00Z',
status=api.DONE,
context=api.V3WriteData(
action='bar',
)
),
api.V3TransactionStatus(
id='txn-3',
created='2019-04-22T13:30:00Z',
updated='2019-04-22T13:31:00Z',
status=api.DONE,
context=api.V3WriteData(
action='baz',
)
),
],
)
# --- Test case -----------------------------
with asynctest.patch('synse_server.cache.get_plugin') as mock_get:
with asynctest.patch('synse_server.cache.add_transaction') as mock_add:
mock_get.return_value = simple_plugin
resp = await cmd.write_sync(
device_id='abc',
payload=[
{'action': 'foo'},
{'action': 'bar'},
{'action': 'baz'},
],
)
assert resp == [
{
'id': 'txn-1',
'device': 'abc',
'created': '2019-04-22T13:30:00Z',
'updated': '2019-04-22T13:31:00Z',
'message': '',
'timeout': '',
'status': 'DONE',
'context': {
'action': 'foo',
'data': '',
},
},
{
'id': 'txn-2',
'device': 'abc',
'created': '2019-04-22T13:30:00Z',
'updated': '2019-04-22T13:31:00Z',
'message': '',
'timeout': '',
'status': 'DONE',
'context': {
'action': 'bar',
'data': '',
},
},
{
'id': 'txn-3',
'device': 'abc',
'created': '2019-04-22T13:30:00Z',
'updated': '2019-04-22T13:31:00Z',
'message': '',
'timeout': '',
'status': 'DONE',
'context': {
'action': 'baz',
'data': '',
},
},
]
mock_get.assert_called_once()
mock_get.assert_called_with('abc')
mock_write_sync.assert_called_once()
mock_write_sync.assert_called_with(
device_id='abc',
data=[
{'action': 'foo'},
{'action': 'bar'},
{'action': 'baz'},
])
mock_add.assert_called()
mock_add.assert_has_calls([
mocker.call('txn-1', 'abc', '123'),
mocker.call('txn-2', 'abc', '123'),
mocker.call('txn-3', 'abc', '123'),
])
| gpl-3.0 |
ymcagodme/Norwalk-Judo | django/core/servers/basehttp.py | 153 | 25156 | """
BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21).
Adapted from wsgiref.simple_server: http://svn.eby-sarna.com/wsgiref/
This is a simple server for use in testing or debugging Django apps. It hasn't
been reviewed for security issues. Don't use it in production.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import os
import re
import socket
import sys
import urllib
import warnings
from django.core.management.color import color_style
from django.utils.http import http_date
from django.utils._os import safe_join
from django.views import static
from django.contrib.staticfiles import handlers
__version__ = "0.1"
__all__ = ['WSGIServer','WSGIRequestHandler']
server_version = "WSGIServer/" + __version__
sys_version = "Python/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class WSGIServerException(Exception):
pass
class FileWrapper(object):
"""Wrapper to convert file-like objects to iterables"""
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike,'close'):
self.close = filelike.close
def __getitem__(self,key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
def __iter__(self):
return self
def next(self):
data = self.filelike.read(self.blksize)
if data:
return data
raise StopIteration
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
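# For example (the default quote=1 always quotes a non-empty value):
#   _formatparam('filename', 'bud.gif')  -> 'filename="bud.gif"'
#   _formatparam('attachment')           -> 'attachment'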
class Headers(object):
"""Manage a collection of HTTP response headers"""
def __init__(self,headers):
if not isinstance(headers, list):
raise TypeError("Headers must be a list of name/value tuples")
self._headers = headers
def __len__(self):
"""Return the total number of headers, including duplicates."""
return len(self._headers)
def __setitem__(self, name, val):
"""Set the value of a header."""
del self[name]
self._headers.append((name, val))
def __delitem__(self,name):
"""Delete all occurrences of a header, if present.
Does *not* raise an exception if the header is missing.
"""
name = name.lower()
self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]
def __getitem__(self,name):
"""Get the first header value for 'name'
Return None if the header is missing instead of raising an exception.
Note that if the header appeared multiple times, exactly which
occurrence gets returned is undefined. Use get_all() to get all
the values matching a header field name.
"""
return self.get(name)
def has_key(self, name):
"""Return true if the message contains the header."""
return self.get(name) is not None
__contains__ = has_key
def get_all(self, name):
"""Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original header
list or were added to this instance, and may contain duplicates. Any
fields deleted and re-inserted are always appended to the header list.
If no fields exist with the given name, returns an empty list.
"""
name = name.lower()
return [kv[1] for kv in self._headers if kv[0].lower()==name]
def get(self,name,default=None):
"""Get the first header value for 'name', or return 'default'"""
name = name.lower()
for k,v in self._headers:
if k.lower()==name:
return v
return default
def keys(self):
"""Return a list of all the header field names.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [k for k, v in self._headers]
def values(self):
"""Return a list of all header values.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [v for k, v in self._headers]
def items(self):
"""Get all the header fields and values.
These will be sorted in the order they were in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return self._headers[:]
def __repr__(self):
return "Headers(%s)" % `self._headers`
def __str__(self):
"""str() returns the formatted headers, complete with end line,
suitable for direct HTTP transmission."""
return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['',''])
def setdefault(self,name,value):
"""Return first matching header value for 'name', or 'value'
If there is no header named 'name', add a new header with name 'name'
and value 'value'."""
result = self.get(name)
if result is None:
self._headers.append((name,value))
return value
else:
return result
def add_header(self, _name, _value, **_params):
"""Extended header setting.
_name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added.
Example:
h.add_header('content-disposition', 'attachment', filename='bud.gif')
Note that unlike the corresponding 'email.Message' method, this does
*not* handle '(charset, language, value)' tuples: all values must be
strings or None.
"""
parts = []
if _value is not None:
parts.append(_value)
for k, v in _params.items():
if v is None:
parts.append(k.replace('_', '-'))
else:
parts.append(_formatparam(k.replace('_', '-'), v))
self._headers.append((_name, "; ".join(parts)))
def guess_scheme(environ):
"""Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
"""
if environ.get("HTTPS") in ('yes','on','1'):
return 'https'
else:
return 'http'
_hop_headers = {
'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
'upgrade':1
}
def is_hop_by_hop(header_name):
"""Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
return header_name.lower() in _hop_headers
class ServerHandler(object):
"""Manage the invocation of a WSGI application"""
# Configuration parameters; can override per-subclass or per-instance
wsgi_version = (1,0)
wsgi_multithread = True
wsgi_multiprocess = True
wsgi_run_once = False
origin_server = True # We are transmitting direct to client
http_version = "1.0" # Version that should be used for response
server_software = software_version
# os_environ is used to supply configuration from the OS environment:
# by default it's a copy of 'os.environ' as of import time, but you can
# override this in e.g. your __init__ method.
os_environ = dict(os.environ.items())
# Collaborator classes
wsgi_file_wrapper = FileWrapper # set to None to disable
headers_class = Headers # must be a Headers-like class
# Error handling (also per-subclass or per-instance)
traceback_limit = None # Print entire traceback to self.get_stderr()
error_status = "500 INTERNAL SERVER ERROR"
error_headers = [('Content-Type','text/plain')]
# State variables (don't mess with these)
status = result = None
headers_sent = False
headers = None
bytes_sent = 0
def __init__(self, stdin, stdout, stderr, environ, multithread=True,
multiprocess=False):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.base_env = environ
self.wsgi_multithread = multithread
self.wsgi_multiprocess = multiprocess
def run(self, application):
"""Invoke the application"""
# Note to self: don't move the close()! Asynchronous servers shouldn't
# call close() from finish_response(), so if you close() anywhere but
# the double-error branch here, you'll break asynchronous servers by
# prematurely closing. Async servers must return from 'run()' without
# closing if there might still be output to iterate over.
try:
self.setup_environ()
self.result = application(self.environ, self.start_response)
self.finish_response()
except:
try:
self.handle_error()
except:
# If we get an error handling an error, just give up already!
self.close()
raise # ...and let the actual server figure it out.
def setup_environ(self):
"""Set up the environment for one request"""
env = self.environ = self.os_environ.copy()
self.add_cgi_vars()
env['wsgi.input'] = self.get_stdin()
env['wsgi.errors'] = self.get_stderr()
env['wsgi.version'] = self.wsgi_version
env['wsgi.run_once'] = self.wsgi_run_once
env['wsgi.url_scheme'] = self.get_scheme()
env['wsgi.multithread'] = self.wsgi_multithread
env['wsgi.multiprocess'] = self.wsgi_multiprocess
if self.wsgi_file_wrapper is not None:
env['wsgi.file_wrapper'] = self.wsgi_file_wrapper
if self.origin_server and self.server_software:
env.setdefault('SERVER_SOFTWARE',self.server_software)
def finish_response(self):
"""
Send any iterable data, then close self and the iterable
Subclasses intended for use in asynchronous servers will want to
redefine this method, such that it sets up callbacks in the event loop
to iterate over the data, and to call 'self.close()' once the response
is finished.
"""
if not self.result_is_file() or not self.sendfile():
for data in self.result:
self.write(data)
self.finish_content()
self.close()
def get_scheme(self):
"""Return the URL scheme being used"""
return guess_scheme(self.environ)
def set_content_length(self):
"""Compute Content-Length or switch to chunked encoding if possible"""
try:
blocks = len(self.result)
except (TypeError, AttributeError, NotImplementedError):
pass
else:
if blocks==1:
self.headers['Content-Length'] = str(self.bytes_sent)
return
# XXX Try for chunked encoding if origin server and client is 1.1
def cleanup_headers(self):
"""Make any necessary header changes or defaults
Subclasses can extend this to add other defaults.
"""
if 'Content-Length' not in self.headers:
self.set_content_length()
def start_response(self, status, headers,exc_info=None):
"""'start_response()' callable as specified by PEP 333"""
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None # avoid dangling circular ref
elif self.headers is not None:
raise AssertionError("Headers already set!")
assert isinstance(status, str),"Status must be a string"
assert len(status)>=4,"Status must be at least 4 characters"
assert int(status[:3]),"Status message must begin w/3-digit code"
assert status[3]==" ", "Status message must have a space after code"
if __debug__:
for name,val in headers:
assert isinstance(name, str),"Header names must be strings"
assert isinstance(val, str),"Header values must be strings"
assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
self.status = status
self.headers = self.headers_class(headers)
return self.write
def send_preamble(self):
"""Transmit version/status/date/server, via self._write()"""
if self.origin_server:
if self.client_is_modern():
self._write('HTTP/%s %s\r\n' % (self.http_version,self.status))
if 'Date' not in self.headers:
self._write(
'Date: %s\r\n' % http_date()
)
if self.server_software and 'Server' not in self.headers:
self._write('Server: %s\r\n' % self.server_software)
else:
self._write('Status: %s\r\n' % self.status)
def write(self, data):
"""'write()' callable as specified by PEP 333"""
assert isinstance(data, str), "write() argument must be string"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
# If data is too large, socket will choke, so write chunks no larger
# than 32MB at a time.
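# (33554432 bytes == 32 * 1024 * 1024, i.e. 32MB.)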
length = len(data)
if length > 33554432:
offset = 0
while offset < length:
chunk_size = min(33554432, length)
self._write(data[offset:offset+chunk_size])
self._flush()
offset += chunk_size
else:
self._write(data)
self._flush()
def sendfile(self):
"""Platform-specific file transmission
Override this method in subclasses to support platform-specific
file transmission. It is only called if the application's
return iterable ('self.result') is an instance of
'self.wsgi_file_wrapper'.
This method should return a true value if it was able to actually
transmit the wrapped file-like object using a platform-specific
approach. It should return a false value if normal iteration
should be used instead. An exception can be raised to indicate
that transmission was attempted, but failed.
NOTE: this method should call 'self.send_headers()' if
'self.headers_sent' is false and it is going to attempt direct
transmission of the file.
"""
return False # No platform-specific transmission by default
def finish_content(self):
"""Ensure headers and content have both been sent"""
if not self.headers_sent:
self.headers['Content-Length'] = "0"
self.send_headers()
else:
pass # XXX check if content-length was too short?
def close(self):
try:
self.request_handler.log_request(self.status.split(' ',1)[0], self.bytes_sent)
finally:
try:
if hasattr(self.result,'close'):
self.result.close()
finally:
self.result = self.headers = self.status = self.environ = None
self.bytes_sent = 0; self.headers_sent = False
def send_headers(self):
"""Transmit headers to the client, via self._write()"""
self.cleanup_headers()
self.headers_sent = True
if not self.origin_server or self.client_is_modern():
self.send_preamble()
self._write(str(self.headers))
def result_is_file(self):
"""True if 'self.result' is an instance of 'self.wsgi_file_wrapper'"""
wrapper = self.wsgi_file_wrapper
return wrapper is not None and isinstance(self.result,wrapper)
def client_is_modern(self):
"""True if client can accept status and headers"""
return self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9'
def log_exception(self,exc_info):
"""Log the 'exc_info' tuple in the server log
Subclasses may override to retarget the output or change its format.
"""
try:
from traceback import print_exception
stderr = self.get_stderr()
print_exception(
exc_info[0], exc_info[1], exc_info[2],
self.traceback_limit, stderr
)
stderr.flush()
finally:
exc_info = None
def handle_error(self):
"""Log current error, and send error output to client if possible"""
self.log_exception(sys.exc_info())
if not self.headers_sent:
self.result = self.error_output(self.environ, self.start_response)
self.finish_response()
# XXX else: attempt advanced recovery techniques for HTML or text?
def error_output(self, environ, start_response):
import traceback
start_response(self.error_status, self.error_headers[:], sys.exc_info())
return ['\n'.join(traceback.format_exception(*sys.exc_info()))]
# Pure abstract methods; *must* be overridden in subclasses
def _write(self,data):
self.stdout.write(data)
self._write = self.stdout.write
def _flush(self):
self.stdout.flush()
self._flush = self.stdout.flush
def get_stdin(self):
return self.stdin
def get_stderr(self):
return self.stderr
def add_cgi_vars(self):
self.environ.update(self.base_env)
class WSGIServer(HTTPServer):
"""BaseHTTPServer that implements the Python WSGI protocol"""
application = None
def __init__(self, *args, **kwargs):
if kwargs.pop('ipv6', False):
self.address_family = socket.AF_INET6
HTTPServer.__init__(self, *args, **kwargs)
def server_bind(self):
"""Override server_bind to store the server name."""
try:
HTTPServer.server_bind(self)
except Exception, e:
raise WSGIServerException(e)
self.setup_environ()
def setup_environ(self):
# Set up base environment
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
return self.application
def set_app(self,application):
self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
server_version = "WSGIServer/" + __version__
def __init__(self, *args, **kwargs):
from django.conf import settings
self.admin_media_prefix = settings.ADMIN_MEDIA_PREFIX
# We set self.path to avoid crashes in log_message() on unsupported
# requests (like "OPTIONS").
self.path = ''
self.style = color_style()
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def get_environ(self):
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.unquote(path)
env['QUERY_STRING'] = query
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
for h in self.headers.headers:
k,v = h.split(':',1)
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
continue # skip content length, type, etc.
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v # comma-separate multiple headers
else:
env['HTTP_'+k] = v
return env
def get_stderr(self):
return sys.stderr
def handle(self):
"""Handle a single HTTP request"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(self.rfile, self.wfile, self.get_stderr(), self.get_environ())
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
def log_message(self, format, *args):
# Don't bother logging requests for admin images or the favicon.
if self.path.startswith(self.admin_media_prefix) or self.path == '/favicon.ico':
return
msg = "[%s] %s\n" % (self.log_date_time_string(), format % args)
# Utilize terminal colors, if available
if args[1][0] == '2':
# Put 2XX first, since it should be the common case
msg = self.style.HTTP_SUCCESS(msg)
elif args[1][0] == '1':
msg = self.style.HTTP_INFO(msg)
elif args[1] == '304':
msg = self.style.HTTP_NOT_MODIFIED(msg)
elif args[1][0] == '3':
msg = self.style.HTTP_REDIRECT(msg)
elif args[1] == '404':
msg = self.style.HTTP_NOT_FOUND(msg)
elif args[1][0] == '4':
msg = self.style.HTTP_BAD_REQUEST(msg)
else:
# Any 5XX, or any other response
msg = self.style.HTTP_SERVER_ERROR(msg)
sys.stderr.write(msg)
class AdminMediaHandler(handlers.StaticFilesHandler):
"""
WSGI middleware that intercepts calls to the admin media directory, as
defined by the ADMIN_MEDIA_PREFIX setting, and serves those images.
Use this ONLY LOCALLY, for development! This hasn't been tested for
security and is not super efficient.
This has been pending deprecation since 1.3.
"""
def get_base_dir(self):
import django
return os.path.join(django.__path__[0], 'contrib', 'admin', 'media')
def get_base_url(self):
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
if not settings.ADMIN_MEDIA_PREFIX:
raise ImproperlyConfigured(
"The ADMIN_MEDIA_PREFIX setting can't be empty "
"when using the AdminMediaHandler, e.g. with runserver.")
return settings.ADMIN_MEDIA_PREFIX
def file_path(self, url):
"""
Returns the path to the media file on disk for the given URL.
The passed URL is assumed to begin with ``self.base_url``. If the
resulting file path is outside the media directory, then a ValueError
is raised.
"""
relative_url = url[len(self.base_url[2]):]
relative_path = urllib.url2pathname(relative_url)
return safe_join(self.base_dir, relative_path)
def serve(self, request):
document_root, path = os.path.split(self.file_path(request.path))
return static.serve(request, path, document_root=document_root)
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the base path
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def run(addr, port, wsgi_handler, ipv6=False):
server_address = (addr, port)
httpd = WSGIServer(server_address, WSGIRequestHandler, ipv6=ipv6)
httpd.set_app(wsgi_handler)
httpd.serve_forever()
| bsd-3-clause |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/test/test_plistlib.py | 2 | 30078 | # Copyright (C) 2003-2013 Python Software Foundation
import copy
import operator
import pickle
import unittest
import plistlib
import os
import datetime
import codecs
import binascii
import collections
from test import support
from io import BytesIO
from plistlib import UID
ALL_FORMATS=(plistlib.FMT_XML, plistlib.FMT_BINARY)
# The testdata is generated using Mac/Tools/plistlib_generate_testdata.py
# (which using PyObjC to control the Cocoa classes for generating plists)
TESTDATA={
plistlib.FMT_XML: binascii.a2b_base64(b'''
PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPCFET0NU
WVBFIHBsaXN0IFBVQkxJQyAiLS8vQXBwbGUvL0RURCBQTElTVCAxLjAvL0VO
IiAiaHR0cDovL3d3dy5hcHBsZS5jb20vRFREcy9Qcm9wZXJ0eUxpc3QtMS4w
LmR0ZCI+CjxwbGlzdCB2ZXJzaW9uPSIxLjAiPgo8ZGljdD4KCTxrZXk+YUJp
Z0ludDwva2V5PgoJPGludGVnZXI+OTIyMzM3MjAzNjg1NDc3NTc2NDwvaW50
ZWdlcj4KCTxrZXk+YUJpZ0ludDI8L2tleT4KCTxpbnRlZ2VyPjkyMjMzNzIw
MzY4NTQ3NzU4NTI8L2ludGVnZXI+Cgk8a2V5PmFEYXRlPC9rZXk+Cgk8ZGF0
ZT4yMDA0LTEwLTI2VDEwOjMzOjMzWjwvZGF0ZT4KCTxrZXk+YURpY3Q8L2tl
eT4KCTxkaWN0PgoJCTxrZXk+YUZhbHNlVmFsdWU8L2tleT4KCQk8ZmFsc2Uv
PgoJCTxrZXk+YVRydWVWYWx1ZTwva2V5PgoJCTx0cnVlLz4KCQk8a2V5PmFV
bmljb2RlVmFsdWU8L2tleT4KCQk8c3RyaW5nPk3DpHNzaWcsIE1hw588L3N0
cmluZz4KCQk8a2V5PmFub3RoZXJTdHJpbmc8L2tleT4KCQk8c3RyaW5nPiZs
dDtoZWxsbyAmYW1wOyAnaGknIHRoZXJlISZndDs8L3N0cmluZz4KCQk8a2V5
PmRlZXBlckRpY3Q8L2tleT4KCQk8ZGljdD4KCQkJPGtleT5hPC9rZXk+CgkJ
CTxpbnRlZ2VyPjE3PC9pbnRlZ2VyPgoJCQk8a2V5PmI8L2tleT4KCQkJPHJl
YWw+MzIuNTwvcmVhbD4KCQkJPGtleT5jPC9rZXk+CgkJCTxhcnJheT4KCQkJ
CTxpbnRlZ2VyPjE8L2ludGVnZXI+CgkJCQk8aW50ZWdlcj4yPC9pbnRlZ2Vy
PgoJCQkJPHN0cmluZz50ZXh0PC9zdHJpbmc+CgkJCTwvYXJyYXk+CgkJPC9k
aWN0PgoJPC9kaWN0PgoJPGtleT5hRmxvYXQ8L2tleT4KCTxyZWFsPjAuNTwv
cmVhbD4KCTxrZXk+YUxpc3Q8L2tleT4KCTxhcnJheT4KCQk8c3RyaW5nPkE8
L3N0cmluZz4KCQk8c3RyaW5nPkI8L3N0cmluZz4KCQk8aW50ZWdlcj4xMjwv
aW50ZWdlcj4KCQk8cmVhbD4zMi41PC9yZWFsPgoJCTxhcnJheT4KCQkJPGlu
dGVnZXI+MTwvaW50ZWdlcj4KCQkJPGludGVnZXI+MjwvaW50ZWdlcj4KCQkJ
PGludGVnZXI+MzwvaW50ZWdlcj4KCQk8L2FycmF5PgoJPC9hcnJheT4KCTxr
ZXk+YU5lZ2F0aXZlQmlnSW50PC9rZXk+Cgk8aW50ZWdlcj4tODAwMDAwMDAw
MDA8L2ludGVnZXI+Cgk8a2V5PmFOZWdhdGl2ZUludDwva2V5PgoJPGludGVn
ZXI+LTU8L2ludGVnZXI+Cgk8a2V5PmFTdHJpbmc8L2tleT4KCTxzdHJpbmc+
RG9vZGFoPC9zdHJpbmc+Cgk8a2V5PmFuRW1wdHlEaWN0PC9rZXk+Cgk8ZGlj
dC8+Cgk8a2V5PmFuRW1wdHlMaXN0PC9rZXk+Cgk8YXJyYXkvPgoJPGtleT5h
bkludDwva2V5PgoJPGludGVnZXI+NzI4PC9pbnRlZ2VyPgoJPGtleT5uZXN0
ZWREYXRhPC9rZXk+Cgk8YXJyYXk+CgkJPGRhdGE+CgkJUEd4dmRITWdiMlln
WW1sdVlYSjVJR2QxYm1zK0FBRUNBenhzYjNSeklHOW1JR0pwYm1GeWVTQm5k
VzVyCgkJUGdBQkFnTThiRzkwY3lCdlppQmlhVzVoY25rZ1ozVnVhejRBQVFJ
RFBHeHZkSE1nYjJZZ1ltbHVZWEo1CgkJSUdkMWJtcytBQUVDQXp4c2IzUnpJ
RzltSUdKcGJtRnllU0JuZFc1clBnQUJBZ004Ykc5MGN5QnZaaUJpCgkJYVc1
aGNua2daM1Z1YXo0QUFRSURQR3h2ZEhNZ2IyWWdZbWx1WVhKNUlHZDFibXMr
QUFFQ0F6eHNiM1J6CgkJSUc5bUlHSnBibUZ5ZVNCbmRXNXJQZ0FCQWdNOGJH
OTBjeUJ2WmlCaWFXNWhjbmtnWjNWdWF6NEFBUUlECgkJUEd4dmRITWdiMlln
WW1sdVlYSjVJR2QxYm1zK0FBRUNBdz09CgkJPC9kYXRhPgoJPC9hcnJheT4K
CTxrZXk+c29tZURhdGE8L2tleT4KCTxkYXRhPgoJUEdKcGJtRnllU0JuZFc1
clBnPT0KCTwvZGF0YT4KCTxrZXk+c29tZU1vcmVEYXRhPC9rZXk+Cgk8ZGF0
YT4KCVBHeHZkSE1nYjJZZ1ltbHVZWEo1SUdkMWJtcytBQUVDQXp4c2IzUnpJ
RzltSUdKcGJtRnllU0JuZFc1clBnQUJBZ004CgliRzkwY3lCdlppQmlhVzVo
Y25rZ1ozVnVhejRBQVFJRFBHeHZkSE1nYjJZZ1ltbHVZWEo1SUdkMWJtcytB
QUVDQXp4cwoJYjNSeklHOW1JR0pwYm1GeWVTQm5kVzVyUGdBQkFnTThiRzkw
Y3lCdlppQmlhVzVoY25rZ1ozVnVhejRBQVFJRFBHeHYKCWRITWdiMllnWW1s
dVlYSjVJR2QxYm1zK0FBRUNBenhzYjNSeklHOW1JR0pwYm1GeWVTQm5kVzVy
UGdBQkFnTThiRzkwCgljeUJ2WmlCaWFXNWhjbmtnWjNWdWF6NEFBUUlEUEd4
dmRITWdiMllnWW1sdVlYSjVJR2QxYm1zK0FBRUNBdz09Cgk8L2RhdGE+Cgk8
a2V5PsOFYmVucmFhPC9rZXk+Cgk8c3RyaW5nPlRoYXQgd2FzIGEgdW5pY29k
ZSBrZXkuPC9zdHJpbmc+CjwvZGljdD4KPC9wbGlzdD4K'''),
plistlib.FMT_BINARY: binascii.a2b_base64(b'''
YnBsaXN0MDDfEBABAgMEBQYHCAkKCwwNDg8QERITFCgpLzAxMjM0NTc2OFdh
QmlnSW50WGFCaWdJbnQyVWFEYXRlVWFEaWN0VmFGbG9hdFVhTGlzdF8QD2FO
ZWdhdGl2ZUJpZ0ludFxhTmVnYXRpdmVJbnRXYVN0cmluZ1thbkVtcHR5RGlj
dFthbkVtcHR5TGlzdFVhbkludFpuZXN0ZWREYXRhWHNvbWVEYXRhXHNvbWVN
b3JlRGF0YWcAxQBiAGUAbgByAGEAYRN/////////1BQAAAAAAAAAAIAAAAAA
AAAsM0GcuX30AAAA1RUWFxgZGhscHR5bYUZhbHNlVmFsdWVaYVRydWVWYWx1
ZV1hVW5pY29kZVZhbHVlXWFub3RoZXJTdHJpbmdaZGVlcGVyRGljdAgJawBN
AOQAcwBzAGkAZwAsACAATQBhAN9fEBU8aGVsbG8gJiAnaGknIHRoZXJlIT7T
HyAhIiMkUWFRYlFjEBEjQEBAAAAAAACjJSYnEAEQAlR0ZXh0Iz/gAAAAAAAA
pSorLCMtUUFRQhAMoyUmLhADE////+1foOAAE//////////7VkRvb2RhaNCg
EQLYoTZPEPo8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmlu
YXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBv
ZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxs
b3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4A
AQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBn
dW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDTTxiaW5hcnkgZ3Vu
az5fEBdUaGF0IHdhcyBhIHVuaWNvZGUga2V5LgAIACsAMwA8AEIASABPAFUA
ZwB0AHwAiACUAJoApQCuALsAygDTAOQA7QD4AQQBDwEdASsBNgE3ATgBTwFn
AW4BcAFyAXQBdgF/AYMBhQGHAYwBlQGbAZ0BnwGhAaUBpwGwAbkBwAHBAcIB
xQHHAsQC0gAAAAAAAAIBAAAAAAAAADkAAAAAAAAAAAAAAAAAAALs'''),
'KEYED_ARCHIVE': binascii.a2b_base64(b'''
YnBsaXN0MDDUAQIDBAUGHB1YJHZlcnNpb25YJG9iamVjdHNZJGFyY2hpdmVy
VCR0b3ASAAGGoKMHCA9VJG51bGzTCQoLDA0OVnB5dHlwZVYkY2xhc3NZTlMu
c3RyaW5nEAGAAl8QE0tleUFyY2hpdmUgVUlEIFRlc3TTEBESExQZWiRjbGFz
c25hbWVYJGNsYXNzZXNbJGNsYXNzaGludHNfEBdPQ19CdWlsdGluUHl0aG9u
VW5pY29kZaQVFhcYXxAXT0NfQnVpbHRpblB5dGhvblVuaWNvZGVfEBBPQ19Q
eXRob25Vbmljb2RlWE5TU3RyaW5nWE5TT2JqZWN0ohobXxAPT0NfUHl0aG9u
U3RyaW5nWE5TU3RyaW5nXxAPTlNLZXllZEFyY2hpdmVy0R4fVHJvb3SAAQAI
ABEAGgAjAC0AMgA3ADsAQQBIAE8AVgBgAGIAZAB6AIEAjACVAKEAuwDAANoA
7QD2AP8BAgEUAR0BLwEyATcAAAAAAAACAQAAAAAAAAAgAAAAAAAAAAAAAAAA
AAABOQ=='''),
}
class TestPlistlib(unittest.TestCase):
def tearDown(self):
try:
os.unlink(support.TESTFN)
except:
pass
def _create(self, fmt=None):
pl = dict(
aString="Doodah",
aList=["A", "B", 12, 32.5, [1, 2, 3]],
aFloat = 0.5,
anInt = 728,
aBigInt = 2 ** 63 - 44,
aBigInt2 = 2 ** 63 + 44,
aNegativeInt = -5,
aNegativeBigInt = -80000000000,
aDict=dict(
anotherString="<hello & 'hi' there!>",
aUnicodeValue='M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
deeperDict=dict(a=17, b=32.5, c=[1, 2, "text"]),
),
someData = b"<binary gunk>",
someMoreData = b"<lots of binary gunk>\0\1\2\3" * 10,
nestedData = [b"<lots of binary gunk>\0\1\2\3" * 10],
aDate = datetime.datetime(2004, 10, 26, 10, 33, 33),
anEmptyDict = dict(),
anEmptyList = list()
)
pl['\xc5benraa'] = "That was a unicode key."
return pl
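# Round-trip sketch (uses only names defined in this file and plistlib's
# public API):
#   data = plistlib.dumps(self._create(), fmt=plistlib.FMT_BINARY)
#   assert plistlib.loads(data)['aString'] == 'Doodah'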
def test_create(self):
pl = self._create()
self.assertEqual(pl["aString"], "Doodah")
self.assertEqual(pl["aDict"]["aFalseValue"], False)
def test_io(self):
pl = self._create()
with open(support.TESTFN, 'wb') as fp:
plistlib.dump(pl, fp)
with open(support.TESTFN, 'rb') as fp:
pl2 = plistlib.load(fp)
self.assertEqual(dict(pl), dict(pl2))
self.assertRaises(AttributeError, plistlib.dump, pl, 'filename')
self.assertRaises(AttributeError, plistlib.load, 'filename')
def test_invalid_type(self):
pl = [ object() ]
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
self.assertRaises(TypeError, plistlib.dumps, pl, fmt=fmt)
def test_invalid_uid(self):
with self.assertRaises(TypeError):
UID("not an int")
with self.assertRaises(ValueError):
UID(2 ** 64)
with self.assertRaises(ValueError):
UID(-19)
def test_int(self):
for pl in [0, 2**8-1, 2**8, 2**16-1, 2**16, 2**32-1, 2**32,
2**63-1, 2**64-1, 1, -2**63]:
for fmt in ALL_FORMATS:
with self.subTest(pl=pl, fmt=fmt):
data = plistlib.dumps(pl, fmt=fmt)
pl2 = plistlib.loads(data)
self.assertIsInstance(pl2, int)
self.assertEqual(pl, pl2)
data2 = plistlib.dumps(pl2, fmt=fmt)
self.assertEqual(data, data2)
for fmt in ALL_FORMATS:
for pl in (2 ** 64 + 1, 2 ** 127-1, -2**64, -2 ** 127):
with self.subTest(pl=pl, fmt=fmt):
self.assertRaises(OverflowError, plistlib.dumps,
pl, fmt=fmt)
def test_bytearray(self):
for pl in (b'<binary gunk>', b"<lots of binary gunk>\0\1\2\3" * 10):
for fmt in ALL_FORMATS:
with self.subTest(pl=pl, fmt=fmt):
data = plistlib.dumps(bytearray(pl), fmt=fmt)
pl2 = plistlib.loads(data)
self.assertIsInstance(pl2, bytes)
self.assertEqual(pl2, pl)
data2 = plistlib.dumps(pl2, fmt=fmt)
self.assertEqual(data, data2)
def test_bytes(self):
pl = self._create()
data = plistlib.dumps(pl)
pl2 = plistlib.loads(data)
self.assertEqual(dict(pl), dict(pl2))
data2 = plistlib.dumps(pl2)
self.assertEqual(data, data2)
def test_indentation_array(self):
data = [[[[[[[[{'test': b'aaaaaa'}]]]]]]]]
self.assertEqual(plistlib.loads(plistlib.dumps(data)), data)
def test_indentation_dict(self):
data = {'1': {'2': {'3': {'4': {'5': {'6': {'7': {'8': {'9': b'aaaaaa'}}}}}}}}}
self.assertEqual(plistlib.loads(plistlib.dumps(data)), data)
def test_indentation_dict_mix(self):
data = {'1': {'2': [{'3': [[[[[{'test': b'aaaaaa'}]]]]]}]}}
self.assertEqual(plistlib.loads(plistlib.dumps(data)), data)
def test_uid(self):
data = UID(1)
self.assertEqual(plistlib.loads(plistlib.dumps(data, fmt=plistlib.FMT_BINARY)), data)
dict_data = {
'uid0': UID(0),
'uid2': UID(2),
'uid8': UID(2 ** 8),
'uid16': UID(2 ** 16),
'uid32': UID(2 ** 32),
'uid63': UID(2 ** 63)
}
self.assertEqual(plistlib.loads(plistlib.dumps(dict_data, fmt=plistlib.FMT_BINARY)), dict_data)
def test_uid_data(self):
uid = UID(1)
self.assertEqual(uid.data, 1)
def test_uid_eq(self):
self.assertEqual(UID(1), UID(1))
self.assertNotEqual(UID(1), UID(2))
self.assertNotEqual(UID(1), "not uid")
def test_uid_hash(self):
self.assertEqual(hash(UID(1)), hash(UID(1)))
def test_uid_repr(self):
self.assertEqual(repr(UID(1)), "UID(1)")
def test_uid_index(self):
self.assertEqual(operator.index(UID(1)), 1)
def test_uid_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertEqual(pickle.loads(pickle.dumps(UID(19), protocol=proto)), UID(19))
def test_uid_copy(self):
self.assertEqual(copy.copy(UID(1)), UID(1))
self.assertEqual(copy.deepcopy(UID(1)), UID(1))
def test_appleformatting(self):
for use_builtin_types in (True, False):
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt, use_builtin_types=use_builtin_types):
pl = plistlib.loads(TESTDATA[fmt],
use_builtin_types=use_builtin_types)
data = plistlib.dumps(pl, fmt=fmt)
self.assertEqual(data, TESTDATA[fmt],
"generated data was not identical to Apple's output")
def test_appleformattingfromliteral(self):
self.maxDiff = None
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
pl = self._create(fmt=fmt)
pl2 = plistlib.loads(TESTDATA[fmt], fmt=fmt)
self.assertEqual(dict(pl), dict(pl2),
"generated data was not identical to Apple's output")
pl2 = plistlib.loads(TESTDATA[fmt])
self.assertEqual(dict(pl), dict(pl2),
"generated data was not identical to Apple's output")
def test_bytesio(self):
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
b = BytesIO()
pl = self._create(fmt=fmt)
plistlib.dump(pl, b, fmt=fmt)
pl2 = plistlib.load(BytesIO(b.getvalue()), fmt=fmt)
self.assertEqual(dict(pl), dict(pl2))
pl2 = plistlib.load(BytesIO(b.getvalue()))
self.assertEqual(dict(pl), dict(pl2))
def test_keysort_bytesio(self):
pl = collections.OrderedDict()
pl['b'] = 1
pl['a'] = 2
pl['c'] = 3
for fmt in ALL_FORMATS:
for sort_keys in (False, True):
with self.subTest(fmt=fmt, sort_keys=sort_keys):
b = BytesIO()
plistlib.dump(pl, b, fmt=fmt, sort_keys=sort_keys)
pl2 = plistlib.load(BytesIO(b.getvalue()),
dict_type=collections.OrderedDict)
self.assertEqual(dict(pl), dict(pl2))
if sort_keys:
self.assertEqual(list(pl2.keys()), ['a', 'b', 'c'])
else:
self.assertEqual(list(pl2.keys()), ['b', 'a', 'c'])
def test_keysort(self):
pl = collections.OrderedDict()
pl['b'] = 1
pl['a'] = 2
pl['c'] = 3
for fmt in ALL_FORMATS:
for sort_keys in (False, True):
with self.subTest(fmt=fmt, sort_keys=sort_keys):
data = plistlib.dumps(pl, fmt=fmt, sort_keys=sort_keys)
pl2 = plistlib.loads(data, dict_type=collections.OrderedDict)
self.assertEqual(dict(pl), dict(pl2))
if sort_keys:
self.assertEqual(list(pl2.keys()), ['a', 'b', 'c'])
else:
self.assertEqual(list(pl2.keys()), ['b', 'a', 'c'])
def test_keys_no_string(self):
        pl = {42: 'aNumber'}
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
self.assertRaises(TypeError, plistlib.dumps, pl, fmt=fmt)
b = BytesIO()
self.assertRaises(TypeError, plistlib.dump, pl, b, fmt=fmt)
def test_skipkeys(self):
pl = {
42: 'aNumber',
'snake': 'aWord',
}
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
data = plistlib.dumps(
pl, fmt=fmt, skipkeys=True, sort_keys=False)
pl2 = plistlib.loads(data)
self.assertEqual(pl2, {'snake': 'aWord'})
fp = BytesIO()
plistlib.dump(
pl, fp, fmt=fmt, skipkeys=True, sort_keys=False)
data = fp.getvalue()
                pl2 = plistlib.loads(data)
self.assertEqual(pl2, {'snake': 'aWord'})
def test_tuple_members(self):
pl = {
'first': (1, 2),
'second': (1, 2),
'third': (3, 4),
}
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
data = plistlib.dumps(pl, fmt=fmt)
pl2 = plistlib.loads(data)
self.assertEqual(pl2, {
'first': [1, 2],
'second': [1, 2],
'third': [3, 4],
})
if fmt != plistlib.FMT_BINARY:
self.assertIsNot(pl2['first'], pl2['second'])
def test_list_members(self):
pl = {
'first': [1, 2],
'second': [1, 2],
'third': [3, 4],
}
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
data = plistlib.dumps(pl, fmt=fmt)
pl2 = plistlib.loads(data)
self.assertEqual(pl2, {
'first': [1, 2],
'second': [1, 2],
'third': [3, 4],
})
self.assertIsNot(pl2['first'], pl2['second'])
def test_dict_members(self):
pl = {
'first': {'a': 1},
'second': {'a': 1},
            'third': {'b': 2},
}
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
data = plistlib.dumps(pl, fmt=fmt)
pl2 = plistlib.loads(data)
self.assertEqual(pl2, {
'first': {'a': 1},
'second': {'a': 1},
                    'third': {'b': 2},
})
self.assertIsNot(pl2['first'], pl2['second'])
def test_controlcharacters(self):
for i in range(128):
c = chr(i)
testString = "string containing %s" % c
if i >= 32 or c in "\r\n\t":
# \r, \n and \t are the only legal control chars in XML
data = plistlib.dumps(testString, fmt=plistlib.FMT_XML)
if c != "\r":
self.assertEqual(plistlib.loads(data), testString)
else:
with self.assertRaises(ValueError):
plistlib.dumps(testString, fmt=plistlib.FMT_XML)
plistlib.dumps(testString, fmt=plistlib.FMT_BINARY)
def test_non_bmp_characters(self):
pl = {'python': '\U0001f40d'}
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
data = plistlib.dumps(pl, fmt=fmt)
self.assertEqual(plistlib.loads(data), pl)
def test_lone_surrogates(self):
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
with self.assertRaises(UnicodeEncodeError):
plistlib.dumps('\ud8ff', fmt=fmt)
with self.assertRaises(UnicodeEncodeError):
plistlib.dumps('\udcff', fmt=fmt)
def test_nondictroot(self):
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
test1 = "abc"
test2 = [1, 2, 3, "abc"]
result1 = plistlib.loads(plistlib.dumps(test1, fmt=fmt))
result2 = plistlib.loads(plistlib.dumps(test2, fmt=fmt))
self.assertEqual(test1, result1)
self.assertEqual(test2, result2)
def test_invalidarray(self):
for i in ["<key>key inside an array</key>",
"<key>key inside an array2</key><real>3</real>",
"<true/><key>key inside an array3</key>"]:
self.assertRaises(ValueError, plistlib.loads,
("<plist><array>%s</array></plist>"%i).encode())
def test_invaliddict(self):
for i in ["<key><true/>k</key><string>compound key</string>",
"<key>single key</key>",
"<string>missing key</string>",
"<key>k1</key><string>v1</string><real>5.3</real>"
"<key>k1</key><key>k2</key><string>double key</string>"]:
self.assertRaises(ValueError, plistlib.loads,
("<plist><dict>%s</dict></plist>"%i).encode())
self.assertRaises(ValueError, plistlib.loads,
("<plist><array><dict>%s</dict></array></plist>"%i).encode())
def test_invalidinteger(self):
self.assertRaises(ValueError, plistlib.loads,
b"<plist><integer>not integer</integer></plist>")
def test_invalidreal(self):
self.assertRaises(ValueError, plistlib.loads,
b"<plist><integer>not real</integer></plist>")
def test_xml_encodings(self):
base = TESTDATA[plistlib.FMT_XML]
for xml_encoding, encoding, bom in [
(b'utf-8', 'utf-8', codecs.BOM_UTF8),
(b'utf-16', 'utf-16-le', codecs.BOM_UTF16_LE),
(b'utf-16', 'utf-16-be', codecs.BOM_UTF16_BE),
# Expat does not support UTF-32
#(b'utf-32', 'utf-32-le', codecs.BOM_UTF32_LE),
#(b'utf-32', 'utf-32-be', codecs.BOM_UTF32_BE),
]:
pl = self._create(fmt=plistlib.FMT_XML)
with self.subTest(encoding=encoding):
data = base.replace(b'UTF-8', xml_encoding)
data = bom + data.decode('utf-8').encode(encoding)
pl2 = plistlib.loads(data)
self.assertEqual(dict(pl), dict(pl2))
class TestBinaryPlistlib(unittest.TestCase):
def test_nonstandard_refs_size(self):
# Issue #21538: Refs and offsets are 24-bit integers
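        # (Annotation, ours rather than the original author's: a one-entry
        # dict using 3-byte object refs, two one-character strings, a 3-byte
        # offset table, and the standard 32-byte trailer.)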
data = (b'bplist00'
b'\xd1\x00\x00\x01\x00\x00\x02QaQb'
b'\x00\x00\x08\x00\x00\x0f\x00\x00\x11'
b'\x00\x00\x00\x00\x00\x00'
b'\x03\x03'
b'\x00\x00\x00\x00\x00\x00\x00\x03'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x13')
self.assertEqual(plistlib.loads(data), {'a': 'b'})
def test_dump_duplicates(self):
# Test effectiveness of saving duplicated objects
for x in (None, False, True, 12345, 123.45, 'abcde', b'abcde',
datetime.datetime(2004, 10, 26, 10, 33, 33),
plistlib.Data(b'abcde'), bytearray(b'abcde'),
[12, 345], (12, 345), {'12': 345}):
with self.subTest(x=x):
data = plistlib.dumps([x]*1000, fmt=plistlib.FMT_BINARY)
self.assertLess(len(data), 1100, repr(data))
def test_identity(self):
for x in (None, False, True, 12345, 123.45, 'abcde', b'abcde',
datetime.datetime(2004, 10, 26, 10, 33, 33),
plistlib.Data(b'abcde'), bytearray(b'abcde'),
[12, 345], (12, 345), {'12': 345}):
with self.subTest(x=x):
data = plistlib.dumps([x]*2, fmt=plistlib.FMT_BINARY)
a, b = plistlib.loads(data)
if isinstance(x, tuple):
x = list(x)
self.assertEqual(a, x)
self.assertEqual(b, x)
self.assertIs(a, b)
def test_cycles(self):
# recursive list
a = []
a.append(a)
b = plistlib.loads(plistlib.dumps(a, fmt=plistlib.FMT_BINARY))
self.assertIs(b[0], b)
# recursive tuple
a = ([],)
a[0].append(a)
b = plistlib.loads(plistlib.dumps(a, fmt=plistlib.FMT_BINARY))
self.assertIs(b[0][0], b)
# recursive dict
a = {}
a['x'] = a
b = plistlib.loads(plistlib.dumps(a, fmt=plistlib.FMT_BINARY))
self.assertIs(b['x'], b)
def test_large_timestamp(self):
# Issue #26709: 32-bit timestamp out of range
for ts in -2**31-1, 2**31:
with self.subTest(ts=ts):
d = (datetime.datetime.utcfromtimestamp(0) +
datetime.timedelta(seconds=ts))
data = plistlib.dumps(d, fmt=plistlib.FMT_BINARY)
self.assertEqual(plistlib.loads(data), d)
def test_invalid_binary(self):
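        # Layout note (our reading of the format, not from the original test):
        # each case below is object data followed by a 32-byte trailer --
        # 6 pad bytes, offset_size (1 byte), ref_size (1 byte), then
        # num_objects, top_object and offset_table_offset as big-endian
        # 64-bit integers.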
for data in [
# too short data
b'',
# too large offset_table_offset and nonstandard offset_size
b'\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x03\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x2a',
# integer overflow in offset_table_offset
b'\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\xff\xff\xff\xff\xff\xff\xff\xff',
# offset_size = 0
b'\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x09',
# ref_size = 0
b'\xa1\x01\x00\x08\x0a'
b'\x00\x00\x00\x00\x00\x00\x01\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x02'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x0b',
# integer overflow in offset
b'\x00\xff\xff\xff\xff\xff\xff\xff\xff'
b'\x00\x00\x00\x00\x00\x00\x08\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x09',
# invalid ASCII
b'\x51\xff\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x0a',
# invalid UTF-16
b'\x61\xd8\x00\x08'
b'\x00\x00\x00\x00\x00\x00\x01\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x0b',
]:
with self.assertRaises(plistlib.InvalidFileException):
plistlib.loads(b'bplist00' + data, fmt=plistlib.FMT_BINARY)
class TestPlistlibDeprecated(unittest.TestCase):
def test_io_deprecated(self):
pl_in = {
'key': 42,
'sub': {
'key': 9,
'alt': 'value',
'data': b'buffer',
}
}
pl_out = {
'key': 42,
'sub': {
'key': 9,
'alt': 'value',
'data': plistlib.Data(b'buffer'),
}
}
self.addCleanup(support.unlink, support.TESTFN)
with self.assertWarns(DeprecationWarning):
plistlib.writePlist(pl_in, support.TESTFN)
with self.assertWarns(DeprecationWarning):
pl2 = plistlib.readPlist(support.TESTFN)
self.assertEqual(pl_out, pl2)
os.unlink(support.TESTFN)
with open(support.TESTFN, 'wb') as fp:
with self.assertWarns(DeprecationWarning):
plistlib.writePlist(pl_in, fp)
with open(support.TESTFN, 'rb') as fp:
with self.assertWarns(DeprecationWarning):
pl2 = plistlib.readPlist(fp)
self.assertEqual(pl_out, pl2)
def test_bytes_deprecated(self):
pl = {
'key': 42,
'sub': {
'key': 9,
'alt': 'value',
'data': b'buffer',
}
}
with self.assertWarns(DeprecationWarning):
data = plistlib.writePlistToBytes(pl)
with self.assertWarns(DeprecationWarning):
pl2 = plistlib.readPlistFromBytes(data)
self.assertIsInstance(pl2, dict)
self.assertEqual(pl2, dict(
key=42,
sub=dict(
key=9,
alt='value',
data=plistlib.Data(b'buffer'),
)
))
with self.assertWarns(DeprecationWarning):
data2 = plistlib.writePlistToBytes(pl2)
self.assertEqual(data, data2)
def test_dataobject_deprecated(self):
        in_data = {'key': plistlib.Data(b'hello')}
        out_data = {'key': b'hello'}
buf = plistlib.dumps(in_data)
cur = plistlib.loads(buf)
self.assertEqual(cur, out_data)
self.assertEqual(cur, in_data)
cur = plistlib.loads(buf, use_builtin_types=False)
self.assertEqual(cur, out_data)
self.assertEqual(cur, in_data)
with self.assertWarns(DeprecationWarning):
cur = plistlib.readPlistFromBytes(buf)
self.assertEqual(cur, out_data)
self.assertEqual(cur, in_data)
class TestKeyedArchive(unittest.TestCase):
def test_keyed_archive_data(self):
        # This is the structure of an NSKeyedArchive packed plist
data = {
'$version': 100000,
'$objects': [
'$null', {
'pytype': 1,
'$class': UID(2),
'NS.string': 'KeyArchive UID Test'
},
{
'$classname': 'OC_BuiltinPythonUnicode',
'$classes': [
'OC_BuiltinPythonUnicode',
'OC_PythonUnicode',
'NSString',
'NSObject'
],
'$classhints': [
'OC_PythonString', 'NSString'
]
}
],
'$archiver': 'NSKeyedArchiver',
'$top': {
'root': UID(1)
}
}
self.assertEqual(plistlib.loads(TESTDATA["KEYED_ARCHIVE"]), data)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
blacklist = {"PlistFormat", "PLISTHEADER"}
support.check__all__(self, plistlib, blacklist=blacklist)
def test_main():
support.run_unittest(TestPlistlib, TestPlistlibDeprecated, TestKeyedArchive, MiscTestCase)
if __name__ == '__main__':
test_main()
| apache-2.0 |
fblupi/master_informatica-SSBW | tarea6/sitio_web/sitio_web/urls.py | 3 | 1158 | """sitio_web URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from registration.backends.simple.views import RegistrationView
class MyRegistrationView(RegistrationView):
    def get_success_url(self, request, user):
return '/'
urlpatterns = [
url(r'^', include('restaurantes.urls')),
url(r'^admin/', admin.site.urls),
url(r'^accounts/register/$', MyRegistrationView.as_view(), name='registration_register'),
url(r'^accounts/', include('registration.backends.simple.urls')),
]
| gpl-3.0 |
t3dev/odoo | addons/sale_coupon/wizard/sale_coupon_apply_code.py | 1 | 2547 | # -*- coding: utf-8 -*-
from odoo import api, fields, models, _
from odoo.exceptions import UserError
class SaleCouponApplyCode(models.TransientModel):
_name = 'sale.coupon.apply.code'
_rec_name = 'coupon_code'
_description = 'Sales Coupon Apply Code'
coupon_code = fields.Char(string="Coupon", required=True)
@api.multi
def process_coupon(self):
"""
        Apply the entered coupon code if valid, raise a UserError otherwise.
"""
sales_order = self.env['sale.order'].browse(self.env.context.get('active_id'))
error_status = self.apply_coupon(sales_order, self.coupon_code)
if error_status.get('error', False):
raise UserError(error_status.get('error', False))
if error_status.get('not_found', False):
raise UserError(error_status.get('not_found', False))
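    # Illustrative usage (a sketch, not part of the original file): the wizard
    # reads the sale order id from the context, so a caller might do e.g.
    #   wizard = env['sale.coupon.apply.code'].with_context(
    #       active_id=order.id).create({'coupon_code': 'SUMMER10'})
    #   wizard.process_coupon()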
def apply_coupon(self, order, coupon_code):
error_status = {}
program = self.env['sale.coupon.program'].search([('promo_code', '=', coupon_code)])
if program:
error_status = program._check_promo_code(order, coupon_code)
if not error_status:
if program.promo_applicability == 'on_next_order':
# Avoid creating the coupon if it already exist
if program.discount_line_product_id.id not in order.generated_coupon_ids.filtered(lambda coupon: coupon.state in ['new', 'reserved']).mapped('discount_line_product_id').ids:
coupon = order._create_reward_coupon(program)
return {
'generated_coupon': {
'reward': coupon.program_id.discount_line_product_id.name,
'code': coupon.code,
}
}
else: # The program is applied on this order
order._create_reward_line(program)
order.code_promo_program_id = program
else:
coupon = self.env['sale.coupon'].search([('code', '=', coupon_code)], limit=1)
if coupon:
error_status = coupon._check_coupon_code(order)
if not error_status:
order._create_reward_line(coupon.program_id)
order.applied_coupon_ids += coupon
coupon.write({'state': 'used'})
else:
error_status = {'not_found': _('The code %s is invalid') % (coupon_code)}
return error_status
| gpl-3.0 |
fedorpatlin/ansible | lib/ansible/module_utils/ipa.py | 97 | 6567 | # -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2016 Thomas Krahn (@Nosmoht)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
import json
except ImportError:
import simplejson as json
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves.urllib.parse import quote
from ansible.module_utils.urls import fetch_url
class IPAClient(object):
def __init__(self, module, host, port, protocol):
self.host = host
self.port = port
self.protocol = protocol
self.module = module
self.headers = None
def get_base_url(self):
return '%s://%s/ipa' % (self.protocol, self.host)
def get_json_url(self):
return '%s/session/json' % self.get_base_url()
def login(self, username, password):
url = '%s/session/login_password' % self.get_base_url()
data = 'user=%s&password=%s' % (quote(username, safe=''), quote(password, safe=''))
headers = {'referer': self.get_base_url(),
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain'}
try:
resp, info = fetch_url(module=self.module, url=url, data=to_bytes(data), headers=headers)
status_code = info['status']
if status_code not in [200, 201, 204]:
self._fail('login', info['msg'])
self.headers = {'referer': self.get_base_url(),
'Content-Type': 'application/json',
'Accept': 'application/json',
'Cookie': resp.info().get('Set-Cookie')}
except Exception:
e = get_exception()
self._fail('login', str(e))
def _fail(self, msg, e):
if 'message' in e:
err_string = e.get('message')
else:
err_string = e
self.module.fail_json(msg='%s: %s' % (msg, err_string))
def _post_json(self, method, name, item=None):
if item is None:
item = {}
url = '%s/session/json' % self.get_base_url()
data = {'method': method, 'params': [[name], item]}
try:
resp, info = fetch_url(module=self.module, url=url, data=to_bytes(json.dumps(data)), headers=self.headers)
status_code = info['status']
if status_code not in [200, 201, 204]:
self._fail(method, info['msg'])
except Exception:
e = get_exception()
self._fail('post %s' % method, str(e))
if PY3:
charset = resp.headers.get_content_charset('latin-1')
else:
response_charset = resp.headers.getparam('charset')
if response_charset:
charset = response_charset
else:
charset = 'latin-1'
resp = json.loads(to_text(resp.read(), encoding=charset), encoding=charset)
err = resp.get('error')
if err is not None:
            self._fail('response %s' % method, err)
if 'result' in resp:
result = resp.get('result')
if 'result' in result:
result = result.get('result')
if isinstance(result, list):
if len(result) > 0:
return result[0]
else:
return {}
return result
return None
def get_diff(self, ipa_data, module_data):
result = []
for key in module_data.keys():
mod_value = module_data.get(key, None)
if isinstance(mod_value, list):
default = []
else:
default = None
ipa_value = ipa_data.get(key, default)
if isinstance(ipa_value, list) and not isinstance(mod_value, list):
mod_value = [mod_value]
if isinstance(ipa_value, list) and isinstance(mod_value, list):
mod_value = sorted(mod_value)
ipa_value = sorted(ipa_value)
if mod_value != ipa_value:
result.append(key)
return result
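    # Illustrative example (ours, not from the original module): get_diff
    # returns the keys whose module-side values differ from the server data,
    # treating a scalar and a single-element list as equivalent, so e.g.
    #   client.get_diff({'cn': ['host1']}, {'cn': 'host1', 'description': 'web'})
    # would return ['description'].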
def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, item=None):
changed = False
diff = list(set(ipa_list) - set(module_list))
if len(diff) > 0:
changed = True
if not self.module.check_mode:
if item:
remove_method(name=name, item={item: diff})
else:
remove_method(name=name, item=diff)
diff = list(set(module_list) - set(ipa_list))
if len(diff) > 0:
changed = True
if not self.module.check_mode:
if item:
add_method(name=name, item={item: diff})
else:
add_method(name=name, item=diff)
return changed
| gpl-3.0 |
barbour-em/osf.io | framework/auth/signing.py | 80 | 2103 | # encoding: utf-8
import hmac
import json
import time
import base64
import collections
from website import settings
# Written by @jmcarp originally
def order_recursive(data):
"""Recursively sort keys of input data and all its nested dictionaries.
Used to ensure consistent ordering of JSON payloads.
"""
if isinstance(data, dict):
return collections.OrderedDict(
sorted(
(
(key, order_recursive(value))
for key, value in data.items()
),
key=lambda item: item[0]
)
)
if isinstance(data, list):
return [
order_recursive(value)
for value in data
]
return data
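# Illustrative example (ours, not part of the original module):
#   >>> order_recursive({'b': 1, 'a': {'d': 2, 'c': 3}})
#   OrderedDict([('a', OrderedDict([('c', 3), ('d', 2)])), ('b', 1)])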
def serialize_payload(payload):
ordered = order_recursive(payload)
return base64.b64encode(json.dumps(ordered))
def unserialize_payload(message):
payload = json.loads(base64.b64decode(message))
return order_recursive(payload)
class Signer(object):
def __init__(self, secret, digest):
assert callable(digest)
self.secret = secret
self.digest = digest
def sign_message(self, message):
return hmac.new(
key=self.secret,
digestmod=self.digest,
msg=message,
).hexdigest()
def sign_payload(self, payload):
message = serialize_payload(payload)
signature = self.sign_message(message)
return message, signature
def verify_message(self, signature, message):
expected = self.sign_message(message)
return signature == expected
def verify_payload(self, signature, payload):
_, expected = self.sign_payload(payload)
return signature == expected
def sign_data(signer, data, ttl=100):
target = {'time': int(time.time() + ttl)}
target.update(data)
payload, signature = signer.sign_payload(target)
return {
'payload': payload.decode(),
'signature': signature,
}
default_signer = Signer(settings.DEFAULT_HMAC_SECRET, settings.DEFAULT_HMAC_ALGORITHM)
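# Illustrative round trip (a sketch, not part of the original module):
#   signed = sign_data(default_signer, {'user': 'abc123'})
#   payload = unserialize_payload(signed['payload'])
#   default_signer.verify_payload(signed['signature'], payload)  # -> True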
| apache-2.0 |
temasek/android_external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/common/checkout/scm/scm_unittest.py | 9 | 30094 | # Copyright (C) 2009 Google Inc. All rights reserved.
# Copyright (C) 2009 Apple Inc. All rights reserved.
# Copyright (C) 2011 Daniel Bates (dbates@intudata.com). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import atexit
import os
import shutil
import unittest
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.checkout.scm.detection import detect_scm_system
from webkitpy.common.checkout.scm.git import Git, AmbiguousCommitError
from webkitpy.common.checkout.scm.scm import SCM
from webkitpy.common.checkout.scm.svn import SVN
# We cache the mock SVN repo so that we don't create it again for each call to an SVNTest or GitSVNTest test_ method.
# We store it in a global variable so that we can delete this cached repo on exit(3).
original_cwd = None
cached_svn_repo_path = None
@atexit.register
def delete_cached_svn_repo_at_exit():
if cached_svn_repo_path:
os.chdir(original_cwd)
shutil.rmtree(cached_svn_repo_path)
class SCMTestBase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(SCMTestBase, self).__init__(*args, **kwargs)
self.scm = None
self.executive = None
self.fs = None
self.original_cwd = None
def setUp(self):
self.executive = Executive()
self.fs = FileSystem()
self.original_cwd = self.fs.getcwd()
def tearDown(self):
self._chdir(self.original_cwd)
def _join(self, *comps):
return self.fs.join(*comps)
def _chdir(self, path):
self.fs.chdir(path)
def _mkdir(self, path):
assert not self.fs.exists(path)
self.fs.maybe_make_directory(path)
def _mkdtemp(self, **kwargs):
return str(self.fs.mkdtemp(**kwargs))
def _remove(self, path):
self.fs.remove(path)
def _rmtree(self, path):
self.fs.rmtree(path)
def _run(self, *args, **kwargs):
return self.executive.run_command(*args, **kwargs)
def _run_silent(self, args, **kwargs):
self.executive.run_and_throw_if_fail(args, quiet=True, **kwargs)
def _write_text_file(self, path, contents):
self.fs.write_text_file(path, contents)
def _write_binary_file(self, path, contents):
self.fs.write_binary_file(path, contents)
def _make_diff(self, command, *args):
        # We use this wrapper to disable output decoding. Diffs should be treated as
        # binary files since they may include text files of multiple different encodings.
return self._run([command, "diff"] + list(args), decode_output=False)
def _svn_diff(self, *args):
return self._make_diff("svn", *args)
def _git_diff(self, *args):
return self._make_diff("git", *args)
def _svn_add(self, path):
self._run(["svn", "add", path])
def _svn_commit(self, message):
self._run(["svn", "commit", "--quiet", "--message", message])
# This is a hot function since it's invoked by unittest before calling each test_ method in SVNTest and
    # GitSVNTest. We create a mock SVN repo once and then perform an SVN checkout from a filesystem copy of
# it since it's expensive to create the mock repo.
def _set_up_svn_checkout(self):
global cached_svn_repo_path
global original_cwd
if not cached_svn_repo_path:
cached_svn_repo_path = self._set_up_svn_repo()
original_cwd = self.original_cwd
self.temp_directory = self._mkdtemp(suffix="svn_test")
self.svn_repo_path = self._join(self.temp_directory, "repo")
self.svn_repo_url = "file://%s" % self.svn_repo_path
self.svn_checkout_path = self._join(self.temp_directory, "checkout")
shutil.copytree(cached_svn_repo_path, self.svn_repo_path)
self._run(['svn', 'checkout', '--quiet', self.svn_repo_url + "/trunk", self.svn_checkout_path])
def _set_up_svn_repo(self):
svn_repo_path = self._mkdtemp(suffix="svn_test_repo")
        svn_repo_url = "file://%s" % svn_repo_path  # Not sure this will work on Windows
# git svn complains if we don't pass --pre-1.5-compatible, not sure why:
# Expected FS format '2'; found format '3' at /usr/local/libexec/git-core//git-svn line 1477
self._run(['svnadmin', 'create', '--pre-1.5-compatible', svn_repo_path])
# Create a test svn checkout
svn_checkout_path = self._mkdtemp(suffix="svn_test_checkout")
self._run(['svn', 'checkout', '--quiet', svn_repo_url, svn_checkout_path])
# Create and checkout a trunk dir to match the standard svn configuration to match git-svn's expectations
self._chdir(svn_checkout_path)
self._mkdir('trunk')
self._svn_add('trunk')
# We can add tags and branches as well if we ever need to test those.
self._svn_commit('add trunk')
self._rmtree(svn_checkout_path)
self._chdir(self.original_cwd)
self._set_up_svn_test_commits(svn_repo_url + "/trunk")
return svn_repo_path
def _set_up_svn_test_commits(self, svn_repo_url):
svn_checkout_path = self._mkdtemp(suffix="svn_test_checkout")
self._run(['svn', 'checkout', '--quiet', svn_repo_url, svn_checkout_path])
# Add some test commits
self._chdir(svn_checkout_path)
self._write_text_file("test_file", "test1")
self._svn_add("test_file")
self._svn_commit("initial commit")
self._write_text_file("test_file", "test1test2")
# This used to be the last commit, but doing so broke
        # GitTest.test_apply_git_patch, which uses the inverse diff of the last commit.
# svn-apply fails to remove directories in Git, see:
# https://bugs.webkit.org/show_bug.cgi?id=34871
self._mkdir("test_dir")
        # Slash should always be the right path separator since we use Cygwin on Windows.
test_file3_path = "test_dir/test_file3"
self._write_text_file(test_file3_path, "third file")
self._svn_add("test_dir")
self._svn_commit("second commit")
self._write_text_file("test_file", "test1test2test3\n")
self._write_text_file("test_file2", "second file")
self._svn_add("test_file2")
self._svn_commit("third commit")
# This 4th commit is used to make sure that our patch file handling
# code correctly treats patches as binary and does not attempt to
# decode them assuming they're utf-8.
self._write_binary_file("test_file", u"latin1 test: \u00A0\n".encode("latin-1"))
self._write_binary_file("test_file2", u"utf-8 test: \u00A0\n".encode("utf-8"))
self._svn_commit("fourth commit")
# svn does not seem to update after commit as I would expect.
self._run(['svn', 'update'])
self._rmtree(svn_checkout_path)
self._chdir(self.original_cwd)
def _tear_down_svn_checkout(self):
self._rmtree(self.temp_directory)
def _shared_test_add_recursively(self):
self._mkdir("added_dir")
self._write_text_file("added_dir/added_file", "new stuff")
self.scm.add("added_dir/added_file")
self.assertIn("added_dir/added_file", self.scm._added_files())
def _shared_test_delete_recursively(self):
self._mkdir("added_dir")
self._write_text_file("added_dir/added_file", "new stuff")
self.scm.add("added_dir/added_file")
self.assertIn("added_dir/added_file", self.scm._added_files())
self.scm.delete("added_dir/added_file")
self.assertNotIn("added_dir", self.scm._added_files())
def _shared_test_delete_recursively_or_not(self):
self._mkdir("added_dir")
self._write_text_file("added_dir/added_file", "new stuff")
self._write_text_file("added_dir/another_added_file", "more new stuff")
self.scm.add("added_dir/added_file")
self.scm.add("added_dir/another_added_file")
self.assertIn("added_dir/added_file", self.scm._added_files())
self.assertIn("added_dir/another_added_file", self.scm._added_files())
self.scm.delete("added_dir/added_file")
self.assertIn("added_dir/another_added_file", self.scm._added_files())
def _shared_test_exists(self, scm, commit_function):
self._chdir(scm.checkout_root)
self.assertFalse(scm.exists('foo.txt'))
self._write_text_file('foo.txt', 'some stuff')
self.assertFalse(scm.exists('foo.txt'))
scm.add('foo.txt')
commit_function('adding foo')
self.assertTrue(scm.exists('foo.txt'))
scm.delete('foo.txt')
commit_function('deleting foo')
self.assertFalse(scm.exists('foo.txt'))
def _shared_test_move(self):
self._write_text_file('added_file', 'new stuff')
self.scm.add('added_file')
self.scm.move('added_file', 'moved_file')
self.assertIn('moved_file', self.scm._added_files())
def _shared_test_move_recursive(self):
self._mkdir("added_dir")
self._write_text_file('added_dir/added_file', 'new stuff')
self._write_text_file('added_dir/another_added_file', 'more new stuff')
self.scm.add('added_dir')
self.scm.move('added_dir', 'moved_dir')
self.assertIn('moved_dir/added_file', self.scm._added_files())
self.assertIn('moved_dir/another_added_file', self.scm._added_files())
class SVNTest(SCMTestBase):
def setUp(self):
super(SVNTest, self).setUp()
self._set_up_svn_checkout()
self._chdir(self.svn_checkout_path)
self.scm = detect_scm_system(self.svn_checkout_path)
self.scm.svn_server_realm = None
def tearDown(self):
super(SVNTest, self).tearDown()
self._tear_down_svn_checkout()
def test_detect_scm_system_relative_url(self):
scm = detect_scm_system(".")
# I wanted to assert that we got the right path, but there was some
# crazy magic with temp folder names that I couldn't figure out.
self.assertTrue(scm.checkout_root)
def test_detection(self):
self.assertEqual(self.scm.display_name(), "svn")
self.assertEqual(self.scm.supports_local_commits(), False)
def test_add_recursively(self):
self._shared_test_add_recursively()
def test_delete(self):
self._chdir(self.svn_checkout_path)
self.scm.delete("test_file")
self.assertIn("test_file", self.scm._deleted_files())
def test_delete_list(self):
self._chdir(self.svn_checkout_path)
self.scm.delete_list(["test_file", "test_file2"])
self.assertIn("test_file", self.scm._deleted_files())
self.assertIn("test_file2", self.scm._deleted_files())
def test_delete_recursively(self):
self._shared_test_delete_recursively()
def test_delete_recursively_or_not(self):
self._shared_test_delete_recursively_or_not()
def test_move(self):
self._shared_test_move()
def test_move_recursive(self):
self._shared_test_move_recursive()
class GitTest(SCMTestBase):
def setUp(self):
super(GitTest, self).setUp()
self._set_up_git_checkouts()
def tearDown(self):
super(GitTest, self).tearDown()
self._tear_down_git_checkouts()
def _set_up_git_checkouts(self):
"""Sets up fresh git repository with one commit. Then sets up a second git repo that tracks the first one."""
self.untracking_checkout_path = self._mkdtemp(suffix="git_test_checkout2")
self._run(['git', 'init', self.untracking_checkout_path])
self._chdir(self.untracking_checkout_path)
self._write_text_file('foo_file', 'foo')
self._run(['git', 'add', 'foo_file'])
self._run(['git', 'commit', '-am', 'dummy commit'])
self.untracking_scm = detect_scm_system(self.untracking_checkout_path)
self.tracking_git_checkout_path = self._mkdtemp(suffix="git_test_checkout")
self._run(['git', 'clone', '--quiet', self.untracking_checkout_path, self.tracking_git_checkout_path])
self._chdir(self.tracking_git_checkout_path)
self.tracking_scm = detect_scm_system(self.tracking_git_checkout_path)
def _tear_down_git_checkouts(self):
self._run(['rm', '-rf', self.tracking_git_checkout_path])
self._run(['rm', '-rf', self.untracking_checkout_path])
def test_remote_branch_ref(self):
self.assertEqual(self.tracking_scm._remote_branch_ref(), 'refs/remotes/origin/master')
self._chdir(self.untracking_checkout_path)
self.assertRaises(ScriptError, self.untracking_scm._remote_branch_ref)
def test_multiple_remotes(self):
self._run(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote1'])
self._run(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote2'])
self.assertEqual(self.tracking_scm._remote_branch_ref(), 'remote1')
def test_create_patch(self):
self._write_text_file('test_file_commit1', 'contents')
self._run(['git', 'add', 'test_file_commit1'])
scm = self.tracking_scm
scm.commit_locally_with_message('message')
patch = scm.create_patch()
self.assertNotRegexpMatches(patch, r'Subversion Revision:')
def test_exists(self):
scm = self.untracking_scm
self._shared_test_exists(scm, scm.commit_locally_with_message)
def test_rename_files(self):
scm = self.tracking_scm
scm.move('foo_file', 'bar_file')
scm.commit_locally_with_message('message')
class GitSVNTest(SCMTestBase):
def setUp(self):
super(GitSVNTest, self).setUp()
self._set_up_svn_checkout()
self._set_up_gitsvn_checkout()
self.scm = detect_scm_system(self.git_checkout_path)
self.scm.svn_server_realm = None
def tearDown(self):
super(GitSVNTest, self).tearDown()
self._tear_down_svn_checkout()
self._tear_down_gitsvn_checkout()
def _set_up_gitsvn_checkout(self):
self.git_checkout_path = self._mkdtemp(suffix="git_test_checkout")
# --quiet doesn't make git svn silent
self._run_silent(['git', 'svn', 'clone', '-T', 'trunk', self.svn_repo_url, self.git_checkout_path])
self._chdir(self.git_checkout_path)
self.git_v2 = self._run(['git', '--version']).startswith('git version 2')
if self.git_v2:
# The semantics of 'git svn clone -T' changed in v2 (apparently), so the branch names are different.
# This works around it, for compatibility w/ v1.
self._run_silent(['git', 'branch', 'trunk', 'origin/trunk'])
def _tear_down_gitsvn_checkout(self):
self._rmtree(self.git_checkout_path)
def test_detection(self):
self.assertEqual(self.scm.display_name(), "git")
self.assertEqual(self.scm.supports_local_commits(), True)
def test_read_git_config(self):
key = 'test.git-config'
value = 'git-config value'
self._run(['git', 'config', key, value])
self.assertEqual(self.scm.read_git_config(key), value)
def test_local_commits(self):
test_file = self._join(self.git_checkout_path, 'test_file')
self._write_text_file(test_file, 'foo')
self._run(['git', 'commit', '-a', '-m', 'local commit'])
self.assertEqual(len(self.scm._local_commits()), 1)
def test_discard_local_commits(self):
test_file = self._join(self.git_checkout_path, 'test_file')
self._write_text_file(test_file, 'foo')
self._run(['git', 'commit', '-a', '-m', 'local commit'])
self.assertEqual(len(self.scm._local_commits()), 1)
self.scm._discard_local_commits()
self.assertEqual(len(self.scm._local_commits()), 0)
def test_delete_branch(self):
new_branch = 'foo'
self._run(['git', 'checkout', '-b', new_branch])
self.assertEqual(self._run(['git', 'symbolic-ref', 'HEAD']).strip(), 'refs/heads/' + new_branch)
self._run(['git', 'checkout', '-b', 'bar'])
self.scm.delete_branch(new_branch)
self.assertNotRegexpMatches(self._run(['git', 'branch']), r'foo')
def test_rebase_in_progress(self):
svn_test_file = self._join(self.svn_checkout_path, 'test_file')
self._write_text_file(svn_test_file, "svn_checkout")
self._run(['svn', 'commit', '--message', 'commit to conflict with git commit'], cwd=self.svn_checkout_path)
git_test_file = self._join(self.git_checkout_path, 'test_file')
self._write_text_file(git_test_file, "git_checkout")
self._run(['git', 'commit', '-a', '-m', 'commit to be thrown away by rebase abort'])
# Should fail due to a conflict leaving us mid-rebase.
        # We use self._run_silent because --quiet doesn't actually make git svn silent.
self.assertRaises(ScriptError, self._run_silent, ['git', 'svn', '--quiet', 'rebase'])
self.assertTrue(self.scm._rebase_in_progress())
# Make sure our cleanup works.
self.scm._discard_working_directory_changes()
self.assertFalse(self.scm._rebase_in_progress())
# Make sure cleanup doesn't throw when no rebase is in progress.
self.scm._discard_working_directory_changes()
def _local_commit(self, filename, contents, message):
self._write_text_file(filename, contents)
self._run(['git', 'add', filename])
self.scm.commit_locally_with_message(message)
def _one_local_commit(self):
self._local_commit('test_file_commit1', 'more test content', 'another test commit')
def _one_local_commit_plus_working_copy_changes(self):
self._one_local_commit()
self._write_text_file('test_file_commit2', 'still more test content')
self._run(['git', 'add', 'test_file_commit2'])
def _second_local_commit(self):
self._local_commit('test_file_commit2', 'still more test content', 'yet another test commit')
def _two_local_commits(self):
self._one_local_commit()
self._second_local_commit()
def _three_local_commits(self):
self._local_commit('test_file_commit0', 'more test content', 'another test commit')
self._two_local_commits()
def test_locally_commit_all_working_copy_changes(self):
self._local_commit('test_file', 'test content', 'test commit')
self._write_text_file('test_file', 'changed test content')
self.assertTrue(self.scm.has_working_directory_changes())
self.scm.commit_locally_with_message('all working copy changes')
self.assertFalse(self.scm.has_working_directory_changes())
def test_locally_commit_no_working_copy_changes(self):
self._local_commit('test_file', 'test content', 'test commit')
self._write_text_file('test_file', 'changed test content')
self.assertTrue(self.scm.has_working_directory_changes())
self.assertRaises(ScriptError, self.scm.commit_locally_with_message, 'no working copy changes', False)
def _test_upstream_branch(self):
self._run(['git', 'checkout', '-t', '-b', 'my-branch'])
self._run(['git', 'checkout', '-t', '-b', 'my-second-branch'])
self.assertEqual(self.scm._upstream_branch(), 'my-branch')
def test_remote_branch_ref(self):
remote_branch_ref = self.scm._remote_branch_ref()
if self.git_v2:
self.assertEqual(remote_branch_ref, 'refs/remotes/origin/trunk')
else:
self.assertEqual(remote_branch_ref, 'refs/remotes/trunk')
def test_create_patch_local_plus_working_copy(self):
self._one_local_commit_plus_working_copy_changes()
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'test_file_commit1')
self.assertRegexpMatches(patch, r'test_file_commit2')
def test_create_patch(self):
self._one_local_commit_plus_working_copy_changes()
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'test_file_commit2')
self.assertRegexpMatches(patch, r'test_file_commit1')
self.assertRegexpMatches(patch, r'Subversion Revision: 5')
def test_create_patch_after_merge(self):
self._run(['git', 'checkout', '-b', 'dummy-branch', 'trunk~3'])
self._one_local_commit()
self._run(['git', 'merge', 'trunk'])
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'test_file_commit1')
self.assertRegexpMatches(patch, r'Subversion Revision: 5')
def test_create_patch_with_changed_files(self):
self._one_local_commit_plus_working_copy_changes()
patch = self.scm.create_patch(changed_files=['test_file_commit2'])
self.assertRegexpMatches(patch, r'test_file_commit2')
def test_create_patch_with_rm_and_changed_files(self):
self._one_local_commit_plus_working_copy_changes()
self._remove('test_file_commit1')
patch = self.scm.create_patch()
patch_with_changed_files = self.scm.create_patch(changed_files=['test_file_commit1', 'test_file_commit2'])
self.assertEqual(patch, patch_with_changed_files)
def test_create_patch_git_commit(self):
self._two_local_commits()
patch = self.scm.create_patch(git_commit="HEAD^")
self.assertRegexpMatches(patch, r'test_file_commit1')
self.assertNotRegexpMatches(patch, r'test_file_commit2')
def test_create_patch_git_commit_range(self):
self._three_local_commits()
patch = self.scm.create_patch(git_commit="HEAD~2..HEAD")
self.assertNotRegexpMatches(patch, r'test_file_commit0')
self.assertRegexpMatches(patch, r'test_file_commit2')
self.assertRegexpMatches(patch, r'test_file_commit1')
def test_create_patch_working_copy_only(self):
self._one_local_commit_plus_working_copy_changes()
patch = self.scm.create_patch(git_commit="HEAD....")
self.assertNotRegexpMatches(patch, r'test_file_commit1')
self.assertRegexpMatches(patch, r'test_file_commit2')
def test_create_patch_multiple_local_commits(self):
self._two_local_commits()
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'test_file_commit2')
self.assertRegexpMatches(patch, r'test_file_commit1')
def test_create_patch_not_synced(self):
self._run(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
patch = self.scm.create_patch()
self.assertNotRegexpMatches(patch, r'test_file2')
self.assertRegexpMatches(patch, r'test_file_commit2')
self.assertRegexpMatches(patch, r'test_file_commit1')
def test_create_binary_patch(self):
# Create a git binary patch and check the contents.
test_file_name = 'binary_file'
test_file_path = self.fs.join(self.git_checkout_path, test_file_name)
file_contents = ''.join(map(chr, range(256)))
self._write_binary_file(test_file_path, file_contents)
self._run(['git', 'add', test_file_name])
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'\nliteral 0\n')
self.assertRegexpMatches(patch, r'\nliteral 256\n')
# Check if we can create a patch from a local commit.
self._write_binary_file(test_file_path, file_contents)
self._run(['git', 'add', test_file_name])
self._run(['git', 'commit', '-m', 'binary diff'])
patch_from_local_commit = self.scm.create_patch('HEAD')
self.assertRegexpMatches(patch_from_local_commit, r'\nliteral 0\n')
self.assertRegexpMatches(patch_from_local_commit, r'\nliteral 256\n')
def test_changed_files_local_plus_working_copy(self):
self._one_local_commit_plus_working_copy_changes()
files = self.scm.changed_files()
self.assertIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
# working copy should *not* be in the list.
files = self.scm.changed_files('trunk..')
self.assertIn('test_file_commit1', files)
self.assertNotIn('test_file_commit2', files)
# working copy *should* be in the list.
files = self.scm.changed_files('trunk....')
self.assertIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
def test_changed_files_git_commit(self):
self._two_local_commits()
files = self.scm.changed_files(git_commit="HEAD^")
self.assertIn('test_file_commit1', files)
self.assertNotIn('test_file_commit2', files)
def test_changed_files_git_commit_range(self):
self._three_local_commits()
files = self.scm.changed_files(git_commit="HEAD~2..HEAD")
self.assertNotIn('test_file_commit0', files)
self.assertIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
def test_changed_files_working_copy_only(self):
self._one_local_commit_plus_working_copy_changes()
files = self.scm.changed_files(git_commit="HEAD....")
self.assertNotIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
def test_changed_files_multiple_local_commits(self):
self._two_local_commits()
files = self.scm.changed_files()
self.assertIn('test_file_commit2', files)
self.assertIn('test_file_commit1', files)
def test_changed_files_not_synced(self):
self._run(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
files = self.scm.changed_files()
self.assertNotIn('test_file2', files)
self.assertIn('test_file_commit2', files)
self.assertIn('test_file_commit1', files)
def test_changed_files_upstream(self):
self._run(['git', 'checkout', '-t', '-b', 'my-branch'])
self._one_local_commit()
self._run(['git', 'checkout', '-t', '-b', 'my-second-branch'])
self._second_local_commit()
self._write_text_file('test_file_commit0', 'more test content')
self._run(['git', 'add', 'test_file_commit0'])
        # equivalent to 'git diff my-branch..HEAD', should not include working changes
files = self.scm.changed_files(git_commit='UPSTREAM..')
self.assertNotIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
self.assertNotIn('test_file_commit0', files)
# equivalent to 'git diff my-branch', *should* include working changes
files = self.scm.changed_files(git_commit='UPSTREAM....')
self.assertNotIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
self.assertIn('test_file_commit0', files)
def test_add_recursively(self):
self._shared_test_add_recursively()
def test_delete(self):
self._two_local_commits()
self.scm.delete('test_file_commit1')
self.assertIn("test_file_commit1", self.scm._deleted_files())
def test_delete_list(self):
self._two_local_commits()
self.scm.delete_list(["test_file_commit1", "test_file_commit2"])
self.assertIn("test_file_commit1", self.scm._deleted_files())
self.assertIn("test_file_commit2", self.scm._deleted_files())
def test_delete_recursively(self):
self._shared_test_delete_recursively()
def test_delete_recursively_or_not(self):
self._shared_test_delete_recursively_or_not()
def test_move(self):
self._shared_test_move()
def test_move_recursive(self):
self._shared_test_move_recursive()
def test_exists(self):
self._shared_test_exists(self.scm, self.scm.commit_locally_with_message)
class GitTestWithMock(SCMTestBase):
def make_scm(self):
scm = Git(cwd=".", executive=MockExecutive(), filesystem=MockFileSystem())
scm.read_git_config = lambda *args, **kw: "MOCKKEY:MOCKVALUE"
return scm
def test_timestamp_of_revision(self):
scm = self.make_scm()
scm.find_checkout_root = lambda path: ''
scm._run_git = lambda args: 'Date: 2013-02-08 08:05:49 +0000'
self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-08T08:05:49Z')
scm._run_git = lambda args: 'Date: 2013-02-08 01:02:03 +0130'
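        # Note (ours, not in the original test): 01:02:03 at UTC+01:30 is
        # 23:32:03 UTC on the previous day.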
self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-07T23:32:03Z')
scm._run_git = lambda args: 'Date: 2013-02-08 01:55:21 -0800'
self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-08T09:55:21Z')
| bsd-3-clause |
ArcaniteSolutions/truffe2 | truffe2/users/migrations/0005_auto__add_field_truffeuser_email_perso__add_unique_truffeuser_email.py | 2 | 5130 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TruffeUser.email_perso'
db.add_column(u'users_truffeuser', 'email_perso',
self.gf('django.db.models.fields.EmailField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding unique constraint on 'TruffeUser', fields ['email']
db.create_unique(u'users_truffeuser', ['email'])
def backwards(self, orm):
# Deleting field 'TruffeUser.email_perso'
db.delete_column(u'users_truffeuser', 'email_perso')
# Removing unique constraint on 'TruffeUser', fields ['email']
db.delete_unique(u'users_truffeuser', ['email'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'users.truffeuser': {
'Meta': {'object_name': 'TruffeUser'},
'adresse': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'default': "'.'", 'max_length': '1'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255'}),
'email_perso': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'iban_ou_ccp': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'nom_banque': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'users.userprivacy': {
'Meta': {'object_name': 'UserPrivacy'},
'field': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
}
}
complete_apps = ['users']
| bsd-2-clause |
partofthething/home-assistant | homeassistant/components/sky_hub/device_tracker.py | 13 | 2183 | """Support for Sky Hub."""
import logging
from pyskyqhub.skyq_hub import SkyQHub
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Optional(CONF_HOST): cv.string})
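# Illustrative configuration.yaml entry (a sketch, not from the original file):
#   device_tracker:
#     - platform: sky_hub
#       host: 192.168.1.254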
async def async_get_scanner(hass, config):
"""Return a Sky Hub scanner if successful."""
host = config[DOMAIN].get(CONF_HOST, "192.168.1.254")
websession = async_get_clientsession(hass)
hub = SkyQHub(websession, host)
_LOGGER.debug("Initialising Sky Hub")
await hub.async_connect()
if hub.success_init:
scanner = SkyHubDeviceScanner(hub)
return scanner
return None
class SkyHubDeviceScanner(DeviceScanner):
"""This class queries a Sky Hub router."""
def __init__(self, hub):
"""Initialise the scanner."""
self._hub = hub
self.last_results = {}
async def async_scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
await self._async_update_info()
return [device.mac for device in self.last_results]
async def async_get_device_name(self, device):
"""Return the name of the given device."""
name = next(
(result.name for result in self.last_results if result.mac == device),
None,
)
return name
async def async_get_extra_attributes(self, device):
"""Get extra attributes of a device."""
device = next(
(result for result in self.last_results if result.mac == device), None
)
if device is None:
return {}
return device.asdict()
async def _async_update_info(self):
"""Ensure the information from the Sky Hub is up to date."""
_LOGGER.debug("Scanning")
data = await self._hub.async_get_skyhub_data()
if not data:
return
self.last_results = data
| mit |
Freso/morituri | whipper/test/test_common_accurip.py | 2 | 10499 | # -*- Mode: Python; test-case-name: whipper.test.test_common_accurip -*-
# vi:si:et:sw=4:sts=4:ts=4
import sys
from StringIO import StringIO
from os import chmod, makedirs
from os.path import dirname, exists, join
from shutil import copy, rmtree
from tempfile import mkdtemp
from unittest import TestCase
from whipper.common import accurip
from whipper.common.accurip import (
calculate_checksums, get_db_entry, print_report, verify_result,
_split_responses, EntryNotFound
)
from whipper.result.result import RipResult, TrackResult
class TestAccurateRipResponse(TestCase):
@classmethod
def setUpClass(cls):
cls.path = 'c/1/2/dBAR-002-0000f21c-00027ef8-05021002.bin'
cls.entry = _split_responses(
open(join(dirname(__file__), cls.path[6:])).read()
)
cls.other_path = '4/8/2/dBAR-011-0010e284-009228a3-9809ff0b.bin'
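        # The three-character directory prefix is the last three hex digits of
        # discId1 in reverse order (0000f21c -> c/1/2, 0010e284 -> 4/8/2),
        # mirroring the AccurateRip database URL layout (our reading, not part
        # of the original test).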
def setUp(self):
self.cache_dir = mkdtemp(suffix='whipper_accurip_cache_test')
accurip._CACHE_DIR = self.cache_dir
def cleanup(cachedir):
chmod(cachedir, 0755)
rmtree(cachedir)
self.addCleanup(cleanup, self.cache_dir)
def test_uses_cache_dir(self):
# copy normal entry into other entry's place
makedirs(dirname(join(self.cache_dir, self.other_path)))
copy(
join(dirname(__file__), self.path[6:]),
join(self.cache_dir, self.other_path)
)
# ask cache for other entry and assert cached entry equals normal entry
self.assertEquals(self.entry, get_db_entry(self.other_path))
def test_raises_entrynotfound_for_no_entry(self):
with self.assertRaises(EntryNotFound):
get_db_entry('definitely_a_404')
def test_can_return_entry_without_saving(self):
chmod(self.cache_dir, 0)
self.assertEqual(get_db_entry(self.path), self.entry)
chmod(self.cache_dir, 0755)
self.assertFalse(exists(join(self.cache_dir, self.path)))
def test_retrieves_and_saves_accuraterip_entry(self):
# for path, entry in zip(self.paths[0], self.entries):
self.assertFalse(exists(join(self.cache_dir, self.path)))
self.assertEquals(get_db_entry(self.path), self.entry)
self.assertTrue(exists(join(self.cache_dir, self.path)))
def test_AccurateRipResponse_parses_correctly(self):
responses = get_db_entry(self.path)
self.assertEquals(len(responses), 2)
self.assertEquals(responses[0].num_tracks, 2)
self.assertEquals(responses[0].discId1, '0000f21c')
self.assertEquals(responses[0].discId2, '00027ef8')
self.assertEquals(responses[0].cddbDiscId, '05021002')
self.assertEquals(responses[0].confidences[0], 12)
self.assertEquals(responses[0].confidences[1], 20)
self.assertEquals(responses[0].checksums[0], '284fc705')
self.assertEquals(responses[0].checksums[1], '9cc1f32e')
self.assertEquals(responses[1].num_tracks, 2)
self.assertEquals(responses[1].discId1, '0000f21c')
self.assertEquals(responses[1].discId2, '00027ef8')
self.assertEquals(responses[1].cddbDiscId, '05021002')
self.assertEquals(responses[1].confidences[0], 4)
self.assertEquals(responses[1].confidences[1], 4)
self.assertEquals(responses[1].checksums[0], 'dc77f9ab')
self.assertEquals(responses[1].checksums[1], 'dd97d2c3')
# XXX: test arc.py
class TestCalculateChecksums(TestCase):
def test_returns_none_for_bad_files(self):
self.assertEquals(
calculate_checksums(['/does/not/exist']),
{'v1': [None], 'v2': [None]}
)
# TODO: test success when file exists
class TestVerifyResult(TestCase):
@classmethod
def setUpClass(cls):
path = 'c/1/2/dBAR-002-0000f21c-00027ef8-05021002.bin'
cls.responses = _split_responses(
open(join(dirname(__file__), path[6:])).read()
)
cls.checksums = {
'v1': ['284fc705', '9cc1f32e'],
'v2': ['dc77f9ab', 'dd97d2c3'],
}
def setUp(self):
self.result = RipResult()
for n in range(1, 2 + 1):
track = TrackResult()
track.number = n
self.result.tracks.append(track)
def test_empty_result_returns_false(self):
self.assertEquals(
verify_result(RipResult(), self.responses, self.checksums),
False
)
def test_empty_responses_returns_false(self):
self.assertEquals(
verify_result(self.result, [], self.checksums),
False
)
# XXX: would this happen?
def test_empty_checksums_returns_false(self):
self.assertEquals(
verify_result(self.result, self.responses, {}),
False
)
def test_wrong_checksums_returns_false(self):
self.assertEquals(
verify_result(self.result, self.responses, {
'v1': ['deadbeef', '89abcdef'],
'v2': ['76543210', '01234567']
}),
False
)
def test_incomplete_checksums(self):
self.assertEquals(
verify_result(self.result, self.responses, {
'v1': ['284fc705', '9cc1f32e'],
'v2': [None, 'dd97d2c3'],
}),
True
)
self.assertEquals(
verify_result(self.result, self.responses, {
'v1': ['284fc705', None],
'v2': ['dc77f9ab', 'dd97d2c3'],
}),
True
)
self.assertEquals(
verify_result(self.result, self.responses, {
'v1': ['284fc705', None],
'v2': [None, 'dd97d2c3'],
}),
True
)
def test_matches_only_v1_or_v2_responses(self):
self.assertEquals(
verify_result(
self.result, [self.responses[0]], self.checksums
),
True
)
self.assertEquals(
verify_result(
self.result, [self.responses[1]], self.checksums
),
True
)
def test_passes_with_htoa(self):
htoa = TrackResult()
htoa.number = 0
self.result.tracks.append(htoa)
self.assertEquals(
verify_result(self.result, self.responses, self.checksums),
True
)
def test_stores_accuraterip_results_on_result(self):
self.assertEquals(
verify_result(self.result, self.responses, self.checksums),
True
)
self.assertEquals(self.result.tracks[0].AR, {
'v1': {
'CRC': '284fc705',
'DBCRC': '284fc705',
'DBConfidence': 12,
},
'v2': {
'CRC': 'dc77f9ab',
'DBCRC': 'dc77f9ab',
'DBConfidence': 4,
},
'DBMaxConfidence': 12,
'DBMaxConfidenceCRC': '284fc705',
})
self.assertEquals(self.result.tracks[1].AR, {
'v1': {
'CRC': '9cc1f32e',
'DBCRC': '9cc1f32e',
'DBConfidence': 20,
},
'v2': {
'CRC': 'dd97d2c3',
'DBCRC': 'dd97d2c3',
'DBConfidence': 4,
},
'DBMaxConfidence': 20,
'DBMaxConfidenceCRC': '9cc1f32e',
})
class TestAccurateRipReport(TestCase):
def setUp(self):
sys.stdout = StringIO()
self.result = RipResult()
track = TrackResult()
track.number = 1
track.AR = {
'v1': {
'CRC': '284fc705',
'DBCRC': '284fc705',
'DBConfidence': 12,
},
'v2': {
'CRC': 'dc77f9ab',
'DBCRC': 'dc77f9ab',
'DBConfidence': 4,
},
'DBMaxConfidence': 12,
'DBMaxConfidenceCRC': '284fc705',
}
self.result.tracks.append(track)
def tearDown(self):
sys.stdout = sys.__stdout__
def test_report_no_result(self):
track = TrackResult()
track.number = 1
self.result.tracks[0] = track
print_report(self.result)
self.assertEquals(
sys.stdout.getvalue(),
'track 1: unknown (error)\n'
)
def test_track_not_found(self):
self.result.tracks[0].AR['DBMaxConfidence'] = None
print_report(self.result)
self.assertEquals(
sys.stdout.getvalue(),
'track 1: rip NOT accurate (not found) '
' v1 [284fc705], v2 [dc77f9ab], DB [notfound]\n'
)
def test_htoa_not_tracked(self):
self.result.tracks[0].number = 0
self.result.tracks[0].AR['v1']['CRC'] = None
self.result.tracks[0].AR['v2']['CRC'] = None
print_report(self.result)
self.assertEquals(
sys.stdout.getvalue(),
'track 0: unknown (not tracked)\n'
)
def test_report_v1_only(self):
self.result.tracks[0].AR['v2']['DBCRC'] = None
self.result.tracks[0].AR['v2']['DBConfidence'] = None
print_report(self.result)
self.assertEquals(
sys.stdout.getvalue(),
'track 1: rip accurate (max confidence 12)'
' v1 [284fc705], v2 [dc77f9ab], DB [284fc705]\n'
)
def test_report_v2_only(self):
self.result.tracks[0].AR['v1']['DBCRC'] = None
self.result.tracks[0].AR['v1']['DBConfidence'] = None
print_report(self.result)
self.assertEquals(
sys.stdout.getvalue(),
'track 1: rip accurate (confidence 4 of 12)'
' v1 [284fc705], v2 [dc77f9ab], DB [dc77f9ab]\n'
)
def test_report_v1_and_v2_max_confidence(self):
print_report(self.result)
self.assertEquals(
sys.stdout.getvalue(),
'track 1: rip accurate (max confidence 12)'
' v1 [284fc705], v2 [dc77f9ab], DB [284fc705, dc77f9ab]\n'
)
def test_report_v1_and_v2(self):
self.result.tracks[0].AR['DBMaxConfidence'] = 66
print_report(self.result)
self.assertEquals(
sys.stdout.getvalue(),
'track 1: rip accurate (confidence 12 of 66)'
' v1 [284fc705], v2 [dc77f9ab], DB [284fc705, dc77f9ab]\n'
)
| gpl-3.0 |
fluproject/FluScan | FluScan.py | 1 | 3420 | import socket
import pygeoip
import json
import requests
from ports import getcommonports, portscan
from ipaddress import ip_private, ip_add, ip_order
from hetrixblacklist import blacklistscan
from mongo import ConexionMongoDB
def geo(_file, _ip):
    ''' Look up the geolocation values of an IP address '''
try:
_geo = []
geoDb = pygeoip.GeoIP(_file)
ip_dictionary_values = geoDb.record_by_addr(_ip)
ip_list_values = ip_dictionary_values.items()
for item in ip_list_values:
_geo.append({item[0]:item[1]})
return _geo
except:
pass
def hosts(_ip):
    ''' Resolve the hostname of an IP address via reverse DNS '''
_host = None
try:
hosts_values = socket.gethostbyaddr(_ip)
_host = str(hosts_values[0])
return _host
except:
return _host
def ports(_ip):
    ''' Scan for open ports on an IP address '''
_ports = []
try:
common_ports = getcommonports()
for value in common_ports:
banner_exists, banner = portscan(_ip, value)
if not banner_exists:
_ports.append({"p":value, "name":str(common_ports[value]), "banner":str(banner)})
except:
pass
return _ports
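# Sketch of standalone use (dict keys mirror those built above; whether a
# given port is listed depends on the portscan() semantics imported from
# ports):
#   ports('8.8.8.8')  ->  e.g. [{"p": 53, "name": "domain", "banner": "..."}]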
def main(_ip1, _ip2, _mongocon, _token, _analyzeblacklist):
    ''' Main function, launches the main activities '''
conexion = ConexionMongoDB(_mongocon)
conexion.open_conexion()
_ip3 = _ip1
_ip3_prev = ""
    while (_ip3_prev != _ip2):
if not ip_private(_ip3):
print '[ip] '+_ip3
try:
document = []
_host = hosts(_ip3)
ip_list_values = geo('GeoIP/GeoLiteCity.dat', _ip3)
json_ports = ports(_ip3)
if _analyzeblacklist:
blacklistpages = blacklistscan(_ip3, _token)
dictionary = {"host":{_ip3.replace(".","_"):[{"ip":_ip3, "hostname":_host, "geo":ip_list_values, "ports":json_ports, "blacklist":blacklistpages}]}}
else:
dictionary = {"host":{_ip3.replace(".","_"):[{"ip":_ip3, "hostname":_host, "geo":ip_list_values, "ports":json_ports}]}}
document.append(dictionary)
conexion.insert_doc("hosts",document)
except ValueError:
                print 'Error on: %s > %s' % (_ip3, ValueError)
_ip3_prev = _ip3
_ip3 = ip_add(_ip3)
conexion.close_conexion()
del conexion
if __name__ == "__main__":
print '[FluScan], an IPv4 scanner. Created by http://www.flu-project.com & https://www.zerolynx.com\n'
# ************** ONLY MODIFY HERE **************
# Step 1: Put here the first ip address
ip1 = 'AAA.BBB.CCC.DDD'
# Step 2: Put here the last ip address
ip2 = 'AAA.BBB.CCC.DDD'
    # Step 3: Install MongoDB, and create a database with a collection named "hosts"
# Step 4: Put here your connection data
mongocon = 'mongodb://USER:PASS@MONGODB:PORT/DATABASE'
# Step 5: Put here your Hetrix Blacklist token
token = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    # If you don't want to analyze blacklists, you can set the following flag to False, and then you don't need to fill in the previous "token" field
analyzeblacklist = True
# ************** ONLY MODIFY HERE **************
ip2, ip1 = ip_order(ip1, ip2)
main(ip1, ip2, mongocon, token, analyzeblacklist)
| gpl-3.0 |
googleinterns/learnbase | learnbase/src/main/webapp/WEB-INF/Lib/readline.py | 13 | 5659 | import os.path
import sys
from warnings import warn
try:
_console = sys._jy_console
_reader = _console.reader
except AttributeError:
raise ImportError("Cannot access JLine2 setup")
try:
# jarjar-ed version
from org.python.jline.console.history import MemoryHistory
except ImportError:
# dev version from extlibs
from jline.console.history import MemoryHistory
__all__ = ['add_history', 'clear_history', 'get_begidx', 'get_completer',
'get_completer_delims', 'get_current_history_length',
'get_endidx', 'get_history_item', 'get_history_length',
'get_line_buffer', 'insert_text', 'parse_and_bind',
'read_history_file', 'read_init_file', 'redisplay',
'remove_history_item', 'set_completer', 'set_completer_delims',
'set_history_length', 'set_pre_input_hook', 'set_startup_hook',
'write_history_file']
_history_list = None
# The need for the following warnings should go away once we update
# JLine. Choosing ImportWarning as the closest warning to what is
# going on here, namely this is functionality not yet available on
# Jython.
class NotImplementedWarning(ImportWarning):
"""Not yet implemented by Jython"""
class SecurityWarning(ImportWarning):
"""Security manager prevents access to private field"""
def parse_and_bind(string):
pass
def get_line_buffer():
return str(_reader.cursorBuffer.buffer)
def insert_text(string):
_reader.putString(string)
def read_init_file(filename=None):
warn("read_init_file: %s" % (filename,), NotImplementedWarning, "module", 2)
def read_history_file(filename="~/.history"):
expanded = os.path.expanduser(filename)
with open(expanded) as f:
_reader.history.load(f)
def write_history_file(filename="~/.history"):
expanded = os.path.expanduser(filename)
with open(expanded, 'w') as f:
for line in _reader.history.entries():
f.write(line.value().encode("utf-8"))
f.write("\n")
def clear_history():
_reader.history.clear()
def add_history(line):
_reader.history.add(line)
def get_history_length():
return _reader.history.maxSize
def set_history_length(length):
_reader.history.maxSize = length
def get_current_history_length():
return _reader.history.size()
def get_history_item(index):
# JLine indexes from 0 while readline indexes from 1 (at least in test_readline)
if index>0:
return _reader.history.get(index-1)
else:
return None
def remove_history_item(pos):
_reader.history.remove(pos)
def replace_history_item(pos, line):
_reader.history.set(pos, line)
def redisplay():
_reader.redrawLine()
def set_startup_hook(function=None):
_console.startupHook = function
def set_pre_input_hook(function=None):
warn("set_pre_input_hook %s" % (function,), NotImplementedWarning, stacklevel=2)
_completer_function = None
def set_completer(function=None):
"""set_completer([function]) -> None
Set or remove the completer function.
The function is called as function(text, state),
for state in 0, 1, 2, ..., until it returns a non-string.
It should return the next possible completion starting with 'text'."""
global _completer_function
_completer_function = function
def complete_handler(buffer, cursor, candidates):
start = _get_delimited(buffer, cursor)[0]
delimited = buffer[start:cursor]
try:
sys.ps2
have_ps2 = True
except AttributeError:
have_ps2 = False
if (have_ps2 and _reader.prompt == sys.ps2) and (not delimited or delimited.isspace()):
            # Insert tab (as expanded to 4 spaces), but only if the
            # preceding text is whitespace/empty and we are in console
            # continuation; this is a planned feature for Python 3 per
# http://bugs.python.org/issue5845
#
# Ideally this would not expand tabs, in case of mixed
# copy&paste of tab-indented code, however JLine2 gets
            # confused about the cursor position during certain, but not
            # all, subsequent edits if the tab is backspaced
candidates.add(" " * 4)
return start
# TODO: if there are a reasonably large number of completions
# (need to get specific numbers), CPython 3.4 will show a
# message like so:
# >>>
# Display all 186 possibilities? (y or n)
# Currently Jython arbitrarily limits this to 100 and displays them
for state in xrange(100):
completion = None
try:
completion = function(delimited, state)
except:
pass
if completion:
candidates.add(completion)
else:
break
return start
_reader.addCompleter(complete_handler)
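# A minimal sketch of a completer usable with set_completer (the word list
# below is hypothetical; real applications supply their own):
#
#   WORDS = ['print', 'property', 'pass']
#   def word_completer(text, state):
#       matches = [w for w in WORDS if w.startswith(text)]
#       return matches[state] if state < len(matches) else None
#   set_completer(word_completer)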
def get_completer():
return _completer_function
def _get_delimited(buffer, cursor):
start = cursor
for i in xrange(cursor-1, -1, -1):
if buffer[i] in _completer_delims:
break
start = i
return start, cursor
def get_begidx():
return _get_delimited(str(_reader.cursorBuffer.buffer), _reader.cursorBuffer.cursor)[0]
def get_endidx():
return _get_delimited(str(_reader.cursorBuffer.buffer), _reader.cursorBuffer.cursor)[1]
def set_completer_delims(string):
global _completer_delims, _completer_delims_set
_completer_delims = string
_completer_delims_set = set(string)
def get_completer_delims():
return _completer_delims
set_completer_delims(' \t\n`~!@#$%^&*()-=+[{]}\\|;:\'",<>/?')
| apache-2.0 |
alexproca/askbot-devel | askbot/migrations/0132_auto__add_draftquestion__add_draftanswer.py | 15 | 30960 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'DraftQuestion'
db.create_table('askbot_draftquestion', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=300, null=True)),
('text', self.gf('django.db.models.fields.TextField')(null=True)),
('tagnames', self.gf('django.db.models.fields.CharField')(max_length=125, null=True)),
))
db.send_create_signal('askbot', ['DraftQuestion'])
# Adding model 'DraftAnswer'
db.create_table('askbot_draftanswer', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('thread', self.gf('django.db.models.fields.related.ForeignKey')(related_name='draft_answers', to=orm['askbot.Thread'])),
('author', self.gf('django.db.models.fields.related.ForeignKey')(related_name='draft_answers', to=orm['auth.User'])),
('text', self.gf('django.db.models.fields.TextField')(null=True)),
))
db.send_create_signal('askbot', ['DraftAnswer'])
def backwards(self, orm):
# Deleting model 'DraftQuestion'
db.delete_table('askbot_draftquestion')
# Deleting model 'DraftAnswer'
db.delete_table('askbot_draftanswer')
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Post']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'askbot.draftanswer': {
'Meta': {'object_name': 'DraftAnswer'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'draft_answers'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'draft_answers'", 'to': "orm['askbot.Thread']"})
},
'askbot.draftquestion': {
'Meta': {'object_name': 'DraftQuestion'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125', 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True'})
},
'askbot.emailfeedsetting': {
'Meta': {'unique_together': "(('subscriber', 'feed_type'),)", 'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.groupmembership': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupMembership'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_memberships'", 'to': "orm['askbot.Tag']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'group_memberships'", 'to': "orm['auth.User']"})
},
'askbot.groupprofile': {
'Meta': {'object_name': 'GroupProfile'},
'group_tag': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'group_profile'", 'unique': 'True', 'to': "orm['askbot.Tag']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'logo_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'moderate_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'preapproved_email_domains': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'preapproved_emails': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.post': {
'Meta': {'object_name': 'Post'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'group_posts'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'old_answer_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'old_comment_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'old_question_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'post_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'posts'", 'null': 'True', 'blank': 'True', 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.postflagreason': {
'Meta': {'object_name': 'PostFlagReason'},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'details': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'post_reject_reasons'", 'to': "orm['askbot.Post']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('post', 'revision'),)", 'object_name': 'PostRevision'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'approved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'approved_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'by_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Post']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.replyaddress': {
'Meta': {'object_name': 'ReplyAddress'},
'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'allowed_from_email': ('django.db.models.fields.EmailField', [], {'max_length': '150'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reply_addresses'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'reply_action': ('django.db.models.fields.CharField', [], {'default': "'auto_answer_or_comment'", 'max_length': '32'}),
'response_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'edit_addresses'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'used_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'suggested_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'suggested_tags'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'tag_wiki': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'described_tag'", 'unique': 'True', 'null': 'True', 'to': "orm['askbot.Post']"}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'accepted_answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'unused_favorite_threads'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_threads'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'group_threads'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unused_last_active_in_threads'", 'to': "orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'threads'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('user', 'voted_post'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'voted_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['askbot.Post']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_signature': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_marked_tags': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'subscribed_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot'] | gpl-3.0 |
ddboline/pylearn2 | pylearn2/sandbox/cuda_convnet/specialized_bench.py | 44 | 3906 | __authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano.compat.six.moves import xrange
from theano import shared
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.tensor.nnet.conv import conv2d
from theano import function
import time
import matplotlib.pyplot as plt
def make_funcs(batch_size, rows, cols, channels, filter_rows,
num_filters):
rng = np.random.RandomState([2012,10,9])
filter_cols = filter_rows
base_image_value = rng.uniform(-1., 1., (channels, rows, cols,
batch_size)).astype('float32')
base_filters_value = rng.uniform(-1., 1., (channels, filter_rows,
filter_cols, num_filters)).astype('float32')
images = shared(base_image_value)
filters = shared(base_filters_value, name='filters')
# bench.py should always be run in gpu mode so we should not need a gpu_from_host here
layer_1_detector = FilterActs()(images, filters)
layer_1_pooled_fake = layer_1_detector[:,0:layer_1_detector.shape[0]:2,
0:layer_1_detector.shape[1]:2, :]
base_filters2_value = rng.uniform(-1., 1., (num_filters, filter_rows,
filter_cols, num_filters)).astype('float32')
    filters2 = shared(base_filters2_value, name='filters2')
    # Feed the (fake) pooled layer-1 output into the second filter bank so
    # its channel count matches base_filters2_value.
    layer_2_detector = FilterActs()(layer_1_pooled_fake, filters2)
output = layer_2_detector
output_shared = shared( output.eval() )
cuda_convnet = function([], updates = { output_shared : output } )
cuda_convnet.name = 'cuda_convnet'
images_bc01 = base_image_value.transpose(3,0,1,2)
filters_bc01 = base_filters_value.transpose(3,0,1,2)
filters_bc01 = filters_bc01[:,:,::-1,::-1]
images_bc01 = shared(images_bc01)
filters_bc01 = shared(filters_bc01)
output_conv2d = conv2d(images_bc01, filters_bc01,
border_mode='valid')
output_conv2d_shared = shared(output_conv2d.eval())
baseline = function([], updates = { output_conv2d_shared : output_conv2d } )
baseline.name = 'baseline'
return cuda_convnet, baseline
def bench(f):
for i in xrange(3):
f()
trials = 10
t1 = time.time()
for i in xrange(trials):
f()
t2 = time.time()
return (t2-t1)/float(trials)
def get_speedup( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
return bench(baseline) / bench(cuda_convnet)
def get_time_per_10k_ex( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
batch_size = kwargs['batch_size']
return 10000 * bench(cuda_convnet) / float(batch_size)
def make_batch_size_plot(yfunc, yname, batch_sizes, rows, cols, channels, filter_rows, num_filters):
speedups = []
for batch_size in batch_sizes:
speedup = yfunc(batch_size = batch_size,
rows = rows,
cols = cols,
channels = channels,
filter_rows = filter_rows,
num_filters = num_filters)
speedups.append(speedup)
plt.plot(batch_sizes, speedups)
plt.title("cuda-convnet benchmark")
plt.xlabel("Batch size")
plt.ylabel(yname)
plt.show()
"""
make_batch_size_plot(get_speedup, "Speedup factor", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 7,
num_filters = 64)
"""
make_batch_size_plot(get_time_per_10k_ex, "Time per 10k examples", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 5,
num_filters = 64)
| bsd-3-clause |
Jobava/pootle | pootle/apps/virtualfolder/helpers.py | 7 | 3402 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from pootle.core.url_helpers import get_all_pootle_paths, split_pootle_path
from pootle_app.models import Directory
from pootle_store.models import Store
from .models import VirtualFolder
def extract_vfolder_from_path(pootle_path):
"""Return a valid virtual folder and an adjusted pootle path.
This accepts a pootle path and extracts the virtual folder from it (if
present) returning the virtual folder and the clean path.
    If the virtual folder cannot be determined, then the provided path is
    returned unchanged along with a None value.
The path /gl/firefox/browser/vfolder/chrome/file.po with the vfolder
virtual folder on it will be converted to
/gl/firefox/browser/chrome/file.po if the virtual folder exists and is
public.
    Keep in mind that several virtual folders with the same name might apply
    to the same path (this is possible as long as they have different
    locations), and in such cases the one with the highest priority is
    returned.
"""
lang, proj, dir_path, filename = split_pootle_path(pootle_path)
if ((filename and Store.objects.filter(pootle_path=pootle_path).exists()) or
Directory.objects.filter(pootle_path=pootle_path).exists()):
# If there is no vfolder then return the provided path.
return None, pootle_path
# Get the pootle paths for all the parents except the one for the file and
# those for the translation project and above.
all_dir_paths = [dir_path for dir_path in get_all_pootle_paths(pootle_path)
if dir_path.count('/') > 3 and dir_path.endswith('/')]
all_dir_paths = sorted(all_dir_paths)
for dir_path in all_dir_paths:
if Directory.objects.filter(pootle_path=dir_path).exists():
continue
# There is no directory with such path, and that might mean that it
# includes a virtual folder.
valid_starting_path, vfolder_name = dir_path.rstrip('/').rsplit('/', 1)
vfolders = VirtualFolder.objects.filter(
name=vfolder_name,
is_public=True
).order_by('-priority')
vfolder = None
for vf in vfolders:
# There might be several virtual folders with the same name, so get
# the first higher priority one that applies to the adjusted path.
try:
# Ensure that the virtual folder applies in the path.
vf.get_adjusted_location(valid_starting_path + '/')
except Exception:
continue
vfolder = vf
break
if vfolder is None:
# The virtual folder does not exist or is not public or doesn't
# apply in this location, so this is an invalid path.
break
valid_ending_path = pootle_path.replace(dir_path, '')
adjusted_path = '/'.join([valid_starting_path, valid_ending_path])
return vfolder, adjusted_path
# There is no virtual folder (or is not public) and the provided path
# doesn't exist, so let the calling code to deal with this.
return None, pootle_path
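# Example (sketch, mirroring the docstring above): with a public virtual
# folder named 'vfolder' applying under /gl/firefox/browser/, a call like
#   extract_vfolder_from_path('/gl/firefox/browser/vfolder/chrome/file.po')
# is expected to return (vfolder, '/gl/firefox/browser/chrome/file.po').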
| gpl-3.0 |
beatrizjesus/my-first-blog | pasta/Lib/site-packages/pip/_vendor/requests/models.py | 161 | 28111 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
json_dumps = json.dumps
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
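    # Sketch: for a prepared URL such as 'http://httpbin.org/get?a=1',
    # path_url evaluates to '/get?a=1'; when the path is empty it falls
    # back to '/'.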
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
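    # Sketch: both {'k1': 'v1', 'k2': 'v2'} and [('k1', 'v1'), ('k2', 'v2')]
    # encode to a query string such as 'k1=v1&k2=v2' (dict ordering is
    # arbitrary, per the docstring above).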
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, str):
fp = StringIO(fp)
if isinstance(fp, bytes):
fp = BytesIO(fp)
rf = RequestField(name=k, data=fp.read(),
filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
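    # Sketch: a files mapping may use (filename, fileobj[, content_type[,
    # headers]]) tuples, matching the tuple unpacking above, e.g.
    #   {'report': ('report.csv', open('report.csv', 'rb'), 'text/csv')}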
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
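# Sketch of the hook protocol (the 'response' event is the one dispatched
# after a request is sent; the callback below is hypothetical):
#
#   def log_url(response, **kwargs):
#       print(response.url)
#   req = Request('GET', 'http://httpbin.org/get', hooks={'response': log_url})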
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param json: json for the body to attach to the request (if data is not specified).
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None,
json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None,
json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = self._cookies.copy() if self._cookies is not None else None
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
        #: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/kennethreitz/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(url))
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
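# Illustrative result of prepare_url (not part of the original source; the
# URL and params are hypothetical):
#
#     >>> p = PreparedRequest()
#     >>> p.prepare_url('http://example.org/get', {'a': 'b'})
#     >>> p.url
#     'http://example.org/get?a=b'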
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
if json is not None:
content_type = 'application/json'
body = json_dumps(json)
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, dict))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length is not None:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data and json is None:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = builtin_str(l)
elif (self.method not in ('GET', 'HEAD')) and (self.headers.get('Content-Length') is None):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content',
'status_code',
'headers',
'url',
'history',
'encoding',
'reason',
'cookies',
'elapsed',
'request',
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta)
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanant versions of redirect"""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library"""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
try:
# Special case for urllib3.
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
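# Illustrative streaming usage (not part of the original source; the URL and
# file name are placeholders):
#
#     >>> r = requests.get('http://example.org/big-file', stream=True)
#     >>> with open('big-file', 'wb') as fd:
#     ...     for chunk in r.iter_content(chunk_size=8192):
#     ...         fd.write(chunk)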
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return json.loads(self.content.decode(encoding), **kwargs)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return json.loads(self.text, **kwargs)
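# Illustrative usage (not part of the original source):
#
#     >>> r = requests.get('http://httpbin.org/get')
#     >>> data = r.json()  # decoded JSON body as a dict
#     >>> data['url']
#     'http://httpbin.org/get'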
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
return self.raw.release_conn()
| mit |
Hybrid-Cloud/conveyor | conveyor/conveyorheat/engine/hot/functions.py | 1 | 24189 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import hashlib
import itertools
import six
from oslo_serialization import jsonutils
from conveyor.conveyorheat.common import exception
from conveyor.conveyorheat.engine import attributes
from conveyor.conveyorheat.engine.cfn import functions as cfn_funcs
from conveyor.conveyorheat.engine import function
from conveyor.i18n import _
class GetParam(function.Function):
"""A function for resolving parameter references.
Takes the form::
get_param: <param_name>
or::
get_param:
- <param_name>
- <path1>
- ...
"""
def __init__(self, stack, fn_name, args):
super(GetParam, self).__init__(stack, fn_name, args)
self.parameters = self.stack.parameters
def result(self):
args = function.resolve(self.args)
if not args:
raise ValueError(_('Function "%s" must have arguments') %
self.fn_name)
if isinstance(args, six.string_types):
param_name = args
path_components = []
elif isinstance(args, collections.Sequence):
param_name = args[0]
path_components = args[1:]
else:
raise TypeError(_('Argument to "%s" must be string or list') %
self.fn_name)
if not isinstance(param_name, six.string_types):
raise TypeError(_('Parameter name in "%s" must be string') %
self.fn_name)
try:
parameter = self.parameters[param_name]
except KeyError:
raise exception.UserParameterMissing(key=param_name)
def get_path_component(collection, key):
if not isinstance(collection, (collections.Mapping,
collections.Sequence)):
raise TypeError(_('"%s" can\'t traverse path') % self.fn_name)
if not isinstance(key, (six.string_types, int)):
raise TypeError(_('Path components in "%s" '
'must be strings') % self.fn_name)
if isinstance(collection, collections.Sequence
) and isinstance(key, six.string_types):
try:
key = int(key)
except ValueError:
raise TypeError(_("Path components in '%s' "
"must be a string that can be "
"parsed into an "
"integer.") % self.fn_name)
return collection[key]
try:
return six.moves.reduce(get_path_component, path_components,
parameter)
except (KeyError, IndexError, TypeError):
return ''
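# Illustrative HOT snippet resolved by this function (not part of the
# original source; the parameter name is hypothetical):
#
#     parameters:
#       db_ports:
#         type: comma_delimited_list
#         default: '3306,5432'
#     ...
#     port: { get_param: [db_ports, 0] }    # resolves to '3306'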
class GetAttThenSelect(cfn_funcs.GetAtt):
"""A function for resolving resource attributes.
Takes the form::
get_attr:
- <resource_name>
- <attribute_name>
- <path1>
- ...
"""
def _parse_args(self):
if (not isinstance(self.args, collections.Sequence) or
isinstance(self.args, six.string_types)):
raise TypeError(_('Argument to "%s" must be a list') %
self.fn_name)
if len(self.args) < 2:
raise ValueError(_('Arguments to "%s" must be of the form '
'[resource_name, attribute, (path), ...]') %
self.fn_name)
self._path_components = self.args[2:]
return tuple(self.args[:2])
def result(self):
attribute = super(GetAttThenSelect, self).result()
if attribute is None:
return None
path_components = function.resolve(self._path_components)
return attributes.select_from_attribute(attribute, path_components)
def dep_attrs(self, resource_name):
if self._resource().name == resource_name:
path = function.resolve(self._path_components)
attr = [function.resolve(self._attribute)]
if path:
attrs = [tuple(attr + path)]
else:
attrs = attr
else:
attrs = []
return itertools.chain(function.dep_attrs(self.args, resource_name),
attrs)
class GetAtt(GetAttThenSelect):
"""A function for resolving resource attributes.
Takes the form::
get_attr:
- <resource_name>
- <attribute_name>
- <path1>
- ...
"""
def result(self):
path_components = function.resolve(self._path_components)
attribute = function.resolve(self._attribute)
r = self._resource()
if (r.status in (r.IN_PROGRESS, r.COMPLETE) and
r.action in (r.CREATE, r.ADOPT, r.SUSPEND, r.RESUME,
r.UPDATE, r.CHECK, r.SNAPSHOT)):
return r.FnGetAtt(attribute, *path_components)
else:
return None
class GetAttAllAttributes(GetAtt):
"""A function for resolving resource attributes.
Takes the form::
get_attr:
- <resource_name>
- <attributes_name>
- <path1>
- ...
where <attributes_name> and <path1>, ... are optional arguments. If
<attributes_name> is omitted, the result is a dict of all the resource's
attributes; otherwise the function returns the resolved attribute.
"""
def _parse_args(self):
if not self.args:
raise ValueError(_('Arguments to "%s" can be one of the '
'following forms: [resource_name] or '
'[resource_name, attribute, (path), ...]'
) % self.fn_name)
elif isinstance(self.args, collections.Sequence):
if len(self.args) > 1:
return super(GetAttAllAttributes, self)._parse_args()
else:
return self.args[0], None
else:
raise TypeError(_('Argument to "%s" must be a list') %
self.fn_name)
def dep_attrs(self, resource_name):
"""Check if there is no attribute_name defined, return empty chain."""
if self._attribute is not None:
return super(GetAttAllAttributes, self).dep_attrs(resource_name)
elif self._resource().name == resource_name:
res = self._resource()
attrs = six.iterkeys(res.attributes_schema)
else:
attrs = []
return itertools.chain(function.dep_attrs(self.args,
resource_name), attrs)
def result(self):
if self._attribute is None:
r = self._resource()
if (r.status in (r.IN_PROGRESS, r.COMPLETE) and
r.action in (r.CREATE, r.ADOPT, r.SUSPEND, r.RESUME,
r.UPDATE, r.CHECK, r.SNAPSHOT)):
return r.FnGetAtts()
else:
return None
else:
return super(GetAttAllAttributes, self).result()
def _allow_without_attribute_name(self):
return True
class Replace(cfn_funcs.Replace):
"""A function for performing string substitutions.
Takes the form::
str_replace:
template: <key_1> <key_2>
params:
<key_1>: <value_1>
<key_2>: <value_2>
...
And resolves to::
"<value_1> <value_2>"
This is implemented using python str.replace on each key. Longer keys are
substituted before shorter ones, but the order in which replacements are
performed is otherwise undefined.
"""
def _parse_args(self):
if not isinstance(self.args, collections.Mapping):
raise TypeError(_('Arguments to "%s" must be a map') %
self.fn_name)
try:
mapping = self.args['params']
string = self.args['template']
except (KeyError, TypeError):
example = ('''str_replace:
template: This is var1 template var2
params:
var1: a
var2: string''')
raise KeyError(_('"str_replace" syntax should be %s') %
example)
else:
return mapping, string
class ReplaceJson(Replace):
"""A function for performing string substitutions.
Takes the form::
str_replace:
template: <key_1> <key_2>
params:
<key_1>: <value_1>
<key_2>: <value_2>
...
And resolves to::
"<value_1> <value_2>"
This is implemented using python str.replace on each key. Longer keys are
substituted before shorter ones, but the order in which replacements are
performed is otherwise undefined.
Non-string param values (e.g maps or lists) are serialized as JSON before
being substituted in.
"""
def result(self):
template = function.resolve(self._string)
mapping = function.resolve(self._mapping)
if not isinstance(template, six.string_types):
raise TypeError(_('"%s" template must be a string') % self.fn_name)
if not isinstance(mapping, collections.Mapping):
raise TypeError(_('"%s" params must be a map') % self.fn_name)
def replace(string, change):
placeholder, value = change
if not isinstance(placeholder, six.string_types):
raise TypeError(_('"%s" param placeholders must be strings') %
self.fn_name)
if value is None:
value = ''
if not isinstance(value,
(six.string_types, six.integer_types,
float, bool)):
if isinstance(value,
(collections.Mapping, collections.Sequence)):
try:
value = jsonutils.dumps(value, default=None)
except TypeError:
raise TypeError(_('"%(name)s" params must be strings, '
'numbers, list or map. '
'Failed to json serialize %(value)s'
) % {'name': self.fn_name,
'value': value})
else:
raise TypeError(_('"%s" params must be strings, numbers, '
'list or map.') % self.fn_name)
return string.replace(placeholder, six.text_type(value))
mapping = collections.OrderedDict(sorted(mapping.items(),
key=lambda t: len(t[0]),
reverse=True))
return six.moves.reduce(replace, six.iteritems(mapping), template)
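# Illustrative template snippet (not part of the original source; values are
# hypothetical):
#
#     str_replace:
#       template: listening on $host:$port
#       params:
#         $host: 10.0.0.4
#         $port: 8080
#
# resolves to "listening on 10.0.0.4:8080"; the integer param is converted
# to text before substitution.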
class GetFile(function.Function):
"""A function for including a file inline.
Takes the form::
get_file: <file_key>
And resolves to the content stored in the files dictionary under the given
key.
"""
def __init__(self, stack, fn_name, args):
super(GetFile, self).__init__(stack, fn_name, args)
self.files = self.stack.t.files
def result(self):
args = function.resolve(self.args)
if not (isinstance(args, six.string_types)):
raise TypeError(_('Argument to "%s" must be a string') %
self.fn_name)
f = self.files.get(args)
if f is None:
fmt_data = {'fn_name': self.fn_name,
'file_key': args}
raise ValueError(_('No content found in the "files" section for '
'%(fn_name)s path: %(file_key)s') % fmt_data)
return f
class Join(cfn_funcs.Join):
"""A function for joining strings.
Takes the form::
{ "list_join" : [ "<delim>", [ "<string_1>", "<string_2>", ... ] ] }
And resolves to::
"<string_1><delim><string_2><delim>..."
"""
class JoinMultiple(function.Function):
"""A function for joining one or more lists of strings.
Takes the form::
{ "list_join" : [ "<delim>", [ "<string_1>", "<string_2>", ... ] ] }
And resolves to::
"<string_1><delim><string_2><delim>..."
Optionally multiple lists may be specified, which will also be joined.
"""
def __init__(self, stack, fn_name, args):
super(JoinMultiple, self).__init__(stack, fn_name, args)
example = '"%s" : [ " ", [ "str1", "str2"] ...]' % fn_name
fmt_data = {'fn_name': fn_name,
'example': example}
if not isinstance(args, list):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
try:
self._delim = args[0]
self._joinlists = args[1:]
if len(self._joinlists) < 1:
raise ValueError
except (IndexError, ValueError):
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
def result(self):
r_joinlists = function.resolve(self._joinlists)
strings = []
for jl in r_joinlists:
if jl:
if (isinstance(jl, six.string_types) or
not isinstance(jl, collections.Sequence)):
raise TypeError(_('"%s" must operate on '
'a list') % self.fn_name)
strings += jl
delim = function.resolve(self._delim)
if not isinstance(delim, six.string_types):
raise TypeError(_('"%s" delimiter must be a string') %
self.fn_name)
def ensure_string(s):
msg = _('Items to join must be a string, map or list, not %s'
) % (repr(s)[:200])
if s is None:
return ''
elif isinstance(s, six.string_types):
return s
elif isinstance(s, (collections.Mapping, collections.Sequence)):
try:
return jsonutils.dumps(s, default=None)
except TypeError:
msg = _('Items to join must be a string, map or list. '
'%s failed json serialization'
) % (repr(s)[:200])
raise TypeError(msg)
return delim.join(ensure_string(s) for s in strings)
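# Illustrative template snippet (not part of the original source):
#
#     { "list_join": [", ", ["one", "two"], ["three"]] }
#
# resolves to "one, two, three"; non-string items are JSON-serialized first.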
class MapMerge(function.Function):
"""A function for merging maps.
Takes the form::
{ "map_merge" : [{'k1': 'v1', 'k2': 'v2'}, {'k1': 'v2'}] }
And resolves to::
{'k1': 'v2', 'k2': 'v2'}
"""
def __init__(self, stack, fn_name, args):
super(MapMerge, self).__init__(stack, fn_name, args)
example = (_('"%s" : [ { "key1": "val1" }, { "key2": "val2" } ]')
% fn_name)
self.fmt_data = {'fn_name': fn_name, 'example': example}
def result(self):
args = function.resolve(self.args)
if not isinstance(args, collections.Sequence):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % self.fmt_data)
def ensure_map(m):
if m is None:
return {}
elif isinstance(m, collections.Mapping):
return m
else:
msg = _('Incorrect arguments: Items to merge must be maps.')
raise TypeError(msg)
ret_map = {}
for m in args:
ret_map.update(ensure_map(m))
return ret_map
class ResourceFacade(cfn_funcs.ResourceFacade):
"""A function for retrieving data in a parent provider template.
A function for obtaining data from the facade resource from within the
corresponding provider template.
Takes the form::
resource_facade: <attribute_type>
where the valid attribute types are "metadata", "deletion_policy" and
"update_policy".
"""
_RESOURCE_ATTRIBUTES = (
METADATA, DELETION_POLICY, UPDATE_POLICY,
) = (
'metadata', 'deletion_policy', 'update_policy'
)
class Removed(function.Function):
"""This function existed in previous versions of HOT, but has been removed.
Check the HOT guide for an equivalent native function.
"""
def validate(self):
exp = (_("The function %s is not supported in this version of HOT.") %
self.fn_name)
raise exception.InvalidTemplateVersion(explanation=exp)
def result(self):
return super(Removed, self).result()
class Repeat(function.Function):
"""A function for iterating over a list of items.
Takes the form::
repeat:
template:
<body>
for_each:
<var>: <list>
The result is a new list of the same size as <list>, where each element
is a copy of <body> with any occurrences of <var> replaced with the
corresponding item of <list>.
"""
def __init__(self, stack, fn_name, args):
super(Repeat, self).__init__(stack, fn_name, args)
if not isinstance(self.args, collections.Mapping):
raise TypeError(_('Arguments to "%s" must be a map') %
self.fn_name)
# We don't check for invalid keys appearing here, which is wrong but
# it's probably too late to change
try:
self._for_each = self.args['for_each']
self._template = self.args['template']
except KeyError:
example = ('''repeat:
template: This is %var%
for_each:
%var%: ['a', 'b', 'c']''')
raise KeyError(_('"repeat" syntax should be %s') % example)
def validate(self):
super(Repeat, self).validate()
if not isinstance(self._for_each, function.Function):
if not isinstance(self._for_each, collections.Mapping):
raise TypeError(_('The "for_each" argument to "%s" must '
'contain a map') % self.fn_name)
if not all(self._valid_list(v) for v in self._for_each.values()):
raise TypeError(_('The values of the "for_each" argument '
'to "%s" must be lists') % self.fn_name)
@staticmethod
def _valid_list(arg):
return (isinstance(arg, (collections.Sequence,
function.Function)) and
not isinstance(arg, six.string_types))
def _do_replacement(self, keys, values, template):
if isinstance(template, six.string_types):
for (key, value) in zip(keys, values):
template = template.replace(key, value)
return template
elif isinstance(template, collections.Sequence):
return [self._do_replacement(keys, values, elem)
for elem in template]
elif isinstance(template, collections.Mapping):
return dict((self._do_replacement(keys, values, k),
self._do_replacement(keys, values, v))
for (k, v) in template.items())
def result(self):
for_each = function.resolve(self._for_each)
if not all(self._valid_list(l) for l in for_each.values()):
raise TypeError(_('The values of the "for_each" argument to '
'"%s" must be lists') % self.fn_name)
template = function.resolve(self._template)
keys, lists = six.moves.zip(*for_each.items())
return [self._do_replacement(keys, replacements, template)
for replacements in itertools.product(*lists)]
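# Illustrative template snippet (not part of the original source; names and
# values are hypothetical):
#
#     repeat:
#       template:
#         port: '%port%'
#       for_each:
#         '%port%': ['80', '443']
#
# resolves to [{'port': '80'}, {'port': '443'}].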
class Digest(function.Function):
"""A function for performing digest operations.
Takes the form::
digest:
- <algorithm>
- <value>
Valid algorithms are the ones provided natively by hashlib (md5, sha1,
sha224, sha256, sha384, and sha512) or any one provided by OpenSSL.
"""
def validate_usage(self, args):
if not (isinstance(args, list) and
all([isinstance(a, six.string_types) for a in args])):
msg = _('Argument to function "%s" must be a list of strings')
raise TypeError(msg % self.fn_name)
if len(args) != 2:
msg = _('Function "%s" usage: ["<algorithm>", "<value>"]')
raise ValueError(msg % self.fn_name)
if six.PY3:
algorithms = hashlib.algorithms_available
else:
algorithms = hashlib.algorithms
if args[0].lower() not in algorithms:
msg = _('Algorithm must be one of %s')
raise ValueError(msg % six.text_type(algorithms))
def digest(self, algorithm, value):
_hash = hashlib.new(algorithm)
_hash.update(six.b(value))
return _hash.hexdigest()
def result(self):
args = function.resolve(self.args)
self.validate_usage(args)
return self.digest(*args)
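# Illustrative template snippet (not part of the original source):
#
#     digest: ['sha512', 'mysecret']
#
# resolves to the hexadecimal SHA-512 digest of the string 'mysecret'.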
class StrSplit(function.Function):
"""A function for splitting delimited strings into a list.
Optionally extracting a specific list member by index.
Takes the form::
str_split: [delimiter, string, <index> ]
or::
str_split:
- delimiter
- string
- <index>
If <index> is specified, the specified list item will be returned;
otherwise, the whole list is returned, similar to get_attr with
path-based attributes accessing lists.
"""
def __init__(self, stack, fn_name, args):
super(StrSplit, self).__init__(stack, fn_name, args)
example = '"%s" : [ ",", "apples,pears", <index>]' % fn_name
self.fmt_data = {'fn_name': fn_name,
'example': example}
self.fn_name = fn_name
if isinstance(args, (six.string_types, collections.Mapping)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % self.fmt_data)
def result(self):
args = function.resolve(self.args)
try:
delim = args.pop(0)
str_to_split = args.pop(0)
except (AttributeError, IndexError):
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % self.fmt_data)
if str_to_split is None:
return None
split_list = str_to_split.split(delim)
# Optionally allow an index to be specified
if args:
try:
index = int(args.pop(0))
except ValueError:
raise ValueError(_('Incorrect index to "%(fn_name)s" '
'should be: %(example)s') % self.fmt_data)
else:
try:
res = split_list[index]
except IndexError:
raise ValueError(_('Incorrect index to "%(fn_name)s" '
'should be between 0 and '
'%(max_index)s')
% {'fn_name': self.fn_name,
'max_index': len(split_list) - 1})
else:
res = split_list
return res
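# Illustrative template snippets (not part of the original source):
#
#     str_split: [',', 'apples,pears']      # resolves to ['apples', 'pears']
#     str_split: [',', 'apples,pears', 1]   # resolves to 'pears'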
| apache-2.0 |
rizar/actor-critic-public | bin/pack_to_hdf5.py | 1 | 2224 | #!/usr/bin/env python
import h5py
import numpy
import argparse
import cPickle
from fuel.datasets.hdf5 import H5PYDataset
def pack(f, name, dataset_pathes):
datasets = [cPickle.load(open(path)) for path in dataset_pathes]
data = sum(datasets, [])
dtype = h5py.special_dtype(vlen=numpy.dtype('int32'))
table = f.create_dataset(name, (len(data),), dtype=dtype)
for i, example in enumerate(data):
table[i] = example
return numpy.array([len(d) for d in datasets])
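# Illustrative invocation (not part of the original source; file names are
# hypothetical):
#
#     python pack_to_hdf5.py -s train_src.pkl valid_src.pkl \
#         -t train_tgt.pkl valid_tgt.pkl -n train valid -i data.h5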
if __name__ == '__main__':
parser = argparse.ArgumentParser("Pack data to HDF5")
parser.add_argument('-s', dest='sources', nargs='*', help="Source datasets")
parser.add_argument('-t', dest='targets', nargs='*', help="Target datasets")
parser.add_argument('-n', dest='names', nargs='*', help="Dataset names")
parser.add_argument('-i', dest='add_ids',
action='store_true', default=False,
help="Add integer IDs")
parser.add_argument('dest', help="Destination")
args = parser.parse_args()
assert len(args.sources) == len(args.targets)
assert len(args.sources) == len(args.names)
with h5py.File(args.dest, mode='w') as f:
lengths = pack(f, "sources", args.sources)
assert numpy.all(lengths == pack(f, "targets", args.targets))
offsets = [0] + list(lengths.cumsum())
total_len = offsets[-1]
if args.add_ids:
id_table = f.create_dataset('ids',
data=numpy.arange(total_len,
dtype='int32'))
split_dict = {
args.names[i]:
{'sources': (offsets[i], offsets[i + 1]),
'targets': (offsets[i], offsets[i + 1]),
'ids': (offsets[i], offsets[i + 1])}
for i in range(len(args.names))}
else:
split_dict = {
args.names[i]:
{'sources': (offsets[i], offsets[i + 1]),
'targets': (offsets[i], offsets[i + 1])}
for i in range(len(args.names))}
f.attrs['split'] = H5PYDataset.create_split_array(split_dict)
| mit |
forumber/2.6.35.7-u8800pro-cm7 | tools/perf/scripts/python/syscall-counts.py | 944 | 1429 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
usage = "perf trace -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
pass
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40d %10d\n" % (id, val),
| gpl-2.0 |
caphrim007/ansible | lib/ansible/modules/windows/win_iis_webapppool.py | 43 | 7037 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_iis_webapppool
version_added: "2.0"
short_description: Configure IIS Web Application Pools
description:
- Creates, removes and configures an IIS Web Application Pool.
options:
attributes:
description:
- This field is a free form dictionary value for the application pool
attributes.
- These attributes are based on the naming standard at
U(https://www.iis.net/configreference/system.applicationhost/applicationpools/add#005),
see the examples section for more details on how to set this.
- You can also set the attributes of child elements like cpu and
processModel, see the examples to see how it is done.
- While you can use the numeric values for enums it is recommended to use
the enum name itself, e.g. use SpecificUser instead of 3 for
processModel.identityType.
- managedPipelineMode may be either "Integrated" or "Classic".
- startMode may be either "OnDemand" or "AlwaysRunning".
- Use C(state) module parameter to modify the state of the app pool.
- When trying to set 'processModel.password' and you receive a 'Value
does not fall within the expected range' error, you have a corrupted
keystore. Please follow
U(http://structuredsight.com/2014/10/26/im-out-of-range-youre-out-of-range/)
to help fix your host.
name:
description:
- Name of the application pool.
required: yes
state:
choices: [ absent, present, restarted, started, stopped ]
default: present
description:
- The state of the application pool.
- If C(absent) will ensure the app pool is removed.
- If C(present) will ensure the app pool is configured and exists.
- If C(restarted) will ensure the app pool exists and will restart, this
is never idempotent.
- If C(started) will ensure the app pool exists and is started.
- If C(stopped) will ensure the app pool exists and is stopped.
author:
- Henrik Wallström (@henrikwallstrom)
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: return information about an existing application pool
win_iis_webapppool:
name: DefaultAppPool
state: present
- name: create a new application pool in 'Started' state
win_iis_webapppool:
name: AppPool
state: started
- name: stop an application pool
win_iis_webapppool:
name: AppPool
state: stopped
- name: restart an application pool (non-idempotent)
win_iis_webapppool:
name: AppPool
state: restarted
- name: change application pool attributes using new dict style
win_iis_webapppool:
name: AppPool
attributes:
managedRuntimeVersion: v4.0
autoStart: no
- name: creates an application pool, sets attributes and starts it
win_iis_webapppool:
name: AnotherAppPool
state: started
attributes:
managedRuntimeVersion: v4.0
autoStart: no
# In the below example we are setting attributes in child element processModel
# https://www.iis.net/configreference/system.applicationhost/applicationpools/add/processmodel
- name: manage child element and set identity of application pool
win_iis_webapppool:
name: IdentitiyAppPool
state: started
attributes:
managedPipelineMode: Classic
processModel.identityType: SpecificUser
processModel.userName: '{{ansible_user}}'
processModel.password: '{{ansible_password}}'
processModel.loadUserProfile: True
- name: manage a timespan attribute
win_iis_webapppool:
name: TimespanAppPool
state: started
attributes:
# Timespan with full string "day:hour:minute:second.millisecond"
recycling.periodicRestart.time: "00:00:05:00.000000"
recycling.periodicRestart.schedule: ["00:10:00", "05:30:00"]
# Shortened timespan "hour:minute:second"
processModel.pingResponseTime: "00:03:00"
'''
RETURN = r'''
attributes:
description: Application Pool attributes that were set and processed by this
module invocation.
returned: success
type: dictionary
sample:
enable32BitAppOnWin64: "true"
managedRuntimeVersion: "v4.0"
managedPipelineMode: "Classic"
info:
description: Information on current state of the Application Pool. See
https://www.iis.net/configreference/system.applicationhost/applicationpools/add#005
for the full list of return attributes based on your IIS version.
returned: success
type: complex
sample:
contains:
attributes:
description: Key value pairs showing the current Application Pool attributes.
returned: success
type: dictionary
sample:
autoStart: true
managedRuntimeLoader: "webengine4.dll"
managedPipelineMode: "Classic"
name: "DefaultAppPool"
CLRConfigFile: ""
passAnonymousToken: true
applicationPoolSid: "S-1-5-82-1352790163-598702362-1775843902-1923651883-1762956711"
queueLength: 1000
managedRuntimeVersion: "v4.0"
state: "Started"
enableConfigurationOverride: true
startMode: "OnDemand"
enable32BitAppOnWin64: true
cpu:
description: Key value pairs showing the current Application Pool cpu attributes.
returned: success
type: dictionary
sample:
action: "NoAction"
limit: 0
resetInterval:
Days: 0
Hours: 0
failure:
description: Key value pairs showing the current Application Pool failure attributes.
returned: success
type: dictionary
sample:
autoShutdownExe: ""
orphanActionExe: ""
rapidFailProtectionInterval:
Days: 0
Hours: 0
name:
description: Name of Application Pool that was processed by this module invocation.
returned: success
type: string
sample: "DefaultAppPool"
processModel:
description: Key value pairs showing the current Application Pool processModel attributes.
returned: success
type: dictionary
sample:
identityType: "ApplicationPoolIdentity"
logonType: "LogonBatch"
pingInterval:
Days: 0
Hours: 0
recycling:
description: Key value pairs showing the current Application Pool recycling attributes.
returned: success
type: dictionary
sample:
disallowOverlappingRotation: false
disallowRotationOnConfigChange: false
logEventOnRecycle: "Time,Requests,Schedule,Memory,IsapiUnhealthy,OnDemand,ConfigChange,PrivateMemory"
state:
description: Current runtime state of the pool as the module completed.
returned: success
type: string
sample: "Started"
'''
| gpl-3.0 |
peterbarker/ardupilot-1 | Tools/LogAnalyzer/tests/TestPerformance.py | 261 | 3061 | from LogAnalyzer import Test,TestResult
import DataflashLog
class TestPerformance(Test):
'''check performance monitoring messages (PM) for issues with slow loops, etc'''
def __init__(self):
Test.__init__(self)
self.name = "PM"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
# this test should be valid for all vehicle types, just need to figure out why PM logging data is different in each
if logdata.vehicleType != "ArduCopter":
self.result.status = TestResult.StatusType.NA
return
# NOTE: we'll ignore MaxT altogether for now, it seems there are quite regularly one or two high values in there, even ignoring the ones expected after arm/disarm events
# gather info on arm/disarm lines, we will ignore the MaxT data from the first line found after each of these
# armingLines = []
# for line,ev in logdata.channels["EV"]["Id"].listData:
# if (ev == 10) or (ev == 11):
# armingLines.append(line)
# ignoreMaxTLines = []
# for maxT in logdata.channels["PM"]["MaxT"].listData:
# if not armingLines:
# break
# if maxT[0] > armingLines[0]:
# #print "Ignoring maxT from line %d, as it is the first PM line after arming on line %d" % (maxT[0],armingLines[0])
# ignoreMaxTLines.append(maxT[0])
# armingLines.pop(0)
if "PM" not in logdata.channels:
self.result.status = TestResult.StatusType.UNKNOWN
self.result.statusMessage = "No PM log data"
return
# check for slow loops, i.e. NLon greater than 6% of NLoop
maxPercentSlow = 0
maxPercentSlowLine = 0
slowLoopLineCount = 0
for i in range(len(logdata.channels["PM"]["NLon"].listData)):
(line, nLon) = logdata.channels["PM"]["NLon"].listData[i]
(line, nLoop) = logdata.channels["PM"]["NLoop"].listData[i]
(line, maxT) = logdata.channels["PM"]["MaxT"].listData[i]
percentSlow = (nLon / float(nLoop)) * 100
if percentSlow > 6.0:
slowLoopLineCount = slowLoopLineCount + 1
if percentSlow > maxPercentSlow:
maxPercentSlow = percentSlow
maxPercentSlowLine = line
#if (maxT > 13000) and line not in ignoreMaxTLines:
# print "MaxT of %d detected on line %d" % (maxT,line)
if (maxPercentSlow > 10) or (slowLoopLineCount > 6):
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = "%d slow loop lines found, max %.2f%% on line %d" % (slowLoopLineCount,maxPercentSlow,maxPercentSlowLine)
elif (maxPercentSlow > 6):
self.result.status = TestResult.StatusType.WARN
self.result.statusMessage = "%d slow loop lines found, max %.2f%% on line %d" % (slowLoopLineCount,maxPercentSlow,maxPercentSlowLine)
| gpl-3.0 |
argivaitv/argivaitv | plugin.video.SportsDevil/lib/utils/xbmcUtils.py | 27 | 2568 | # -*- coding: utf-8 -*-
import xbmcgui, xbmc, xbmcplugin
enable_debug = False
#######################################
# Xbmc Helpers
#######################################
def select(title, menuItems):
select = xbmcgui.Dialog().select(title, menuItems)
if select == -1:
return None
else:
return menuItems[select]
def getKeyboard(default = '', heading = '', hidden = False):
kboard = xbmc.Keyboard(default, heading, hidden)
kboard.doModal()
if kboard.isConfirmed():
return kboard.getText()
return ''
def getImage(title):
dialog = xbmcgui.Dialog()
image = dialog.browse(1, title, 'pictures', '.jpg|.png', True)
return image
def showMessage(msg):
xbmc.executebuiltin('Notification(SportsDevil,' + str(msg.encode('utf-8', 'ignore')) + ')')
def showBusyAnimation():
xbmc.executebuiltin( 'ActivateWindow(busydialog)' )
def hideBusyAnimation():
xbmc.executebuiltin( 'Dialog.Close(busydialog,true)' )
def closeAllDialogs():
xbmc.executebuiltin('Dialog.Close(all, true)')
def log(msg):
if enable_debug:
try:
xbmc.log(msg)
except:
xbmc.log(msg.encode('utf-8'))
def setSortMethodsForCurrentXBMCList(handle, sortKeys):
def addSortMethod(method):
xbmcplugin.addSortMethod(handle = handle, sortMethod = method)
if not sortKeys or sortKeys==[]:
addSortMethod(xbmcplugin.SORT_METHOD_UNSORTED)
else:
if 'name' in sortKeys:
addSortMethod(xbmcplugin.SORT_METHOD_LABEL)
if 'size' in sortKeys:
addSortMethod(xbmcplugin.SORT_METHOD_SIZE)
if 'duration' in sortKeys:
addSortMethod(xbmcplugin.SORT_METHOD_DURATION)
if 'genre' in sortKeys:
addSortMethod(xbmcplugin.SORT_METHOD_GENRE)
if 'rating' in sortKeys:
addSortMethod(xbmcplugin.SORT_METHOD_VIDEO_RATING)
if 'date' in sortKeys:
addSortMethod(xbmcplugin.SORT_METHOD_DATE)
if 'file' in sortKeys:
addSortMethod(xbmcplugin.SORT_METHOD_FILE)
def getContainerFolderPath():
return xbmc.getInfoLabel('Container.FolderPath')
def getListItemPath():
return xbmc.getInfoLabel('ListItem.Path')
def getCurrentWindow():
return xbmc.getInfoLabel('System.CurrentWindow')
def getCurrentControl():
return xbmc.getInfoLabel('System.CurrentControl')
def getCurrentWindowXmlFile():
return xbmc.getInfoLabel('Window.Property(xmlfile)') | gpl-2.0 |
jgraham/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/testdata/handlers/sub/no_wsh_at_the_end.py | 499 | 1839 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Correct signatures, wrong file name.
"""
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
request.connection.write(
'sub/no_wsh_at_the_end.py is called for %s, %s' %
(request.ws_resource, request.ws_protocol))
# vi:sts=4 sw=4 et
| mpl-2.0 |
nelmiux/CarnotKE | jyhton/lib-python/2.7/plat-mac/lib-scriptpackages/SystemEvents/System_Events_Suite.py | 82 | 3770 | """Suite System Events Suite: Terms and Events for controlling the System Events application
Level 1, version 1
Generated from /System/Library/CoreServices/System Events.app
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'sevs'
class System_Events_Suite_Events:
def do_script(self, _object, _attributes={}, **_arguments):
"""do script: Execute an OSA script.
Required argument: the object for the command
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'misc'
_subcode = 'dosc'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
class application(aetools.ComponentItem):
"""application - The System Events application """
want = 'capp'
class _Prop__3c_Inheritance_3e_(aetools.NProperty):
"""<Inheritance> - All of the properties of the superclass. """
which = 'c@#^'
want = 'capp'
_3c_Inheritance_3e_ = _Prop__3c_Inheritance_3e_()
class _Prop_folder_actions_enabled(aetools.NProperty):
"""folder actions enabled - Are Folder Actions currently being processed? """
which = 'faen'
want = 'bool'
folder_actions_enabled = _Prop_folder_actions_enabled()
class _Prop_properties(aetools.NProperty):
"""properties - every property of the System Events application """
which = 'pALL'
want = '****'
properties = _Prop_properties()
# element 'cdis' as ['name', 'indx', 'rele', 'rang', 'test']
# element 'cfol' as ['name', 'indx', 'rele', 'rang', 'test']
# element 'cobj' as ['name', 'indx', 'rele', 'rang', 'test']
# element 'cwin' as ['name', 'indx', 'rele', 'rang', 'test', 'ID ']
# element 'docu' as ['name', 'indx', 'rele', 'rang', 'test']
# element 'file' as ['name', 'indx', 'rele', 'rang', 'test']
# element 'foac' as ['name', 'indx', 'rele', 'rang', 'test']
# element 'logi' as ['name', 'indx', 'rele', 'rang', 'test']
# element 'pcap' as ['name', 'indx', 'rele', 'rang', 'test']
# element 'pcda' as ['name', 'indx', 'rele', 'rang', 'test']
# element 'prcs' as ['name', 'indx', 'rele', 'rang', 'test']
applications = application
application._superclassnames = []
import Disk_Folder_File_Suite
import Standard_Suite
import Folder_Actions_Suite
import Login_Items_Suite
import Processes_Suite
application._privpropdict = {
'_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_,
'folder_actions_enabled' : _Prop_folder_actions_enabled,
'properties' : _Prop_properties,
}
application._privelemdict = {
'application_process' : Processes_Suite.application_process,
'desk_accessory_process' : Processes_Suite.desk_accessory_process,
'disk' : Disk_Folder_File_Suite.disk,
'document' : Standard_Suite.document,
'file' : Disk_Folder_File_Suite.file,
'folder' : Disk_Folder_File_Suite.folder,
'folder_action' : Folder_Actions_Suite.folder_action,
'item' : Disk_Folder_File_Suite.item,
'login_item' : Login_Items_Suite.login_item,
'process' : Processes_Suite.process,
'window' : Standard_Suite.window,
}
#
# Indices of types declared in this module
#
_classdeclarations = {
'capp' : application,
}
_propdeclarations = {
'c@#^' : _Prop__3c_Inheritance_3e_,
'faen' : _Prop_folder_actions_enabled,
'pALL' : _Prop_properties,
}
_compdeclarations = {
}
_enumdeclarations = {
}
| apache-2.0 |
danny88br/cjdns | node_build/dependencies/libuv/build/gyp/test/prune_targets/gyptest-prune-targets.py | 139 | 2174 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies --root-target removes the unnecessary targets.
"""
import TestGyp
test = TestGyp.TestGyp()
build_error_code = {
'android': 2,
'cmake': 1,
'make': 2,
'msvs': 1,
'ninja': 1,
'xcode': 65,
}[test.format]
# By default, everything will be included.
test.run_gyp('test1.gyp')
test.build('test2.gyp', 'lib1')
test.build('test2.gyp', 'lib2')
test.build('test2.gyp', 'lib3')
test.build('test2.gyp', 'lib_indirect')
test.build('test1.gyp', 'program1')
test.build('test1.gyp', 'program2')
test.build('test1.gyp', 'program3')
# With deep dependencies of program1 only.
test.run_gyp('test1.gyp', '--root-target=program1')
test.build('test2.gyp', 'lib1')
test.build('test2.gyp', 'lib2', status=build_error_code, stderr=None)
test.build('test2.gyp', 'lib3', status=build_error_code, stderr=None)
test.build('test2.gyp', 'lib_indirect')
test.build('test1.gyp', 'program1')
test.build('test1.gyp', 'program2', status=build_error_code, stderr=None)
test.build('test1.gyp', 'program3', status=build_error_code, stderr=None)
# With deep dependencies of program2 only.
test.run_gyp('test1.gyp', '--root-target=program2')
test.build('test2.gyp', 'lib1', status=build_error_code, stderr=None)
test.build('test2.gyp', 'lib2')
test.build('test2.gyp', 'lib3', status=build_error_code, stderr=None)
test.build('test2.gyp', 'lib_indirect')
test.build('test1.gyp', 'program1', status=build_error_code, stderr=None)
test.build('test1.gyp', 'program2')
test.build('test1.gyp', 'program3', status=build_error_code, stderr=None)
# With deep dependencies of program1 and program2.
test.run_gyp('test1.gyp', '--root-target=program1', '--root-target=program2')
test.build('test2.gyp', 'lib1')
test.build('test2.gyp', 'lib2')
test.build('test2.gyp', 'lib3', status=build_error_code, stderr=None)
test.build('test2.gyp', 'lib_indirect')
test.build('test1.gyp', 'program1')
test.build('test1.gyp', 'program2')
test.build('test1.gyp', 'program3', status=build_error_code, stderr=None)
test.pass_test()
| gpl-3.0 |
Wonfee/android_kernel_asus_grouper | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
anirudhvenkats/clowdflows | csv2arff-read-only/csv2arff.py | 12 | 3968 | # -*- coding: cp1252 -*-
import csv
import sys
from xml.dom import minidom
def get_attributes(file_xml):
out = []
dom1 = minidom.parse(file_xml)
for node in dom1.getElementsByTagName('attribute'):
out.append({
'name': node.getAttribute('name') ,
'atype': node.getAttribute('atype'),
'format':node.getAttribute('format'),
'skip':node.getAttribute('skip')
})
#print out
return out
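# Illustrative attribute file (not part of the original source; the layout is
# inferred from the element and attribute names the parser above expects):
#
#     <csv name="test_dataset_1" delimiter=";">
#         <attribute name="temperature" atype="numeric" format="" skip=""/>
#         <attribute name="outcome" atype="class" format="" skip=""/>
#     </csv>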
def get_relation(file_xml):
dom1 = minidom.parse(file_xml)
out=''
delimiter=''
for node in dom1.getElementsByTagName('csv'):
out=node.getAttribute('name')
delimiter=node.getAttribute('delimiter');
if(len(delimiter)==0):
delimiter=';';
print delimiter
return out, delimiter
class csv_arff_converter:
def __init__(self,csv_file, attribute_file, file_out):
self.csv_file = csv_file
self.attribute_file = attribute_file
self.file_out = file_out
def run(self):
classes = []
#read attribute
self.relation_name, self.delimiter = get_relation(self.attribute_file)
attributes_list = get_attributes(self.attribute_file)
arff_data = '@RELATION ' + self.relation_name + '\n\n'
for i in attributes_list:
if (i['skip'] != 'yes'):
arff_data += '@ATTRIBUTE '+i['name']+' ' + i['atype']
if (i['atype']=='date'):
arff_data += ' '+i['format']
if (i['atype']=='class'):
arff_data += ' (#@#'+i['name'] + '#@#)'
arff_data +='\n'
classes.append('')
arff_data += '\n@DATA\n'
print classes
#open csv
reader = csv.reader(open(self.csv_file), delimiter=self.delimiter, quoting=csv.QUOTE_NONE)
rnum = 0
for row in reader:
#print row
buff = ''
pos = 0
#print len(row)
# watch out for the row length
for j in range(0, len(row)-1):
field = row[j]
if(attributes_list[pos]['skip'] != 'yes'):
if (pos > 0):
buff += ','
if(attributes_list[pos]['atype'] == 'string'):
field = "'" + field + "'"
buff += field
# if it's a class attribute, collect its values
if(attributes_list[pos]['atype'] == 'class'):
if (rnum > 0):
classes[pos]+= ','+ field
else:
classes[pos]+= field
pos += 1
buff += '\n'
arff_data += buff
rnum += 1
pos = 0
for a in classes:
j = a.split(',')
un = list(set(j))
#print un
if (len(un) > 0):
this_replacement = ",".join(un)
#print this_replacement
old_text = '#@#'+ attributes_list[pos]['name'] + '#@#'
#print old_text
arff_data = arff_data.replace(old_text, this_replacement)
pos += 1
#print arff_data
a = open(self.file_out, 'w')
a.write(arff_data)
a.close()
if __name__ == "__main__":
#csv_file = sys.argv[1]
#attribute_file = sys.argv[2]
csv_file = './test_csv2arff/test_dataset_1.csv'
attribute_file = './test_csv2arff/test_dataset_1.att'
instance = csv_arff_converter(csv_file, attribute_file, './test_csv2arff/output.arff')
instance.run()
| gpl-3.0 |
kobotoolbox/kobocat | onadata/apps/logger/management/commands/populate_xml_hashes_for_instances.py | 1 | 1720 | # coding: utf-8
'''
Django management command to populate `Instance` instances with hashes for use in duplicate
detection.
Relies solely on the `argparse` capabilities of `BaseCommand` for argument parsing and validation.
:Example:
python manage.py populate_xml_hashes_for_instances --repopulate --usernames someuser anotheruser
python manage.py populate_xml_hashes_for_instances --all
'''
from datetime import datetime
from django.core.management.base import BaseCommand
from ...models import Instance
class Command(BaseCommand):
def add_arguments(self, parser):
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'--all',
action='store_true',
help='Populate all `Instance` objects with hashes.',
)
group.add_argument(
'--usernames',
nargs='+',
help='Space-delimited list of usernames whose `Instance` objects '
'should be populated with hashes.'
)
parser.add_argument(
'--repopulate',
action='store_true',
help='Recalculate even `Instance` objects that already have '
'hashes.',
)
def handle(self, *_, **options):
# Populate the `Instance` hashes and track how long it took.
start_time = datetime.now()
instances_updated_total = Instance.populate_xml_hashes_for_instances(
usernames=options['usernames'],
repopulate=options['repopulate'],
)
execution_time = datetime.now() - start_time
print('Populated {} `Instance` hashes in {}.'.format(
instances_updated_total, execution_time))
| bsd-2-clause |
Guneet-Dhillon/mxnet | example/rcnn/rcnn/core/tester.py | 25 | 10193 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import cPickle
import os
import time
import mxnet as mx
import numpy as np
from module import MutableModule
from rcnn.logger import logger
from rcnn.config import config
from rcnn.io import image
from rcnn.processing.bbox_transform import bbox_pred, clip_boxes
from rcnn.processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper
class Predictor(object):
def __init__(self, symbol, data_names, label_names,
context=mx.cpu(), max_data_shapes=None,
provide_data=None, provide_label=None,
arg_params=None, aux_params=None):
self._mod = MutableModule(symbol, data_names, label_names,
context=context, max_data_shapes=max_data_shapes)
self._mod.bind(provide_data, provide_label, for_training=False)
self._mod.init_params(arg_params=arg_params, aux_params=aux_params)
def predict(self, data_batch):
self._mod.forward(data_batch)
return dict(zip(self._mod.output_names, self._mod.get_outputs()))
def im_proposal(predictor, data_batch, data_names, scale):
data_dict = dict(zip(data_names, data_batch.data))
output = predictor.predict(data_batch)
# drop the batch index
boxes = output['rois_output'].asnumpy()[:, 1:]
scores = output['rois_score'].asnumpy()
# transform to original scale
boxes = boxes / scale
return scores, boxes, data_dict
def generate_proposals(predictor, test_data, imdb, vis=False, thresh=0.):
"""
Generate detections results using RPN.
:param predictor: Predictor
:param test_data: data iterator, must be non-shuffled
:param imdb: image database
:param vis: controls visualization
:param thresh: thresh for valid detections
:return: list of detected boxes
"""
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
i = 0
t = time.time()
imdb_boxes = list()
original_boxes = list()
for im_info, data_batch in test_data:
t1 = time.time() - t
t = time.time()
scale = im_info[0, 2]
scores, boxes, data_dict = im_proposal(predictor, data_batch, data_names, scale)
t2 = time.time() - t
t = time.time()
# assemble proposals
dets = np.hstack((boxes, scores))
original_boxes.append(dets)
# filter proposals
keep = np.where(dets[:, 4:] > thresh)[0]
dets = dets[keep, :]
imdb_boxes.append(dets)
if vis:
vis_all_detection(data_dict['data'].asnumpy(), [dets], ['obj'], scale)
logger.info('generating %d/%d ' % (i + 1, imdb.num_images) +
'proposal %d ' % (dets.shape[0]) +
'data %.4fs net %.4fs' % (t1, t2))
i += 1
assert len(imdb_boxes) == imdb.num_images, 'calculations not complete'
# save results
rpn_folder = os.path.join(imdb.root_path, 'rpn_data')
if not os.path.exists(rpn_folder):
os.mkdir(rpn_folder)
rpn_file = os.path.join(rpn_folder, imdb.name + '_rpn.pkl')
with open(rpn_file, 'wb') as f:
cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL)
if thresh > 0:
full_rpn_file = os.path.join(rpn_folder, imdb.name + '_full_rpn.pkl')
with open(full_rpn_file, 'wb') as f:
cPickle.dump(original_boxes, f, cPickle.HIGHEST_PROTOCOL)
logger.info('wrote rpn proposals to %s' % rpn_file)
return imdb_boxes
def im_detect(predictor, data_batch, data_names, scale):
output = predictor.predict(data_batch)
data_dict = dict(zip(data_names, data_batch.data))
if config.TEST.HAS_RPN:
rois = output['rois_output'].asnumpy()[:, 1:]
else:
rois = data_dict['rois'].asnumpy().reshape((-1, 5))[:, 1:]
im_shape = data_dict['data'].shape
# save output
scores = output['cls_prob_reshape_output'].asnumpy()[0]
bbox_deltas = output['bbox_pred_reshape_output'].asnumpy()[0]
# post processing
pred_boxes = bbox_pred(rois, bbox_deltas)
pred_boxes = clip_boxes(pred_boxes, im_shape[-2:])
# we used scaled image & roi to train, so it is necessary to transform them back
pred_boxes = pred_boxes / scale
return scores, pred_boxes, data_dict
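# For context: bbox_pred applies the standard R-CNN box regression. A sketch
# of the assumed math (not a verbatim copy of rcnn/processing/bbox_transform.py):
#
#   w  = x2 - x1 + 1.0            # roi width
#   h  = y2 - y1 + 1.0            # roi height
#   cx = x1 + 0.5 * w             # roi center
#   cy = y1 + 0.5 * h
#   pred_cx = dx * w + cx         # (dx, dy, dw, dh) are the network deltas
#   pred_cy = dy * h + cy
#   pred_w  = np.exp(dw) * w
#   pred_h  = np.exp(dh) * h
#
# and the predicted corners are then recovered from the predicted center/size.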
def pred_eval(predictor, test_data, imdb, vis=False, thresh=1e-3):
"""
    Wrapper for calculating offline validation for faster data analysis.
    In this example, all thresholds are set by hand.
:param predictor: Predictor
:param test_data: data iterator, must be non-shuffle
:param imdb: image database
:param vis: controls visualization
:param thresh: valid detection threshold
:return:
"""
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
nms = py_nms_wrapper(config.TEST.NMS)
# limit detections to max_per_image over all classes
max_per_image = -1
num_images = imdb.num_images
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
i = 0
t = time.time()
for im_info, data_batch in test_data:
t1 = time.time() - t
t = time.time()
scale = im_info[0, 2]
scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, scale)
t2 = time.time() - t
t = time.time()
for j in range(1, imdb.num_classes):
indexes = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[indexes, j, np.newaxis]
cls_boxes = boxes[indexes, j * 4:(j + 1) * 4]
cls_dets = np.hstack((cls_boxes, cls_scores))
keep = nms(cls_dets)
all_boxes[j][i] = cls_dets[keep, :]
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
if vis:
boxes_this_image = [[]] + [all_boxes[j][i] for j in range(1, imdb.num_classes)]
vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, imdb.classes, scale)
t3 = time.time() - t
t = time.time()
logger.info('testing %d/%d data %.4fs net %.4fs post %.4fs' % (i, imdb.num_images, t1, t2, t3))
i += 1
det_file = os.path.join(imdb.cache_path, imdb.name + '_detections.pkl')
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, protocol=cPickle.HIGHEST_PROTOCOL)
imdb.evaluate_detections(all_boxes)
def vis_all_detection(im_array, detections, class_names, scale):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
import matplotlib.pyplot as plt
import random
im = image.transform_inverse(im_array, config.PIXEL_MEANS)
plt.imshow(im)
for j, name in enumerate(class_names):
if name == '__background__':
continue
color = (random.random(), random.random(), random.random()) # generate a random color
dets = detections[j]
for det in dets:
bbox = det[:4] * scale
score = det[-1]
rect = plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor=color, linewidth=3.5)
plt.gca().add_patch(rect)
plt.gca().text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(name, score),
bbox=dict(facecolor=color, alpha=0.5), fontsize=12, color='white')
plt.show()
def draw_all_detection(im_array, detections, class_names, scale):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
import cv2
import random
color_white = (255, 255, 255)
im = image.transform_inverse(im_array, config.PIXEL_MEANS)
# change to bgr
im = cv2.cvtColor(im, cv2.cv.CV_RGB2BGR)
for j, name in enumerate(class_names):
if name == '__background__':
continue
color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256)) # generate a random color
dets = detections[j]
for det in dets:
bbox = det[:4] * scale
score = det[-1]
bbox = map(int, bbox)
cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
return im
| apache-2.0 |
dvliman/jaikuengine | .google_appengine/lib/django-1.4/tests/regressiontests/model_fields/imagefield.py | 38 | 16526 | from __future__ import absolute_import
import os
import shutil
from django.core.files import File
from django.core.files.images import ImageFile
from django.test import TestCase
from .models import (Image, Person, PersonWithHeight, PersonWithHeightAndWidth,
PersonDimensionsFirst, PersonTwoImages, TestImageFieldFile)
# If PIL available, do these tests.
if Image:
from .models import temp_storage_dir
class ImageFieldTestMixin(object):
"""
Mixin class to provide common functionality to ImageField test classes.
"""
# Person model to use for tests.
PersonModel = PersonWithHeightAndWidth
# File class to use for file instances.
File = ImageFile
def setUp(self):
"""
Creates a pristine temp directory (or deletes and recreates if it
already exists) that the model uses as its storage directory.
Sets up two ImageFile instances for use in tests.
"""
if os.path.exists(temp_storage_dir):
shutil.rmtree(temp_storage_dir)
os.mkdir(temp_storage_dir)
file_path1 = os.path.join(os.path.dirname(__file__), "4x8.png")
self.file1 = self.File(open(file_path1, 'rb'))
file_path2 = os.path.join(os.path.dirname(__file__), "8x4.png")
self.file2 = self.File(open(file_path2, 'rb'))
def tearDown(self):
"""
Removes temp directory and all its contents.
"""
shutil.rmtree(temp_storage_dir)
def check_dimensions(self, instance, width, height,
field_name='mugshot'):
"""
Asserts that the given width and height values match both the
field's height and width attributes and the height and width fields
(if defined) the image field is caching to.
Note, this method will check for dimension fields named by adding
"_width" or "_height" to the name of the ImageField. So, the
models used in these tests must have their fields named
accordingly.
By default, we check the field named "mugshot", but this can be
specified by passing the field_name parameter.
"""
field = getattr(instance, field_name)
# Check height/width attributes of field.
if width is None and height is None:
self.assertRaises(ValueError, getattr, field, 'width')
self.assertRaises(ValueError, getattr, field, 'height')
else:
self.assertEqual(field.width, width)
self.assertEqual(field.height, height)
# Check height/width fields of model, if defined.
width_field_name = field_name + '_width'
if hasattr(instance, width_field_name):
self.assertEqual(getattr(instance, width_field_name), width)
height_field_name = field_name + '_height'
if hasattr(instance, height_field_name):
self.assertEqual(getattr(instance, height_field_name), height)
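        # Example: with the default field_name='mugshot' and a 4x8 test image,
        # this inspects field.width/field.height and, when the model defines
        # them, the mugshot_width and mugshot_height columns as well.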
class ImageFieldTests(ImageFieldTestMixin, TestCase):
"""
Tests for ImageField that don't need to be run with each of the
different test model classes.
"""
def test_equal_notequal_hash(self):
"""
Bug #9786: Ensure '==' and '!=' work correctly.
Bug #9508: make sure hash() works as expected (equal items must
hash to the same value).
"""
# Create two Persons with different mugshots.
p1 = self.PersonModel(name="Joe")
p1.mugshot.save("mug", self.file1)
p2 = self.PersonModel(name="Bob")
p2.mugshot.save("mug", self.file2)
self.assertEqual(p1.mugshot == p2.mugshot, False)
self.assertEqual(p1.mugshot != p2.mugshot, True)
# Test again with an instance fetched from the db.
p1_db = self.PersonModel.objects.get(name="Joe")
self.assertEqual(p1_db.mugshot == p2.mugshot, False)
self.assertEqual(p1_db.mugshot != p2.mugshot, True)
# Instance from db should match the local instance.
self.assertEqual(p1_db.mugshot == p1.mugshot, True)
self.assertEqual(hash(p1_db.mugshot), hash(p1.mugshot))
self.assertEqual(p1_db.mugshot != p1.mugshot, False)
def test_instantiate_missing(self):
"""
        If the underlying file is unavailable, still instantiate the
        object without error.
"""
p = self.PersonModel(name="Joan")
p.mugshot.save("shot", self.file1)
p = self.PersonModel.objects.get(name="Joan")
path = p.mugshot.path
shutil.move(path, path + '.moved')
p2 = self.PersonModel.objects.get(name="Joan")
def test_delete_when_missing(self):
"""
Bug #8175: correctly delete an object where the file no longer
exists on the file system.
"""
p = self.PersonModel(name="Fred")
p.mugshot.save("shot", self.file1)
os.remove(p.mugshot.path)
p.delete()
def test_size_method(self):
"""
Bug #8534: FileField.size should not leave the file open.
"""
p = self.PersonModel(name="Joan")
p.mugshot.save("shot", self.file1)
# Get a "clean" model instance
p = self.PersonModel.objects.get(name="Joan")
# It won't have an opened file.
self.assertEqual(p.mugshot.closed, True)
# After asking for the size, the file should still be closed.
_ = p.mugshot.size
self.assertEqual(p.mugshot.closed, True)
def test_pickle(self):
"""
Tests that ImageField can be pickled, unpickled, and that the
image of the unpickled version is the same as the original.
"""
import pickle
p = Person(name="Joe")
p.mugshot.save("mug", self.file1)
dump = pickle.dumps(p)
p2 = Person(name="Bob")
p2.mugshot = self.file1
loaded_p = pickle.loads(dump)
self.assertEqual(p.mugshot, loaded_p.mugshot)
class ImageFieldTwoDimensionsTests(ImageFieldTestMixin, TestCase):
"""
Tests behavior of an ImageField and its dimensions fields.
"""
def test_constructor(self):
"""
Tests assigning an image field through the model's constructor.
"""
p = self.PersonModel(name='Joe', mugshot=self.file1)
self.check_dimensions(p, 4, 8)
p.save()
self.check_dimensions(p, 4, 8)
def test_image_after_constructor(self):
"""
Tests behavior when image is not passed in constructor.
"""
p = self.PersonModel(name='Joe')
# TestImageField value will default to being an instance of its
# attr_class, a TestImageFieldFile, with name == None, which will
# cause it to evaluate as False.
self.assertEqual(isinstance(p.mugshot, TestImageFieldFile), True)
self.assertEqual(bool(p.mugshot), False)
# Test setting a fresh created model instance.
p = self.PersonModel(name='Joe')
p.mugshot = self.file1
self.check_dimensions(p, 4, 8)
def test_create(self):
"""
Tests assigning an image in Manager.create().
"""
p = self.PersonModel.objects.create(name='Joe', mugshot=self.file1)
self.check_dimensions(p, 4, 8)
def test_default_value(self):
"""
Tests that the default value for an ImageField is an instance of
the field's attr_class (TestImageFieldFile in this case) with no
name (name set to None).
"""
p = self.PersonModel()
self.assertEqual(isinstance(p.mugshot, TestImageFieldFile), True)
self.assertEqual(bool(p.mugshot), False)
def test_assignment_to_None(self):
"""
Tests that assigning ImageField to None clears dimensions.
"""
p = self.PersonModel(name='Joe', mugshot=self.file1)
self.check_dimensions(p, 4, 8)
# If image assigned to None, dimension fields should be cleared.
p.mugshot = None
self.check_dimensions(p, None, None)
p.mugshot = self.file2
self.check_dimensions(p, 8, 4)
def test_field_save_and_delete_methods(self):
"""
Tests assignment using the field's save method and deletion using
the field's delete method.
"""
p = self.PersonModel(name='Joe')
p.mugshot.save("mug", self.file1)
self.check_dimensions(p, 4, 8)
# A new file should update dimensions.
p.mugshot.save("mug", self.file2)
self.check_dimensions(p, 8, 4)
# Field and dimensions should be cleared after a delete.
p.mugshot.delete(save=False)
self.assertEqual(p.mugshot, None)
self.check_dimensions(p, None, None)
def test_dimensions(self):
"""
Checks that dimensions are updated correctly in various situations.
"""
p = self.PersonModel(name='Joe')
# Dimensions should get set if file is saved.
p.mugshot.save("mug", self.file1)
self.check_dimensions(p, 4, 8)
# Test dimensions after fetching from database.
p = self.PersonModel.objects.get(name='Joe')
# Bug 11084: Dimensions should not get recalculated if file is
# coming from the database. We test this by checking if the file
# was opened.
self.assertEqual(p.mugshot.was_opened, False)
self.check_dimensions(p, 4, 8)
# After checking dimensions on the image field, the file will have
# opened.
self.assertEqual(p.mugshot.was_opened, True)
# Dimensions should now be cached, and if we reset was_opened and
# check dimensions again, the file should not have opened.
p.mugshot.was_opened = False
self.check_dimensions(p, 4, 8)
self.assertEqual(p.mugshot.was_opened, False)
# If we assign a new image to the instance, the dimensions should
# update.
p.mugshot = self.file2
self.check_dimensions(p, 8, 4)
# Dimensions were recalculated, and hence file should have opened.
self.assertEqual(p.mugshot.was_opened, True)
class ImageFieldNoDimensionsTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField with no dimension fields.
"""
PersonModel = Person
class ImageFieldOneDimensionTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField with one dimensions field.
"""
PersonModel = PersonWithHeight
class ImageFieldDimensionsFirstTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField where the dimensions fields are
defined before the ImageField.
"""
PersonModel = PersonDimensionsFirst
class ImageFieldUsingFileTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField when assigning it a File instance
rather than an ImageFile instance.
"""
PersonModel = PersonDimensionsFirst
File = File
class TwoImageFieldTests(ImageFieldTestMixin, TestCase):
"""
Tests a model with two ImageFields.
"""
PersonModel = PersonTwoImages
def test_constructor(self):
p = self.PersonModel(mugshot=self.file1, headshot=self.file2)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
p.save()
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
def test_create(self):
p = self.PersonModel.objects.create(mugshot=self.file1,
headshot=self.file2)
self.check_dimensions(p, 4, 8)
self.check_dimensions(p, 8, 4, 'headshot')
def test_assignment(self):
p = self.PersonModel()
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.mugshot = self.file1
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.headshot = self.file2
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# Clear the ImageFields one at a time.
p.mugshot = None
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
p.headshot = None
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
def test_field_save_and_delete_methods(self):
p = self.PersonModel(name='Joe')
p.mugshot.save("mug", self.file1)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.headshot.save("head", self.file2)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# We can use save=True when deleting the image field with null=True
# dimension fields and the other field has an image.
p.headshot.delete(save=True)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.mugshot.delete(save=False)
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
def test_dimensions(self):
"""
Checks that dimensions are updated correctly in various situations.
"""
p = self.PersonModel(name='Joe')
# Dimensions should get set for the saved file.
p.mugshot.save("mug", self.file1)
p.headshot.save("head", self.file2)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# Test dimensions after fetching from database.
p = self.PersonModel.objects.get(name='Joe')
# Bug 11084: Dimensions should not get recalculated if file is
# coming from the database. We test this by checking if the file
# was opened.
self.assertEqual(p.mugshot.was_opened, False)
self.assertEqual(p.headshot.was_opened, False)
        self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# After checking dimensions on the image fields, the files will
# have been opened.
self.assertEqual(p.mugshot.was_opened, True)
self.assertEqual(p.headshot.was_opened, True)
# Dimensions should now be cached, and if we reset was_opened and
# check dimensions again, the file should not have opened.
p.mugshot.was_opened = False
p.headshot.was_opened = False
        self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
self.assertEqual(p.mugshot.was_opened, False)
self.assertEqual(p.headshot.was_opened, False)
# If we assign a new image to the instance, the dimensions should
# update.
p.mugshot = self.file2
p.headshot = self.file1
self.check_dimensions(p, 8, 4, 'mugshot')
self.check_dimensions(p, 4, 8, 'headshot')
# Dimensions were recalculated, and hence file should have opened.
self.assertEqual(p.mugshot.was_opened, True)
self.assertEqual(p.headshot.was_opened, True)
| apache-2.0 |
trolleway/map-in-picture | map-in-picture.py | 1 | 7113 | from PIL import Image
from PIL.ExifTags import TAGS, GPSTAGS
import os, sys
import subprocess
'''
Usage: python map-in-picture.py photo1.JPG photo2.JPG
Set variable maperitive_path to maperitive.exe!
'''
def get_exif_data(image):
"""Returns a dictionary from the exif data of an PIL Image item. Also converts the GPS Tags"""
exif_data = {}
info = image._getexif()
if info:
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
if decoded == "GPSInfo":
gps_data = {}
for t in value:
sub_decoded = GPSTAGS.get(t, t)
gps_data[sub_decoded] = value[t]
exif_data[decoded] = gps_data
else:
exif_data[decoded] = value
return exif_data
def _get_if_exist(data, key):
if key in data:
return data[key]
return None
def _convert_to_degress(value):
"""Helper function to convert the GPS coordinates stored in the EXIF to degress in float format"""
d0 = value[0][0]
d1 = value[0][1]
d = float(d0) / float(d1)
m0 = value[1][0]
m1 = value[1][1]
m = float(m0) / float(m1)
s0 = value[2][0]
s1 = value[2][1]
s = float(s0) / float(s1)
return d + (m / 60.0) + (s / 3600.0)
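# Worked example (hypothetical EXIF rationals): for
#   value = ((40, 1), (26, 1), (4612, 100))
# the pieces are d = 40.0, m = 26.0, s = 46.12, giving
#   40 + 26/60.0 + 46.12/3600.0 ~= 40.446144 degrees.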
def get_lat_lon(exif_data):
"""Returns the latitude and longitude, if available, from the provided exif_data (obtained through get_exif_data above)"""
lat = None
lon = None
if "GPSInfo" in exif_data:
gps_info = exif_data["GPSInfo"]
gps_latitude = _get_if_exist(gps_info, "GPSLatitude")
gps_latitude_ref = _get_if_exist(gps_info, 'GPSLatitudeRef')
gps_longitude = _get_if_exist(gps_info, 'GPSLongitude')
gps_longitude_ref = _get_if_exist(gps_info, 'GPSLongitudeRef')
if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:
lat = _convert_to_degress(gps_latitude)
if gps_latitude_ref != "N":
lat = 0 - lat
lon = _convert_to_degress(gps_longitude)
if gps_longitude_ref != "E":
lon = 0 - lon
return lat, lon
def gen_maps(data, map_w, map_h):
maperitive_path='D:\Programs\_Maps\Maperitive\Maperitive.exe'
lat = data[0]
lon = data[1]
f = open('temp.mscript', 'w')
f.write('clear-map\n')
#f.write('add-web-map provider="hikebike"\n')
f.write('add-web-map provider="mapquest.osm"\n')
#f.write('load-source data.osm\n')
f.write('use-ruleset alias="ad47"\n')
f.write('load-source "'+os.getcwd()+'\\maps\\temp.gpx"\n')
f.write('move-pos x='+str(lon)+' y='+str(lat)+' zoom=10\n')
f.write('set-setting name="map.decoration.attribution" value="False"\n')
f.write('pause\n')
f.write('export-bitmap width='+str(map_w)+' height='+str(map_h)+' zoom=10 file=maps/1.png\n')
f.write('export-bitmap width='+str(map_w)+' height='+str(map_h)+' zoom=14 file=maps/2.png\n')
f.write('export-bitmap width='+str(map_w)+' height='+str(map_h)+' zoom=16 file=maps/3.png\n')
f.close()
    os.system(maperitive_path + ' -exitafter ' + os.getcwd() + '/temp.mscript')
return None
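# For example (values illustrative), a call like gen_maps((55.75, 37.62), 340, 250)
# would leave a temp.mscript containing, among others, the lines:
#
#   move-pos x=37.62 y=55.75 zoom=10
#   set-setting name="map.decoration.attribution" value="False"
#   export-bitmap width=340 height=250 zoom=10 file=maps/1.png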
def gen_point_gpx(data):
f = open('maps/temp.gpx', 'w')
lat = data[0]
lon = data[1]
text='''<?xml version='1.0' encoding='UTF-8'?>
<gpx version="1.1" creator="Small python script" xmlns="http://www.topografix.com/GPX/1/1"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd">
<metadata>
</metadata>
'''
f.write(text)
f.write(' <wpt lat="'+str(lat)+'" lon="'+str(lon)+'"/>')
f.write('</gpx>')
f.close()
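# With data = (55.75, 37.62) the generated file would end with a single
# waypoint, written without a line break before the closing tag:
#
#   <wpt lat="55.75" lon="37.62"/></gpx>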
################
# Example ######
################
if __name__ == "__main__":
print "Usage: python map-in-picture.py d:\upload\2014-08\2\_20140817_227.JPG d:\upload\2014-08\2\_20140817_237.JPG"
for infile in sys.argv[1:]:
outfile = os.path.splitext(infile)[0] + ".new.jpg"
print infile
if infile != outfile:
try:
filename = infile
image = Image.open(filename)
exif_data = get_exif_data(image)
print 'Photo coords founded'
gen_point_gpx(get_lat_lon(exif_data))
source_width, source_height = image.size
                target_h = source_height
                map_border_h = 1
                map_border_v = 1
                map_w = (source_width - (2 * map_border_v)) // 3
                map_h = 250
#if source_width-source_height>200:
# map_h=source_width-source_height-map_border_h-map_border_h
gen_maps(get_lat_lon(exif_data),map_w,map_h)
                bg_color = (0, 0, 0)  # background fill colour (black)
#new canvas size
#isize=(1024,target_h+map_h+map_border_v+map_border_v)
isize=(source_width,target_h+map_h+map_border_v+map_border_v)
print isize
                inew = Image.new('RGB', isize, bg_color)
imgsrc = Image.open(infile)
inew.paste(imgsrc,(0,0,source_width,source_height))
left = 0
right = map_w
upper = target_h+map_border_v
lower = target_h+map_h+map_border_v
bbox = (left,upper,right,lower)
imgmap = Image.open('maps/1.png')
inew.paste(imgmap,bbox)
left = map_w+(map_border_h*1)
right = left+map_w
upper = target_h+map_border_v
lower = target_h+map_h+map_border_v
bbox = (left,upper,right,lower)
imgmap = Image.open('maps/2.png')
inew.paste(imgmap,bbox)
left = (map_w*2)+(map_border_h*2)
right = left+map_w
upper = target_h+map_border_v
lower = target_h+map_h+map_border_v
bbox = (left,upper,right,lower)
imgmap = Image.open('maps/3.png')
inew.paste(imgmap,bbox)
inew.save(outfile, quality=95)
subprocess.call(['exiftool.exe',
outfile,
'-tagsFromFile',
filename], shell=True)
except IOError:
print "cannot create thumbnail for", infile
| cc0-1.0 |
elegion/djangodash2013 | wtl/wtlib/forms.py | 1 | 1119 | from django import forms
from django.forms.util import ErrorList
from django.utils.translation import ugettext_lazy as _
from github import UnknownObjectException
from wtl.wtgithub.worker import GithubWorker, ParseError, CantFindParserError
class AnalyzeForm(forms.Form):
git_url = forms.CharField()
def analyze(self):
worker = GithubWorker()
try:
self.repository, self.project = worker.analyze_repo(
self.cleaned_data['git_url'])
return self.repository, self.project
except UnknownObjectException:
self._add_error('git_url', _('Repository not found.'))
except CantFindParserError:
            self._add_error('git_url', _('Cannot find requirements file.'))
except ParseError:
self._add_error('git_url', _('Failed to parse your repo.'))
        except Exception:
self._add_error('git_url', _('Unknown error while parsing your repo.'))
def _add_error(self, field, error):
if field not in self._errors:
self._errors[field] = ErrorList()
self._errors[field].append(error)
| mit |
leeseuljeong/leeseulstack_neutron | neutron/plugins/ml2/models.py | 9 | 4918 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import portbindings
BINDING_PROFILE_LEN = 4095
class NetworkSegment(model_base.BASEV2, models_v2.HasId):
"""Represent persistent state of a network segment.
A network segment is a portion of a neutron network with a
specific physical realization. A neutron network can consist of
one or more segments.
"""
__tablename__ = 'ml2_network_segments'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
nullable=False)
network_type = sa.Column(sa.String(32), nullable=False)
physical_network = sa.Column(sa.String(64))
segmentation_id = sa.Column(sa.Integer)
is_dynamic = sa.Column(sa.Boolean, default=False, nullable=False,
server_default=sa.sql.false())
segment_index = sa.Column(sa.Integer, nullable=False, server_default='0')
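    # Illustrative values only: a VLAN-based segment row would typically carry
    # network_type='vlan', physical_network='physnet1', segmentation_id=1001,
    # while a tunnel-based one (e.g. network_type='vxlan') leaves
    # physical_network unset and stores the VNI in segmentation_id.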
class PortBinding(model_base.BASEV2):
"""Represent binding-related state of a port.
A port binding stores the port attributes required for the
portbindings extension, as well as internal ml2 state such as
which MechanismDriver and which segment are used by the port
binding.
"""
__tablename__ = 'ml2_port_bindings'
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
host = sa.Column(sa.String(255), nullable=False, default='',
server_default='')
vnic_type = sa.Column(sa.String(64), nullable=False,
default=portbindings.VNIC_NORMAL,
server_default=portbindings.VNIC_NORMAL)
profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False,
default='', server_default='')
vif_type = sa.Column(sa.String(64), nullable=False)
vif_details = sa.Column(sa.String(4095), nullable=False, default='',
server_default='')
driver = sa.Column(sa.String(64))
segment = sa.Column(sa.String(36),
sa.ForeignKey('ml2_network_segments.id',
ondelete="SET NULL"))
# Add a relationship to the Port model in order to instruct SQLAlchemy to
# eagerly load port bindings
port = orm.relationship(
models_v2.Port,
backref=orm.backref("port_binding",
lazy='joined', uselist=False,
cascade='delete'))
class DVRPortBinding(model_base.BASEV2):
"""Represent binding-related state of a DVR port.
    Port binding for all the ports associated with a DVR identified by router_id.
"""
__tablename__ = 'ml2_dvr_port_bindings'
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
host = sa.Column(sa.String(255), nullable=False, primary_key=True)
router_id = sa.Column(sa.String(36), nullable=True)
vif_type = sa.Column(sa.String(64), nullable=False)
vif_details = sa.Column(sa.String(4095), nullable=False, default='',
server_default='')
vnic_type = sa.Column(sa.String(64), nullable=False,
default=portbindings.VNIC_NORMAL,
server_default=portbindings.VNIC_NORMAL)
profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False,
default='', server_default='')
cap_port_filter = sa.Column(sa.Boolean, nullable=False)
driver = sa.Column(sa.String(64))
segment = sa.Column(sa.String(36),
sa.ForeignKey('ml2_network_segments.id',
ondelete="SET NULL"))
status = sa.Column(sa.String(16), nullable=False)
# Add a relationship to the Port model in order to instruct SQLAlchemy to
# eagerly load port bindings
port = orm.relationship(
models_v2.Port,
backref=orm.backref("dvr_port_binding",
lazy='joined', uselist=False,
cascade='delete'))
| apache-2.0 |
semonte/intellij-community | python/helpers/pycharm/pytestrunner.py | 21 | 1660 | import sys
has_pytest = False
#there is the difference between 1.3.4 and 2.0.2 versions
#Since version 1.4, the testing tool "py.test" is part of its own pytest distribution.
try:
import pytest
has_pytest = True
except:
try:
import py
except:
raise NameError("No py.test runner found in selected interpreter")
def get_plugin_manager():
try:
from _pytest.config import get_plugin_manager
return get_plugin_manager()
except ImportError:
from _pytest.core import PluginManager
return PluginManager(load=True)
# "-s" is always required: no test output provided otherwise (see PY-12621)
args = sys.argv[1:]
args.append("-s") if "-s" not in args else None
if has_pytest:
_preinit = []
def main():
_pluginmanager = get_plugin_manager()
hook = _pluginmanager.hook
try:
config = hook.pytest_cmdline_parse(
pluginmanager=_pluginmanager, args=args)
exitstatus = hook.pytest_cmdline_main(config=config)
except pytest.UsageError:
e = sys.exc_info()[1]
sys.stderr.write("ERROR: %s\n" %(e.args[0],))
exitstatus = 3
return exitstatus
else:
def main():
config = py.test.config
try:
config.parse(args)
config.pluginmanager.do_configure(config)
session = config.initsession()
colitems = config.getinitialnodes()
exitstatus = session.main(colitems)
config.pluginmanager.do_unconfigure(config)
except config.Error:
e = sys.exc_info()[1]
sys.stderr.write("ERROR: %s\n" %(e.args[0],))
exitstatus = 3
py.test.config = py.test.config.__class__()
return exitstatus
if __name__ == "__main__":
main() | apache-2.0 |
watchdogpolska/watchdog-id | watchdog_id/users/tests/test_admin.py | 1 | 1405 | # coding=utf-8
from test_plus.test import TestCase
from ..admin import MyUserCreationForm
class TestMyUserCreationForm(TestCase):
def setUp(self):
self.user = self.make_user('notalamode', 'notalamodespassword')
def test_clean_username_success(self):
# Instantiate the form with a new username
form = MyUserCreationForm({
'username': 'alamode',
'password1': '7jefB#f@Cc7YJB]2v',
'password2': '7jefB#f@Cc7YJB]2v',
})
# Run is_valid() to trigger the validation
valid = form.is_valid()
self.assertTrue(valid)
# Run the actual clean_username method
username = form.clean_username()
self.assertEqual('alamode', username)
def test_clean_username_false(self):
# Instantiate the form with the same username as self.user
form = MyUserCreationForm({
'username': self.user.username,
'password1': 'notalamodespassword',
'password2': 'notalamodespassword',
})
# Run is_valid() to trigger the validation, which is going to fail
# because the username is already taken
valid = form.is_valid()
self.assertFalse(valid)
# The form.errors dict should contain a single error called 'username'
self.assertTrue(len(form.errors) == 1)
self.assertTrue('username' in form.errors)
| mit |
40223201/w16b_test | static/Brython3.1.3-20150514-095342/Lib/unittest/main.py | 739 | 10385 | """Unittest main program"""
import sys
import optparse
import os
from . import loader, runner
from .signals import installHandler
__unittest = True
FAILFAST = " -f, --failfast Stop on first failure\n"
CATCHBREAK = " -c, --catch Catch control-C and display results\n"
BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
USAGE_AS_MAIN = """\
Usage: %(progName)s [options] [tests]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s test_module - run tests from test_module
%(progName)s module.TestClass - run tests from module.TestClass
%(progName)s module.Class.test_method - run specified test method
[tests] can be a list of any number of test modules, classes and test
methods.
Alternative Usage: %(progName)s discover [options]
Options:
-v, --verbose Verbose output
%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
-p pattern Pattern to match test files ('test*.py' default)
-t directory Top level directory of project (default to
start directory)
For test discovery all test modules must be importable from the top
level directory of the project.
"""
USAGE_FROM_MODULE = """\
Usage: %(progName)s [options] [test] [...]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s - run default set of tests
%(progName)s MyTestSuite - run suite 'MyTestSuite'
%(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
def _convert_name(name):
# on Linux / Mac OS X 'foo.PY' is not importable, but on
    # Windows it is. Simpler to do a case-insensitive match;
    # a better check would be to verify that the name is a
# valid Python module name.
if os.path.isfile(name) and name.lower().endswith('.py'):
if os.path.isabs(name):
rel_path = os.path.relpath(name, os.getcwd())
if os.path.isabs(rel_path) or rel_path.startswith(os.pardir):
return name
name = rel_path
# on Windows both '\' and '/' are used as path
# separators. Better to replace both than rely on os.path.sep
return name[:-3].replace('\\', '.').replace('/', '.')
return name
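# Examples (assuming the files exist, since os.path.isfile gates the
# conversion): 'tests/test_foo.py' -> 'tests.test_foo', while an absolute
# path outside the current directory is returned unchanged.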
def _convert_names(names):
return [_convert_name(name) for name in names]
class TestProgram(object):
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
USAGE = USAGE_FROM_MODULE
# defaults for testing
failfast = catchbreak = buffer = progName = warnings = None
def __init__(self, module='__main__', defaultTest=None, argv=None,
testRunner=None, testLoader=loader.defaultTestLoader,
exit=True, verbosity=1, failfast=None, catchbreak=None,
buffer=None, warnings=None):
if isinstance(module, str):
self.module = __import__(module)
for part in module.split('.')[1:]:
self.module = getattr(self.module, part)
else:
self.module = module
if argv is None:
argv = sys.argv
self.exit = exit
self.failfast = failfast
self.catchbreak = catchbreak
self.verbosity = verbosity
self.buffer = buffer
if warnings is None and not sys.warnoptions:
            # even if DeprecationWarnings are ignored by default
# print them anyway unless other warnings settings are
# specified by the warnings arg or the -W python flag
self.warnings = 'default'
else:
# here self.warnings is set either to the value passed
# to the warnings args or to None.
# If the user didn't pass a value self.warnings will
# be None. This means that the behavior is unchanged
# and depends on the values passed to -W.
self.warnings = warnings
self.defaultTest = defaultTest
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.parseArgs(argv)
self.runTests()
def usageExit(self, msg=None):
if msg:
print(msg)
usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
'buffer': ''}
if self.failfast != False:
usage['failfast'] = FAILFAST
if self.catchbreak != False:
usage['catchbreak'] = CATCHBREAK
if self.buffer != False:
usage['buffer'] = BUFFEROUTPUT
print(self.USAGE % usage)
sys.exit(2)
def parseArgs(self, argv):
if ((len(argv) > 1 and argv[1].lower() == 'discover') or
(len(argv) == 1 and self.module is None)):
self._do_discovery(argv[2:])
return
parser = self._getOptParser()
options, args = parser.parse_args(argv[1:])
self._setAttributesFromOptions(options)
if len(args) == 0 and self.module is None:
# this allows "python -m unittest -v" to still work for
# test discovery. This means -c / -b / -v / -f options will
# be handled twice, which is harmless but not ideal.
self._do_discovery(argv[1:])
return
if len(args) == 0 and self.defaultTest is None:
# createTests will load tests from self.module
self.testNames = None
elif len(args) > 0:
self.testNames = _convert_names(args)
if __name__ == '__main__':
# to support python -m unittest ...
self.module = None
else:
self.testNames = (self.defaultTest,)
self.createTests()
def createTests(self):
if self.testNames is None:
self.test = self.testLoader.loadTestsFromModule(self.module)
else:
self.test = self.testLoader.loadTestsFromNames(self.testNames,
self.module)
def _getOptParser(self):
import optparse
parser = optparse.OptionParser()
parser.prog = self.progName
parser.add_option('-v', '--verbose', dest='verbose', default=False,
help='Verbose output', action='store_true')
parser.add_option('-q', '--quiet', dest='quiet', default=False,
help='Quiet output', action='store_true')
if self.failfast != False:
parser.add_option('-f', '--failfast', dest='failfast', default=False,
help='Stop on first fail or error',
action='store_true')
if self.catchbreak != False:
parser.add_option('-c', '--catch', dest='catchbreak', default=False,
help='Catch ctrl-C and display results so far',
action='store_true')
if self.buffer != False:
parser.add_option('-b', '--buffer', dest='buffer', default=False,
help='Buffer stdout and stderr during tests',
action='store_true')
return parser
def _setAttributesFromOptions(self, options):
# only set options from the parsing here
# if they weren't set explicitly in the constructor
if self.failfast is None:
self.failfast = options.failfast
if self.catchbreak is None:
self.catchbreak = options.catchbreak
if self.buffer is None:
self.buffer = options.buffer
if options.verbose:
self.verbosity = 2
elif options.quiet:
self.verbosity = 0
def _addDiscoveryOptions(self, parser):
parser.add_option('-s', '--start-directory', dest='start', default='.',
help="Directory to start discovery ('.' default)")
parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
help="Pattern to match tests ('test*.py' default)")
parser.add_option('-t', '--top-level-directory', dest='top', default=None,
help='Top level directory of project (defaults to start directory)')
def _do_discovery(self, argv, Loader=None):
if Loader is None:
Loader = lambda: self.testLoader
# handle command line args for test discovery
self.progName = '%s discover' % self.progName
parser = self._getOptParser()
self._addDiscoveryOptions(parser)
options, args = parser.parse_args(argv)
if len(args) > 3:
self.usageExit()
for name, value in zip(('start', 'pattern', 'top'), args):
setattr(options, name, value)
self._setAttributesFromOptions(options)
start_dir = options.start
pattern = options.pattern
top_level_dir = options.top
loader = Loader()
self.test = loader.discover(start_dir, pattern, top_level_dir)
def runTests(self):
if self.catchbreak:
installHandler()
if self.testRunner is None:
self.testRunner = runner.TextTestRunner
if isinstance(self.testRunner, type):
try:
testRunner = self.testRunner(verbosity=self.verbosity,
failfast=self.failfast,
buffer=self.buffer,
warnings=self.warnings)
except TypeError:
# didn't accept the verbosity, buffer or failfast arguments
testRunner = self.testRunner()
else:
# it is assumed to be a TestRunner instance
testRunner = self.testRunner
self.result = testRunner.run(self.test)
if self.exit:
sys.exit(not self.result.wasSuccessful())
main = TestProgram
| agpl-3.0 |
msingh172/pylearn2 | pylearn2/linear/tests/test_conv2d_c01b.py | 34 | 6358 | import theano
from theano import tensor
from theano.compat.six.moves import xrange
import numpy
from pylearn2.linear.conv2d_c01b import (Conv2D, make_random_conv2D,
make_sparse_random_conv2D, setup_detector_layer_c01b)
from pylearn2.space import Conv2DSpace
from pylearn2.utils import sharedX
from pylearn2.testing.skip import skip_if_no_gpu
from pylearn2.models.maxout import MaxoutConvC01B
from pylearn2.models.mlp import MLP
skip_if_no_gpu()
import unittest
try:
scipy_available = True
import scipy.ndimage
except ImportError:
scipy_available = False
class TestConv2DC01b(unittest.TestCase):
"""
Tests for Alex Krizhevsky's Conv2D code
"""
def setUp(self):
"""
Set up a test image and filter to re-use
"""
self.orig_floatX = theano.config.floatX
theano.config.floatX = 'float32'
theano.sandbox.cuda.use('gpu')
self.image = \
numpy.random.rand(16, 3, 3, 1).astype(theano.config.floatX)
self.image_tensor = tensor.tensor4()
self.filters_values = numpy.random.rand(
16, 2, 2, 32).astype(theano.config.floatX)
self.filters = sharedX(self.filters_values, name='filters')
self.conv2d = Conv2D(self.filters)
def tearDown(self):
theano.config.floatX = self.orig_floatX
theano.sandbox.cuda.unuse()
def scipy_conv_c01b(self, images, filters):
"""
Emulate c01b convolution with scipy
"""
assert images.ndim == 4
assert filters.ndim == 4
in_chans, rows, cols, bs = images.shape
in_chans_, rows_, cols_, out_chans = filters.shape
assert in_chans_ == in_chans
out_bc01 = [
[sum(scipy.ndimage.filters.convolve(images[c, :, :, b],
filters[c, ::-1, ::-1, i])
for c in xrange(in_chans))
for i in xrange(out_chans)]
for b in xrange(bs)]
out_c01b = numpy.array(out_bc01).transpose(1, 2, 3, 0)
return out_c01b
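    # Shape sketch of the c01b layout used above (illustrative):
    #   images:  (in_chans,  rows,  cols,  batch)      e.g. (16, 3, 3, 1)
    #   filters: (in_chans,  frows, fcols, out_chans)  e.g. (16, 2, 2, 32)
    #   output:  (out_chans, rows,  cols,  batch)      e.g. (32, 3, 3, 1),
    # which test_lmul later crops to the valid region via [:, :2, :2, :].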
def test_get_params(self):
"""
Check whether the conv2d has stored the correct filters
"""
assert self.conv2d.get_params() == [self.filters]
def test_lmul(self):
"""
Use SciPy's ndimage to check whether the convolution worked
correctly
"""
f = theano.function([self.image_tensor],
self.conv2d.lmul(self.image_tensor))
if scipy_available:
self.assertTrue(
numpy.allclose(
f(self.image),
self.scipy_conv_c01b(self.image,
self.filters_values)[:, :2, :2, :]))
def test_lmul_T(self):
"""
Check whether this function outputs the right shape
"""
conv2d = self.conv2d.lmul(self.image_tensor)
f = theano.function([self.image_tensor],
self.conv2d.lmul_T(conv2d))
assert f(self.image).shape == self.image.shape
def test_axes(self):
"""
Use custom output axes and check whether it worked
"""
default_axes = ('c', 0, 1, 'b')
axes = (0, 'b', 1, 'c')
mapping = tuple(axes.index(axis) for axis in default_axes)
conv2d = Conv2D(self.filters, output_axes=axes)
f_axes = theano.function([self.image_tensor],
conv2d.lmul(self.image_tensor))
f = theano.function([self.image_tensor],
self.conv2d.lmul(self.image_tensor))
output_axes = f_axes(self.image)
output = f(self.image)
output_axes = numpy.transpose(output_axes, mapping)
numpy.testing.assert_allclose(output, output_axes)
assert output.shape == output_axes.shape
def test_channels(self):
"""
Go from 32 to 16 channels and see whether that works without error
"""
filters_values = numpy.ones(
(32, 2, 2, 16), dtype=theano.config.floatX
)
filters = sharedX(filters_values)
image = numpy.random.rand(32, 3, 3, 1).astype(theano.config.floatX)
conv2d = Conv2D(filters)
f = theano.function([self.image_tensor],
conv2d.lmul(self.image_tensor))
assert f(image).shape == (16, 2, 2, 1)
def test_make_random_conv2D(self):
"""
Make random filters
"""
default_axes = ('c', 0, 1, 'b')
conv2d = make_random_conv2D(1, 16, default_axes, default_axes,
16, (2, 2))
f = theano.function([self.image_tensor],
conv2d.lmul(self.image_tensor))
assert f(self.image).shape == (16, 2, 2, 1)
assert conv2d.output_axes == default_axes
def test_make_sparse_random_conv2D(self):
"""
Make random sparse filters, count whether the number of
non-zero elements is sensible
"""
axes = ('c', 0, 1, 'b')
input_space = Conv2DSpace((3, 3), 16, axes=axes)
output_space = Conv2DSpace((3, 3), 16, axes=axes)
num_nonzero = 2
kernel_shape = (2, 2)
conv2d = make_sparse_random_conv2D(num_nonzero, input_space,
output_space, kernel_shape)
f = theano.function([self.image_tensor],
conv2d.lmul(self.image_tensor))
assert f(self.image).shape == (16, 2, 2, 1)
assert conv2d.output_axes == axes
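        # num_nonzero=2 weights per each of the 16 output channels gives an
        # expected lower bound of 2 * 16 = 32 nonzero filter entries.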
assert numpy.count_nonzero(conv2d._filters.get_value()) >= 32
def test_setup_detector_layer_c01b(self):
"""
Very basic test to see whether a detector layer can be set up
without error. Not checking much for the actual output.
"""
axes = ('c', 0, 1, 'b')
layer = MaxoutConvC01B(16, 2, (2, 2), (2, 2),
(1, 1), 'maxout', irange=1.)
input_space = Conv2DSpace((3, 3), 16, axes=axes)
MLP(layers=[layer], input_space=input_space)
layer.set_input_space(input_space)
assert isinstance(layer.input_space, Conv2DSpace)
input = theano.tensor.tensor4()
f = theano.function([input], layer.fprop(input))
f(numpy.random.rand(16, 3, 3, 1).astype(theano.config.floatX))
| bsd-3-clause |
chrys87/orca-beep | test/keystrokes/firefox/html_struct_nav_bug_567984.py | 1 | 4518 | #!/usr/bin/python
"""Test of structural navigation by heading."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.AssertPresentationAction(
"1. Top of file",
["BRAILLE LINE: 'Index Vakbarát Hírportál h1'",
" VISIBLE: 'Index Vakbarát Hírportál h1', cursor=1",
"SPEECH OUTPUT: 'Index Vakbarát Hírportál heading level 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("h"))
sequence.append(utils.AssertPresentationAction(
"2. h",
["BRAILLE LINE: 'Legfrissebb hírek h2'",
" VISIBLE: 'Legfrissebb hírek h2', cursor=1",
"SPEECH OUTPUT: 'Legfrissebb hírek heading level 2'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("h"))
sequence.append(utils.AssertPresentationAction(
"3. h",
["BRAILLE LINE: 'Izrael bejelentette az",
"egyoldalú tûzszünetet h3'",
" VISIBLE: 'Izrael bejelentette az",
"egyoldalú', cursor=1",
"BRAILLE LINE: 'Izrael bejelentette az h3'",
" VISIBLE: 'Izrael bejelentette az h3', cursor=1",
"SPEECH OUTPUT: 'Izrael bejelentette az",
"egyoldalú tûzszünetet'",
"SPEECH OUTPUT: 'link heading level 3.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("h"))
sequence.append(utils.AssertPresentationAction(
"4. h",
["BRAILLE LINE: 'Videók a Hudsonba zuhanó repülõrõl h3'",
" VISIBLE: 'Videók a Hudsonba zuhanó repülõr', cursor=1",
"SPEECH OUTPUT: 'Videók a Hudsonba zuhanó repülõrõl'",
"SPEECH OUTPUT: 'link heading level 3.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("h"))
sequence.append(utils.AssertPresentationAction(
"5. h",
["BRAILLE LINE: 'Újabb pénzügyi guru tûnt el, pénzzel együtt h3'",
" VISIBLE: 'Újabb pénzügyi guru tûnt el, pén', cursor=1",
"SPEECH OUTPUT: 'Újabb pénzügyi guru tûnt el, pénzzel együtt'",
"SPEECH OUTPUT: 'link heading level 3.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"6. Down",
["BRAILLE LINE: 'A 75 éves Arthur Nadeltõl több százmillió dollár követelnének az ügyfelei, de még a férfit sem találják.'",
" VISIBLE: 'A 75 éves Arthur Nadeltõl több s', cursor=1",
"SPEECH OUTPUT: 'A 75 éves Arthur Nadeltõl több százmillió dollár követelnének az ügyfelei, de még a férfit sem találják.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"7. Down",
["BRAILLE LINE: '1150 embert utcára tesz a pécsi Elcoteq h3'",
" VISIBLE: '1150 embert utcára tesz a pécsi ', cursor=1",
"SPEECH OUTPUT: '1150 embert utcára tesz a pécsi Elcoteq'",
"SPEECH OUTPUT: 'link heading level 3.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("h"))
sequence.append(utils.AssertPresentationAction(
"8. h",
["BRAILLE LINE: 'Hamarosan újraindul a gázszállítás h3'",
" VISIBLE: 'Hamarosan újraindul a gázszállít', cursor=1",
"SPEECH OUTPUT: 'Hamarosan újraindul a gázszállítás'",
"SPEECH OUTPUT: 'link heading level 3.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"9. Down",
["BRAILLE LINE: 'Megállapodott Putyin és Tyimosenko az orosz-ukrán szerzõdésrõl. Amint lepapírozzák, jön a gáz.'",
" VISIBLE: 'Megállapodott Putyin és Tyimosen', cursor=1",
"SPEECH OUTPUT: 'Megállapodott Putyin és Tyimosenko az orosz-ukrán szerzõdésrõl. Amint lepapírozzák, jön a gáz.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Shift>h"))
sequence.append(utils.AssertPresentationAction(
"10. Shift+h",
["BRAILLE LINE: 'Hamarosan újraindul a gázszállítás h3'",
" VISIBLE: 'Hamarosan újraindul a gázszállít', cursor=1",
"SPEECH OUTPUT: 'Hamarosan újraindul a gázszállítás'",
"SPEECH OUTPUT: 'link heading level 3.'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| lgpl-2.1 |
mehdidc/scikit-learn | sklearn/svm/classes.py | 3 | 36924 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better (to large numbers of
samples).
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        because it is consistent, it is seldom used in practice, rarely leads
        to better accuracy, and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 \
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large numbers of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
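Examples
--------
A minimal usage sketch on toy data (illustrative only; it mirrors the
SVC example elsewhere in this module):
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import LinearSVC
>>> clf = LinearSVC(random_state=0)
>>> clf = clf.fit(X, y)
>>> print(clf.predict([[-0.8, -1]]))
[1]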
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss
)
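# With multi_class="crammer_singer" on a binary problem, liblinear returns
# one weight vector per class; collapse them into the single decision
# vector the rest of the scikit-learn API expects.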
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better (to large numbers of
samples).
This class supports both dense and sparse input.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
Specifies the loss function. 'epsilon_insensitive' is the
epsilon-insensitive loss (standard SVR) while
'squared_epsilon_insensitive' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight.
Note! The synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 \
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large numbers of samples as LinearSVR does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
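Examples
--------
A minimal usage sketch on random toy data (illustrative only; the shape
check merely demonstrates the fitted attributes):
>>> import numpy as np
>>> from sklearn.svm import LinearSVR
>>> np.random.seed(0)
>>> X = np.random.randn(10, 5)
>>> y = np.random.randn(10)
>>> reg = LinearSVR(C=1.0, epsilon=0.2)
>>> reg = reg.fit(X, y)
>>> reg.coef_.shape
(5,)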
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic in the number of samples, which makes it hard
to scale to datasets with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
.. The narrative documentation is available at http://scikit-learn.org/
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in the \
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
gamma=0.0, kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more points of comparison.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, random_state=None):
super(SVC, self).__init__(
'c_svc', kernel, degree, gamma, coef0, tol, C, 0., 0., shrinking,
probability, cache_size, class_weight, verbose, max_iter,
random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function. It is only significant
in poly/sigmoid.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in \
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, verbose=False, max_iter=-1,
random_state=None):
super(NuSVC, self).__init__(
'nu_svc', kernel, degree, gamma, coef0, tol, 0., nu, 0., shrinking,
probability, cache_size, None, verbose, max_iter, random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function. It is only significant
in poly/sigmoid.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma=0.0,
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm,
with a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
C=1.0, epsilon=0.1, shrinking=True, cache_size=200,
verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function. It is only significant
in poly/sigmoid.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
Epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma=0.0, coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
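Examples
--------
A minimal usage sketch on random toy data (illustrative only):
>>> import numpy as np
>>> from sklearn.svm import OneClassSVM
>>> np.random.seed(0)
>>> X = np.random.randn(20, 2)
>>> clf = OneClassSVM(nu=0.1)
>>> clf = clf.fit(X)
>>> clf.predict(X).shape
(20,)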
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
nu=0.5, shrinking=True, cache_size=200, verbose=False,
max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, [], sample_weight=sample_weight,
**params)
return self
| bsd-3-clause |
Juraci/tempest | tempest/api/network/test_metering_extensions.py | 17 | 6784 | # Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import test
LOG = logging.getLogger(__name__)
class MeteringTestJSON(base.BaseAdminNetworkTest):
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
List, Show, Create, Delete Metering labels
List, Show, Create, Delete Metering label rules
"""
@classmethod
def skip_checks(cls):
super(MeteringTestJSON, cls).skip_checks()
if not test.is_extension_enabled('metering', 'network'):
msg = "metering extension not enabled."
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(MeteringTestJSON, cls).resource_setup()
description = "metering label created by tempest"
name = data_utils.rand_name("metering-label")
cls.metering_label = cls.create_metering_label(name, description)
remote_ip_prefix = ("10.0.0.0/24" if cls._ip_version == 4
else "fd02::/64")
direction = "ingress"
cls.metering_label_rule = cls.create_metering_label_rule(
remote_ip_prefix, direction,
metering_label_id=cls.metering_label['id'])
def _delete_metering_label(self, metering_label_id):
# Deletes a label and verifies if it is deleted or not
self.admin_client.delete_metering_label(metering_label_id)
# Asserting that the label is not found in list after deletion
labels = self.admin_client.list_metering_labels(id=metering_label_id)
self.assertEqual(len(labels['metering_labels']), 0)
def _delete_metering_label_rule(self, metering_label_rule_id):
# Deletes a rule and verifies if it is deleted or not
self.admin_client.delete_metering_label_rule(
metering_label_rule_id)
# Asserting that the rule is not found in list after deletion
rules = (self.admin_client.list_metering_label_rules(
id=metering_label_rule_id))
self.assertEqual(len(rules['metering_label_rules']), 0)
@test.idempotent_id('e2fb2f8c-45bf-429a-9f17-171c70444612')
def test_list_metering_labels(self):
# Verify label filtering
body = self.admin_client.list_metering_labels(id=33)
metering_labels = body['metering_labels']
self.assertEqual(0, len(metering_labels))
@test.idempotent_id('ec8e15ff-95d0-433b-b8a6-b466bddb1e50')
def test_create_delete_metering_label_with_filters(self):
# Creates a label
name = data_utils.rand_name('metering-label-')
description = "label created by tempest"
body = self.admin_client.create_metering_label(name=name,
description=description)
metering_label = body['metering_label']
self.addCleanup(self._delete_metering_label,
metering_label['id'])
# Assert whether created labels are found in labels list or fail
# if created labels are not found in labels list
labels = (self.admin_client.list_metering_labels(
id=metering_label['id']))
self.assertEqual(len(labels['metering_labels']), 1)
@test.idempotent_id('30abb445-0eea-472e-bd02-8649f54a5968')
def test_show_metering_label(self):
# Verifies the details of a label
body = self.admin_client.show_metering_label(self.metering_label['id'])
metering_label = body['metering_label']
self.assertEqual(self.metering_label['id'], metering_label['id'])
self.assertEqual(self.metering_label['tenant_id'],
metering_label['tenant_id'])
self.assertEqual(self.metering_label['name'], metering_label['name'])
self.assertEqual(self.metering_label['description'],
metering_label['description'])
@test.idempotent_id('cc832399-6681-493b-9d79-0202831a1281')
def test_list_metering_label_rules(self):
# Verify rule filtering
body = self.admin_client.list_metering_label_rules(id=33)
metering_label_rules = body['metering_label_rules']
self.assertEqual(0, len(metering_label_rules))
@test.idempotent_id('f4d547cd-3aee-408f-bf36-454f8825e045')
def test_create_delete_metering_label_rule_with_filters(self):
# Creates a rule
remote_ip_prefix = ("10.0.1.0/24" if self._ip_version == 4
else "fd03::/64")
body = (self.admin_client.create_metering_label_rule(
remote_ip_prefix=remote_ip_prefix,
direction="ingress",
metering_label_id=self.metering_label['id']))
metering_label_rule = body['metering_label_rule']
self.addCleanup(self._delete_metering_label_rule,
metering_label_rule['id'])
# Assert whether created rules are found in rules list or fail
# if created rules are not found in rules list
rules = (self.admin_client.list_metering_label_rules(
id=metering_label_rule['id']))
self.assertEqual(len(rules['metering_label_rules']), 1)
@test.idempotent_id('b7354489-96ea-41f3-9452-bace120fb4a7')
def test_show_metering_label_rule(self):
# Verifies the details of a rule
body = (self.admin_client.show_metering_label_rule(
self.metering_label_rule['id']))
metering_label_rule = body['metering_label_rule']
self.assertEqual(self.metering_label_rule['id'],
metering_label_rule['id'])
self.assertEqual(self.metering_label_rule['remote_ip_prefix'],
metering_label_rule['remote_ip_prefix'])
self.assertEqual(self.metering_label_rule['direction'],
metering_label_rule['direction'])
self.assertEqual(self.metering_label_rule['metering_label_id'],
metering_label_rule['metering_label_id'])
self.assertFalse(metering_label_rule['excluded'])
class MeteringIpV6TestJSON(MeteringTestJSON):
_ip_version = 6
| apache-2.0 |
aquametalabs/django-snailtracker | django_snailtracker/tests/dummy/dummy/wsgi.py | 1 | 1132 | """
WSGI config for dummy project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dummy.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
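# A minimal sketch of such a wrapper (hypothetical middleware class, shown
# for illustration only -- any WSGI callable that delegates to the wrapped
# application works):
#
# class LowercasePathMiddleware(object):
#     """Normalize PATH_INFO to lowercase before Django routes the request."""
#     def __init__(self, wrapped):
#         self.wrapped = wrapped
#     def __call__(self, environ, start_response):
#         environ['PATH_INFO'] = environ.get('PATH_INFO', '').lower()
#         return self.wrapped(environ, start_response)
#
# application = LowercasePathMiddleware(application)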
| bsd-3-clause |
mfherbst/spack | var/spack/repos/builtin/packages/fasttree/package.py | 5 | 2232 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Fasttree(Package):
"""FastTree infers approximately-maximum-likelihood phylogenetic
trees from alignments of nucleotide or protein sequences.
FastTree can handle alignments with up to a million sequences
in a reasonable amount of time and memory."""
homepage = "http://www.microbesonline.org/fasttree"
url = "http://www.microbesonline.org/fasttree/FastTree-2.1.10.c"
version('2.1.10', '1c2c6425a638ec0c61ef064cda687987', expand=False, url='http://www.microbesonline.org/fasttree/FastTree-2.1.10.c')
phases = ['build', 'install']
def build(self, spec, prefix):
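# Compile the single C source directly with the Spack compiler wrapper,
# enabling OpenMP (-DOPENMP plus the compiler's OpenMP flag) so the
# resulting FastTreeMP binary is multithreaded.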
cc = Executable(spack_cc)
cc('-O3', self.compiler.openmp_flag,
'-DOPENMP', '-finline-functions', '-funroll-loops', '-Wall',
'-oFastTreeMP', 'FastTree-' + format(spec.version.dotted) + '.c',
'-lm')
def install(self, spec, prefix):
mkdir(prefix.bin)
install('FastTreeMP', prefix.bin)
| lgpl-2.1 |
xiangke/pycopia | mibs/pycopia/mibs/CISCO_CIPTG_MIB_OID.py | 1 | 5895 | # python
# This file is generated by a program (mib2py).
import CISCO_CIPTG_MIB
OIDMAP = {
'1.3.6.1.4.1.9.9.73': CISCO_CIPTG_MIB.ciscoCipTgMIB,
'1.3.6.1.4.1.9.9.73.1': CISCO_CIPTG_MIB.cipTgObjects,
'1.3.6.1.4.1.9.9.73.1.1': CISCO_CIPTG_MIB.cipTgLlc,
'1.3.6.1.4.1.9.9.73.1.2': CISCO_CIPTG_MIB.cipTgIp,
'1.3.6.1.4.1.9.9.73.1.3': CISCO_CIPTG_MIB.cipTgCmgr,
'1.3.6.1.4.1.9.9.73.3': CISCO_CIPTG_MIB.ciscoCipTgMibConformance,
'1.3.6.1.4.1.9.9.73.3.1': CISCO_CIPTG_MIB.ciscoCipTgMibCompliances,
'1.3.6.1.4.1.9.9.73.3.2': CISCO_CIPTG_MIB.ciscoCipTgMibGroups,
'1.3.6.1.4.1.9.9.73.1.1.1.1.1': CISCO_CIPTG_MIB.cipTgLlcAdminName,
'1.3.6.1.4.1.9.9.73.1.1.1.1.2': CISCO_CIPTG_MIB.cipTgLlcAdminLanType,
'1.3.6.1.4.1.9.9.73.1.1.1.1.3': CISCO_CIPTG_MIB.cipTgLlcAdminAdaptNo,
'1.3.6.1.4.1.9.9.73.1.1.1.1.4': CISCO_CIPTG_MIB.cipTgLlcAdminLSAP,
'1.3.6.1.4.1.9.9.73.1.1.1.1.5': CISCO_CIPTG_MIB.cipTgLlcAdminRMAC,
'1.3.6.1.4.1.9.9.73.1.1.1.1.6': CISCO_CIPTG_MIB.cipTgLlcAdminRSAP,
'1.3.6.1.4.1.9.9.73.1.1.1.1.7': CISCO_CIPTG_MIB.cipTgLlcAdminRowStatus,
'1.3.6.1.4.1.9.9.73.1.1.2.1.1': CISCO_CIPTG_MIB.cipTgLlcOperState,
'1.3.6.1.4.1.9.9.73.1.1.2.1.2': CISCO_CIPTG_MIB.cipTgLlcOperTGN,
'1.3.6.1.4.1.9.9.73.1.1.2.1.3': CISCO_CIPTG_MIB.cipTgLlcOperLocalCP,
'1.3.6.1.4.1.9.9.73.1.1.2.1.4': CISCO_CIPTG_MIB.cipTgLlcOperRemoteCP,
'1.3.6.1.4.1.9.9.73.1.1.2.1.5': CISCO_CIPTG_MIB.cipTgLlcOperMaxIn,
'1.3.6.1.4.1.9.9.73.1.1.2.1.6': CISCO_CIPTG_MIB.cipTgLlcOperMaxOut,
'1.3.6.1.4.1.9.9.73.1.1.2.1.7': CISCO_CIPTG_MIB.cipTgLlcOperHpr,
'1.3.6.1.4.1.9.9.73.1.1.2.1.8': CISCO_CIPTG_MIB.cipTgLlcOperHprLSAP,
'1.3.6.1.4.1.9.9.73.1.1.2.1.9': CISCO_CIPTG_MIB.cipTgLlcOperHprRSAP,
'1.3.6.1.4.1.9.9.73.1.1.2.1.10': CISCO_CIPTG_MIB.cipTgLlcOperRIF,
'1.3.6.1.4.1.9.9.73.1.1.2.1.11': CISCO_CIPTG_MIB.cipTgLlcOperLocalVcToken,
'1.3.6.1.4.1.9.9.73.1.1.2.1.12': CISCO_CIPTG_MIB.cipTgLlcOperRemoteVcToken,
'1.3.6.1.4.1.9.9.73.1.1.2.1.13': CISCO_CIPTG_MIB.cipTgLlcOperLocalConnToken,
'1.3.6.1.4.1.9.9.73.1.1.2.1.14': CISCO_CIPTG_MIB.cipTgLlcOperRemoteConnToken,
'1.3.6.1.4.1.9.9.73.1.1.2.1.15': CISCO_CIPTG_MIB.cipTgLlcOperVcStatus,
'1.3.6.1.4.1.9.9.73.1.1.2.1.16': CISCO_CIPTG_MIB.cipTgLlcOperConnStatus,
'1.3.6.1.4.1.9.9.73.1.1.3.1.1': CISCO_CIPTG_MIB.cipTgLlcStatsIFramesIn,
'1.3.6.1.4.1.9.9.73.1.1.3.1.2': CISCO_CIPTG_MIB.cipTgLlcStatsIFrameBytesIn,
'1.3.6.1.4.1.9.9.73.1.1.3.1.3': CISCO_CIPTG_MIB.cipTgLlcStatsHCIFrameBytesIn,
'1.3.6.1.4.1.9.9.73.1.1.3.1.4': CISCO_CIPTG_MIB.cipTgLlcStatsIFramesOut,
'1.3.6.1.4.1.9.9.73.1.1.3.1.5': CISCO_CIPTG_MIB.cipTgLlcStatsIFrameBytesOut,
'1.3.6.1.4.1.9.9.73.1.1.3.1.6': CISCO_CIPTG_MIB.cipTgLlcStatsHCIFrameBytesOut,
'1.3.6.1.4.1.9.9.73.1.1.3.1.7': CISCO_CIPTG_MIB.cipTgLlcStatsUIFramesIn,
'1.3.6.1.4.1.9.9.73.1.1.3.1.8': CISCO_CIPTG_MIB.cipTgLlcStatsUIFrameBytesIn,
'1.3.6.1.4.1.9.9.73.1.1.3.1.9': CISCO_CIPTG_MIB.cipTgLlcStatsHCUIFrameBytesIn,
'1.3.6.1.4.1.9.9.73.1.1.3.1.10': CISCO_CIPTG_MIB.cipTgLlcStatsUIFramesOut,
'1.3.6.1.4.1.9.9.73.1.1.3.1.11': CISCO_CIPTG_MIB.cipTgLlcStatsUIFrameBytesOut,
'1.3.6.1.4.1.9.9.73.1.1.3.1.12': CISCO_CIPTG_MIB.cipTgLlcStatsHCUIFrameBytesOut,
'1.3.6.1.4.1.9.9.73.1.1.3.1.13': CISCO_CIPTG_MIB.cipTgLlcStatsTestCmdsOut,
'1.3.6.1.4.1.9.9.73.1.1.3.1.14': CISCO_CIPTG_MIB.cipTgLlcStatsTestRspsIn,
'1.3.6.1.4.1.9.9.73.1.1.3.1.15': CISCO_CIPTG_MIB.cipTgLlcStatsXidCmdsIn,
'1.3.6.1.4.1.9.9.73.1.1.3.1.16': CISCO_CIPTG_MIB.cipTgLlcStatsXidCmdsOut,
'1.3.6.1.4.1.9.9.73.1.1.3.1.17': CISCO_CIPTG_MIB.cipTgLlcStatsXidRspsIn,
'1.3.6.1.4.1.9.9.73.1.1.3.1.18': CISCO_CIPTG_MIB.cipTgLlcStatsXidRspsOut,
'1.3.6.1.4.1.9.9.73.1.1.3.1.19': CISCO_CIPTG_MIB.cipTgLlcStatsConnNumberRecv,
'1.3.6.1.4.1.9.9.73.1.1.3.1.20': CISCO_CIPTG_MIB.cipTgLlcStatsConnNumberSent,
'1.3.6.1.4.1.9.9.73.1.2.1.1.1': CISCO_CIPTG_MIB.cipTgIpAdminName,
'1.3.6.1.4.1.9.9.73.1.2.1.1.2': CISCO_CIPTG_MIB.cipTgIpAdminType,
'1.3.6.1.4.1.9.9.73.1.2.1.1.3': CISCO_CIPTG_MIB.cipTgIpAdminRemoteIpAddr,
'1.3.6.1.4.1.9.9.73.1.2.1.1.4': CISCO_CIPTG_MIB.cipTgIpAdminLocalIpAddr,
'1.3.6.1.4.1.9.9.73.1.2.1.1.5': CISCO_CIPTG_MIB.cipTgIpAdminBroadcast,
'1.3.6.1.4.1.9.9.73.1.2.1.1.6': CISCO_CIPTG_MIB.cipTgIpAdminRowStatus,
'1.3.6.1.4.1.9.9.73.1.2.2.1.1': CISCO_CIPTG_MIB.cipTgIpOperLocalVcToken,
'1.3.6.1.4.1.9.9.73.1.2.2.1.2': CISCO_CIPTG_MIB.cipTgIpOperRemoteVcToken,
'1.3.6.1.4.1.9.9.73.1.2.2.1.3': CISCO_CIPTG_MIB.cipTgIpOperLocalConnToken,
'1.3.6.1.4.1.9.9.73.1.2.2.1.4': CISCO_CIPTG_MIB.cipTgIpOperRemoteConnToken,
'1.3.6.1.4.1.9.9.73.1.2.2.1.5': CISCO_CIPTG_MIB.cipTgIpOperVcStatus,
'1.3.6.1.4.1.9.9.73.1.2.2.1.6': CISCO_CIPTG_MIB.cipTgIpOperConnStatus,
'1.3.6.1.4.1.9.9.73.1.2.3.1.1': CISCO_CIPTG_MIB.cipTgIpStatsPacketsIn,
'1.3.6.1.4.1.9.9.73.1.2.3.1.2': CISCO_CIPTG_MIB.cipTgIpStatsBytesIn,
'1.3.6.1.4.1.9.9.73.1.2.3.1.3': CISCO_CIPTG_MIB.cipTgIpStatsHCBytesIn,
'1.3.6.1.4.1.9.9.73.1.2.3.1.4': CISCO_CIPTG_MIB.cipTgIpStatsPacketsOut,
'1.3.6.1.4.1.9.9.73.1.2.3.1.5': CISCO_CIPTG_MIB.cipTgIpStatsBytesOut,
'1.3.6.1.4.1.9.9.73.1.2.3.1.6': CISCO_CIPTG_MIB.cipTgIpStatsHCBytesOut,
'1.3.6.1.4.1.9.9.73.1.3.1.1.1': CISCO_CIPTG_MIB.cipTgCmgrOperName,
'1.3.6.1.4.1.9.9.73.1.3.1.1.2': CISCO_CIPTG_MIB.cipTgCmgrOperType,
'1.3.6.1.4.1.9.9.73.1.3.1.1.3': CISCO_CIPTG_MIB.cipTgCmgrOperLocalGrToken,
'1.3.6.1.4.1.9.9.73.1.3.1.1.4': CISCO_CIPTG_MIB.cipTgCmgrOperRemoteGrToken,
'1.3.6.1.4.1.9.9.73.1.3.1.1.5': CISCO_CIPTG_MIB.cipTgCmgrOperLocalVcToken,
'1.3.6.1.4.1.9.9.73.1.3.1.1.6': CISCO_CIPTG_MIB.cipTgCmgrOperRemoteVcToken,
'1.3.6.1.4.1.9.9.73.1.3.1.1.7': CISCO_CIPTG_MIB.cipTgCmgrOperLocalConnToken,
'1.3.6.1.4.1.9.9.73.1.3.1.1.8': CISCO_CIPTG_MIB.cipTgCmgrOperRemoteConnToken,
'1.3.6.1.4.1.9.9.73.1.3.1.1.9': CISCO_CIPTG_MIB.cipTgCmgrOperVcStatus,
'1.3.6.1.4.1.9.9.73.1.3.1.1.10': CISCO_CIPTG_MIB.cipTgCmgrOperConnStatus,
'1.3.6.1.4.1.9.9.73.3.2.3': CISCO_CIPTG_MIB.ciscoCipTgLlcGroupRev1,
'1.3.6.1.4.1.9.9.73.3.2.4': CISCO_CIPTG_MIB.ciscoCipTgIpGroup,
'1.3.6.1.4.1.9.9.73.3.2.5': CISCO_CIPTG_MIB.ciscoCipTgCmgrGroup,
}
| lgpl-2.1 |
hachreak/invenio-search | invenio_search/config.py | 1 | 2187 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2012, 2013,
# 2015, 2016, 2017 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Invenio Search Engine config parameters."""
SEARCH_ALLOWED_KEYWORDS = []
"""A list of allowed keywords for the query parser."""
#
# ELASTIC configuration
#
SEARCH_ELASTIC_HOSTS = None # default localhost
"""List of hosts for Elasticsearch client.
See `Elasticsearch
<https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch>`_.
"""
SEARCH_ELASTIC_KEYWORD_MAPPING = {
None: ["_all"],
"author": {
'a': ["main_entry_personal_name.personal_name",
"added_entry_personal_name.personal_name"],
'p': ["main_entry_personal_name.personal_name",
"added_entry_personal_name.personal_name"],
'e': ['authors.raw'],
},
"collection": ["_collections"],
"title": ["title_statement.title"],
"980": [
"collections.primary",
"collections.secondary",
"collections.deleted",
],
"980__a": ["collections.primary"],
"980__b": ["collections.secondary"],
"542__l": ["information_relating_to_copyright_status.copyright_status"],
}
"""Holds a dictionary to map invenio keywords to elasticsearch fields."""
| gpl-2.0 |
jiangxb1987/spark | python/pyspark/sql/session.py | 3 | 37802 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
xrange = range
else:
from itertools import izip as zip, imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, TimestampType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
:param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
.. autoattribute:: builder
:annotation:
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
_sc = None
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive SerDes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
def _sparkContext(self, sc):
with self._lock:
self._sc = sc
return self
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
if self._sc is not None:
sc = self._sc
else:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
# This SparkContext may be an existing one.
sc = SparkContext.getOrCreate(sparkConf)
# Do not update `SparkConf` for existing `SparkContext`, as it's shared
# by all sessions.
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances."""
_instantiatedSession = None
_activeSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
if self._jvm.SparkSession.getDefaultSession().isDefined() \
and not self._jvm.SparkSession.getDefaultSession().get() \
.sparkContext().isStopped():
jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
else:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
SparkSession._activeSession = self
self._jvm.SparkSession.setDefaultSession(self._jsparkSession)
self._jvm.SparkSession.setActiveSession(self._jsparkSession)
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as a new session, which has separate SQLConf,
registered temporary views and UDFs, but shares the underlying
SparkContext and table cache.
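A minimal sketch (assumes an active ``spark`` session; skipped under
doctest):
>>> other = spark.newSession()  # doctest: +SKIP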
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@classmethod
@since(3.0)
def getActiveSession(cls):
"""
Returns the active SparkSession for the current thread, returned by the builder.
>>> s = SparkSession.getActiveSession()
>>> l = [('Alice', 1)]
>>> rdd = s.sparkContext.parallelize(l)
>>> df = s.createDataFrame(rdd, ['name', 'age'])
>>> df.select("age").collect()
[Row(age=1)]
"""
from pyspark import SparkContext
sc = SparkContext._active_spark_context
if sc is None:
return None
else:
if sc._jvm.SparkSession.getActiveSession().isDefined():
SparkSession(sc, sc._jvm.SparkSession.getActiveSession().get())
return SparkSession._activeSession
else:
return None
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions, etc.
:return: :class:`Catalog`
"""
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
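A minimal sketch (assumes an active ``spark`` session; skipped under
doctest):
>>> strlen = spark.udf.register("strlen", lambda s: len(s))  # doctest: +SKIP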
"""
from pyspark.sql.udf import UDFRegistration
return UDFRegistration(self)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data, names=None):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:param names: list of column names
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None, names=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first, names=names)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row, names=names))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
return schema
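    # Behaviour sketch (illustration only): with samplingRatio=None the schema
    # comes from the first row and is merged with up to 100 more rows while any
    # column is still NullType; with samplingRatio < 0.99 a random sample of the
    # RDD is scanned and every sampled row is merged via _merge_type.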
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio, names=schema)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
        # make sure data can be consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data, names=schema)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
        :return: corrected dtype for a numpy.record, or None if no correction is needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
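    # Example of the correction (hypothetical values): a record dtype of
    # [('ts', 'datetime64[ns]')] becomes [('ts', 'datetime64[us]')], so that
    # numpy.record.tolist() yields datetime objects instead of raw longs.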
def _convert_from_pandas(self, pdf, schema, timezone):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
:return list of records
"""
if timezone is not None:
from pyspark.sql.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if s is not pdf[field.name]:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(series, timezone)
if s is not series:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records]
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
"""
Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
        data types will be used to coerce the data during the Pandas-to-Arrow conversion.
"""
from pyspark.serializers import ArrowStreamPandasSerializer
from pyspark.sql.types import from_arrow_type, to_arrow_type, TimestampType
from pyspark.sql.utils import require_minimum_pandas_version, \
require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
import pyarrow as pa
# Create the Spark schema from list of names passed in with Arrow types
if isinstance(schema, (list, tuple)):
arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
struct = StructType()
for name, field in zip(schema, arrow_schema):
struct.add(name, from_arrow_type(field.type), nullable=field.nullable)
schema = struct
# Determine arrow types to coerce data when creating batches
if isinstance(schema, StructType):
arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
elif isinstance(schema, DataType):
raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
else:
# Any timestamps must be coerced to be compatible with Spark
arrow_types = [to_arrow_type(TimestampType())
if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
for t in pdf.dtypes]
# Slice the DataFrame to be batched
        step = -(-len(pdf) // self.sparkContext.defaultParallelism)  # ceiling division: rows per slice
pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))
# Create list of Arrow (columns, type) for serializer dump_stream
arrow_data = [[(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)]
for pdf_slice in pdf_slices]
jsqlContext = self._wrapped._jsqlContext
safecheck = self._wrapped._conf.arrowSafeTypeConversion()
col_by_name = True # col by name only applies to StructType columns, can't happen here
ser = ArrowStreamPandasSerializer(timezone, safecheck, col_by_name)
def reader_func(temp_filename):
return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)
def create_RDD_server():
return self._jvm.ArrowRDDServer(jsqlContext)
# Create Spark DataFrame from Arrow stream file, using one batch per partition
jrdd = self._sc._serialize_to_jvm(arrow_data, ser, reader_func, create_RDD_server)
jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@staticmethod
def _create_shell_session():
"""
Initialize a SparkSession for a pyspark shell session. This is called from shell.py
to make error handling simpler without needing to declare local variables in that
script, which would expose those to users.
"""
import py4j
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
try:
# Try to access HiveConf, it will raise exception if Hive is not added
conf = SparkConf()
if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
return SparkSession.builder\
.enableHiveSupport()\
.getOrCreate()
else:
return SparkSession.builder.getOrCreate()
except (py4j.protocol.Py4JError, TypeError):
if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
"please make sure you build spark with hive")
return SparkSession.builder.getOrCreate()
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of either :class:`Row`,
:class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value".
Each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
        rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation (e.g. row, tuple, int, boolean,
etc.), :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
.. note:: Usage with spark.sql.execution.arrow.pyspark.enabled=True is experimental.
.. note:: When Arrow optimization is enabled, strings inside Pandas DataFrame in Python
2 are converted into bytes as they are bytes in Python 2 whereas regular strings are
left as strings. When using strings in Python 2, use unicode `u""` as Python standard
practice.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
SparkSession._activeSession = self
self._jvm.SparkSession.setActiveSession(self._jsparkSession)
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
elif isinstance(schema, (list, tuple)):
# Must re-encode any unicode strings to be consistent with StructField names
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
if self._wrapped._conf.pandasRespectSessionTimeZone():
timezone = self._wrapped._conf.sessionLocalTimeZone()
else:
timezone = None
# If no schema supplied by user then get the names of columns only
if schema is None:
schema = [str(x) if not isinstance(x, basestring) else
(x.encode('utf-8') if not isinstance(x, str) else x)
for x in data.columns]
if self._wrapped._conf.arrowPySparkEnabled() and len(data) > 0:
try:
return self._create_from_pandas_with_arrow(data, schema, timezone)
except Exception as e:
from pyspark.util import _exception_message
if self._wrapped._conf.arrowPySparkFallbackEnabled():
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"Attempting non-optimization as "
"'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to "
"true." % _exception_message(e))
warnings.warn(msg)
else:
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has "
"reached the error below and will not continue because automatic "
"fallback with 'spark.sql.execution.arrow.pyspark.fallback.enabled' "
"has been set to false.\n %s" % _exception_message(e))
warnings.warn(msg)
raise
data = self._convert_from_pandas(data, schema, timezone)
if isinstance(schema, StructType):
verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
verify_func = _make_type_verifier(
dataType, name="field value") if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj,
else:
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` instances active on `this` context.
.. note:: Evolving.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
# We should clean the default session up. See SPARK-23228.
self._jvm.SparkSession.clearDefaultSession()
self._jvm.SparkSession.clearActiveSession()
SparkSession._instantiatedSession = None
SparkSession._activeSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
spacewalkproject/spacewalk | contrib/actionchain/usecases.py | 17 | 3847 | #!/usr/bin/python
#
import xmlrpclib
class SMConnect:
"""
SM connection mixin.
"""
def __init__(self, host, user, password):
"""
Constructor.
"""
self.client = xmlrpclib.Server("http://%s/rpc/api" % host, verbose=0)
self.token = self.client.auth.login(user, password)
def findInstalledPackagesByName(self, serverId, name):
"""
Find needed packages.
"""
        return [pkg for pkg in self.client.system.listPackages(self.token, serverId)
                if pkg.get('name', '').lower().find(name.lower()) > -1]
def findServerByHostname(self, fullHostName):
"""
Find needed host.
"""
hostName = fullHostName.split(".")[0]
for server in self.client.system.listActiveSystems(self.token):
if server.get("name") in [hostName, fullHostName]:
return server
return {}
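# Illustrative usage (sketch; host and credentials are placeholders):
#   sm = SMConnect("manager.example.com", "admin", "secret")
#   server = sm.findServerByHostname("pig.suse.de")
#   pkgs = sm.findInstalledPackagesByName(server['id'], "yum")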
class ScenarioRunner(SMConnect):
"""
Admin scenario for the SUSE Manager, utilizing Action Chains.
"""
def example_01(self):
server = self.findServerByHostname("pig.suse.de")
if not server:
return
print server
# You are already able to search for the packages
for pkg in self.findInstalledPackagesByName(server['id'], "yum"):
print pkg.get("name")
# Therefore you also want to use the API in the same way
print self.client.actionchains.addPackageUpgrade(
self.token, 1000010000,
[
{
"name" : "alsa-lib",
"version" : "1.0.22",
},
{
"name" : "java-cup",
"version" : "0.11",
},
{
"name" : "javassist",
"version" : "3.9.0",
"release" : "6.el6",
},
],
"My Great Chain")
def example_02(self):
"""
List action chains.
"""
# List all action chains, available to the current user
for chain in self.client.actionchains.listChains(self.token):
print "Chain:", chain
# Print the details (raw hash)
for data in self.client.actionchains.chainActions(self.token, chain.get("name")):
print "\t", data
def example_03(self):
"""
Remove action entries in the action chain.
"""
self.client.actionchains.addPackageRemoval(
self.token, "pig", "",
[
{
"name" : "alsa-lib",
# "version" : "1.0.22",
},
],
"Test Chain")
# Test Chain must be there
        for chain in self.client.actionchains.listChains(self.token):
            print "Chain:", chain.get("name")
        # List actions
        for data in self.client.actionchains.chainActions(self.token, "Test Chain"):
            print "\t", data
        self.client.actionchains.removeActions(self.token, "Test Chain", ["Package Install"])
        # List actions (should be empty)
        print "After deletion:", self.client.actionchains.chainActions(self.token, "Test Chain")
        # Remove the chain itself:
        self.client.actionchains.removeChains(self.token, ["Test Chain"])
        # Test Chain must no longer be there
        for chain in self.client.actionchains.listChains(self.token):
            print "Chain:", chain.get("name")
def example_04(self):
self.client.actionchains.addPackageInstall(self.token, 1000010000, [581,], "Test Chain")
if __name__ == "__main__":
host = "pig.suse.de"
user = "admin"
password = "admin"
sr = ScenarioRunner(host, user, password)
sr.example_04()
| gpl-2.0 |
berkeley-stat159/project-epsilon | code/utils/scripts/plot_mosaic.py | 4 | 2092 | """
"""
from __future__ import division, print_function
import numpy as np
import nibabel as nib
def plot_mosaic(img_data, transpose=False):
""" Return a mosaic plot for each slice of
the 3rd dimension of img_data
Parameters:
----------
img_data = 3D array
Returns:
-------
grid_2D : a 2D image with each slice of
the 3rd dimension of img_data plotted
in a mosaic
"""
n_slices = img_data.shape[2]
# Dimensions of the mosaic grid
n_rows = int(np.ceil(float(np.sqrt(n_slices))))
n_cols = int(np.ceil(float(n_slices)/float(n_rows)))
# Define the 2D mosaic
grid_2D = np.zeros((n_rows*img_data.shape[0], n_cols*img_data.shape[1]))
z = 0
for i in range(n_rows):
for j in range(n_cols):
if z < n_slices:
                if transpose:
img_data_slice = img_data[:,::-1,z].T
else:
img_data_slice = img_data[:,::-1,z]
grid_2D[i*img_data.shape[0]:(i+1)*img_data.shape[0],\
j*img_data.shape[1]:(j+1)*img_data.shape[1]] = img_data_slice
z += 1
return grid_2D
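# Illustrative usage (sketch, synthetic data; assumes matplotlib.pyplot as plt):
#   vol = np.random.rand(64, 64, 30)   # any 3D array
#   grid = plot_mosaic(vol)            # 6 x 5 grid of 64x64 slices
#   plt.imshow(grid, cmap='gray')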
if __name__=='__main__':
import matplotlib.pyplot as plt
plt.rcParams['image.cmap'] = 'gray'
plt.rcParams['image.interpolation'] = 'nearest'
project_path='../../../'
#img = nib.load(\
#'../../../data/ds005/sub001/BOLD/task001_run001/bold.nii.gz')
template = nib.load(project_path+\
'data/mni_icbm152_t1_tal_nlin_asym_09c_2mm.nii')
template_data_int = template.get_data()
template_data = template_data_int.astype(float)
img = nib.load(project_path+\
'data/ds005/sub001/model/model001/task001_run001.feat/' + \
'masked_filtered_func_data_mni.nii.gz')
img_data_int = img.get_data()
img_data = img_data_int.astype(float)
mean_data = np.mean(img_data, axis=-1)
plt.title('In brain voxels - mean values')
plt.imshow(plot_mosaic(template_data, transpose=False), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(mean_data, transpose=False), cmap='gray', alpha=1)
plt.colorbar()
plt.show()
| bsd-3-clause |
mgoral/googlemock | scripts/generator/cpp/keywords.py | 1157 | 2004 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C++ keywords and helper utilities for determining keywords."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
if not hasattr(builtins, 'set'):
# Nominal support for Python 2.3.
from sets import Set as set
TYPES = set('bool char int long short double float void wchar_t unsigned signed'.split())
TYPE_MODIFIERS = set('auto register const inline extern static virtual volatile mutable'.split())
ACCESS = set('public protected private friend'.split())
CASTS = set('static_cast const_cast dynamic_cast reinterpret_cast'.split())
OTHERS = set('true false asm class namespace using explicit this operator sizeof'.split())
OTHER_TYPES = set('new delete typedef struct union enum typeid typename template'.split())
CONTROL = set('case switch default if else return goto'.split())
EXCEPTION = set('try catch throw'.split())
LOOP = set('while do for break continue'.split())
ALL = TYPES | TYPE_MODIFIERS | ACCESS | CASTS | OTHERS | OTHER_TYPES | CONTROL | EXCEPTION | LOOP
def IsKeyword(token):
return token in ALL
def IsBuiltinType(token):
if token in ('virtual', 'inline'):
# These only apply to methods, they can't be types by themselves.
return False
return token in TYPES or token in TYPE_MODIFIERS
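# Illustrative usage (sketch):
#   IsKeyword('virtual')        # True  -- listed in TYPE_MODIFIERS
#   IsBuiltinType('virtual')    # False -- method-only modifier, filtered out
#   IsBuiltinType('unsigned')   # True  -- listed in TYPES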
| bsd-3-clause |
BIT-SYS/gem5-spm-module | src/mem/slicc/ast/TypeDeclAST.py | 75 | 2786 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.DeclAST import DeclAST
from slicc.symbols.Type import Type
class TypeDeclAST(DeclAST):
def __init__(self, slicc, type_ast, pairs, field_asts):
super(TypeDeclAST, self).__init__(slicc, pairs)
self.type_ast = type_ast
self.field_asts = field_asts
def __repr__(self):
return "[TypeDecl: %r]" % (self.type_ast)
def files(self, parent=None):
if "external" in self:
return set()
if parent:
ident = "%s_%s" % (parent, self.type_ast.ident)
else:
ident = self.type_ast.ident
return set(("%s.hh" % ident, "%s.cc" % ident))
def generate(self):
ident = str(self.type_ast)
machine = self.symtab.state_machine
# Make the new type
new_type = Type(self.symtab, ident, self.location, self.pairs,
self.state_machine)
if machine:
machine.addType(new_type)
self.symtab.newSymbol(new_type)
self.symtab.pushFrame()
# Add all of the fields of the type to it
for field in self.field_asts:
field.generate(new_type)
self.symtab.popFrame()
| bsd-3-clause |
jrabbit/compose | tests/unit/cli/formatter_test.py | 9 | 1896 | from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from compose.cli import colors
from compose.cli.formatter import ConsoleWarningFormatter
from tests import unittest
MESSAGE = 'this is the message'
def make_log_record(level, message=None):
return logging.LogRecord('name', level, 'pathame', 0, message or MESSAGE, (), None)
class ConsoleWarningFormatterTestCase(unittest.TestCase):
def setUp(self):
self.formatter = ConsoleWarningFormatter()
def test_format_warn(self):
output = self.formatter.format(make_log_record(logging.WARN))
expected = colors.yellow('WARNING') + ': '
assert output == expected + MESSAGE
def test_format_error(self):
output = self.formatter.format(make_log_record(logging.ERROR))
expected = colors.red('ERROR') + ': '
assert output == expected + MESSAGE
def test_format_info(self):
output = self.formatter.format(make_log_record(logging.INFO))
assert output == MESSAGE
def test_format_unicode_info(self):
message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
output = self.formatter.format(make_log_record(logging.INFO, message))
assert output == message.decode('utf-8')
def test_format_unicode_warn(self):
message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
output = self.formatter.format(make_log_record(logging.WARN, message))
expected = colors.yellow('WARNING') + ': '
assert output == '{0}{1}'.format(expected, message.decode('utf-8'))
def test_format_unicode_error(self):
message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
output = self.formatter.format(make_log_record(logging.ERROR, message))
expected = colors.red('ERROR') + ': '
assert output == '{0}{1}'.format(expected, message.decode('utf-8'))
| apache-2.0 |
romanoid/buck | third-party/py/unittest2/unittest2/case.py | 153 | 42780 | """Test case implementation"""
import sys
import difflib
import pprint
import re
import unittest
import warnings
from unittest2 import result
from unittest2.util import (
safe_repr, safe_str, strclass,
unorderable_list_difference
)
from unittest2.compatibility import wraps
__unittest = True
DIFF_OMITTED = ('\nDiff is %s characters long. '
'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
"""
Raise this exception in a test to skip it.
    Usually you can use TestCase.skipTest() or one of the skipping decorators
instead of raising this directly.
"""
class _ExpectedFailure(Exception):
"""
Raise this when a test is expected to fail.
This is an implementation detail.
"""
def __init__(self, exc_info):
# can't use super because Python 2.4 exceptions are old style
Exception.__init__(self)
self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
def _id(obj):
return obj
def skip(reason):
"""
Unconditionally skip a test.
"""
def decorator(test_item):
if not (isinstance(test_item, type) and issubclass(test_item, TestCase)):
@wraps(test_item)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
test_item = skip_wrapper
test_item.__unittest_skip__ = True
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIf(condition, reason):
"""
Skip a test if the condition is true.
"""
if condition:
return skip(reason)
return _id
def skipUnless(condition, reason):
"""
Skip a test unless the condition is true.
"""
if not condition:
return skip(reason)
return _id
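# Illustrative usage of the skip decorators (sketch; assumes `import sys`):
#   class WindowsTest(TestCase):
#       @skipUnless(sys.platform.startswith("win"), "requires Windows")
#       def test_windows_registry(self):
#           ...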
def expectedFailure(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
raise _ExpectedFailure(sys.exc_info())
raise _UnexpectedSuccess
return wrapper
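# Illustrative usage (sketch): the decorated test is reported as an expected
# failure when it fails, and as an unexpected success when it passes.
#   class KnownBugTest(TestCase):
#       @expectedFailure
#       def test_known_bug(self):
#           self.assertEqual(1, 2)   # known to fail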
class _AssertRaisesContext(object):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __init__(self, expected, test_case, expected_regexp=None):
self.expected = expected
self.failureException = test_case.failureException
self.expected_regexp = expected_regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"%s not raised" % (exc_name,))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
class _TypeEqualityDict(object):
def __init__(self, testcase):
self.testcase = testcase
self._store = {}
def __setitem__(self, key, value):
self._store[key] = value
def __getitem__(self, key):
value = self._store[key]
if isinstance(value, basestring):
return getattr(self.testcase, value)
return value
def get(self, key, default=None):
if key in self._store:
return self[key]
return default
class TestCase(unittest.TestCase):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
"""
# This attribute determines which exception will be raised when
# the instance's assertion methods fail; test methods raising this
# exception will be deemed to have 'failed' rather than 'errored'
failureException = AssertionError
# This attribute sets the maximum length of a diff in failure messages
# by assert methods using difflib. It is looked up as an instance attribute
# so can be configured by individual tests if required.
maxDiff = 80*8
# This attribute determines whether long messages (including repr of
# objects used in assert methods) will be printed on failure in *addition*
# to any explicit message passed.
longMessage = True
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
self._testMethodName = methodName
self._resultForDoCleanups = None
try:
testMethod = getattr(self, methodName)
except AttributeError:
raise ValueError("no such test method in %s: %s" % \
(self.__class__, methodName))
self._testMethodDoc = testMethod.__doc__
self._cleanups = []
# Map types to custom assertEqual functions that will compare
# instances of said type in more detail to generate a more useful
# error message.
self._type_equality_funcs = _TypeEqualityDict(self)
self.addTypeEqualityFunc(dict, 'assertDictEqual')
self.addTypeEqualityFunc(list, 'assertListEqual')
self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
self.addTypeEqualityFunc(set, 'assertSetEqual')
self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
self.addTypeEqualityFunc(unicode, 'assertMultiLineEqual')
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
This method is for use by TestCase subclasses that need to register
their own type equality functions to provide nicer error messages.
Args:
typeobj: The data type to call this function on when both values
are of the same type in assertEqual().
function: The callable taking two arguments and an optional
msg= argument that raises self.failureException with a
useful error message when the two arguments are not equal.
"""
self._type_equality_funcs[typeobj] = function
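    # Illustrative usage (sketch; `Point` and the comparison are hypothetical):
    #   def assertPointEqual(self, a, b, msg=None):
    #       if a.x != b.x or a.y != b.y:
    #           raise self.failureException(msg or '%r != %r' % (a, b))
    #   self.addTypeEqualityFunc(Point, assertPointEqual)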
def addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Cleanup items are called even if setUp fails (unlike tearDown)."""
self._cleanups.append((function, args, kwargs))
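    # Illustrative usage (sketch; assumes `import tempfile, shutil`): cleanups
    # run in LIFO order after tearDown, even if setUp fails partway through.
    #   def setUp(self):
    #       self.tmpdir = tempfile.mkdtemp()
    #       self.addCleanup(shutil.rmtree, self.tmpdir)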
def setUp(self):
"Hook method for setting up the test fixture before exercising it."
@classmethod
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
@classmethod
def tearDownClass(cls):
"Hook method for deconstructing the class fixture after running all tests in the class."
def tearDown(self):
"Hook method for deconstructing the test fixture after testing it."
def countTestCases(self):
return 1
def defaultTestResult(self):
return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._testMethodName == other._testMethodName
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
def _addSkip(self, result, reason):
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None:
addSkip(self, reason)
else:
warnings.warn("Use of a TestResult without an addSkip method is deprecated",
DeprecationWarning, 2)
result.addSuccess(self)
def run(self, result=None):
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
self._resultForDoCleanups = result
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
try:
success = False
try:
self.setUp()
except SkipTest, e:
self._addSkip(result, str(e))
except Exception:
result.addError(self, sys.exc_info())
else:
try:
testMethod()
except self.failureException:
result.addFailure(self, sys.exc_info())
except _ExpectedFailure, e:
addExpectedFailure = getattr(result, 'addExpectedFailure', None)
if addExpectedFailure is not None:
addExpectedFailure(self, e.exc_info)
else:
warnings.warn("Use of a TestResult without an addExpectedFailure method is deprecated",
DeprecationWarning)
result.addSuccess(self)
except _UnexpectedSuccess:
addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self)
else:
warnings.warn("Use of a TestResult without an addUnexpectedSuccess method is deprecated",
DeprecationWarning)
result.addFailure(self, sys.exc_info())
except SkipTest, e:
self._addSkip(result, str(e))
except Exception:
result.addError(self, sys.exc_info())
else:
success = True
try:
self.tearDown()
except Exception:
result.addError(self, sys.exc_info())
success = False
cleanUpSuccess = self.doCleanups()
success = success and cleanUpSuccess
if success:
result.addSuccess(self)
finally:
result.stopTest(self)
if orig_result is None:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
def doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
result = self._resultForDoCleanups
ok = True
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
try:
function(*args, **kwargs)
except Exception:
ok = False
result.addError(self, sys.exc_info())
return ok
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
function(*args, **kwargs)
def skipTest(self, reason):
"""Skip this test."""
raise SkipTest(reason)
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException(msg)
def assertFalse(self, expr, msg=None):
"Fail the test if the expression is true."
if expr:
msg = self._formatMessage(msg, "%s is not False" % safe_repr(expr))
raise self.failureException(msg)
def assertTrue(self, expr, msg=None):
"""Fail the test unless the expression is true."""
if not expr:
msg = self._formatMessage(msg, "%s is not True" % safe_repr(expr))
raise self.failureException(msg)
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
        * If an explicit message is provided, append ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
return '%s : %s' % (standardMsg, msg)
except UnicodeDecodeError:
return '%s : %s' % (safe_str(standardMsg), safe_str(msg))
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail unless an exception of class excClass is thrown
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertRaises(SomeException):
do_something()
The context manager keeps a reference to the exception as
the 'exception' attribute. This allows you to inspect the
exception after the assertion::
with self.assertRaises(SomeException) as cm:
do_something()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, 3)
"""
if callableObj is None:
return _AssertRaisesContext(excClass, self)
try:
callableObj(*args, **kwargs)
except excClass:
return
if hasattr(excClass,'__name__'):
excName = excClass.__name__
else:
excName = str(excClass)
raise self.failureException, "%s not raised" % excName
def _getAssertEqualityFunc(self, first, second):
"""Get a detailed comparison function for the types of the two args.
Returns: A callable accepting (first, second, msg=None) that will
raise a failure exception if first != second with a useful human
readable error message for those types.
"""
#
# NOTE(gregory.p.smith): I considered isinstance(first, type(second))
# and vice versa. I opted for the conservative approach in case
# subclasses are not intended to be compared in detail to their super
# class instances using a type equality func. This means testing
# subtypes won't automagically use the detailed comparison. Callers
# should use their type specific assertSpamEqual method to compare
# subclasses if the detailed comparison is desired and appropriate.
# See the discussion in http://bugs.python.org/issue2578.
#
if type(first) is type(second):
asserter = self._type_equality_funcs.get(type(first))
if asserter is not None:
return asserter
return self._baseAssertEqual
def _baseAssertEqual(self, first, second, msg=None):
"""The default assertEqual implementation, not type specific."""
if not first == second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
assertion_func = self._getAssertEqualityFunc(first, second)
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '=='
operator.
"""
if not first != second:
msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
safe_repr(second)))
raise self.failureException(msg)
def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
        difference between the two objects is more than the given delta.
        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
If the two objects compare equal then they will automatically
compare almost equal.
"""
if first == second:
# shortcut
return
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if abs(first - second) <= delta:
return
standardMsg = '%s != %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if round(abs(second-first), places) == 0:
return
standardMsg = '%s != %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertNotAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
        difference between the two objects is less than the given delta.
        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
Objects that are equal automatically fail.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if not (first == second) and abs(first - second) > delta:
return
standardMsg = '%s == %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if not (first == second) and round(abs(second-first), places) != 0:
return
standardMsg = '%s == %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
# Synonyms for assertion methods
# The plurals are undocumented. Keep them that way to discourage use.
# Do not add more. Do not remove.
# Going through a deprecation cycle on these would annoy many people.
assertEquals = assertEqual
assertNotEquals = assertNotEqual
assertAlmostEquals = assertAlmostEqual
assertNotAlmostEquals = assertNotAlmostEqual
assert_ = assertTrue
# These fail* assertion method names are pending deprecation and will
# be a DeprecationWarning in 3.2; http://bugs.python.org/issue2578
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
('Please use %s instead.' % original_func.__name__),
PendingDeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
failUnlessEqual = _deprecate(assertEqual)
failIfEqual = _deprecate(assertNotEqual)
failUnlessAlmostEqual = _deprecate(assertAlmostEqual)
failIfAlmostEqual = _deprecate(assertNotAlmostEqual)
failUnless = _deprecate(assertTrue)
failUnlessRaises = _deprecate(assertRaises)
failIf = _deprecate(assertFalse)
def assertSequenceEqual(self, seq1, seq2,
msg=None, seq_type=None, max_diff=80*8):
"""An equality assertion for ordered sequences (like lists and tuples).
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
seq1: The first sequence to compare.
seq2: The second sequence to compare.
seq_type: The expected datatype of the sequences, or None if no
datatype should be enforced.
msg: Optional message to use on failure instead of a list of
differences.
            max_diff: Maximum size of the diff; larger diffs are not shown
"""
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
raise self.failureException('First sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq1)))
if not isinstance(seq2, seq_type):
raise self.failureException('Second sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq2)))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = repr(seq1)
seq2_repr = repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in xrange(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
def assertListEqual(self, list1, list2, msg=None):
"""A list-specific equality assertion.
Args:
list1: The first list to compare.
list2: The second list to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(list1, list2, msg, seq_type=list)
def assertTupleEqual(self, tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
def assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
assertSetEqual uses ducktyping to support
different types of sets, and is optimized for sets specifically
(parameters must support a difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError, e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError, e:
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError, e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError, e:
self.fail('second argument does not support set difference: %s' % e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1), safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assert_(isinstance(d1, dict), 'First argument is not a dictionary')
self.assert_(isinstance(d2, dict), 'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictContainsSubset(self, expected, actual, msg=None):
"""Checks whether actual is a superset of expected."""
missing = []
mismatched = []
for key, value in expected.iteritems():
if key not in actual:
missing.append(key)
elif value != actual[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(actual[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
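    # Illustrative behaviour (sketch):
    #   self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})  # passes
    #   self.assertDictContainsSubset({'a': 2}, {'a': 1, 'b': 2})  # fails: mismatched value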
def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
"""An unordered sequence specific comparison. It asserts that
expected_seq and actual_seq contain the same elements. It is
the equivalent of::
self.assertEqual(sorted(expected_seq), sorted(actual_seq))
Raises with an error message listing which elements of expected_seq
are missing from actual_seq and vice versa if any.
Asserts that each element has the same count in both sequences.
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
"""
try:
expected = sorted(expected_seq)
actual = sorted(actual_seq)
except TypeError:
# Unsortable items (example: set(), complex(), ...)
expected = list(expected_seq)
actual = list(actual_seq)
missing, unexpected = unorderable_list_difference(
expected, actual, ignore_duplicate=False
)
else:
return self.assertSequenceEqual(expected, actual, msg=msg)
errors = []
if missing:
errors.append('Expected, but missing:\n %s' %
safe_repr(missing))
if unexpected:
errors.append('Unexpected, but present:\n %s' %
safe_repr(unexpected))
if errors:
standardMsg = '\n'.join(errors)
self.fail(self._formatMessage(msg, standardMsg))
def assertMultiLineEqual(self, first, second, msg=None):
"""Assert that two multi-line strings are equal."""
self.assert_(isinstance(first, basestring), (
'First argument is not a string'))
self.assert_(isinstance(second, basestring), (
'Second argument is not a string'))
if first != second:
standardMsg = '%s != %s' % (safe_repr(first, True), safe_repr(second, True))
diff = '\n' + ''.join(difflib.ndiff(first.splitlines(True),
second.splitlines(True)))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertLess(self, a, b, msg=None):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertLessEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreater(self, a, b, msg=None):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreaterEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Included for symmetry with assertIsInstance."""
if isinstance(obj, cls):
standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertRaisesRegexp(self, expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regexp.
Args:
expected_exception: Exception class expected to be raised.
expected_regexp: Regexp (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
args: Extra args.
kwargs: Extra kwargs.
"""
if callable_obj is None:
return _AssertRaisesContext(expected_exception, self, expected_regexp)
try:
callable_obj(*args, **kwargs)
except expected_exception, exc_value:
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
else:
if hasattr(expected_exception, '__name__'):
excName = expected_exception.__name__
else:
excName = str(expected_exception)
raise self.failureException, "%s not raised" % excName
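    # Editor's note -- illustrative usage, not part of the original source.
    # Because the method returns a context manager when no callable is given
    # (see the callable_obj is None branch above), both styles below work:
    #
    #     self.assertRaisesRegexp(ValueError, 'invalid literal', int, 'XYZ')
    #
    #     with self.assertRaisesRegexp(ValueError, 'invalid literal'):
    #         int('XYZ')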
def assertRegexpMatches(self, text, expected_regexp, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(text):
msg = msg or "Regexp didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regexp.pattern, text)
raise self.failureException(msg)
def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regexp, basestring):
unexpected_regexp = re.compile(unexpected_regexp)
match = unexpected_regexp.search(text)
if match:
msg = msg or "Regexp matched"
msg = '%s: %r matches %r in %r' % (msg,
text[match.start():match.end()],
unexpected_regexp.pattern,
text)
raise self.failureException(msg)
class FunctionTestCase(TestCase):
"""A test case that wraps a test function.
This is useful for slipping pre-existing test functions into the
unittest framework. Optionally, set-up and tidy-up functions can be
supplied. As with TestCase, the tidy-up ('tearDown') function will
always be called if the set-up ('setUp') function ran successfully.
"""
def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
super(FunctionTestCase, self).__init__()
self._setUpFunc = setUp
self._tearDownFunc = tearDown
self._testFunc = testFunc
self._description = description
def setUp(self):
if self._setUpFunc is not None:
self._setUpFunc()
def tearDown(self):
if self._tearDownFunc is not None:
self._tearDownFunc()
def runTest(self):
self._testFunc()
def id(self):
return self._testFunc.__name__
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._setUpFunc == other._setUpFunc and \
self._tearDownFunc == other._tearDownFunc and \
self._testFunc == other._testFunc and \
self._description == other._description
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._setUpFunc, self._tearDownFunc,
self._testFunc, self._description))
def __str__(self):
return "%s (%s)" % (strclass(self.__class__),
self._testFunc.__name__)
def __repr__(self):
return "<%s testFunc=%s>" % (strclass(self.__class__),
self._testFunc)
def shortDescription(self):
if self._description is not None:
return self._description
doc = self._testFunc.__doc__
return doc and doc.split("\n")[0].strip() or None
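# Editor's note -- illustrative usage of FunctionTestCase, not part of the
# original source; `check_math` is a hypothetical plain test function that
# gets wrapped into a TestCase without subclassing:
#
#     def check_math():
#         assert 1 + 1 == 2
#
#     case = FunctionTestCase(check_math, description='basic arithmetic')
#     result = case.defaultTestResult()
#     case.run(result)
#     assert result.wasSuccessful()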
| apache-2.0 |
Ms2ger/servo | tests/wpt/css-tests/css21_dev/html4/reference/support/fonts/makegsubfonts.py | 1616 | 14125 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
# -------
# Features
# -------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
    mapping.append(tag)
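# Editor's note -- the contents of gsubtest-features.txt are not shown here;
# based on the parsing above (skip blanks and "#" comments, split on tabs,
# keep the first field as the feature tag), a plausible line format is:
#
#     aalt<TAB>Access All Alternates
#     liga<TAB>Standard Ligatures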
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
| mpl-2.0 |
arista-eosplus/ansible | lib/ansible/module_utils/vca.py | 36 | 11161 | #
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
try:
from pyvcloud.vcloudair import VCA
HAS_PYVCLOUD = True
except ImportError:
HAS_PYVCLOUD = False
from ansible.module_utils.basic import AnsibleModule
SERVICE_MAP = {'vca': 'ondemand', 'vchs': 'subscription', 'vcd': 'vcd'}
LOGIN_HOST = {'vca': 'vca.vmware.com', 'vchs': 'vchs.vmware.com'}
DEFAULT_SERVICE_TYPE = 'vca'
DEFAULT_VERSION = '5.7'
class VcaError(Exception):
def __init__(self, msg, **kwargs):
self.kwargs = kwargs
super(VcaError, self).__init__(msg)
def vca_argument_spec():
return dict(
username=dict(type='str', aliases=['user'], required=True),
password=dict(type='str', aliases=['pass', 'passwd'], required=True, no_log=True),
org=dict(),
service_id=dict(),
instance_id=dict(),
host=dict(),
api_version=dict(default=DEFAULT_VERSION),
service_type=dict(default=DEFAULT_SERVICE_TYPE, choices=SERVICE_MAP.keys()),
vdc_name=dict(),
gateway_name=dict(default='gateway'),
verify_certs=dict(type='bool', default=True)
)
class VcaAnsibleModule(AnsibleModule):
def __init__(self, *args, **kwargs):
argument_spec = vca_argument_spec()
argument_spec.update(kwargs.get('argument_spec', dict()))
kwargs['argument_spec'] = argument_spec
super(VcaAnsibleModule, self).__init__(*args, **kwargs)
if not HAS_PYVCLOUD:
self.fail("python module pyvcloud is required for this module")
self._vca = self.create_instance()
self.login()
self._gateway = None
self._vdc = None
@property
def vca(self):
return self._vca
@property
def gateway(self):
if self._gateway is not None:
return self._gateway
vdc_name = self.params['vdc_name']
gateway_name = self.params['gateway_name']
_gateway = self.vca.get_gateway(vdc_name, gateway_name)
if not _gateway:
raise VcaError('vca instance has no gateway named %s' % gateway_name)
self._gateway = _gateway
return _gateway
@property
def vdc(self):
if self._vdc is not None:
return self._vdc
vdc_name = self.params['vdc_name']
_vdc = self.vca.get_vdc(vdc_name)
if not _vdc:
raise VcaError('vca instance has no vdc named %s' % vdc_name)
self._vdc = _vdc
return _vdc
def get_vapp(self, vapp_name):
vapp = self.vca.get_vapp(self.vdc, vapp_name)
if not vapp:
raise VcaError('vca instance has no vapp named %s' % vapp_name)
return vapp
def get_vm(self, vapp_name, vm_name):
vapp = self.get_vapp(vapp_name)
children = vapp.me.get_Children()
vms = [vm for vm in children.get_Vm() if vm.name == vm_name]
try:
return vms[0]
except IndexError:
raise VcaError('vapp has no vm named %s' % vm_name)
def create_instance(self):
service_type = self.params.get('service_type', DEFAULT_SERVICE_TYPE)
if service_type == 'vcd':
host = self.params['host']
else:
host = LOGIN_HOST[service_type]
username = self.params['username']
version = self.params.get('api_version')
if service_type == 'vchs':
version = '5.6'
verify = self.params.get('verify_certs')
return VCA(host=host, username=username,
service_type=SERVICE_MAP[service_type],
version=version, verify=verify)
def login(self):
service_type = self.params['service_type']
password = self.params['password']
login_org = None
if service_type == 'vcd':
login_org = self.params['org']
if not self.vca.login(password=password, org=login_org):
self.fail('Login to VCA failed', response=self.vca.response)
try:
method_name = 'login_%s' % service_type
meth = getattr(self, method_name)
meth()
except AttributeError:
self.fail('no login method exists for service_type %s' % service_type)
except VcaError as e:
self.fail(e.message, response=self.vca.response, **e.kwargs)
def login_vca(self):
instance_id = self.params['instance_id']
if not instance_id:
raise VcaError('missing required instance_id for service_type vca')
self.vca.login_to_instance_sso(instance=instance_id)
def login_vchs(self):
service_id = self.params['service_id']
if not service_id:
raise VcaError('missing required service_id for service_type vchs')
org = self.params['org']
if not org:
raise VcaError('missing required org for service_type vchs')
self.vca.login_to_org(service_id, org)
def login_vcd(self):
org = self.params['org']
if not org:
raise VcaError('missing required org for service_type vcd')
if not self.vca.token:
raise VcaError('unable to get token for service_type vcd')
if not self.vca.vcloud_session.org_url:
raise VcaError('unable to get org_url for service_type vcd')
self.vca.login(token=self.vca.token, org=org,
org_url=self.vca.vcloud_session.org_url)
def save_services_config(self, blocking=True):
task = self.gateway.save_services_configuration()
if not task:
self.fail(msg='unable to save gateway services configuration')
if blocking:
self.vca.block_until_completed(task)
def fail(self, msg, **kwargs):
self.fail_json(msg=msg, **kwargs)
def exit(self, **kwargs):
self.exit_json(**kwargs)
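# Editor's note -- illustrative usage of VcaAnsibleModule, not part of the
# original source; the extra argument and its value are hypothetical:
#
#     module = VcaAnsibleModule(argument_spec=dict(vapp_name=dict(required=True)),
#                               supports_check_mode=True)
#     vapp = module.get_vapp(module.params['vapp_name'])
#     module.exit(changed=False, vapp_name=module.params['vapp_name'])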
# -------------------------------------------------------------
# 9/18/2015 @privateip
# All of the functions below here were migrated from the original
# vca_* modules. All functions below should be considered deprecated
# and will be removed once all of the vca_* modules have been updated
# to use the new instance module above
# -------------------------------------------------------------
VCA_REQ_ARGS = ['instance_id', 'vdc_name']
VCHS_REQ_ARGS = ['service_id']
VCD_REQ_ARGS = []
def _validate_module(module):
if not HAS_PYVCLOUD:
module.fail_json(msg="python module pyvcloud is needed for this module")
service_type = module.params.get('service_type', DEFAULT_SERVICE_TYPE)
if service_type == 'vca':
for arg in VCA_REQ_ARGS:
if module.params.get(arg) is None:
module.fail_json(msg="argument %s is mandatory when service type "
"is vca" % arg)
if service_type == 'vchs':
for arg in VCHS_REQ_ARGS:
if module.params.get(arg) is None:
module.fail_json(msg="argument %s is mandatory when service type "
"is vchs" % arg)
if service_type == 'vcd':
for arg in VCD_REQ_ARGS:
if module.params.get(arg) is None:
module.fail_json(msg="argument %s is mandatory when service type "
"is vcd" % arg)
def serialize_instances(instance_list):
instances = []
for i in instance_list:
instances.append(dict(apiUrl=i['apiUrl'], instance_id=i['id']))
return instances
def _vca_login(vca, password, instance):
if not vca.login(password=password):
raise VcaError("Login Failed: Please check username or password",
error=vca.response.content)
if not vca.login_to_instance_sso(instance=instance):
s_json = serialize_instances(vca.instances)
raise VcaError("Login to Instance failed: Seems like instance_id provided "
"is wrong .. Please check", valid_instances=s_json)
return vca
def _vchs_login(vca, password, service, org):
if not vca.login(password=password):
raise VcaError("Login Failed: Please check username or password",
error=vca.response.content)
if not vca.login_to_org(service, org):
raise VcaError("Failed to login to org, Please check the orgname",
error=vca.response.content)
def _vcd_login(vca, password, org):
# TODO: this function needs to be refactored
if not vca.login(password=password, org=org):
raise VcaError("Login Failed: Please check username or password "
"or host parameters")
if not vca.login(password=password, org=org):
raise VcaError("Failed to get the token",
error=vca.response.content)
if not vca.login(token=vca.token, org=org, org_url=vca.vcloud_session.org_url):
raise VcaError("Failed to login to org", error=vca.response.content)
def vca_login(module):
service_type = module.params.get('service_type')
username = module.params.get('username')
password = module.params.get('password')
instance = module.params.get('instance_id')
org = module.params.get('org')
vdc_name = module.params.get('vdc_name')
service = module.params.get('service_id')
version = module.params.get('api_version')
verify = module.params.get('verify_certs')
_validate_module(module)
if not vdc_name and service_type == 'vchs':
vdc_name = module.params.get('service_id')
if not org and service_type == 'vchs':
org = vdc_name or service
if service_type == 'vcd':
host = module.params.get('host')
else:
host = LOGIN_HOST[service_type]
username = os.environ.get('VCA_USER', username)
password = os.environ.get('VCA_PASS', password)
if not username or not password:
msg = "Either the username or password is not set, please check args"
module.fail_json(msg=msg)
if service_type == 'vchs':
version = '5.6'
elif service_type == 'vcd' and not version:
version = '5.6'
vca = VCA(host=host, username=username,
service_type=SERVICE_MAP[service_type],
version=version, verify=verify)
try:
if service_type == 'vca':
_vca_login(vca, password, instance)
elif service_type == 'vchs':
_vchs_login(vca, password, service, org)
elif service_type == 'vcd':
_vcd_login(vca, password, org)
except VcaError as e:
module.fail_json(msg=e.message, **e.kwargs)
return vca
| gpl-3.0 |
newswangerd/ansible | lib/ansible/plugins/doc_fragments/shell_windows.py | 8 | 1457 | # Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
# Windows shell documentation fragment
    # FIXME: set_module_language doesn't belong here but must be set so it doesn't
    # fail when someone calls get_option('set_module_language') on this plugin
DOCUMENTATION = """
options:
async_dir:
description:
- Directory in which ansible will keep async job information.
- Before Ansible 2.8, this was set to C(remote_tmp + "\\.ansible_async").
default: '%USERPROFILE%\\.ansible_async'
ini:
- section: powershell
key: async_dir
vars:
- name: ansible_async_dir
version_added: '2.8'
remote_tmp:
description:
- Temporary directory to use on targets when copying files to the host.
default: '%TEMP%'
ini:
- section: powershell
key: remote_tmp
vars:
- name: ansible_remote_tmp
set_module_language:
description:
- Controls if we set the locale for modules when executing on the
target.
- Windows only supports C(no) as an option.
type: bool
default: 'no'
choices:
- 'no'
environment:
description:
- List of dictionaries of environment variables and their values to use when
executing commands.
type: list
default: [{}]
"""
| gpl-3.0 |
imsparsh/python-for-android | python-modules/twisted/twisted/plugins/twisted_trial.py | 122 | 2034 |
from zope.interface import implements
from twisted.trial.itrial import IReporter
from twisted.plugin import IPlugin
class _Reporter(object):
implements(IPlugin, IReporter)
def __init__(self, name, module, description, longOpt, shortOpt, klass):
self.name = name
self.module = module
self.description = description
self.longOpt = longOpt
self.shortOpt = shortOpt
self.klass = klass
Tree = _Reporter("Tree Reporter",
"twisted.trial.reporter",
description="verbose color output (default reporter)",
longOpt="verbose",
shortOpt="v",
klass="TreeReporter")
BlackAndWhite = _Reporter("Black-And-White Reporter",
"twisted.trial.reporter",
description="Colorless verbose output",
longOpt="bwverbose",
shortOpt="o",
klass="VerboseTextReporter")
Minimal = _Reporter("Minimal Reporter",
"twisted.trial.reporter",
description="minimal summary output",
longOpt="summary",
shortOpt="s",
klass="MinimalReporter")
Classic = _Reporter("Classic Reporter",
"twisted.trial.reporter",
description="terse text output",
longOpt="text",
shortOpt="t",
klass="TextReporter")
Timing = _Reporter("Timing Reporter",
"twisted.trial.reporter",
description="Timing output",
longOpt="timing",
shortOpt=None,
klass="TimingTextReporter")
Subunit = _Reporter("Subunit Reporter",
"twisted.trial.reporter",
description="subunit output",
longOpt="subunit",
shortOpt=None,
klass="SubunitReporter")
| apache-2.0 |
harisbal/pandas | pandas/tests/io/parser/test_network.py | 4 | 7742 | # -*- coding: utf-8 -*-
"""
Tests parsers' ability to read and parse non-local files,
which hence require a network connection to be read.
"""
import logging
import numpy as np
import pytest
from pandas.compat import BytesIO, StringIO
import pandas.util._test_decorators as td
from pandas import DataFrame
import pandas.util.testing as tm
from pandas.io.parsers import read_csv
@pytest.mark.network
@pytest.mark.parametrize(
"compress_type, extension", [
('gzip', '.gz'), ('bz2', '.bz2'), ('zip', '.zip'),
pytest.param('xz', '.xz', marks=td.skip_if_no_lzma)
]
)
@pytest.mark.parametrize('mode', ['explicit', 'infer'])
@pytest.mark.parametrize('engine', ['python', 'c'])
def test_compressed_urls(salaries_table, compress_type, extension, mode,
engine):
check_compressed_urls(salaries_table, compress_type, extension, mode,
engine)
@tm.network
def check_compressed_urls(salaries_table, compression, extension, mode,
engine):
# test reading compressed urls with various engines and
# extension inference
base_url = ('https://github.com/pandas-dev/pandas/raw/master/'
'pandas/tests/io/parser/data/salaries.csv')
url = base_url + extension
if mode != 'explicit':
compression = mode
url_table = read_csv(url, sep='\t', compression=compression, engine=engine)
tm.assert_frame_equal(url_table, salaries_table)
@pytest.fixture
def tips_df(datapath):
"""DataFrame with the tips dataset."""
return read_csv(datapath('io', 'parser', 'data', 'tips.csv'))
@pytest.mark.usefixtures("s3_resource")
@td.skip_if_not_us_locale()
class TestS3(object):
def test_parse_public_s3_bucket(self, tips_df):
pytest.importorskip('s3fs')
        # More of an integration test due to the not-public-contents portion;
        # this could probably be mocked instead.
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' +
ext, compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(df, tips_df)
# Read public file from bucket with not-public contents
df = read_csv('s3://cant_get_it/tips.csv')
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(df, tips_df)
def test_parse_public_s3n_bucket(self, tips_df):
# Read from AWS s3 as "s3n" URL
df = read_csv('s3n://pandas-test/tips.csv', nrows=10)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
def test_parse_public_s3a_bucket(self, tips_df):
# Read from AWS s3 as "s3a" URL
df = read_csv('s3a://pandas-test/tips.csv', nrows=10)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
def test_parse_public_s3_bucket_nrows(self, tips_df):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' +
ext, nrows=10, compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
def test_parse_public_s3_bucket_chunked(self, tips_df):
# Read with a chunksize
chunksize = 5
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df_reader = read_csv('s3://pandas-test/tips.csv' + ext,
chunksize=chunksize, compression=comp)
assert df_reader.chunksize == chunksize
for i_chunk in [0, 1, 2]:
# Read a couple of chunks and make sure we see them
# properly.
df = df_reader.get_chunk()
assert isinstance(df, DataFrame)
assert not df.empty
true_df = tips_df.iloc[
chunksize * i_chunk: chunksize * (i_chunk + 1)]
tm.assert_frame_equal(true_df, df)
def test_parse_public_s3_bucket_chunked_python(self, tips_df):
# Read with a chunksize using the Python parser
chunksize = 5
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df_reader = read_csv('s3://pandas-test/tips.csv' + ext,
chunksize=chunksize, compression=comp,
engine='python')
assert df_reader.chunksize == chunksize
for i_chunk in [0, 1, 2]:
# Read a couple of chunks and make sure we see them properly.
df = df_reader.get_chunk()
assert isinstance(df, DataFrame)
assert not df.empty
true_df = tips_df.iloc[
chunksize * i_chunk: chunksize * (i_chunk + 1)]
tm.assert_frame_equal(true_df, df)
def test_parse_public_s3_bucket_python(self, tips_df):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(df, tips_df)
def test_infer_s3_compression(self, tips_df):
for ext in ['', '.gz', '.bz2']:
df = read_csv('s3://pandas-test/tips.csv' + ext,
engine='python', compression='infer')
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(df, tips_df)
def test_parse_public_s3_bucket_nrows_python(self, tips_df):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
nrows=10, compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
def test_s3_fails(self):
with pytest.raises(IOError):
read_csv('s3://nyqpug/asdf.csv')
# Receive a permission error when trying to read a private bucket.
# It's irrelevant here that this isn't actually a table.
with pytest.raises(IOError):
read_csv('s3://cant_get_it/')
def test_read_csv_handles_boto_s3_object(self,
s3_resource,
tips_file):
# see gh-16135
s3_object = s3_resource.meta.client.get_object(
Bucket='pandas-test',
Key='tips.csv')
result = read_csv(BytesIO(s3_object["Body"].read()), encoding='utf8')
assert isinstance(result, DataFrame)
assert not result.empty
expected = read_csv(tips_file)
tm.assert_frame_equal(result, expected)
def test_read_csv_chunked_download(self, s3_resource, caplog):
        # 8 MB; s3fs uses 5 MB chunks
df = DataFrame(np.random.randn(100000, 4), columns=list('abcd'))
buf = BytesIO()
str_buf = StringIO()
df.to_csv(str_buf)
buf = BytesIO(str_buf.getvalue().encode('utf-8'))
s3_resource.Bucket("pandas-test").put_object(
Key="large-file.csv",
Body=buf)
with caplog.at_level(logging.DEBUG, logger='s3fs.core'):
read_csv("s3://pandas-test/large-file.csv", nrows=5)
# log of fetch_range (start, stop)
assert ((0, 5505024) in {x.args[-2:] for x in caplog.records})
| bsd-3-clause |
giserh/pysparkling | pysparkling/fileio/fs/s3.py | 1 | 2711 | from __future__ import absolute_import
import fnmatch
import logging
from io import BytesIO, StringIO
from ...utils import Tokenizer
from .file_system import FileSystem
from ...exceptions import FileSystemNotSupported
log = logging.getLogger(__name__)
boto = None
try:
import boto
except ImportError:
pass
class S3(FileSystem):
_conn = None
def __init__(self, file_name):
if boto is None:
raise FileSystemNotSupported(
'S3 not supported. Install "boto".'
)
FileSystem.__init__(self, file_name)
# obtain key
t = Tokenizer(self.file_name)
t.next('://') # skip scheme
bucket_name = t.next('/')
key_name = t.next()
conn = S3._get_conn()
bucket = conn.get_bucket(bucket_name, validate=False)
self.key = bucket.get_key(key_name)
if not self.key:
self.key = bucket.new_key(key_name)
@staticmethod
def _get_conn():
if not S3._conn:
S3._conn = boto.connect_s3()
return S3._conn
@staticmethod
def resolve_filenames(expr):
files = []
t = Tokenizer(expr)
scheme = t.next('://')
bucket_name = t.next('/')
prefix = t.next(['*', '?'])
bucket = S3._get_conn().get_bucket(
bucket_name,
validate=False
)
expr_after_bucket = expr[len(scheme)+3+len(bucket_name)+1:]
for k in bucket.list(prefix=prefix):
if fnmatch.fnmatch(k.name, expr_after_bucket) or \
fnmatch.fnmatch(k.name, expr_after_bucket+'/part*'):
files.append(scheme+'://'+bucket_name+'/'+k.name)
return files
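    # Editor's note -- illustrative expansion, not part of the original
    # source; the bucket and key names are hypothetical:
    #
    #     S3.resolve_filenames('s3://my-bucket/logs/2015-*')
    #     # -> ['s3://my-bucket/logs/2015-01-01.txt',
    #     #     's3://my-bucket/logs/2015-01-02.txt']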
def exists(self):
t = Tokenizer(self.file_name)
t.next('//') # skip scheme
bucket_name = t.next('/')
key_name = t.next()
conn = S3._get_conn()
bucket = conn.get_bucket(bucket_name, validate=False)
return (bucket.get_key(key_name) or
any(True for _ in bucket.list(prefix=key_name+'/')))
def load(self):
log.debug('Loading {0} with size {1}.'
''.format(self.key.name, self.key.size))
return BytesIO(self.key.get_contents_as_string())
    def load_text(self):
        log.debug('Loading {0} with size {1}.'
                  ''.format(self.key.name, self.key.size))
        # Decode so that, unlike load(), callers get a text (unicode) stream.
        return StringIO(self.key.get_contents_as_string().decode('utf8'))
def dump(self, stream):
log.debug('Dumping to {0}.'.format(self.key.name))
self.key.set_contents_from_file(stream)
return self
def make_public(self, recursive=False):
self.key.make_public(recursive)
return self
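# Editor's note -- a minimal usage sketch, not part of the original source;
# it assumes boto credentials are configured and the bucket/key exist:
#
#     f = S3('s3://my-bucket/data/input.txt')
#     if f.exists():
#         text = f.load_text().read()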
| mit |
wh1100717/AutobahnPython | examples/twisted/wamp2/test3.py | 18 | 5877 | from __future__ import absolute_import
from zope.interface import implementer
from autobahn.wamp2.interfaces import *
from autobahn.wamp2.types import *
from autobahn.wamp2.error import ApplicationError, ProtocolError
from twisted.internet.defer import Deferred, inlineCallbacks
import random
def newid():
return random.randint(0, 2**53)
@implementer(ISubscriber)
@implementer(IPublisher)
@implementer(ICallee)
@implementer(ICaller)
class MockSession:
def __init__(self):
self._subscriptions = {}
self._registrations = {}
def subscribe(self, topic, options = None):
assert(type(topic) == str)
assert(options is None or isinstance(options, SubscribeOptions))
if not self._subscriptions.has_key(topic):
self._subscriptions[topic] = Subscription(newid(), topic)
d = Deferred()
d.callback(self._subscriptions[topic])
return d
def unsubscribe(self, subscription):
assert(isinstance(subscription, Subscription))
assert(subscription._isActive)
assert(subscription._topic in self._subscriptions)
subscription._isActive = False
del self._subscriptions[subscription._topic]
d = Deferred()
d.callback(None)
return d
def publish(self, topic, payload = None, options = None):
assert(type(topic) == str)
assert(options is None or isinstance(options, PublishOptions))
d = Deferred()
if topic not in ["com.myapp.mytopic1"]:
d.errback(ApplicationError(ApplicationError.NOT_AUTHORIZED))
else:
id = newid()
if self._subscriptions.has_key(topic):
event = Event(topic, payload, id)
self._subscriptions[topic].notify(event)
d.callback(id)
return d
def register(self, procedure, endpoint, options = None):
assert(type(procedure) == str)
assert(options is None or isinstance(options, RegisterOptions))
if not self._registrations.has_key(procedure):
self._registrations[procedure] = Registration(newid(), procedure, endpoint)
d = Deferred()
d.callback(self._registrations[procedure])
return d
def unregister(self, registration):
assert(isinstance(registration, Registration))
assert(registration._isActive)
assert(registration._procedure in self._registrations)
registration._isActive = False
del self._registrations[registration._procedure]
d = Deferred()
d.callback(None)
return d
def call(self, procedure, *args, **kwargs):
assert(type(procedure) == str)
invocation = Invocation()
if 'options' in kwargs:
options = kwargs['options']
del kwargs['options']
assert(isinstance(options, CallOptions))
if options.discloseMe:
invocation.caller = newid()
if options.onProgress:
invocation.progress = options.onProgress
d = Deferred()
if procedure == "com.myapp.echo":
if len(args) != 1 or len(kwargs) != 0 or type(args[0]) != str:
d.errback(ApplicationError(ApplicationError.INVALID_ARGUMENT, "procedure takes exactly 1 positional argument of type string"))
else:
d.callback(args[0])
elif procedure == "com.myapp.complex":
d.callback(CallResult(23, 7, foo = "bar"))
elif self._registrations.has_key(procedure):
try:
kwargs['invocation'] = invocation
res = self._registrations[procedure]._endpoint(*args, **kwargs)
except TypeError as err:
d.errback(ApplicationError(ApplicationError.INVALID_ARGUMENT, str(err)))
else:
d.callback(res)
else:
d.errback(ApplicationError(ApplicationError.NO_SUCH_PROCEDURE, "no procedure with URI {}".format(procedure)))
return d
import inspect
@inlineCallbacks
def test_rpc(session):
def hello(msg, invocation = Invocation):
for i in range(5):
invocation.progress(i)
return "You said {}. I say hello!".format(msg)
   print(inspect.getargspec(hello))
try:
reg1 = yield session.register("com.myapp.hello", hello)
print(reg1)
except ApplicationError as err:
print(err)
else:
def onProgress(i):
print("progress {}".format(i))
res = yield session.call("com.myapp.hello", "foooo", options = CallOptions(discloseMe = True, onProgress = onProgress))
print(res)
yield session.unregister(reg1)
res = yield session.call("com.myapp.hello", "baaar")
print(res)
try:
# res = yield session.call("com.myapp.echo", "Hello, world!", 23)
# res = yield session.call("com.myapp.complex", "Hello, world!", 23)
res = yield session.call("com.myapp.complex", "Hello, world!", 23, options = CallOptions(timeout = 2))
print(res.results)
print(res.kwresults)
except ApplicationError as err:
print(err)
@inlineCallbacks
def test_pubsub(session):
try:
sub1 = yield session.subscribe("com.myapp.mytopic1", SubscribeOptions(match = 'prefix'))
print(sub1)
except ApplicationError as err:
print(err)
else:
def watcher1(event):
print("watcher1: publication {} on topic {} with payload {}".format(event.publication, event.topic, event.payload))
def watcher2(event):
print("watcher1: publication {} on topic {} with payload {}".format(event.publication, event.topic, event.payload))
sub1.watch(watcher1)
sub1.watch(watcher2)
session.publish("com.myapp.mytopic1", "Hello, world!")
sub1.unwatch(watcher1)
publicationId = yield session.publish("com.myapp.mytopic1", "Hello, world!")
print(publicationId)
session.publish("com.myapp.mytopic2", "Hello, world!")
if __name__ == '__main__':
session = MockSession()
test_rpc(session)
test_pubsub(session)
| apache-2.0 |
hgiemza/DIRAC | Core/Utilities/File.py | 2 | 6776 | """Collection of DIRAC useful file related modules.
.. warning::
   By default, on error they return None.
"""
#pylint: skip-file
## getGlobbedFiles gives "RuntimeError: maximum recursion depth exceeded" in pylint
import os
import hashlib
import random
import glob
import sys
import re
import errno
__RCSID__ = "$Id$"
def mkDir( path ):
""" Emulate 'mkdir -p path' (if path exists already, don't raise an exception)
"""
try:
if os.path.isdir(path):
return
os.makedirs( path )
except OSError as osError:
if osError.errno == errno.EEXIST and os.path.isdir( path ):
pass
else:
raise
def mkLink( src, dst ):
""" Protected creation of simbolic link
"""
try:
os.symlink(src, dst)
except OSError as osError:
if osError.errno == errno.EEXIST and os.path.islink(dst) and os.path.realpath(dst) == src:
pass
else:
raise
def makeGuid( fileName = None ):
"""Utility to create GUID. If a filename is provided the
GUID will correspond to its content's hexadecimal md5 checksum.
Otherwise a random seed is used to create the GUID.
The format is capitalized 8-4-4-4-12.
.. warning::
Could return None in case of OSError or IOError.
:param string fileName: name of file
"""
myMd5 = hashlib.md5()
if fileName:
try:
with open( fileName, 'r' ) as fd:
data = fd.read( 10 * 1024 * 1024 )
myMd5.update( data )
except:
return None
else:
myMd5.update( str( random.getrandbits( 128 ) ) )
md5HexString = myMd5.hexdigest().upper()
return generateGuid( md5HexString, "MD5" )
def generateGuid( checksum, checksumtype ):
""" Generate a GUID based on the file checksum
"""
if checksum:
if checksumtype == "MD5":
checksumString = checksum
elif checksumtype == "Adler32":
checksumString = str( checksum ).zfill( 32 )
else:
checksumString = ''
if checksumString:
guid = "%s-%s-%s-%s-%s" % ( checksumString[0:8],
checksumString[8:12],
checksumString[12:16],
checksumString[16:20],
checksumString[20:32] )
guid = guid.upper()
return guid
# Failed to use the check sum, generate a new guid
myMd5 = hashlib.md5()
myMd5.update( str( random.getrandbits( 128 ) ) )
md5HexString = myMd5.hexdigest()
guid = "%s-%s-%s-%s-%s" % ( md5HexString[0:8],
md5HexString[8:12],
md5HexString[12:16],
md5HexString[16:20],
md5HexString[20:32] )
guid = guid.upper()
return guid
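# Editor's note -- illustrative only, not part of the original source. An
# Adler32 checksum (here the hypothetical value "11e19a4e") is zero-padded to
# 32 characters before being split 8-4-4-4-12 and upper-cased:
#
#     generateGuid('11e19a4e', 'Adler32')
#     # -> '00000000-0000-0000-0000-000011E19A4E'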
def checkGuid( guid ):
"""Checks whether a supplied GUID is of the correct format.
The guid is a string of 36 characters [0-9A-F] long split into 5 parts of length 8-4-4-4-12.
.. warning::
     As we are using GUIDs produced by various services, and some of them may not follow the
     convention, this function also accepts a guid made of lower case chars, or even one that
     merely has 5 parts of the proper lengths with whatever chars.
:param string guid: string to be checked
:return: True (False) if supplied string is (not) a valid GUID.
"""
reGUID = re.compile( "^[0-9A-F]{8}(-[0-9A-F]{4}){3}-[0-9A-F]{12}$" )
if reGUID.match( guid.upper() ):
return True
else:
guid = [ len( x ) for x in guid.split( "-" ) ]
if ( guid == [ 8, 4, 4, 4, 12 ] ):
return True
return False
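# Editor's note -- illustrative only, not part of the original source:
#
#     checkGuid('00000000-0000-0000-0000-000011E19A4E')   # True (strict match)
#     checkGuid('zzzzzzzz-zzzz-zzzz-zzzz-zzzzzzzzzzzz')   # True (parts of lengths 8-4-4-4-12)
#     checkGuid('not-a-guid')                             # False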
def getSize( fileName ):
"""Get size of a file.
:param string fileName: name of file to be checked
The os module claims only OSError can be thrown,
but just for curiosity it's catching all possible exceptions.
.. warning::
On any exception it returns -1.
"""
try:
return os.stat( fileName )[6]
except OSError:
    return -1
def getGlobbedTotalSize( files ):
"""Get total size of a list of files or a single file.
Globs the parameter to allow regular expressions.
  :param list files: list or tuple of strings of files
"""
totalSize = 0
if isinstance( files, (list, tuple) ):
for entry in files:
size = getGlobbedTotalSize( entry )
if size == -1:
size = 0
totalSize += size
else:
for path in glob.glob( files ):
if os.path.isdir( path ):
for content in os.listdir( path ):
totalSize += getGlobbedTotalSize( os.path.join( path, content ) )
if os.path.isfile( path ):
size = getSize( path )
if size == -1:
size = 0
totalSize += size
return totalSize
def getGlobbedFiles( files ):
"""Get list of files or a single file.
Globs the parameter to allow regular expressions.
  :param list files: list or tuple of strings of files
"""
globbedFiles = []
if isinstance( files, ( list, tuple ) ):
for entry in files:
globbedFiles += getGlobbedFiles( entry )
else:
for path in glob.glob( files ):
if os.path.isdir( path ):
for content in os.listdir( path ):
globbedFiles += getGlobbedFiles( os.path.join( path, content ) )
if os.path.isfile( path ):
globbedFiles.append( path )
return globbedFiles
def getCommonPath( files ):
"""Get the common path for all files in the file list.
:param files: list of strings with paths
:type files: python:list
"""
def properSplit( dirPath ):
"""Splitting of path to drive and path parts for non-Unix file systems.
:param string dirPath: path
"""
nDrive, nPath = os.path.splitdrive( dirPath )
return [ nDrive ] + [ d for d in nPath.split( os.sep ) if d.strip() ]
if not files:
return ""
commonPath = properSplit( files[0] )
for fileName in files:
if os.path.isdir( fileName ):
dirPath = fileName
else:
dirPath = os.path.dirname( fileName )
nPath = properSplit( dirPath )
tPath = []
for i in range( min( len( commonPath ), len( nPath ) ) ):
if commonPath[ i ] != nPath[ i ]:
break
tPath .append( commonPath[ i ] )
if not tPath:
return ""
commonPath = tPath
return tPath[0] + os.sep + os.path.join( *tPath[1:] )
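# Editor's note -- illustrative only, not part of the original source; on a
# Unix filesystem, with hypothetical paths:
#
#     getCommonPath( [ '/data/run1/a.log', '/data/run1/sub/b.log' ] )
#     # -> '/data/run1'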
def getMD5ForFiles( fileList ):
"""Calculate md5 for the content of all the files.
:param fileList: list of paths
:type fileList: python:list
"""
fileList.sort()
hashMD5 = hashlib.md5()
for filePath in fileList:
if os.path.isdir( filePath ):
continue
with open( filePath, "rb" ) as fd:
buf = fd.read( 4096 )
while buf:
hashMD5.update( buf )
buf = fd.read( 4096 )
return hashMD5.hexdigest()
if __name__ == "__main__":
for p in sys.argv[1:]:
print "%s : %s bytes" % ( p, getGlobbedTotalSize( p ) )
| gpl-3.0 |
hkumarmk/oslo.messaging | oslo_messaging/notify/log_handler.py | 6 | 1762 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_config import cfg
class LoggingErrorNotificationHandler(logging.Handler):
def __init__(self, *args, **kwargs):
# NOTE(dhellmann): Avoid a cyclical import by doing this one
# at runtime.
import oslo_messaging
logging.Handler.__init__(self, *args, **kwargs)
self._transport = oslo_messaging.get_transport(cfg.CONF)
self._notifier = oslo_messaging.Notifier(
self._transport,
publisher_id='error.publisher')
def emit(self, record):
# NOTE(bnemec): Notifier registers this opt with the transport.
        if 'log' in self._transport.conf.notification_driver:
# NOTE(lbragstad): If we detect that log is one of the
# notification drivers, then return. This protects from infinite
# recursion where something bad happens, it gets logged, the log
# handler sends a notification, and the log_notifier sees the
# notification and logs it.
return
self._notifier.error({},
'error_notification',
dict(error=record.msg))
PublishErrorsHandler = LoggingErrorNotificationHandler
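# Editor's note -- a minimal wiring sketch, not part of the original source;
# it assumes oslo.config has already been initialized. Attaching the handler
# forwards each ERROR-level record as an 'error_notification' notification:
#
#     import logging
#     handler = PublishErrorsHandler(level=logging.ERROR)
#     logging.getLogger().addHandler(handler)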
| apache-2.0 |
Dziolas/inspire-next | inspire/testsuite/test_datefilter.py | 4 | 1894 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2015 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Tests for text utls."""
from __future__ import print_function, absolute_import
from datetime import datetime
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase
class DateTests(InvenioTestCase):
"""Test the datefilter functions."""
def test_older_date(self):
"""Test proper handling when bad MARCXML is sent."""
from inspire.utils.datefilter import date_older_than
parsed_date = datetime.strptime("2015-01-01", "%Y-%m-%d")
other_parsed_date = datetime.strptime("2015-01-04", "%Y-%m-%d")
self.assertTrue(date_older_than(parsed_date, other_parsed_date, days=2))
def test_newer_date(self):
"""Test proper handling when bad MARCXML is sent."""
from inspire.utils.datefilter import date_older_than
parsed_date = datetime.strptime("2015-01-01", "%Y-%m-%d")
other_parsed_date = datetime.strptime("2015-01-04", "%Y-%m-%d")
self.assertFalse(date_older_than(parsed_date, other_parsed_date, days=6))
TEST_SUITE = make_test_suite(DateTests)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| gpl-2.0 |
kslundberg/pants | src/python/pants/util/dirutil.py | 5 | 8358 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import atexit
import errno
import os
import shutil
import stat
import tempfile
import threading
import uuid
from collections import defaultdict
from pants.util.strutil import ensure_text
def fast_relpath(path, start):
"""A prefix-based relpath, with no normalization or support for returning `..`."""
if not path.startswith(start):
raise ValueError('{} is not a prefix of {}'.format(start, path))
# Confirm that the split occurs on a directory boundary.
if start[-1] == '/':
slash_offset = 0
elif path[len(start)] == '/':
slash_offset = 1
else:
raise ValueError('{} is not a directory containing {}'.format(start, path))
return path[len(start)+slash_offset:]
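# Editor's note -- illustrative only, not part of the original source:
#
#     fast_relpath('src/python/pants/base', 'src/python')   # -> 'pants/base'
#     fast_relpath('src/pythonista', 'src/python')          # raises ValueError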
def safe_mkdir(directory, clean=False):
"""Ensure a directory is present.
If it's not there, create it. If it is, no-op. If clean is True, ensure the dir is empty."""
if clean:
safe_rmtree(directory)
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def safe_mkdir_for(path):
"""Ensure that the parent directory for a file is present.
If it's not there, create it. If it is, no-op.
"""
safe_mkdir(os.path.dirname(path), clean=False)
def safe_file_dump(path, content):
safe_mkdir_for(path)
with open(path, 'w') as outfile:
outfile.write(content)
def safe_walk(path, **kwargs):
"""Just like os.walk, but ensures that the returned values are unicode objects.
This isn't strictly safe, in that it is possible that some paths
will not be decodeable, but that case is rare, and the only
alternative is to somehow avoid all interaction between paths and
unicode objects, which seems especially tough in the presence of
unicode_literals. See e.g.
https://mail.python.org/pipermail/python-dev/2008-December/083856.html
"""
# If os.walk is given a text argument, it yields text values; if it
# is given a binary argument, it yields binary values.
return os.walk(ensure_text(path), **kwargs)
_MKDTEMP_CLEANER = None
_MKDTEMP_DIRS = defaultdict(set)
_MKDTEMP_LOCK = threading.RLock()
def _mkdtemp_atexit_cleaner():
for td in _MKDTEMP_DIRS.pop(os.getpid(), []):
safe_rmtree(td)
def _mkdtemp_unregister_cleaner():
global _MKDTEMP_CLEANER
_MKDTEMP_CLEANER = None
def _mkdtemp_register_cleaner(cleaner):
global _MKDTEMP_CLEANER
if not cleaner:
return
assert callable(cleaner)
if _MKDTEMP_CLEANER is None:
atexit.register(cleaner)
_MKDTEMP_CLEANER = cleaner
def safe_mkdtemp(cleaner=_mkdtemp_atexit_cleaner, **kw):
"""Create a temporary directory that is cleaned up on process exit.
Arguments are as to tempfile.mkdtemp.
"""
# Proper lock sanitation on fork [issue 6721] would be desirable here.
with _MKDTEMP_LOCK:
return register_rmtree(tempfile.mkdtemp(**kw), cleaner=cleaner)
def register_rmtree(directory, cleaner=_mkdtemp_atexit_cleaner):
"""Register an existing directory to be cleaned up at process exit."""
with _MKDTEMP_LOCK:
_mkdtemp_register_cleaner(cleaner)
_MKDTEMP_DIRS[os.getpid()].add(directory)
return directory
def safe_rmtree(directory):
"""Delete a directory if it's present. If it's not present, no-op."""
shutil.rmtree(directory, ignore_errors=True)
def safe_open(filename, *args, **kwargs):
"""Open a file safely, ensuring that its directory exists."""
safe_mkdir(os.path.dirname(filename))
return open(filename, *args, **kwargs)
def safe_delete(filename):
"""Delete a file safely. If it's not present, no-op."""
try:
os.unlink(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def safe_concurrent_rename(src, dst):
"""Rename src to dst, ignoring errors due to dst already existing.
Useful when concurrent processes may attempt to create dst, and it doesn't matter who wins.
"""
# Delete dst, in case it existed (with old content) even before any concurrent processes
# attempted this write. This ensures that at least one process writes the new content.
if os.path.isdir(src): # Note that dst may not exist, so we test for the type of src.
safe_rmtree(dst)
else:
safe_delete(dst)
try:
shutil.move(src, dst)
except IOError as e:
if e.errno != errno.EEXIST:
raise
def safe_concurrent_create(func, path):
"""Safely execute code that creates a file at a well-known path.
Useful when concurrent processes may attempt to create a file, and it doesn't matter who wins.
:param func: A callable that takes a single path argument and creates a file at that path.
:param path: The path to execute the callable on.
:return: func(path)'s return value.
"""
safe_mkdir_for(path)
tmp_path = '{0}.tmp.{1}'.format(path, uuid.uuid4().hex)
ret = func(tmp_path)
safe_concurrent_rename(tmp_path, path)
return ret
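# Illustrative usage sketch (hypothetical path): several processes may race
# to publish the same file; each writes to a unique temp name and exactly
# one rename wins.
#
#   def _write(tmp_path):
#       with open(tmp_path, 'w') as f:
#           f.write('payload')
#
#   safe_concurrent_create(_write, '/tmp/shared/output.txt')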
def chmod_plus_x(path):
"""Equivalent of unix `chmod a+x path`"""
path_mode = os.stat(path).st_mode
path_mode &= int('777', 8)
if path_mode & stat.S_IRUSR:
path_mode |= stat.S_IXUSR
if path_mode & stat.S_IRGRP:
path_mode |= stat.S_IXGRP
if path_mode & stat.S_IROTH:
path_mode |= stat.S_IXOTH
os.chmod(path, path_mode)
def relative_symlink(source_path, link_path):
"""Create a symlink at link_path pointing to relative source
:param source_path: Absolute path to source file
:param link_path: Absolute path to intended symlink
:raises ValueError if source_path or link_path are not unique, absolute paths
:raises OSError on failure UNLESS file already exists or no such file/directory
"""
if not os.path.isabs(source_path):
raise ValueError("Path for source:{} must be absolute".format(source_path))
if not os.path.isabs(link_path):
raise ValueError("Path for link:{} must be absolute".format(link_path))
if source_path == link_path:
raise ValueError("Path for link is identical to source:{}".format(source_path))
try:
if os.path.lexists(link_path):
os.unlink(link_path)
rel_path = os.path.relpath(source_path, os.path.dirname(link_path))
os.symlink(rel_path, link_path)
except OSError as e:
# Another run may beat us to deletion or creation.
if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):
raise
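# Illustrative usage (hypothetical paths): the stored target is relative to
# the link's directory, so the tree stays valid if relocated as a whole.
#
#   relative_symlink('/repo/dist/app.bin', '/repo/bin/app')
#   # creates /repo/bin/app -> ../dist/app.bin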
def relativize_path(path, rootdir):
# Note that we can't test for length and return the shorter of the two, because we need these
# paths to be stable across systems (e.g., because they get embedded in analysis files),
# and this choice might be inconsistent across systems. So we assume the relpath is always
# shorter. We relativize because of a known case of very long full path prefixes on Mesos,
# so this seems like the right heuristic.
# Note also that we mustn't call realpath on the path - we need to preserve the symlink structure.
return os.path.relpath(path, rootdir)
# When running pants under mesos/aurora, the sandbox pathname can be very long. Since it gets
# prepended to most components in the classpath (some from ivy, the rest from the build),
# in some runs the classpath gets too big and exceeds ARG_MAX.
# We prevent this by using paths relative to the current working directory.
def relativize_paths(paths, rootdir):
return [relativize_path(path, rootdir) for path in paths]
def touch(path, times=None):
"""Equivalent of unix `touch path`.
:param path: The file to touch.
:param times: Either a tuple of (atime, mtime) or else a single time to use for both. If not
specified, both atime and mtime are updated to the current time.
"""
if times:
if not isinstance(times, tuple):
times = (times, times)
elif len(times) > 2:
raise ValueError('times must either be a tuple of (atime, mtime) or else a single time value '
'to use for both.')
elif len(times) == 1:
times = (times[0], times[0])
with safe_open(path, 'a'):
os.utime(path, times)
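# Illustrative usage (hypothetical path):
#
#   touch('/tmp/example.marker')                        # atime/mtime = now
#   touch('/tmp/example.marker', times=(1.5e9, 1.5e9))  # explicit times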
def get_basedir(path):
"""Returns the base directory of a path.
Examples:
get_basedir('foo/bar/baz') --> 'foo'
get_basedir('/foo/bar/baz') --> ''
get_basedir('foo') --> 'foo'
"""
return path[:path.index(os.sep)] if os.sep in path else path
| apache-2.0 |
oew1v07/scikit-image | skimage/_shared/testing.py | 17 | 6751 | """Testing utilities."""
import os
import re
import threading
import functools
from tempfile import NamedTemporaryFile
from numpy import testing
import numpy as np
from ._warnings import expected_warnings
import warnings
from .. import data, io, img_as_uint, img_as_float, img_as_int, img_as_ubyte
SKIP_RE = re.compile(r"(\s*>>>.*?)(\s*)#\s*skip\s+if\s+(.*)$")
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def doctest_skip_parser(func):
""" Decorator replaces custom skip test markup in doctests
Say a function has a docstring::
>>> something # skip if not HAVE_AMODULE
>>> something + else
>>> something # skip if HAVE_BMODULE
This decorator will evaluate the expression after ``skip if``. If this
evaluates to True, then the comment is replaced by ``# doctest: +SKIP``. If
False, then the comment is just removed. The expression is evaluated in the
``globals`` scope of `func`.
For example, if the module global ``HAVE_AMODULE`` is False, and module
global ``HAVE_BMODULE`` is False, the returned function will have docstring::
>>> something # doctest: +SKIP
>>> something + else
>>> something
"""
lines = func.__doc__.split('\n')
new_lines = []
for line in lines:
match = SKIP_RE.match(line)
if match is None:
new_lines.append(line)
continue
code, space, expr = match.groups()
try:
# Works as a function decorator
if eval(expr, func.__globals__):
code = code + space + "# doctest: +SKIP"
except AttributeError:
# Works as a class decorator
if eval(expr, func.__init__.__globals__):
code = code + space + "# doctest: +SKIP"
new_lines.append(code)
func.__doc__ = "\n".join(new_lines)
return func
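# Illustrative usage sketch (HAVE_AMODULE is an assumed module-level flag):
#
#   HAVE_AMODULE = False
#
#   @doctest_skip_parser
#   def func():
#       """
#       >>> import amodule  # skip if not HAVE_AMODULE
#       """
#   # func.__doc__ now reads ">>> import amodule  # doctest: +SKIP"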
def roundtrip(img, plugin, suffix):
"""Save and read an image using a specified plugin"""
if '.' not in suffix:
suffix = '.' + suffix
temp_file = NamedTemporaryFile(suffix=suffix, delete=False)
fname = temp_file.name
temp_file.close()
io.imsave(fname, img, plugin=plugin)
new = io.imread(fname, plugin=plugin)
try:
os.remove(fname)
except Exception:
pass
return new
def color_check(plugin, fmt='png'):
"""Check roundtrip behavior for color images.
All major input types should be handled as ubytes and read
back correctly.
"""
img = img_as_ubyte(data.chelsea())
r1 = roundtrip(img, plugin, fmt)
testing.assert_allclose(img, r1)
img2 = img > 128
r2 = roundtrip(img2, plugin, fmt)
testing.assert_allclose(img2.astype(np.uint8), r2)
img3 = img_as_float(img)
with expected_warnings(['precision loss']):
r3 = roundtrip(img3, plugin, fmt)
testing.assert_allclose(r3, img)
with expected_warnings(['precision loss']):
img4 = img_as_int(img)
if fmt.lower() in ('tif', 'tiff'):
img4 -= 100
with expected_warnings(['sign loss']):
r4 = roundtrip(img4, plugin, fmt)
testing.assert_allclose(r4, img4)
else:
with expected_warnings(['sign loss|precision loss']):
r4 = roundtrip(img4, plugin, fmt)
testing.assert_allclose(r4, img_as_ubyte(img4))
img5 = img_as_uint(img)
with expected_warnings(['precision loss']):
r5 = roundtrip(img5, plugin, fmt)
testing.assert_allclose(r5, img)
def mono_check(plugin, fmt='png'):
"""Check the roundtrip behavior for images that support most types.
All major input types should be handled.
"""
img = img_as_ubyte(data.moon())
r1 = roundtrip(img, plugin, fmt)
testing.assert_allclose(img, r1)
img2 = img > 128
r2 = roundtrip(img2, plugin, fmt)
testing.assert_allclose(img2.astype(np.uint8), r2)
img3 = img_as_float(img)
with expected_warnings([r'precision|\A\Z']):
r3 = roundtrip(img3, plugin, fmt)
if r3.dtype.kind == 'f':
testing.assert_allclose(img3, r3)
else:
testing.assert_allclose(r3, img_as_uint(img))
with expected_warnings(['precision loss']):
img4 = img_as_int(img)
if fmt.lower() in ('tif', 'tiff'):
img4 -= 100
with expected_warnings([r'sign loss|\A\Z']):
r4 = roundtrip(img4, plugin, fmt)
testing.assert_allclose(r4, img4)
else:
with expected_warnings(['precision loss|sign loss']):
r4 = roundtrip(img4, plugin, fmt)
testing.assert_allclose(r4, img_as_uint(img4))
img5 = img_as_uint(img)
r5 = roundtrip(img5, plugin, fmt)
testing.assert_allclose(r5, img5)
def setup_test():
"""Default package level setup routine for skimage tests.
Import packages known to raise warnings, and then
force warnings to raise errors.
Also set the random seed to zero.
"""
warnings.simplefilter('default')
from scipy import signal, ndimage, special, optimize, linalg
from scipy.io import loadmat
from skimage import viewer, filter
np.random.seed(0)
warnings.simplefilter('error')
def teardown_test():
"""Default package level teardown routine for skimage tests.
Restore warnings to default behavior
"""
warnings.simplefilter('default')
def test_parallel(num_threads=2):
"""Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
Notes
-----
This decorator does not pass the return value of the decorated function.
"""
assert num_threads > 0
def wrapper(func):
@functools.wraps(func)
def inner(*args, **kwargs):
threads = []
for i in range(num_threads):
thread = threading.Thread(target=func, args=args, kwargs=kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
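# Illustrative usage sketch (the decorated body is hypothetical):
#
#   @test_parallel(num_threads=4)
#   def test_append_is_thread_safe():
#       shared.append(1)  # hypothetical shared structure under test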
if __name__ == '__main__':
color_check('pil')
mono_check('pil')
mono_check('pil', 'bmp')
mono_check('pil', 'tiff')
| bsd-3-clause |
cytec/SickRage | lib/sqlalchemy/util/_collections.py | 78 | 26035 | # util/_collections.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Collection classes and helpers."""
from __future__ import absolute_import
import weakref
import operator
from .compat import threading, itertools_filterfalse
from . import py2k
import types
EMPTY_SET = frozenset()
class KeyedTuple(tuple):
"""``tuple`` subclass that adds labeled names.
E.g.::
>>> k = KeyedTuple([1, 2, 3], labels=["one", "two", "three"])
>>> k.one
1
>>> k.two
2
Result rows returned by :class:`.Query` that contain multiple
ORM entities and/or column expressions make use of this
class to return rows.
The :class:`.KeyedTuple` exhibits similar behavior to the
``collections.namedtuple()`` construct provided in the Python
standard library, however is architected very differently.
Unlike ``collections.namedtuple()``, :class:`.KeyedTuple`
does not rely on creation of custom subtypes in order to represent
a new series of keys, instead each :class:`.KeyedTuple` instance
receives its list of keys in place. The subtype approach
of ``collections.namedtuple()`` introduces significant complexity
and performance overhead, which is not necessary for the
:class:`.Query` object's use case.
.. versionchanged:: 0.8
Compatibility methods with ``collections.namedtuple()`` have been
added including :attr:`.KeyedTuple._fields` and
:meth:`.KeyedTuple._asdict`.
.. seealso::
:ref:`ormtutorial_querying`
"""
def __new__(cls, vals, labels=None):
t = tuple.__new__(cls, vals)
t._labels = []
if labels:
t.__dict__.update(zip(labels, vals))
t._labels = labels
return t
def keys(self):
"""Return a list of string key names for this :class:`.KeyedTuple`.
.. seealso::
:attr:`.KeyedTuple._fields`
"""
return [l for l in self._labels if l is not None]
@property
def _fields(self):
"""Return a tuple of string key names for this :class:`.KeyedTuple`.
This method provides compatibility with ``collections.namedtuple()``.
.. versionadded:: 0.8
.. seealso::
:meth:`.KeyedTuple.keys`
"""
return tuple(self.keys())
def _asdict(self):
"""Return the contents of this :class:`.KeyedTuple` as a dictionary.
This method provides compatibility with ``collections.namedtuple()``,
with the exception that the dictionary returned is **not** ordered.
.. versionadded:: 0.8
"""
return dict((key, self.__dict__[key]) for key in self.keys())
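# Illustrative example of the namedtuple-compatible accessors:
#
#   k = KeyedTuple([1, 2], labels=["one", "two"])
#   k.one         # 1
#   k._fields     # ('one', 'two')
#   k._asdict()   # {'one': 1, 'two': 2}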
class ImmutableContainer(object):
def _immutable(self, *arg, **kw):
raise TypeError("%s object is immutable" % self.__class__.__name__)
__delitem__ = __setitem__ = __setattr__ = _immutable
class immutabledict(ImmutableContainer, dict):
clear = pop = popitem = setdefault = \
update = ImmutableContainer._immutable
def __new__(cls, *args):
new = dict.__new__(cls)
dict.__init__(new, *args)
return new
def __init__(self, *args):
pass
def __reduce__(self):
return immutabledict, (dict(self), )
def union(self, d):
if not self:
return immutabledict(d)
else:
d2 = immutabledict(self)
dict.update(d2, d)
return d2
def __repr__(self):
return "immutabledict(%s)" % dict.__repr__(self)
class Properties(object):
"""Provide a __getattr__/__setattr__ interface over a dict."""
def __init__(self, data):
self.__dict__['_data'] = data
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(list(self._data.values()))
def __add__(self, other):
return list(self) + list(other)
def __setitem__(self, key, object):
self._data[key] = object
def __getitem__(self, key):
return self._data[key]
def __delitem__(self, key):
del self._data[key]
def __setattr__(self, key, object):
self._data[key] = object
def __getstate__(self):
return {'_data': self.__dict__['_data']}
def __setstate__(self, state):
self.__dict__['_data'] = state['_data']
def __getattr__(self, key):
try:
return self._data[key]
except KeyError:
raise AttributeError(key)
def __contains__(self, key):
return key in self._data
def as_immutable(self):
"""Return an immutable proxy for this :class:`.Properties`."""
return ImmutableProperties(self._data)
def update(self, value):
self._data.update(value)
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
def keys(self):
return list(self._data)
def values(self):
return list(self._data.values())
def items(self):
return list(self._data.items())
def has_key(self, key):
return key in self._data
def clear(self):
self._data.clear()
class OrderedProperties(Properties):
"""Provide a __getattr__/__setattr__ interface with an OrderedDict
as backing store."""
def __init__(self):
Properties.__init__(self, OrderedDict())
class ImmutableProperties(ImmutableContainer, Properties):
"""Provide immutable dict/object attribute to an underlying dictionary."""
class OrderedDict(dict):
"""A dict that returns keys/values/items in the order they were added."""
def __init__(self, ____sequence=None, **kwargs):
self._list = []
if ____sequence is None:
if kwargs:
self.update(**kwargs)
else:
self.update(____sequence, **kwargs)
def clear(self):
self._list = []
dict.clear(self)
def copy(self):
return self.__copy__()
def __copy__(self):
return OrderedDict(self)
def sort(self, *arg, **kw):
self._list.sort(*arg, **kw)
def update(self, ____sequence=None, **kwargs):
if ____sequence is not None:
if hasattr(____sequence, 'keys'):
for key in ____sequence.keys():
self.__setitem__(key, ____sequence[key])
else:
for key, value in ____sequence:
self[key] = value
if kwargs:
self.update(kwargs)
def setdefault(self, key, value):
if key not in self:
self.__setitem__(key, value)
return value
else:
return self.__getitem__(key)
def __iter__(self):
return iter(self._list)
if py2k:
def values(self):
return [self[key] for key in self._list]
def keys(self):
return self._list
def itervalues(self):
return iter([self[key] for key in self._list])
def iterkeys(self):
return iter(self)
def iteritems(self):
return iter(self.items())
def items(self):
return [(key, self[key]) for key in self._list]
else:
def values(self):
#return (self[key] for key in self)
return (self[key] for key in self._list)
def keys(self):
#return iter(self)
return iter(self._list)
def items(self):
#return ((key, self[key]) for key in self)
return ((key, self[key]) for key in self._list)
_debug_iter = False
if _debug_iter:
# normally disabled to reduce function call
# overhead
def __iter__(self):
len_ = len(self._list)
for item in self._list:
yield item
assert len_ == len(self._list), \
"Dictionary changed size during iteration"
def values(self):
return (self[key] for key in self)
def keys(self):
return iter(self)
def items(self):
return ((key, self[key]) for key in self)
def __setitem__(self, key, object):
if key not in self:
try:
self._list.append(key)
except AttributeError:
# work around Python pickle loads() with
# dict subclass (seems to ignore __setstate__?)
self._list = [key]
dict.__setitem__(self, key, object)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._list.remove(key)
def pop(self, key, *default):
present = key in self
value = dict.pop(self, key, *default)
if present:
self._list.remove(key)
return value
def popitem(self):
item = dict.popitem(self)
self._list.remove(item[0])
return item
class OrderedSet(set):
def __init__(self, d=None):
set.__init__(self)
self._list = []
if d is not None:
self.update(d)
def add(self, element):
if element not in self:
self._list.append(element)
set.add(self, element)
def remove(self, element):
set.remove(self, element)
self._list.remove(element)
def insert(self, pos, element):
if element not in self:
self._list.insert(pos, element)
set.add(self, element)
def discard(self, element):
if element in self:
self._list.remove(element)
set.remove(self, element)
def clear(self):
set.clear(self)
self._list = []
def __getitem__(self, key):
return self._list[key]
def __iter__(self):
return iter(self._list)
def __add__(self, other):
return self.union(other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._list)
__str__ = __repr__
def update(self, iterable):
for e in iterable:
if e not in self:
self._list.append(e)
set.add(self, e)
return self
__ior__ = update
def union(self, other):
result = self.__class__(self)
result.update(other)
return result
__or__ = union
def intersection(self, other):
other = set(other)
return self.__class__(a for a in self if a in other)
__and__ = intersection
def symmetric_difference(self, other):
other = set(other)
result = self.__class__(a for a in self if a not in other)
result.update(a for a in other if a not in self)
return result
__xor__ = symmetric_difference
def difference(self, other):
other = set(other)
return self.__class__(a for a in self if a not in other)
__sub__ = difference
def intersection_update(self, other):
other = set(other)
set.intersection_update(self, other)
self._list = [a for a in self._list if a in other]
return self
__iand__ = intersection_update
def symmetric_difference_update(self, other):
set.symmetric_difference_update(self, other)
self._list = [a for a in self._list if a in self]
self._list += [a for a in other if a in self]
return self
__ixor__ = symmetric_difference_update
def difference_update(self, other):
set.difference_update(self, other)
self._list = [a for a in self._list if a in self]
return self
__isub__ = difference_update
class IdentitySet(object):
"""A set that considers only object id() for uniqueness.
This strategy has edge cases for builtin types- it's possible to have
two 'foo' strings in one of these sets, for example. Use sparingly.
"""
_working_set = set
def __init__(self, iterable=None):
self._members = dict()
if iterable:
for o in iterable:
self.add(o)
def add(self, value):
self._members[id(value)] = value
def __contains__(self, value):
return id(value) in self._members
def remove(self, value):
del self._members[id(value)]
def discard(self, value):
try:
self.remove(value)
except KeyError:
pass
def pop(self):
try:
pair = self._members.popitem()
return pair[1]
except KeyError:
raise KeyError('pop from an empty set')
def clear(self):
self._members.clear()
def __cmp__(self, other):
raise TypeError('cannot compare sets using cmp()')
def __eq__(self, other):
if isinstance(other, IdentitySet):
return self._members == other._members
else:
return False
def __ne__(self, other):
if isinstance(other, IdentitySet):
return self._members != other._members
else:
return True
def issubset(self, iterable):
other = type(self)(iterable)
if len(self) > len(other):
return False
for m in itertools_filterfalse(other._members.__contains__,
iter(self._members.keys())):
return False
return True
def __le__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issubset(other)
def __lt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) < len(other) and self.issubset(other)
def issuperset(self, iterable):
other = type(self)(iterable)
if len(self) < len(other):
return False
for m in itertools_filterfalse(self._members.__contains__,
iter(other._members.keys())):
return False
return True
def __ge__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issuperset(other)
def __gt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) > len(other) and self.issuperset(other)
def union(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(self._working_set(members).union(other))
return result
def __or__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.union(other)
def update(self, iterable):
self._members = self.union(iterable)._members
def __ior__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.update(other)
return self
def difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(self._working_set(members).difference(other))
return result
def __sub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.difference(other)
def difference_update(self, iterable):
self._members = self.difference(iterable)._members
def __isub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.difference_update(other)
return self
def intersection(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(self._working_set(members).intersection(other))
return result
def __and__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.intersection(other)
def intersection_update(self, iterable):
self._members = self.intersection(iterable)._members
def __iand__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.intersection_update(other)
return self
def symmetric_difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(
self._working_set(members).symmetric_difference(other))
return result
def _member_id_tuples(self):
return ((id(v), v) for v in self._members.values())
def __xor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.symmetric_difference(other)
def symmetric_difference_update(self, iterable):
self._members = self.symmetric_difference(iterable)._members
def __ixor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.symmetric_difference_update(other)
return self
def copy(self):
return type(self)(iter(self._members.values()))
__copy__ = copy
def __len__(self):
return len(self._members)
def __iter__(self):
return iter(self._members.values())
def __hash__(self):
raise TypeError('set objects are unhashable')
def __repr__(self):
return '%s(%r)' % (type(self).__name__, list(self._members.values()))
class WeakSequence(object):
def __init__(self, __elements=()):
self._storage = [
weakref.ref(element, self._remove) for element in __elements
]
def append(self, item):
self._storage.append(weakref.ref(item, self._remove))
def _remove(self, ref):
self._storage.remove(ref)
def __len__(self):
return len(self._storage)
def __iter__(self):
return (obj for obj in
(ref() for ref in self._storage) if obj is not None)
def __getitem__(self, index):
try:
obj = self._storage[index]
except IndexError:
raise IndexError("Index %s out of range" % index)
else:
return obj()
class OrderedIdentitySet(IdentitySet):
class _working_set(OrderedSet):
# a testing pragma: exempt the OIDS working set from the test suite's
# "never call the user's __hash__" assertions. this is a big hammer,
# but it's safe here: IDS operates on (id, instance) tuples in the
# working set.
__sa_hash_exempt__ = True
def __init__(self, iterable=None):
IdentitySet.__init__(self)
self._members = OrderedDict()
if iterable:
for o in iterable:
self.add(o)
class PopulateDict(dict):
"""A dict which populates missing values via a creation function.
Note the creation function takes a key, unlike
collections.defaultdict.
"""
def __init__(self, creator):
self.creator = creator
def __missing__(self, key):
self[key] = val = self.creator(key)
return val
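# Illustrative example: the creator receives the missing key itself.
#
#   cache = PopulateDict(lambda key: key.upper())
#   cache['abc']   # 'ABC' (computed by the creator and stored on first access)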
# Define collections that are capable of storing
# ColumnElement objects as hashable keys/elements.
# At this point, these are mostly historical, things
# used to be more complicated.
column_set = set
column_dict = dict
ordered_column_set = OrderedSet
populate_column_dict = PopulateDict
def unique_list(seq, hashfunc=None):
seen = {}
if not hashfunc:
return [x for x in seq
if x not in seen
and not seen.__setitem__(x, True)]
else:
return [x for x in seq
if hashfunc(x) not in seen
and not seen.__setitem__(hashfunc(x), True)]
class UniqueAppender(object):
"""Appends items to a collection ensuring uniqueness.
Additional appends() of the same object are ignored. Membership is
determined by identity (``is a``) not equality (``==``).
"""
def __init__(self, data, via=None):
self.data = data
self._unique = {}
if via:
self._data_appender = getattr(data, via)
elif hasattr(data, 'append'):
self._data_appender = data.append
elif hasattr(data, 'add'):
self._data_appender = data.add
def append(self, item):
id_ = id(item)
if id_ not in self._unique:
self._data_appender(item)
self._unique[id_] = True
def __iter__(self):
return iter(self.data)
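# Illustrative example: duplicate appends of the *same* object are ignored,
# based on identity rather than equality.
#
#   target = []
#   appender = UniqueAppender(target)
#   item = object()
#   appender.append(item)
#   appender.append(item)
#   len(target)   # 1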
def coerce_generator_arg(arg):
if len(arg) == 1 and isinstance(arg[0], types.GeneratorType):
return list(arg[0])
else:
return arg
def to_list(x, default=None):
if x is None:
return default
if not isinstance(x, (list, tuple)):
return [x]
else:
return x
def to_set(x):
if x is None:
return set()
if not isinstance(x, set):
return set(to_list(x))
else:
return x
def to_column_set(x):
if x is None:
return column_set()
if not isinstance(x, column_set):
return column_set(to_list(x))
else:
return x
def update_copy(d, _new=None, **kw):
"""Copy the given dict and update with the given values."""
d = d.copy()
if _new:
d.update(_new)
d.update(**kw)
return d
def flatten_iterator(x):
"""Given an iterator of which further sub-elements may also be
iterators, flatten the sub-elements into a single iterator.
"""
for elem in x:
if not isinstance(elem, str) and hasattr(elem, '__iter__'):
for y in flatten_iterator(elem):
yield y
else:
yield elem
class LRUCache(dict):
"""Dictionary with 'squishy' removal of least
recently used items.
"""
def __init__(self, capacity=100, threshold=.5):
self.capacity = capacity
self.threshold = threshold
self._counter = 0
def _inc_counter(self):
self._counter += 1
return self._counter
def __getitem__(self, key):
item = dict.__getitem__(self, key)
item[2] = self._inc_counter()
return item[1]
def values(self):
return [i[1] for i in dict.values(self)]
def setdefault(self, key, value):
if key in self:
return self[key]
else:
self[key] = value
return value
def __setitem__(self, key, value):
item = dict.get(self, key)
if item is None:
item = [key, value, self._inc_counter()]
dict.__setitem__(self, key, item)
else:
item[1] = value
self._manage_size()
def _manage_size(self):
while len(self) > self.capacity + self.capacity * self.threshold:
by_counter = sorted(dict.values(self),
key=operator.itemgetter(2),
reverse=True)
for item in by_counter[self.capacity:]:
try:
del self[item[0]]
except KeyError:
# if we couldn't find a key, most
# likely some other thread broke in
# on us. loop around and try again
break
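# Illustrative example: pruning triggers once len exceeds
# capacity * (1 + threshold), keeping the most recently used items.
#
#   cache = LRUCache(capacity=2, threshold=.5)
#   for key in 'abcd':
#       cache[key] = key
#   # the 4th insert pushes len past 3, so the 2 most recent items remain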
class ScopedRegistry(object):
"""A Registry that can store one or multiple instances of a single
class on the basis of a "scope" function.
The object implements ``__call__`` as the "getter", so by
calling ``myregistry()`` the contained object is returned
for the current scope.
:param createfunc:
a callable that returns a new object to be placed in the registry
:param scopefunc:
a callable that will return a key to store/retrieve an object.
"""
def __init__(self, createfunc, scopefunc):
"""Construct a new :class:`.ScopedRegistry`.
:param createfunc: A creation function that will generate
a new value for the current scope, if none is present.
:param scopefunc: A function that returns a hashable
token representing the current scope (such as, current
thread identifier).
"""
self.createfunc = createfunc
self.scopefunc = scopefunc
self.registry = {}
def __call__(self):
key = self.scopefunc()
try:
return self.registry[key]
except KeyError:
return self.registry.setdefault(key, self.createfunc())
def has(self):
"""Return True if an object is present in the current scope."""
return self.scopefunc() in self.registry
def set(self, obj):
"""Set the value forthe current scope."""
self.registry[self.scopefunc()] = obj
def clear(self):
"""Clear the current scope, if any."""
try:
del self.registry[self.scopefunc()]
except KeyError:
pass
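# Illustrative usage sketch (a thread-id scope is an assumption here; any
# callable returning a hashable token works):
#
#   registry = ScopedRegistry(createfunc=dict, scopefunc=threading.get_ident)
#   registry()       # this thread's dict, created on first call
#   registry.has()   # True afterwards, within the same thread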
class ThreadLocalRegistry(ScopedRegistry):
"""A :class:`.ScopedRegistry` that uses a ``threading.local()``
variable for storage.
"""
def __init__(self, createfunc):
self.createfunc = createfunc
self.registry = threading.local()
def __call__(self):
try:
return self.registry.value
except AttributeError:
val = self.registry.value = self.createfunc()
return val
def has(self):
return hasattr(self.registry, "value")
def set(self, obj):
self.registry.value = obj
def clear(self):
try:
del self.registry.value
except AttributeError:
pass
def _iter_id(iterable):
"""Generator: ((id(o), o) for o in iterable)."""
for item in iterable:
yield id(item), item
| gpl-3.0 |
Vassy/odoo | addons/account_budget/__init__.py | 444 | 1097 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_budget
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MingdaZhou/gnuradio | gr-digital/examples/narrowband/uhd_interface.py | 36 | 10251 | #!/usr/bin/env python
#
# Copyright 2010,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, uhd
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
def add_freq_option(parser):
"""
Hackery that has the -f / --freq option set both tx_freq and rx_freq
"""
def freq_callback(option, opt_str, value, parser):
parser.values.rx_freq = value
parser.values.tx_freq = value
if not parser.has_option('--freq'):
parser.add_option('-f', '--freq', type="eng_float",
action="callback", callback=freq_callback,
help="set Tx and/or Rx frequency to FREQ [default=%default]",
metavar="FREQ")
class uhd_interface:
def __init__(self, istx, args, sym_rate, sps, freq=None, lo_offset=None,
gain=None, spec=None, antenna=None, clock_source=None):
if(istx):
self.u = uhd.usrp_sink(device_addr=args, stream_args=uhd.stream_args('fc32'))
else:
self.u = uhd.usrp_source(device_addr=args, stream_args=uhd.stream_args('fc32'))
# Set clock source
if(clock_source):
self.u.set_clock_source(clock_source, 0)
# Set the subdevice spec
if(spec):
self.u.set_subdev_spec(spec, 0)
# Set the antenna
if(antenna):
self.u.set_antenna(antenna, 0)
self._args = args
self._ant = antenna
self._spec = spec
self._gain = self.set_gain(gain)
self._lo_offset = lo_offset
self._freq = self.set_freq(freq, lo_offset)
self._rate, self._sps = self.set_sample_rate(sym_rate, sps)
self._clock_source = clock_source
def set_sample_rate(self, sym_rate, req_sps):
start_sps = req_sps
while(True):
asked_samp_rate = sym_rate * req_sps
self.u.set_samp_rate(asked_samp_rate)
actual_samp_rate = self.u.get_samp_rate()
sps = actual_samp_rate/sym_rate
if(sps < 2):
req_sps += 1
else:
actual_sps = sps
break
if(sps != req_sps):
print "\nSymbol Rate: %f" % (sym_rate)
print "Requested sps: %f" % (start_sps)
print "Given sample rate: %f" % (actual_samp_rate)
print "Actual sps for rate: %f" % (actual_sps)
if(actual_samp_rate != asked_samp_rate):
print "\nRequested sample rate: %f" % (asked_samp_rate)
print "Actual sample rate: %f" % (actual_samp_rate)
return (actual_samp_rate, actual_sps)
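# Illustrative worked example (hypothetical device behavior): with
# sym_rate=500e3 and req_sps=2 the code asks for 1 Msps; if the hardware
# quantizes that down to 800 ksps the achieved sps is 1.6 < 2, so req_sps
# is bumped to 3 and the loop retries until sps >= 2.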
def get_sample_rate(self):
return self.u.get_samp_rate()
def set_gain(self, gain=None):
if gain is None:
# if no gain was specified, use the mid-point in dB
g = self.u.get_gain_range()
gain = float(g.start()+g.stop())/2
print "\nNo gain specified."
print "Setting gain to %f (from [%f, %f])" % \
(gain, g.start(), g.stop())
self.u.set_gain(gain, 0)
return gain
def set_freq(self, freq=None, lo_offset=None):
if(freq is None):
sys.stderr.write("You must specify -f FREQ or --freq FREQ\n")
sys.exit(1)
r = self.u.set_center_freq(uhd.tune_request(freq, lo_offset))
if r:
return freq
else:
frange = self.u.get_freq_range()
sys.stderr.write(("\nRequested frequency (%f) out or range [%f, %f]\n") % \
(freq, frange.start(), frange.stop()))
sys.exit(1)
#-------------------------------------------------------------------#
# TRANSMITTER
#-------------------------------------------------------------------#
class uhd_transmitter(uhd_interface, gr.hier_block2):
def __init__(self, args, sym_rate, sps, freq=None, lo_offset=None, gain=None,
spec=None, antenna=None, clock_source=None, verbose=False):
gr.hier_block2.__init__(self, "uhd_transmitter",
gr.io_signature(1,1,gr.sizeof_gr_complex),
gr.io_signature(0,0,0))
# Set up the UHD interface as a transmitter
uhd_interface.__init__(self, True, args, sym_rate, sps,
freq, lo_offset, gain, spec, antenna, clock_source)
self.connect(self, self.u)
if(verbose):
self._print_verbage()
def add_options(parser):
add_freq_option(parser)
parser.add_option("-a", "--args", type="string", default="",
help="UHD device address args [default=%default]")
parser.add_option("", "--spec", type="string", default=None,
help="Subdevice of UHD device where appropriate")
parser.add_option("-A", "--antenna", type="string", default=None,
help="select Rx Antenna where appropriate")
parser.add_option("", "--tx-freq", type="eng_float", default=None,
help="set transmit frequency to FREQ [default=%default]",
metavar="FREQ")
parser.add_option("", "--lo-offset", type="eng_float", default=0,
help="set local oscillator offset in Hz (default is 0)")
parser.add_option("", "--tx-gain", type="eng_float", default=None,
help="set transmit gain in dB (default is midpoint)")
parser.add_option("-C", "--clock-source", type="string", default=None,
help="select clock source (e.g. 'external') [default=%default]")
parser.add_option("-v", "--verbose", action="store_true", default=False)
# Make a static method to call before instantiation
add_options = staticmethod(add_options)
def _print_verbage(self):
"""
Prints information about the UHD transmitter
"""
print "\nUHD Transmitter:"
print "Args: %s" % (self._args)
print "Freq: %sHz" % (eng_notation.num_to_str(self._freq))
print "LO Offset: %sHz" % (eng_notation.num_to_str(self._lo_offset))
print "Gain: %f dB" % (self._gain)
print "Sample Rate: %ssps" % (eng_notation.num_to_str(self._rate))
print "Antenna: %s" % (self._ant)
print "Subdev Spec: %s" % (self._spec)
print "Clock Source: %s" % (self._clock_source)
#-------------------------------------------------------------------#
# RECEIVER
#-------------------------------------------------------------------#
class uhd_receiver(uhd_interface, gr.hier_block2):
def __init__(self, args, sym_rate, sps, freq=None, lo_offset=None, gain=None,
spec=None, antenna=None, clock_source=None, verbose=False):
gr.hier_block2.__init__(self, "uhd_receiver",
gr.io_signature(0,0,0),
gr.io_signature(1,1,gr.sizeof_gr_complex))
# Set up the UHD interface as a receiver
uhd_interface.__init__(self, False, args, sym_rate, sps,
freq, lo_offset, gain, spec, antenna, clock_source)
self.connect(self.u, self)
if(verbose):
self._print_verbage()
def add_options(parser):
add_freq_option(parser)
parser.add_option("-a", "--args", type="string", default="",
help="UHD device address args [default=%default]")
parser.add_option("", "--spec", type="string", default=None,
help="Subdevice of UHD device where appropriate")
parser.add_option("-A", "--antenna", type="string", default=None,
help="select Rx Antenna where appropriate")
parser.add_option("", "--rx-freq", type="eng_float", default=None,
help="set receive frequency to FREQ [default=%default]",
metavar="FREQ")
parser.add_option("", "--lo-offset", type="eng_float", default=0,
help="set local oscillator offset in Hz (default is 0)")
parser.add_option("", "--rx-gain", type="eng_float", default=None,
help="set receive gain in dB (default is midpoint)")
parser.add_option("-C", "--clock-source", type="string", default=None,
help="select clock source (e.g. 'external') [default=%default]")
if not parser.has_option("--verbose"):
parser.add_option("-v", "--verbose", action="store_true", default=False)
# Make a static method to call before instantiation
add_options = staticmethod(add_options)
def _print_verbage(self):
"""
Prints information about the UHD receiver
"""
print "\nUHD Receiver:"
print "UHD Args: %s" % (self._args)
print "Freq: %sHz" % (eng_notation.num_to_str(self._freq))
print "LO Offset: %sHz" % (eng_notation.num_to_str(self._lo_offset))
print "Gain: %f dB" % (self._gain)
print "Sample Rate: %ssps" % (eng_notation.num_to_str(self._rate))
print "Antenna: %s" % (self._ant)
print "Spec: %s" % (self._spec)
print "Clock Source: %s" % (self._clock_source)
| gpl-3.0 |
hamzehd/edx-platform | cms/djangoapps/contentstore/tests/test_libraries.py | 82 | 40249 | """
Content library unit tests that require the CMS runtime.
"""
from django.test.utils import override_settings
from contentstore.tests.utils import AjaxEnabledTestClient, parse_json
from contentstore.utils import reverse_url, reverse_usage_url, reverse_library_url
from contentstore.views.item import _duplicate_item
from contentstore.views.preview import _load_preview_module
from contentstore.views.tests.test_library import LIBRARY_REST_URL
import ddt
from mock import patch
from student.auth import has_studio_read_access, has_studio_write_access
from student.roles import (
CourseInstructorRole, CourseStaffRole, CourseCreatorRole, LibraryUserRole,
OrgStaffRole, OrgInstructorRole, OrgLibraryUserRole,
)
from xblock.reference.user_service import XBlockUser
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from mock import Mock
from opaque_keys.edx.locator import CourseKey, LibraryLocator
from openedx.core.djangoapps.content.course_structures.tests import SignalDisconnectTestMixin
class LibraryTestCase(ModuleStoreTestCase):
"""
Common functionality for content libraries tests
"""
def setUp(self):
user_password = super(LibraryTestCase, self).setUp()
self.client = AjaxEnabledTestClient()
self.client.login(username=self.user.username, password=user_password)
self.lib_key = self._create_library()
self.library = modulestore().get_library(self.lib_key)
self.session_data = {} # Used by _bind_module
def _create_library(self, org="org", library="lib", display_name="Test Library"):
"""
Helper method used to create a library. Uses the REST API.
"""
response = self.client.ajax_post(LIBRARY_REST_URL, {
'org': org,
'library': library,
'display_name': display_name,
})
self.assertEqual(response.status_code, 200)
lib_info = parse_json(response)
lib_key = CourseKey.from_string(lib_info['library_key'])
self.assertIsInstance(lib_key, LibraryLocator)
return lib_key
def _add_library_content_block(self, course, library_key, other_settings=None):
"""
Helper method to add a LibraryContent block to a course.
The block will be configured to select content from the library
specified by library_key.
other_settings can be a dict of Scope.settings fields to set on the block.
"""
return ItemFactory.create(
category='library_content',
parent_location=course.location,
user_id=self.user.id,
publish_item=False,
source_library_id=unicode(library_key),
**(other_settings or {})
)
def _add_simple_content_block(self):
""" Adds simple HTML block to library """
return ItemFactory.create(
category="html", parent_location=self.library.location,
user_id=self.user.id, publish_item=False
)
def _refresh_children(self, lib_content_block, status_code_expected=200):
"""
Helper method: Uses the REST API to call the 'refresh_children' handler
of a LibraryContent block
"""
if 'user' not in lib_content_block.runtime._services: # pylint: disable=protected-access
mocked_user_service = Mock(user_id=self.user.id)
mocked_user_service.get_current_user.return_value = XBlockUser(is_current_user=True)
lib_content_block.runtime._services['user'] = mocked_user_service # pylint: disable=protected-access
handler_url = reverse_usage_url(
'component_handler',
lib_content_block.location,
kwargs={'handler': 'refresh_children'}
)
response = self.client.ajax_post(handler_url)
self.assertEqual(response.status_code, status_code_expected)
return modulestore().get_item(lib_content_block.location)
def _bind_module(self, descriptor, user=None):
"""
Helper to use the CMS's module system so we can access student-specific fields.
"""
if user is None:
user = self.user
if user not in self.session_data:
self.session_data[user] = {}
request = Mock(user=user, session=self.session_data[user])
_load_preview_module(request, descriptor) # pylint: disable=protected-access
def _update_item(self, usage_key, metadata):
"""
Helper method: Uses the REST API to update the fields of an XBlock.
This will result in the XBlock's editor_saved() method being called.
"""
update_url = reverse_usage_url("xblock_handler", usage_key)
return self.client.ajax_post(
update_url,
data={
'metadata': metadata,
}
)
def _list_libraries(self):
"""
Use the REST API to get a list of libraries visible to the current user.
"""
response = self.client.get_json(LIBRARY_REST_URL)
self.assertEqual(response.status_code, 200)
return parse_json(response)
@ddt.ddt
class TestLibraries(LibraryTestCase):
"""
High-level tests for libraries
"""
@ddt.data(
(2, 1, 1),
(2, 2, 2),
(2, 20, 2),
)
@ddt.unpack
def test_max_items(self, num_to_create, num_to_select, num_expected):
"""
Test the 'max_count' property of LibraryContent blocks.
"""
for _ in range(num_to_create):
self._add_simple_content_block()
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
lc_block = self._add_library_content_block(course, self.lib_key, {'max_count': num_to_select})
self.assertEqual(len(lc_block.children), 0)
lc_block = self._refresh_children(lc_block)
# Now, we want to make sure that .children has the total # of potential
# children, and that get_child_descriptors() returns the actual children
# chosen for a given student.
# In order to be able to call get_child_descriptors(), we must first
# call bind_for_student:
self._bind_module(lc_block)
self.assertEqual(len(lc_block.children), num_to_create)
self.assertEqual(len(lc_block.get_child_descriptors()), num_expected)
def test_consistent_children(self):
"""
Test that the same student will always see the same selected child block
"""
# Create many blocks in the library and add them to a course:
for num in range(8):
ItemFactory.create(
data="This is #{}".format(num + 1),
category="html", parent_location=self.library.location, user_id=self.user.id, publish_item=False
)
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
lc_block = self._add_library_content_block(course, self.lib_key, {'max_count': 1})
lc_block_key = lc_block.location
lc_block = self._refresh_children(lc_block)
def get_child_of_lc_block(block):
"""
Fetch the child shown to the current user.
"""
children = block.get_child_descriptors()
self.assertEqual(len(children), 1)
return children[0]
# Check which child a student will see:
self._bind_module(lc_block)
chosen_child = get_child_of_lc_block(lc_block)
chosen_child_defn_id = chosen_child.definition_locator.definition_id
lc_block.save()
modulestore().update_item(lc_block, self.user.id)
# Now re-load the block and try again:
def check():
"""
Confirm that chosen_child is still the child seen by the test student
"""
for _ in range(6): # Repeat many times b/c blocks are randomized
lc_block = modulestore().get_item(lc_block_key) # Reload block from the database
self._bind_module(lc_block)
current_child = get_child_of_lc_block(lc_block)
self.assertEqual(current_child.location, chosen_child.location)
self.assertEqual(current_child.data, chosen_child.data)
self.assertEqual(current_child.definition_locator.definition_id, chosen_child_defn_id)
check()
# Refresh the children:
lc_block = self._refresh_children(lc_block)
# Now re-load the block and try yet again, in case refreshing the children changed anything:
check()
def test_definition_shared_with_library(self):
"""
Test that the same block definition is used for the library and course[s]
"""
block1 = self._add_simple_content_block()
def_id1 = block1.definition_locator.definition_id
block2 = self._add_simple_content_block()
def_id2 = block2.definition_locator.definition_id
self.assertNotEqual(def_id1, def_id2)
# Next, create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
for child_key in lc_block.children:
child = modulestore().get_item(child_key)
def_id = child.definition_locator.definition_id
self.assertIn(def_id, (def_id1, def_id2))
def test_fields(self):
"""
Test that blocks used from a library have the same field values as
defined by the library author.
"""
data_value = "A Scope.content value"
name_value = "A Scope.settings value"
lib_block = ItemFactory.create(
category="html",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
display_name=name_value,
data=data_value,
)
self.assertEqual(lib_block.data, data_value)
self.assertEqual(lib_block.display_name, name_value)
# Next, create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
course_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(course_block.data, data_value)
self.assertEqual(course_block.display_name, name_value)
def test_block_with_children(self):
"""
Test that blocks used from a library can have children.
"""
data_value = "A Scope.content value"
name_value = "A Scope.settings value"
# In the library, create a vertical block with a child:
vert_block = ItemFactory.create(
category="vertical",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
)
child_block = ItemFactory.create(
category="html",
parent_location=vert_block.location,
user_id=self.user.id,
publish_item=False,
display_name=name_value,
data=data_value,
)
self.assertEqual(child_block.data, data_value)
self.assertEqual(child_block.display_name, name_value)
# Next, create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
self.assertEqual(len(lc_block.children), 1)
course_vert_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(len(course_vert_block.children), 1)
course_child_block = modulestore().get_item(course_vert_block.children[0])
self.assertEqual(course_child_block.data, data_value)
self.assertEqual(course_child_block.display_name, name_value)
def test_change_after_first_sync(self):
"""
Check that nothing goes wrong if we (A) Set up a LibraryContent block
and use it successfully, then (B) Give it an invalid configuration.
No children should be deleted until the configuration is fixed.
"""
# Add a block to the library:
data_value = "Hello world!"
ItemFactory.create(
category="html",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
display_name="HTML BLock",
data=data_value,
)
# Create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
self.assertEqual(len(lc_block.children), 1)
# Now, change the block settings to have an invalid library key:
resp = self._update_item(
lc_block.location,
{"source_library_id": "library-v1:NOT+FOUND"},
)
self.assertEqual(resp.status_code, 200)
lc_block = modulestore().get_item(lc_block.location)
self.assertEqual(len(lc_block.children), 1) # Children should not be deleted due to a bad setting.
html_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(html_block.data, data_value)
def test_refreshes_children_if_libraries_change(self):
""" Tests that children are automatically refreshed if libraries list changes """
library2key = self._create_library("org2", "lib2", "Library2")
library2 = modulestore().get_library(library2key)
data1, data2 = "Hello world!", "Hello other world!"
ItemFactory.create(
category="html",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
display_name="Lib1: HTML BLock",
data=data1,
)
ItemFactory.create(
category="html",
parent_location=library2.location,
user_id=self.user.id,
publish_item=False,
display_name="Lib 2: HTML BLock",
data=data2,
)
# Create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
self.assertEqual(len(lc_block.children), 1)
# Now, change the block settings to have an invalid library key:
resp = self._update_item(
lc_block.location,
{"source_library_id": str(library2key)},
)
self.assertEqual(resp.status_code, 200)
lc_block = modulestore().get_item(lc_block.location)
self.assertEqual(len(lc_block.children), 1)
html_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(html_block.data, data2)
@patch("xmodule.library_tools.SearchEngine.get_search_engine", Mock(return_value=None))
def test_refreshes_children_if_capa_type_change(self):
""" Tests that children are automatically refreshed if capa type field changes """
name1, name2 = "Option Problem", "Multiple Choice Problem"
ItemFactory.create(
category="problem",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
display_name=name1,
data="<problem><optionresponse></optionresponse></problem>",
)
ItemFactory.create(
category="problem",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
display_name=name2,
data="<problem><multiplechoiceresponse></multiplechoiceresponse></problem>",
)
# Create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
self.assertEqual(len(lc_block.children), 2)
resp = self._update_item(
lc_block.location,
{"capa_type": 'optionresponse'},
)
self.assertEqual(resp.status_code, 200)
lc_block = modulestore().get_item(lc_block.location)
self.assertEqual(len(lc_block.children), 1)
html_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(html_block.display_name, name1)
resp = self._update_item(
lc_block.location,
{"capa_type": 'multiplechoiceresponse'},
)
self.assertEqual(resp.status_code, 200)
lc_block = modulestore().get_item(lc_block.location)
self.assertEqual(len(lc_block.children), 1)
html_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(html_block.display_name, name2)
def test_refresh_fails_for_unknown_library(self):
""" Tests that refresh children fails if unknown library is configured """
# Create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
self.assertEqual(len(lc_block.children), 0)
# Now, change the block settings to have an invalid library key:
resp = self._update_item(
lc_block.location,
{"source_library_id": "library-v1:NOT+FOUND"},
)
self.assertEqual(resp.status_code, 200)
with self.assertRaises(ValueError):
self._refresh_children(lc_block, status_code_expected=400)
@ddt.ddt
@patch('django.conf.settings.SEARCH_ENGINE', None)
class TestLibraryAccess(SignalDisconnectTestMixin, LibraryTestCase):
"""
Test Roles and Permissions related to Content Libraries
"""
def setUp(self):
""" Create a library, staff user, and non-staff user """
super(TestLibraryAccess, self).setUp()
self.non_staff_user, self.non_staff_user_password = self.create_non_staff_user()
def _login_as_non_staff_user(self, logout_first=True):
""" Login as a user that starts out with no roles/permissions granted. """
if logout_first:
self.client.logout() # We start logged in as a staff user
self.client.login(username=self.non_staff_user.username, password=self.non_staff_user_password)
def _assert_cannot_create_library(self, org="org", library="libfail", expected_code=403):
""" Ensure the current user is not able to create a library. """
self.assertTrue(expected_code >= 300)
response = self.client.ajax_post(
LIBRARY_REST_URL,
{'org': org, 'library': library, 'display_name': "Irrelevant"}
)
self.assertEqual(response.status_code, expected_code)
key = LibraryLocator(org=org, library=library)
self.assertEqual(modulestore().get_library(key), None)
def _can_access_library(self, library):
"""
Use the normal studio library URL to check if we have access
`library` can be a LibraryLocator or the library's root XBlock
"""
if isinstance(library, (basestring, LibraryLocator)):
lib_key = library
else:
lib_key = library.location.library_key
response = self.client.get(reverse_library_url('library_handler', unicode(lib_key)))
self.assertIn(response.status_code, (200, 302, 403))
return response.status_code == 200
def tearDown(self):
"""
Log out when done each test
"""
self.client.logout()
super(TestLibraryAccess, self).tearDown()
def test_creation(self):
"""
The user that creates a library should have instructor (admin) and staff permissions
"""
# self.library has been auto-created by the staff user.
self.assertTrue(has_studio_write_access(self.user, self.lib_key))
self.assertTrue(has_studio_read_access(self.user, self.lib_key))
# Make sure the user was actually assigned the instructor role and not just using is_staff superpowers:
self.assertTrue(CourseInstructorRole(self.lib_key).has_user(self.user))
# Now log out and ensure we are forbidden from creating a library:
self.client.logout()
self._assert_cannot_create_library(expected_code=302) # 302 redirect to login expected
# Now check that logged-in users without CourseCreator role can still create libraries
self._login_as_non_staff_user(logout_first=False)
self.assertFalse(CourseCreatorRole().has_user(self.non_staff_user))
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True}):
lib_key2 = self._create_library(library="lib2", display_name="Test Library 2")
library2 = modulestore().get_library(lib_key2)
self.assertIsNotNone(library2)
@ddt.data(
CourseInstructorRole,
CourseStaffRole,
LibraryUserRole,
)
def test_access(self, access_role):
"""
Test the various roles that allow viewing libraries are working correctly.
"""
# At this point, one library exists, created by the currently-logged-in staff user.
# Create another library as staff:
library2_key = self._create_library(library="lib2")
# Login as non_staff_user:
self._login_as_non_staff_user()
# non_staff_user shouldn't be able to access any libraries:
lib_list = self._list_libraries()
self.assertEqual(len(lib_list), 0)
self.assertFalse(self._can_access_library(self.library))
self.assertFalse(self._can_access_library(library2_key))
# Now manually intervene to give non_staff_user access to library2_key:
access_role(library2_key).add_users(self.non_staff_user)
# Now non_staff_user should be able to access library2_key only:
lib_list = self._list_libraries()
self.assertEqual(len(lib_list), 1)
self.assertEqual(lib_list[0]["library_key"], unicode(library2_key))
self.assertTrue(self._can_access_library(library2_key))
self.assertFalse(self._can_access_library(self.library))
@ddt.data(
OrgStaffRole,
OrgInstructorRole,
OrgLibraryUserRole,
)
def test_org_based_access(self, org_access_role):
"""
Test the various roles that allow viewing all of an organization's
libraries are working correctly.
"""
# Create some libraries as the staff user:
lib_key_pacific = self._create_library(org="PacificX", library="libP")
lib_key_atlantic = self._create_library(org="AtlanticX", library="libA")
# Login as a non-staff:
self._login_as_non_staff_user()
# Now manually intervene to give non_staff_user access to all "PacificX" libraries:
org_access_role(lib_key_pacific.org).add_users(self.non_staff_user)
# Now non_staff_user should be able to access lib_key_pacific only:
lib_list = self._list_libraries()
self.assertEqual(len(lib_list), 1)
self.assertEqual(lib_list[0]["library_key"], unicode(lib_key_pacific))
self.assertTrue(self._can_access_library(lib_key_pacific))
self.assertFalse(self._can_access_library(lib_key_atlantic))
self.assertFalse(self._can_access_library(self.lib_key))
@ddt.data(True, False)
def test_read_only_role(self, use_org_level_role):
"""
Test the read-only role (LibraryUserRole and its org-level equivalent)
"""
# As staff user, add a block to self.library:
block = self._add_simple_content_block()
# Login as a non_staff_user:
self._login_as_non_staff_user()
self.assertFalse(self._can_access_library(self.library))
block_url = reverse_usage_url('xblock_handler', block.location)
def can_read_block():
""" Check if studio lets us view the XBlock in the library """
response = self.client.get_json(block_url)
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
return response.status_code == 200
def can_edit_block():
""" Check if studio lets us edit the XBlock in the library """
response = self.client.ajax_post(block_url)
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
return response.status_code == 200
def can_delete_block():
""" Check if studio lets us delete the XBlock in the library """
response = self.client.delete(block_url)
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
return response.status_code == 200
def can_copy_block():
""" Check if studio lets us duplicate the XBlock in the library """
response = self.client.ajax_post(reverse_url('xblock_handler'), {
'parent_locator': unicode(self.library.location),
'duplicate_source_locator': unicode(block.location),
})
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
return response.status_code == 200
def can_create_block():
""" Check if studio lets us make a new XBlock in the library """
response = self.client.ajax_post(reverse_url('xblock_handler'), {
'parent_locator': unicode(self.library.location), 'category': 'html',
})
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
return response.status_code == 200
# Check that we do not have read or write access to block:
self.assertFalse(can_read_block())
self.assertFalse(can_edit_block())
self.assertFalse(can_delete_block())
self.assertFalse(can_copy_block())
self.assertFalse(can_create_block())
# Give non_staff_user read-only permission:
if use_org_level_role:
OrgLibraryUserRole(self.lib_key.org).add_users(self.non_staff_user)
else:
LibraryUserRole(self.lib_key).add_users(self.non_staff_user)
self.assertTrue(self._can_access_library(self.library))
self.assertTrue(can_read_block())
self.assertFalse(can_edit_block())
self.assertFalse(can_delete_block())
self.assertFalse(can_copy_block())
self.assertFalse(can_create_block())
@ddt.data(
(LibraryUserRole, CourseStaffRole, True),
(CourseStaffRole, CourseStaffRole, True),
(None, CourseStaffRole, False),
(LibraryUserRole, None, False),
)
@ddt.unpack
def test_duplicate_across_courses(self, library_role, course_role, expected_result):
"""
Test that the REST API will correctly allow/refuse when copying
from a library with (write, read, or no) access to a course with (write or no) access.
"""
# As staff user, add a block to self.library:
block = self._add_simple_content_block()
# And create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
self._login_as_non_staff_user()
# Assign roles:
if library_role:
library_role(self.lib_key).add_users(self.non_staff_user)
if course_role:
course_role(course.location.course_key).add_users(self.non_staff_user)
# Copy block to the course:
response = self.client.ajax_post(reverse_url('xblock_handler'), {
'parent_locator': unicode(course.location),
'duplicate_source_locator': unicode(block.location),
})
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
duplicate_action_allowed = (response.status_code == 200)
self.assertEqual(duplicate_action_allowed, expected_result)
@ddt.data(
(LibraryUserRole, CourseStaffRole, True),
(CourseStaffRole, CourseStaffRole, True),
(None, CourseStaffRole, False),
(LibraryUserRole, None, False),
)
@ddt.unpack
def test_refresh_library_content_permissions(self, library_role, course_role, expected_result):
"""
Test that the LibraryContent block's 'refresh_children' handler will correctly
handle permissions and allow/refuse when updating its content with the latest
version of a library. We try updating from a library with (write, read, or no)
access to a course with (write or no) access.
"""
# As staff user, add a block to self.library:
self._add_simple_content_block()
# And create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
self._login_as_non_staff_user()
# Assign roles:
if library_role:
library_role(self.lib_key).add_users(self.non_staff_user)
if course_role:
course_role(course.location.course_key).add_users(self.non_staff_user)
# Try updating our library content block:
lc_block = self._add_library_content_block(course, self.lib_key)
# We must use the CMS's module system in order to get permissions checks.
self._bind_module(lc_block, user=self.non_staff_user)
lc_block = self._refresh_children(lc_block, status_code_expected=200 if expected_result else 403)
self.assertEqual(len(lc_block.children), 1 if expected_result else 0)
@ddt.ddt
@override_settings(SEARCH_ENGINE=None)
class TestOverrides(LibraryTestCase):
"""
Test that overriding block Scope.settings fields from a library in a specific course works
"""
def setUp(self):
super(TestOverrides, self).setUp()
self.original_display_name = "A Problem Block"
self.original_weight = 1
# Create a problem block in the library:
self.problem = ItemFactory.create(
category="problem",
parent_location=self.library.location,
display_name=self.original_display_name, # display_name is a Scope.settings field
weight=self.original_weight, # weight is also a Scope.settings field
user_id=self.user.id,
publish_item=False,
)
# Refresh library now that we've added something.
self.library = modulestore().get_library(self.lib_key)
# Also create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
self.course = CourseFactory.create()
# Add a LibraryContent block to the course:
self.lc_block = self._add_library_content_block(self.course, self.lib_key)
self.lc_block = self._refresh_children(self.lc_block)
self.problem_in_course = modulestore().get_item(self.lc_block.children[0])
def test_overrides(self):
"""
Test that we can override Scope.settings values in a course.
"""
new_display_name = "Modified Problem Title"
new_weight = 10
self.problem_in_course.display_name = new_display_name
self.problem_in_course.weight = new_weight
modulestore().update_item(self.problem_in_course, self.user.id)
# Add a second LibraryContent block to the course, with no override:
lc_block2 = self._add_library_content_block(self.course, self.lib_key)
lc_block2 = self._refresh_children(lc_block2)
# Re-load the two problem blocks - one with and one without an override:
self.problem_in_course = modulestore().get_item(self.lc_block.children[0])
problem2_in_course = modulestore().get_item(lc_block2.children[0])
self.assertEqual(self.problem_in_course.display_name, new_display_name)
self.assertEqual(self.problem_in_course.weight, new_weight)
self.assertEqual(problem2_in_course.display_name, self.original_display_name)
self.assertEqual(problem2_in_course.weight, self.original_weight)
def test_reset_override(self):
"""
If we override a setting and then reset it, we should get the library value.
"""
new_display_name = "Modified Problem Title"
new_weight = 10
self.problem_in_course.display_name = new_display_name
self.problem_in_course.weight = new_weight
modulestore().update_item(self.problem_in_course, self.user.id)
self.problem_in_course = modulestore().get_item(self.problem_in_course.location)
self.assertEqual(self.problem_in_course.display_name, new_display_name)
self.assertEqual(self.problem_in_course.weight, new_weight)
# Reset:
for field_name in ["display_name", "weight"]:
self.problem_in_course.fields[field_name].delete_from(self.problem_in_course)
# Save, reload, and verify:
modulestore().update_item(self.problem_in_course, self.user.id)
self.problem_in_course = modulestore().get_item(self.problem_in_course.location)
self.assertEqual(self.problem_in_course.display_name, self.original_display_name)
self.assertEqual(self.problem_in_course.weight, self.original_weight)
def test_consistent_definitions(self):
"""
Make sure that the new child of the LibraryContent block
shares its definition with the original (self.problem).
This test is specific to split mongo.
"""
definition_id = self.problem.definition_locator.definition_id
self.assertEqual(self.problem_in_course.definition_locator.definition_id, definition_id)
# Now even if we change some Scope.settings fields and refresh, the definition should be unchanged
self.problem.weight = 20
self.problem.display_name = "NEW"
modulestore().update_item(self.problem, self.user.id)
self.lc_block = self._refresh_children(self.lc_block)
self.problem_in_course = modulestore().get_item(self.problem_in_course.location)
self.assertEqual(self.problem.definition_locator.definition_id, definition_id)
self.assertEqual(self.problem_in_course.definition_locator.definition_id, definition_id)
@ddt.data(False, True)
def test_persistent_overrides(self, duplicate):
"""
Test that when we override Scope.settings values in a course,
the override values persist even when the block is refreshed
with updated blocks from the library.
"""
new_display_name = "Modified Problem Title"
new_weight = 15
self.problem_in_course.display_name = new_display_name
self.problem_in_course.weight = new_weight
modulestore().update_item(self.problem_in_course, self.user.id)
if duplicate:
# Check that this also works when the LibraryContent block is duplicated.
self.lc_block = modulestore().get_item(
_duplicate_item(self.course.location, self.lc_block.location, self.user)
)
self.problem_in_course = modulestore().get_item(self.lc_block.children[0])
else:
self.problem_in_course = modulestore().get_item(self.problem_in_course.location)
self.assertEqual(self.problem_in_course.display_name, new_display_name)
self.assertEqual(self.problem_in_course.weight, new_weight)
# Change the settings in the library version:
self.problem.display_name = "X"
self.problem.weight = 99
new_data_value = "<problem><p>Changed data to check that non-overridden fields *do* get updated.</p></problem>"
self.problem.data = new_data_value
modulestore().update_item(self.problem, self.user.id)
self.lc_block = self._refresh_children(self.lc_block)
self.problem_in_course = modulestore().get_item(self.problem_in_course.location)
self.assertEqual(self.problem_in_course.display_name, new_display_name)
self.assertEqual(self.problem_in_course.weight, new_weight)
self.assertEqual(self.problem_in_course.data, new_data_value)
def test_duplicated_version(self):
"""
Test that if a library is updated, and the content block is duplicated,
the new block will use the old library version and not the new one.
"""
store = modulestore()
self.assertEqual(len(self.library.children), 1)
self.assertEqual(len(self.lc_block.children), 1)
# Edit the only problem in the library:
self.problem.display_name = "--changed in library--"
store.update_item(self.problem, self.user.id)
# Create an additional problem block in the library:
ItemFactory.create(
category="problem",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
)
# Refresh our reference to the library
self.library = store.get_library(self.lib_key)
# Refresh our reference to the block
self.lc_block = store.get_item(self.lc_block.location)
self.problem_in_course = store.get_item(self.problem_in_course.location)
# The library has changed...
self.assertEqual(len(self.library.children), 2)
# But the block hasn't.
self.assertEqual(len(self.lc_block.children), 1)
self.assertEqual(self.problem_in_course.location, self.lc_block.children[0])
self.assertEqual(self.problem_in_course.display_name, self.original_display_name)
# Duplicate self.lc_block:
duplicate = store.get_item(
_duplicate_item(self.course.location, self.lc_block.location, self.user)
)
# The duplicate should have identical children to the original:
self.assertEqual(len(duplicate.children), 1)
self.assertTrue(self.lc_block.source_library_version)
self.assertEqual(self.lc_block.source_library_version, duplicate.source_library_version)
problem2_in_course = store.get_item(duplicate.children[0])
self.assertEqual(problem2_in_course.display_name, self.original_display_name)
class TestIncompatibleModuleStore(LibraryTestCase):
"""
Tests for proper validation errors with an incompatible course modulestore.
"""
def setUp(self):
super(TestIncompatibleModuleStore, self).setUp()
# Create a course in an incompatible modulestore.
with modulestore().default_store(ModuleStoreEnum.Type.mongo):
self.course = CourseFactory.create()
# Add a LibraryContent block to the course:
self.lc_block = self._add_library_content_block(self.course, self.lib_key)
def test_incompatible_modulestore(self):
"""
Verifies that, if a user is using a modulestore that doesn't support libraries,
a validation error will be produced.
"""
validation = self.lc_block.validate()
self.assertEqual(validation.summary.type, validation.summary.ERROR)
self.assertIn(
"This course does not support content libraries.", validation.summary.text)
| agpl-3.0 |
defance/edx-platform | common/djangoapps/third_party_auth/models.py | 29 | 25162 | # -*- coding: utf-8 -*-
"""
Models used to implement SAML SSO support in third_party_auth
(including Shibboleth support)
"""
from __future__ import absolute_import
from config_models.models import ConfigurationModel, cache
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
import json
import logging
from provider.utils import long_token
from provider.oauth2.models import Client
from social.backends.base import BaseAuth
from social.backends.oauth import OAuthAuth
from social.backends.saml import SAMLAuth, SAMLIdentityProvider
from .lti import LTIAuthBackend, LTI_PARAMS_KEY
from social.exceptions import SocialAuthBaseException
from social.utils import module_member
log = logging.getLogger(__name__)
# A dictionary of {name: class} entries for each python-social-auth backend available.
# Because this setting can specify arbitrary code to load and execute, it is set via
# normal Django settings only and cannot be changed at runtime:
def _load_backend_classes(base_class=BaseAuth):
""" Load the list of python-social-auth backend classes from Django settings """
for class_path in settings.AUTHENTICATION_BACKENDS:
auth_class = module_member(class_path)
if issubclass(auth_class, base_class):
yield auth_class
_PSA_BACKENDS = {backend_class.name: backend_class for backend_class in _load_backend_classes()}
_PSA_OAUTH2_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(OAuthAuth)]
_PSA_SAML_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(SAMLAuth)]
_LTI_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(LTIAuthBackend)]
def clean_json(value, of_type):
""" Simple helper method to parse and clean JSON """
if not value.strip():
return json.dumps(of_type())
try:
value_python = json.loads(value)
except ValueError as err:
raise ValidationError("Invalid JSON: {}".format(err.message))
if not isinstance(value_python, of_type):
raise ValidationError("Expected a JSON {}".format(of_type))
return json.dumps(value_python, indent=4)
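# Illustrative sketch (not part of the original module), showing how
# clean_json handles a few representative inputs given the logic above:
#   clean_json('', dict)          # -> '{}' (blank input becomes an empty object)
#   clean_json('{"a": 1}', dict)  # -> the same object, re-serialized with indent=4
#   clean_json('[1, 2]', dict)    # -> raises ValidationError (a list, not a dict)
#   clean_json('oops', dict)      # -> raises ValidationError (invalid JSON)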
class AuthNotConfigured(SocialAuthBaseException):
""" Exception when SAMLProviderData or other required info is missing """
def __init__(self, provider_name):
super(AuthNotConfigured, self).__init__()
self.provider_name = provider_name
def __str__(self):
return _('Authentication with {} is currently unavailable.').format(
self.provider_name
)
class ProviderConfig(ConfigurationModel):
"""
Abstract Base Class for configuring a third_party_auth provider
"""
icon_class = models.CharField(
max_length=50, default='fa-sign-in',
help_text=(
'The Font Awesome (or custom) icon class to use on the login button for this provider. '
'Examples: fa-google-plus, fa-facebook, fa-linkedin, fa-sign-in, fa-university'
),
)
name = models.CharField(max_length=50, blank=False, help_text="Name of this provider (shown to users)")
secondary = models.BooleanField(
default=False,
help_text=_(
'Secondary providers are displayed less prominently, '
'in a separate list of "Institution" login providers.'
),
)
skip_registration_form = models.BooleanField(
default=False,
help_text=_(
"If this option is enabled, users will not be asked to confirm their details "
"(name, email, etc.) during the registration process. Only select this option "
"for trusted providers that are known to provide accurate user information."
),
)
skip_email_verification = models.BooleanField(
default=False,
help_text=_(
"If this option is selected, users will not be required to confirm their "
"email, and their account will be activated immediately upon registration."
),
)
prefix = None # used for provider_id. Set to a string value in subclass
backend_name = None # Set to a field or fixed value in subclass
accepts_logins = True # Whether to display a sign-in button when the provider is enabled
# "enabled" field is inherited from ConfigurationModel
class Meta(object):
app_label = "third_party_auth"
abstract = True
@property
def provider_id(self):
""" Unique string key identifying this provider. Must be URL and css class friendly. """
assert self.prefix is not None
return "-".join((self.prefix, ) + tuple(getattr(self, field) for field in self.KEY_FIELDS))
@property
def backend_class(self):
""" Get the python-social-auth backend class used for this provider """
return _PSA_BACKENDS[self.backend_name]
def get_url_params(self):
""" Get a dict of GET parameters to append to login links for this provider """
return {}
def is_active_for_pipeline(self, pipeline):
""" Is this provider being used for the specified pipeline? """
return self.backend_name == pipeline['backend']
def match_social_auth(self, social_auth):
""" Is this provider being used for this UserSocialAuth entry? """
return self.backend_name == social_auth.provider
def get_remote_id_from_social_auth(self, social_auth):
""" Given a UserSocialAuth object, return the remote ID used by this provider. """
# This is generally the same thing as the UID, except when one backend is used for multiple providers
assert self.match_social_auth(social_auth)
return social_auth.uid
def get_social_auth_uid(self, remote_id):
"""
Return the uid in social auth.
This is the default implementation; subclasses may override it.
"""
return remote_id
@classmethod
def get_register_form_data(cls, pipeline_kwargs):
"""Gets dict of data to display on the register form.
common.djangoapps.student.views.register_user uses this to populate the
new account creation form with values supplied by the user's chosen
provider, preventing duplicate data entry.
Args:
pipeline_kwargs: dict of string -> object. Keyword arguments
accumulated by the pipeline thus far.
Returns:
Dict of string -> string. Keys are names of form fields; values are
values for that field. Where there is no value, the empty string
must be used.
"""
# Details about the user sent back from the provider.
details = pipeline_kwargs.get('details')
# Get the username separately to take advantage of the de-duping logic
# built into the pipeline. The provider cannot de-dupe because it can't
# check the state of taken usernames in our system. Note that there is
# technically a data race between the creation of this value and the
# creation of the user object, so it is still possible for users to get
# an error on submit.
suggested_username = pipeline_kwargs.get('username')
return {
'email': details.get('email', ''),
'name': details.get('fullname', ''),
'username': suggested_username,
}
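# Illustrative sketch (not part of the original class): given hypothetical
# pipeline kwargs {'details': {'email': 'jane@example.com', 'fullname': 'Jane Doe'},
# 'username': 'jane'}, get_register_form_data returns
# {'email': 'jane@example.com', 'name': 'Jane Doe', 'username': 'jane'}.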
def get_authentication_backend(self):
"""Gets associated Django settings.AUTHENTICATION_BACKEND string."""
return '{}.{}'.format(self.backend_class.__module__, self.backend_class.__name__)
class OAuth2ProviderConfig(ProviderConfig):
"""
Configuration Entry for an OAuth2 based provider.
Also works for OAuth1 providers.
"""
prefix = 'oa2'
KEY_FIELDS = ('backend_name', ) # Backend name is unique
backend_name = models.CharField(
max_length=50, choices=[(name, name) for name in _PSA_OAUTH2_BACKENDS], blank=False, db_index=True,
help_text=(
"Which python-social-auth OAuth2 provider backend to use. "
"The list of backend choices is determined by the THIRD_PARTY_AUTH_BACKENDS setting."
# To be precise, it's set by AUTHENTICATION_BACKENDS - which aws.py sets from THIRD_PARTY_AUTH_BACKENDS
)
)
key = models.TextField(blank=True, verbose_name="Client ID")
secret = models.TextField(
blank=True,
verbose_name="Client Secret",
help_text=(
'For increased security, you can avoid storing this in your database by leaving '
'this field blank and setting '
'SOCIAL_AUTH_OAUTH_SECRETS = {"(backend name)": "secret", ...} '
'in your instance\'s Django settings (or lms.auth.json)'
)
)
other_settings = models.TextField(blank=True, help_text="Optional JSON object with advanced settings, if any.")
class Meta(object):
app_label = "third_party_auth"
verbose_name = "Provider Configuration (OAuth)"
verbose_name_plural = verbose_name
def clean(self):
""" Standardize and validate fields """
super(OAuth2ProviderConfig, self).clean()
self.other_settings = clean_json(self.other_settings, dict)
def get_setting(self, name):
""" Get the value of a setting, or raise KeyError """
if name == "KEY":
return self.key
if name == "SECRET":
if self.secret:
return self.secret
# To allow instances to avoid storing secrets in the DB, the secret can also be set via Django:
return getattr(settings, 'SOCIAL_AUTH_OAUTH_SECRETS', {}).get(self.backend_name, '')
if self.other_settings:
other_settings = json.loads(self.other_settings)
assert isinstance(other_settings, dict), "other_settings should be a JSON object (dictionary)"
return other_settings[name]
raise KeyError
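# Illustrative sketch (not part of the original class) of the lookup order
# implemented above, with hypothetical values:
#   config.get_setting("KEY")     # -> self.key (the Client ID)
#   config.get_setting("SECRET")  # -> self.secret, else the matching entry in
#                                 #    settings.SOCIAL_AUTH_OAUTH_SECRETS
#   config.get_setting("SCOPE")   # -> looked up in the other_settings JSON,
#                                 #    else KeyError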
class SAMLProviderConfig(ProviderConfig):
"""
Configuration Entry for a SAML/Shibboleth provider.
"""
prefix = 'saml'
KEY_FIELDS = ('idp_slug', )
backend_name = models.CharField(
max_length=50, default='tpa-saml', choices=[(name, name) for name in _PSA_SAML_BACKENDS], blank=False,
help_text="Which python-social-auth provider backend to use. 'tpa-saml' is the standard edX SAML backend.")
idp_slug = models.SlugField(
max_length=30, db_index=True,
help_text=(
'A short string uniquely identifying this provider. '
'Cannot contain spaces and should be usable as a CSS class. Examples: "ubc", "mit-staging"'
))
entity_id = models.CharField(
max_length=255, verbose_name="Entity ID", help_text="Example: https://idp.testshib.org/idp/shibboleth")
metadata_source = models.CharField(
max_length=255,
help_text=(
"URL to this provider's XML metadata. Should be an HTTPS URL. "
"Example: https://www.testshib.org/metadata/testshib-providers.xml"
))
attr_user_permanent_id = models.CharField(
max_length=128, blank=True, verbose_name="User ID Attribute",
help_text="URN of the SAML attribute that we can use as a unique, persistent user ID. Leave blank for default.")
attr_full_name = models.CharField(
max_length=128, blank=True, verbose_name="Full Name Attribute",
help_text="URN of SAML attribute containing the user's full name. Leave blank for default.")
attr_first_name = models.CharField(
max_length=128, blank=True, verbose_name="First Name Attribute",
help_text="URN of SAML attribute containing the user's first name. Leave blank for default.")
attr_last_name = models.CharField(
max_length=128, blank=True, verbose_name="Last Name Attribute",
help_text="URN of SAML attribute containing the user's last name. Leave blank for default.")
attr_username = models.CharField(
max_length=128, blank=True, verbose_name="Username Hint Attribute",
help_text="URN of SAML attribute to use as a suggested username for this user. Leave blank for default.")
attr_email = models.CharField(
max_length=128, blank=True, verbose_name="Email Attribute",
help_text="URN of SAML attribute containing the user's email address[es]. Leave blank for default.")
other_settings = models.TextField(
verbose_name="Advanced settings", blank=True,
help_text=(
'For advanced use cases, enter a JSON object with additional configuration. '
'The tpa-saml backend supports only {"requiredEntitlements": ["urn:..."]} '
'which can be used to require the presence of a specific eduPersonEntitlement.'
))
def clean(self):
""" Standardize and validate fields """
super(SAMLProviderConfig, self).clean()
self.other_settings = clean_json(self.other_settings, dict)
class Meta(object):
app_label = "third_party_auth"
verbose_name = "Provider Configuration (SAML IdP)"
verbose_name_plural = "Provider Configuration (SAML IdPs)"
def get_url_params(self):
""" Get a dict of GET parameters to append to login links for this provider """
return {'idp': self.idp_slug}
def is_active_for_pipeline(self, pipeline):
""" Is this provider being used for the specified pipeline? """
return self.backend_name == pipeline['backend'] and self.idp_slug == pipeline['kwargs']['response']['idp_name']
def match_social_auth(self, social_auth):
""" Is this provider being used for this UserSocialAuth entry? """
prefix = self.idp_slug + ":"
return self.backend_name == social_auth.provider and social_auth.uid.startswith(prefix)
def get_remote_id_from_social_auth(self, social_auth):
""" Given a UserSocialAuth object, return the remote ID used by this provider. """
assert self.match_social_auth(social_auth)
# Remove the prefix from the UID
return social_auth.uid[len(self.idp_slug) + 1:]
def get_social_auth_uid(self, remote_id):
""" Get social auth uid from remote id by prepending idp_slug to the remote id """
return '{}:{}'.format(self.idp_slug, remote_id)
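# Illustrative sketch (not part of the original class): the uid round-trip for
# a hypothetical provider whose idp_slug is "ubc":
#   provider.get_social_auth_uid("jane")                   # -> "ubc:jane"
#   provider.get_remote_id_from_social_auth(social_auth)   # -> "jane"
#                       # (for a UserSocialAuth entry whose uid is "ubc:jane")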
def get_config(self):
"""
Return a SAMLIdentityProvider instance for use by SAMLAuthBackend.
Essentially this just returns the values of this object and its
associated 'SAMLProviderData' entry.
"""
if self.other_settings:
conf = json.loads(self.other_settings)
else:
conf = {}
attrs = (
'attr_user_permanent_id', 'attr_full_name', 'attr_first_name',
'attr_last_name', 'attr_username', 'attr_email', 'entity_id')
for field in attrs:
val = getattr(self, field)
if val:
conf[field] = val
# Now get the data fetched automatically from the metadata.xml:
data = SAMLProviderData.current(self.entity_id)
if not data or not data.is_valid():
log.error("No SAMLProviderData found for %s. Run 'manage.py saml pull' to fix or debug.", self.entity_id)
raise AuthNotConfigured(provider_name=self.name)
conf['x509cert'] = data.public_key
conf['url'] = data.sso_url
return SAMLIdentityProvider(self.idp_slug, **conf)
class SAMLConfiguration(ConfigurationModel):
"""
General configuration required for this edX instance to act as a SAML
Service Provider and allow users to authenticate via third party SAML
Identity Providers (IdPs)
"""
private_key = models.TextField(
help_text=(
'To generate a key pair as two files, run '
'"openssl req -new -x509 -days 3652 -nodes -out saml.crt -keyout saml.key". '
'Paste the contents of saml.key here. '
'For increased security, you can avoid storing this in your database by leaving '
'this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PRIVATE_KEY setting '
'in your instance\'s Django settings (or lms.auth.json).'
),
blank=True,
)
public_key = models.TextField(
help_text=(
'Public key certificate. '
'For increased security, you can avoid storing this in your database by leaving '
'this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PUBLIC_CERT setting '
'in your instance\'s Django settings (or lms.auth.json).'
),
blank=True,
)
entity_id = models.CharField(max_length=255, default="http://saml.example.com", verbose_name="Entity ID")
org_info_str = models.TextField(
verbose_name="Organization Info",
default='{"en-US": {"url": "http://www.example.com", "displayname": "Example Inc.", "name": "example"}}',
help_text="JSON dictionary of 'url', 'displayname', and 'name' for each language",
)
other_config_str = models.TextField(
default='{\n"SECURITY_CONFIG": {"metadataCacheDuration": 604800, "signMetadata": false}\n}',
help_text=(
"JSON object defining advanced settings that are passed on to python-saml. "
"Valid keys that can be set here include: SECURITY_CONFIG and SP_EXTRA"
),
)
class Meta(object):
app_label = "third_party_auth"
verbose_name = "SAML Configuration"
verbose_name_plural = verbose_name
def clean(self):
""" Standardize and validate fields """
super(SAMLConfiguration, self).clean()
self.org_info_str = clean_json(self.org_info_str, dict)
self.other_config_str = clean_json(self.other_config_str, dict)
self.private_key = (
self.private_key
.replace("-----BEGIN RSA PRIVATE KEY-----", "")
.replace("-----BEGIN PRIVATE KEY-----", "")
.replace("-----END RSA PRIVATE KEY-----", "")
.replace("-----END PRIVATE KEY-----", "")
.strip()
)
self.public_key = (
self.public_key
.replace("-----BEGIN CERTIFICATE-----", "")
.replace("-----END CERTIFICATE-----", "")
.strip()
)
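# Illustrative sketch (not part of the original class): clean() reduces a
# pasted PEM block to its base64 body, so a hypothetical
# "-----BEGIN CERTIFICATE-----\nMIIC...\n-----END CERTIFICATE-----"
# is stored as just "MIIC...".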
def get_setting(self, name):
""" Get the value of a setting, or raise KeyError """
if name == "ORG_INFO":
return json.loads(self.org_info_str)
if name == "SP_ENTITY_ID":
return self.entity_id
if name == "SP_PUBLIC_CERT":
if self.public_key:
return self.public_key
# To allow instances to avoid storing keys in the DB, the key pair can also be set via Django:
return getattr(settings, 'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT', '')
if name == "SP_PRIVATE_KEY":
if self.private_key:
return self.private_key
# To allow instances to avoid storing keys in the DB, the private key can also be set via Django:
return getattr(settings, 'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY', '')
other_config = json.loads(self.other_config_str)
if name in ("TECHNICAL_CONTACT", "SUPPORT_CONTACT"):
contact = {
"givenName": "{} Support".format(settings.PLATFORM_NAME),
"emailAddress": settings.TECH_SUPPORT_EMAIL
}
contact.update(other_config.get(name, {}))
return contact
return other_config[name] # SECURITY_CONFIG, SP_EXTRA, or similar extra settings
class SAMLProviderData(models.Model):
"""
Data about a SAML IdP that is fetched automatically by 'manage.py saml pull'
This data is only required during the actual authentication process.
"""
cache_timeout = 600
fetched_at = models.DateTimeField(db_index=True, null=False)
expires_at = models.DateTimeField(db_index=True, null=True)
entity_id = models.CharField(max_length=255, db_index=True) # This is the key for lookups in this table
sso_url = models.URLField(verbose_name="SSO URL")
public_key = models.TextField()
class Meta(object):
app_label = "third_party_auth"
verbose_name = "SAML Provider Data"
verbose_name_plural = verbose_name
ordering = ('-fetched_at', )
def is_valid(self):
""" Is this data valid? """
if self.expires_at and timezone.now() > self.expires_at:
return False
return bool(self.entity_id and self.sso_url and self.public_key)
is_valid.boolean = True
@classmethod
def cache_key_name(cls, entity_id):
""" Return the name of the key to use to cache the current data """
return 'configuration/{}/current/{}'.format(cls.__name__, entity_id)
@classmethod
def current(cls, entity_id):
"""
Return the active data entry, if any, otherwise None
"""
cached = cache.get(cls.cache_key_name(entity_id))
if cached is not None:
return cached
try:
current = cls.objects.filter(entity_id=entity_id).order_by('-fetched_at')[0]
except IndexError:
current = None
cache.set(cls.cache_key_name(entity_id), current, cls.cache_timeout)
return current
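# Illustrative sketch (not part of the original class): repeated lookups within
# cache_timeout (600 seconds) are served from the cache, not the database. The
# entity ID below is hypothetical.
#   data = SAMLProviderData.current("https://idp.example.com/shibboleth")  # DB query
#   data = SAMLProviderData.current("https://idp.example.com/shibboleth")  # cached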
class LTIProviderConfig(ProviderConfig):
"""
Configuration required for this edX instance to act as an LTI
Tool Provider and allow users to authenticate and be enrolled in a
course via third party LTI Tool Consumers.
"""
prefix = 'lti'
backend_name = 'lti'
icon_class = None # This provider is not visible to users
secondary = False # This provider is not visible to users
accepts_logins = False # LTI login cannot be initiated by the tool provider
KEY_FIELDS = ('lti_consumer_key', )
lti_consumer_key = models.CharField(
max_length=255,
help_text=(
'The name that the LTI Tool Consumer will use to identify itself'
)
)
lti_hostname = models.CharField(
default='localhost',
max_length=255,
help_text=(
'The domain that will be acting as the LTI consumer.'
),
db_index=True
)
lti_consumer_secret = models.CharField(
default=long_token,
max_length=255,
help_text=(
'The shared secret that the LTI Tool Consumer will use to '
'authenticate requests. Only this edX instance and this '
'tool consumer instance should know this value. '
'For increased security, you can avoid storing this in '
'your database by leaving this field blank and setting '
'SOCIAL_AUTH_LTI_CONSUMER_SECRETS = {"consumer key": "secret", ...} '
'in your instance\'s Django settings (or lms.auth.json)'
),
blank=True,
)
lti_max_timestamp_age = models.IntegerField(
default=10,
help_text=(
'The maximum age of oauth_timestamp values, in seconds.'
)
)
def match_social_auth(self, social_auth):
""" Is this provider being used for this UserSocialAuth entry? """
prefix = self.lti_consumer_key + ":"
return self.backend_name == social_auth.provider and social_auth.uid.startswith(prefix)
def get_remote_id_from_social_auth(self, social_auth):
""" Given a UserSocialAuth object, return the remote ID used by this provider. """
assert self.match_social_auth(social_auth)
# Remove the prefix from the UID
return social_auth.uid[len(self.lti_consumer_key) + 1:]
def is_active_for_pipeline(self, pipeline):
""" Is this provider being used for the specified pipeline? """
try:
return (
self.backend_name == pipeline['backend'] and
self.lti_consumer_key == pipeline['kwargs']['response'][LTI_PARAMS_KEY]['oauth_consumer_key']
)
except KeyError:
return False
def get_lti_consumer_secret(self):
""" If the LTI consumer secret is not stored in the database, check Django settings instead """
if self.lti_consumer_secret:
return self.lti_consumer_secret
return getattr(settings, 'SOCIAL_AUTH_LTI_CONSUMER_SECRETS', {}).get(self.lti_consumer_key, '')
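# Illustrative sketch (not part of the original class): with a hypothetical
# Django setting SOCIAL_AUTH_LTI_CONSUMER_SECRETS = {"my-consumer": "s3cret"},
# an LTIProviderConfig whose lti_consumer_key is "my-consumer" and whose
# lti_consumer_secret field is blank returns "s3cret" from this method.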
class Meta(object):
app_label = "third_party_auth"
verbose_name = "Provider Configuration (LTI)"
verbose_name_plural = verbose_name
class ProviderApiPermissions(models.Model):
"""
This model links OAuth2 client with provider Id.
It gives permission for an OAuth2 client to access the information under certain IdPs.
"""
client = models.ForeignKey(Client)
provider_id = models.CharField(
max_length=255,
help_text=(
'Uniquely identify a provider. This is different from backend_name.'
)
)
class Meta(object):
app_label = "third_party_auth"
verbose_name = "Provider API Permission"
verbose_name_plural = verbose_name + 's'
| agpl-3.0 |
ralphbean/ansible | lib/ansible/runner/action_plugins/include_vars.py | 138 | 2167 | # (c) 2013-2014, Benno Joy <benno@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
from ansible.utils import template
from ansible import utils
from ansible import errors
from ansible.runner.return_data import ReturnData
class ActionModule(object):
TRANSFERS_FILES = False
def __init__(self, runner):
self.runner = runner
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
if not module_args:
result = dict(failed=True, msg="No source file given")
return ReturnData(conn=conn, comm_ok=True, result=result)
source = module_args
source = template.template(self.runner.basedir, source, inject)
if '_original_file' in inject:
source = utils.path_dwim_relative(inject['_original_file'], 'vars', source, self.runner.basedir)
else:
source = utils.path_dwim(self.runner.basedir, source)
if os.path.exists(source):
data = utils.parse_yaml_from_file(source, vault_password=self.runner.vault_pass)
if data and type(data) != dict:
raise errors.AnsibleError("%s must be stored as a dictionary/hash" % source)
elif data is None:
data = {}
result = dict(ansible_facts=data)
return ReturnData(conn=conn, comm_ok=True, result=result)
else:
result = dict(failed=True, msg="Source file not found.", file=source)
return ReturnData(conn=conn, comm_ok=True, result=result)
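# Illustrative sketch (not part of the original plugin): typical playbook usage,
# with a hypothetical vars file. The parsed mapping is returned as ansible_facts,
# so its keys become variables for subsequent tasks.
#   - include_vars: "vars/{{ ansible_os_family }}.yml"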
| gpl-3.0 |
broferek/ansible | lib/ansible/modules/cloud/azure/azure_rm_gallery_info.py | 13 | 8851 | #!/usr/bin/python
#
# Copyright (c) 2019 Liu Qingyi, (@smile37773)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_gallery_info
version_added: '2.9'
short_description: Get Azure Shared Image Gallery info
description:
- Get info of Azure Shared Image Gallery.
options:
resource_group:
description:
- The name of the resource group.
type: str
name:
description:
- Resource name.
type: str
extends_documentation_fragment:
- azure
author:
- Liu Qingyi (@smile37773)
'''
EXAMPLES = '''
- name: List galleries in a subscription.
azure_rm_gallery_info:
- name: List galleries in a resource group.
azure_rm_gallery_info:
resource_group: myResourceGroup
- name: Get a gallery.
azure_rm_gallery_info:
resource_group: myResourceGroup
name: myGallery
'''
RETURN = '''
galleries:
description:
- A list of dict results where the key is the name of the gallery and the values are the info for that gallery.
returned: always
type: complex
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGallery"
name:
description:
- Resource name.
returned: always
type: str
sample: "myGallery"
location:
description:
- Resource location.
returned: always
type: str
sample: "eastus"
tags:
description:
- Resource tags.
returned: always
type: dict
sample: { "tag": "value" }
description:
description:
- This is the gallery description.
returned: always
type: str
sample: "This is the gallery description."
provisioning_state:
description:
- The current state of the gallery.
returned: always
type: str
sample: "Succeeded"
'''
import time
import json
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
except Exception:
# handled in azure_rm_common
pass
class AzureRMGalleriesInfo(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str'
),
name=dict(
type='str'
)
)
self.resource_group = None
self.name = None
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.url = None
self.status_code = [200]
self.query_parameters = {}
self.query_parameters['api-version'] = '2019-03-01'
self.header_parameters = {}
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
self.mgmt_client = None
super(AzureRMGalleriesInfo, self).__init__(self.module_arg_spec, supports_tags=False)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
base_url=self._cloud_environment.endpoints.resource_manager)
if (self.resource_group is not None and self.name is not None):
# self.results['galleries'] = self.format_item(self.get())
self.results['galleries'] = self.get()
elif (self.resource_group is not None):
# self.results['galleries'] = self.format_item(self.listbyresourcegroup())
self.results['galleries'] = self.listbyresourcegroup()
else:
# self.results['galleries'] = [self.format_item(self.list())]
self.results['galleries'] = self.list()
return self.results
def get(self):
response = None
results = {}
# prepare url
self.url = ('/subscriptions' +
'/{{ subscription_id }}' +
'/resourceGroups' +
'/{{ resource_group }}' +
'/providers' +
'/Microsoft.Compute' +
'/galleries' +
'/{{ gallery_name }}')
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
self.url = self.url.replace('{{ gallery_name }}', self.name)
try:
response = self.mgmt_client.query(self.url,
'GET',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
results = json.loads(response.text)
# self.log('Response : {0}'.format(response))
except CloudError as e:
self.log('Could not get info for the gallery: {0}'.format(e))
return self.format_item(results)
def listbyresourcegroup(self):
response = None
results = {}
# prepare url
self.url = ('/subscriptions' +
'/{{ subscription_id }}' +
'/resourceGroups' +
'/{{ resource_group }}' +
'/providers' +
'/Microsoft.Compute' +
'/galleries')
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
try:
response = self.mgmt_client.query(self.url,
'GET',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
results = json.loads(response.text)
# self.log('Response : {0}'.format(response))
except CloudError as e:
self.log('Could not list galleries in the resource group: {0}'.format(e))
return [self.format_item(x) for x in results['value']] if results['value'] else []
def list(self):
response = None
results = {}
# prepare url
self.url = ('/subscriptions' +
'/{{ subscription_id }}' +
'/providers' +
'/Microsoft.Compute' +
'/galleries')
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
try:
response = self.mgmt_client.query(self.url,
'GET',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
results = json.loads(response.text)
# self.log('Response : {0}'.format(response))
except CloudError as e:
self.log('Could not list galleries in the subscription: {0}'.format(e))
return [self.format_item(x) for x in results['value']] if results['value'] else []
def format_item(self, item):
d = {
'id': item['id'],
'name': item['name'],
'location': item['location'],
'tags': item.get('tags'),
'description': item['properties']['description'],
'provisioning_state': item['properties']['provisioningState']
}
return d
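# Illustrative sketch (not part of the original module): format_item flattens a
# hypothetical REST payload such as
#   {'id': '/subscriptions/.../galleries/myGallery', 'name': 'myGallery',
#    'location': 'eastus', 'tags': {},
#    'properties': {'description': '...', 'provisioningState': 'Succeeded'}}
# into the documented shape with top-level 'description' and
# 'provisioning_state' keys.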
def main():
AzureRMGalleriesInfo()
if __name__ == '__main__':
main()
| gpl-3.0 |
apporc/nova | nova/tests/unit/virt/libvirt/test_designer.py | 10 | 8995 | # Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.pci import utils as pci_utils
from nova import test
from nova.tests.unit import matchers
from nova.virt.libvirt import config
from nova.virt.libvirt import designer
class DesignerTestCase(test.NoDBTestCase):
def test_set_vif_bandwidth_config_no_extra_specs(self):
# Test that set_vif_bandwidth_config does not raise a KeyError when
# its second parameter has no 'extra_specs' field.
try:
# The conf will never be used, so we can pass 'None'.
# An empty dictionary is fine: all that matters is that there is no
# 'extra_specs' field.
designer.set_vif_bandwidth_config(None, {})
except KeyError as e:
self.fail('KeyError: %s' % e)
def test_set_vif_guest_frontend_config(self):
conf = config.LibvirtConfigGuestInterface()
designer.set_vif_guest_frontend_config(conf, 'fake-mac',
'fake-model', 'fake-driver',
'fake-queues')
self.assertEqual('fake-mac', conf.mac_addr)
self.assertEqual('fake-model', conf.model)
self.assertEqual('fake-driver', conf.driver_name)
self.assertEqual('fake-queues', conf.vhost_queues)
def test_set_vif_host_backend_bridge_config(self):
conf = config.LibvirtConfigGuestInterface()
designer.set_vif_host_backend_bridge_config(conf, 'fake-bridge',
'fake-tap')
self.assertEqual('bridge', conf.net_type)
self.assertEqual('fake-bridge', conf.source_dev)
self.assertEqual('fake-tap', conf.target_dev)
def test_set_vif_host_backend_ethernet_config(self):
conf = config.LibvirtConfigGuestInterface()
designer.set_vif_host_backend_ethernet_config(conf, 'fake-tap')
self.assertEqual('ethernet', conf.net_type)
self.assertEqual('fake-tap', conf.target_dev)
self.assertEqual('', conf.script)
def test_set_vif_host_backend_ovs_config(self):
conf = config.LibvirtConfigGuestInterface()
designer.set_vif_host_backend_ovs_config(conf, 'fake-bridge',
'fake-interface', 'fake-tap')
self.assertEqual('bridge', conf.net_type)
self.assertEqual('fake-bridge', conf.source_dev)
self.assertEqual('openvswitch', conf.vporttype)
self.assertEqual('fake-tap', conf.target_dev)
def test_set_vif_host_backend_802qbg_config(self):
conf = config.LibvirtConfigGuestInterface()
designer.set_vif_host_backend_802qbg_config(conf, 'fake-devname',
'fake-managerid',
'fake-typeid',
'fake-typeidversion',
'fake-instanceid',
'fake-tap')
self.assertEqual('direct', conf.net_type)
self.assertEqual('fake-devname', conf.source_dev)
self.assertEqual('vepa', conf.source_mode)
self.assertEqual('802.1Qbg', conf.vporttype)
expected = [{'key': 'managerid', 'value': 'fake-managerid'},
{'key': 'typeid', 'value': 'fake-typeid'},
{'key': 'typeidversion', 'value': 'fake-typeidversion'},
{'key': 'instanceid', 'value': 'fake-instanceid'}]
self.assertThat(expected, matchers.DictListMatches(conf.vportparams))
self.assertEqual('fake-tap', conf.target_dev)
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
return_value='fake-devname')
def test_set_vif_host_backend_802qbh_config_direct(self,
mock_pci):
conf = config.LibvirtConfigGuestInterface()
designer.set_vif_host_backend_802qbh_config(conf, 'direct',
'fake-pci-dev',
'fake-profileid',
'fake-tap')
self.assertEqual('direct', conf.net_type)
self.assertEqual('fake-devname', conf.source_dev)
self.assertEqual('passthrough', conf.source_mode)
self.assertEqual('vhost', conf.driver_name)
mock_pci.assert_called_with('fake-pci-dev')
self.assertEqual('802.1Qbh', conf.vporttype)
self.assertEqual('fake-tap', conf.target_dev)
def test_set_vif_host_backend_802qbh_config_hostdev(self):
conf = config.LibvirtConfigGuestInterface()
designer.set_vif_host_backend_802qbh_config(conf, 'hostdev',
'fake-devname',
'fake-profileid',
'fake-tap')
self.assertEqual('hostdev', conf.net_type)
self.assertEqual('fake-devname', conf.source_dev)
self.assertIsNone(conf.model)
self.assertEqual('802.1Qbh', conf.vporttype)
self.assertEqual('fake-tap', conf.target_dev)
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
return_value='fake-devname')
def test_set_vif_host_backend_hw_veb_direct(self,
mock_pci):
conf = config.LibvirtConfigGuestInterface()
designer.set_vif_host_backend_hw_veb(conf, 'direct',
'fake-pci-dev',
'fake-vlan',
'fake-tap')
self.assertEqual('direct', conf.net_type)
self.assertEqual('fake-devname', conf.source_dev)
self.assertEqual('passthrough', conf.source_mode)
self.assertEqual('vhost', conf.driver_name)
self.assertEqual('fake-tap', conf.target_dev)
mock_pci.assert_called_with('fake-pci-dev')
def test_set_vif_host_backend_hw_veb_hostdev(self):
conf = config.LibvirtConfigGuestInterface()
designer.set_vif_host_backend_hw_veb(conf, 'hostdev',
'fake-devname',
'fake-vlan',
'fake-tap')
self.assertEqual('hostdev', conf.net_type)
self.assertEqual('fake-devname', conf.source_dev)
self.assertIsNone(conf.model)
self.assertEqual('fake-vlan', conf.vlan)
self.assertEqual('fake-tap', conf.target_dev)
@mock.patch.object(pci_utils, 'get_pci_address_fields',
return_value=('fake-domain', 'fake-bus',
'fake-slot', 'fake-function'))
def test_set_vif_host_backend_ib_hostdev_config(self,
mock_pci_fields):
conf = config.LibvirtConfigGuestInterface()
designer.set_vif_host_backend_ib_hostdev_config(conf,
'fake-pci-slot')
self.assertEqual('fake-domain', conf.domain)
self.assertEqual('fake-bus', conf.bus)
self.assertEqual('fake-slot', conf.slot)
self.assertEqual('fake-function', conf.function)
mock_pci_fields.assert_called_with('fake-pci-slot')
def test_set_vif_host_backend_direct_config(self):
conf = config.LibvirtConfigGuestInterface()
designer.set_vif_host_backend_direct_config(conf, 'fake-devname',
mode="passthrough")
self.assertEqual('direct', conf.net_type)
self.assertEqual('fake-devname', conf.source_dev)
self.assertEqual('passthrough', conf.source_mode)
self.assertEqual('virtio', conf.model)
def test_set_vif_host_backend_vhostuser_config(self):
conf = config.LibvirtConfigGuestInterface()
designer.set_vif_host_backend_vhostuser_config(conf, 'fake-mode',
'fake-path')
self.assertEqual('vhostuser', conf.net_type)
self.assertEqual('unix', conf.vhostuser_type)
self.assertEqual('fake-mode', conf.vhostuser_mode)
self.assertEqual('fake-path', conf.vhostuser_path)
| apache-2.0 |
Cian47/anti_bicycle_theft | python-api/env/lib/python3.5/site-packages/pip/_vendor/requests/packages/urllib3/util/timeout.py | 1004 | 9544 | # The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()
def current_time():
"""
Retrieve the current time. This function is mocked out in unit testing.
"""
return time.time()
class Timeout(object):
""" Timeout configuration.
Timeouts can be defined as a default for a pool::
timeout = Timeout(connect=2.0, read=7.0)
http = PoolManager(timeout=timeout)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
Timeouts can be disabled by setting all the parameters to ``None``::
no_timeout = Timeout(connect=None, read=None)
response = http.request('GET', 'http://example.com/', timeout=no_timeout)
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: integer, float, or None
:param connect:
The maximum amount of time to wait for a connection attempt to a server
to succeed. Omitting the parameter will default the connect timeout to
the system default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: integer, float, or None
:param read:
The maximum amount of time to wait between consecutive
read operations for a response from the server. Omitting
the parameter will default the read timeout to the system
default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: integer, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response.
For example, Python's DNS resolver does not obey the timeout specified
on the socket. Other factors that can affect total request time include
high CPU load, high swap, the program running at a low priority level,
or other behaviors.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not trigger, even though the request will take
several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, 'connect')
self._read = self._validate_timeout(read, 'read')
self.total = self._validate_timeout(total, 'total')
self._start_connect = None
def __str__(self):
return '%s(connect=%r, read=%r, total=%r)' % (
type(self).__name__, self._connect, self._read, self.total)
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid.
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is
used to specify in error messages.
:return: The validated and casted version of the given value.
:raises ValueError: If the type is not an integer or a float, or if it
is a numeric value less than zero.
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
try:
float(value)
except (TypeError, ValueError):
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
try:
if value < 0:
raise ValueError("Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than 0." % (name, value))
except TypeError: # Python 3
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
return value
@classmethod
def from_float(cls, timeout):
""" Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect(), and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value
passed to this function.
:param timeout: The legacy timeout value.
:type timeout: integer, float, sentinel default object, or None
:return: Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
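# e.g. Timeout.from_float(3.0) behaves like Timeout(connect=3.0, read=3.0),
# mirroring the single legacy value that httplib applies to both phases.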
def clone(self):
""" Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read,
total=self.total)
def start_connect(self):
""" Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: Elapsed time.
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError("Can't get connect duration for timer "
"that has not started.")
return current_time() - self._start_connect
@property
def connect_timeout(self):
""" Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: Connect timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
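# e.g. Timeout(total=10, connect=3).connect_timeout == 3, whereas
# Timeout(total=10).connect_timeout == 10 (connect inherits the total).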
@property
def read_timeout(self):
""" Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: Value to use for the read timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (self.total is not None and
self.total is not self.DEFAULT_TIMEOUT and
self._read is not None and
self._read is not self.DEFAULT_TIMEOUT):
# In case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(),
self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
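# A minimal usage sketch of the class above (illustrative only; everything
# outside Timeout itself is assumed):
#     t = Timeout(total=10.0, connect=3.0)
#     t.start_connect()           # clock starts before the socket connect
#     ...                         # perform the connect here
#     budget = t.read_timeout     # remaining total budget after connecting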
| mit |
disqus/django-old | django/contrib/gis/tests/geoapp/test_feeds.py | 155 | 4153 | from xml.dom import minidom
from django.conf import settings
from django.contrib.sites.models import Site
from django.test import TestCase
from models import City
class GeoFeedTest(TestCase):
urls = 'django.contrib.gis.tests.geoapp.urls'
def setUp(self):
Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
self.old_Site_meta_installed = Site._meta.installed
Site._meta.installed = True
def tearDown(self):
Site._meta.installed = self.old_Site_meta_installed
def assertChildNodes(self, elem, expected):
"Taken from regressiontests/syndication/tests.py."
actual = set([n.nodeName for n in elem.childNodes])
expected = set(expected)
self.assertEqual(actual, expected)
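# For instance, an <item> whose children are <title> and <georss:point>
# yields actual == set(['title', 'georss:point']); any missing or extra
# child node fails the assertion.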
def test_geofeed_rss(self):
"Tests geographic feeds using GeoRSS over RSSv2."
# Uses `GEOSGeometry` in `item_geometry`
doc1 = minidom.parseString(self.client.get('/feeds/rss1/').content)
# Uses a 2-tuple in `item_geometry`
doc2 = minidom.parseString(self.client.get('/feeds/rss2/').content)
feed1, feed2 = doc1.firstChild, doc2.firstChild
# Making sure the box got added to the second GeoRSS feed.
self.assertChildNodes(feed2.getElementsByTagName('channel')[0],
['title', 'link', 'description', 'language',
'lastBuildDate', 'item', 'georss:box', 'atom:link']
)
# Iterating through the feeds.
for feed in [feed1, feed2]:
# Ensuring the georss namespace was added to the <rss> element.
self.assertEqual(feed.getAttribute(u'xmlns:georss'), u'http://www.georss.org/georss')
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), City.objects.count())
# Ensuring the georss element was added to each item in the feed.
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'georss:point'])
def test_geofeed_atom(self):
"Testing geographic feeds using GeoRSS over Atom."
doc1 = minidom.parseString(self.client.get('/feeds/atom1/').content)
doc2 = minidom.parseString(self.client.get('/feeds/atom2/').content)
feed1, feed2 = doc1.firstChild, doc2.firstChild
# Making sure the box got added to the second GeoRSS feed.
self.assertChildNodes(feed2, ['title', 'link', 'id', 'updated', 'entry', 'georss:box'])
for feed in [feed1, feed2]:
# Ensuring the georss namespace was added to the <feed> element.
self.assertEqual(feed.getAttribute(u'xmlns:georss'), u'http://www.georss.org/georss')
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), City.objects.count())
# Ensuring the georss element was added to each entry in the feed.
for entry in entries:
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'georss:point'])
def test_geofeed_w3c(self):
"Testing geographic feeds using W3C Geo."
doc = minidom.parseString(self.client.get('/feeds/w3cgeo1/').content)
feed = doc.firstChild
# Ensuring the geo namespace was added to the <feed> element.
self.assertEqual(feed.getAttribute(u'xmlns:geo'), u'http://www.w3.org/2003/01/geo/wgs84_pos#')
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), City.objects.count())
# Ensuring the geo:lat and geo:lon element was added to each item in the feed.
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'geo:lat', 'geo:lon'])
# Boxes and Polygons aren't allowed in W3C Geo feeds.
self.assertRaises(ValueError, self.client.get, '/feeds/w3cgeo2/') # Box in <channel>
self.assertRaises(ValueError, self.client.get, '/feeds/w3cgeo3/') # Polygons in <entry>
| bsd-3-clause |
hakanzy/fifo | example/app.py | 2 | 1136 | from fifo import FifoClient
fifo = FifoClient('redis://localhost:6379/0')
# queue up a single task
task_id = fifo.queue_task('example.tasks.multiply', (6, 7),
max_wait=2, result_timeout=30)
# wait for result for at most 5s
result = fifo.wait(task_id, timeout=5)
print(task_id, result['body'])
# queue up many tasks in one call
tasks_args = [(x - 1, x) for x in xrange(10)]
task_ids = fifo.queue_tasks('example.tasks.multiply', tasks_args,
max_wait=30, result_timeout=30)
# wait for all the results for at most 5s
results = fifo.wait_for_group(task_ids, timeout=5)
for task_id, result in results.items():
print(task_id, result['status'], result['body'])
# queue up a single task that will fail
task_id = fifo.queue_task('example.tasks.buggy', None,
max_wait=2, result_timeout=30)
# wait for result for at most 5s
result = fifo.wait(task_id, timeout=5)
print(task_id, result['status'], result['body'])
# queue up a single task
task_id = fifo.queue_task('example.tasks.multiply', (6, 7),
max_wait=2, result_timeout=0)
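# For context, a hypothetical worker module matching the task names used above
# (the real example.tasks is not shown in this file):
#
# # example/tasks.py
# def multiply(a, b):
#     return a * b
#
# def buggy(arg):
#     raise RuntimeError("intentional failure")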
| bsd-2-clause |
wikimedia/operations-debs-linux | Documentation/target/tcm_mod_builder.py | 200 | 39622 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
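# For reference, the lines collected above are function-pointer declarations
# from target_core_fabric.h, of the general form
#     int (*write_pending)(struct se_cmd *);
# which is why the generator below keys off substrings such as
# 'write_pending\)\('.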
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('aborted_task\)\(', fo):
buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
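# Example invocation (hypothetical module name; run from Documentation/target/
# inside a kernel tree, given the "../../" relative paths above):
#     python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI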
| gpl-2.0 |
mwx1993/TACTIC | src/pyasm/common/zip_util.py | 6 | 6998 | ###########################################################
#
# Copyright (c) 2011, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
import zipfile, os, codecs, datetime
class ZipUtil(object):
def zip_dir(cls, dir, zip_path=None, ignore_dirs=[], include_dirs=[]):
if not zip_path:
zip_path = "./%s.zip" % os.path.basename(dir)
print "zip_path: ", zip_path
if os.path.exists(zip_path):
os.unlink(zip_path)
# check if the folder exists
dirname = os.path.dirname(zip_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
f = codecs.open(zip_path, 'wb')
zip = zipfile.ZipFile(f, 'w', compression=zipfile.ZIP_DEFLATED)
# Probably not, this may work better in windows without compression
#zip = zipfile.ZipFile(f, 'w', compression=zipfile.ZIP_STORED)
try:
count = 0
for root, dirs, files in os.walk(dir):
for ignore_dir in ignore_dirs:
if ignore_dir in dirs:
dirs.remove(ignore_dir)
if root == dir and include_dirs:
del dirs[:]
dirs.extend(include_dirs)
continue
for file in files:
path = "%s/%s" % (root, file)
relpath = path.replace("%s/" % os.path.dirname(dir), "")
#relpath = "%s/%s" % (os.path.basename(root), file)
if os.path.islink(path):
zip_info = zipfile.ZipInfo(root)
zip_info.create_system = 3
zip_info.external_attr = 2716663808L # 0xA1ED0000L, symlink attr magic
zip_info.filename = relpath
zip.writestr(zip_info, os.readlink(path) )
else:
zip.write(path, relpath)
count += 1
finally:
zip.close()
if not count and os.path.exists(zip_path):
os.unlink(zip_path)
zip_dir = classmethod(zip_dir)
# take from: https://gist.github.com/610907
def zip_dir2(cls, dir, zip_path=None):
'''Zip up a directory and preserve symlinks and empty directories'''
if not os.path.exists(dir):
return
if not zip_path:
zip_path = "./%s.zip" % os.path.basename(dir)
if os.path.exists(zip_path):
os.unlink(zip_path)
inputDir = dir
outputZip = zip_path
zipOut = zipfile.ZipFile(outputZip, 'w', compression=zipfile.ZIP_DEFLATED)
rootLen = len(os.path.dirname(inputDir))
def _ArchiveDirectory(parentDirectory):
contents = os.listdir(parentDirectory)
#store empty directories
if not contents:
#http://www.velocityreviews.com/forums/t318840-add-empty-directory-using-zipfile.html
archiveRoot = parentDirectory[rootLen:].replace('\\', '/').lstrip('/')
zipInfo = zipfile.ZipInfo(archiveRoot+'/')
zipOut.writestr(zipInfo, '')
for item in contents:
fullPath = os.path.join(parentDirectory, item)
if os.path.isdir(fullPath) and not os.path.islink(fullPath):
_ArchiveDirectory(fullPath)
else:
archiveRoot = fullPath[rootLen:].replace('\\', '/').lstrip('/')
if os.path.islink(fullPath):
# http://www.mail-archive.com/python-list@python.org/msg34223.html
zipInfo = zipfile.ZipInfo(archiveRoot)
zipInfo.create_system = 3
# long type of hex val of '0xA1ED0000L',
# say, symlink attr magic...
zipInfo.external_attr = 2716663808L
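# (0xA1ED0000 is Unix mode 0120755 -- S_IFLNK | 0755, i.e.
# lrwxr-xr-x -- shifted into the high 16 bits, where zip
# stores Unix permissions in external_attr.)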
zipOut.writestr(zipInfo, os.readlink(fullPath))
else:
zipOut.write(fullPath, archiveRoot, zipfile.ZIP_DEFLATED)
_ArchiveDirectory(inputDir)
zipOut.close()
zip_dir2 = classmethod(zip_dir2)
def extract(cls, zip_path, base_dir=None):
# first check if this is a zip file
if not os.path.exists(zip_path):
raise Exception("Path [%s] does not exist" % zip_path)
is_zip = zipfile.is_zipfile(zip_path)
if not is_zip:
raise Exception("Path [%s] is not a zip file" % zip_path)
# TODO: make sure all paths are relative
if not base_dir:
base_dir = os.path.dirname(zip_path)
paths = []
f = codecs.open(zip_path, 'rb')
zf = zipfile.ZipFile(f, 'r')
if hasattr(zf, 'extractall'):
try:
zf.extractall(path=base_dir)
except Exception, e:
print "WARNING extracting zip: ", e
return paths # This does not fill in the paths
name_list = zf.namelist()
for file_path in name_list:
try:
data = zf.read(file_path)
except KeyError:
print 'ERROR: Did not find %s in zip file' % file_path
else:
new_path = "%s/%s" % (base_dir, file_path)
new_dir = os.path.dirname(new_path)
if not os.path.exists(new_dir):
os.makedirs(new_dir)
nf = codecs.open(new_path, 'wb')
nf.write(data)
nf.close()
paths.append(new_path)
return paths
extract = classmethod(extract)
def get_file_paths(cls, path):
paths = []
zf = zipfile.ZipFile(path)
for info in zf.infolist():
paths.append( info.filename )
return paths
get_file_paths = classmethod(get_file_paths)
def print_info(cls, path):
zf = zipfile.ZipFile(path)
for info in zf.infolist():
print info.filename
print '\tComment:\t', info.comment
print '\tModified:\t', datetime.datetime(*info.date_time)
print '\tSystem:\t\t', info.create_system, '(0 = Windows, 3 = Unix)'
print '\tZIP version:\t', info.create_version
print '\tCompressed:\t', info.compress_size, 'bytes'
print '\tUncompressed:\t', info.file_size, 'bytes'
print
print_info = classmethod(print_info)
if __name__ == '__main__':
zip = ZipUtil()
zip.zip_dir("C:/test/mp3", "C:/test/mp3.zip")
zip.extract("C:/test/mp3.zip", base_dir = "C:/test/output")
"""
zip.zip_dir("zip_this", "/home/apache/test/cow.zip")
zip.print_info("/home/apache/test/cow.zip")
zip.extract("/home/apache/test/cow.zip", "/home/apache/test2")
"""
| epl-1.0 |
aselle/tensorflow | tensorflow/contrib/rnn/python/kernel_tests/core_rnn_cell_test.py | 5 | 46032 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.contrib.rnn.python.ops import rnn_cell as contrib_rnn_cell
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training.checkpointable import util as checkpointable_utils
# pylint: disable=protected-access
Linear = core_rnn_cell._Linear # pylint: disable=invalid-name
# pylint: enable=protected-access
class RNNCellTest(test.TestCase):
def testLinear(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(1.0)):
x = array_ops.zeros([1, 2])
l = Linear([x], 2, False)([x])
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([l], {x.name: np.array([[1., 2.]])})
self.assertAllClose(res[0], [[3.0, 3.0]])
# Checks prevent you from accidentally creating a shared function.
with self.assertRaises(ValueError):
l1 = Linear([x], 2, False)([x])
# But you can create a new one in a new scope and share the variables.
with variable_scope.variable_scope("l1") as new_scope:
l1 = Linear([x], 2, False)([x])
with variable_scope.variable_scope(new_scope, reuse=True):
Linear([l1], 2, False)([l1])
self.assertEqual(len(variables_lib.trainable_variables()), 2)
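# (A rough sanity check on the numbers above, not from the original test:
# Linear concatenates its inputs and applies a single weight matrix, so with
# all weights initialized to 1.0 and no bias, [1., 2.] maps to
# [1*1 + 2*1, 1*1 + 2*1] = [3.0, 3.0].)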
def testBasicRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = rnn_cell_impl.BasicRNNCell(2)
g, _ = cell(x, m)
self.assertEqual([
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
self.assertEqual(res[0].shape, (1, 2))
def testBasicRNNCellNotTrainable(self):
with self.test_session() as sess:
def not_trainable_getter(getter, *args, **kwargs):
kwargs["trainable"] = False
return getter(*args, **kwargs)
with variable_scope.variable_scope(
"root",
initializer=init_ops.constant_initializer(0.5),
custom_getter=not_trainable_getter):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = rnn_cell_impl.BasicRNNCell(2)
g, _ = cell(x, m)
self.assertFalse(cell.trainable_variables)
self.assertEqual([
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.non_trainable_variables])
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
self.assertEqual(res[0].shape, (1, 2))
def testIndRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = contrib_rnn_cell.IndRNNCell(2)
g, _ = cell(x, m)
self.assertEqual([
"root/ind_rnn_cell/%s_w:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/ind_rnn_cell/%s_u:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/ind_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
self.assertEqual(res[0].shape, (1, 2))
def testGRUCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
g, _ = rnn_cell_impl.GRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.175991, 0.175991]])
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
# Test GRUCell with input_size != num_units.
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 2])
g, _ = rnn_cell_impl.GRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.156736, 0.156736]])
def testIndyGRUCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
g, _ = contrib_rnn_cell.IndyGRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.185265, 0.17704]])
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
# Test IndyGRUCell with input_size != num_units.
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 2])
g, _ = contrib_rnn_cell.IndyGRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.155127, 0.157328]])
def testSRUCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
g, _ = contrib_rnn_cell.SRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.509682, 0.509682]])
def testSRUCellWithDiffSize(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 2])
g, _ = contrib_rnn_cell.SRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])
})
# Smoke test
self.assertAllClose(res[0], [[0.55255556, 0.55255556]])
def testBasicLSTMCell(self):
for dtype in [dtypes.float16, dtypes.float32]:
np_dtype = dtype.as_numpy_dtype
with self.test_session(graph=ops.Graph()) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2], dtype=dtype)
m = array_ops.zeros([1, 8], dtype=dtype)
cell = rnn_cell_impl.MultiRNNCell(
[
rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
for _ in range(2)
],
state_is_tuple=False)
self.assertEqual(cell.dtype, None)
self.assertEqual("cell-0", cell._checkpoint_dependencies[0].name)
self.assertEqual("cell-1", cell._checkpoint_dependencies[1].name)
cell.get_config() # Should not throw an error
g, out_m = cell(x, m)
# Layer infers the input type.
self.assertEqual(cell.dtype, dtype.name)
expected_variable_names = [
"root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME
]
self.assertEqual(expected_variable_names,
[v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, out_m], {
x.name: np.array([[1., 1.]]),
m.name: 0.1 * np.ones([1, 8])
})
self.assertEqual(len(res), 2)
variables = variables_lib.global_variables()
self.assertEqual(expected_variable_names, [v.name for v in variables])
# The numbers in results were not calculated, this is just a
# smoke test.
self.assertAllClose(res[0], np.array(
[[0.240, 0.240]], dtype=np_dtype), 1e-2)
expected_mem = np.array(
[[0.689, 0.689, 0.448, 0.448, 0.398, 0.398, 0.240, 0.240]],
dtype=np_dtype)
self.assertAllClose(res[1], expected_mem, 1e-2)
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
# Test BasicLSTMCell with input_size != num_units.
x = array_ops.zeros([1, 3], dtype=dtype)
m = array_ops.zeros([1, 4], dtype=dtype)
g, out_m = rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m], {
x.name: np.array([[1., 1., 1.]], dtype=np_dtype),
m.name: 0.1 * np.ones([1, 4], dtype=np_dtype)
})
self.assertEqual(len(res), 2)
def testBasicLSTMCellDimension0Error(self):
"""Tests that dimension 0 in both(x and m) shape must be equal."""
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
num_units = 2
state_size = num_units * 2
batch_size = 3
input_size = 4
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size - 1, state_size])
with self.assertRaises(ValueError):
g, out_m = rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
sess.run(
[g, out_m], {
x.name: 1 * np.ones([batch_size, input_size]),
m.name: 0.1 * np.ones([batch_size - 1, state_size])
})
def testBasicLSTMCellStateSizeError(self):
"""Tests that state_size must be num_units * 2."""
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
num_units = 2
state_size = num_units * 3 # state_size must be num_units * 2
batch_size = 3
input_size = 4
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
with self.assertRaises(ValueError):
g, out_m = rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
sess.run(
[g, out_m], {
x.name: 1 * np.ones([batch_size, input_size]),
m.name: 0.1 * np.ones([batch_size, state_size])
})
def testBasicLSTMCellStateTupleType(self):
with self.test_session():
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = (array_ops.zeros([1, 2]),) * 2
m1 = (array_ops.zeros([1, 2]),) * 2
cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.BasicLSTMCell(2) for _ in range(2)],
state_is_tuple=True)
self.assertTrue(isinstance(cell.state_size, tuple))
self.assertTrue(
isinstance(cell.state_size[0], rnn_cell_impl.LSTMStateTuple))
self.assertTrue(
isinstance(cell.state_size[1], rnn_cell_impl.LSTMStateTuple))
# Pass in regular tuples
_, (out_m0, out_m1) = cell(x, (m0, m1))
self.assertTrue(isinstance(out_m0, rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(out_m1, rnn_cell_impl.LSTMStateTuple))
# Pass in LSTMStateTuples
variable_scope.get_variable_scope().reuse_variables()
zero_state = cell.zero_state(1, dtypes.float32)
self.assertTrue(isinstance(zero_state, tuple))
self.assertTrue(isinstance(zero_state[0], rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(zero_state[1], rnn_cell_impl.LSTMStateTuple))
_, (out_m0, out_m1) = cell(x, zero_state)
self.assertTrue(isinstance(out_m0, rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(out_m1, rnn_cell_impl.LSTMStateTuple))
def testBasicLSTMCellWithStateTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = array_ops.zeros([1, 4])
m1 = array_ops.zeros([1, 4])
cell = rnn_cell_impl.MultiRNNCell(
[
rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
for _ in range(2)
],
state_is_tuple=True)
g, (out_m0, out_m1) = cell(x, (m0, m1))
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m0, out_m1], {
x.name: np.array([[1., 1.]]),
m0.name: 0.1 * np.ones([1, 4]),
m1.name: 0.1 * np.ones([1, 4])
})
self.assertEqual(len(res), 3)
# The numbers in results were not calculated, this is just a smoke test.
# Note, however, these values should match the original
# version having state_is_tuple=False.
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
expected_mem0 = np.array(
[[0.68967271, 0.68967271, 0.44848421, 0.44848421]])
expected_mem1 = np.array(
[[0.39897051, 0.39897051, 0.24024698, 0.24024698]])
self.assertAllClose(res[1], expected_mem0)
self.assertAllClose(res[2], expected_mem1)
def testIndyLSTMCell(self):
for dtype in [dtypes.float16, dtypes.float32]:
np_dtype = dtype.as_numpy_dtype
with self.test_session(graph=ops.Graph()) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2], dtype=dtype)
state_0 = (array_ops.zeros([1, 2], dtype=dtype),) * 2
state_1 = (array_ops.zeros([1, 2], dtype=dtype),) * 2
cell = rnn_cell_impl.MultiRNNCell(
[contrib_rnn_cell.IndyLSTMCell(2) for _ in range(2)])
self.assertEqual(cell.dtype, None)
self.assertEqual("cell-0", cell._checkpoint_dependencies[0].name)
self.assertEqual("cell-1", cell._checkpoint_dependencies[1].name)
cell.get_config() # Should not throw an error
g, (out_state_0, out_state_1) = cell(x, (state_0, state_1))
# Layer infers the input type.
self.assertEqual(cell.dtype, dtype.name)
expected_variable_names = [
"root/multi_rnn_cell/cell_0/indy_lstm_cell/%s_w:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_0/indy_lstm_cell/%s_u:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_0/indy_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/indy_lstm_cell/%s_w:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/indy_lstm_cell/%s_u:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/indy_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME
]
self.assertEqual(expected_variable_names,
[v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_state_0, out_state_1], {
x.name: np.array([[1., 1.]]),
state_0[0].name: 0.1 * np.ones([1, 2]),
state_0[1].name: 0.1 * np.ones([1, 2]),
state_1[0].name: 0.1 * np.ones([1, 2]),
state_1[1].name: 0.1 * np.ones([1, 2]),
})
self.assertEqual(len(res), 3)
variables = variables_lib.global_variables()
self.assertEqual(expected_variable_names, [v.name for v in variables])
# Only check the range of outputs as this is just a smoke test.
self.assertAllInRange(res[0], -1.0, 1.0)
self.assertAllInRange(res[1], -1.0, 1.0)
self.assertAllInRange(res[2], -1.0, 1.0)
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
# Test IndyLSTMCell with input_size != num_units.
x = array_ops.zeros([1, 3], dtype=dtype)
state = (array_ops.zeros([1, 2], dtype=dtype),) * 2
g, out_state = contrib_rnn_cell.IndyLSTMCell(2)(x, state)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_state], {
x.name: np.array([[1., 1., 1.]], dtype=np_dtype),
state[0].name: 0.1 * np.ones([1, 2], dtype=np_dtype),
state[1].name: 0.1 * np.ones([1, 2], dtype=np_dtype),
})
self.assertEqual(len(res), 2)
def testLSTMCell(self):
with self.test_session() as sess:
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
cell = rnn_cell_impl.LSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
state_is_tuple=False)
output, state = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[output, state], {
x.name: np.array([[1., 1.], [2., 2.], [3., 3.]]),
m.name: 0.1 * np.ones((batch_size, state_size))
})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_proj))
self.assertEqual(res[1].shape, (batch_size, state_size))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
def testLSTMCellVariables(self):
with self.test_session():
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
cell = rnn_cell_impl.LSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
state_is_tuple=False)
cell(x, m) # Execute to create variables
variables = variables_lib.global_variables()
      self.assertEqual(variables[0].op.name, "root/lstm_cell/kernel")
      self.assertEqual(variables[1].op.name, "root/lstm_cell/bias")
      self.assertEqual(variables[2].op.name,
                       "root/lstm_cell/projection/kernel")
def testLSTMCellLayerNorm(self):
with self.test_session() as sess:
num_units = 2
num_proj = 3
batch_size = 1
input_size = 4
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
c = array_ops.zeros([batch_size, num_units])
h = array_ops.zeros([batch_size, num_proj])
state = rnn_cell_impl.LSTMStateTuple(c, h)
cell = contrib_rnn_cell.LayerNormLSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
layer_norm=True,
norm_gain=1.0,
norm_shift=0.0)
g, out_m = cell(x, state)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m], {
x.name: np.ones((batch_size, input_size)),
c.name: 0.1 * np.ones((batch_size, num_units)),
h.name: 0.1 * np.ones((batch_size, num_proj))
})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_proj))
self.assertEqual(res[1][0].shape, (batch_size, num_units))
self.assertEqual(res[1][1].shape, (batch_size, num_proj))
      # batch_size is 1 here, so this loop never executes; it is kept for
      # symmetry with testLSTMCell above.
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) < 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) < 1e-6)
@test_util.run_in_graph_and_eager_modes
def testWrapperCheckpointing(self):
for wrapper_type in [
rnn_cell_impl.DropoutWrapper,
rnn_cell_impl.ResidualWrapper,
lambda cell: rnn_cell_impl.MultiRNNCell([cell])]:
with self.test_session():
cell = rnn_cell_impl.BasicRNNCell(1)
wrapper = wrapper_type(cell)
wrapper(array_ops.ones([1, 1]),
state=wrapper.zero_state(batch_size=1, dtype=dtypes.float32))
self.evaluate([v.initializer for v in cell.variables])
checkpoint = checkpointable_utils.Checkpoint(wrapper=wrapper)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(cell._bias.assign([40.]))
save_path = checkpoint.save(prefix)
self.evaluate(cell._bias.assign([0.]))
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.assertAllEqual([40.], self.evaluate(cell._bias))
def testOutputProjectionWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
cell = contrib_rnn.OutputProjectionWrapper(rnn_cell_impl.GRUCell(3), 2)
g, new_m = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.231907, 0.231907]])
def testInputProjectionWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 3])
cell = contrib_rnn.InputProjectionWrapper(
rnn_cell_impl.GRUCell(3), num_proj=3)
g, new_m = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])
def testResidualWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
base_cell = rnn_cell_impl.GRUCell(3)
g, m_new = base_cell(x, m)
variable_scope.get_variable_scope().reuse_variables()
wrapper_object = rnn_cell_impl.ResidualWrapper(base_cell)
(name, dep), = wrapper_object._checkpoint_dependencies
wrapper_object.get_config() # Should not throw an error
self.assertIs(dep, base_cell)
self.assertEqual("cell", name)
g_res, m_new_res = wrapper_object(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, g_res, m_new, m_new_res], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.1, 0.1]])
})
# Residual connections
self.assertAllClose(res[1], res[0] + [1., 1., 1.])
# States are left untouched
self.assertAllClose(res[2], res[3])
def testResidualWrapperWithSlice(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 5])
m = array_ops.zeros([1, 3])
base_cell = rnn_cell_impl.GRUCell(3)
g, m_new = base_cell(x, m)
variable_scope.get_variable_scope().reuse_variables()
def residual_with_slice_fn(inp, out):
inp_sliced = array_ops.slice(inp, [0, 0], [-1, 3])
return inp_sliced + out
g_res, m_new_res = rnn_cell_impl.ResidualWrapper(
base_cell, residual_with_slice_fn)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res_g, res_g_res, res_m_new, res_m_new_res = sess.run(
[g, g_res, m_new, m_new_res], {
x: np.array([[1., 1., 1., 1., 1.]]),
m: np.array([[0.1, 0.1, 0.1]])
})
# Residual connections
self.assertAllClose(res_g_res, res_g + [1., 1., 1.])
# States are left untouched
self.assertAllClose(res_m_new, res_m_new_res)
def testDeviceWrapper(self):
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
wrapped = rnn_cell_impl.GRUCell(3)
cell = rnn_cell_impl.DeviceWrapper(wrapped, "/cpu:14159")
(name, dep), = cell._checkpoint_dependencies
cell.get_config() # Should not throw an error
self.assertIs(dep, wrapped)
self.assertEqual("cell", name)
outputs, _ = cell(x, m)
self.assertTrue("cpu:14159" in outputs.device.lower())
def _retrieve_cpu_gpu_stats(self, run_metadata):
cpu_stats = None
gpu_stats = None
step_stats = run_metadata.step_stats
for ds in step_stats.dev_stats:
if "cpu:0" in ds.device[-5:].lower():
cpu_stats = ds.node_stats
if "gpu:0" == ds.device[-5:].lower():
gpu_stats = ds.node_stats
return cpu_stats, gpu_stats
def testDeviceWrapperDynamicExecutionNodesAreAllProperlyLocated(self):
if not test.is_gpu_available():
# Can't perform this test w/o a GPU
return
gpu_dev = test.gpu_device_name()
with self.test_session(use_gpu=True) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1, 3])
cell = rnn_cell_impl.DeviceWrapper(rnn_cell_impl.GRUCell(3), gpu_dev)
with ops.device("/cpu:0"):
outputs, _ = rnn.dynamic_rnn(
cell=cell, inputs=x, dtype=dtypes.float32)
run_metadata = config_pb2.RunMetadata()
opts = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
sess.run([variables_lib.global_variables_initializer()])
_ = sess.run(outputs, options=opts, run_metadata=run_metadata)
cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
self.assertFalse([s for s in cpu_stats if "gru_cell" in s.node_name])
self.assertTrue([s for s in gpu_stats if "gru_cell" in s.node_name])
def testEmbeddingWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1], dtype=dtypes.int32)
m = array_ops.zeros([1, 2])
embedding_cell = contrib_rnn.EmbeddingWrapper(
rnn_cell_impl.GRUCell(2), embedding_classes=3, embedding_size=2)
self.assertEqual(embedding_cell.output_size, 2)
g, new_m = embedding_cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1]]),
m.name: np.array([[0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 2))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.17139, 0.17139]])
def testEmbeddingWrapperWithDynamicRnn(self):
with self.test_session() as sess:
with variable_scope.variable_scope("root"):
inputs = ops.convert_to_tensor([[[0], [0]]], dtype=dtypes.int64)
input_lengths = ops.convert_to_tensor([2], dtype=dtypes.int64)
embedding_cell = contrib_rnn.EmbeddingWrapper(
rnn_cell_impl.BasicLSTMCell(1, state_is_tuple=True),
embedding_classes=1,
embedding_size=2)
outputs, _ = rnn.dynamic_rnn(
cell=embedding_cell,
inputs=inputs,
sequence_length=input_lengths,
dtype=dtypes.float32)
sess.run([variables_lib.global_variables_initializer()])
# This will fail if output's dtype is inferred from input's.
sess.run(outputs)
def testMultiRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 4])
_, ml = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2) for _ in range(2)],
state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(ml, {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1, 0.1]])
})
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res, [[0.175991, 0.175991, 0.13248, 0.13248]])
def testMultiRNNCellWithStateTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m_bad = array_ops.zeros([1, 4])
m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2]))
# Test incorrectness of state
with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"):
rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2) for _ in range(2)],
state_is_tuple=True)(x, m_bad)
_, ml = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2) for _ in range(2)],
state_is_tuple=True)(x, m_good)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
ml, {
x.name: np.array([[1., 1.]]),
m_good[0].name: np.array([[0.1, 0.1]]),
m_good[1].name: np.array([[0.1, 0.1]])
})
# The numbers in results were not calculated, this is just a
# smoke test. However, these numbers should match those of
# the test testMultiRNNCell.
self.assertAllClose(res[0], [[0.175991, 0.175991]])
self.assertAllClose(res[1], [[0.13248, 0.13248]])
class DropoutWrapperTest(test.TestCase):
def _testDropoutWrapper(self,
batch_size=None,
time_steps=None,
parallel_iterations=None,
**kwargs):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
if batch_size is None and time_steps is None:
# 2 time steps, batch size 1, depth 3
batch_size = 1
time_steps = 2
x = constant_op.constant(
[[[2., 2., 2.]], [[1., 1., 1.]]], dtype=dtypes.float32)
m = rnn_cell_impl.LSTMStateTuple(
*[constant_op.constant([[0.1, 0.1, 0.1]], dtype=dtypes.float32
)] * 2)
else:
x = constant_op.constant(
np.random.randn(time_steps, batch_size, 3).astype(np.float32))
m = rnn_cell_impl.LSTMStateTuple(*[
constant_op.
constant([[0.1, 0.1, 0.1]] * batch_size, dtype=dtypes.float32)
] * 2)
outputs, final_state = rnn.dynamic_rnn(
cell=rnn_cell_impl.DropoutWrapper(
rnn_cell_impl.LSTMCell(3), dtype=x.dtype, **kwargs),
time_major=True,
parallel_iterations=parallel_iterations,
inputs=x,
initial_state=m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([outputs, final_state])
self.assertEqual(res[0].shape, (time_steps, batch_size, 3))
self.assertEqual(res[1].c.shape, (batch_size, 3))
self.assertEqual(res[1].h.shape, (batch_size, 3))
return res
def testWrappedCellProperty(self):
cell = rnn_cell_impl.BasicRNNCell(10)
wrapper = rnn_cell_impl.DropoutWrapper(cell)
# Github issue 15810
self.assertEqual(wrapper.wrapped_cell, cell)
def testDropoutWrapperKeepAllConstantInput(self):
keep = array_ops.ones([])
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperKeepAll(self):
keep = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperWithSeed(self):
keep_some = 0.5
random_seed.set_random_seed(2)
## Use parallel_iterations = 1 in both calls to
## _testDropoutWrapper to ensure the (per-time step) dropout is
## consistent across both calls. Otherwise the seed may not end
## up being munged consistently across both graphs.
res_standard_1 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
seed=10,
parallel_iterations=1)
# Clear away the graph and the test session (which keeps variables around)
ops.reset_default_graph()
self._ClearCachedSession()
random_seed.set_random_seed(2)
res_standard_2 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
seed=10,
parallel_iterations=1)
self.assertAllClose(res_standard_1[0], res_standard_2[0])
self.assertAllClose(res_standard_1[1].c, res_standard_2[1].c)
self.assertAllClose(res_standard_1[1].h, res_standard_2[1].h)
def testDropoutWrapperKeepNoOutput(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
res = self._testDropoutWrapper(
input_keep_prob=keep_all,
output_keep_prob=keep_none,
state_keep_prob=keep_all)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(np.zeros(res[0].shape), res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperKeepNoStateExceptLSTMCellMemory(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
# Even though we dropout state, by default DropoutWrapper never
# drops out the memory ("c") term of an LSTMStateTuple.
res = self._testDropoutWrapper(
input_keep_prob=keep_all,
output_keep_prob=keep_all,
state_keep_prob=keep_none)
true_c_state = np.array([[1.713925, 1.713925, 1.713925]], dtype=np.float32)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
self.assertAllClose(true_full_output[0], res[0][0])
# Second output is modified by zero input state
self.assertGreater(np.linalg.norm(true_full_output[1] - res[0][1]), 1e-4)
# h state has been set to zero
self.assertAllClose(np.zeros(res[1].h.shape), res[1].h)
# c state of an LSTMStateTuple is NEVER modified.
self.assertAllClose(true_c_state, res[1].c)
def testDropoutWrapperKeepNoInput(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]], [[0.895509, 0.895509, 0.895509]]],
dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
# All outputs are different because inputs are zeroed out
res = self._testDropoutWrapper(
input_keep_prob=keep_none,
output_keep_prob=keep_all,
state_keep_prob=keep_all)
self.assertGreater(np.linalg.norm(res[0] - true_full_output), 1e-4)
self.assertGreater(np.linalg.norm(res[1].h - true_full_output[1]), 1e-4)
self.assertGreater(np.linalg.norm(res[1].c - true_full_final_c), 1e-4)
def testDropoutWrapperRecurrentOutput(self):
keep_some = 0.8
keep_all = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep_all,
output_keep_prob=keep_some,
state_keep_prob=keep_all,
variational_recurrent=True,
input_size=3,
batch_size=5,
time_steps=7)
# Ensure the same dropout pattern for all time steps
output_mask = np.abs(res[0]) > 1e-6
for m in output_mask[1:]:
self.assertAllClose(output_mask[0], m)
def testDropoutWrapperRecurrentStateInputAndOutput(self):
keep_some = 0.9
res = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
variational_recurrent=True,
input_size=3,
batch_size=5,
time_steps=7)
# Smoke test for the state/input masks.
output_mask = np.abs(res[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res[1].c) > 1e-6
state_h_mask = np.abs(res[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
def testDropoutWrapperRecurrentStateInputAndOutputWithSeed(self):
keep_some = 0.9
random_seed.set_random_seed(2347)
np.random.seed(23487)
res0 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
variational_recurrent=True,
input_size=3,
batch_size=5,
time_steps=7,
seed=-234987)
ops.reset_default_graph()
self._ClearCachedSession()
random_seed.set_random_seed(2347)
np.random.seed(23487)
res1 = self._testDropoutWrapper(
input_keep_prob=keep_some,
output_keep_prob=keep_some,
state_keep_prob=keep_some,
variational_recurrent=True,
input_size=3,
batch_size=5,
time_steps=7,
seed=-234987)
output_mask = np.abs(res0[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res0[1].c) > 1e-6
state_h_mask = np.abs(res0[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
# Ensure seeded calculation is identical.
self.assertAllClose(res0[0], res1[0])
self.assertAllClose(res0[1].c, res1[1].c)
self.assertAllClose(res0[1].h, res1[1].h)
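# The two tests above depend on variational_recurrent=True sampling a single
# dropout mask per sequence and reusing it at every time step. A minimal
# NumPy sketch of that idea (illustrative only -- names, shapes and the
# inverted-dropout scaling are assumptions, not DropoutWrapper internals):
#
#   import numpy as np
#   keep_prob = 0.9
#   mask = (np.random.rand(batch_size, depth) < keep_prob) / keep_prob
#   for t in range(time_steps):
#       x[t] = x[t] * mask  # the same mask is applied at every step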
def basic_rnn_cell(inputs, state, num_units, scope=None):
if state is None:
if inputs is not None:
batch_size = inputs.get_shape()[0]
dtype = inputs.dtype
else:
batch_size = 0
dtype = dtypes.float32
init_output = array_ops.zeros(
array_ops.stack([batch_size, num_units]), dtype=dtype)
init_state = array_ops.zeros(
array_ops.stack([batch_size, num_units]), dtype=dtype)
init_output.set_shape([batch_size, num_units])
init_state.set_shape([batch_size, num_units])
return init_output, init_state
else:
with variable_scope.variable_scope(scope, "basic_rnn_cell",
[inputs, state]):
output = math_ops.tanh(
Linear([inputs, state], num_units, True)([inputs, state]))
return output, output
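# Hedged usage sketch for basic_rnn_cell (variable names are illustrative):
# a first call with state=None only builds zero-filled initial tensors, and
# later calls thread the returned state back in.
#
#   out, state = basic_rnn_cell(inputs, None, num_units=4)   # zeros
#   out, state = basic_rnn_cell(inputs, state, num_units=4)  # tanh(Linear)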
if __name__ == "__main__":
test.main()
| apache-2.0 |
caotianwei/django | django/contrib/postgres/fields/hstore.py | 172 | 2803 | import json
from django.contrib.postgres import forms, lookups
from django.contrib.postgres.fields.array import ArrayField
from django.core import exceptions
from django.db.models import Field, TextField, Transform
from django.utils import six
from django.utils.translation import ugettext_lazy as _
__all__ = ['HStoreField']
class HStoreField(Field):
empty_strings_allowed = False
description = _('Map of strings to strings')
default_error_messages = {
'not_a_string': _('The value of "%(key)s" is not a string.'),
}
def db_type(self, connection):
return 'hstore'
def get_transform(self, name):
transform = super(HStoreField, self).get_transform(name)
if transform:
return transform
return KeyTransformFactory(name)
def validate(self, value, model_instance):
super(HStoreField, self).validate(value, model_instance)
for key, val in value.items():
if not isinstance(val, six.string_types):
raise exceptions.ValidationError(
self.error_messages['not_a_string'],
code='not_a_string',
params={'key': key},
)
def to_python(self, value):
if isinstance(value, six.string_types):
value = json.loads(value)
return value
def value_to_string(self, obj):
return json.dumps(self.value_from_object(obj))
def formfield(self, **kwargs):
defaults = {
'form_class': forms.HStoreField,
}
defaults.update(kwargs)
return super(HStoreField, self).formfield(**defaults)
HStoreField.register_lookup(lookups.DataContains)
HStoreField.register_lookup(lookups.ContainedBy)
HStoreField.register_lookup(lookups.HasKey)
HStoreField.register_lookup(lookups.HasKeys)
HStoreField.register_lookup(lookups.HasAnyKeys)
class KeyTransform(Transform):
output_field = TextField()
def __init__(self, key_name, *args, **kwargs):
super(KeyTransform, self).__init__(*args, **kwargs)
self.key_name = key_name
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
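        # Note: self.key_name is interpolated directly into the SQL string
        # below; it typically comes from the developer-written lookup name
        # (e.g. data__somekey) rather than from end-user input.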
return "(%s -> '%s')" % (lhs, self.key_name), params
class KeyTransformFactory(object):
def __init__(self, key_name):
self.key_name = key_name
def __call__(self, *args, **kwargs):
return KeyTransform(self.key_name, *args, **kwargs)
@HStoreField.register_lookup
class KeysTransform(lookups.FunctionTransform):
lookup_name = 'keys'
function = 'akeys'
output_field = ArrayField(TextField())
@HStoreField.register_lookup
class ValuesTransform(lookups.FunctionTransform):
lookup_name = 'values'
function = 'avals'
output_field = ArrayField(TextField())
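# Hedged usage sketch (the model and field names are illustrative, not part
# of this module): with data = HStoreField() on a model, the transforms above
# enable lookups such as:
#   Dog.objects.filter(data__breed='collie')     # KeyTransform -> (data -> 'breed')
#   Dog.objects.filter(data__keys=['breed'])     # KeysTransform -> akeys(data)
#   Dog.objects.filter(data__values=['collie'])  # ValuesTransform -> avals(data)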
| bsd-3-clause |
mancoast/CPythonPyc_test | cpython/250_test_hashlib.py | 7 | 6564 | # Test hashlib module
#
# $Id: test_hashlib.py 39316 2005-08-21 18:45:59Z greg $
#
# Copyright (C) 2005 Gregory P. Smith (greg@electricrain.com)
# Licensed to PSF under a Contributor Agreement.
#
import hashlib
import unittest
from test import test_support
def hexstr(s):
import string
h = string.hexdigits
r = ''
for c in s:
i = ord(c)
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
return r
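# For reference, hexstr(s) produces the same lowercase hex string as
# binascii.hexlify(s) for byte strings; the manual loop keeps this test
# self-contained.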
class HashLibTestCase(unittest.TestCase):
supported_hash_names = ( 'md5', 'MD5', 'sha1', 'SHA1',
'sha224', 'SHA224', 'sha256', 'SHA256',
'sha384', 'SHA384', 'sha512', 'SHA512' )
def test_unknown_hash(self):
try:
hashlib.new('spam spam spam spam spam')
except ValueError:
pass
else:
            self.fail("hashlib didn't reject bogus hash name")
def test_hexdigest(self):
for name in self.supported_hash_names:
h = hashlib.new(name)
self.assert_(hexstr(h.digest()) == h.hexdigest())
def test_large_update(self):
aas = 'a' * 128
bees = 'b' * 127
cees = 'c' * 126
for name in self.supported_hash_names:
m1 = hashlib.new(name)
m1.update(aas)
m1.update(bees)
m1.update(cees)
m2 = hashlib.new(name)
m2.update(aas + bees + cees)
self.assertEqual(m1.digest(), m2.digest())
def check(self, name, data, digest):
# test the direct constructors
computed = getattr(hashlib, name)(data).hexdigest()
self.assert_(computed == digest)
# test the general new() interface
computed = hashlib.new(name, data).hexdigest()
self.assert_(computed == digest)
def test_case_md5_0(self):
self.check('md5', '', 'd41d8cd98f00b204e9800998ecf8427e')
def test_case_md5_1(self):
self.check('md5', 'abc', '900150983cd24fb0d6963f7d28e17f72')
def test_case_md5_2(self):
self.check('md5', 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
'd174ab98d277d9f5a5611c2c9f419d9f')
# use the three examples from Federal Information Processing Standards
# Publication 180-1, Secure Hash Standard, 1995 April 17
# http://www.itl.nist.gov/div897/pubs/fip180-1.htm
def test_case_sha1_0(self):
self.check('sha1', "",
"da39a3ee5e6b4b0d3255bfef95601890afd80709")
def test_case_sha1_1(self):
self.check('sha1', "abc",
"a9993e364706816aba3e25717850c26c9cd0d89d")
def test_case_sha1_2(self):
self.check('sha1', "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"84983e441c3bd26ebaae4aa1f95129e5e54670f1")
def test_case_sha1_3(self):
self.check('sha1', "a" * 1000000,
"34aa973cd4c4daa4f61eeb2bdbad27316534016f")
# use the examples from Federal Information Processing Standards
# Publication 180-2, Secure Hash Standard, 2002 August 1
# http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
def test_case_sha224_0(self):
self.check('sha224', "",
"d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f")
def test_case_sha224_1(self):
self.check('sha224', "abc",
"23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7")
def test_case_sha224_2(self):
self.check('sha224',
"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"75388b16512776cc5dba5da1fd890150b0c6455cb4f58b1952522525")
def test_case_sha224_3(self):
self.check('sha224', "a" * 1000000,
"20794655980c91d8bbb4c1ea97618a4bf03f42581948b2ee4ee7ad67")
def test_case_sha256_0(self):
self.check('sha256', "",
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
def test_case_sha256_1(self):
self.check('sha256', "abc",
"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad")
def test_case_sha256_2(self):
self.check('sha256',
"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1")
def test_case_sha256_3(self):
self.check('sha256', "a" * 1000000,
"cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0")
def test_case_sha384_0(self):
self.check('sha384', "",
"38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da"+
"274edebfe76f65fbd51ad2f14898b95b")
def test_case_sha384_1(self):
self.check('sha384', "abc",
"cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed"+
"8086072ba1e7cc2358baeca134c825a7")
def test_case_sha384_2(self):
self.check('sha384',
"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"+
"hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
"09330c33f71147e83d192fc782cd1b4753111b173b3b05d22fa08086e3b0f712"+
"fcc7c71a557e2db966c3e9fa91746039")
def test_case_sha384_3(self):
self.check('sha384', "a" * 1000000,
"9d0e1809716474cb086e834e310a4a1ced149e9c00f248527972cec5704c2a5b"+
"07b8b3dc38ecc4ebae97ddd87f3d8985")
def test_case_sha512_0(self):
self.check('sha512', "",
"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce"+
"47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e")
def test_case_sha512_1(self):
self.check('sha512', "abc",
"ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a"+
"2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f")
def test_case_sha512_2(self):
self.check('sha512',
"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"+
"hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
"8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018"+
"501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909")
def test_case_sha512_3(self):
self.check('sha512', "a" * 1000000,
"e718483d0ce769644e2e42c7bc15b4638e1f98b13b2044285632a803afa973eb"+
"de0ff244877ea60a4cb0432ce577c31beb009c5c2c49aa2e4eadb217ad8cc09b")
def test_main():
test_support.run_unittest(HashLibTestCase)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
procangroup/edx-platform | common/lib/xmodule/xmodule/modulestore/split_mongo/definition_lazy_loader.py | 213 | 1560 | from opaque_keys.edx.locator import DefinitionLocator
import copy
class DefinitionLazyLoader(object):
"""
A placeholder to put into an xblock in place of its definition which
when accessed knows how to get its content. Only useful if the containing
    object doesn't force access during init but waits until the client wants the
definition. Only works if the modulestore is a split mongo store.
"""
def __init__(self, modulestore, course_key, block_type, definition_id, field_converter):
"""
Simple placeholder for yet-to-be-fetched data
:param modulestore: the pymongo db connection with the definitions
:param definition_locator: the id of the record in the above to fetch
"""
self.modulestore = modulestore
self.course_key = course_key
self.definition_locator = DefinitionLocator(block_type, definition_id)
self.field_converter = field_converter
def fetch(self):
"""
Fetch the definition. Note, the caller should replace this lazy
loader pointer with the result so as not to fetch more than once
"""
# get_definition may return a cached value perhaps from another course or code path
# so, we copy the result here so that updates don't cross-pollinate nor change the cached
# value in such a way that we can't tell that the definition's been updated.
definition = self.modulestore.get_definition(self.course_key, self.definition_locator.definition_id)
return copy.deepcopy(definition)
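# Hedged caller-side sketch (names are illustrative): the code owning the
# xblock is expected to swap the loader for its result so the fetch happens
# at most once.
#
#   if isinstance(block.lazy_definition, DefinitionLazyLoader):
#       block.lazy_definition = block.lazy_definition.fetch()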
| agpl-3.0 |
int19h/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/pythonwin/pywin/Demos/app/basictimerapp.py | 6 | 6382 | # basictimerapp - a really simple timer application.
# This should be run using the command line:
# pythonwin /app demos\basictimerapp.py
import win32ui
import win32api
import win32con
import sys
from pywin.framework import app, cmdline, dlgappcore, cmdline
import timer
import time
import string
class TimerAppDialog(dlgappcore.AppDialog):
softspace=1
def __init__(self, appName = ""):
dlgappcore.AppDialog.__init__(self, win32ui.IDD_GENERAL_STATUS)
self.timerAppName = appName
self.argOff = 0
if len(self.timerAppName)==0:
if len(sys.argv)>1 and sys.argv[1][0]!='/':
self.timerAppName = sys.argv[1]
self.argOff = 1
def PreDoModal(self):
# sys.stderr = sys.stdout
pass
def ProcessArgs(self, args):
for arg in args:
if arg=="/now":
self.OnOK()
def OnInitDialog(self):
win32ui.SetProfileFileName('pytimer.ini')
self.title = win32ui.GetProfileVal(self.timerAppName, "Title", "Remote System Timer")
self.buildTimer = win32ui.GetProfileVal(self.timerAppName, "Timer", "EachMinuteIntervaler()")
self.doWork = win32ui.GetProfileVal(self.timerAppName, "Work", "DoDemoWork()")
# replace "\n" with real \n.
self.doWork = self.doWork.replace('\\n','\n')
dlgappcore.AppDialog.OnInitDialog(self)
self.SetWindowText(self.title)
self.prompt1 = self.GetDlgItem(win32ui.IDC_PROMPT1)
self.prompt2 = self.GetDlgItem(win32ui.IDC_PROMPT2)
self.prompt3 = self.GetDlgItem(win32ui.IDC_PROMPT3)
self.butOK = self.GetDlgItem(win32con.IDOK)
self.butCancel = self.GetDlgItem(win32con.IDCANCEL)
self.prompt1.SetWindowText("Python Timer App")
self.prompt2.SetWindowText("")
self.prompt3.SetWindowText("")
self.butOK.SetWindowText("Do it now")
self.butCancel.SetWindowText("Close")
self.timerManager = TimerManager(self)
self.ProcessArgs(sys.argv[self.argOff:])
self.timerManager.go()
return 1
def OnDestroy(self,msg):
dlgappcore.AppDialog.OnDestroy(self, msg)
self.timerManager.stop()
def OnOK(self):
# stop the timer, then restart after setting special boolean
self.timerManager.stop()
self.timerManager.bConnectNow = 1
self.timerManager.go()
return
# def OnCancel(self): default behaviour - cancel == close.
# return
class TimerManager:
def __init__(self, dlg):
self.dlg = dlg
self.timerId = None
self.intervaler = eval(self.dlg.buildTimer)
self.bConnectNow = 0
self.bHaveSetPrompt1 = 0
def CaptureOutput(self):
self.oldOut = sys.stdout
self.oldErr = sys.stderr
sys.stdout = sys.stderr = self
self.bHaveSetPrompt1 = 0
def ReleaseOutput(self):
sys.stdout = self.oldOut
sys.stderr = self.oldErr
def write(self, str):
s = str.strip()
if len(s):
if self.bHaveSetPrompt1:
dest = self.dlg.prompt3
else:
dest = self.dlg.prompt1
self.bHaveSetPrompt1 = 1
dest.SetWindowText(s)
def go(self):
self.OnTimer(None,None)
def stop(self):
if self.timerId: timer.kill_timer (self.timerId)
self.timerId = None
def OnTimer(self, id, timeVal):
if id: timer.kill_timer (id)
if self.intervaler.IsTime() or self.bConnectNow :
# do the work.
try:
self.dlg.SetWindowText(self.dlg.title + " - Working...")
self.dlg.butOK.EnableWindow(0)
self.dlg.butCancel.EnableWindow(0)
self.CaptureOutput()
try:
exec(self.dlg.doWork)
print("The last operation completed successfully.")
except:
t, v, tb = sys.exc_info()
str = "Failed: %s: %s" % (t, repr(v))
print(str)
self.oldErr.write(str)
tb = None # Prevent cycle
finally:
self.ReleaseOutput()
self.dlg.butOK.EnableWindow()
self.dlg.butCancel.EnableWindow()
self.dlg.SetWindowText(self.dlg.title)
else:
now = time.time()
nextTime = self.intervaler.GetNextTime()
if nextTime:
timeDiffSeconds = nextTime - now
timeDiffMinutes = int(timeDiffSeconds / 60)
timeDiffSeconds = timeDiffSeconds % 60
timeDiffHours = int(timeDiffMinutes / 60)
timeDiffMinutes = timeDiffMinutes % 60
self.dlg.prompt1.SetWindowText("Next connection due in %02d:%02d:%02d" % (timeDiffHours,timeDiffMinutes,timeDiffSeconds))
self.timerId = timer.set_timer (self.intervaler.GetWakeupInterval(), self.OnTimer)
self.bConnectNow = 0
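# Worked example of the countdown arithmetic above (assumed numbers): with
# 3725 seconds until nextTime, timeDiffMinutes = 62 and timeDiffSeconds = 5,
# then timeDiffHours = 1 and timeDiffMinutes = 2, so the prompt shows
# "Next connection due in 01:02:05".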
class TimerIntervaler:
def __init__(self):
self.nextTime = None
self.wakeUpInterval = 2000
def GetWakeupInterval(self):
return self.wakeUpInterval
def GetNextTime(self):
return self.nextTime
def IsTime(self):
now = time.time()
if self.nextTime is None:
self.nextTime = self.SetFirstTime(now)
ret = 0
if now >= self.nextTime:
ret = 1
self.nextTime = self.SetNextTime(self.nextTime, now)
# do the work.
return ret
class EachAnyIntervaler(TimerIntervaler):
def __init__(self, timeAt, timePos, timeAdd, wakeUpInterval = None):
TimerIntervaler.__init__(self)
self.timeAt = timeAt
self.timePos = timePos
self.timeAdd = timeAdd
if wakeUpInterval:
self.wakeUpInterval = wakeUpInterval
def SetFirstTime(self, now):
timeTup = time.localtime(now)
lst = []
for item in timeTup:
lst.append(item)
bAdd = timeTup[self.timePos] > self.timeAt
lst[self.timePos] = self.timeAt
for pos in range(self.timePos+1, 6):
lst[pos]=0
ret = time.mktime(tuple(lst))
if (bAdd):
ret = ret + self.timeAdd
		return ret
def SetNextTime(self, lastTime, now):
return lastTime + self.timeAdd
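# Worked example of SetFirstTime (assumed values): EachHourIntervaler(at=30)
# uses timePos 4, which addresses tm_min in the time tuple, so the seconds
# are zeroed, the minute is pinned to 30, and timeAdd (3600s) is added only
# when "now" is already past minute 30 -- i.e. the first firing is the next
# HH:30:00.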
class EachMinuteIntervaler(EachAnyIntervaler):
def __init__(self, at=0):
EachAnyIntervaler.__init__(self, at, 5, 60, 2000)
class EachHourIntervaler(EachAnyIntervaler):
def __init__(self, at=0):
EachAnyIntervaler.__init__(self, at, 4, 3600, 10000)
class EachDayIntervaler(EachAnyIntervaler):
def __init__(self,at=0):
EachAnyIntervaler.__init__(self, at, 3, 86400, 10000)
class TimerDialogApp(dlgappcore.DialogApp):
def CreateDialog(self):
return TimerAppDialog()
def DoDemoWork():
print("Doing the work...")
print("About to connect")
win32api.MessageBeep(win32con.MB_ICONASTERISK)
win32api.Sleep(2000)
print("Doing something else...")
win32api.MessageBeep(win32con.MB_ICONEXCLAMATION)
win32api.Sleep(2000)
print("More work.")
win32api.MessageBeep(win32con.MB_ICONHAND)
win32api.Sleep(2000)
print("The last bit.")
win32api.MessageBeep(win32con.MB_OK)
win32api.Sleep(2000)
app = TimerDialogApp()
def t():
t = TimerAppDialog("Test Dialog")
t.DoModal()
return t
if __name__=='__main__':
import demoutils
demoutils.NeedApp()
| apache-2.0 |
birsoyo/conan | conans/test/functional/upload_recorder_test.py | 2 | 5508 | import unittest
from datetime import datetime
from conans.client.recorder.upload_recoder import UploadRecorder
class UploadRecorderTest(unittest.TestCase):
def setUp(self):
self.recorder = UploadRecorder()
def empty_test(self):
info = self.recorder.get_info()
expected_result = {'error': False, 'uploaded': []}
self.assertEqual(expected_result, info)
def sequential_test(self):
self.recorder.add_recipe("fake/0.1@user/channel", "my_remote", "https://fake_url.com")
self.recorder.add_package("fake/0.1@user/channel", "fake_package_id")
self.recorder.add_recipe("fakefake/0.1@user/channel", "my_remote2", "https://fake_url2.com")
self.recorder.add_package("fakefake/0.1@user/channel", "fakefake_package_id1")
self.recorder.add_package("fakefake/0.1@user/channel", "fakefake_package_id2")
info = self.recorder.get_info()
expected_result_without_time = {
"error": False,
"uploaded": [
{
"recipe": {
"id": "fake/0.1@user/channel",
"remote_name": "my_remote",
"remote_url": "https://fake_url.com"
},
"packages": [
{
"id": "fake_package_id"
}
]
},
{
"recipe": {
"id": "fakefake/0.1@user/channel",
"remote_name": "my_remote2",
"remote_url": "https://fake_url2.com"
},
"packages": [
{
"id": "fakefake_package_id1"
},
{
"id": "fakefake_package_id2"
}
]
}
]
}
self._check_result(expected_result_without_time, info)
def unordered_test(self):
self.recorder.add_recipe("fake1/0.1@user/channel", "my_remote1", "https://fake_url1.com")
self.recorder.add_recipe("fake2/0.1@user/channel", "my_remote2", "https://fake_url2.com")
self.recorder.add_recipe("fake3/0.1@user/channel", "my_remote3", "https://fake_url3.com")
self.recorder.add_package("fake1/0.1@user/channel", "fake1_package_id1")
self.recorder.add_package("fake2/0.1@user/channel", "fake2_package_id1")
self.recorder.add_package("fake2/0.1@user/channel", "fake2_package_id2")
info = self.recorder.get_info()
expected_result_without_time = {
"error": False,
"uploaded": [
{
"recipe": {
"id": "fake1/0.1@user/channel",
"remote_name": "my_remote1",
"remote_url": "https://fake_url1.com"
},
"packages": [
{
"id": "fake1_package_id1"
}
]
},
{
"recipe": {
"id": "fake2/0.1@user/channel",
"remote_name": "my_remote2",
"remote_url": "https://fake_url2.com"
},
"packages": [
{
"id": "fake2_package_id1"
},
{
"id": "fake2_package_id2"
}
]
},
{
"recipe": {
"id": "fake3/0.1@user/channel",
"remote_name": "my_remote3",
"remote_url": "https://fake_url3.com"
},
"packages": [
]
}
]
}
self._check_result(expected_result_without_time, info)
    def _check_result(self, expected, result):
for i, item in enumerate(result["uploaded"]):
assert item["recipe"]["time"]
del result["uploaded"][i]["recipe"]["time"]
for j, package in enumerate(item["packages"]):
assert package["time"], datetime
del result["uploaded"][i]["packages"][j]["time"]
        self.assertEqual(expected, result)
| mit |
petrus-v/server-tools | mass_editing/wizard/__init__.py | 6 | 1076 | # -*- coding: utf-8 -*-
##############################################################################
#
# This module uses OpenERP, Open Source Management Solution Framework.
# Copyright (C) 2012-Today Serpent Consulting Services
# (<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from . import mass_editing_wizard
| agpl-3.0 |
nugget/home-assistant | homeassistant/components/tts/microsoft.py | 4 | 3963 | """
Support for the Microsoft Cognitive Services text-to-speech service.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/tts.microsoft/
"""
import logging
from http.client import HTTPException
import voluptuous as vol
from homeassistant.components.tts import Provider, PLATFORM_SCHEMA, CONF_LANG
from homeassistant.const import CONF_TYPE, CONF_API_KEY
import homeassistant.helpers.config_validation as cv
CONF_GENDER = 'gender'
CONF_OUTPUT = 'output'
CONF_RATE = 'rate'
CONF_VOLUME = 'volume'
CONF_PITCH = 'pitch'
CONF_CONTOUR = 'contour'
REQUIREMENTS = ["pycsspeechtts==1.0.2"]
_LOGGER = logging.getLogger(__name__)
SUPPORTED_LANGUAGES = [
'ar-eg', 'ar-sa', 'ca-es', 'cs-cz', 'da-dk', 'de-at', 'de-ch', 'de-de',
'el-gr', 'en-au', 'en-ca', 'en-gb', 'en-ie', 'en-in', 'en-us', 'es-es',
'es-mx', 'fi-fi', 'fr-ca', 'fr-ch', 'fr-fr', 'he-il', 'hi-in', 'hu-hu',
'id-id', 'it-it', 'ja-jp', 'ko-kr', 'nb-no', 'nl-nl', 'pl-pl', 'pt-br',
'pt-pt', 'ro-ro', 'ru-ru', 'sk-sk', 'sv-se', 'th-th', 'tr-tr', 'zh-cn',
'zh-hk', 'zh-tw',
]
GENDERS = [
'Female', 'Male',
]
DEFAULT_LANG = 'en-us'
DEFAULT_GENDER = 'Female'
DEFAULT_TYPE = 'ZiraRUS'
DEFAULT_OUTPUT = 'audio-16khz-128kbitrate-mono-mp3'
DEFAULT_RATE = 0
DEFAULT_VOLUME = 0
DEFAULT_PITCH = "default"
DEFAULT_CONTOUR = ""
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORTED_LANGUAGES),
vol.Optional(CONF_GENDER, default=DEFAULT_GENDER): vol.In(GENDERS),
vol.Optional(CONF_TYPE, default=DEFAULT_TYPE): cv.string,
vol.Optional(CONF_RATE, default=DEFAULT_RATE):
vol.All(vol.Coerce(int), vol.Range(-100, 100)),
vol.Optional(CONF_VOLUME, default=DEFAULT_VOLUME):
vol.All(vol.Coerce(int), vol.Range(-100, 100)),
vol.Optional(CONF_PITCH, default=DEFAULT_PITCH): cv.string,
vol.Optional(CONF_CONTOUR, default=DEFAULT_CONTOUR): cv.string,
})
def get_engine(hass, config):
"""Set up Microsoft speech component."""
return MicrosoftProvider(config[CONF_API_KEY], config[CONF_LANG],
config[CONF_GENDER], config[CONF_TYPE],
config[CONF_RATE], config[CONF_VOLUME],
config[CONF_PITCH], config[CONF_CONTOUR])
class MicrosoftProvider(Provider):
"""The Microsoft speech API provider."""
def __init__(self, apikey, lang, gender, ttype, rate, volume,
pitch, contour):
"""Init Microsoft TTS service."""
self._apikey = apikey
self._lang = lang
self._gender = gender
self._type = ttype
self._output = DEFAULT_OUTPUT
self._rate = "{}%".format(rate)
self._volume = "{}%".format(volume)
self._pitch = pitch
self._contour = contour
self.name = 'Microsoft'
@property
def default_language(self):
"""Return the default language."""
return self._lang
@property
def supported_languages(self):
"""Return list of supported languages."""
return SUPPORTED_LANGUAGES
def get_tts_audio(self, message, language, options=None):
"""Load TTS from Microsoft."""
if language is None:
language = self._lang
from pycsspeechtts import pycsspeechtts
try:
trans = pycsspeechtts.TTSTranslator(self._apikey)
data = trans.speak(language=language, gender=self._gender,
voiceType=self._type, output=self._output,
rate=self._rate, volume=self._volume,
pitch=self._pitch, contour=self._contour,
text=message)
except HTTPException as ex:
_LOGGER.error("Error occurred for Microsoft TTS: %s", ex)
            return (None, None)
return ("mp3", data)
| apache-2.0 |
odoo-turkiye/odoo | addons/procurement/procurement.py | 74 | 15823 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from psycopg2 import OperationalError
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
import openerp
PROCUREMENT_PRIORITIES = [('0', 'Not urgent'), ('1', 'Normal'), ('2', 'Urgent'), ('3', 'Very Urgent')]
class procurement_group(osv.osv):
'''
The procurement group class is used to group products together
when computing procurements. (tasks, physical products, ...)
    The goal is that when you have one sale order with several products,
    pulled from one or several locations, the resulting moves stay grouped
    into pickings that mirror the sale order.
    Used in: sale orders (to group delivery order lines like the SO), pull/push
    rules (to pack like the delivery order), and orderpoints (e.g. for wave
    picking all similar products together).
    Grouping happens only if the source and the destination are the same.
    Suppose you have 4 lines on a picking from Output, where 2 lines need to
    come from Input (crossdock) and 2 lines come from Stock -> Output. As the
    four procurement orders share the same group id from the SO, the move from
    Input gets a stock.picking with 2 grouped lines and the move from Stock
    gets 2 grouped lines as well.
The name is usually the name of the original document (sale order) or a
sequence computed if created manually.
'''
_name = 'procurement.group'
_description = 'Procurement Requisition'
_order = "id desc"
_columns = {
'name': fields.char('Reference', required=True),
'move_type': fields.selection([
('direct', 'Partial'), ('one', 'All at once')],
'Delivery Method', required=True),
'procurement_ids': fields.one2many('procurement.order', 'group_id', 'Procurements'),
}
_defaults = {
'name': lambda self, cr, uid, c: self.pool.get('ir.sequence').get(cr, uid, 'procurement.group') or '',
'move_type': lambda self, cr, uid, c: 'direct'
}
class procurement_rule(osv.osv):
'''
A rule describe what a procurement should do; produce, buy, move, ...
'''
_name = 'procurement.rule'
_description = "Procurement Rule"
_order = "name"
def _get_action(self, cr, uid, context=None):
return []
_columns = {
'name': fields.char('Name', required=True,
help="This field will fill the packing origin and the name of its moves"),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the rule without removing it."),
'group_propagation_option': fields.selection([('none', 'Leave Empty'), ('propagate', 'Propagate'), ('fixed', 'Fixed')], string="Propagation of Procurement Group"),
'group_id': fields.many2one('procurement.group', 'Fixed Procurement Group'),
'action': fields.selection(selection=lambda s, cr, uid, context=None: s._get_action(cr, uid, context=context),
string='Action', required=True),
'sequence': fields.integer('Sequence'),
'company_id': fields.many2one('res.company', 'Company'),
}
_defaults = {
'group_propagation_option': 'propagate',
'sequence': 20,
'active': True,
}
class procurement_order(osv.osv):
"""
Procurement Orders
"""
_name = "procurement.order"
_description = "Procurement"
_order = 'priority desc, date_planned, id asc'
_inherit = ['mail.thread']
_log_create = False
_columns = {
'name': fields.text('Description', required=True),
'origin': fields.char('Source Document',
help="Reference of the document that created this Procurement.\n"
"This is automatically completed by Odoo."),
'company_id': fields.many2one('res.company', 'Company', required=True),
        # These two fields are used for scheduling
'priority': fields.selection(PROCUREMENT_PRIORITIES, 'Priority', required=True, select=True, track_visibility='onchange'),
'date_planned': fields.datetime('Scheduled Date', required=True, select=True, track_visibility='onchange'),
'group_id': fields.many2one('procurement.group', 'Procurement Group'),
'rule_id': fields.many2one('procurement.rule', 'Rule', track_visibility='onchange', help="Chosen rule for the procurement resolution. Usually chosen by the system but can be manually set by the procurement manager to force an unusual behavior."),
'product_id': fields.many2one('product.product', 'Product', required=True, states={'confirmed': [('readonly', False)]}, readonly=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, states={'confirmed': [('readonly', False)]}, readonly=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, states={'confirmed': [('readonly', False)]}, readonly=True),
'product_uos_qty': fields.float('UoS Quantity', states={'confirmed': [('readonly', False)]}, readonly=True),
'product_uos': fields.many2one('product.uom', 'Product UoS', states={'confirmed': [('readonly', False)]}, readonly=True),
'state': fields.selection([
('cancel', 'Cancelled'),
('confirmed', 'Confirmed'),
('exception', 'Exception'),
('running', 'Running'),
('done', 'Done')
], 'Status', required=True, track_visibility='onchange', copy=False),
}
_defaults = {
'state': 'confirmed',
'priority': '1',
'date_planned': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'procurement.order', context=c)
}
def unlink(self, cr, uid, ids, context=None):
procurements = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in procurements:
if s['state'] == 'cancel':
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'),
_('Cannot delete Procurement Order(s) which are in %s state.') % s['state'])
return osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
def do_view_procurements(self, cr, uid, ids, context=None):
'''
This function returns an action that display existing procurement orders
of same procurement group of given ids.
'''
act_obj = self.pool.get('ir.actions.act_window')
action_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'procurement.do_view_procurements', raise_if_not_found=True)
result = act_obj.read(cr, uid, [action_id], context=context)[0]
group_ids = set([proc.group_id.id for proc in self.browse(cr, uid, ids, context=context) if proc.group_id])
result['domain'] = "[('group_id','in',[" + ','.join(map(str, list(group_ids))) + "])]"
return result
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
""" Finds UoM and UoS of changed product.
@param product_id: Changed id of product.
@return: Dictionary of values.
"""
if product_id:
w = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
v = {
'product_uom': w.uom_id.id,
'product_uos': w.uos_id and w.uos_id.id or w.uom_id.id
}
return {'value': v}
return {}
def get_cancel_ids(self, cr, uid, ids, context=None):
return [proc.id for proc in self.browse(cr, uid, ids, context=context) if proc.state != 'done']
def cancel(self, cr, uid, ids, context=None):
#cancel only the procurements that aren't done already
to_cancel_ids = self.get_cancel_ids(cr, uid, ids, context=context)
if to_cancel_ids:
return self.write(cr, uid, to_cancel_ids, {'state': 'cancel'}, context=context)
def reset_to_confirmed(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
def run(self, cr, uid, ids, autocommit=False, context=None):
for procurement_id in ids:
            # we intentionally do the browse under the for loop to avoid caching all ids which would be resource greedy
#and useless as we'll make a refresh later that will invalidate all the cache (and thus the next iteration
#will fetch all the ids again)
procurement = self.browse(cr, uid, procurement_id, context=context)
if procurement.state not in ("running", "done"):
try:
if self._assign(cr, uid, procurement, context=context):
res = self._run(cr, uid, procurement, context=context or {})
if res:
self.write(cr, uid, [procurement.id], {'state': 'running'}, context=context)
else:
self.write(cr, uid, [procurement.id], {'state': 'exception'}, context=context)
else:
self.message_post(cr, uid, [procurement.id], body=_('No rule matching this procurement'), context=context)
self.write(cr, uid, [procurement.id], {'state': 'exception'}, context=context)
if autocommit:
cr.commit()
except OperationalError:
if autocommit:
cr.rollback()
continue
else:
raise
return True
def check(self, cr, uid, ids, autocommit=False, context=None):
done_ids = []
for procurement in self.browse(cr, uid, ids, context=context):
try:
result = self._check(cr, uid, procurement, context=context)
if result:
done_ids.append(procurement.id)
if autocommit:
cr.commit()
except OperationalError:
if autocommit:
cr.rollback()
continue
else:
raise
if done_ids:
self.write(cr, uid, done_ids, {'state': 'done'}, context=context)
return done_ids
#
# Method to overwrite in different procurement modules
#
def _find_suitable_rule(self, cr, uid, procurement, context=None):
'''This method returns a procurement.rule that depicts what to do with the given procurement
        in order to complete its needs. It returns False if no suitable rule is found.
:param procurement: browse record
:rtype: int or False
'''
return False
def _assign(self, cr, uid, procurement, context=None):
        '''This method checks what to do with the given procurement in order to complete its needs.
It returns False if no solution is found, otherwise it stores the matching rule (if any) and
returns True.
:param procurement: browse record
:rtype: boolean
'''
#if the procurement already has a rule assigned, we keep it (it has a higher priority as it may have been chosen manually)
if procurement.rule_id:
return True
elif procurement.product_id.type != 'service':
rule_id = self._find_suitable_rule(cr, uid, procurement, context=context)
if rule_id:
self.write(cr, uid, [procurement.id], {'rule_id': rule_id}, context=context)
return True
return False
def _run(self, cr, uid, procurement, context=None):
'''This method implements the resolution of the given procurement
:param procurement: browse record
:returns: True if the resolution of the procurement was a success, False otherwise to set it in exception
'''
return True
def _check(self, cr, uid, procurement, context=None):
'''Returns True if the given procurement is fulfilled, False otherwise
:param procurement: browse record
:rtype: boolean
'''
return False
#
# Scheduler
#
    def run_scheduler(self, cr, uid, use_new_cursor=False, company_id=False, context=None):
'''
        Call the scheduler to check the procurement orders. This is intended to be done for all existing companies at
the same time, so we're running all the methods as SUPERUSER to avoid intercompany and access rights issues.
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param uid: The current user ID for security checks
@param use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement.
This is appropriate for batch jobs only.
@param context: A standard dictionary for contextual values
@return: Dictionary of values
'''
if context is None:
context = {}
try:
if use_new_cursor:
cr = openerp.registry(cr.dbname).cursor()
# Run confirmed procurements
dom = [('state', '=', 'confirmed')]
if company_id:
dom += [('company_id', '=', company_id)]
prev_ids = []
while True:
ids = self.search(cr, SUPERUSER_ID, dom, context=context)
if not ids or prev_ids == ids:
break
else:
prev_ids = ids
self.run(cr, SUPERUSER_ID, ids, autocommit=use_new_cursor, context=context)
if use_new_cursor:
cr.commit()
# Check if running procurements are done
offset = 0
dom = [('state', '=', 'running')]
if company_id:
dom += [('company_id', '=', company_id)]
prev_ids = []
while True:
ids = self.search(cr, SUPERUSER_ID, dom, offset=offset, context=context)
if not ids or prev_ids == ids:
break
else:
prev_ids = ids
self.check(cr, SUPERUSER_ID, ids, autocommit=use_new_cursor, context=context)
if use_new_cursor:
cr.commit()
finally:
if use_new_cursor:
try:
cr.close()
except Exception:
pass
return {}
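# --- Illustrative sketch (not part of this addon) ---------------------------
# How a downstream module could plug a new resolution strategy into
# procurement.order by overriding the hook methods documented above. The
# 'demo' action and this class are hypothetical; a real module would also
# extend procurement.rule._get_action() to expose the new action.
class procurement_order_demo(osv.osv):
    _inherit = 'procurement.order'

    def _find_suitable_rule(self, cr, uid, procurement, context=None):
        # Prefer a rule carrying our hypothetical action, else defer.
        rule_ids = self.pool.get('procurement.rule').search(
            cr, uid, [('action', '=', 'demo')], context=context)
        if rule_ids:
            return rule_ids[0]
        return super(procurement_order_demo, self)._find_suitable_rule(
            cr, uid, procurement, context=context)

    def _run(self, cr, uid, procurement, context=None):
        if procurement.rule_id and procurement.rule_id.action == 'demo':
            # Real resolution logic (creating moves, orders, ...) goes here.
            return True
        return super(procurement_order_demo, self)._run(
            cr, uid, procurement, context=context)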
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Acidburn0zzz/servo | etc/ci/performance/test_differ.py | 4 | 1929 | #!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import argparse
import json
parser = argparse.ArgumentParser(description="Diff between two runs of performance tests.")
parser.add_argument("file1", help="the first output json from runner")
parser.add_argument("file2", help="the second output json from runner")
args = parser.parse_args()
def load_data(filename):
with open(filename, 'r') as f:
results = {}
totals = {}
counts = {}
records = json.load(f)
for record in records:
key = record.get('testcase')
value = record.get('domComplete') - record.get('domLoading')
            totals[key] = totals.get(key, 0) + value
            counts[key] = counts.get(key, 0) + 1
results[key] = round(totals[key] / counts[key])
return results
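# Illustrative sketch (not in the original script): load_data's averaging
# applied to an in-memory example, showing the expected record shape. Two
# runs of one testcase average to (60 + 80) / 2 == 70.
def _example_average():
    records = [
        {'testcase': 'about:blank', 'domLoading': 100, 'domComplete': 160},
        {'testcase': 'about:blank', 'domLoading': 100, 'domComplete': 180},
    ]
    totals, counts, results = {}, {}, {}
    for record in records:
        key = record.get('testcase')
        value = record.get('domComplete') - record.get('domLoading')
        totals[key] = totals.get(key, 0) + value
        counts[key] = counts.get(key, 0) + 1
        results[key] = round(totals[key] / counts[key])
    assert results == {'about:blank': 70}
    return results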
data1 = load_data(args.file1)
data2 = load_data(args.file2)
keys = set(data1.keys()).union(data2.keys())
BLUE = '\033[94m'
GREEN = '\033[92m'
WARNING = '\033[93m'
END = '\033[0m'
total1 = 0
total2 = 0
def print_line(value1, value2, key):
diff = value2 - value1
change = diff / value1
color = BLUE if value1 <= value2 else GREEN
print("{}{:6} {:6} {:+6} {:+8.2%} {}.{}".format(color, value1, value2, diff, change, key, END))
for key in keys:
value1 = data1.get(key)
value2 = data2.get(key)
    if value1 and not value2:
print("{}Test {}: missing from {}.{}".format(WARNING, key, args.file2, END))
    elif value2 and not value1:
print("{}Test {}: missing from {}.{}".format(WARNING, key, args.file1, END))
elif value1 and value2:
total1 += value1
total2 += value2
print_line(value1, value2, key)
print("")
print_line(total1, total2, "TOTAL")
| mpl-2.0 |
keithroe/vtkoptix | ThirdParty/Twisted/twisted/internet/test/test_gtkreactor.py | 28 | 2861 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests to ensure all attributes of L{twisted.internet.gtkreactor} are
deprecated.
"""
import sys
from twisted.trial.unittest import TestCase
class GtkReactorDeprecation(TestCase):
"""
Tests to ensure all attributes of L{twisted.internet.gtkreactor} are
deprecated.
"""
class StubGTK:
class GDK:
INPUT_READ = None
def input_add(self, *params):
pass
class StubPyGTK:
def require(self, something):
pass
def setUp(self):
"""
Create a stub for the module 'gtk' if it does not exist, so that it can
be imported without errors or warnings.
"""
self.mods = sys.modules.copy()
sys.modules['gtk'] = self.StubGTK()
sys.modules['pygtk'] = self.StubPyGTK()
def tearDown(self):
"""
Return sys.modules to the way it was before the test.
"""
sys.modules.clear()
sys.modules.update(self.mods)
def lookForDeprecationWarning(self, testmethod, attributeName):
warningsShown = self.flushWarnings([testmethod])
self.assertEqual(len(warningsShown), 1)
self.assertIs(warningsShown[0]['category'], DeprecationWarning)
self.assertEqual(
warningsShown[0]['message'],
"twisted.internet.gtkreactor." + attributeName + " "
"was deprecated in Twisted 10.1.0: All new applications should be "
"written with gtk 2.x, which is supported by "
"twisted.internet.gtk2reactor.")
def test_gtkReactor(self):
"""
Test deprecation of L{gtkreactor.GtkReactor}
"""
from twisted.internet import gtkreactor
        gtkreactor.GtkReactor()
self.lookForDeprecationWarning(self.test_gtkReactor, "GtkReactor")
def test_portableGtkReactor(self):
"""
Test deprecation of L{gtkreactor.GtkReactor}
"""
from twisted.internet import gtkreactor
gtkreactor.PortableGtkReactor()
self.lookForDeprecationWarning(self.test_portableGtkReactor,
"PortableGtkReactor")
def test_install(self):
"""
Test deprecation of L{gtkreactor.install}
"""
from twisted.internet import gtkreactor
self.assertRaises(AssertionError, gtkreactor.install)
self.lookForDeprecationWarning(self.test_install, "install")
def test_portableInstall(self):
"""
Test deprecation of L{gtkreactor.portableInstall}
"""
from twisted.internet import gtkreactor
self.assertRaises(AssertionError, gtkreactor.portableInstall)
self.lookForDeprecationWarning(self.test_portableInstall,
"portableInstall")
| bsd-3-clause |
jkettleb/iris | lib/iris/io/format_picker.py | 1 | 11433 | # (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
A module to provide convenient file format identification through a combination of filename extension
and file based *magic* numbers.
To manage a collection of FormatSpecifications for loading::
import iris.io.format_picker as fp
import matplotlib.pyplot as plt
fagent = fp.FormatAgent()
png_spec = fp.FormatSpecification('PNG image', fp.MagicNumber(8),
0x89504E470D0A1A0A,
handler=lambda filename: plt.imread(filename),
priority=5
)
fagent.add_spec(png_spec)
To identify a specific format from a file::
handling_spec = fagent.get_spec(png_filename, open(png_filename, 'rb'))
In the example, handling_spec will now be the png_spec previously added to the agent.
Now that a specification has been found, if a handler has been given with the specification, then the file can be handled::
handler = handling_spec.handler
if handler is None:
raise ValueError('File cannot be handled.')
else:
result = handler(filename)
The calling sequence of handler is dependent on the function given in the original specification and can be customised to your project's needs.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import collections
import functools
import os
import struct
import iris.io
class FormatAgent(object):
"""
The FormatAgent class is the containing object which is responsible for identifying the format of a given file
by interrogating its children FormatSpecification instances.
Typically a FormatAgent will be created empty and then extended with the :meth:`FormatAgent.add_spec` method::
agent = FormatAgent()
agent.add_spec(NetCDF_specification)
Less commonly, this can also be written::
agent = FormatAgent(NetCDF_specification)
"""
def __init__(self, format_specs=None):
""" """
self._format_specs = list(format_specs or [])
self._format_specs.sort()
def add_spec(self, format_spec):
"""Add a FormatSpecification instance to this agent for format consideration."""
self._format_specs.append(format_spec)
self._format_specs.sort()
def __repr__(self):
return 'FormatAgent(%r)' % self._format_specs
def __str__(self):
prefix = ' * ' if len(self._format_specs) > 1 else ''
return prefix + '\n * '.join(['%s' % format_spec for format_spec in self._format_specs])
def get_spec(self, basename, buffer_obj):
"""
Pick the first FormatSpecification which can handle the given
filename and file/buffer object.
.. note::
``buffer_obj`` may be ``None`` when a seekable file handle is not
feasible (such as over the http protocol). In these cases only the
format specifications which do not require a file handle are
tested.
"""
element_cache = {}
for format_spec in self._format_specs:
# For the case where a buffer_obj is None (such as for the
# http protocol) skip any specs which require a fh - they
# don't match.
if buffer_obj is None and format_spec.file_element.requires_fh:
continue
fmt_elem = format_spec.file_element
fmt_elem_value = format_spec.file_element_value
# cache the results for each file element
if repr(fmt_elem) not in element_cache:
# N.B. File oriented as this is assuming seekable stream.
if buffer_obj is not None and buffer_obj.tell() != 0:
# reset the buffer if tell != 0
buffer_obj.seek(0)
element_cache[repr(fmt_elem)] = \
fmt_elem.get_element(basename, buffer_obj)
            # If we have a callable object, then call it and test its result, otherwise test using basic equality
if isinstance(fmt_elem_value, collections.Callable):
matches = fmt_elem_value(element_cache[repr(fmt_elem)])
elif element_cache[repr(fmt_elem)] == fmt_elem_value:
matches = True
else:
matches = False
if matches:
return format_spec
printable_values = {}
        for key, value in element_cache.items():
value = str(value)
if len(value) > 50:
value = value[:50] + '...'
printable_values[key] = value
msg = ('No format specification could be found for the given buffer.'
' File element cache:\n {}'.format(printable_values))
raise ValueError(msg)
@functools.total_ordering
class FormatSpecification(object):
"""
Provides the base class for file type definition.
Every FormatSpecification instance has a name which can be accessed with the :attr:`FormatSpecification.name` property and
a FileElement, such as filename extension or 32-bit magic number, with an associated value for format identification.
"""
def __init__(self, format_name, file_element, file_element_value,
handler=None, priority=0, constraint_aware_handler=False):
"""
Constructs a new FormatSpecification given the format_name and particular FileElements
Args:
* format_name - string name of fileformat being described
* file_element - FileElement instance of the element which identifies this FormatSpecification
* file_element_value - The value that the file_element should take if a file matches this FormatSpecification
Kwargs:
        * handler - function which will be called when the specification has been identified and is required to handle a format.
If None, then the file can still be identified but no handling can be done.
* priority - Integer giving a priority for considering this specification where higher priority means sooner consideration.
"""
if not isinstance(file_element, FileElement):
raise ValueError('file_element must be an instance of FileElement, got %r' % file_element)
self._file_element = file_element
self._file_element_value = file_element_value
self._format_name = format_name
self._handler = handler
self.priority = priority
self.constraint_aware_handler = constraint_aware_handler
def __hash__(self):
# Hashed by specification for consistent ordering in FormatAgent (including self._handler in this hash
# for example would order randomly according to object id)
return hash(self._file_element)
@property
def file_element(self):
return self._file_element
@property
def file_element_value(self):
return self._file_element_value
@property
def name(self):
"""The name of this FileFormat. (Read only)"""
return self._format_name
@property
def handler(self):
"""The handler function of this FileFormat. (Read only)"""
return self._handler
def __lt__(self, other):
if not isinstance(other, FormatSpecification):
return NotImplemented
return (-self.priority, hash(self)) < (-other.priority, hash(other))
def __eq__(self, other):
if not isinstance(other, FormatSpecification):
return NotImplemented
return self.priority == other.priority and hash(self) == hash(other)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
# N.B. loader is not always going to provide a nice repr if it is a lambda function, hence a prettier version is available in __str__
return 'FormatSpecification(%r, %r, %r, handler=%r, priority=%s)' % (self._format_name, self._file_element,
self._file_element_value, self.handler, self.priority)
def __str__(self):
return '%s%s (priority %s)' % (self.name, ' (no handler available)' if self.handler is None else '', self.priority)
class FileElement(object):
"""
Represents a specific aspect of a FileFormat which can be identified using the given element getter function.
"""
def __init__(self, requires_fh=True):
"""
Constructs a new file element, which may require a file buffer.
Kwargs:
* requires_fh - Whether this FileElement needs a file buffer.
"""
self.requires_fh = requires_fh
def get_element(self, basename, file_handle):
"""Called when identifying the element of a file that this FileElement is representing."""
raise NotImplementedError("get_element must be defined in a subclass")
def __hash__(self):
return hash(repr(self))
def __repr__(self):
return '{}()'.format(self.__class__.__name__)
class MagicNumber(FileElement):
"""A :class:`FileElement` that returns a byte sequence in the file."""
len_formats = {4: ">L", 8: ">Q"}
def __init__(self, num_bytes, offset=None):
FileElement.__init__(self)
self._num_bytes = num_bytes
self._offset = offset
def get_element(self, basename, file_handle):
if self._offset is not None:
file_handle.seek(self._offset)
bytes = file_handle.read(self._num_bytes)
fmt = self.len_formats.get(self._num_bytes)
if len(bytes) != self._num_bytes:
raise EOFError(file_handle.name)
if fmt is None:
result = bytes
else:
result = struct.unpack(fmt, bytes)[0]
return result
def __repr__(self):
return 'MagicNumber({}, {})'.format(self._num_bytes, self._offset)
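# Illustrative sketch (not part of the module): a specification recognising a
# classic NetCDF file by its first four bytes b'CDF\x01', which MagicNumber(4)
# unpacks big-endian to 0x43444601. The handler is a placeholder; a real
# loader function would be supplied instead.
def _example_netcdf_agent():
    spec = FormatSpecification(
        'NetCDF classic', MagicNumber(4), 0x43444601,
        handler=lambda filename: filename, priority=5)
    return FormatAgent([spec])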
class FileExtension(FileElement):
"""A :class:`FileElement` that returns the extension from the filename."""
def get_element(self, basename, file_handle):
return os.path.splitext(basename)[1]
class LeadingLine(FileElement):
"""A :class:`FileElement` that returns the first line from the file."""
def get_element(self, basename, file_handle):
return file_handle.readline()
class UriProtocol(FileElement):
"""
A :class:`FileElement` that returns the "scheme" and "part" from a URI,
using :func:`~iris.io.decode_uri`.
"""
def __init__(self):
FileElement.__init__(self, requires_fh=False)
def get_element(self, basename, file_handle):
return iris.io.decode_uri(basename)[0]
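# Illustrative sketch (not part of the module): dispatching on the URI scheme
# with UriProtocol, which sets requires_fh=False and therefore also works for
# get_spec(uri, None) when no seekable handle exists. Handlers here are
# placeholders.
def _example_scheme_agent():
    http_spec = FormatSpecification(
        'HTTP resource', UriProtocol(), 'http',
        handler=lambda uri: uri)
    file_spec = FormatSpecification(
        'Local file', UriProtocol(), 'file',
        handler=lambda uri: uri)
    return FormatAgent([http_spec, file_spec])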
| lgpl-3.0 |
salamer/django | tests/template_tests/filter_tests/test_iriencode.py | 388 | 1603 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import iriencode, urlencode
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class IriencodeTests(SimpleTestCase):
"""
Ensure iriencode keeps safe strings.
"""
@setup({'iriencode01': '{{ url|iriencode }}'})
def test_iriencode01(self):
output = self.engine.render_to_string('iriencode01', {'url': '?test=1&me=2'})
self.assertEqual(output, '?test=1&me=2')
@setup({'iriencode02': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
def test_iriencode02(self):
output = self.engine.render_to_string('iriencode02', {'url': '?test=1&me=2'})
self.assertEqual(output, '?test=1&me=2')
@setup({'iriencode03': '{{ url|iriencode }}'})
def test_iriencode03(self):
output = self.engine.render_to_string('iriencode03', {'url': mark_safe('?test=1&me=2')})
self.assertEqual(output, '?test=1&me=2')
@setup({'iriencode04': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
def test_iriencode04(self):
output = self.engine.render_to_string('iriencode04', {'url': mark_safe('?test=1&me=2')})
self.assertEqual(output, '?test=1&me=2')
class FunctionTests(SimpleTestCase):
def test_unicode(self):
self.assertEqual(iriencode('S\xf8r-Tr\xf8ndelag'), 'S%C3%B8r-Tr%C3%B8ndelag')
def test_urlencoded(self):
self.assertEqual(iriencode(urlencode('fran\xe7ois & jill')), 'fran%C3%A7ois%20%26%20jill')
| bsd-3-clause |
woylaski/notebook | graphic/kivy-master/kivy/uix/behaviors.py | 1 | 55531 | '''
Behaviors
=========
.. versionadded:: 1.8.0
This module implements behaviors that can be mixed with existing base widgets.
For example, if you want to add a "button" capability to an `Image`, you could
do::
class IconButton(ButtonBehavior, Image):
pass
.. note::
The behavior class must always be _before_ the widget class. If you don't
specify the inheritance in this order, the behavior will not work because
the behavior methods are overwritten by the class method listed first.
Similarly, if you combine a behavior class with a class which
requires the use of the methods also defined by the behavior class, the
resulting class may not function properly. E.g. combining a ButtonBehavior
with a Slider, both of which require the on_touch_up methods, the resulting
class will not work.
'''
__all__ = ('ButtonBehavior', 'ToggleButtonBehavior', 'DragBehavior',
'FocusBehavior', 'CompoundSelectionBehavior')
from kivy.clock import Clock
from kivy.properties import OptionProperty, ObjectProperty, NumericProperty,\
ReferenceListProperty, BooleanProperty, ListProperty, AliasProperty
from kivy.config import Config
from kivy.metrics import sp
from kivy.base import EventLoop
from kivy.logger import Logger
from functools import partial
from weakref import ref
from time import clock, time
import string
# When we are generating documentation, Config doesn't exist
_scroll_timeout = _scroll_distance = 0
_is_desktop = False
_keyboard_mode = 'system'
if Config:
_scroll_timeout = Config.getint('widgets', 'scroll_timeout')
_scroll_distance = Config.getint('widgets', 'scroll_distance')
_is_desktop = Config.getboolean('kivy', 'desktop')
_keyboard_mode = Config.get('kivy', 'keyboard_mode')
class ButtonBehavior(object):
'''Button behavior.
:Events:
`on_press`
Fired when the button is pressed.
`on_release`
Fired when the button is released (i.e. the touch/click that
pressed the button goes away).
'''
state = OptionProperty('normal', options=('normal', 'down'))
'''State of the button, must be one of 'normal' or 'down'.
The state is 'down' only when the button is currently touched/clicked,
otherwise 'normal'.
:attr:`state` is an :class:`~kivy.properties.OptionProperty`.
'''
last_touch = ObjectProperty(None)
'''Contains the last relevant touch received by the Button. This can
be used in `on_press` or `on_release` in order to know which touch
dispatched the event.
.. versionadded:: 1.8.0
:attr:`last_touch` is a :class:`~kivy.properties.ObjectProperty`,
defaults to None.
'''
MIN_STATE_TIME = 0.035
'''The minimum period of time which the widget must remain in the
`'down'` state.
:attr:`MIN_STATE_TIME` is a float.
'''
def __init__(self, **kwargs):
self.register_event_type('on_press')
self.register_event_type('on_release')
super(ButtonBehavior, self).__init__(**kwargs)
self.__state_event = None
self.__touch_time = None
self.bind(state=self.cancel_event)
def _do_press(self):
self.state = 'down'
def _do_release(self, *args):
self.state = 'normal'
def cancel_event(self, *args):
if self.__state_event:
self.__state_event.cancel()
self.__state_event = None
def on_touch_down(self, touch):
if super(ButtonBehavior, self).on_touch_down(touch):
return True
if touch.is_mouse_scrolling:
return False
if not self.collide_point(touch.x, touch.y):
return False
if self in touch.ud:
return False
touch.grab(self)
touch.ud[self] = True
self.last_touch = touch
self.__touch_time = time()
self._do_press()
self.dispatch('on_press')
return True
def on_touch_move(self, touch):
if touch.grab_current is self:
return True
if super(ButtonBehavior, self).on_touch_move(touch):
return True
return self in touch.ud
def on_touch_up(self, touch):
if touch.grab_current is not self:
return super(ButtonBehavior, self).on_touch_up(touch)
assert(self in touch.ud)
touch.ungrab(self)
self.last_touch = touch
touchtime = time() - self.__touch_time
if touchtime < self.MIN_STATE_TIME:
self.__state_event = Clock.schedule_once(
self._do_release, self.MIN_STATE_TIME - touchtime)
else:
self._do_release()
self.dispatch('on_release')
return True
def on_press(self):
pass
def on_release(self):
pass
def trigger_action(self, duration=0.1):
'''Trigger whatever action(s) have been bound to the button by calling
both the on_press and on_release callbacks.
This simulates a quick button press without using any touch events.
Duration is the length of the press in seconds. Pass 0 if you want
the action to happen instantly.
.. versionadded:: 1.8.0
'''
self._do_press()
self.dispatch('on_press')
def trigger_release(dt):
self._do_release()
self.dispatch('on_release')
if not duration:
trigger_release(0)
else:
Clock.schedule_once(trigger_release, duration)
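# --- Illustrative sketch (not part of the module) ---------------------------
# The canonical mixin usage from the module docstring, made concrete: an
# Image that behaves like a button. The source path is a placeholder.
def _example_icon_button():
    from kivy.uix.image import Image

    class IconButton(ButtonBehavior, Image):
        """An Image dispatching on_press/on_release like a Button."""

    btn = IconButton(source='icon.png')
    btn.bind(on_press=lambda instance: Logger.info('IconButton: pressed'))
    # Simulate a quick press/release without any touch event:
    btn.trigger_action(duration=0)
    return btn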
class ToggleButtonBehavior(ButtonBehavior):
'''ToggleButton behavior, see ToggleButton module documentation for more
information.
.. versionadded:: 1.8.0
'''
__groups = {}
group = ObjectProperty(None, allownone=True)
'''Group of the button. If None, no group will be used (button is
independent). If specified, :attr:`group` must be a hashable object, like
a string. Only one button in a group can be in 'down' state.
:attr:`group` is a :class:`~kivy.properties.ObjectProperty`
'''
allow_no_selection = BooleanProperty(True)
'''This specifies whether the checkbox in group allows everything to
be deselected.
    .. versionadded:: 1.9.0
    :attr:`allow_no_selection` is a :class:`~kivy.properties.BooleanProperty`
    and defaults to `True`.
'''
def __init__(self, **kwargs):
self._previous_group = None
super(ToggleButtonBehavior, self).__init__(**kwargs)
def on_group(self, *largs):
groups = ToggleButtonBehavior.__groups
if self._previous_group:
group = groups[self._previous_group]
for item in group[:]:
if item() is self:
group.remove(item)
break
group = self._previous_group = self.group
if group not in groups:
groups[group] = []
r = ref(self, ToggleButtonBehavior._clear_groups)
groups[group].append(r)
def _release_group(self, current):
if self.group is None:
return
group = self.__groups[self.group]
for item in group[:]:
widget = item()
if widget is None:
group.remove(item)
if widget is current:
continue
widget.state = 'normal'
def _do_press(self):
if (not self.allow_no_selection and
self.group and self.state == 'down'):
return
self._release_group(self)
self.state = 'normal' if self.state == 'down' else 'down'
def _do_release(self, *args):
pass
@staticmethod
def _clear_groups(wk):
# auto flush the element when the weak reference have been deleted
groups = ToggleButtonBehavior.__groups
for group in list(groups.values()):
if wk in group:
group.remove(wk)
break
@staticmethod
def get_widgets(groupname):
'''Return the widgets contained in a specific group. If the group
doesn't exist, an empty list will be returned.
.. important::
Always release the result of this method! In doubt, do::
l = ToggleButtonBehavior.get_widgets('mygroup')
# do your job
del l
.. warning::
It's possible that some widgets that you have previously
deleted are still in the list. Garbage collector might need
more elements before flushing it. The return of this method
is informative, you've been warned!
'''
groups = ToggleButtonBehavior.__groups
if groupname not in groups:
return []
return [x() for x in groups[groupname] if x()][:]
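# Illustrative sketch (not part of the module): three mutually exclusive
# toggles sharing one group; pressing one puts every other member of the
# group back into the 'normal' state.
def _example_toggle_group():
    from kivy.uix.label import Label

    class ToggleLabel(ToggleButtonBehavior, Label):
        pass

    toggles = [ToggleLabel(text=str(i), group='demo') for i in range(3)]
    toggles[0].trigger_action(duration=0)  # toggles[0] -> 'down'
    toggles[1].trigger_action(duration=0)  # toggles[1] -> 'down', 0 released
    widgets = ToggleButtonBehavior.get_widgets('demo')
    states = [w.state for w in widgets]
    del widgets  # release the list, as get_widgets' docstring advises
    return states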
class DragBehavior(object):
'''Drag behavior. When combined with a widget, dragging in the rectangle
defined by :attr:`drag_rectangle` will drag the widget.
For example, to make a popup which is draggable by its title do::
from kivy.uix.behaviors import DragBehavior
from kivy.uix.popup import Popup
class DragPopup(DragBehavior, Popup):
pass
And in .kv do::
<DragPopup>:
drag_rectangle: self.x, self.y+self._container.height, self.width,\
self.height - self._container.height
drag_timeout: 10000000
drag_distance: 0
.. versionadded:: 1.8.0
'''
drag_distance = NumericProperty(_scroll_distance)
'''Distance to move before dragging the :class:`DragBehavior`, in pixels.
As soon as the distance has been traveled, the :class:`DragBehavior` will
start to drag, and no touch event will go to children.
It is advisable that you base this value on the dpi of your target device's
screen.
:attr:`drag_distance` is a :class:`~kivy.properties.NumericProperty`,
defaults to 20 (pixels), according to the default value of scroll_distance
in user configuration.
'''
drag_timeout = NumericProperty(_scroll_timeout)
'''Timeout allowed to trigger the :attr:`drag_distance`, in milliseconds.
If the user has not moved :attr:`drag_distance` within the timeout,
dragging will be disabled, and the touch event will go to the children.
:attr:`drag_timeout` is a :class:`~kivy.properties.NumericProperty`,
defaults to 55 (milliseconds), according to the default value of
scroll_timeout in user configuration.
'''
drag_rect_x = NumericProperty(0)
'''X position of the axis aligned bounding rectangle where dragging
is allowed. In window coordinates.
:attr:`drag_rect_x` is a :class:`~kivy.properties.NumericProperty`,
defaults to 0.
'''
drag_rect_y = NumericProperty(0)
'''Y position of the axis aligned bounding rectangle where dragging
is allowed. In window coordinates.
    :attr:`drag_rect_y` is a :class:`~kivy.properties.NumericProperty`,
defaults to 0.
'''
drag_rect_width = NumericProperty(100)
'''Width of the axis aligned bounding rectangle where dragging is allowed.
:attr:`drag_rect_width` is a :class:`~kivy.properties.NumericProperty`,
defaults to 100.
'''
drag_rect_height = NumericProperty(100)
'''Height of the axis aligned bounding rectangle where dragging is allowed.
:attr:`drag_rect_height` is a :class:`~kivy.properties.NumericProperty`,
defaults to 100.
'''
drag_rectangle = ReferenceListProperty(drag_rect_x, drag_rect_y,
drag_rect_width, drag_rect_height)
'''Position and size of the axis aligned bounding rectangle where dragging
is allowed.
:attr:`drag_rectangle` is a :class:`~kivy.properties.ReferenceListProperty`
of (:attr:`drag_rect_x`, :attr:`drag_rect_y`, :attr:`drag_rect_width`,
:attr:`drag_rect_height`) properties.
'''
def __init__(self, **kwargs):
self._drag_touch = None
super(DragBehavior, self).__init__(**kwargs)
def _get_uid(self, prefix='sv'):
return '{0}.{1}'.format(prefix, self.uid)
def on_touch_down(self, touch):
xx, yy, w, h = self.drag_rectangle
x, y = touch.pos
if not self.collide_point(x, y):
touch.ud[self._get_uid('svavoid')] = True
return super(DragBehavior, self).on_touch_down(touch)
if self._drag_touch or ('button' in touch.profile and
touch.button.startswith('scroll')) or\
not ((xx < x <= xx + w) and (yy < y <= yy + h)):
return super(DragBehavior, self).on_touch_down(touch)
# no mouse scrolling, so the user is going to drag with this touch.
self._drag_touch = touch
uid = self._get_uid()
touch.grab(self)
touch.ud[uid] = {
'mode': 'unknown',
'dx': 0,
'dy': 0}
Clock.schedule_once(self._change_touch_mode,
self.drag_timeout / 1000.)
return True
def on_touch_move(self, touch):
if self._get_uid('svavoid') in touch.ud or\
self._drag_touch is not touch:
return super(DragBehavior, self).on_touch_move(touch) or\
self._get_uid() in touch.ud
if touch.grab_current is not self:
return True
uid = self._get_uid()
ud = touch.ud[uid]
mode = ud['mode']
if mode == 'unknown':
ud['dx'] += abs(touch.dx)
ud['dy'] += abs(touch.dy)
if ud['dx'] > sp(self.drag_distance):
mode = 'drag'
if ud['dy'] > sp(self.drag_distance):
mode = 'drag'
ud['mode'] = mode
if mode == 'drag':
self.x += touch.dx
self.y += touch.dy
return True
def on_touch_up(self, touch):
if self._get_uid('svavoid') in touch.ud:
return super(DragBehavior, self).on_touch_up(touch)
if self._drag_touch and self in [x() for x in touch.grab_list]:
touch.ungrab(self)
self._drag_touch = None
ud = touch.ud[self._get_uid()]
if ud['mode'] == 'unknown':
super(DragBehavior, self).on_touch_down(touch)
Clock.schedule_once(partial(self._do_touch_up, touch), .1)
else:
if self._drag_touch is not touch:
super(DragBehavior, self).on_touch_up(touch)
return self._get_uid() in touch.ud
def _do_touch_up(self, touch, *largs):
super(DragBehavior, self).on_touch_up(touch)
# don't forget about grab event!
for x in touch.grab_list[:]:
touch.grab_list.remove(x)
x = x()
if not x:
continue
touch.grab_current = x
super(DragBehavior, self).on_touch_up(touch)
touch.grab_current = None
def _change_touch_mode(self, *largs):
if not self._drag_touch:
return
uid = self._get_uid()
touch = self._drag_touch
ud = touch.ud[uid]
if ud['mode'] != 'unknown':
return
touch.ungrab(self)
self._drag_touch = None
super(DragBehavior, self).on_touch_down(touch)
return
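# Illustrative sketch (not part of the module): an Image draggable by any
# point of its own surface, keeping drag_rectangle glued to the widget's
# geometry as it moves. The source path is a placeholder.
def _example_drag_image():
    from kivy.uix.image import Image

    class DragImage(DragBehavior, Image):
        def __init__(self, **kwargs):
            super(DragImage, self).__init__(**kwargs)
            self.bind(pos=self._sync_drag_rect, size=self._sync_drag_rect)
            self._sync_drag_rect()

        def _sync_drag_rect(self, *args):
            # Allow dragging from anywhere inside the widget itself.
            self.drag_rectangle = [self.x, self.y, self.width, self.height]

    return DragImage(source='photo.png')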
class FocusBehavior(object):
'''Implements keyboard focus behavior. When combined with other
FocusBehavior widgets it allows one to cycle focus among them by pressing
tab. In addition, upon gaining focus the instance will automatically
receive keyboard input.
    Focus, very different from selection, is intimately tied to the keyboard;
each keyboard can focus on zero or one widgets, and each widget can only
have the focus of one keyboard. However, multiple keyboards can focus
simultaneously on different widgets. When escape is hit, the widget having
the focus of that keyboard will de-focus.
In essence, focus is implemented as a doubly linked list, where each
node holds a (weak) reference to the instance before it and after it,
as visualized when cycling through the nodes using tab (forward) or
shift+tab (backward). If previous or next widget is not specified,
:attr:`focus_next` and :attr:`focus_previous` defaults to `None`,
which means that the children list and parents are walked to find
the next focusable widget, unless :attr:`focus_next` or
:attr:`focus_previous` is set to the `StopIteration` class, in which case
focus stops there.
For example, to cycle focus between :class:`~kivy.uix.button.Button`
elements of a :class:`~kivy.uix.gridlayout.GridLayout`::
class FocusButton(FocusBehavior, Button):
pass
grid = GridLayout(cols=4)
for i in range(40):
grid.add_widget(FocusButton(text=str(i)))
# clicking on a widget will activate focus, and tab can now be used
# to cycle through
.. versionadded:: 1.9.0
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
'''
_requested_keyboard = False
_keyboard = ObjectProperty(None, allownone=True)
_keyboards = {}
ignored_touch = []
'''A list of touches that should not be used to defocus. After on_touch_up,
every touch that is not in :attr:`ignored_touch` will defocus all the
focused widgets, if, the config keyboard mode is not multi. Touches on
focusable widgets that were used to focus are automatically added here.
Example usage::
class Unfocusable(Widget):
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
FocusBehavior.ignored_touch.append(touch)
Notice that you need to access this as class, not instance variable.
'''
def _set_keyboard(self, value):
focus = self.focus
keyboard = self._keyboard
keyboards = FocusBehavior._keyboards
if keyboard:
self.focus = False # this'll unbind
if self._keyboard: # remove assigned keyboard from dict
del keyboards[keyboard]
        if value and value not in keyboards:
keyboards[value] = None
self._keyboard = value
self.focus = focus
def _get_keyboard(self):
return self._keyboard
keyboard = AliasProperty(_get_keyboard, _set_keyboard,
bind=('_keyboard', ))
'''The keyboard to bind, or bound to the widget when focused.
When None, a keyboard is requested and released whenever the widget comes
into and out of focus. If not None, it must be a keyboard, which gets
bound and unbound from the widget whenever it's in or out of focus. It is
useful only when more than one keyboard is available, so it is recommended
to be set to None when only one keyboard is available
    If more than one keyboard is available, whenever an instance gets focused
    a new keyboard will be requested if :attr:`keyboard` is None. Unless the
    other instances lose focus (e.g. if tab was used), a new keyboard will
    appear. When this is
undesired, the keyboard property can be used. For example, if there are
two users with two keyboards, then each keyboard can be assigned to
different groups of instances of FocusBehavior, ensuring that within
each group, only one FocusBehavior will have focus, and will receive input
from the correct keyboard. see `keyboard_mode` in :mod:`~kivy.config` for
information on the keyboard modes.
:attr:`keyboard` is a :class:`~kivy.properties.AliasProperty`, defaults to
None.
.. note::
When Config's `keyboard_mode` is multi, each new touch is considered
a touch by a different user and will focus (if clicked on a
focusable) with a new keyboard. Already focused elements will not lose
    their focus (even if clicked on an unfocusable).
.. note:
If the keyboard property is set, that keyboard will be used when the
instance gets focused. If widgets with different keyboards are linked
through :attr:`focus_next` and :attr:`focus_previous`, then as they are
tabbed through, different keyboards will become active. Therefore,
typically it's undesirable to link instances which are assigned
different keyboards.
.. note:
When an instance has focus, setting keyboard to None will remove the
current keyboard, but will then try to get a keyboard back. It is
better to set :attr:`focus` to False.
.. warning:
When assigning a keyboard, the keyboard must not be released while
it is still assigned to an instance. Similarly, the keyboard created
by the instance on focus and assigned to :attr:`keyboard` if None,
will be released by the instance when the instance loses focus.
Therefore, it is not safe to assign this keyboard to another instance's
:attr:`keyboard`.
'''
is_focusable = BooleanProperty(_is_desktop)
'''Whether the instance can become focused. If focused, it'll lose focus
when set to False.
:attr:`is_focusable` is a :class:`~kivy.properties.BooleanProperty`,
defaults to True on a desktop (i.e. desktop is True in
:mod:`~kivy.config`), False otherwise.
'''
focus = BooleanProperty(False)
'''Whether the instance currently has focus.
Setting it to True, will bind to and/or request the keyboard, and input
will be forwarded to the instance. Setting it to False, will unbind
and/or release the keyboard. For a given keyboard, only one widget can
have its focus, so focusing one will automatically unfocus the other
instance holding its focus.
:attr:`focus` is a :class:`~kivy.properties.BooleanProperty`, defaults to
False.
'''
focused = focus
'''An alias of :attr:`focus`.
:attr:`focused` is a :class:`~kivy.properties.BooleanProperty`, defaults to
False.
.. warning::
:attr:`focused` is an alias of :attr:`focus` and will be removed in
2.0.0.
'''
def _set_on_focus_next(self, instance, value):
        ''' If changing this code, ensure the following does not create an infinite loop:
widget.focus_next = widget
widget.focus_previous = widget
widget.focus_previous = widget2
'''
next = self._old_focus_next
if next is value: # prevent infinite loop
return
if isinstance(next, FocusBehavior):
next.focus_previous = None
self._old_focus_next = value
if value is None or value is StopIteration:
return
if not isinstance(value, FocusBehavior):
raise ValueError('focus_next accepts only objects based'
' on FocusBehavior, or the StopIteration class.')
value.focus_previous = self
focus_next = ObjectProperty(None, allownone=True)
'''The :class:`FocusBehavior` instance to acquire focus when
tab is pressed when this instance has focus, if not `None` or
`'StopIteration'`.
When tab is pressed, focus cycles through all the :class:`FocusBehavior`
widgets that are linked through :attr:`focus_next` and are focusable. If
:attr:`focus_next` is `None`, it instead walks the children lists to find
the next focusable widget. Finally, if :attr:`focus_next` is
the `StopIteration` class, focus won't move forward, but end here.
.. note:
Setting :attr:`focus_next` automatically sets :attr:`focus_previous`
of the other instance to point to this instance, if not None or
`StopIteration`. Similarly, if it wasn't None or `StopIteration`, it
also sets the :attr:`focus_previous` property of the instance
previously in :attr:`focus_next` to `None`. Therefore, it is only
required to set one side of the :attr:`focus_previous`,
:attr:`focus_next`, links since the other side will be set
automatically.
:attr:`focus_next` is a :class:`~kivy.properties.ObjectProperty`, defaults
to `None`.
'''
def _set_on_focus_previous(self, instance, value):
prev = self._old_focus_previous
if prev is value:
return
if isinstance(prev, FocusBehavior):
prev.focus_next = None
self._old_focus_previous = value
if value is None or value is StopIteration:
return
if not isinstance(value, FocusBehavior):
raise ValueError('focus_previous accepts only objects based'
' on FocusBehavior, or the StopIteration class.')
value.focus_next = self
focus_previous = ObjectProperty(None, allownone=True)
'''The :class:`FocusBehavior` instance to acquire focus when
shift+tab is pressed on this instance, if not None or `StopIteration`.
When shift+tab is pressed, focus cycles through all the
:class:`FocusBehavior` widgets that are linked through
:attr:`focus_previous` and are focusable. If :attr:`focus_previous` is
    `None`, it instead walks the children tree to find the
previous focusable widget. Finally, if :attr:`focus_previous` is the
`StopIteration` class, focus won't move backward, but end here.
.. note:
Setting :attr:`focus_previous` automatically sets :attr:`focus_next`
of the other instance to point to this instance, if not None or
`StopIteration`. Similarly, if it wasn't None or `StopIteration`, it
also sets the :attr:`focus_next` property of the instance previously in
:attr:`focus_previous` to `None`. Therefore, it is only required
to set one side of the :attr:`focus_previous`, :attr:`focus_next`,
links since the other side will be set automatically.
:attr:`focus_previous` is a :class:`~kivy.properties.ObjectProperty`,
defaults to `None`.
'''
keyboard_mode = OptionProperty('auto', options=('auto', 'managed'))
'''How the keyboard visibility should be managed (auto will have standard
behaviour to show/hide on focus, managed requires setting keyboard_visible
manually, or calling the helper functions ``show_keyboard()``
and ``hide_keyboard()``.
:attr:`keyboard_mode` is an :class:`~kivy.properties.OptionsProperty` and
defaults to 'auto'. Can be one of 'auto' or 'managed'.
'''
input_type = OptionProperty('text', options=('text', 'number', 'url',
'mail', 'datetime', 'tel',
'address'))
'''The kind of input keyboard to request.
.. versionadded:: 1.8.0
:attr:`input_type` is an :class:`~kivy.properties.OptionsProperty` and
defaults to 'text'. Can be one of 'text', 'number', 'url', 'mail',
'datetime', 'tel', 'address'.
'''
unfocus_on_touch = BooleanProperty(_keyboard_mode not in
('multi', 'systemandmulti'))
    '''Whether an instance should lose focus when clicked outside the instance.
When a user clicks on a widget that is focus aware and shares the same
keyboard as the this widget (which in the case of only one keyboard, are
all focus aware widgets), then as the other widgets gains focus, this
widget loses focus. In addition to that, if this property is `True`,
    clicking on any widget other than this widget will remove focus from this
widget.
:attr:`unfocus_on_touch` is a :class:`~kivy.properties.BooleanProperty`,
defaults to `False` if the `keyboard_mode` in :attr:`~kivy.config.Config`
is `'multi'` or `'systemandmulti'`, otherwise it defaults to `True`.
'''
def __init__(self, **kwargs):
self._old_focus_next = None
self._old_focus_previous = None
super(FocusBehavior, self).__init__(**kwargs)
self._keyboard_mode = _keyboard_mode
self.bind(focus=self._on_focus, disabled=self._on_focusable,
is_focusable=self._on_focusable,
focus_next=self._set_on_focus_next,
focus_previous=self._set_on_focus_previous)
def _on_focusable(self, instance, value):
if self.disabled or not self.is_focusable:
self.focus = False
def _on_focus(self, instance, value, *largs):
if self.keyboard_mode == 'auto':
if value:
self._bind_keyboard()
else:
self._unbind_keyboard()
def _ensure_keyboard(self):
if self._keyboard is None:
self._requested_keyboard = True
keyboard = self._keyboard =\
EventLoop.window.request_keyboard(
self._keyboard_released, self, input_type=self.input_type)
keyboards = FocusBehavior._keyboards
if keyboard not in keyboards:
keyboards[keyboard] = None
def _bind_keyboard(self):
self._ensure_keyboard()
keyboard = self._keyboard
if not keyboard or self.disabled or not self.is_focusable:
self.focus = False
return
keyboards = FocusBehavior._keyboards
old_focus = keyboards[keyboard] # keyboard should be in dict
if old_focus:
old_focus.focus = False
# keyboard shouldn't have been released here, see keyboard warning
keyboards[keyboard] = self
keyboard.bind(on_key_down=self.keyboard_on_key_down,
on_key_up=self.keyboard_on_key_up)
def _unbind_keyboard(self):
keyboard = self._keyboard
if keyboard:
keyboard.unbind(on_key_down=self.keyboard_on_key_down,
on_key_up=self.keyboard_on_key_up)
if self._requested_keyboard:
keyboard.release()
self._keyboard = None
self._requested_keyboard = False
del FocusBehavior._keyboards[keyboard]
else:
FocusBehavior._keyboards[keyboard] = None
def _keyboard_released(self):
self.focus = False
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
return
if (not self.disabled and self.is_focusable and
('button' not in touch.profile or
not touch.button.startswith('scroll'))):
self.focus = True
FocusBehavior.ignored_touch.append(touch)
return super(FocusBehavior, self).on_touch_down(touch)
@staticmethod
def _handle_post_on_touch_up(touch):
''' Called by window after each touch has finished.
'''
touches = FocusBehavior.ignored_touch
if touch in touches:
touches.remove(touch)
return
for focusable in list(FocusBehavior._keyboards.values()):
if focusable is None or not focusable.unfocus_on_touch:
continue
focusable.focus = False
def _get_focus_next(self, focus_dir):
current = self
        walk_tree = 'walk' if focus_dir == 'focus_next' else 'walk_reverse'
while 1:
# if we hit a focusable, walk through focus_xxx
while getattr(current, focus_dir) is not None:
current = getattr(current, focus_dir)
if current is self or current is StopIteration:
return None # make sure we don't loop forever
if current.is_focusable and not current.disabled:
return current
# hit unfocusable, walk widget tree
itr = getattr(current, walk_tree)(loopback=True)
            if focus_dir == 'focus_next':
next(itr) # current is returned first when walking forward
for current in itr:
if isinstance(current, FocusBehavior):
break
# why did we stop
if isinstance(current, FocusBehavior):
if current is self:
return None
if current.is_focusable and not current.disabled:
return current
else:
return None
def keyboard_on_key_down(self, window, keycode, text, modifiers):
'''The method bound to the keyboard when the instance has focus.
When the instance becomes focused, this method is bound to the
keyboard and will be called for every input press. The parameters are
the same as :meth:`kivy.core.window.WindowBase.on_key_down`.
When overwriting the method in the derived widget, super should be
called to enable tab cycling. If the derived widget wishes to use tab
for its own purposes, it can call super at the end after it is done if
it didn't consume tab.
Similar to other keyboard functions, it should return True if the
key was consumed.
'''
if keycode[1] == 'tab': # deal with cycle
if ['shift'] == modifiers:
next = self._get_focus_next('focus_previous')
else:
next = self._get_focus_next('focus_next')
if next:
self.focus = False
next.focus = True
return True
return False
def keyboard_on_key_up(self, window, keycode):
'''The method bound to the keyboard when the instance has focus.
When the instance becomes focused, this method is bound to the
keyboard and will be called for every input release. The parameters are
the same as :meth:`kivy.core.window.WindowBase.on_key_up`.
When overwriting the method in the derived widget, super should be
called to enable de-focusing on escape. If the derived widget wishes
to use escape for its own purposes, it can call super at the end after
it is done if it didn't consume escape.
See :meth:`on_key_down`
'''
if keycode[1] == 'escape':
self.focus = False
return True
return False
def show_keyboard(self):
'''
Convenience function to show the keyboard in managed mode.
'''
if self.keyboard_mode == 'managed':
self._bind_keyboard()
def hide_keyboard(self):
'''
Convenience function to hide the keyboard in managed mode.
'''
if self.keyboard_mode == 'managed':
self._unbind_keyboard()
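# Illustrative sketch (not part of the module): two focusable buttons linked
# so that tab cycles forward and shift+tab backward between them. Setting
# focus_next on one side auto-populates focus_previous on the other.
def _example_focus_chain():
    from kivy.uix.button import Button

    class FocusButton(FocusBehavior, Button):
        pass

    first, second = FocusButton(text='first'), FocusButton(text='second')
    first.focus_next = second    # implies second.focus_previous is first
    second.focus_next = first    # close the cycle
    first.focus = True           # request the keyboard for 'first'
    return first, second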
class CompoundSelectionBehavior(object):
'''Selection behavior implements the logic behind keyboard and touch
selection of selectable widgets managed by the derived widget.
For example, it could be combined with a
:class:`~kivy.uix.gridlayout.GridLayout` to add selection to the layout.
At its core, it keeps a dynamic list of widgets that can be selected.
Then, as the touches and keyboard input are passed in, it selects one or
more of the widgets based on these inputs. For example, it uses the mouse
scroll and keyboard up/down buttons to scroll through the list of widgets.
Multiselection can also be achieved using the keyboard shift and ctrl keys.
Finally, in addition to the up/down type keyboard inputs, it can also
    accept letters from the keyboard to be used to select nodes with
associated strings that start with those letters, similar to how files
are selected by a file browser.
When the controller needs to select a node it calls :meth:`select_node` and
    :meth:`deselect_node`. Therefore, they must be overwritten in order to affect
the selected nodes. By default, the class doesn't listen to keyboard and
touch events, therefore, the derived widget must call
:meth:`select_with_touch`, :meth:`select_with_key_down`, and
:meth:`select_with_key_up` on events that it wants to pass on for selection
purposes.
For example, to add selection to a grid layout which will contain
:class:`~kivy.uix.Button` widgets::
class SelectableGrid(CompoundSelectionBehavior, GridLayout):
def __init__(self, **kwargs):
                super(SelectableGrid, self).__init__(**kwargs)
keyboard = Window.request_keyboard(None, self)
keyboard.bind(on_key_down=self.select_with_key_down,
on_key_up=self.select_with_key_up)
def select_node(self, node):
node.background_color = (1, 0, 0, 1)
                return super(SelectableGrid, self).select_node(node)
def deselect_node(self, node):
node.background_color = (1, 1, 1, 1)
                super(SelectableGrid, self).deselect_node(node)
Then, for each button added to the layout, bind on_touch_down of the button
to :meth:`select_with_touch` to pass on the touch events.
.. versionadded:: 1.9.0
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
'''
selected_nodes = ListProperty([])
'''The list of selected nodes.
.. note::
Multiple nodes can be selected right after another using e.g. the
keyboard, so when listening to :attr:`selected_nodes` one should be
aware of this.
:attr:`selected_nodes` is a :class:`~kivy.properties.ListProperty` and
defaults to the empty list, []. It is read-only and should not be modified.
'''
touch_multiselect = BooleanProperty(False)
'''A special touch mode which determines whether touch events, as
processed with :meth:`select_with_touch`, will add to the selection the
currently touched node, or if it will clear the selection before adding the
node. This allows the selection of multiple nodes by simply touching them.
This is different than :attr:`multiselect`, because when this is True
simply touching an unselected node will select it, even if e.g. ctrl is not
pressed. If this is False, however, ctrl is required to be held in order to
add to selection when :attr:`multiselect` is True.
.. note::
:attr:`multiselect`, when False, will disable
:attr:`touch_multiselect`.
:attr:`touch_multiselect` is a :class:`~kivy.properties.BooleanProperty`,
defaults to False.
'''
multiselect = BooleanProperty(False)
'''Determines whether multiple nodes can be selected. If enabled, keyboard
shift and ctrl selection, optionally combined with touch, for example, will
be able to select multiple widgets in the normally expected manner.
This dominates :attr:`touch_multiselect` when False.
:attr:`multiselect` is a :class:`~kivy.properties.BooleanProperty`
, defaults to False.
'''
keyboard_select = BooleanProperty(True)
'''Whether the keyboard can be used for selection. If False, keyboard
inputs will be ignored.
:attr:`keyboard_select` is a :class:`~kivy.properties.BooleanProperty`
, defaults to True.
'''
page_count = NumericProperty(10)
'''Determines by how much the selected node is moved up or down, relative
to position of the last selected node, when pageup (or pagedown) is
pressed.
:attr:`page_count` is a :class:`~kivy.properties.NumericProperty`,
defaults to 10.
'''
up_count = NumericProperty(1)
'''Determines by how much the selected node is moved up or down, relative
to position of the last selected node, when the up (or down) arrow on the
keyboard is pressed.
:attr:`up_count` is a :class:`~kivy.properties.NumericProperty`,
defaults to 1.
'''
right_count = NumericProperty(1)
'''Determines by how much the selected node is moved up or down, relative
to position of the last selected node, when the right (or left) arrow on
the keyboard is pressed.
:attr:`right_count` is a :class:`~kivy.properties.NumericProperty`,
defaults to 1.
'''
scroll_count = NumericProperty(0)
'''Determines by how much the selected node is moved up or down, relative
to position of the last selected node, when the mouse scroll wheel is
scrolled.
:attr:`scroll_count` is a :class:`~kivy.properties.NumericProperty`,
defaults to 0.
'''
_anchor = None # the last anchor node selected (e.g. shift relative node)
# the idx may be out of sync
_anchor_idx = 0 # cached index in case the list hasn't changed
_last_selected_node = None # the absolute last node selected
_last_node_idx = 0
_ctrl_down = False # if it's pressed - for e.g. shift selection
_shift_down = False
# holds str used to find node, e.g. if word is typed. passed to goto_node
_word_filter = ''
_last_key_time = 0 # time since last press, for finding whole strs in node
_printable = set(string.printable)
_key_list = [] # keys that are already pressed, to not press continuously
_offset_counts = {} # cache of counts for faster access
def __init__(self, **kwargs):
super(CompoundSelectionBehavior, self).__init__(**kwargs)
def ensure_single_select(*l):
if (not self.multiselect) and len(self.selected_nodes) > 1:
self.clear_selection()
self._update_counts()
self.bind(multiselect=ensure_single_select,
page_count=self._update_counts, up_count=self._update_counts,
right_count=self._update_counts, scroll_count=self._update_counts)
def select_with_touch(self, node, touch=None):
'''(internal) Processes a touch on the node. This should be called by
the derived widget when a node is touched and is to be used for
selection. Depending on the keyboard keys pressed and the
configuration, it could select or deselect this and other nodes in the
selectable nodes list, :meth:`get_selectable_nodes`.
:Parameters:
`node`
The node that received the touch. Can be None for a scroll
type touch.
`touch`
Optionally, the touch. Defaults to None.
:Returns:
bool, True if the touch was used, False otherwise.
'''
multi = self.multiselect
multiselect = multi and (self._ctrl_down or self.touch_multiselect)
range_select = multi and self._shift_down
if touch and 'button' in touch.profile and touch.button in\
('scrollup', 'scrolldown', 'scrollleft', 'scrollright'):
node_src, idx_src = self._resolve_last_node()
node, idx = self.goto_node(touch.button, node_src, idx_src)
if node == node_src:
return False
if range_select:
self._select_range(multiselect, True, node, idx)
else:
if not multiselect:
self.clear_selection()
self.select_node(node)
return True
if node is None:
return False
if (node in self.selected_nodes and (not range_select)): # selected
if multiselect:
self.deselect_node(node)
else:
self.clear_selection()
self.select_node(node)
elif range_select:
# keep anchor only if not multiselect (ctrl-type selection)
self._select_range(multiselect, not multiselect, node, 0)
else: # it's not selected at this point
if not multiselect:
self.clear_selection()
self.select_node(node)
return True
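# Illustrative sketch (an assumption, not from this module): forwarding a
# child Button's touches for selection, as the class docstring describes.
#
#     btn = Button(text='node')
#     btn.bind(on_touch_down=lambda button, touch:
#              self.select_with_touch(button, touch))
#     self.add_widget(btn)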
def select_with_key_down(self, keyboard, scancode, codepoint, modifiers,
**kwargs):
'''Processes a key press. This is called when a key press is to be used
for selection. Depending on the keyboard keys pressed and the
configuration, it could select or deselect nodes or node ranges
from the selectable nodes list, :meth:`get_selectable_nodes`.
The parameters are such that it could be bound directly to the
on_key_down event of a keyboard. Therefore, it is safe to be called
repeatedly when the key is held down as is done by the keyboard.
:Returns:
bool, True if the keypress was used, False otherwise.
'''
if not self.keyboard_select:
return False
keys = self._key_list
multi = self.multiselect
node_src, idx_src = self._resolve_last_node()
if scancode[1] == 'shift':
self._shift_down = True
elif scancode[1] == 'ctrl':
self._ctrl_down = True
elif (multi and 'ctrl' in modifiers and scancode[1] in ('a', 'A')
and scancode[1] not in keys):
sister_nodes = self.get_selectable_nodes()
select = self.select_node
for node in sister_nodes:
select(node)
keys.append(scancode[1])
else:
if scancode[1] in self._printable:
if clock() - self._last_key_time <= 1.:
self._word_filter += scancode[1]
else:
self._word_filter = scancode[1]
self._last_key_time = clock()
node, idx = self.goto_node(self._word_filter, node_src,
idx_src)
else:
node, idx = self.goto_node(scancode[1], node_src, idx_src)
if node == node_src:
return False
multiselect = multi and 'ctrl' in modifiers
if multi and 'shift' in modifiers:
self._select_range(multiselect, True, node, idx)
else:
if not multiselect:
self.clear_selection()
self.select_node(node)
return True
return False
def select_with_key_up(self, keyboard, scancode, **kwargs):
'''(internal) Processes a key release. This must be called by the
derived widget when a key that :meth:`select_with_key_down` returned
True is released.
The parameters are such that it could be bound directly to the
on_key_up event of a keyboard.
:Returns:
bool, True if the key release was used, False otherwise.
'''
if scancode[1] == 'shift':
self._shift_down = False
elif scancode[1] == 'ctrl':
self._ctrl_down = False
else:
try:
self._key_list.remove(scancode[1])
return True
except ValueError:
return False
return True
def _update_counts(self, *largs):
# doesn't invert indices here
pc = self.page_count
uc = self.up_count
rc = self.right_count
sc = self.scroll_count
self._offset_counts = {'pageup': -pc, 'pagedown': pc, 'up': -uc,
'down': uc, 'right': rc, 'left': -rc, 'scrollup': sc,
'scrolldown': -sc, 'scrollright': -sc, 'scrollleft': sc}
def _resolve_last_node(self):
# for offset selection, we have an anchor, and we select everything
# between anchor and added offset relative to last node
sister_nodes = self.get_selectable_nodes()
if not len(sister_nodes):
return None, 0
last_node = self._last_selected_node
last_idx = self._last_node_idx
end = len(sister_nodes) - 1
if last_node is None:
last_node = self._anchor
last_idx = self._anchor_idx
if last_node is None:
return sister_nodes[end], end
if last_idx > end or sister_nodes[last_idx] != last_node:
try:
return last_node, sister_nodes.index(last_node)
except ValueError:
return sister_nodes[end], end
return last_node, last_idx
def _select_range(self, multiselect, keep_anchor, node, idx):
'''Selects a range between self._anchor and node or idx.
If multiselect, it'll add to selection, otherwise it will unselect
everything before selecting the range. This is only called if
self.multiselect is True.
If keep_anchor is False, the anchor is moved to node. This should
always be True for keyboard selection.
'''
select = self.select_node
sister_nodes = self.get_selectable_nodes()
end = len(sister_nodes) - 1
last_node = self._anchor
last_idx = self._anchor_idx
if last_node is None:
last_idx = end
last_node = sister_nodes[end]
else:
if last_idx > end or sister_nodes[last_idx] != last_node:
try:
last_idx = sister_nodes.index(last_node)
except ValueError:
# list changed - cannot do select across them
return
if idx > end or sister_nodes[idx] != node:
try: # just in case
idx = sister_nodes.index(node)
except ValueError:
return
if last_idx > idx:
last_idx, idx = idx, last_idx
if not multiselect:
self.clear_selection()
for item in sister_nodes[last_idx:idx + 1]:
select(item)
if keep_anchor:
self._anchor = last_node
self._anchor_idx = last_idx
else:
self._anchor = node # in case idx was reversed, reset
self._anchor_idx = idx
self._last_selected_node = node
self._last_node_idx = idx
def clear_selection(self):
''' Deselects all the currently selected nodes.
'''
# keep the anchor and last selected node
deselect = self.deselect_node
nodes = self.selected_nodes
# empty beforehand so lookup in deselect will be fast
self.selected_nodes = []
for node in nodes:
deselect(node)
def get_selectable_nodes(self):
'''(internal) Returns a list of the nodes that can be selected. It can
be overwritten by the derived widget to return the correct list.
This list is used to determine which nodes to select with group
selection. E.g. the last element in the list will be selected when
home is pressed, pagedown will move (or add to, if shift is held) the
selection from the current position by negative :attr:`page_count`
nodes starting from the position of the currently selected node in
this list and so on. Still, nodes can be selected even if they are not
in this list.
.. note::
It is safe to dynamically change this list including removing,
adding, or re-arranging its elements. Nodes can be selected even
if they are not on this list. And selected nodes removed from the
list will remain selected until :meth:`deselect_node` is called.
.. warning::
Layouts display their children in the reverse order. That is, the
contents of :attr:`~kivy.uix.widget.Widget.children` are displayed
from right to left, bottom to top. Therefore, internally, the
indices of the elements returned by this function are reversed to
make it work by default for most layouts so that the final result
is that e.g. home, although it will select the last element on this
list, visually it'll select the first element when counting from
top to bottom and left to right. If this behavior is not desired,
a reversed list should be returned instead.
Defaults to returning :attr:`~kivy.uix.widget.Widget.children`.
'''
return self.children
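# Illustrative sketch (an assumption): a derived widget can return a reversed
# copy of `children` to opt out of the default visual ordering described in
# the warning above.
#
#     def get_selectable_nodes(self):
#         return list(reversed(self.children))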
def goto_node(self, key, last_node, last_node_idx):
'''(internal) Used by the controller to get the node at the position
indicated by key. The key can be keyboard inputs, e.g. pageup,
or scroll inputs from the mouse scroll wheel, e.g. scrollup.
Last node is the last node selected and is used to find the resulting
node. For example, if the key is up, the returned node is one node
up from the last node.
It can be overwritten by the derived widget.
:Parameters:
`key`
str, the string used to find the desired node. It can be any
of the keyboard keys, as well as the mouse scrollup,
scrolldown, scrollright, and scrollleft strings. If letters
are typed in quick succession, the letters will be combined
before it's passed in as key and can be used to find nodes that
have an associated string that starts with those letters.
`last_node`
The last node that was selected.
`last_node_idx`
The cached index of the last node selected in the
:meth:`get_selectable_nodes` list. If the list hasn't changed
it saves having to look up the index of `last_node` in that
list.
:Returns:
tuple, the node targeted by key and its index in the
:meth:`get_selectable_nodes` list. Returning
`(last_node, last_node_idx)` indicates a node wasn't found.
'''
sister_nodes = self.get_selectable_nodes()
end = len(sister_nodes) - 1
counts = self._offset_counts
if end == -1:
return last_node, last_node_idx
if last_node_idx > end or sister_nodes[last_node_idx] != last_node:
try: # just in case
last_node_idx = sister_nodes.index(last_node)
except ValueError:
return last_node, last_node_idx
try:
idx = max(min(-counts[key] + last_node_idx, end), 0)
return sister_nodes[idx], idx
except KeyError:
pass
if key == 'home':
return sister_nodes[end], end
elif key == 'end':
return sister_nodes[0], 0
else:
return last_node, last_node_idx
def select_node(self, node):
''' Selects a node.
It is called by the controller when it selects a node and can be
called from the outside to select a node directly. The derived widget
should overwrite this method and change the node to its selected state
when this is called.
:Parameters:
`node`
The node to be selected.
:Returns:
bool, True if the node was selected, False otherwise.
.. warning::
This method must be called by the derived widget using super if it
is overwritten.
'''
nodes = self.selected_nodes
if (not self.multiselect) and len(nodes):
self.clear_selection()
if node not in nodes:
nodes.append(node)
self._anchor = node
self._last_selected_node = node
return True
def deselect_node(self, node):
''' Deselects a possibly selected node.
It is called by the controller when it deselects a node and can also
be called from the outside to deselect a node directly. The derived
widget should overwrite this method and change the node to its
unselected state when this is called.
:Parameters:
`node`
The node to be deselected.
.. warning::
This method must be called by the derived widget using super if it
is overwritten.
'''
try:
self.selected_nodes.remove(node)
except ValueError:
pass
| gpl-3.0 |
VirusTotal/msticpy | msticpy/sectools/tiproviders/ti_provider_base.py | 1 | 21069 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Module for TILookup classes.
Input can be a single IoC observable or a pandas DataFrame containing
multiple observables. Processing may require an API key and
processing performance may be limited to a specific number of
requests per minute for the account type that you have.
"""
import abc
from abc import ABC
import math # noqa
import pprint
import re
from collections import Counter, namedtuple
from enum import Enum
from functools import lru_cache, singledispatch, total_ordering
from ipaddress import IPv4Address, IPv6Address, ip_address
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
from urllib.parse import quote_plus
import attr
import pandas as pd
from urllib3.exceptions import LocationParseError
from urllib3.util import parse_url
from ..._version import VERSION
from ...common.utility import export
from ..iocextract import IoCExtract, IoCType
__version__ = VERSION
__author__ = "Ian Hellen"
SanitizedObservable = namedtuple("SanitizedObservable", ["observable", "status"])
# pylint: disable=too-few-public-methods
@total_ordering
class TISeverity(Enum):
"""Threat intelligence report severity."""
unknown = -1
information = 0
warning = 1
high = 2
@classmethod
def parse(cls, value) -> "TISeverity":
"""
Parse string or numeric value to TISeverity.
Parameters
----------
value : Any
TISeverity, str or int
Returns
-------
TISeverity
TISeverity instance.
"""
if isinstance(value, TISeverity):
return value
if isinstance(value, str) and value.lower() in cls.__members__:
return cls[value.lower()]
if isinstance(value, int):
if value in [v.value for v in cls.__members__.values()]:
return cls(value)
return TISeverity.unknown
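# Illustrative examples of the parsing rules above (values assumed):
#
#     TISeverity.parse("High")   # -> TISeverity.high (case-insensitive name)
#     TISeverity.parse(2)        # -> TISeverity.high (matching int value)
#     TISeverity.parse("bogus")  # -> TISeverity.unknown (fallback)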
# pylint: disable=comparison-with-callable
def __eq__(self, other) -> bool:
"""
Return True if severities are equal.
Parameters
----------
other : Any
TISeverity to compare to.
Can be a numeric value or name of TISeverity value.
Returns
-------
bool
If severities are equal
"""
other_sev = TISeverity.parse(other)
return self.value == other_sev.value
def __gt__(self, other) -> bool:
"""
Return True self is greater than other.
Parameters
----------
other : Any
TISeverity to compare to.
Can be a numeric value or name of TISeverity value.
Returns
-------
bool
If severities are equal
"""
other_sev = TISeverity.parse(other)
return self.value > other_sev.value
# pylint: enable=comparison-with-callable
# pylint: disable=too-many-instance-attributes
@attr.s(auto_attribs=True)
class LookupResult:
"""Lookup result for IoCs."""
ioc: str
ioc_type: str
safe_ioc: str = ""
query_subtype: Optional[str] = None
provider: Optional[str] = None
result: bool = False
severity: int = attr.ib(default=0)
details: Any = None
raw_result: Optional[Union[str, dict]] = None
reference: Optional[str] = None
status: int = 0
@severity.validator
def _check_severity(self, attribute, value):
del attribute
if isinstance(value, TISeverity):
self.severity = value.name
return
self.severity = TISeverity.parse(value).name
@property
def summary(self):
"""Print a summary of the Lookup Result."""
p_pr = pprint.PrettyPrinter(indent=4)
print("ioc:", self.ioc, "(", self.ioc_type, ")")
print("result:", self.result)
# print("severity:", self.severity)
p_pr.pprint(self.details)
print("reference: ", self.reference)
@property
def raw_result_fmtd(self):
"""Print raw results of the Lookup Result."""
p_pr = pprint.PrettyPrinter(indent=4)
p_pr.pprint(self.raw_result)
@property
def severity_name(self) -> str:
"""
Return text description of severity score.
Returns
-------
str
Severity description.
"""
try:
return TISeverity(self.severity).name
except ValueError:
return TISeverity.unknown.name
def set_severity(self, value: Any):
"""
Set the severity from enum, int or string.
Parameters
----------
value : Any
The severity value to set
"""
self._check_severity(None, value)
@classmethod
def column_map(cls):
"""Return a dictionary that maps fields to DF Names."""
col_mapping = {}
for name in attr.fields_dict(cls):
out_name = "".join([part.capitalize() for part in name.split("_")])
col_mapping[name] = out_name
return col_mapping
# pylint: enable=too-many-instance-attributes
# pylint: disable=too-few-public-methods
class TILookupStatus(Enum):
"""Threat intelligence lookup status."""
ok = 0
not_supported = 1
bad_format = 2
query_failed = 3
other = 10
# pylint: enable=too-few-public-methods
_IOC_EXTRACT = IoCExtract()
@export
class TIProvider(ABC):
"""Abstract base class for Threat Intel providers."""
_IOC_QUERIES: Dict[str, Any] = {}
# pylint: disable=unused-argument
def __init__(self, **kwargs):
"""Initialize the provider."""
self._supported_types: Set[IoCType] = set()
self.description: Optional[str] = None
self._supported_types = {
IoCType.parse(ioc_type.split("-")[0]) for ioc_type in self._IOC_QUERIES
}
if IoCType.unknown in self._supported_types:
self._supported_types.remove(IoCType.unknown)
self.require_url_encoding = False
# pylint: disable=duplicate-code
@abc.abstractmethod
def lookup_ioc(
self, ioc: str, ioc_type: str = None, query_type: str = None, **kwargs
) -> LookupResult:
"""
Lookup a single IoC observable.
Parameters
----------
ioc : str
IoC Observable value
ioc_type : str, optional
IoC Type, by default None (type will be inferred)
query_type : str, optional
Specify the data subtype to be queried, by default None.
If not specified the default record type for the IoC type
will be returned.
Returns
-------
LookupResult
The returned results.
"""
def lookup_iocs(
self,
data: Union[pd.DataFrame, Dict[str, str], Iterable[str]],
obs_col: str = None,
ioc_type_col: str = None,
query_type: str = None,
**kwargs,
) -> pd.DataFrame:
"""
Lookup collection of IoC observables.
Parameters
----------
data : Union[pd.DataFrame, Dict[str, str], Iterable[str]]
Data input in one of three formats:
1. Pandas dataframe (you must supply the column name in
`obs_col` parameter)
2. Dict of observable, IoCType
3. Iterable of observables - IoCTypes will be inferred
obs_col : str, optional
DataFrame column to use for observables, by default None
ioc_type_col : str, optional
DataFrame column to use for IoCTypes, by default None
query_type : str, optional
Specify the data subtype to be queried, by default None.
If not specified the default record type for the IoC type
will be returned.
Returns
-------
pd.DataFrame
DataFrame of results.
"""
results = []
for observable, ioc_type in generate_items(data, obs_col, ioc_type_col):
if not observable:
continue
item_result = self.lookup_ioc(
ioc=observable, ioc_type=ioc_type, query_type=query_type
)
results.append(pd.Series(attr.asdict(item_result)))
return pd.DataFrame(data=results).rename(columns=LookupResult.column_map())
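# Illustrative calls (column names and observables are assumptions) showing
# the three accepted input shapes:
#
#     provider.lookup_iocs(df, obs_col="Ioc", ioc_type_col="IocType")
#     provider.lookup_iocs({"8.8.8.8": "ipv4", "example.com": "dns"})
#     provider.lookup_iocs(["8.8.8.8", "example.com"])  # types inferred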
@abc.abstractmethod
def parse_results(self, response: LookupResult) -> Tuple[bool, TISeverity, Any]:
"""
Return the details of the response.
Parameters
----------
response : LookupResult
The returned data response
Returns
-------
Tuple[bool, TISeverity, Any]
bool = positive or negative hit
TISeverity = enumeration of severity
Object with match details
"""
@property
def supported_types(self) -> List[str]:
"""
Return list of supported IoC types for this provider.
Returns
-------
List[str]
List of supported type names
"""
return [ioc.name for ioc in self._supported_types]
@classmethod
def is_known_type(cls, ioc_type: str) -> bool:
"""
Return True if this a known IoC Type.
Parameters
----------
ioc_type : str
IoCType string to test
Returns
-------
bool
True if known type.
"""
return ioc_type in IoCType.__members__ and ioc_type != "unknown"
@classmethod
def usage(cls):
"""Print usage of provider."""
print(f"{cls.__doc__} Supported query types:")
for ioc_key in sorted(cls._IOC_QUERIES):
ioc_key_elems = ioc_key.split("-", maxsplit=1)
if len(ioc_key_elems) == 1:
print(f"\tioc_type={ioc_key_elems[0]}")
if len(ioc_key_elems) == 2:
print(
f"\tioc_type={ioc_key_elems[0]}, ioc_query_type={ioc_key_elems[1]}"
)
def is_supported_type(self, ioc_type: Union[str, IoCType]) -> bool:
"""
Return True if the passed type is supported.
Parameters
----------
ioc_type : Union[str, IoCType]
IoC type name or instance
Returns
-------
bool
True if supported.
"""
if isinstance(ioc_type, str):
ioc_type = IoCType.parse(ioc_type)
return ioc_type.name in self.supported_types
@staticmethod
@lru_cache(maxsize=1024)
def resolve_ioc_type(observable: str) -> str:
"""
Return IoCType determined by IoCExtract.
Parameters
----------
observable : str
IoC observable string
Returns
-------
str
IoC Type (or unknown if type could not be determined)
"""
return _IOC_EXTRACT.get_ioc_type(observable)
def _check_ioc_type(
self, ioc: str, ioc_type: str = None, query_subtype: str = None
) -> LookupResult:
"""
Check IoC Type and cleans up observable.
Parameters
----------
ioc : str
IoC observable
ioc_type : str, optional
IoC type, by default None
query_subtype : str, optional
Query sub-type, if any, by default None
Returns
-------
LookupResult
Lookup result with resolved ioc_type and pre-processed
observable.
LookupResult.status is none-zero on failure.
"""
result = LookupResult(
ioc=ioc,
safe_ioc=ioc,
ioc_type=ioc_type if ioc_type else self.resolve_ioc_type(ioc),
query_subtype=query_subtype,
result=False,
details="",
raw_result=None,
reference=None,
)
if not self.is_supported_type(result.ioc_type):
result.details = f"IoC type {result.ioc_type} not supported."
result.status = TILookupStatus.not_supported.value
return result
clean_ioc = preprocess_observable(
ioc, result.ioc_type, self.require_url_encoding
)
result.safe_ioc = clean_ioc.observable
if clean_ioc.status != "ok":
result.details = clean_ioc.status
result.status = TILookupStatus.bad_format.value
return result
# slightly stricter than normal URL regex to exclude '() from host string
_HTTP_STRICT_REGEX = r"""
(?P<protocol>(https?|ftp|telnet|ldap|file)://)
(?P<userinfo>([a-z0-9-._~!$&*+,;=:]|%[0-9A-F]{2})*@)?
(?P<host>([a-z0-9-._~!$&\*+,;=]|%[0-9A-F]{2})*)
(:(?P<port>\d*))?
(/(?P<path>([^?\#| ]|%[0-9A-F]{2})*))?
(\?(?P<query>([a-z0-9-._~!$&'()*+,;=:/?@]|%[0-9A-F]{2})*))?
(\#(?P<fragment>([a-z0-9-._~!$&'()*+,;=:/?@]|%[0-9A-F]{2})*))?\b"""
_HTTP_STRICT_RGXC = re.compile(_HTTP_STRICT_REGEX, re.I | re.X | re.M)
# pylint: disable=too-many-return-statements, too-many-branches
def preprocess_observable(
observable, ioc_type, require_url_encoding: bool = False
) -> SanitizedObservable:
"""
Preprocess and check validity of an observable against the declared IoC type.
Parameters
----------
observable : str
The IoC observable value
ioc_type : str
The IoC type
require_url_encoding : bool
Set to True if URLs require encoding before passing to the provider
Returns
-------
SanitizedObservable
Pre-processed result
"""
observable = observable.strip()
try:
validated = _IOC_EXTRACT.validate(observable, ioc_type)
except KeyError:
validated = False
if not validated:
return SanitizedObservable(
None, "Observable does not match expected pattern for " + ioc_type
)
if ioc_type == "url":
return _preprocess_url(observable, require_url_encoding)
if ioc_type == "ipv4":
return _preprocess_ip(observable, version=4)
if ioc_type == "ipv6":
return _preprocess_ip(observable, version=6)
if ioc_type in ["dns", "hostname"]:
return _preprocess_dns(observable)
if ioc_type in ["md5_hash", "sha1_hash", "sha256_hash", "file_hash"]:
return _preprocess_hash(observable)
return SanitizedObservable(observable, "ok")
# Would complicate code with too many branches
# pylint: disable=too-many-return-statements
def _preprocess_url(
url: str, require_url_encoding: bool = False
) -> SanitizedObservable:
"""
Check that URL can be parsed.
Parameters
----------
url : str
The URL to check
require_url_encoding : bool
Set to True if URLs require encoding before passing to the provider
Returns
-------
SanitizedObservable
Pre-processed result
"""
clean_url, scheme, host = get_schema_and_host(url, require_url_encoding)
if scheme is None or host is None:
return SanitizedObservable(None, f"Could not obtain scheme or host from {url}")
# get rid of some obvious false positives (localhost, local hostnames)
try:
addr = ip_address(host)
if addr.is_private:
return SanitizedObservable(None, "Host part of URL is a private IP address")
if addr.is_loopback:
return SanitizedObservable(
None, "Host part of URL is a loopback IP address"
)
except ValueError:
pass
if "." not in host:
return SanitizedObservable(None, "Host is unqualified domain name")
if scheme.lower() in ["file"]:
return SanitizedObservable(None, f"{scheme} URL scheme is not supported")
return SanitizedObservable(clean_url, "ok")
def get_schema_and_host(
url: str, require_url_encoding: bool = False
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
"""
Return URL scheme and host and cleaned URL.
Parameters
----------
url : str
Input URL
require_url_encoding : bool
Set to True if the URL needs encoding. Default is False.
Returns
-------
Tuple[Optional[str], Optional[str], Optional[str]]
Tuple of URL, scheme, host
"""
clean_url = None
scheme = None
host = None
try:
scheme, _, host, _, _, _, _ = parse_url(url)
clean_url = url
except LocationParseError:
# Try to clean URL and re-check
cleaned_url = _clean_url(url)
if cleaned_url is not None:
try:
scheme, _, host, _, _, _, _ = parse_url(cleaned_url)
clean_url = cleaned_url
except LocationParseError:
pass
if require_url_encoding and clean_url:
clean_url = quote_plus(clean_url)
return clean_url, scheme, host
def _clean_url(url: str) -> Optional[str]:
"""
Clean URL to remove query params and fragments and any trailing stuff.
Parameters
----------
url : str
the URL to check
Returns
-------
Optional[str]
Cleaned URL or None if the input was not a valid URL
"""
# Try to clean URL and re-check
match_url = _HTTP_STRICT_RGXC.search(url)
if (
not match_url
or match_url.groupdict()["protocol"] is None
or match_url.groupdict()["host"] is None
):
return None
# build the URL dropping the query string and fragments
clean_url = match_url.groupdict()["protocol"]
if match_url.groupdict()["userinfo"]:
clean_url += match_url.groupdict()["userinfo"]
clean_url += match_url.groupdict()["host"]
if match_url.groupdict()["port"]:
clean_url += ":" + match_url.groupdict()["port"]
if match_url.groupdict()["path"]:
clean_url += "/" + match_url.groupdict()["path"]
return clean_url
# Would complicate code with too many branches
# pylint: disable=too-many-return-statements
def _preprocess_ip(ipaddress: str, version=4):
"""Ensure Ip address is a valid public IPv4 address."""
try:
addr = ip_address(ipaddress)
except ValueError:
return SanitizedObservable(None, "IP address is invalid format")
if version == 4 and not isinstance(addr, IPv4Address):
return SanitizedObservable(None, "Not an IPv4 address")
if version == 6 and not isinstance(addr, IPv6Address):
return SanitizedObservable(None, "Not an IPv6 address")
if addr.is_global:
return SanitizedObservable(ipaddress, "ok")
return SanitizedObservable(None, "IP address is not global")
def _preprocess_dns(domain: str) -> SanitizedObservable:
"""Ensure DNS is a valid-looking domain."""
if "." not in domain:
return SanitizedObservable(None, "Domain is unqualified domain name")
try:
addr = ip_address(domain)
del addr
return SanitizedObservable(None, "Domain is an IP address")
except ValueError:
pass
return SanitizedObservable(domain, "ok")
def _preprocess_hash(hash_str: str) -> SanitizedObservable:
"""Ensure Hash has minimum entropy (rather than a string of 'x')."""
str_entropy = entropy(hash_str)
if str_entropy < 3.0:
return SanitizedObservable(None, "String has too low an entropy to be a hash")
return SanitizedObservable(hash_str, "ok")
def entropy(input_str: str) -> float:
"""Compute entropy of input string."""
str_len = float(len(input_str))
return -sum(
map(
lambda a: (a / str_len) * math.log2(a / str_len),
Counter(input_str).values(),
)
)
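# Illustrative values (computed per the formula above, in bits per character):
#
#     entropy("xxxxxxxx")                          # 0.0 - rejected as a hash
#     entropy("d41d8cd98f00b204e9800998ecf8427e")  # ~3.4 - passes the 3.0 gate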
@singledispatch
def generate_items(
data: Any, obs_col: Optional[str] = None, ioc_type_col: Optional[str] = None
) -> Iterable[Tuple[Optional[str], Optional[str]]]:
"""
Generate item pairs from different input types.
Parameters
----------
data : Any
DataFrame, dictionary or iterable
obs_col : Optional[str]
If `data` is a DataFrame, the column containing the observable value.
ioc_type_col : Optional[str]
If `data` is a DataFrame, the column containing the observable type.
Returns
-------
Iterable[Tuple[Optional[str], Optional[str]]] - a tuple of Observable/Type.
"""
del obs_col, ioc_type_col
if isinstance(data, Iterable):
for item in data:
yield item, TIProvider.resolve_ioc_type(item)
else:
yield None, None
@generate_items.register(pd.DataFrame)
def _(data: pd.DataFrame, obs_col: str, ioc_type_col: Optional[str] = None):
for _, row in data.iterrows():
if ioc_type_col is None:
yield row[obs_col], TIProvider.resolve_ioc_type(row[obs_col])
else:
yield row[obs_col], row[ioc_type_col]
@generate_items.register(dict) # type: ignore
def _(data: dict, obs_col: Optional[str] = None, ioc_type_col: Optional[str] = None):
for obs, ioc_type in data.items():
if not ioc_type:
ioc_type = TIProvider.resolve_ioc_type(obs)
yield obs, ioc_type
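# --- Minimal sketch, not part of the original module ---
# Demonstrates dict dispatch of generate_items (types inferred where None)
# and the hash entropy gate; the sample observables are assumptions.
if __name__ == "__main__":
for obs, obs_type in generate_items({"8.8.8.8": "ipv4", "example.com": None}):
print(obs, obs_type)
print(entropy("d41d8cd98f00b204e9800998ecf8427e"))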
| mit |
laayis/yowsup | yowsup/layers/protocol_groups/protocolentities/notification_groups.py | 32 | 1732 | from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from yowsup.layers.protocol_notifications.protocolentities import NotificationProtocolEntity
class GroupsNotificationProtocolEntity(NotificationProtocolEntity):
'''
<notification notify="WhatsApp" id="{{id}}" t="1420402514" participant="{{participant_jiid}}" from="{{group_jid}}" type="w:gp2">
</notification>
'''
def __init__(self, _id, _from, timestamp, notify, participant, offline):
super(GroupsNotificationProtocolEntity, self).__init__("w:gp2", _id, _from, timestamp, notify, offline)
self.setParticipant(participant)
self.setGroupId(_from)
def setParticipant(self, participant):
self._participant = participant
def getParticipant(self, full = True):
return self._participant if full else self._participant.split('@')[0]
def getGroupId(self):
return self._id
def setGroupId(self, groupId):
self._id = groupId
def __str__(self):
out = super(GroupsNotificationProtocolEntity, self).__str__()
out += "Participant: %s\n" % self.getParticipant()
return out
def toProtocolTreeNode(self):
node = super(GroupsNotificationProtocolEntity, self).toProtocolTreeNode()
node.setAttribute("participant", self.getParticipant())
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = super(GroupsNotificationProtocolEntity, GroupsNotificationProtocolEntity).fromProtocolTreeNode(node)
entity.__class__ = GroupsNotificationProtocolEntity
entity.setParticipant(node.getAttributeValue("participant"))
entity.setGroupId(node.getAttributeValue("from"))
return entity
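# Illustrative round trip (jids and timestamp are placeholder assumptions):
#
#     entity = GroupsNotificationProtocolEntity(
#         "id1", "123-456@g.us", "1420402514", "WhatsApp",
#         "789@s.whatsapp.net", False)
#     node = entity.toProtocolTreeNode()
#     parsed = GroupsNotificationProtocolEntity.fromProtocolTreeNode(node)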
| gpl-3.0 |
nathanielvarona/airflow | tests/providers/amazon/aws/hooks/test_cloud_formation.py | 3 | 3561 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
from airflow.providers.amazon.aws.hooks.cloud_formation import AWSCloudFormationHook
try:
from moto import mock_cloudformation
from moto.ec2.models import NetworkInterface as some_model
except ImportError:
mock_cloudformation = None
@unittest.skipIf(mock_cloudformation is None, 'moto package not present')
class TestAWSCloudFormationHook(unittest.TestCase):
def setUp(self):
self.hook = AWSCloudFormationHook(aws_conn_id='aws_default')
def create_stack(self, stack_name):
timeout = 15
template_body = json.dumps(
{
'Resources': {
"myResource": {
"Type": some_model.cloudformation_type(),
"Properties": {"myProperty": "myPropertyValue"},
}
}
}
)
self.hook.create_stack(
stack_name=stack_name,
params={
'TimeoutInMinutes': timeout,
'TemplateBody': template_body,
'Parameters': [{'ParameterKey': 'myParam', 'ParameterValue': 'myParamValue'}],
},
)
@mock_cloudformation
def test_get_conn_returns_a_boto3_connection(self):
assert self.hook.get_conn().describe_stacks() is not None
@mock_cloudformation
def test_get_stack_status(self):
stack_name = 'my_test_get_stack_status_stack'
stack_status = self.hook.get_stack_status(stack_name=stack_name)
assert stack_status is None
self.create_stack(stack_name)
stack_status = self.hook.get_stack_status(stack_name=stack_name)
assert stack_status == 'CREATE_COMPLETE', 'Incorrect stack status returned.'
@mock_cloudformation
def test_create_stack(self):
stack_name = 'my_test_create_stack_stack'
self.create_stack(stack_name)
stacks = self.hook.get_conn().describe_stacks()['Stacks']
assert len(stacks) > 0, 'CloudFormation should have stacks'
matching_stacks = [x for x in stacks if x['StackName'] == stack_name]
assert len(matching_stacks) == 1, f'stack with name {stack_name} should exist'
stack = matching_stacks[0]
assert stack['StackStatus'] == 'CREATE_COMPLETE', 'Stack should be in status CREATE_COMPLETE'
@mock_cloudformation
def test_delete_stack(self):
stack_name = 'my_test_delete_stack_stack'
self.create_stack(stack_name)
self.hook.delete_stack(stack_name=stack_name)
stacks = self.hook.get_conn().describe_stacks()['Stacks']
matching_stacks = [x for x in stacks if x['StackName'] == stack_name]
assert len(matching_stacks) == 0, f'stack with name {stack_name} should not exist'
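# These tests auto-skip when the optional `moto` library is absent; a typical
# local run (command assumed from standard pytest usage):
#
#     pip install moto
#     pytest tests/providers/amazon/aws/hooks/test_cloud_formation.py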
| apache-2.0 |
zeroc-ice/ice-demos | python/IceDiscovery/hello/server.py | 1 | 1125 | #!/usr/bin/env python
#
# Copyright (c) ZeroC, Inc. All rights reserved.
#
import signal
import sys
import time
import Ice
Ice.loadSlice('Hello.ice')
import Demo
class HelloI(Demo.Hello):
def sayHello(self, delay, current):
if delay != 0:
time.sleep(delay / 1000.0)
print("Hello World!")
def shutdown(self, current):
current.adapter.getCommunicator().shutdown()
#
# Ice.initialize returns an initialized Ice communicator,
# the communicator is destroyed once it goes out of scope.
#
with Ice.initialize(sys.argv, "config.server") as communicator:
#
# Install a signal handler to shutdown the communicator on Ctrl-C
#
signal.signal(signal.SIGINT, lambda signum, frame: communicator.shutdown())
#
# The communicator initialization removes all Ice-related arguments from argv
#
if len(sys.argv) > 1:
print(sys.argv[0] + ": too many arguments")
sys.exit(1)
adapter = communicator.createObjectAdapter("Hello")
adapter.add(HelloI(), Ice.stringToIdentity("hello"))
adapter.activate()
communicator.waitForShutdown()
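# A matching client sketch (assumed from the usual IceDiscovery demo layout;
# not part of this file):
#
#     with Ice.initialize(sys.argv, "config.client") as communicator:
#         hello = Demo.HelloPrx.checkedCast(
#             communicator.stringToProxy("hello"))
#         hello.sayHello(0)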
| gpl-2.0 |
lihkin213/ursula | library/upstart_service.py | 16 | 4526 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014, Blue Box Group, Inc.
# Copyright 2014, Craig Tracey <craigtracey@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import traceback
from hashlib import md5
from jinja2 import Environment
UPSTART_TEMPLATE = """
start on {{ start_on }}
stop on {{ stop_on }}
{% if description -%}
description {{ description }}
{% endif -%}
{% if envs -%}
{% for env in envs %}
env {{ env }}
{% endfor %}
{% endif -%}
{% if prestart_script -%}
pre-start script
{{ prestart_script }}
end script
{% endif -%}
{% if respawn -%}
respawn
{% endif -%}
exec start-stop-daemon --start --chuid {{ user }} {{ pidfile }} --exec {{ cmd }} {{ args }}
"""
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(default=None, required=True),
cmd=dict(default=None, required=True),
args=dict(default=None),
user=dict(default=None, required=True),
config_dirs=dict(default=None),
config_files=dict(default=None),
description=dict(default=None),
envs=dict(default=None, required=False, type='list'),
state=dict(default='present'),
start_on=dict(default='runlevel [2345]'),
stop_on=dict(default='runlevel [!2345]'),
prestart_script=dict(default=None),
respawn=dict(default=True),
path=dict(default=None),
pidfile=dict(default=None)
)
)
try:
changed = False
service_path = None
if not module.params['path']:
service_path = '/etc/init/%s.conf' % module.params['name']
else:
service_path = module.params['path']
symlink = os.path.join('/etc/init.d/', module.params['name'])
if module.params['state'] == 'absent':
if os.path.exists(service_path):
os.remove(service_path)
changed = True
if os.path.exists(symlink):
os.remove(symlink)
changed = True
if not changed:
module.exit_json(changed=False, result="ok")
else:
module.exit_json(changed=True, result="changed")
pidfile = ''
if module.params['pidfile'] and len(module.params['pidfile']):
pidfile = '--make-pidfile --pidfile %s' % module.params['pidfile']
args = ''
if module.params['args'] or module.params['config_dirs'] or \
module.params['config_files']:
args = '-- '
if module.params['args']:
args += module.params['args']
if module.params['config_dirs']:
for directory in module.params['config_dirs'].split(','):
args += '--config-dir %s ' % directory
if module.params['config_files']:
for filename in module.params['config_files'].split(','):
args += '--config-file %s ' % filename
template_vars = module.params
template_vars['pidfile'] = pidfile
template_vars['args'] = args
env = Environment().from_string(UPSTART_TEMPLATE)
rendered_service = env.render(template_vars)
if os.path.exists(service_path):
file_hash = md5(open(service_path, 'rb').read()).hexdigest()
template_hash = md5(rendered_service).hexdigest()
if file_hash == template_hash:
module.exit_json(changed=False, result="ok")
with open(service_path, "w") as fh:
fh.write(rendered_service)
if not os.path.exists(symlink):
os.symlink('/lib/init/upstart-job', symlink)
module.exit_json(changed=True, result="created")
except Exception as e:
formatted_lines = traceback.format_exc()
module.fail_json(msg="creating the service failed: %s" % (str(e)))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
main()
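# Example playbook task (illustrative; names and paths are assumptions):
#
#     - name: install upstart job for my-daemon
#       upstart_service:
#         name: my-daemon
#         user: daemon
#         cmd: /usr/local/bin/my-daemon
#         args: "--foreground"
#         pidfile: /var/run/my-daemon.pid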
| mit |