"""Tests for lineprofiler.py."""
# Standard library imports
import os
# Third party imports
from pytestqt import qtbot
from qtpy.QtCore import Qt
from spyder.utils.qthelpers import qapplication
MAIN_APP = qapplication()
# Local imports
from spyder_line_profiler.widgets.lineprofiler import LineProfilerWidget

try:
    from unittest.mock import Mock
except ImportError:
    from mock import Mock  # Python 2

TEST_SCRIPT = """import time
@profile
def foo():
    time.sleep(1)
    xs = []
    for k in range(100):
        xs = xs + ['x']
foo()"""

def test_profile_and_display_results(qtbot, tmpdir, monkeypatch):
    """Run profiler on simple script and check that results are okay."""
    os.chdir(tmpdir.strpath)
    testfilename = tmpdir.join("test_foo.py").strpath
    with open(testfilename, "w") as f:
        f.write(TEST_SCRIPT)

    MockQMessageBox = Mock()
    monkeypatch.setattr(
        "spyder_line_profiler.widgets.lineprofiler.QMessageBox", MockQMessageBox
    )

    widget = LineProfilerWidget(None)
    qtbot.addWidget(widget)
    with qtbot.waitSignal(widget.sig_finished, timeout=10000, raising=True):
        widget.analyze(testfilename)
    MockQMessageBox.assert_not_called()

    dt = widget.datatree
    assert dt.topLevelItemCount() == 1  # number of functions profiled
    top = dt.topLevelItem(0)
    assert top.data(0, Qt.DisplayRole).startswith("foo ")
    assert top.childCount() == 6
    for i in range(6):
        assert top.child(i).data(0, Qt.DisplayRole) == i + 2  # line no
    assert top.child(2).data(1, Qt.DisplayRole) == "1"  # hits
    assert top.child(3).data(1, Qt.DisplayRole) == "1"
    assert top.child(4).data(1, Qt.DisplayRole) == "101"
    assert top.child(5).data(1, Qt.DisplayRole) == "100"
    assert float(top.child(2).data(2, Qt.DisplayRole)) >= 900  # time (ms)
    assert float(top.child(2).data(2, Qt.DisplayRole)) <= 1200
    assert float(top.child(3).data(2, Qt.DisplayRole)) <= 100
    assert float(top.child(4).data(2, Qt.DisplayRole)) <= 100
    assert float(top.child(5).data(2, Qt.DisplayRole)) <= 100
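
# A minimal way to run this module directly (an illustration; assumes pytest
# and pytest-qt are installed):
#     python -m pytest test_lineprofiler.py -v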
'''
TODO - essentially all the tests currently here are barebones sanity checks
to ensure a minimal level of functionality exists. In other words, there are
many special cases that are not being covered *yet*.
'''
import array
import codecs
import os
import re
import shutil
import string
import subprocess
import sys
import unittest
from iptest import IronPythonTestCase, is_cli, is_cpython, is_mono, is_netcoreapp, is_posix, is_linux, is_windows, run_test, skipUnlessIronPython
from iptest.misc_util import ip_supported_encodings

if is_cpython and is_linux:
    import time

class CodecTest(IronPythonTestCase):

    def test_escape_decode(self):
        # escape_decode decodes bytes to bytes, but when given a string it encodes it first with UTF-8
        self.assertEqual(codecs.escape_decode("abc€ghi🐍xyz"), codecs.escape_decode(b'abc\xe2\x82\xacghi\xf0\x9f\x90\x8dxyz'))
        value, length = codecs.escape_decode("ab\a\b\t\n\r\f\vba")
        self.assertEqual(value, b'ab\x07\x08\t\n\r\x0c\x0bba')
        self.assertEqual(length, 11)
        value, length = codecs.escape_decode("\\a")
        self.assertEqual(value, b'\x07')
        self.assertEqual(length, 2)
        value, length = codecs.escape_decode("ab\a\b\t\n\r\f\v\'\"baab\\a\\b\\t\\n\\r\\f\\v\\'\\\"baab\\\a\\\b\\\t\\\n\\\r\\\f\\\vba")
        self.assertEqual(value, b'ab\x07\x08\t\n\r\x0c\x0b\'\"baab\x07\x08\t\n\r\x0c\x0b\'\"baab\\\x07\\\x08\\\t\\\r\\\x0c\\\x0bba')
        self.assertEqual(length, 53)
        value, length = codecs.escape_decode("\\\a")
        self.assertEqual(value, b'\\\x07')
        self.assertEqual(length, 2)
        value, length = codecs.escape_decode("\\07")
        self.assertEqual(value, b'\x07')
        self.assertEqual(length, 3)
        value, length = codecs.escape_decode("\\047")
        self.assertEqual(value, b"'")
        self.assertEqual(length, 4)
        self.assertEqual(codecs.escape_decode(b"ab\nc"), (b"ab\nc", 4))
        self.assertEqual(codecs.escape_decode(b"ab\rc"), (b"ab\rc", 4))
        self.assertEqual(codecs.escape_decode(b"ab\r\nc"), (b"ab\r\nc", 5))
        self.assertEqual(codecs.escape_decode(b"ab\\\nc"), (b"abc", 5))
        self.assertEqual(codecs.escape_decode(b"ab\\\rc"), (b"ab\\\rc", 5))
        self.assertEqual(codecs.escape_decode(b"ab\\\r\\\nc"), (b"ab\\\rc", 7))
        self.assertEqual(codecs.escape_decode("ÿ".encode('latin-1')), (b'\xff', 1))
        self.assertEqual(codecs.escape_decode("\\ÿ".encode('latin-1')), (b'\\\xff', 2))
        self.assertEqual(codecs.escape_decode("\\\\ÿ".encode('latin-1')), (b'\\\xff', 3))
        if sys.implementation.name != 'cpython' or sys.version_info >= (3, 5):
            self.assertEqual(codecs.escape_decode(array.array('I', (1633771873,))), (b"aaaa", 4))

    def test_escape_decode_errors(self):
        self.assertEqual(codecs.escape_decode("abc", None), (b"abc", 3))
        self.assertEqual(b"?", codecs.escape_decode("\\x", 'replace')[0])
        self.assertEqual(b"?", codecs.escape_decode("\\x2", 'replace')[0])
        self.assertEqual(b"?I", codecs.escape_decode("\\xI", 'replace')[0])
        self.assertEqual(b"?II", codecs.escape_decode("\\xII", 'replace')[0])
        self.assertEqual(b"?I", codecs.escape_decode("\\x1I", 'replace')[0])
        self.assertEqual(b"?I1", codecs.escape_decode("\\xI1", 'replace')[0])
        self.assertEqual(b"abc", codecs.escape_decode("abc\\x", 'ignore')[0])
        self.assertEqual(b"abc", codecs.escape_decode("abc\\x2", 'ignore')[0])
        self.assertEqual(b"abcI", codecs.escape_decode("abc\\xI", 'ignore')[0])
        self.assertEqual(b"abcII", codecs.escape_decode("abc\\xII", 'ignore')[0])
        self.assertEqual(b"abcI", codecs.escape_decode("abc\\x1I", 'ignore')[0])
        self.assertEqual(b"abcI1", codecs.escape_decode("abc\\xI1", 'ignore')[0])
        self.assertRaisesRegex(ValueError, r"Trailing \\ in string", codecs.escape_decode, b"\\", None)
        self.assertRaisesRegex(ValueError, r"Trailing \\ in string", codecs.escape_decode, b"\\", 'strict')
        self.assertRaisesRegex(ValueError, r"Trailing \\ in string", codecs.escape_decode, b"\\", 'replace')
        self.assertRaisesRegex(ValueError, r"Trailing \\ in string", codecs.escape_decode, b"\\", 'ignore')
        self.assertRaisesRegex(ValueError, r"Trailing \\ in string", codecs.escape_decode, b"\\", 'non-existent')
        self.assertRaisesRegex(ValueError, r"invalid \\x escape at position 3", codecs.escape_decode, b"abc\\xii")
        self.assertRaisesRegex(ValueError, r"invalid \\x escape at position 3", codecs.escape_decode, b"abc\\x1i")
        self.assertRaisesRegex(ValueError, r"invalid \\x escape at position 3", codecs.escape_decode, b"abc\\xii", 'strict')
        self.assertRaisesRegex(ValueError, r"invalid \\x escape at position 3", codecs.escape_decode, b"abc\\x1i", 'strict')
        self.assertRaisesRegex(ValueError, r"invalid \\x escape at position 3", codecs.escape_decode, b"abc\\xii", None)
        self.assertRaisesRegex(ValueError, r"invalid \\x escape at position 3", codecs.escape_decode, b"abc\\x1i", None)
        for errors in ['backslashreplace', 'xmlcharrefreplace', 'namereplace', 'surrogateescape', 'surrogatepass', 'non-existent', '']:
            self.assertRaisesRegex(ValueError, "decoding error; unknown error handling code: " + errors, codecs.escape_decode, b"abc\\xii", errors)
            self.assertRaisesRegex(ValueError, "decoding error; unknown error handling code: " + errors, codecs.escape_decode, b"abc\\x1i", errors)
        self.assertRaises(TypeError, codecs.escape_decode, None)
        self.assertRaises(TypeError, codecs.escape_decode, None, None)
        self.assertRaises(ValueError, codecs.escape_decode, rb"\x", None)
        self.assertRaises(ValueError, codecs.escape_decode, r"\x", None)

    def test_escape_encode(self):
        # sanity checks
        value, length = codecs.escape_encode(b"abba")
        self.assertEqual(value, b"abba")
        self.assertEqual(length, 4)
        value, length = codecs.escape_encode(b"ab\a\b\t\n\r\f\vba")
        self.assertEqual(value, b'ab\\x07\\x08\\t\\n\\r\\x0c\\x0bba')
        self.assertEqual(length, 11)
        value, length = codecs.escape_encode(b"\\a")
        self.assertEqual(value, b"\\\\a")
        self.assertEqual(length, 2)
        value, length = codecs.escape_encode(bytes(range(256)))
        self.assertEqual(value, b'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f !"#$%&\\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b\\x9c\\x9d\\x9e\\x9f\\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\\xa8\\xa9\\xaa\\xab\\xac\\xad\\xae\\xaf\\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\\xc0\\xc1\\xc2\\xc3\\xc4\\xc5\\xc6\\xc7\\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\\xd0\\xd1\\xd2\\xd3\\xd4\\xd5\\xd6\\xd7\\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd\\xfe\\xff')
        self.assertEqual(length, 256)
        self.assertRaises(TypeError, codecs.escape_encode, "abc")
        self.assertRaises(TypeError, codecs.escape_encode, None)
        self.assertRaises(TypeError, codecs.escape_encode, None, None)
        self.assertEqual(codecs.escape_encode(b"\\", None), (b"\\\\", 1))
        self.assertEqual(codecs.escape_encode(b"\\", 'strict'), (b"\\\\", 1))
        self.assertRaises(TypeError, codecs.escape_encode, bytearray(b"abc"))
        self.assertRaises(TypeError, codecs.escape_encode, memoryview(b"abc"))
        self.assertRaises(TypeError, codecs.escape_encode, array.array('I', (1633771873,)))
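
    # Illustration (not part of the original suite): escape_encode and
    # escape_decode are inverses on bytes, e.g.
    #     codecs.escape_decode(codecs.escape_encode(b"ab\x07\\z")[0])[0] == b"ab\x07\\z"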

    def test_charmap_decode(self):
        self.assertEqual(codecs.charmap_decode(b""), ("", 0))
        self.assertEqual(codecs.charmap_decode(b"", 'strict', {}), ("", 0))
        self.assertEqual(codecs.charmap_decode(b"", 'strict', ""), ("", 0))
        # Default map is Latin-1
        self.assertEqual(codecs.charmap_decode(b"abc\xff"), ("abcÿ", 4))
        self.assertEqual(codecs.charmap_decode(b"abc\xff", 'strict'), ("abcÿ", 4))
        # Ignore errors
        self.assertEqual(codecs.charmap_decode(b"abc", "ignore", {}), ("", 3))
        charmap = {ord(c): None for c in "abcdefgh"}
        self.assertEqual(codecs.charmap_decode(b"abc", "ignore", charmap), ("", 3))
        # Replace errors
        self.assertEqual(codecs.charmap_decode(b"abc", 'replace', {}), ("\ufffd" * 3, 3))
        charmap = {ord(c): None for c in "abcdefgh"}
        self.assertEqual(codecs.charmap_decode(b"abc", 'replace', charmap), ("\ufffd" * 3, 3))
        # Dict[int, int] (byte value => codepoint)
        charmap = {ord(c): ord(c.upper()) for c in "abcdefgh"}
        self.assertEqual(codecs.charmap_decode(b"abc", 'strict', charmap), ("ABC", 3))
        # Dict[int, str]
        charmap = {ord(c): 2*c.upper() for c in "abcdefgh"}
        self.assertEqual(codecs.charmap_decode(b"abc", 'strict', charmap), ("AABBCC", 3))
        # Non-BMP character
        charmap = {ord('p'): "\U0001F40D"}
        self.assertEqual(codecs.charmap_decode(b"p", 'strict', charmap), ("🐍", 1))
        charmap = {ord('p'): 0x1F40D}
        self.assertEqual(codecs.charmap_decode(b"p", 'strict', charmap), ("🐍", 1))
        # Using a string mapping
        self.assertEqual(codecs.charmap_decode(b'\x02\x01\x00', 'strict', "abc"), ('cba', 3))
        # Full-size string mapping
        charmap = "".join(chr(c) for c in range(255, -1, -1))
        self.assertEqual(codecs.charmap_decode(b"ABC", 'strict', charmap), ('¾½¼', 3))
        # Oversize string mapping
        charmap = "".join(chr(c) for c in range(255, -1, -1)) + "abc"
        self.assertEqual(codecs.charmap_decode(b"ABC", 'strict', charmap), ('¾½¼', 3))
        # Missing key
        self.assertRaisesRegex(UnicodeDecodeError, "^'charmap' codec can't decode byte 0x61 in position 0: character maps to <undefined>",
            codecs.charmap_decode, b"abc", 'strict', {})
        # Bytes key is not recognized, it must be an int (character ordinal)
        self.assertRaisesRegex(UnicodeDecodeError, "^'charmap' codec can't decode byte 0x61 in position 0: character maps to <undefined>",
            codecs.charmap_decode, b"abc", 'strict', {bytes(c, 'ascii'): ord(c.upper()) for c in "abcdefgh"})
        # Explicit None as value mapping
        self.assertRaisesRegex(UnicodeDecodeError, "^'charmap' codec can't decode byte 0x61 in position 0: character maps to <undefined>",
            codecs.charmap_decode, b"abc", 'strict', {ord(c): None for c in "abcdefgh"})
        self.assertRaisesRegex(LookupError, "^unknown error handler name 'non-existent'$",
            codecs.charmap_decode, b"abc", 'non-existent', {})
        # Unsupported: Dict[int, bytes]
        self.assertRaisesRegex(TypeError, "^character mapping must return integer, None or str",
            codecs.charmap_decode, b"abc", 'strict', {ord(c): bytes(c.upper(), 'ascii') for c in "abcdefgh"})
        # Negative values
        # Bug in CPython: range(0x%lx)
        self.assertRaisesRegex(TypeError, r"character mapping must be in range\(0x.*\)",
            codecs.charmap_decode, b"abc", 'strict', {ord(c): -ord(c) for c in "abcdefgh"})
        # Values outside of bytes range
        self.assertRaisesRegex(TypeError, r"character mapping must be in range\(0x.*\)",
            codecs.charmap_decode, b"abc", 'strict', {ord(c): ord(c) + 0x110000 for c in "abcdefgh"})
        # Wrong number type format
        self.assertRaisesRegex(TypeError, "^character mapping must return integer, None or str",
            codecs.charmap_decode, b"a", "strict", {ord('a'): 2.0})
        # Invalid character in dict
        self.assertRaisesRegex(UnicodeDecodeError, "^'charmap' codec can't decode byte 0x01 in position 0: character maps to <undefined>",
            codecs.charmap_decode, b"\x01", 'strict', {1: "\uFFFE"})
        self.assertRaisesRegex(UnicodeDecodeError, "^'charmap' codec can't decode byte 0x01 in position 0: character maps to <undefined>",
            codecs.charmap_decode, b"\x01", 'strict', {1: 0xFFFE})
        # Too short charmap
        self.assertRaisesRegex(UnicodeDecodeError, "^'charmap' codec can't decode byte 0x01 in position 0: character maps to <undefined>",
            codecs.charmap_decode, b"\x01", 'strict', "x")
        # Empty charmap
        self.assertRaisesRegex(UnicodeDecodeError, "^'charmap' codec can't decode byte 0x00 in position 0: character maps to <undefined>",
            codecs.charmap_decode, b"\0", 'strict', "")
        # Invalid character in charmap string
        self.assertRaisesRegex(UnicodeDecodeError, "^'charmap' codec can't decode byte 0x01 in position 0: character maps to <undefined>",
            codecs.charmap_decode, b"\x01", 'strict', "x\uFFFEz")
        # None input
        self.assertRaises(TypeError, codecs.charmap_decode, None)
        self.assertRaises(TypeError, codecs.charmap_decode, None, None)
        self.assertRaises(TypeError, codecs.charmap_decode, None, None, None)
        self.assertEqual(codecs.charmap_decode(b"", None), ("", 0))
        self.assertEqual(codecs.charmap_decode(b"", None, None), ("", 0))
        self.assertRaises(UnicodeDecodeError, codecs.charmap_decode, b"\0", None, {})
        self.assertRaises(UnicodeDecodeError, codecs.charmap_decode, b"\0", None, "")
        # Bytes-like input
        self.assertEqual(codecs.charmap_decode(array.array('I', (1633771873,))), ("aaaa", 4))
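
    # Quick illustration (not part of the original suite): a dict charmap maps
    # byte values to replacement characters, so
    #     codecs.charmap_decode(b"hi", 'strict', {ord('h'): 'H', ord('i'): 'I'})
    # returns ('HI', 2).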

    def test_decode(self):
        self.assertEqual(codecs.decode(b"abc"), "abc")
        self.assertEqual(codecs.decode(array.array('I', (1633771873,))), "aaaa")
        self.assertRaises(TypeError, codecs.decode, "abc")
        self.assertRaises(TypeError, codecs.decode, None)
        self.assertRaises(TypeError, codecs.decode, None, None)
        self.assertRaises(TypeError, codecs.decode, None, None, None)
        self.assertRaises(TypeError, codecs.decode, b"abc", None)
        self.assertRaises(TypeError, codecs.decode, b"abc", None, None)
        self.assertRaises(TypeError, codecs.decode, b"abc", 'utf-8', None)
        self.assertRaises(TypeError, codecs.decode, None, 'utf-8')
        self.assertRaises(TypeError, codecs.decode, b"abc", None, 'strict')

    def test_encode(self):
        self.assertEqual(codecs.encode("abc"), b"abc")
        self.assertRaises(TypeError, codecs.encode, b"abc")
        self.assertRaises(TypeError, codecs.encode, None)
        self.assertRaises(TypeError, codecs.encode, None, None)
        self.assertRaises(TypeError, codecs.encode, None, None, None)
        self.assertRaises(TypeError, codecs.encode, "abc", None)
        self.assertRaises(TypeError, codecs.encode, "abc", None, None)
        self.assertRaises(TypeError, codecs.encode, "abc", "utf-8", None)
        self.assertRaises(TypeError, codecs.encode, None, "utf-8")
        self.assertRaises(TypeError, codecs.encode, "abc", None, 'strict')

    def test_raw_unicode_escape_decode(self):
        new_str, num_processed = codecs.raw_unicode_escape_decode("abc")
        self.assertEqual(new_str, "abc")
        self.assertEqual(num_processed, 3)
        new_str, num_processed = codecs.raw_unicode_escape_decode(b"abc")
        self.assertEqual(new_str, "abc")
        self.assertEqual(num_processed, 3)
        new_str, num_processed = codecs.raw_unicode_escape_decode("abc\\u20ACxyz")
        self.assertEqual(new_str, "abc€xyz")
        self.assertEqual(num_processed, 12)
        new_str, num_processed = codecs.raw_unicode_escape_decode("abc\\U0001F40Dxyz\\")
        self.assertEqual(new_str, "abc🐍xyz\\")
        self.assertEqual(num_processed, 17)
        self.assertEqual(codecs.raw_unicode_escape_decode(array.array('I', (1633771873,))), ("aaaa", 4))

    def test_raw_unicode_escape_decode_errors(self):
        with self.assertRaises(UnicodeDecodeError) as cm:
            codecs.raw_unicode_escape_decode("abc\\u20klm\xffxyz\u20ac")  # Unicode string
        self.assertEqual(cm.exception.encoding, 'rawunicodeescape')
        self.assertTrue(cm.exception.reason.startswith("truncated \\uXXXX"))
        self.assertEqual(cm.exception.start, 3)
        self.assertEqual(cm.exception.end, 7)
        self.assertEqual(cm.exception.object, b"abc\\u20klm\xc3\xbfxyz\xe2\x82\xac")  # in UTF-8
        with self.assertRaises(UnicodeDecodeError) as cm:
            codecs.raw_unicode_escape_decode("abc\\U0001F44xyz")
        self.assertEqual(cm.exception.encoding, 'rawunicodeescape')
        if is_cpython and sys.version_info < (3, 6):
            self.assertEqual(cm.exception.reason, "truncated \\uXXXX")
        else:
            self.assertEqual(cm.exception.reason, "truncated \\UXXXXXXXX escape")
        self.assertEqual(cm.exception.start, 3)
        self.assertEqual(cm.exception.end, 12)
        self.assertEqual(cm.exception.object, b"abc\\U0001F44xyz")
        with self.assertRaises(UnicodeDecodeError) as cm:
            codecs.raw_unicode_escape_decode("abc\\U00110011xyz")
        self.assertEqual(cm.exception.encoding, 'rawunicodeescape')
        self.assertEqual(cm.exception.reason, "\\Uxxxxxxxx out of range")
        self.assertEqual(cm.exception.start, 3)
        self.assertEqual(cm.exception.end, 13)
        self.assertEqual(cm.exception.object, b"abc\\U00110011xyz")
        new_str, num_processed = codecs.raw_unicode_escape_decode(b"abc\\u20klm\\U0001F44nop\\U00110011xyz", 'ignore')
        self.assertEqual(new_str, "abcklmnopxyz")
        self.assertEqual(num_processed, 35)
        new_str, num_processed = codecs.raw_unicode_escape_decode(b"abc\\u20klm\\U0001F44nop\\U00110011xyz", 'replace')
        self.assertEqual(new_str, "abc\uFFFDklm\uFFFDnop\uFFFDxyz")
        self.assertEqual(num_processed, 35)
        self.assertRaises(TypeError, codecs.raw_unicode_escape_decode, None)
        self.assertRaises(TypeError, codecs.raw_unicode_escape_decode, None, None)
        self.assertEqual(codecs.raw_unicode_escape_decode(b"", None), ("", 0))
        self.assertRaises(UnicodeDecodeError, codecs.raw_unicode_escape_decode, b"\\u", None)

    def test_raw_unicode_escape_decode_errors_custom(self):
        def test_encoding_error_plushandler(ue):
            return ("+" * (ue.end - ue.start), ue.end)
        codecs.register_error('test_plus', test_encoding_error_plushandler)
        self.assertEqual(codecs.raw_unicode_escape_decode(b"abc\\uxyz", 'test_plus'), ("abc++xyz", 8))

    def test_raw_unicode_escape_encode(self):
        new_str, num_processed = codecs.raw_unicode_escape_encode("abc")
        self.assertEqual(new_str, b'abc')
        self.assertEqual(num_processed, 3)
        new_str, num_processed = codecs.raw_unicode_escape_encode("\\a\tbc\r\n")
        self.assertEqual(new_str, b'\\a\tbc\r\n')
        self.assertEqual(num_processed, 7)
        new_str, num_processed = codecs.raw_unicode_escape_encode("=\0\x7f\x80¡ÿ!=")
        self.assertEqual(new_str, b'=\0\x7f\x80\xa1\xff!=')
        self.assertEqual(num_processed, 8)
        new_str, num_processed = codecs.raw_unicode_escape_encode("=€=")
        self.assertEqual(new_str, b'=\\u20ac=')
        self.assertEqual(num_processed, 3)
        new_str, num_processed = codecs.raw_unicode_escape_encode("=🜋=")
        self.assertEqual(new_str, b'=\\U0001f70b=')
        if is_cli:  # surrogate pair processed
            self.assertEqual(num_processed, 4)
        else:
            self.assertEqual(num_processed, 3)
        self.assertRaises(TypeError, codecs.raw_unicode_escape_encode, b"aaaa")
        self.assertRaises(TypeError, codecs.raw_unicode_escape_encode, None)
        self.assertRaises(TypeError, codecs.raw_unicode_escape_encode, None, None)
        self.assertEqual(codecs.raw_unicode_escape_encode("", None), (b"", 0))

    def test_unicode_escape_decode(self):
        new_str, num_processed = codecs.unicode_escape_decode("abc")
        self.assertEqual(new_str, "abc")
        self.assertEqual(num_processed, 3)
        new_str, num_processed = codecs.unicode_escape_decode(b"abc")
        self.assertEqual(new_str, "abc")
        self.assertEqual(num_processed, 3)
        new_str, num_processed = codecs.unicode_escape_decode("abc\\u20ACxyz")
        self.assertEqual(new_str, "abc€xyz")
        self.assertEqual(num_processed, 12)
        new_str, num_processed = codecs.unicode_escape_decode("abc\\U0001F40Dxyz")
        self.assertEqual(new_str, "abc🐍xyz")
        self.assertEqual(num_processed, 16)
        new_str, num_processed = codecs.unicode_escape_decode("=\\u20AC=\\15\\n=\\xFF=\\\\\\=\\N{euro sign}=")
        self.assertEqual(new_str, "=€=\r\n=ÿ=\\\\=€=")
        self.assertEqual(num_processed, 37)
        new_str, num_processed = codecs.unicode_escape_decode("\\\n")
        self.assertEqual(new_str, "")
        self.assertEqual(num_processed, 2)
        new_str, num_processed = codecs.unicode_escape_decode("\\\r\\\n")
        self.assertEqual(new_str, "\\\r")
        self.assertEqual(num_processed, 4)
        self.assertEqual(codecs.unicode_escape_decode(array.array('I', (1633771873,))), ("aaaa", 4))

    def test_unicode_escape_decode_errors(self):
        def check(data, msg, start, end, ex_data):
            with self.assertRaises(UnicodeDecodeError) as cm:
                codecs.unicode_escape_decode(data)
            self.assertEqual(cm.exception.encoding, 'unicodeescape')
            self.assertEqual(cm.exception.reason, msg)
            self.assertEqual(cm.exception.start, start)
            self.assertEqual(cm.exception.end, end)
            self.assertEqual(cm.exception.object, ex_data)

        test_data = [
            ("abc\\xyz", "truncated \\xXX escape", 3, 5, b"abc\\xyz"),  # str to bytes
            ("abc\\x0xyz", "truncated \\xXX escape", 3, 6, b"abc\\x0xyz"),
            ("abc\\u20klm\xffxyz\u20ac", "truncated \\uXXXX escape", 3, 7, b"abc\\u20klm\xc3\xbfxyz\xe2\x82\xac"),  # Unicode to UTF-8
            ("abc\\U0001F44xyz", "truncated \\UXXXXXXXX escape", 3, 12, b"abc\\U0001F44xyz"),
            ("abc\\U00110011xyz", "illegal Unicode character", 3, 13, b"abc\\U00110011xyz"),
            ("abc\\N{EURO}xyz", "unknown Unicode character name", 3, 11, b"abc\\N{EURO}xyz"),
            ("abc\\Nxyz", "malformed \\N character escape", 3, 5, b"abc\\Nxyz"),
            ("abc\\N", "malformed \\N character escape", 3, 5, b"abc\\N"),
            ("abc\\N{xyz", "malformed \\N character escape", 3, 9, b"abc\\N{xyz"),
            ("abc\\N{", "malformed \\N character escape", 3, 6, b"abc\\N{"),
            ("abc\\N{}xyz", "malformed \\N character escape", 3, 6, b"abc\\N{}xyz"),
            ("abc\\N{}", "malformed \\N character escape", 3, 6, b"abc\\N{}"),
            ("abc\\", "\\ at end of string", 3, 4, b"abc\\"),
        ]
        for params in test_data:
            check(*params)

        self.assertRaises(TypeError, codecs.unicode_escape_decode, None)
        self.assertRaises(TypeError, codecs.unicode_escape_decode, None, None)
        self.assertEqual(codecs.unicode_escape_decode(b"", None), ("", 0))
        self.assertRaises(UnicodeDecodeError, codecs.unicode_escape_decode, b"\\u", None)

    def test_unicode_escape_decode_errors_ignore(self):
        test_data = [
            (b"abc\\xyz", "abcyz"),
            (b"abc\\x0xyz", "abcxyz"),
            (b"abc\\u20klm\xffxyz", "abcklm\xffxyz"),
            (b"abc\\U0001F44xyz", "abcxyz"),
            (b"abc\\U00110011xyz", "abcxyz"),
            (b"abc\\N{EURO}xyz", "abcxyz"),
            (b"abc\\Nxyz", "abcxyz"),
            (b"abc\\N", "abc"),
            (b"abc\\N{xyz", "abc"),
            (b"abc\\N{", "abc"),
            (b"abc\\N{}xyz", "abc}xyz"),
            (b"abc\\N{}", "abc}"),
            (b"abc\\", "abc"),
        ]
        for sample in test_data:
            self.assertEqual(codecs.unicode_escape_decode(sample[0], 'ignore')[0], sample[1])

    def test_unicode_escape_decode_errors_replace(self):
        test_data = [
            (b"abc\\xyz", "abc�yz"),
            (b"abc\\x0xyz", "abc�xyz"),
            (b"abc\\u20klm\xffxyz", "abc�klm\xffxyz"),
            (b"abc\\U0001F44xyz", "abc�xyz"),
            (b"abc\\U00110011xyz", "abc�xyz"),
            (b"abc\\N{EURO}xyz", "abc�xyz"),
            (b"abc\\Nxyz", "abc�xyz"),
            (b"abc\\N", "abc�"),
            (b"abc\\N{xyz", "abc�"),
            (b"abc\\N{", "abc�"),
            (b"abc\\N{}xyz", "abc�}xyz"),
            (b"abc\\N{}", "abc�}"),
            (b"abc\\", "abc�"),
        ]
        for sample in test_data:
            self.assertEqual(codecs.unicode_escape_decode(sample[0], 'replace')[0], sample[1])

    def test_unicode_escape_decode_errors_custom(self):
        def test_encoding_error_starhandler(ue):
            return ("*" * (ue.end - ue.start), ue.end)
        codecs.register_error('test_star', test_encoding_error_starhandler)
        test_data = [
            (b"abc\\xyz", "abc**yz"),
            (b"abc\\x0xyz", "abc***xyz"),
            (b"abc\\u20klm\xffxyz", "abc****klm\xffxyz"),
            (b"abc\\U0001F44xyz", "abc*********xyz"),
            (b"abc\\U00110011xyz", "abc**********xyz"),
            (b"abc\\N{EURO}xyz", "abc********xyz"),
            (b"abc\\Nxyz", "abc**xyz"),
            (b"abc\\N", "abc**"),
            (b"abc\\N{xyz", "abc******"),
            (b"abc\\N{", "abc***"),
            (b"abc\\N{}xyz", "abc***}xyz"),
            (b"abc\\N{}", "abc***}"),
            (b"abc\\", "abc*"),
        ]
        for sample in test_data:
            self.assertEqual(codecs.unicode_escape_decode(sample[0], 'test_star')[0], sample[1], sample[0])

    def test_unicode_escape_encode(self):
        new_str, num_processed = codecs.unicode_escape_encode("\\a\tbc\r\n")
        self.assertEqual(new_str, b'\\\\a\\tbc\\r\\n')
        self.assertEqual(num_processed, 7)
        new_str, num_processed = codecs.unicode_escape_encode("=\0\x7f\x80¡ÿ!=")
        self.assertEqual(new_str, b'=\\x00\\x7f\\x80\\xa1\\xff!=')
        self.assertEqual(num_processed, 8)
        new_str, num_processed = codecs.unicode_escape_encode("=€=")
        self.assertEqual(new_str, b'=\\u20ac=')
        self.assertEqual(num_processed, 3)
        new_str, num_processed = codecs.unicode_escape_encode("=🜋=")
        self.assertEqual(new_str, b'=\\U0001f70b=')
        if is_cli:  # surrogate pair processed
            self.assertEqual(num_processed, 4)
        else:
            self.assertEqual(num_processed, 3)
        self.assertRaises(TypeError, codecs.unicode_escape_encode, b"aaaa")
        self.assertRaises(TypeError, codecs.unicode_escape_encode, None)
        self.assertRaises(TypeError, codecs.unicode_escape_encode, None, None)
        self.assertEqual(codecs.unicode_escape_encode("", None), (b"", 0))

    def test_utf_7_decode(self):
        # sanity
        new_str, num_processed = codecs.utf_7_decode(b"abc")
        self.assertEqual(new_str, 'abc')
        self.assertEqual(num_processed, 3)
        self.assertEqual(codecs.utf_7_decode(array.array('I', (1633771873,))), ("aaaa", 4))
        self.assertRaises(TypeError, codecs.utf_7_decode, "abc")
        self.assertRaises(TypeError, codecs.utf_7_decode, None)
        self.assertRaises(TypeError, codecs.utf_7_decode, None, None)
        self.assertEqual(codecs.utf_7_decode(b"abc", None), ("abc", 3))

    def test_utf7_decode_incremental(self):
        b = "abc\u20acxyz".encode('utf-7')
        b += "abc\u20ad\u20aexyz".encode('utf-7')
        b += "abc\u20af\u20b0\u20b1xyz".encode('utf-7')
        # expected results generated by CPython 3.4
        expected = [
            ('', 0),
            ('a', 1),
            ('ab', 2),
            ('abc', 3),
            ('abc', 3),
            ('abc', 3),
            ('abc', 3),
            ('abc', 3),
            ('abc€', 8),
            ('abc€x', 9),
            ('abc€xy', 10),
            ('abc€xyz', 11),
            ('abc€xyza', 12),
            ('abc€xyzab', 13),
            ('abc€xyzabc', 14),
            ('abc€xyzabc', 14),
            ('abc€xyzabc', 14),
            ('abc€xyzabc', 14),
            ('abc€xyzabc', 14),
            ('abc€xyzabc', 14),
            ('abc€xyzabc', 14),
            ('abc€xyzabc', 14),
            ('abc€xyzabc₭₮', 22),
            ('abc€xyzabc₭₮x', 23),
            ('abc€xyzabc₭₮xy', 24),
            ('abc€xyzabc₭₮xyz', 25),
            ('abc€xyzabc₭₮xyza', 26),
            ('abc€xyzabc₭₮xyzab', 27),
            ('abc€xyzabc₭₮xyzabc', 28),
            ('abc€xyzabc₭₮xyzabc', 28),
            ('abc€xyzabc₭₮xyzabc', 28),
            ('abc€xyzabc₭₮xyzabc', 28),
            ('abc€xyzabc₭₮xyzabc', 28),
            ('abc€xyzabc₭₮xyzabc', 28),
            ('abc€xyzabc₭₮xyzabc', 28),
            ('abc€xyzabc₭₮xyzabc', 28),
            ('abc€xyzabc₭₮xyzabc', 28),
            ('abc€xyzabc₭₮xyzabc', 28),
            ('abc€xyzabc₭₮xyzabc₯₰₱', 38),
            ('abc€xyzabc₭₮xyzabc₯₰₱x', 39),
            ('abc€xyzabc₭₮xyzabc₯₰₱xy', 40),
            ('abc€xyzabc₭₮xyzabc₯₰₱xyz', 41)
        ]
        for i in range(len(b) + 1):
            res = codecs.utf_7_decode(b[:i])
            self.assertEqual(res, expected[i])

    def test_utf_7_encode(self):
        # sanity
        new_str, num_processed = codecs.utf_7_encode("abc")
        self.assertEqual(new_str, b'abc')
        self.assertEqual(num_processed, 3)
        self.assertRaises(TypeError, codecs.utf_7_encode, b"abc")
        self.assertRaises(TypeError, codecs.utf_7_encode, None)
        self.assertRaises(TypeError, codecs.utf_7_encode, None, None)
        self.assertEqual(codecs.utf_7_encode("abc", None), (b"abc", 3))

    def test_ascii_decode(self):
        # sanity
        new_str, num_processed = codecs.ascii_decode(b"abc")
        self.assertEqual(new_str, 'abc')
        self.assertEqual(num_processed, 3)
        self.assertEqual(codecs.ascii_decode(b"abc"), ("abc", 3))
        self.assertEqual(codecs.ascii_decode(b"abc", None), ("abc", 3))
        self.assertEqual(codecs.ascii_decode(array.array('I', (1633771873,))), ("aaaa", 4))
        self.assertRaises(TypeError, codecs.ascii_decode, "abc")
        self.assertRaises(TypeError, codecs.ascii_decode, None)
        self.assertRaises(UnicodeDecodeError, codecs.ascii_decode, b"\xff", None)

    def test_ascii_encode(self):
        # sanity
        self.assertEqual(codecs.ascii_encode("abc"), (b"abc", 3))
        self.assertEqual(codecs.ascii_encode("abc", None), (b"abc", 3))
        self.assertRaises(TypeError, codecs.ascii_encode, b"abc")
        self.assertRaises(TypeError, codecs.ascii_encode, None)
        self.assertRaises(TypeError, codecs.ascii_encode, b"")
        self.assertRaises(UnicodeEncodeError, codecs.ascii_encode, "\u0100", None)

    def test_latin_1_decode(self):
        # sanity
        new_str, num_processed = codecs.latin_1_decode(b"abc")
        self.assertEqual(new_str, 'abc')
        self.assertEqual(num_processed, 3)
        self.assertEqual(codecs.latin_1_decode(array.array('I', (1633771873,))), ("aaaa", 4))
        self.assertRaises(TypeError, codecs.latin_1_decode, "abc")
        self.assertRaises(TypeError, codecs.latin_1_decode, None)
        self.assertRaises(TypeError, codecs.latin_1_decode, None, None)

    def test_latin_1_encode(self):
        # sanity
        new_str, num_processed = codecs.latin_1_encode("abc")
        self.assertEqual(new_str, b'abc')
        self.assertEqual(num_processed, 3)
        # so many ways to express latin 1...
        for x in ['iso-8859-1', 'iso8859-1', '8859', 'cp819', 'latin', 'latin1', 'L1']:
            self.assertEqual('abc'.encode(x), b'abc')
        self.assertRaises(TypeError, codecs.latin_1_encode, b"abc")
        self.assertRaises(TypeError, codecs.latin_1_encode, None)
        self.assertRaises(TypeError, codecs.latin_1_encode, None, None)
        self.assertRaises(UnicodeEncodeError, codecs.latin_1_encode, "\u0100", None)

    def test_error_handlers(self):
        ude = UnicodeDecodeError('dummy', b"abcdefgh", 3, 5, "decoding testing purposes")
        uee = UnicodeEncodeError('dummy', "abcdefgh", 2, 6, "encoding testing purposes")
        ute = UnicodeTranslateError("abcdefgh", 2, 6, "translating testing purposes")
        unicode_data = "ab\xff\u20ac\U0001f40d\0\t\r\nz"
        uee_unicode = UnicodeEncodeError('dummy', unicode_data, 2, len(unicode_data), "encoding testing purposes")

        strict = codecs.lookup_error('strict')
        self.assertEqual(strict, codecs.strict_errors)
        with self.assertRaises(UnicodeDecodeError) as cm:
            strict(ude)
        self.assertEqual(cm.exception, ude)
        with self.assertRaises(UnicodeEncodeError) as cm:
            strict(uee)
        self.assertEqual(cm.exception, uee)
        with self.assertRaises(UnicodeTranslateError) as cm:
            strict(ute)
        self.assertEqual(cm.exception, ute)
        self.assertRaisesRegex(TypeError, "codec must pass exception instance", strict, None)
        self.assertRaisesRegex(TypeError, r"\w+\(\) takes exactly (one|1) argument \(0 given\)", strict)
        self.assertRaisesRegex(TypeError, r"\w+\(\) takes exactly (one|1) argument \(2 given\)", strict, ude, uee)
        self.assertRaises(LookupError, codecs.lookup_error, "STRICT")

        ignore = codecs.lookup_error('ignore')
        self.assertEqual(ignore, codecs.ignore_errors)
        self.assertEqual(ignore(ude), ("", 5))
        self.assertEqual(ignore(uee), ("", 6))
        self.assertEqual(ignore(ute), ("", 6))
        self.assertEqual(ignore(uee_unicode), ("", uee_unicode.end))

        replace = codecs.lookup_error('replace')
        self.assertEqual(replace, codecs.replace_errors)
        self.assertEqual(replace(ude), ("�", 5))
        self.assertEqual(replace(uee), ("????", 6))
        self.assertEqual(replace(ute), ("����", 6))
        self.assertEqual(replace(uee_unicode), ("?" * (uee_unicode.end - uee_unicode.start), uee_unicode.end))

        backslashreplace = codecs.lookup_error('backslashreplace')
        self.assertEqual(backslashreplace, codecs.backslashreplace_errors)
        self.assertRaisesRegex(TypeError, "don't know how to handle UnicodeDecodeError in error callback", backslashreplace, ude)
        self.assertEqual(backslashreplace(uee), (r"\x63\x64\x65\x66", 6))
        self.assertRaisesRegex(TypeError, "don't know how to handle UnicodeTranslateError in error callback", backslashreplace, ute)
        self.assertEqual(backslashreplace(uee_unicode), (r"\xff\u20ac\U0001f40d\x00\x09\x0d\x0a\x7a", uee_unicode.end))

        xmlcharrefreplace = codecs.lookup_error('xmlcharrefreplace')
        self.assertEqual(xmlcharrefreplace, codecs.xmlcharrefreplace_errors)
        self.assertRaisesRegex(TypeError, "don't know how to handle UnicodeDecodeError in error callback", xmlcharrefreplace, ude)
        self.assertEqual(xmlcharrefreplace(uee), ("&#99;&#100;&#101;&#102;", 6))
        self.assertRaisesRegex(TypeError, "don't know how to handle UnicodeTranslateError in error callback", xmlcharrefreplace, ute)
        self.assertEqual(xmlcharrefreplace(uee_unicode), ("&#255;&#8364;&#128013;&#0;&#9;&#13;&#10;&#122;", uee_unicode.end))

    def test_error_handlers_surrogateescape(self):
        surrogateescape = codecs.lookup_error('surrogateescape')

        # Decoding with surrogateescape
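        # (Per PEP 383, each undecodable byte in 0x80-0xFF is smuggled into the
        # string as a lone low surrogate in U+DC80-U+DCFF; bytes below 0x80
        # cannot be smuggled and remain errors, as the cases below verify.)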
        self.assertEqual(surrogateescape(UnicodeDecodeError('dummy', b"a\xff\x7fz", 1, 2, "encoding testing purposes")), ("\udcff", 2))
        self.assertEqual(surrogateescape(UnicodeDecodeError('dummy', b"a\xff\x7fz", 1, 3, "encoding testing purposes")), ("\udcff", 2))
        self.assertEqual(surrogateescape(UnicodeDecodeError('dummy', b"a\xff\x80z", 1, 3, "encoding testing purposes")), ("\udcff\udc80", 3))
        self.assertEqual(surrogateescape(UnicodeDecodeError('dummy', b"a\xff\x80\x81z", 1, 3, "encoding testing purposes")), ("\udcff\udc80", 3))
        ude = UnicodeDecodeError('dummy', b"abcd", 1, 3, "ASCII bytes cannot be smuggled (PEP 383)")
        with self.assertRaises(UnicodeDecodeError) as cm:
            surrogateescape(ude)
        self.assertEqual(cm.exception, ude)
        ude = UnicodeDecodeError('dummy', b"a\x7f\xffz", 1, 3, "ASCII bytes cannot be smuggled, 0x7f is within ASCII range")
        with self.assertRaises(UnicodeDecodeError) as cm:
            surrogateescape(ude)
        self.assertEqual(cm.exception, ude)
        ude = UnicodeDecodeError('utf-16-le', b"a\x00\x00\xdcz\x00", 2, 4, r"although \x00\xdc is \udc00 in utf-16-le, it contains ASCII byte \x00")
        with self.assertRaises(UnicodeDecodeError) as cm:
            surrogateescape(ude)
        self.assertEqual(cm.exception, ude)
        ude = UnicodeDecodeError('utf-16-le', b"a\x00\xff\xdcz\x00", 2, 4, r"\xff\xdc is \udcff in utf-16-le, and each byte is being escaped individually")
        self.assertEqual(surrogateescape(ude), ("\udcff\udcdc", 4))

        # Encoding with surrogateescape
        self.assertEqual(surrogateescape(UnicodeEncodeError('dummy', "a\udcff\udc7fz", 1, 2, "encoding testing purposes")), (b"\xff", 2))
        self.assertEqual(surrogateescape(UnicodeEncodeError('dummy', "a\udcff\udc80z", 1, 3, "encoding testing purposes")), (b"\xff\x80", 3))
        self.assertEqual(surrogateescape(UnicodeEncodeError('dummy', "a\udcff\udc80\udc81z", 1, 3, "encoding testing purposes")), (b"\xff\x80", 3))
        uee = UnicodeEncodeError('dummy', "abcd", 1, 3, "no low surrogates to un-escape")
        with self.assertRaises(UnicodeEncodeError) as cm:
            surrogateescape(uee)
        self.assertEqual(cm.exception, uee)
        uee = UnicodeEncodeError('dummy', "\x80\x81\x82\x83", 1, 3, "no low surrogates to un-escape")
        with self.assertRaises(UnicodeEncodeError) as cm:
            surrogateescape(uee)
        self.assertEqual(cm.exception, uee)
        uee = UnicodeEncodeError('dummy', "a\udcff\udc7fz", 1, 3, r"ASCII bytes cannot be smuggled, \udc7f carries ASCII byte \x7f")
        with self.assertRaises(UnicodeEncodeError) as cm:
            surrogateescape(uee)
        self.assertEqual(cm.exception, uee)
        ude = UnicodeEncodeError('utf-16-le', "a\udc00\udcdcz", 1, 3, r"ASCII bytes cannot be smuggled, \udc00 carries ASCII byte \x00")
        with self.assertRaises(UnicodeEncodeError) as cm:
            surrogateescape(ude)
        self.assertEqual(cm.exception, ude)
        ude = UnicodeEncodeError('utf-16-le', "a\udcff\udcdcz", 1, 3, "encoding testing purposes")
        self.assertEqual(surrogateescape(ude), (b"\xff\xdc", 3))
        ude = UnicodeEncodeError('utf-16-le', "a\udcff\udcdcz", 1, 2, "one byte at a time for widechar encoding")
        self.assertEqual(surrogateescape(ude), (b"\xff", 2))

        # Translating with surrogateescape
        ute = UnicodeTranslateError("abcd", 1, 3, "UnicodeTranslateError not supported with surrogateescape")
        self.assertRaisesRegex(TypeError, "don't know how to handle UnicodeTranslateError in error callback", surrogateescape, ute)

    def test_error_handlers_surrogatepass(self):
        surrogatepass = codecs.lookup_error('surrogatepass')

        # Decoding with surrogatepass
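        # (surrogatepass decodes/encodes lone surrogates using the raw rules of
        # the codec named in the exception; unknown or misspelled codec names
        # fall back to UTF-8, as the cases below exercise.)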
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-8', b'a\xed\xb7\xaez', 1, 4, "encoding testing purposes")), ("\uddee", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('dummy', b'a\xed\xb7\xaez', 1, 4, "for unrecognized encoding fall back to utf-8")), ("\uddee", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-16-lex', b'a\xed\xb7\xaez', 1, 4, "for misspelled encoding fall back to utf-8")), ("\uddee", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-16--le', b'a\xed\xb7\xaez', 1, 4, "for misspelled encoding fall back to utf-8")), ("\uddee", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf--16-le', b'a\xed\xb7\xaez', 1, 4, "for misspelled encoding fall back to utf-8")), ("\uddee", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-8', b'a\xed\xb7\xae\xed\xb7\xbfz', 1, 4, "only one surrogate at a time")), ("\uddee", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-8', b'a\xed\xb7\xae\xed\xb7\xbfz', 1, 7, "only one surrogate at a time")), ("\uddee", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-8', b'a\xed\xb7\xae\xed\xb7\xbfz', 4, 7, "only one surrogate at a time")), ("\uddff", 7))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-16', b"a\0\xff\xdcz\0", 2, 4, "various names for UTF-16")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf16', b"a\0\xff\xdcz\0", 2, 4, "various names for UTF-16")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-16-le', b"a\0\xff\xdcz\0", 2, 4, "various names for UTF-16")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('Utf-16LE', b"a\0\xff\xdcz\0", 2, 4, "various names for UTF-16")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('Utf_16LE', b"a\0\xff\xdcz\0", 2, 4, "various names for UTF-16")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('Utf_16_LE', b"a\0\xff\xdcz\0", 2, 4, "various names for UTF-16")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('Utf16Le', b"a\0\xff\xdcz\0", 2, 4, "various names for UTF-16")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-16-be', b"\0a\xdc\xff\0z", 2, 4, "various names for UTF-16BE")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('Utf-16BE', b"\0a\xdc\xff\0z", 2, 4, "various names for UTF-16BE")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('Utf_16BE', b"\0a\xdc\xff\0z", 2, 4, "various names for UTF-16BE")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('Utf16Be', b"\0a\xdc\xff\0z", 2, 4, "various names for UTF-16BE")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-32', b"a\0\0\0\xff\xdc\0\0z\0\0\0", 4, 8, "various names for UTF-32")), ("\udcff", 8))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf32', b"a\0\0\0\xff\xdc\0\0z\0\0\0", 4, 8, "various names for UTF-32")), ("\udcff", 8))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-32-le', b"a\0\0\0\xff\xdc\0\0z\0\0\0", 4, 8, "various names for UTF-32")), ("\udcff", 8))
        self.assertEqual(surrogatepass(UnicodeDecodeError('Utf-32LE', b"a\0\0\0\xff\xdc\0\0z\0\0\0", 4, 8, "various names for UTF-32")), ("\udcff", 8))
        self.assertEqual(surrogatepass(UnicodeDecodeError('Utf_32LE', b"a\0\0\0\xff\xdc\0\0z\0\0\0", 4, 8, "various names for UTF-32")), ("\udcff", 8))
        self.assertEqual(surrogatepass(UnicodeDecodeError('Utf_32_LE', b"a\0\0\0\xff\xdc\0\0z\0\0\0", 4, 8, "various names for UTF-32")), ("\udcff", 8))
        self.assertEqual(surrogatepass(UnicodeDecodeError('Utf32Le', b"a\0\0\0\xff\xdc\0\0z\0\0\0", 4, 8, "various names for UTF-32")), ("\udcff", 8))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-32-be', b"\0\0\0a\0\0\xdc\xff\0\0\0z", 4, 8, "various names for UTF-32")), ("\udcff", 8))
        self.assertEqual(surrogatepass(UnicodeDecodeError('Utf-32BE', b"\0\0\0a\0\0\xdc\xff\0\0\0z", 4, 8, "various names for UTF-32")), ("\udcff", 8))
        self.assertEqual(surrogatepass(UnicodeDecodeError('Utf_32BE', b"\0\0\0a\0\0\xdc\xff\0\0\0z", 4, 8, "various names for UTF-32")), ("\udcff", 8))
        self.assertEqual(surrogatepass(UnicodeDecodeError('Utf32Be', b"\0\0\0a\0\0\xdc\xff\0\0\0z", 4, 8, "various names for UTF-32")), ("\udcff", 8))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-8', b'a\xed\xb7\xae\xed\xb7\xbfz', 1, 0, "end index ignored")), ("\uddee", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-8', b'a\xed\xb7\xae\xed\xb7\xbfz', 1, 1, "end index ignored")), ("\uddee", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-8', b'a\xed\xb7\xae\xed\xb7\xbfz', 1, 2, "end index ignored")), ("\uddee", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-8', b'a\xed\xb7\xae\xed\xb7\xbfz', 1, 3, "end index ignored")), ("\uddee", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-8', b'a\xed\xb7\xae\xed\xb7\xbfz', 1, 5, "end index ignored")), ("\uddee", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-8', b'a\xed\xb7\xae\xed\xb7\xbfz', 1, 50, "end index ignored")), ("\uddee", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-8', b'a\xed\xb7\xae\xed\xb7\xbfz', 1, -50, "end index ignored")), ("\uddee", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-16', b"a\0\xff\xdcz\0", 2, 0, "end index ignored")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-16', b"a\0\xff\xdcz\0", 2, 2, "end index ignored")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-16', b"a\0\xff\xdcz\0", 2, 3, "end index ignored")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-16', b"a\0\xff\xdcz\0", 2, 5, "end index ignored")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-16', b"a\0\xff\xdcz\0", 2, 50, "end index ignored")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-16', b"a\0\xff\xdcz\0", 2, -50, "end index ignored")), ("\udcff", 4))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-32', b"a\0\0\0\xff\xdc\0\0z\0\0\0", 4, 0, "end index ignored")), ("\udcff", 8))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-32', b"a\0\0\0\xff\xdc\0\0z\0\0\0", 4, 2, "end index ignored")), ("\udcff", 8))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-32', b"a\0\0\0\xff\xdc\0\0z\0\0\0", 4, 22, "end index ignored")), ("\udcff", 8))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-32', b"a\0\0\0\xff\xdc\0\0z\0\0\0", 4, -22, "end index ignored")), ("\udcff", 8))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-16', b"a\xff\xdcz", 1, 0, "misaligned bytes")), ("\udcff", 3))
        self.assertEqual(surrogatepass(UnicodeDecodeError('utf-32', b"a\xff\xdc\0\0z", 1, 0, "misaligned bytes")), ("\udcff", 5))
        ude = UnicodeDecodeError('utf-8', b"abcde", 1, 4, "no surrogate present")
        with self.assertRaises(UnicodeDecodeError) as cm:
            surrogatepass(ude)
        self.assertEqual(cm.exception, ude)
        ude = UnicodeDecodeError('utf-8', b'a\xed\xb7\xed\xb7\xbfz', 1, 4, "incomplete surrogate")
        with self.assertRaises(UnicodeDecodeError) as cm:
            surrogatepass(ude)
        self.assertEqual(cm.exception, ude)
        ude = UnicodeDecodeError('utf-8', b'a\xed\xb7', 1, 4, "incomplete surrogate")
        with self.assertRaises(UnicodeDecodeError) as cm:
            surrogatepass(ude)
        self.assertEqual(cm.exception, ude)
        ude = UnicodeDecodeError('utf-16', b"abcde", 2, 4, "no surrogate present")
        with self.assertRaises(UnicodeDecodeError) as cm:
            surrogatepass(ude)
        self.assertEqual(cm.exception, ude)
        ude = UnicodeDecodeError('utf-32', b"abcde", 2, 4, "no surrogate present")
        with self.assertRaises(UnicodeDecodeError) as cm:
            surrogatepass(ude)
        self.assertEqual(cm.exception, ude)

        # Encoding with surrogatepass
        self.assertEqual(surrogatepass(UnicodeEncodeError('utf-8', "a\udcff\ud880z", 1, 2, "encoding testing purposes")), (b'\xed\xb3\xbf', 2))
        self.assertEqual(surrogatepass(UnicodeEncodeError('utf-8', "a\udcff\ud880z", 1, 3, "encoding testing purposes")), (b'\xed\xb3\xbf\xed\xa2\x80', 3))
        self.assertEqual(surrogatepass(UnicodeEncodeError('utf-8', "a\udcff\ud880\udc81z", 1, 3, "encoding testing purposes")), (b'\xed\xb3\xbf\xed\xa2\x80', 3))
        self.assertEqual(surrogatepass(UnicodeEncodeError('dummy', "a\udcff\udc80z", 1, 2, "for unrecognized encoding fall back to utf-8")), (b'\xed\xb3\xbf', 2))
        self.assertEqual(surrogatepass(UnicodeEncodeError('utf-16', "a\udcff\ud880z", 1, 2, "encoding testing purposes")), (b'\xff\xdc', 2))
        self.assertEqual(surrogatepass(UnicodeEncodeError('utf-16', "a\udcff\ud880z", 1, 3, "encoding testing purposes")), (b'\xff\xdc\x80\xd8', 3))
        self.assertEqual(surrogatepass(UnicodeEncodeError('utf-16', "a\udcff\ud880\udc81z", 1, 3, "encoding testing purposes")), (b'\xff\xdc\x80\xd8', 3))
        self.assertEqual(surrogatepass(UnicodeEncodeError('utf-16', "a\ud800\udc00z", 1, 3, "encoding testing purposes")), (b'\x00\xd8\x00\xdc', 3))
        self.assertEqual(surrogatepass(UnicodeEncodeError('utf-32', "a\udcff\ud880z", 1, 2, "encoding testing purposes")), (b'\xff\xdc\0\0', 2))
        self.assertEqual(surrogatepass(UnicodeEncodeError('utf-32', "a\udcff\ud880z", 1, 3, "encoding testing purposes")), (b'\xff\xdc\0\0\x80\xd8\0\0', 3))
        self.assertEqual(surrogatepass(UnicodeEncodeError('utf-32', "a\udcff\ud880\udc81z", 1, 3, "encoding testing purposes")), (b'\xff\xdc\0\0\x80\xd8\0\0', 3))
        for encoding in ['utf-8', 'utf-16', 'utf-16-le', 'utf-16-be', 'utf-32', 'utf-32-le', 'utf-32-be']:
            uee = UnicodeEncodeError(encoding, "abcd", 1, 3, "no surrogates to pass")
            with self.assertRaises(UnicodeEncodeError) as cm:
                surrogatepass(uee)
            self.assertEqual(cm.exception, uee)
            uee = UnicodeEncodeError(encoding, "a\uddddcd", 1, 3, "not all surrogates")
            with self.assertRaises(UnicodeEncodeError) as cm:
                surrogatepass(uee)
            self.assertEqual(cm.exception, uee)

        # Translating with surrogatepass
        ute = UnicodeTranslateError("abcd", 1, 3, "UnicodeTranslateError not supported with surrogatepass")
        self.assertRaisesRegex(TypeError, "don't know how to handle UnicodeTranslateError in error callback", surrogatepass, ute)

    def test_register_error(self):
        def garbage_error0(): print("garbage_error0")
        def garbage_error1(param1): print("garbage_error1:", param1)
        def garbage_error2(param1, param2): print("garbage_error2:", param1, "; ", param2)
        codecs.register_error("garbage0", garbage_error0)
        codecs.register_error("garbage1", garbage_error1)
        codecs.register_error("garbage2", garbage_error2)
        codecs.register_error("garbage1dup", garbage_error1)

        # test error handler that produces a replacement string
        def test_encoding_error_strhandler(ue): return ("*" * (ue.end - ue.start), ue.end)
        codecs.register_error('test_enc_str', test_encoding_error_strhandler)
        self.assertEqual(codecs.lookup_error('test_enc_str'), test_encoding_error_strhandler)
        self.assertEqual(codecs.latin_1_encode("a\u20AC\u20AAz", 'test_enc_str'), (b"a**z", 4))
        self.assertEqual(codecs.utf_8_encode("a\uDDDD\uD800z", 'test_enc_str'), (b"a**z", 4))
        self.assertEqual(codecs.encode("a\u20AC\u20AAz", "iso8859-2", 'test_enc_str'), b"a**z")
        self.assertEqual(codecs.utf_8_decode(b"a\xFF\xFEz", 'test_enc_str'), ("a**z", 4))
        self.assertEqual(codecs.ascii_decode(b"a\xFF\xFEz", 'test_enc_str'), ("a**z", 4))
        self.assertEqual(codecs.charmap_decode(b"a\xFF\xFEz", 'test_enc_str', {ord('a'): 'a', ord('z'): 'z'}), ("a**z", 4))

        # test error handler that produces a replacement string containing surrogates
        def test_encoding_error_surhandler(ue): return ("\uDDEE" * (ue.end - ue.start), ue.end)
        codecs.register_error('test_dec_sur', test_encoding_error_surhandler)
        self.assertEqual(codecs.lookup_error('test_dec_sur'), test_encoding_error_surhandler)
        self.assertEqual(codecs.utf_8_decode(b"a\xFF\xFEz", 'test_dec_sur'), ("a\uDDEE\uDDEEz", 4))
        self.assertEqual(codecs.ascii_decode(b"a\xFF\xFEz", 'test_dec_sur'), ("a\uDDEE\uDDEEz", 4))
        self.assertEqual(codecs.charmap_decode(b"a\xFF\xFEz", 'test_dec_sur', {ord('a'): 'a', ord('z'): 'z'}), ("a\uDDEE\uDDEEz", 4))
        if not is_mono:  # 'iso-2022-jp' is not working well on Mono
            self.assertEqual(b"a\x81\x82z".decode('iso-2022-jp', 'test_dec_sur'), "a\uDDEE\uDDEEz")

        # test encoding error handler that produces replacement bytes
        def test_encoding_error_byteshandler(uee): return (b"*" * (uee.end - uee.start), uee.end)
        codecs.register_error('test_enc_bytes', test_encoding_error_byteshandler)
        self.assertEqual(codecs.lookup_error('test_enc_bytes'), test_encoding_error_byteshandler)
        self.assertEqual(codecs.latin_1_encode("a\u20AC\u20AAz", 'test_enc_bytes'), (b"a**z", 4))
        self.assertEqual(codecs.utf_8_encode("a\uDDDD\uD800z", 'test_enc_bytes'), (b"a**z", 4))
        self.assertEqual(codecs.encode("a\u20AC\u20AAz", "iso8859-2", 'test_enc_bytes'), b"a**z")

        # test encoding error handler that produces replacement bytearray
        def test_encoding_error_bytearrayhandler(uee): return (bytearray(b"*" * (uee.end - uee.start)), uee.end)
        codecs.register_error('test_enc_bytearray', test_encoding_error_bytearrayhandler)
        self.assertEqual(codecs.lookup_error('test_enc_bytearray'), test_encoding_error_bytearrayhandler)
        self.assertRaisesRegex(TypeError, r"^encoding error handler must return \(str/bytes, int\) tuple$",
            codecs.latin_1_encode, "a\u20AC\u20AAz", 'test_enc_bytearray')

        # test that error handler receives the original string (i.e. no data copying happening)
        data = "a\u20AC\u20AAz"
        def test_encoding_error_nocopyhandler(ue):
            self.assertIs(ue.object, data)
            return ("", ue.end)
        codecs.register_error('test_enc_nocopy', test_encoding_error_nocopyhandler)
        self.assertEqual(codecs.latin_1_encode(data, 'test_enc_nocopy'), (b"az", 4))
        self.assertEqual(codecs.encode(data, "iso8859-2", 'test_enc_nocopy'), b"az")

        # test that error handler receives the equivalent bytes object
        data = b"a\xFF\xFEz"
        def test_encoding_error_eqhandler(ue):
            self.assertEqual(ue.object, data)
            return ("", ue.end)
        codecs.register_error('test_dec_eq', test_encoding_error_eqhandler)
        self.assertEqual(codecs.utf_8_decode(data, 'test_dec_eq'), ("az", 4))
        self.assertEqual(codecs.ascii_decode(data, 'test_dec_eq'), ("az", 4))
        self.assertEqual(codecs.charmap_decode(data, 'test_dec_eq', {ord('a'): 'a', ord('z'): 'z'}), ("az", 4))

        # Test that BOM is properly accounted for
        data = b"a\x00\xDD\xDDz\x00"
        def test_encoding_error_bomhandler(ue):
            self.assertEqual(ue.object[ue.start:ue.end], b"\xDD\xDD")
            return ("", ue.end)
        codecs.register_error('test_bom', test_encoding_error_bomhandler)
        self.assertEqual(codecs.utf_16_decode(codecs.BOM_UTF16_LE + data, 'test_bom'), ("az", 8))
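
    # (codecs.register_error binds a name to a handler that receives the
    # UnicodeError instance and returns a (replacement, resume_position) tuple,
    # which is the contract every handler above follows.)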

    def test_lookup_error(self):
        # sanity
        self.assertRaises(LookupError, codecs.lookup_error, "blah garbage xyz")
        def garbage_error1(someError): pass
        codecs.register_error("blah garbage xyz", garbage_error1)
        self.assertEqual(codecs.lookup_error("blah garbage xyz"), garbage_error1)
        def garbage_error2(someError): pass
        codecs.register_error("some other dummy", garbage_error2)
        self.assertEqual(codecs.lookup_error("some other dummy"), garbage_error2)
        # register under the same name, overriding the previous registration
        def garbage_error3(someError): return ("<garbage>", someError.end)
        codecs.register_error("some other dummy", garbage_error3)
        self.assertEqual(codecs.lookup_error("some other dummy"), garbage_error3)
        self.assertEqual(codecs.utf_8_decode(b"a\xffz", "some other dummy"), ("a<garbage>z", 3))
        self.assertEqual(codecs.latin_1_encode("a\u20ACz", "some other dummy"), (b"a<garbage>z", 3))
        self.assertEqual(codecs.encode("a\u20ACz", "iso8859-2", "some other dummy"), b"a<garbage>z")

    @unittest.skip("TODO")
    def test_lookup_error_override(self):
        # override default 'strict'
        self.assertEqual(codecs.lookup_error('strict'), codecs.strict_errors)
        def test_strict(someError): return ("<garbage>", someError.end)
        try:
            codecs.register_error('strict', test_strict)
            self.assertEqual(codecs.lookup_error('strict'), test_strict)
            self.assertEqual(codecs.utf_8_decode(b"a\xffz"), ("a<garbage>z", 3))
            self.assertEqual(codecs.utf_8_decode(b"a\xffz", "strict"), ("a<garbage>z", 3))
            self.assertEqual(codecs.decode(b"a\xffz", "u8", "strict"), "a<garbage>z")
            # override does not work during encoding
            self.assertRaises(UnicodeEncodeError, codecs.latin_1_encode, "a\u20ACz", "strict")
            self.assertRaises(UnicodeEncodeError, codecs.encode, "a\u20ACz", "iso8859-2", "strict")
        finally:
            codecs.register_error('strict', codecs.strict_errors)
            self.assertEqual(codecs.lookup_error('strict'), codecs.strict_errors)

        # try override default 'ignore'
        self.assertEqual(codecs.lookup_error('ignore'), codecs.ignore_errors)
        def test_ignore(someError): return (" " * (someError.end - someError.start), someError.end)
        try:
            codecs.register_error('ignore with spaces', test_ignore)
            self.assertEqual(codecs.lookup_error('ignore with spaces'), test_ignore)
            self.assertEqual(codecs.utf_8_decode(b"a\xff\xfez", 'ignore with spaces'), ("a  z", 4))
            self.assertEqual(codecs.latin_1_encode("a\u20AC\u20AAz", "ignore with spaces"), (b"a  z", 4))
            self.assertEqual(codecs.encode("a\u20AC\u20AAz", "iso8859-2", "ignore with spaces"), b"a  z")
            codecs.register_error('ignore', test_ignore)
            self.assertEqual(codecs.lookup_error('ignore'), test_ignore)
            if is_cli or sys.version_info >= (3,6):
                # override does not work during decoding
                self.assertEqual(codecs.utf_8_decode(b"a\xff\xfez", 'ignore'), ("az", 4))
            else:
                self.assertEqual(codecs.utf_8_decode(b"a\xff\xfez", 'ignore'), ("a  z", 4))
            # override does not work during encoding
            self.assertEqual(codecs.latin_1_encode("a\u20AC\u20AAz", "ignore"), (b"az", 4))
            self.assertEqual(codecs.encode("a\u20AC\u20AAz", "iso8859-2", "ignore"), b"az")
        finally:
            codecs.register_error('ignore', codecs.ignore_errors)
            self.assertEqual(codecs.lookup_error('ignore'), codecs.ignore_errors)

        # try override default 'replace'
        self.assertEqual(codecs.lookup_error('replace'), codecs.replace_errors)
        def test_replace(someError): return ("<error>", someError.end)
        try:
            codecs.register_error('replace errors', test_replace)
            self.assertEqual(codecs.lookup_error('replace errors'), test_replace)
            self.assertEqual(codecs.utf_8_decode(b"a\xffz", 'replace errors'), ("a<error>z", 3))
            self.assertEqual(codecs.latin_1_encode("a\u20AC\u20AAz", "replace errors"), (b"a<error>z", 4))
            self.assertEqual(codecs.encode("a\u20AC\u20AAz", "iso8859-2", "replace errors"), b"a<error>z")
            codecs.register_error('replace', test_replace)
            self.assertEqual(codecs.lookup_error('replace'), test_replace)
            if is_cli or sys.version_info >= (3,6):
                # override does not work during decoding
                self.assertEqual(codecs.utf_8_decode(b"a\xffz", 'replace'), ("a\uFFFDz", 3))
            else:
                self.assertEqual(codecs.utf_8_decode(b"a\xffz", 'replace'), ("a<error>z", 3))
            # override does not work during encoding
            self.assertEqual(codecs.latin_1_encode("a\u20AC\u20AAz", "replace"), (b"a??z", 4))
            self.assertEqual(codecs.encode("a\u20AC\u20AAz", "iso8859-2", "replace"), b"a??z")
        finally:
            codecs.register_error('replace', codecs.replace_errors)
            self.assertEqual(codecs.lookup_error('replace'), codecs.replace_errors)

        self.assertRaises(TypeError, codecs.lookup_error, None)
        self.assertRaises(TypeError, codecs.register_error, None, test_replace)
        self.assertRaises(TypeError, codecs.register_error, "blah none garbage", None)
#TODO: @skip("multiple_execute")
def test_register(self):
#sanity check - basically just ensure that functions can be registered
def garbage_func1(param1): pass
codecs.register(garbage_func1)
#negative cases
self.assertRaises(TypeError, codecs.register)
self.assertRaises(TypeError, codecs.register, None)
self.assertRaises(TypeError, codecs.register, ())
self.assertRaises(TypeError, codecs.register, [])
self.assertRaises(TypeError, codecs.register, 1)
self.assertRaises(TypeError, codecs.register, "abc")
self.assertRaises(TypeError, codecs.register, 3.14)
def my_test_decode(b, errors = None):
nonlocal decode_input
decode_input = b
if type(b) == memoryview:
# clone memoryview for inspection, since the original may get released in the meantime
nonlocal mv_decode_input
mv_decode_input = memoryview(b)
return ('*' * len(b), len(b))
def my_search_function(name):
if name == 'ironpython_test_codecs_test_register':
return codecs.CodecInfo(None, my_test_decode)
codecs.register(my_search_function)
# When 'codecs.decode' is used, the decode input is passed to the decoding function as is
b = b"abc"
decode_input = mv_decode_input = None
self.assertEqual(codecs.decode(b, 'ironpython_test_codecs_test_register'), "***")
self.assertIs(decode_input, b)
ba = bytearray(b)
decode_input = mv_decode_input = None
self.assertEqual(codecs.decode(ba, 'ironpython_test_codecs_test_register'), "***")
self.assertIs(decode_input, ba)
mv = memoryview(ba)
decode_input = mv_decode_input = None
self.assertEqual(codecs.decode(mv, 'ironpython_test_codecs_test_register'), "***")
self.assertIs(decode_input, mv)
mv_decode_input.release()
import array
arr = array.array('B', b)
decode_input = mv_decode_input = None
self.assertEqual(codecs.decode(arr, 'ironpython_test_codecs_test_register'), "***")
self.assertIs(decode_input, arr)
        # When the 'decode' method is used on 'bytes' or 'bytearray', the object is wrapped in a readonly 'memoryview'
decode_input = mv_decode_input = None
self.assertEqual(b.decode('ironpython_test_codecs_test_register'), "***")
self.assertIs(type(decode_input), memoryview)
self.assertTrue(mv_decode_input.readonly)
self.assertRaises(TypeError, memoryview.__setitem__, mv_decode_input, 0, 120)
mv_decode_input.release()
decode_input = mv_decode_input = None
self.assertEqual(ba.decode('ironpython_test_codecs_test_register'), "***")
self.assertIs(type(decode_input), memoryview)
self.assertTrue(mv_decode_input.readonly)
self.assertRaises(TypeError, memoryview.__setitem__, mv_decode_input, 0, 120)
numBytes = len(ba)
self.assertEqual(len(mv_decode_input), numBytes)
self.assertEqual(mv_decode_input.shape, (numBytes,))
ba[1] = ord('x')
self.assertEqual(mv_decode_input[1], ord('x'))
mv_decode_input.release()
del decode_input, mv_decode_input
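    # For reference, a search function normally supplies both transforms. A
    # minimal sketch of a complete registration, kept as a comment so it is
    # not executed with the suite (the codec name and behaviour here are
    # illustrative only):
    #
    #   def upper_search(name):
    #       if name == 'my-upper':
    #           return codecs.CodecInfo(
    #               encode=lambda s, errors='strict': (s.upper().encode('ascii'), len(s)),
    #               decode=lambda b, errors='strict': (bytes(b).decode('ascii').upper(), len(b)),
    #               name='my-upper')
    #   codecs.register(upper_search)
    #   assert codecs.encode("abc", 'my-upper') == b'ABC'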
def test_readbuffer_encode(self):
self.assertEqual(codecs.readbuffer_encode("abc\u20ac"), (b"abc\xe2\x82\xac", 6))
self.assertEqual(codecs.readbuffer_encode("abc\u20ac", None), (b"abc\xe2\x82\xac", 6))
self.assertRaises(UnicodeEncodeError, codecs.readbuffer_encode, "\uDDDD", 'ignore')
self.assertEqual(codecs.readbuffer_encode(b"abc\xff"), (b"abc\xff", 4))
self.assertEqual(codecs.readbuffer_encode(b"abc\xff", None), (b"abc\xff", 4))
self.assertEqual(codecs.readbuffer_encode(array.array('I', (1633771873,))), (b"aaaa", 4))
def test_unicode_internal_encode(self):
# takes one or two parameters, not zero or three
self.assertRaises(TypeError, codecs.unicode_internal_encode)
self.assertRaises(TypeError, codecs.unicode_internal_encode, 'abc', 'def', 'qrt')
if is_cli or is_windows:
self.assertEqual(codecs.unicode_internal_encode('abc'), (b'a\x00b\x00c\x00', 3))
else:
self.assertEqual(codecs.unicode_internal_encode('abc'), (b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00', 3))
self.assertEqual(codecs.unicode_internal_encode(b'abc'), (b'abc', 3))
self.assertEqual(codecs.unicode_internal_encode(array.array('I', (1633771873,))), (b"aaaa", 4))
self.assertRaises(TypeError, codecs.unicode_internal_encode, None)
self.assertRaises(TypeError, codecs.unicode_internal_encode, None, None)
self.assertEqual(codecs.unicode_internal_encode("", None), (b"", 0))
self.assertEqual(codecs.unicode_internal_encode(b"", None), (b"", 0))
def test_unicode_internal_decode(self):
# takes one or two parameters, not zero or three
self.assertRaises(TypeError, codecs.unicode_internal_decode)
self.assertRaises(TypeError, codecs.unicode_internal_decode, 'abc', 'def', 'qrt')
if is_cli or is_windows:
self.assertEqual(codecs.unicode_internal_decode(b'ab'), ('\u6261', 2))
self.assertEqual(codecs.unicode_internal_decode(array.array('I', (1633771873,))), ("慡慡", 4))
else:
self.assertEqual(codecs.unicode_internal_decode(b'ab\0\0'), ('\u6261', 4))
self.assertEqual(codecs.unicode_internal_decode(array.array('I', (1633771873 % 0x10000,))), ("慡", 4))
self.assertEqual(codecs.unicode_internal_decode('abc'), ('abc', 3))
self.assertRaises(TypeError, codecs.unicode_internal_decode, None)
self.assertRaises(TypeError, codecs.unicode_internal_decode, None, None)
self.assertEqual(codecs.unicode_internal_decode("", None), ("", 0))
self.assertEqual(codecs.unicode_internal_decode(b"", None), ("", 0))
def test_utf_16_be_decode(self):
string, num_processed = codecs.utf_16_be_decode(b'\0a\0b\0c')
self.assertEqual(string, "abc")
self.assertEqual(num_processed, 3 * 2)
string, num_processed = codecs.utf_16_be_decode(codecs.BOM_UTF16_BE + b'\0a\0b\0c')
self.assertEqual(string, "\uFEFFabc")
self.assertEqual(num_processed, 4 * 2)
self.assertEqual(codecs.utf_16_be_decode(array.array('I', (1633771873,))), ("慡慡", 4))
self.assertRaises(TypeError, codecs.utf_16_be_decode, "abc")
self.assertRaises(TypeError, codecs.utf_16_be_decode, None)
self.assertRaises(TypeError, codecs.utf_16_be_decode, None, None)
self.assertEqual(codecs.utf_16_be_decode(b"", None), ("", 0))
def test_utf_16_be_decode_incremental(self):
b = b"\xff\xfe\x00\x41\xd9\x00\xdd\x00\xdc\x00\xd8\x00\xdc\x00"
expected = [
('', 0),
('', 0),
('\ufffe', 2),
('\ufffe', 2),
('\ufffeA', 4),
('\ufffeA', 4),
('\ufffeA', 4),
('\ufffeA', 4),
('\ufffeA\U00050100', 8),
('\ufffeA\U00050100', 8),
('\ufffeA\U00050100\ufffd', 10),
('\ufffeA\U00050100\ufffd', 10),
('\ufffeA\U00050100\ufffd', 10),
('\ufffeA\U00050100\ufffd', 10),
('\ufffeA\U00050100\ufffd\U00010000', 14)
]
if not is_cli:
# CPython's strings are UTF-32 so an invalid surrogate pair results in one replacement char.
# Therefore CPython does not report error on a dangling low surrogate until it verifies
# that the next char is not an invalid surrogate as well.
expected[10] = expected[11] = ('\ufffeA\U00050100', 8)
for i in range(len(b) + 1):
res = codecs.utf_16_be_decode(b[:i], 'replace')
self.assertEqual(res, expected[i])
self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode, b"\x41\x00\xd8\x00\xd8\x00", 'strict', False)
def test_utf_16_be_encode(self):
data, num_processed = codecs.utf_16_be_encode("abc")
self.assertEqual(data, b'\0a\0b\0c')
self.assertEqual(num_processed, 3)
self.assertRaises(TypeError, codecs.utf_16_be_encode, b"abc")
self.assertRaises(TypeError, codecs.utf_16_be_encode, None)
self.assertRaises(TypeError, codecs.utf_16_be_encode, None, None)
self.assertEqual(codecs.utf_16_be_encode("", None), (b"", 0))
def test_utf_16_le_decode(self):
string, num_processed = codecs.utf_16_le_decode(b'a\0b\0c\0')
self.assertEqual(string, "abc")
self.assertEqual(num_processed, 3 * 2)
string, num_processed = codecs.utf_16_le_decode(codecs.BOM_UTF16_LE + b'a\0b\0c\0')
self.assertEqual(string, "\uFEFFabc")
self.assertEqual(num_processed, 4 * 2)
self.assertEqual(codecs.utf_16_le_decode(array.array('I', (1633771873,))), ("慡慡", 4))
self.assertRaises(TypeError, codecs.utf_16_le_decode, "abc")
self.assertRaises(TypeError, codecs.utf_16_le_decode, None)
self.assertRaises(TypeError, codecs.utf_16_le_decode, None, None)
self.assertEqual(codecs.utf_16_le_decode(b"", None), ("", 0))
def test_utf_16_le_decode_incremental(self):
b = b"\xfe\xff\x41\x00\x00\xd9\x00\xdd\x00\xdc\x00\xd8\x00\xdc"
expected = [
('', 0),
('', 0),
('\ufffe', 2),
('\ufffe', 2),
('\ufffeA', 4),
('\ufffeA', 4),
('\ufffeA', 4),
('\ufffeA', 4),
('\ufffeA\U00050100', 8),
('\ufffeA\U00050100', 8),
('\ufffeA\U00050100\ufffd', 10),
('\ufffeA\U00050100\ufffd', 10),
('\ufffeA\U00050100\ufffd', 10),
('\ufffeA\U00050100\ufffd', 10),
('\ufffeA\U00050100\ufffd\U00010000', 14)
]
if not is_cli:
# CPython's strings are UTF-32 so an invalid surrogate pair results in one replacement char.
# Therefore CPython does not report error on a dangling low surrogate until it verifies
# that the next char is not an invalid surrogate as well.
expected[10] = expected[11] = ('\ufffeA\U00050100', 8)
for i in range(len(b) + 1):
res = codecs.utf_16_le_decode(b[:i], 'replace')
self.assertEqual(res, expected[i])
self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode, b"\x00\x41\x00\xd8\x00\xd8", 'strict', False)
def test_utf_16_le_encode(self):
data, num_processed = codecs.utf_16_le_encode("abc")
self.assertEqual(data, b'a\0b\0c\0')
self.assertEqual(num_processed, 3)
self.assertRaises(TypeError, codecs.utf_16_le_encode, b"abc")
self.assertRaises(TypeError, codecs.utf_16_le_encode, None)
self.assertRaises(TypeError, codecs.utf_16_le_encode, None, None)
self.assertEqual(codecs.utf_16_le_encode("", None), (b"", 0))
def test_utf_16_ex_decode(self):
#sanity
new_str, num_processed, zero = codecs.utf_16_ex_decode(b"abc")
self.assertEqual(new_str, '\u6261')
self.assertEqual(num_processed, 2)
self.assertEqual(zero, 0)
self.utf_ex_decode_test_helper(
charwidth=2,
abc="abc",
func=codecs.utf_16_ex_decode,
abc_le=b'a\0b\0c\0',
abc_be=b'\0a\0b\0c',
bom_le=codecs.BOM_UTF16_LE,
bom_be=codecs.BOM_UTF16_BE)
self.assertEqual(codecs.utf_16_ex_decode(array.array('I', (1633771873,))), ("慡慡", 4, 0))
self.assertRaises(TypeError, codecs.utf_16_ex_decode, "abc")
self.assertRaises(TypeError, codecs.utf_16_ex_decode, None)
self.assertRaises(TypeError, codecs.utf_16_ex_decode, None, None)
self.assertEqual(codecs.utf_16_ex_decode(b"", None), ("", 0, 0))
def test_utf_16_decode(self):
# When BOM present: it is removed and the proper UTF-16 variant is automatically selected
string, num_processed = codecs.utf_16_decode(codecs.BOM_UTF16_LE + b'a\0b\0c\0')
self.assertEqual(string, "abc")
self.assertEqual(num_processed, 4 * 2)
string, num_processed = codecs.utf_16_decode(codecs.BOM_UTF16_BE + b'\0a\0b\0c')
self.assertEqual(string, "abc")
self.assertEqual(num_processed, 4 * 2)
# When no BOM: on little-endian systems, UTF-16 defaults to UTF-16-LE
string, num_processed = codecs.utf_16_decode(b'a\0b\0c\0')
self.assertEqual(string, 'abc')
self.assertEqual(num_processed, 3 * 2)
self.assertEqual(codecs.utf_16_decode(array.array('I', (1633771873,))), ("慡慡", 4))
self.assertRaises(TypeError, codecs.utf_16_decode, "abc")
self.assertRaises(TypeError, codecs.utf_16_decode, None)
self.assertRaises(TypeError, codecs.utf_16_decode, None, None)
self.assertEqual(codecs.utf_16_decode(b"", None), ("", 0))
def test_utf_16_encode(self):
# On little-endian systems, UTF-16 encodes in UTF-16-LE prefixed with BOM
data, num_processed = codecs.utf_16_encode("abc")
self.assertEqual(data, codecs.BOM_UTF16 + b'a\0b\0c\0')
self.assertEqual(num_processed, 3)
self.assertRaises(TypeError, codecs.utf_16_encode, b"abc")
self.assertRaises(TypeError, codecs.utf_16_encode, None)
self.assertRaises(TypeError, codecs.utf_16_encode, None, None)
self.assertEqual(codecs.utf_16_encode("", None), (codecs.BOM_UTF16, 0))
def test_utf_16_le_encode_alias(self):
for x in ('utf_16_le', 'UTF-16LE', 'utf-16le', 'utf-16-le'):
self.assertEqual('abc'.encode(x), b'a\x00b\x00c\x00')
def test_utf_32_be_decode(self):
string, num_processed = codecs.utf_32_be_decode(b'\0\0\0a\0\0\0b\0\0\0c')
self.assertEqual(string, "abc")
self.assertEqual(num_processed, 3 * 4)
string, num_processed = codecs.utf_32_be_decode(codecs.BOM_UTF32_BE + b'\0\0\0a\0\0\0b\0\0\0c')
self.assertEqual(string, "\uFEFFabc")
self.assertEqual(num_processed, 4 * 4)
self.assertEqual(codecs.utf_32_be_decode(array.array('I', (0x0df40100,))), ("\U0001f40d", 4))
self.assertRaises(TypeError, codecs.utf_32_be_decode, "abc")
self.assertRaises(TypeError, codecs.utf_32_be_decode, None)
self.assertRaises(TypeError, codecs.utf_32_be_decode, None, None)
self.assertEqual(codecs.utf_32_be_decode(b"", None), ("", 0))
def test_utf_32_be_encode(self):
data, num_processed = codecs.utf_32_be_encode("abc")
self.assertEqual(data, b'\0\0\0a\0\0\0b\0\0\0c')
self.assertEqual(num_processed, 3)
self.assertRaises(TypeError, codecs.utf_32_be_encode, b"abc")
self.assertRaises(TypeError, codecs.utf_32_be_encode, None)
self.assertRaises(TypeError, codecs.utf_32_be_encode, None, None)
self.assertEqual(codecs.utf_32_be_encode("", None), (b"", 0))
def test_utf_32_le_decode(self):
string, num_processed = codecs.utf_32_le_decode(b'a\0\0\0b\0\0\0c\0\0\0')
self.assertEqual(string, "abc")
self.assertEqual(num_processed, 3 * 4)
string, num_processed = codecs.utf_32_le_decode(codecs.BOM_UTF32_LE + b'a\0\0\0b\0\0\0c\0\0\0')
self.assertEqual(string, "\uFEFFabc")
self.assertEqual(num_processed, 4 * 4)
self.assertEqual(codecs.utf_32_le_decode(array.array('I', (0x0001f40d,))), ("\U0001f40d", 4))
self.assertRaises(TypeError, codecs.utf_32_le_decode, "abc")
self.assertRaises(TypeError, codecs.utf_32_le_decode, None)
self.assertRaises(TypeError, codecs.utf_32_le_decode, None, None)
self.assertEqual(codecs.utf_32_le_decode(b"", None), ("", 0))
def test_utf_32_le_encode(self):
data, num_processed = codecs.utf_32_le_encode("abc")
self.assertEqual(data, b'a\0\0\0b\0\0\0c\0\0\0')
self.assertEqual(num_processed, 3)
self.assertRaises(TypeError, codecs.utf_32_le_encode, b"abc")
self.assertRaises(TypeError, codecs.utf_32_le_encode, None)
self.assertRaises(TypeError, codecs.utf_32_le_encode, None, None)
self.assertEqual(codecs.utf_32_le_encode("", None), (b"", 0))
def test_utf_32_ex_decode(self):
self.utf_ex_decode_test_helper(
charwidth=4,
abc="abc",
func=codecs.utf_32_ex_decode,
abc_le=b'a\0\0\0b\0\0\0c\0\0\0',
abc_be=b'\0\0\0a\0\0\0b\0\0\0c',
bom_le=codecs.BOM_UTF32_LE,
bom_be=codecs.BOM_UTF32_BE)
self.assertEqual(codecs.utf_32_ex_decode(array.array('I', (0x0001f40d,))), ("\U0001f40d", 4, 0))
self.assertRaises(TypeError, codecs.utf_32_ex_decode, "abc")
self.assertRaises(TypeError, codecs.utf_32_ex_decode, None)
self.assertRaises(TypeError, codecs.utf_32_ex_decode, None, None)
self.assertEqual(codecs.utf_32_ex_decode(b"", None), ("", 0, 0))
def test_utf_32_decode(self):
# When BOM present: it is removed and the proper UTF-32 variant is automatically selected
string, num_processed = codecs.utf_32_decode(codecs.BOM_UTF32_LE + b'a\0\0\0b\0\0\0c\0\0\0')
self.assertEqual(string, "abc")
self.assertEqual(num_processed, 4 * 4)
string, num_processed = codecs.utf_32_decode(codecs.BOM_UTF32_BE + b'\0\0\0a\0\0\0b\0\0\0c')
self.assertEqual(string, "abc")
self.assertEqual(num_processed, 4 * 4)
# When no BOM: on little-endian systems, UTF-32 defaults to UTF-32-LE
string, num_processed = codecs.utf_32_decode(b'a\0\0\0b\0\0\0c\0\0\0')
self.assertEqual(string, 'abc')
self.assertEqual(num_processed, 3 * 4)
with self.assertRaises(UnicodeDecodeError):
codecs.utf_32_decode(b'\0\0\0a\0\0\0b\0\0\0c')
self.assertEqual(codecs.utf_32_decode(array.array('I', (0x0001f40d,))), ("\U0001f40d", 4))
self.assertRaises(TypeError, codecs.utf_32_decode, "abc")
def test_utf_32_encode(self):
# On little-endian systems, UTF-32 encodes in UTF-32-LE prefixed with BOM
data, num_processed = codecs.utf_32_encode("abc")
self.assertEqual(data, codecs.BOM_UTF32 + b'a\0\0\0b\0\0\0c\0\0\0')
self.assertEqual(num_processed, 3)
self.assertRaises(TypeError, codecs.utf_32_encode, b"abc")
def utf_ex_decode_test_helper(self, charwidth, abc, func, abc_le, abc_be, bom_le, bom_be):
bom_abc_le = bom_le + abc_le
bom_abc_be = bom_be + abc_be
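        # byteorder convention for the *_ex_decode functions: a negative value
        # selects little-endian, a positive value big-endian, and zero means
        # detect the order from the BOM (falling back to the native default)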
order = 0
        # When BOM present, and no order given: BOM is removed and the proper UTF-XX variant is automatically detected and used
string, num_processed, detected = func(bom_abc_le, 'strict', order)
self.assertEqual(string, abc)
self.assertEqual(num_processed, (1 + len(abc)) * charwidth)
self.assertLess(detected, order)
string, num_processed, detected = func(bom_abc_be, 'strict', order)
self.assertEqual(string, abc)
self.assertEqual(num_processed, (1 + len(abc)) * charwidth)
self.assertGreater(detected, order)
        # When only BOM present, and no order given: the decoded string is empty but the UTF-XX variant is detected
string, num_processed, detected = func(bom_le, 'strict', order)
self.assertEqual(string, "")
self.assertEqual(num_processed, charwidth)
self.assertLess(detected, order)
string, num_processed, detected = func(bom_be, 'strict', order)
self.assertEqual(string, "")
self.assertEqual(num_processed, charwidth)
self.assertGreater(detected, order)
# When no BOM, and no order given: on little-endian systems, UTF-XX defaults to UTF-XX-LE, but no BOM detection
string, num_processed, detected = func(abc_le, 'strict', order)
self.assertEqual(string, abc)
self.assertEqual(num_processed, len(abc) * charwidth)
self.assertEqual(detected, order)
# When BOM present, and order given: BOM must match order and is passed to output, order unchanged
for order in [1, 42]:
string, num_processed, detected = func(bom_abc_be, 'strict', order)
self.assertEqual(string, "\uFEFF" + abc)
self.assertEqual(num_processed, (1 + len(abc)) * charwidth)
self.assertEqual(detected, order)
string, num_processed, detected = func(bom_abc_le, 'strict', -order)
self.assertEqual(string, "\uFEFF" + abc)
self.assertEqual(num_processed, (1 + len(abc)) * charwidth)
self.assertEqual(detected, -order)
# When no BOM, and order given: on little-endian systems, UTF-XX defaults to UTF-XX-LE, order unchanged
for order in [1, 42]:
string, num_processed, detected = func(abc_be, 'strict', order)
self.assertEqual(string, abc)
self.assertEqual(num_processed, len(abc) * charwidth)
self.assertEqual(detected, order)
string, num_processed, detected = func(abc_le, 'strict', -order)
self.assertEqual(string, abc)
self.assertEqual(num_processed, len(abc) * charwidth)
self.assertEqual(detected, -order)
def test_utf_8_decode(self):
#sanity
new_str, num_processed = codecs.utf_8_decode(b"abc")
self.assertEqual(new_str, 'abc')
self.assertEqual(num_processed, 3)
self.assertEqual(codecs.utf_8_decode(array.array('I', (1633771873,))), ("aaaa", 4))
self.assertRaises(TypeError, codecs.utf_8_decode, "abc")
self.assertRaises(TypeError, codecs.utf_8_decode, None)
self.assertRaises(TypeError, codecs.utf_8_decode, None, None)
self.assertEqual(codecs.utf_8_decode(b"abc", None), ("abc", 3))
self.assertRaises(UnicodeDecodeError, codecs.utf_8_decode, b"\xFF", None)
def test_cp34951(self):
def internal_cp34951(sample1, preamble, bom_len):
self.assertEqual(codecs.utf_8_decode(sample1), (preamble + '12\u20ac\x0a', 6 + bom_len))
sample1 = sample1[:-1] # 12<euro>
self.assertEqual(codecs.utf_8_decode(sample1), (preamble + '12\u20ac', 5 + bom_len))
sample1 = sample1[:-1] # 12<incomplete euro>
self.assertEqual(codecs.utf_8_decode(sample1), (preamble + '12', 2 + bom_len))
            sample1 = sample1 + b'\x7f' # makes it invalid
self.assertRaises(UnicodeDecodeError, codecs.utf_8_decode, sample1)
internal_cp34951(b'\x31\x32\xe2\x82\xac\x0a', '', 0) # 12<euro><cr>
internal_cp34951(b'\xef\xbb\xbf\x31\x32\xe2\x82\xac\x0a', '\ufeff', 3) # <BOM>12<euro><cr>
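    # A minimal sketch (kept as a comment, not executed) of how a caller can
    # use the (decoded, consumed) pair above to stream UTF-8 in chunks;
    # 'chunks' and 'process' are placeholders:
    #
    #   buf = b""
    #   for chunk in chunks:
    #       buf += chunk
    #       text, consumed = codecs.utf_8_decode(buf)
    #       buf = buf[consumed:]  # keep any incomplete trailing sequence
    #       process(text)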
def test_utf_8_decode_incremental(self):
b = '\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff\U00010000\U000fffff\U00100000\U0010ffff'.encode('utf-8')
# expected results generated by CPython 3.4
expected = [
('', 0),
('\x00', 1),
('\x00\x7f', 2),
('\x00\x7f', 2),
('\x00\x7f\x80', 4),
('\x00\x7f\x80', 4),
('\x00\x7f\x80\xff', 6),
('\x00\x7f\x80\xff', 6),
('\x00\x7f\x80\xff\u0100', 8),
('\x00\x7f\x80\xff\u0100', 8),
('\x00\x7f\x80\xff\u0100\u07ff', 10),
('\x00\x7f\x80\xff\u0100\u07ff', 10),
('\x00\x7f\x80\xff\u0100\u07ff', 10),
('\x00\x7f\x80\xff\u0100\u07ff\u0800', 13),
('\x00\x7f\x80\xff\u0100\u07ff\u0800', 13),
('\x00\x7f\x80\xff\u0100\u07ff\u0800', 13),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd', 16),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd', 16),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd', 16),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff', 19),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff', 19),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff', 19),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff', 19),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff\U00010000', 23),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff\U00010000', 23),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff\U00010000', 23),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff\U00010000', 23),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff\U00010000\U000fffff', 27),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff\U00010000\U000fffff', 27),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff\U00010000\U000fffff', 27),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff\U00010000\U000fffff', 27),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff\U00010000\U000fffff\U00100000', 31),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff\U00010000\U000fffff\U00100000', 31),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff\U00010000\U000fffff\U00100000', 31),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff\U00010000\U000fffff\U00100000', 31),
('\x00\x7f\x80\xff\u0100\u07ff\u0800\ufffd\uffff\U00010000\U000fffff\U00100000\U0010ffff', 35)
]
for i in range(len(b) + 1):
res = codecs.utf_8_decode(b[:i])
self.assertEqual(res, expected[i])
def test_utf_8_encode(self):
#sanity
new_str, num_processed = codecs.utf_8_encode("abc")
self.assertEqual(new_str, b'abc')
self.assertEqual(num_processed, 3)
self.assertEqual(codecs.utf_8_encode("abc\u20ac"), (b"abc\xe2\x82\xac", 4))
self.assertRaises(TypeError, codecs.utf_8_encode, b"abc")
self.assertRaises(TypeError, codecs.utf_8_encode, None)
self.assertRaises(TypeError, codecs.utf_8_encode, None, None)
self.assertEqual(codecs.utf_8_encode("abc", None), (b"abc", 3))
self.assertRaises(UnicodeEncodeError, codecs.utf_8_encode, "\uDDDD", None)
def test_charmap_encode(self):
self.assertEqual(codecs.charmap_encode(""), (b'', 0))
self.assertEqual(codecs.charmap_encode("", "strict", {}), (b'', 0))
self.assertRaises(TypeError, codecs.charmap_encode, b"")
# Default map is Latin-1
self.assertEqual(codecs.charmap_encode("abcÿ"), (b'abc\xff', 4))
self.assertEqual(codecs.charmap_encode("abcÿ", "strict"), (b'abc\xff', 4))
# Ignore errors
self.assertEqual(codecs.charmap_encode("abc", "ignore", {}), (b'', 3))
charmap = {ord(c): None for c in "abcdefgh"}
self.assertEqual(codecs.charmap_encode("abc", "ignore", charmap), (b'', 3))
# Dict[int, int]
charmap = {ord(c): ord(c.upper()) for c in "abcdefgh"}
self.assertEqual(codecs.charmap_encode("abc", "strict", charmap), (b'ABC', 3))
# Dict[int, bytes]
charmap = {ord(c): bytes(2*c.upper(), "ascii") for c in "abcdefgh"}
self.assertEqual(codecs.charmap_encode("abc", "strict", charmap), (b'AABBCC', 3))
# Non-BMP character
charmap = {0x1F40D: ord('p')}
self.assertEqual(codecs.charmap_encode("🐍", 'strict', charmap), (b"p", len("🐍")))
# Fallback characters are charmapped again
charmap = {ord(c): ord(c.upper()) for c in "abcdefgh"}
charmap.update({ord('?'): ord('*')})
self.assertEqual(codecs.charmap_encode("abcxyz", "replace", charmap), (b'ABC***', 6))
charmap.update({ord('?'): b'+?-'})
self.assertEqual(codecs.charmap_encode("abcxyz", "replace", charmap), (b'ABC+?-+?-+?-', 6))
fallbacks = codecs.backslashreplace_errors(UnicodeEncodeError('dummy', "klm", 0, 3, "replace these chars"))[0]
charmap = {ord(c): ord(c.upper()) for c in "abcdefgh" + fallbacks}
charmap[ord('\\')] = ord('/')
self.assertEqual(codecs.charmap_encode("abcklm", "backslashreplace", charmap), (b'ABC/X6B/X6C/X6D', 6))
charmap = {ord(c): ord(c.upper()) for c in "abcdefgh0123456789"}
charmap.update({ord('&'): b'@', ord('#'): b'$:', ord(';'): b':'})
self.assertEqual(codecs.charmap_encode("abcklm", "xmlcharrefreplace", charmap), (b'ABC@$:107:@$:108:@$:109:', 6))
def test_python_replace(uee):
return ("🐍" * (uee.end - uee.start), uee.end)
codecs.register_error(test_python_replace.__name__, test_python_replace)
charmap = {0x1F40D: ord('p')}
self.assertEqual(codecs.charmap_encode("abc", 'test_python_replace', charmap), (b"ppp", 3))
# However, if the error handler returns bytes, these are not remapped, but used as they are
if not is_cli: # TODO: IronPython does not support fallback bytes yet
def my_encode_replace(uee): return (b"?!" * (uee.end - uee.start), uee.end)
codecs.register_error('my_encode_replace', my_encode_replace)
charmap = {ord(c): ord(c.upper()) for c in "abcdefgh"}
self.assertEqual(codecs.charmap_encode("abcxyz", "my_encode_replace", charmap), (b'ABC?!?!?!', 6))
# Fallback characters are not charmapped recursively
for errors in ['strict', 'replace', 'backslashreplace', 'xmlcharrefreplace']:
# Missing key
self.assertRaisesRegex(UnicodeEncodeError, "^'charmap' codec can't encode character.+ in position .+: character maps to <undefined>",
codecs.charmap_encode, "abc", errors, {})
# Character key is not recognized, it must be an int (character ordinal)
self.assertRaisesRegex(UnicodeEncodeError, "^'charmap' codec can't encode character.+ in position .+: character maps to <undefined>",
codecs.charmap_encode, "abc", errors, {c: ord(c.upper()) for c in "abcdefgh"})
            # Explicit None as value mapping
self.assertRaisesRegex(UnicodeEncodeError, "^'charmap' codec can't encode character.+ in position .+: character maps to <undefined>",
codecs.charmap_encode, "abc", errors, {ord(c): None for c in "abcdefgh"})
self.assertRaisesRegex(LookupError, "^unknown error handler name 'non-existent'$",
codecs.charmap_encode, "abc", 'non-existent', {})
# Unsupported: Dict[int, str]
self.assertRaisesRegex(TypeError, "^character mapping must return integer, bytes or None, not str",
codecs.charmap_encode, "abc", 'strict', {ord(c): c.upper() for c in "abcdefgh"})
# Negative values
self.assertRaisesRegex(TypeError, r"^character mapping must be in range\(256\)",
codecs.charmap_encode, "abc", 'strict', {ord(c): -ord(c) for c in "abcdefgh"})
# Values outside of bytes range
self.assertRaisesRegex(TypeError, r"^character mapping must be in range\(256\)",
codecs.charmap_encode, "abc", 'strict', {ord(c): ord(c) + 0x100 for c in "abcdefgh"})
# Invalid charmap_build calls
self.assertRaises(TypeError, codecs.charmap_build)
self.assertRaises(TypeError, codecs.charmap_build, None)
self.assertRaises(TypeError, codecs.charmap_build, 1)
self.assertRaises(TypeError, codecs.charmap_build, "")
self.assertRaises(TypeError, codecs.charmap_build, "", "")
# Using EncodingMap
charmap = "".join(chr(c) for c in range(255, -1, -1))
em = codecs.charmap_build(charmap)
self.assertEqual(codecs.charmap_encode("ABC", 'strict', em), (b"\xbe\xbd\xbc", 3))
charmap = "\0" + "".join(chr(c) for c in range(254, 0, -1)) + "\xff"
em = codecs.charmap_build(charmap)
self.assertEqual(codecs.charmap_encode("ABC", 'strict', em), (b"\xbe\xbd\xbc", 3))
self.assertEqual(str(type(em)), "<class 'EncodingMap'>")
        # Handling of invalid character U+FFFE
self.assertEqual(codecs.charmap_encode("\uFFFE", "strict", {0xFFFE: ord('A')}), (b'A', 1))
em = codecs.charmap_build("\0\uFFFE")
self.assertRaisesRegex(UnicodeEncodeError, r"^'charmap' codec can't encode character '\\ufffe' in position 0: character maps to <undefined>",
codecs.charmap_encode, "\uFFFE", 'strict', em)
# EncodingMap and error handlers
charmap = "".join(chr(c).lower() for c in range(0x60)) # short map, no capital letters
em = codecs.charmap_build(charmap)
self.assertEqual(codecs.charmap_encode("abcABC", 'ignore', em), (b"ABC", 6))
charmap = "".join(chr(c).lower() for c in range(0x60)) # short map, no capital letters
em = codecs.charmap_build(charmap)
self.assertEqual(codecs.charmap_encode("abcABC", 'replace', em), (b"ABC???", 6))
charmap = charmap.replace('?', '`').replace('!', '?')
em = codecs.charmap_build(charmap)
self.assertEqual(codecs.charmap_encode("abcABC", 'replace', em), (b"ABC!!!", 6))
charmap = "".join(chr(c).lower() for c in range(0x60)) # short map, no capital letters
charmap = charmap.replace('\\', '`').replace('/', '\\')
em = codecs.charmap_build(charmap)
self.assertEqual(codecs.charmap_encode("abcABC", 'backslashreplace', em), (b"ABC/X41/X42/X43", 6))
charmap = "".join(chr(c).lower() for c in range(0x60)) # short map, no capital letters
charmap = charmap.replace('#', '`').replace('=', '#')
em = codecs.charmap_build(charmap)
self.assertEqual(codecs.charmap_encode("abcABC", 'xmlcharrefreplace', em), (b"ABC&=65;&=66;&=67;", 6))
charmap = "".join(chr(c) for c in range(ord('p'))) + '\U0001F40D'
em = codecs.charmap_build(charmap)
self.assertEqual(codecs.charmap_encode("axc", 'test_python_replace', em), (b"apc", 3))
# None input
self.assertRaises(TypeError, codecs.charmap_encode, None)
self.assertRaises(TypeError, codecs.charmap_encode, None, '', {})
self.assertEqual(codecs.charmap_encode("", None, None), (b"", 0))
self.assertEqual(codecs.charmap_encode("abc", None, None), (b"abc", 3))
self.assertRaises(UnicodeEncodeError, codecs.charmap_encode, "\u0100", None, None)
em = codecs.charmap_build("".join(chr(c) for c in range(256)))
self.assertEqual(codecs.charmap_encode("", None, em), (b"", 0))
self.assertRaises(UnicodeEncodeError, codecs.charmap_encode, "\u0100", None, em)
self.assertRaises(LookupError, codecs.charmap_encode, "\u0100", "", em)
self.assertRaises(TypeError, codecs.charmap_encode, None, None, em)
@unittest.skipIf(is_posix, 'only UTF8 on posix - mbcs_decode/encode only exist on windows versions of python')
def test_mbcs_decode(self):
for mode in ['strict', 'replace', 'ignore', 'badmodethatdoesnotexist', None]:
if is_netcoreapp and mode == 'badmodethatdoesnotexist': continue # FallbackBuffer created even if not used
self.assertEqual(codecs.mbcs_decode(b'foo', mode), ('foo', 3))
cpyres = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\u20ac\x81\u201a\u0192\u201e\u2026\u2020\u2021\u02c6\u2030\u0160\u2039\u0152\x8d\u017d\x8f\x90\u2018\u2019\u201c\u201d\u2022\u2013\u2014\u02dc\u2122\u0161\u203a\u0153\x9d\u017e\u0178\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff'
allchars = bytes(range(256))
self.assertEqual(codecs.mbcs_decode(allchars, mode)[0], cpyres)
# round tripping
self.assertEqual(codecs.mbcs_encode(codecs.mbcs_decode(allchars, mode)[0])[0], allchars)
self.assertEqual(codecs.mbcs_decode(array.array('I', (1633771873,))), ("aaaa", 4))
self.assertRaises(TypeError, codecs.mbcs_decode, "abc")
self.assertRaises(TypeError, codecs.mbcs_decode, None)
self.assertRaises(TypeError, codecs.mbcs_decode, None, None)
@unittest.skipIf(is_posix, 'only UTF8 on posix - mbcs_decode/encode only exist on windows versions of python')
def test_mbcs_encode(self):
# these are invalid
invalid = [0x80, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8e, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9e, 0x9f]
uinvalid = ''.join([chr(i) for i in invalid])
uall = ''.join([chr(i) for i in range(256) if i not in invalid])
cpyres = b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\x81\x8d\x8f\x90\x9d\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff'
for mode in ['strict', 'replace', 'ignore', 'badmodethatdoesnotexist', None]:
self.assertEqual(codecs.mbcs_encode('foo', mode), (b'foo', 3))
ipyres = codecs.mbcs_encode(uall, mode)[0]
self.assertEqual(cpyres, ipyres)
# all weird unicode characters that are supported
chrs = '\u20ac\u201a\u0192\u201e\u2026\u2020\u2021\u02c6\u2030\u0160\u2039\u0152\u017d\u2018\u2019\u201c\u201d\u2022\u2013\u2014\u02dc\u2122\u0161\u203a\u0153\u017e\u0178'
self.assertEqual(codecs.mbcs_encode(chrs, mode), (b'\x80\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8e\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9e\x9f', 27))
self.assertEqual(codecs.mbcs_encode(uinvalid, 'replace'), (b'?'*len(uinvalid), len(uinvalid)))
self.assertEqual(codecs.mbcs_encode(uinvalid, 'ignore'), (b'', len(uinvalid)))
self.assertRaises(UnicodeEncodeError, codecs.mbcs_encode, uinvalid, None)
self.assertRaises(TypeError, codecs.mbcs_encode, b"abc")
self.assertRaises(TypeError, codecs.mbcs_encode, None)
self.assertRaises(TypeError, codecs.mbcs_encode, None, None)
@unittest.skipIf(is_posix, 'only UTF8 on posix - code_page_decode/encode only exist on windows versions of python')
def test_code_page_decode(self):
# Sanity
self.assertEqual(codecs.code_page_decode(1252, b"aaaa"), ("aaaa", 4))
self.assertEqual(codecs.code_page_decode(1252, array.array('I', (1633771873,))), ("aaaa", 4))
self.assertRaises(TypeError, codecs.code_page_decode, "abc")
self.assertRaises(TypeError, codecs.code_page_decode, None)
self.assertRaises(TypeError, codecs.code_page_decode, None, None)
@unittest.skipIf(is_posix, 'only UTF8 on posix - code_page_decode/encode only exist on windows versions of python')
def test_code_page_encode(self):
# Sanity
self.assertEqual(codecs.code_page_encode(1252, "aaaa"), (b"aaaa", 4))
self.assertRaises(TypeError, codecs.code_page_encode, b"abc")
self.assertRaises(TypeError, codecs.code_page_encode, None)
self.assertRaises(TypeError, codecs.code_page_encode, None, None)
def test_misc_encodings(self):
self.assertEqual('abc'.encode('utf-16'), b'\xff\xfea\x00b\x00c\x00')
self.assertEqual('abc'.encode('utf-16-be'), b'\x00a\x00b\x00c')
for unicode_escape in ['unicode-escape', 'unicode escape', 'unicode_escape']:
self.assertEqual('abc'.encode(unicode_escape), b'abc')
self.assertEqual('abc\\u1234'.encode(unicode_escape), b'abc\\\\u1234')
def test_file_encodings(self):
'''
        Tests valid PEP-263 style file encoding declarations during import
'''
sys.path.append(os.path.join(self.temporary_dir, "tmp_encodings"))
try:
os.mkdir(os.path.join(self.temporary_dir, "tmp_encodings"))
except:
pass
try:
#positive cases
for coding in ip_supported_encodings:
# check if the coding name matches PEP-263 requirements; this test is meaningless for names that do not match
# https://www.python.org/dev/peps/pep-0263/#defining-the-encoding
if not re.match('[-_.a-zA-Z0-9]+$', coding):
continue
# wide-char Unicode encodings not supported by CPython
if not is_cli and re.match('utf[-_](16|32)', coding, re.IGNORECASE):
continue
temp_mod_name = "test_encoding_" + coding.replace('-','_')
with open(os.path.join(self.temporary_dir, "tmp_encodings", temp_mod_name + ".py"), "w", encoding=coding) as f:
# wide-char Unicode encodings need a BOM to be recognized
if re.match('utf[-_](16|32).', coding, re.IGNORECASE):
f.write("\ufeff")
# UTF-8 with signature may only use 'utf-8' as coding (PEP-263)
if re.match('utf[-_]8[-_]sig$', coding, re.IGNORECASE):
coding = 'utf-8'
f.write("# coding: %s" % (coding))
if is_cpython and is_linux:
time.sleep(0.01)
__import__(temp_mod_name)
os.remove(os.path.join(self.temporary_dir, "tmp_encodings", temp_mod_name + ".py"))
finally:
#cleanup
sys.path.remove(os.path.join(self.temporary_dir, "tmp_encodings"))
shutil.rmtree(os.path.join(self.temporary_dir, "tmp_encodings"), True)
# handcrafted positive cases
sys.path.append(os.path.join(self.test_dir, "encoded_files"))
try:
            # Test that using tab or formfeed whitespace characters before "# coding ..." is OK
# and that a tab between 'coding:' and the encoding name is OK too
__import__('ok_encoding_whitespace')
# Test that non-ASCII letters in the encoding name are not part of the name
__import__('ok_encoding_nonascii')
finally:
sys.path.remove(os.path.join(self.test_dir, "encoded_files"))
def test_file_encodings_negative(self):
'''
        Test source file encoding errors on import
'''
sys.path.append(os.path.join(self.test_dir, "encoded_files"))
try:
# Test that "# coding ..." declaration in the first line shadows the second line
with self.assertRaises(SyntaxError) as cm:
__import__("bad_encoding_name")
# CPython's message differs when running this file, but is the same when importing it
self.assertEqual(cm.exception.msg, "unknown encoding: bad-coding-name")
# Test that latin-1 encoded files result in error if a coding declaration is missing
with self.assertRaises(SyntaxError) as cm:
__import__("bad_latin1_nodecl")
# CPython's message differs when importing this file, but is the same when running it
if is_cli:
self.assertTrue(cm.exception.msg.startswith("Non-UTF-8 code starting with '\\xb5' in file"))
else:
self.assertTrue(cm.exception.msg.startswith("(unicode error) 'utf-8' codec can't decode byte 0xb5 in position "))
# Test that latin-1 encoded files result in error if a UTF-8 BOM is present
with self.assertRaises(SyntaxError) as cm:
__import__("bad_latin1_bom")
# CPython's message is the same (both on import and run)
self.assertTrue(cm.exception.msg.startswith("(unicode error) 'utf-8' codec can't decode byte 0xb5 in position"))
# Test that latin-1 encoded files result in error if a UTF-8 BOM is present and 'utf-8' encoding is declared
with self.assertRaises(SyntaxError) as cm:
__import__("bad_latin1_bom_decl")
# CPython's message is the same (both on import and run)
self.assertTrue(cm.exception.msg.startswith("(unicode error) 'utf-8' codec can't decode byte 0xb5 in position"))
# Test that utf-8 encoded files result in error if a UTF-8 BOM is present and 'iso-8859-1' encoding is declared
with self.assertRaises(SyntaxError) as cm:
__import__("bad_utf8_bom_decl")
# CPython's message is the same (both on import and run)
self.assertTrue(cm.exception.msg.startswith("encoding problem: iso-8859-1 with BOM"))
# Test that using a non-breaking whitespace inside the magic comment removes the magic
self.assertRaises(SyntaxError, __import__, "bad_latin1_nbsp")
finally:
sys.path.remove(os.path.join(self.test_dir, "encoded_files"))
def test_cp11334(self):
def run_python(filename):
p = subprocess.Popen([sys.executable, os.path.join(self.test_dir, "encoded_files", filename)], shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
t_in, t_out, t_err = (p.stdin, p.stdout, p.stderr)
t_err_lines = t_err.readlines()
t_out_lines = t_out.readlines()
t_err.close()
t_out.close()
t_in.close()
return t_out_lines, t_err_lines
#--Test that not using "# coding ..." results in an error
t_out_lines, t_err_lines = run_python("cp11334_bad.py")
self.assertEqual(len(t_out_lines), 0)
self.assertTrue(t_err_lines[0].startswith(b" File"))
self.assertTrue(t_err_lines[0].rstrip().endswith(b', line 1'))
self.assertTrue(t_err_lines[1].startswith(b"SyntaxError: Non-UTF-8 code starting with '\\xb5' in file"))
#--Test that using "# coding ..." is OK
t_out_lines, t_err_lines = run_python("cp11334_ok.py")
self.assertEqual(len(t_err_lines), 0)
self.assertEqual(len(t_out_lines), 1)
if is_cli:
# CodePlex 11334: IronPython uses active console codepage for output
# Check active codepage in cmd.exe by running 'chcp' or 'mode'
# Check active codepage in powershell.exe by evaluating [Console]::OutputEncoding
import clr
import System
clr.AddReference('System.Console')
# expected = '\xb5ble'.encode(System.Console.OutputEncoding) # this will not work correctly if encoding is UTF-8, which by default adds a preamble
expected = '\xb5ble'.encode("cp" + str(System.Console.OutputEncoding.CodePage))
self.assertEqual(t_out_lines[0].rstrip(), expected)
else:
# CPython uses locale.getpreferredencoding() for pipe output
            # unless overridden by the PYTHONIOENCODING environment variable.
# The active console codepage is ignored (which is a confirmed bug: bpo-6135, bpo-27179)
if not 'PYTHONIOENCODING' in os.environ:
import locale
expected = '\xb5ble'.encode(locale.getpreferredencoding())
self.assertEqual(t_out_lines[0].rstrip(), expected)
def test_cp1214(self):
"""
TODO: extend this a great deal
"""
with self.assertRaises(LookupError):
b'7FF80000000000007FF0000000000000'.decode('hex')
self.assertEqual(codecs.decode(b'7FF80000000000007FF0000000000000', 'hex'),
b'\x7f\xf8\x00\x00\x00\x00\x00\x00\x7f\xf0\x00\x00\x00\x00\x00\x00')
def test_codecs_lookup(self):
l = []
def my_func(encoding, cache = l):
l.append(encoding)
codecs.register(my_func)
allchars = ''.join([chr(i) for i in range(1, 256)])
self.assertRaises(LookupError, codecs.lookup, allchars)
        # Only ASCII letters are lowercased (and spaces become hyphens) for lookup purposes
lowerchars = allchars.translate(str.maketrans(' ' + string.ascii_uppercase, '-' + string.ascii_lowercase))
for i in range(len(lowerchars)):
if l[0][i] != lowerchars[i]:
self.assertTrue(False, 'bad chars at index %d: %r %r' % (i, l[0][i], lowerchars[i]))
self.assertRaises(TypeError, codecs.lookup, '\0')
self.assertRaises(TypeError, codecs.lookup, 'abc\0')
self.assertEqual(len(l), 1)
def test_lookup_encodings(self):
try:
with self.assertRaises(UnicodeError):
b'a'.decode('undefined')
except LookupError:
# if we don't have encodings then this will fail so
# make sure we're failing because we don't have encodings
self.assertRaises(ImportError, __import__, 'encodings')
self.assertRaises(TypeError, codecs.lookup, None)
def test_cp1019(self):
#--Test that bogus encodings fail properly
# https://github.com/IronLanguages/main/issues/255
p = subprocess.Popen([sys.executable, os.path.join(self.test_dir, "encoded_files", "cp1019.py")], shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
t_in, t_out, t_err = (p.stdin, p.stdout, p.stderr)
t_err_lines = t_err.readlines()
t_out_lines = t_out.readlines()
t_err.close()
t_out.close()
t_in.close()
self.assertEqual(len(t_out_lines), 0)
self.assertTrue(t_err_lines[0].startswith(b" File"))
# CPython's message differs when running this file, but is the same when importing it
if is_cli:
self.assertTrue(t_err_lines[1].startswith(b"SyntaxError: unknown encoding: garbage"))
else:
self.assertTrue(t_err_lines[1].startswith(b"SyntaxError: encoding problem: garbage"))
def test_cp20302(self):
import _codecs
for encoding in ip_supported_encodings:
_codecs.lookup(encoding)
def test_charmap_build(self):
decodemap = ''.join([chr(i).upper() if chr(i).islower() else chr(i).lower() for i in range(256)])
encodemap = codecs.charmap_build(decodemap)
self.assertEqual(codecs.charmap_decode(b'Hello World', 'strict', decodemap), ('hELLO wORLD', 11))
self.assertEqual(codecs.charmap_encode('Hello World', 'strict', encodemap), (b'hELLO wORLD', 11))
        decodemap = ''.join(chr(i) for i in range(254)) + "\U0001F40D" + "\xFF"
encodemap = codecs.charmap_build(decodemap)
s = '\xFF\U0001F40D\xFF'
b = b'\xFF\xFE\xFF'
self.assertEqual(codecs.charmap_decode(b, 'strict', decodemap), (s, len(b)))
self.assertEqual(codecs.charmap_encode(s, 'strict', encodemap), (b, len(s)))
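    # charmap_build inverts a 256-entry decoding table: position i in the
    # string is byte i, so the resulting EncodingMap sends each character back
    # to its index. A minimal illustration with the identity table:
    #
    #   table = ''.join(chr(i) for i in range(256))
    #   em = codecs.charmap_build(table)
    #   assert codecs.charmap_encode('abc', 'strict', em) == (b'abc', 3)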
def test_gh16(self):
"""
https://github.com/IronLanguages/ironpython2/issues/16
"""
# test with a standard error handler
res = "\xac\u1234\u20ac\u8000".encode("ptcp154", "backslashreplace")
self.assertEqual(res, b"\xac\\u1234\\u20ac\\u8000")
# test with a custom error handler
def handler(ex):
return ("", ex.end)
codecs.register_error("test_unicode_error", handler)
res = "\xac\u1234\u20ac\u8000".encode("ptcp154", "test_unicode_error")
self.assertEqual(res, b"\xac")
def handler1(ex):
print()
print(ex)
return ("", ex.end + 1)
codecs.register_error("test_unicode_error1", handler1)
if is_cli:
with self.assertRaises(NotImplementedError):
res = "+++\xac\u1234\u20ac\u8000---".encode("ptcp154", "test_unicode_error1")
else:
res = "+++\xac\u1234\u20ac\u8000---".encode("ptcp154", "test_unicode_error1")
self.assertEqual(res, b"+++\xac--")
run_test(__name__)
|
{
"content_hash": "f783ff1021bfdd64646ba032d8fb64f5",
"timestamp": "",
"source": "github",
"line_count": 2052,
"max_line_length": 936,
"avg_line_length": 54.874756335282655,
"alnum_prop": 0.6250277523689423,
"repo_name": "IronLanguages/ironpython3",
"id": "aaa7d58d26e89586d5e3a0cff0d35a8619b8c48f",
"size": "113122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tests/modules/io_related/test_codecs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6855"
},
{
"name": "C",
"bytes": "239473"
},
{
"name": "C#",
"bytes": "12619304"
},
{
"name": "C++",
"bytes": "28403"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "13157428"
},
{
"name": "Makefile",
"bytes": "332"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "84504"
},
{
"name": "Python",
"bytes": "29490541"
},
{
"name": "Roff",
"bytes": "21080"
},
{
"name": "Shell",
"bytes": "4872"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import re
import struct
import logging
import six # Python 2+3 compatibility
try:
import hashlib as md5
except ImportError:
import md5
try:
from Crypto.Cipher import ARC4
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
except ImportError:
AES = SHA256 = None
from . import arcfour as ARC4
from .psparser import PSEOF
from .psparser import literal_name
from .psparser import LIT
from .psparser import KWD
from . import settings
from .pdftypes import PDFException
from .pdftypes import PDFTypeError
from .pdftypes import PDFStream
from .pdftypes import PDFObjectNotFound
from .pdftypes import decipher_all
from .pdftypes import int_value
from .pdftypes import str_value
from .pdftypes import list_value
from .pdftypes import dict_value
from .pdftypes import stream_value
from .pdfparser import PDFSyntaxError
from .pdfparser import PDFStreamParser
from .utils import choplist
from .utils import nunpack
from .utils import decode_text
log = logging.getLogger(__name__)
## Exceptions
##
class PDFNoValidXRef(PDFSyntaxError):
pass
class PDFNoOutlines(PDFException):
pass
class PDFDestinationNotFound(PDFException):
pass
class PDFEncryptionError(PDFException):
pass
class PDFPasswordIncorrect(PDFEncryptionError):
pass
class PDFTextExtractionNotAllowed(PDFEncryptionError):
pass
# some predefined literals and keywords.
LITERAL_OBJSTM = LIT('ObjStm')
LITERAL_XREF = LIT('XRef')
LITERAL_CATALOG = LIT('Catalog')
## XRefs
##
class PDFBaseXRef(object):
def get_trailer(self):
raise NotImplementedError
def get_objids(self):
return []
# Must return
# (strmid, index, genno)
# or (None, pos, genno)
def get_pos(self, objid):
raise KeyError(objid)
## PDFXRef
##
class PDFXRef(PDFBaseXRef):
def __init__(self):
self.offsets = {}
self.trailer = {}
return
def __repr__(self):
return '<PDFXRef: offsets=%r>' % (self.offsets.keys())
def load(self, parser):
while True:
try:
(pos, line) = parser.nextline()
if not line.strip():
continue
except PSEOF:
raise PDFNoValidXRef('Unexpected EOF - file corrupted?')
if not line:
raise PDFNoValidXRef('Premature eof: %r' % parser)
if line.startswith(b'trailer'):
parser.seek(pos)
break
f = line.strip().split(b' ')
if len(f) != 2:
raise PDFNoValidXRef('Trailer not found: %r: line=%r' % (parser, line))
try:
if six.PY2:
(start, nobjs) = map(long, f)
else:
(start, nobjs) = map(int, f)
except ValueError:
raise PDFNoValidXRef('Invalid line: %r: line=%r' % (parser, line))
for objid in range(start, start+nobjs):
try:
(_, line) = parser.nextline()
except PSEOF:
raise PDFNoValidXRef('Unexpected EOF - file corrupted?')
f = line.strip().split(b' ')
if len(f) != 3:
raise PDFNoValidXRef('Invalid XRef format: %r, line=%r' % (parser, line))
(pos, genno, use) = f
if use != b'n':
continue
self.offsets[objid] = (None, long(pos) if six.PY2 else int(pos), int(genno))
log.debug('xref objects: %r', self.offsets)
self.load_trailer(parser)
return
def load_trailer(self, parser):
try:
(_, kwd) = parser.nexttoken()
assert kwd is KWD(b'trailer'), str(kwd)
(_, dic) = parser.nextobject()
except PSEOF:
x = parser.pop(1)
if not x:
raise PDFNoValidXRef('Unexpected EOF - file corrupted')
(_, dic) = x[0]
self.trailer.update(dict_value(dic))
log.debug('trailer=%r', self.trailer)
return
def get_trailer(self):
return self.trailer
def get_objids(self):
return six.iterkeys(self.offsets)
def get_pos(self, objid):
try:
return self.offsets[objid]
except KeyError:
raise
## PDFXRefFallback
##
class PDFXRefFallback(PDFXRef):
def __repr__(self):
return '<PDFXRefFallback: offsets=%r>' % (self.offsets.keys())
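    # Used when no valid xref table/stream is found: scan the entire file for
    # "<objid> <genno> obj" headers and rebuild the offset table from them.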
PDFOBJ_CUE = re.compile(r'^(\d+)\s+(\d+)\s+obj\b')
def load(self, parser):
parser.seek(0)
while 1:
try:
(pos, line) = parser.nextline()
except PSEOF:
break
if line.startswith(b'trailer'):
parser.seek(pos)
self.load_trailer(parser)
log.info('trailer: %r', self.trailer)
break
if six.PY3:
                line = line.decode('latin-1')  # default PDF encoding
m = self.PDFOBJ_CUE.match(line)
if not m:
continue
(objid, genno) = m.groups()
objid = int(objid)
genno = int(genno)
self.offsets[objid] = (None, pos, genno)
# expand ObjStm.
parser.seek(pos)
(_, obj) = parser.nextobject()
if isinstance(obj, PDFStream) and obj.get('Type') is LITERAL_OBJSTM:
stream = stream_value(obj)
try:
n = stream['N']
except KeyError:
if settings.STRICT:
raise PDFSyntaxError('N is not defined: %r' % stream)
n = 0
parser1 = PDFStreamParser(stream.get_data())
objs = []
try:
while 1:
(_, obj) = parser1.nextobject()
objs.append(obj)
except PSEOF:
pass
n = min(n, len(objs)//2)
for index in range(n):
objid1 = objs[index*2]
self.offsets[objid1] = (objid, index, 0)
return
## PDFXRefStream
##
class PDFXRefStream(PDFBaseXRef):
def __init__(self):
self.data = None
self.entlen = None
self.fl1 = self.fl2 = self.fl3 = None
self.ranges = []
return
def __repr__(self):
return '<PDFXRefStream: ranges=%r>' % (self.ranges)
def load(self, parser):
(_, objid) = parser.nexttoken() # ignored
(_, genno) = parser.nexttoken() # ignored
(_, kwd) = parser.nexttoken()
(_, stream) = parser.nextobject()
if not isinstance(stream, PDFStream) or stream['Type'] is not LITERAL_XREF:
raise PDFNoValidXRef('Invalid PDF stream spec.')
size = stream['Size']
index_array = stream.get('Index', (0, size))
if len(index_array) % 2 != 0:
raise PDFSyntaxError('Invalid index number')
self.ranges.extend(choplist(2, index_array))
(self.fl1, self.fl2, self.fl3) = stream['W']
self.data = stream.get_data()
self.entlen = self.fl1+self.fl2+self.fl3
self.trailer = stream.attrs
log.info('xref stream: objid=%s, fields=%d,%d,%d',
', '.join(map(repr, self.ranges)),
self.fl1, self.fl2, self.fl3)
return
def get_trailer(self):
return self.trailer
def get_objids(self):
for (start, nobjs) in self.ranges:
for i in range(nobjs):
offset = self.entlen * i
ent = self.data[offset:offset+self.entlen]
f1 = nunpack(ent[:self.fl1], 1)
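                # types 1 (uncompressed) and 2 (stored in an object stream)
                # are in-use entries; type 0 entries are on the free list.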
if f1 == 1 or f1 == 2:
yield start+i
return
def get_pos(self, objid):
index = 0
for (start, nobjs) in self.ranges:
if start <= objid and objid < start+nobjs:
index += objid - start
break
else:
index += nobjs
else:
raise KeyError(objid)
offset = self.entlen * index
ent = self.data[offset:offset+self.entlen]
f1 = nunpack(ent[:self.fl1], 1)
f2 = nunpack(ent[self.fl1:self.fl1+self.fl2])
f3 = nunpack(ent[self.fl1+self.fl2:])
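        # Type 1: uncompressed object at byte offset f2 with generation f3.
        # Type 2: object stored in object stream f2 at index f3.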
if f1 == 1:
return (None, f2, f3)
elif f1 == 2:
return (f2, f3, 0)
else:
# this is a free object
raise KeyError(objid)
## PDFSecurityHandler
##
class PDFStandardSecurityHandler(object):
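    # The fixed 32-byte padding string from the PDF spec (Algorithm 3.2),
    # used to pad or truncate user-supplied passwords to exactly 32 bytes.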
PASSWORD_PADDING = (b'(\xbfN^Nu\x8aAd\x00NV\xff\xfa\x01\x08'
b'..\x00\xb6\xd0h>\x80/\x0c\xa9\xfedSiz')
supported_revisions = (2, 3)
def __init__(self, docid, param, password=''):
self.docid = docid
self.param = param
self.password = password
self.init()
return
def init(self):
self.init_params()
if self.r not in self.supported_revisions:
raise PDFEncryptionError('Unsupported revision: param=%r' % self.param)
self.init_key()
return
def init_params(self):
self.v = int_value(self.param.get('V', 0))
self.r = int_value(self.param['R'])
self.p = int_value(self.param['P'])
self.o = str_value(self.param['O'])
self.u = str_value(self.param['U'])
self.length = int_value(self.param.get('Length', 40))
return
def init_key(self):
self.key = self.authenticate(self.password)
if self.key is None:
raise PDFPasswordIncorrect
return
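    # /P permission flags, with 1-based bit numbering per the PDF spec:
    # bit 3 (0x4) = print, bit 4 (0x8) = modify, bit 5 (0x10) = copy/extract.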
def is_printable(self):
return bool(self.p & 4)
def is_modifiable(self):
return bool(self.p & 8)
def is_extractable(self):
return bool(self.p & 16)
def compute_u(self, key):
if self.r == 2:
# Algorithm 3.4
return ARC4.new(key).encrypt(self.PASSWORD_PADDING) # 2
else:
# Algorithm 3.5
hash = md5.md5(self.PASSWORD_PADDING) # 2
hash.update(self.docid[0]) # 3
result = ARC4.new(key).encrypt(hash.digest()) # 4
for i in range(1, 20): # 5
k = b''.join(six.int2byte(c ^ i) for c in six.iterbytes(key))
result = ARC4.new(k).encrypt(result)
result += result # 6
return result
def compute_encryption_key(self, password):
# Algorithm 3.2
password = (password + self.PASSWORD_PADDING)[:32] # 1
hash = md5.md5(password) # 2
hash.update(self.o) # 3
hash.update(struct.pack('<l', self.p)) # 4
hash.update(self.docid[0]) # 5
if self.r >= 4:
if not self.encrypt_metadata:
hash.update(b'\xff\xff\xff\xff')
result = hash.digest()
n = 5
if self.r >= 3:
n = self.length // 8
for _ in range(50):
result = md5.md5(result[:n]).digest()
return result[:n]
def authenticate(self, password):
password = password.encode("latin1")
key = self.authenticate_user_password(password)
if key is None:
key = self.authenticate_owner_password(password)
return key
def authenticate_user_password(self, password):
key = self.compute_encryption_key(password)
if self.verify_encryption_key(key):
return key
else:
return None
def verify_encryption_key(self, key):
# Algorithm 3.6
u = self.compute_u(key)
if self.r == 2:
return u == self.u
return u[:16] == self.u[:16]
def authenticate_owner_password(self, password):
# Algorithm 3.7
password = (password + self.PASSWORD_PADDING)[:32]
hash = md5.md5(password)
if self.r >= 3:
for _ in range(50):
hash = md5.md5(hash.digest())
n = 5
if self.r >= 3:
n = self.length // 8
key = hash.digest()[:n]
if self.r == 2:
user_password = ARC4.new(key).decrypt(self.o)
else:
user_password = self.o
for i in range(19, -1, -1):
k = b''.join(six.int2byte(c ^ i) for c in six.iterbytes(key))
user_password = ARC4.new(k).decrypt(user_password)
return self.authenticate_user_password(user_password)
def decrypt(self, objid, genno, data, attrs=None):
return self.decrypt_rc4(objid, genno, data)
def decrypt_rc4(self, objid, genno, data):
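        # Algorithm 3.1: append the low 3 bytes of the object id and the low
        # 2 bytes of the generation number to the document key, MD5 the
        # result, and use at most 16 bytes of the digest as the RC4 key.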
key = self.key + struct.pack('<L', objid)[:3] + struct.pack('<L', genno)[:2]
hash = md5.md5(key)
key = hash.digest()[:min(len(key), 16)]
return ARC4.new(key).decrypt(data)
class PDFStandardSecurityHandlerV4(PDFStandardSecurityHandler):
supported_revisions = (4,)
def init_params(self):
super(PDFStandardSecurityHandlerV4, self).init_params()
self.length = 128
self.cf = dict_value(self.param.get('CF'))
self.stmf = literal_name(self.param['StmF'])
self.strf = literal_name(self.param['StrF'])
self.encrypt_metadata = bool(self.param.get('EncryptMetadata', True))
if self.stmf != self.strf:
raise PDFEncryptionError('Unsupported crypt filter: param=%r' % self.param)
self.cfm = {}
for k, v in self.cf.items():
f = self.get_cfm(literal_name(v['CFM']))
if f is None:
raise PDFEncryptionError('Unknown crypt filter method: param=%r' % self.param)
self.cfm[k] = f
self.cfm['Identity'] = self.decrypt_identity
if self.strf not in self.cfm:
raise PDFEncryptionError('Undefined crypt filter: param=%r' % self.param)
return
def get_cfm(self, name):
if name == 'V2':
return self.decrypt_rc4
elif name == 'AESV2':
return self.decrypt_aes128
else:
return None
def decrypt(self, objid, genno, data, attrs=None, name=None):
if not self.encrypt_metadata and attrs is not None:
t = attrs.get('Type')
if t is not None and literal_name(t) == 'Metadata':
return data
if name is None:
name = self.strf
return self.cfm[name](objid, genno, data)
def decrypt_identity(self, objid, genno, data):
return data
def decrypt_aes128(self, objid, genno, data):
key = self.key + struct.pack('<L', objid)[:3] + struct.pack('<L', genno)[:2] + b'sAlT'
hash = md5.md5(key)
key = hash.digest()[:min(len(key), 16)]
return AES.new(key, mode=AES.MODE_CBC, IV=data[:16]).decrypt(data[16:])
class PDFStandardSecurityHandlerV5(PDFStandardSecurityHandlerV4):
supported_revisions = (5,)
def init_params(self):
super(PDFStandardSecurityHandlerV5, self).init_params()
self.length = 256
self.oe = str_value(self.param['OE'])
self.ue = str_value(self.param['UE'])
self.o_hash = self.o[:32]
self.o_validation_salt = self.o[32:40]
self.o_key_salt = self.o[40:]
self.u_hash = self.u[:32]
self.u_validation_salt = self.u[32:40]
self.u_key_salt = self.u[40:]
return
def get_cfm(self, name):
if name == 'AESV3':
return self.decrypt_aes256
else:
return None
def authenticate(self, password):
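        # Revision 5 (AESV3): hash the UTF-8 password with the validation
        # salts to check it against /O and then /U; on a match, the key-salt
        # hash decrypts /OE or /UE (AES-CBC, zero IV) into the file key.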
password = password.encode('utf-8')[:127]
hash = SHA256.new(password)
hash.update(self.o_validation_salt)
hash.update(self.u)
if hash.digest() == self.o_hash:
hash = SHA256.new(password)
hash.update(self.o_key_salt)
hash.update(self.u)
return AES.new(hash.digest(), mode=AES.MODE_CBC, IV=b'\x00' * 16).decrypt(self.oe)
hash = SHA256.new(password)
hash.update(self.u_validation_salt)
if hash.digest() == self.u_hash:
hash = SHA256.new(password)
hash.update(self.u_key_salt)
return AES.new(hash.digest(), mode=AES.MODE_CBC, IV=b'\x00' * 16).decrypt(self.ue)
return None
def decrypt_aes256(self, objid, genno, data):
return AES.new(self.key, mode=AES.MODE_CBC, IV=data[:16]).decrypt(data[16:])
## PDFDocument
##
class PDFDocument(object):
"""PDFDocument object represents a PDF document.
Since a PDF file can be very big, normally it is not loaded at
    once. So a PDF document has to cooperate with a PDF parser in order to
    fetch the data dynamically as processing goes.
Typical usage:
doc = PDFDocument(parser, password)
obj = doc.getobj(objid)
"""
security_handler_registry = {
1: PDFStandardSecurityHandler,
2: PDFStandardSecurityHandler,
}
if AES is not None:
security_handler_registry[4] = PDFStandardSecurityHandlerV4
if SHA256 is not None:
security_handler_registry[5] = PDFStandardSecurityHandlerV5
def __init__(self, parser, password='', caching=True, fallback=True):
"Set the document to use a given PDFParser object."
self.caching = caching
self.xrefs = []
self.info = []
self.catalog = None
self.encryption = None
self.decipher = None
self._parser = None
self._cached_objs = {}
self._parsed_objs = {}
self._parser = parser
self._parser.set_document(self)
self.is_printable = self.is_modifiable = self.is_extractable = True
# Retrieve the information of each header that was appended
# (maybe multiple times) at the end of the document.
try:
pos = self.find_xref(parser)
self.read_xref_from(parser, pos, self.xrefs)
except PDFNoValidXRef:
pass # fallback = True
if fallback:
parser.fallback = True
xref = PDFXRefFallback()
xref.load(parser)
self.xrefs.append(xref)
for xref in self.xrefs:
trailer = xref.get_trailer()
if not trailer:
continue
# If there's an encryption info, remember it.
if 'Encrypt' in trailer:
#assert not self.encryption, str(self.encryption)
self.encryption = (list_value(trailer['ID']),
dict_value(trailer['Encrypt']))
self._initialize_password(password)
if 'Info' in trailer:
self.info.append(dict_value(trailer['Info']))
if 'Root' in trailer:
# Every PDF file must have exactly one /Root dictionary.
self.catalog = dict_value(trailer['Root'])
break
else:
raise PDFSyntaxError('No /Root object! - Is this really a PDF?')
if self.catalog.get('Type') is not LITERAL_CATALOG:
if settings.STRICT:
raise PDFSyntaxError('Catalog not found!')
return
# _initialize_password(password=b'')
# Perform the initialization with a given password.
def _initialize_password(self, password=''):
(docid, param) = self.encryption
if literal_name(param.get('Filter')) != 'Standard':
raise PDFEncryptionError('Unknown filter: param=%r' % param)
v = int_value(param.get('V', 0))
factory = self.security_handler_registry.get(v)
if factory is None:
raise PDFEncryptionError('Unknown algorithm: param=%r' % param)
handler = factory(docid, param, password)
self.decipher = handler.decrypt
self.is_printable = handler.is_printable()
self.is_modifiable = handler.is_modifiable()
self.is_extractable = handler.is_extractable()
self._parser.fallback = False # need to read streams with exact length
return
def _getobj_objstm(self, stream, index, objid):
if stream.objid in self._parsed_objs:
(objs, n) = self._parsed_objs[stream.objid]
else:
(objs, n) = self._get_objects(stream)
if self.caching:
self._parsed_objs[stream.objid] = (objs, n)
i = n*2+index
try:
obj = objs[i]
except IndexError:
raise PDFSyntaxError('index too big: %r' % index)
return obj
def _get_objects(self, stream):
if stream.get('Type') is not LITERAL_OBJSTM:
if settings.STRICT:
raise PDFSyntaxError('Not a stream object: %r' % stream)
try:
n = stream['N']
except KeyError:
if settings.STRICT:
raise PDFSyntaxError('N is not defined: %r' % stream)
n = 0
parser = PDFStreamParser(stream.get_data())
parser.set_document(self)
objs = []
try:
while 1:
(_, obj) = parser.nextobject()
objs.append(obj)
except PSEOF:
pass
return (objs, n)
def _getobj_parse(self, pos, objid):
self._parser.seek(pos)
(_, objid1) = self._parser.nexttoken() # objid
(_, genno) = self._parser.nexttoken() # genno
(_, kwd) = self._parser.nexttoken()
        # #### hack around malformed pdf files
        # copied from https://github.com/jaepil/pdfminer3k/blob/master/pdfminer/pdfparser.py#L399
        # to solve https://github.com/pdfminer/pdfminer.six/issues/56
        # assert objid1 == objid, str((objid1, objid))
if objid1 != objid:
x = []
            while kwd is not KWD(b'obj'):  # KWD interns keywords, so identity comparison works
                (_, kwd) = self._parser.nexttoken()
x.append(kwd)
if x:
objid1 = x[-2]
genno = x[-1]
# #### end hack around malformed pdf files
if objid1 != objid:
raise PDFSyntaxError('objid mismatch: %r=%r' % (objid1, objid))
if kwd != KWD(b'obj'):
raise PDFSyntaxError('Invalid object spec: offset=%r' % pos)
(_, obj) = self._parser.nextobject()
return obj
# can raise PDFObjectNotFound
def getobj(self, objid):
assert objid != 0
if not self.xrefs:
raise PDFException('PDFDocument is not initialized')
log.debug('getobj: objid=%r', objid)
if objid in self._cached_objs:
(obj, genno) = self._cached_objs[objid]
else:
for xref in self.xrefs:
try:
(strmid, index, genno) = xref.get_pos(objid)
except KeyError:
continue
try:
if strmid is not None:
stream = stream_value(self.getobj(strmid))
obj = self._getobj_objstm(stream, index, objid)
else:
obj = self._getobj_parse(index, objid)
if self.decipher:
obj = decipher_all(self.decipher, objid, genno, obj)
if isinstance(obj, PDFStream):
obj.set_objid(objid, genno)
break
except (PSEOF, PDFSyntaxError):
continue
else:
raise PDFObjectNotFound(objid)
log.debug('register: objid=%r: %r', objid, obj)
if self.caching:
self._cached_objs[objid] = (obj, genno)
return obj
def get_outlines(self):
if 'Outlines' not in self.catalog:
raise PDFNoOutlines
def search(entry, level):
entry = dict_value(entry)
if 'Title' in entry:
if 'A' in entry or 'Dest' in entry:
title = decode_text(str_value(entry['Title']))
dest = entry.get('Dest')
action = entry.get('A')
se = entry.get('SE')
yield (level, title, dest, action, se)
if 'First' in entry and 'Last' in entry:
for x in search(entry['First'], level+1):
yield x
if 'Next' in entry:
for x in search(entry['Next'], level):
yield x
return
return search(self.catalog['Outlines'], 0)
def lookup_name(self, cat, key):
try:
names = dict_value(self.catalog['Names'])
except (PDFTypeError, KeyError):
raise KeyError((cat, key))
# may raise KeyError
d0 = dict_value(names[cat])
def lookup(d):
if 'Limits' in d:
(k1, k2) = list_value(d['Limits'])
if key < k1 or k2 < key:
return None
if 'Names' in d:
objs = list_value(d['Names'])
names = dict(choplist(2, objs))
return names[key]
if 'Kids' in d:
for c in list_value(d['Kids']):
v = lookup(dict_value(c))
if v:
return v
raise KeyError((cat, key))
return lookup(d0)
def get_dest(self, name):
try:
# PDF-1.2 or later
obj = self.lookup_name('Dests', name)
except KeyError:
# PDF-1.1 or prior
if 'Dests' not in self.catalog:
raise PDFDestinationNotFound(name)
d0 = dict_value(self.catalog['Dests'])
if name not in d0:
raise PDFDestinationNotFound(name)
obj = d0[name]
return obj
# find_xref
def find_xref(self, parser):
"""Internal function used to locate the first XRef."""
# search the last xref table by scanning the file backwards.
prev = None
for line in parser.revreadlines():
line = line.strip()
log.debug('find_xref: %r', line)
if line == b'startxref':
break
if line:
prev = line
else:
raise PDFNoValidXRef('Unexpected EOF')
log.info('xref found: pos=%r', prev)
return long(prev) if six.PY2 else int(prev)
# read xref table
def read_xref_from(self, parser, start, xrefs):
"""Reads XRefs from the given location."""
parser.seek(start)
parser.reset()
try:
(pos, token) = parser.nexttoken()
except PSEOF:
raise PDFNoValidXRef('Unexpected EOF')
log.info('read_xref_from: start=%d, token=%r', start, token)
if isinstance(token, int):
# XRefStream: PDF-1.5
parser.seek(pos)
parser.reset()
xref = PDFXRefStream()
xref.load(parser)
else:
if token is parser.KEYWORD_XREF:
parser.nextline()
xref = PDFXRef()
xref.load(parser)
xrefs.append(xref)
trailer = xref.get_trailer()
log.info('trailer: %r', trailer)
if 'XRefStm' in trailer:
pos = int_value(trailer['XRefStm'])
self.read_xref_from(parser, pos, xrefs)
if 'Prev' in trailer:
# find previous xref
pos = int_value(trailer['Prev'])
self.read_xref_from(parser, pos, xrefs)
return
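# Usage sketch (illustrative; PDFParser lives in pdfminer.pdfparser as in
# pdfminer.six):
#   from pdfminer.pdfparser import PDFParser
#   with open('report.pdf', 'rb') as fp:
#       doc = PDFDocument(PDFParser(fp), password='')
#       info = doc.info                       # /Info dicts from the trailers
#       outlines = list(doc.get_outlines())   # raises PDFNoOutlines if absent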
|
{
"content_hash": "aba38d504b54895b8ed8d78f5869511c",
"timestamp": "",
"source": "github",
"line_count": 813,
"max_line_length": 97,
"avg_line_length": 33.708487084870846,
"alnum_prop": 0.5377850757161102,
"repo_name": "rotula/pdfminer",
"id": "b92d7c6e2080d343194ae7310e811bb11ea235ff",
"size": "27406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdfminer/pdfdocument.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1861"
},
{
"name": "Python",
"bytes": "527345"
}
],
"symlink_target": ""
}
|
import numpy
import rospy
import tf
import tf2_ros
import geometry_msgs.msg
import math
def normalize(v):
norm=numpy.linalg.norm(v)
if norm==0:
return v
return v/norm
def lookupMatrixLH(cameraPosition, cameraTarget, cameraUpVector):
zaxis = normalize(cameraTarget - cameraPosition)
xaxis = normalize(numpy.cross(cameraUpVector, zaxis))
yaxis = numpy.cross(zaxis, xaxis)
array = numpy.array([
xaxis[0], yaxis[0], zaxis[0], 0,
xaxis[1], yaxis[1], zaxis[1], 0,
xaxis[2], yaxis[2], zaxis[2], 0,
-numpy.dot(xaxis, cameraPosition),
-numpy.dot(yaxis, cameraPosition),
-numpy.dot(zaxis, cameraPosition),
1], dtype=float)
return array.reshape(4, 4, order='F')
def lookupMatrixRH(cameraPosition, cameraTarget, cameraUpVector):
zaxis = normalize(cameraPosition - cameraTarget)
xaxis = normalize(numpy.cross(cameraUpVector, zaxis))
yaxis = numpy.cross(zaxis, xaxis)
array = numpy.array([
xaxis[0], yaxis[0], zaxis[0], 0,
xaxis[1], yaxis[1], zaxis[1], 0,
xaxis[2], yaxis[2], zaxis[2], 0,
-numpy.dot(xaxis, cameraPosition),
-numpy.dot(yaxis, cameraPosition),
-numpy.dot(zaxis, cameraPosition),
1], dtype=float)
return array.reshape(4, 4, order='F')
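# Quick sanity check for the helpers above (illustrative): a camera at the
# origin looking down +z with +y up yields the identity view matrix in the
# left-handed convention.
#   eye = numpy.array([0.0, 0.0, 0.0])
#   target = numpy.array([0.0, 0.0, 1.0])
#   up = numpy.array([0.0, 1.0, 0.0])
#   print(lookupMatrixLH(eye, target, up))  # ~= numpy.eye(4)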
def publish_transforms():
TBase2Object = numpy.dot(
tf.transformations.quaternion_matrix(
tf.transformations.quaternion_from_euler(0.79, 0.0, 0.79)
),
tf.transformations.translation_matrix((0.0, 1.0, 1.0))
)
TObject2Base = tf.transformations.inverse_matrix(TBase2Object)
tr = tf.transformations.translation_from_matrix(TBase2Object)
q = tf.transformations.quaternion_from_matrix(TBase2Object)
object_transform = geometry_msgs.msg.TransformStamped()
object_transform.transform.translation.x = tr[0]
object_transform.transform.translation.y = tr[1]
object_transform.transform.translation.z = tr[2]
object_transform.transform.rotation.x = q[0]
object_transform.transform.rotation.y = q[1]
object_transform.transform.rotation.z = q[2]
object_transform.transform.rotation.w = q[3]
object_transform.header.stamp = rospy.Time.now()
object_transform.header.frame_id = "base_frame"
object_transform.child_frame_id = "object_frame"
br.sendTransform(object_transform)
TBase2Robot = numpy.dot(
tf.transformations.quaternion_matrix(
tf.transformations.quaternion_from_euler(0.0, 0.0, 1.5)
),
tf.transformations.translation_matrix((0.0, -1.0, 0.0))
)
tr = tf.transformations.translation_from_matrix(TBase2Robot)
q = tf.transformations.quaternion_from_matrix(TBase2Robot)
robot_transform = geometry_msgs.msg.TransformStamped()
robot_transform.transform.translation.x = tr[0]
robot_transform.transform.translation.y = tr[1]
robot_transform.transform.translation.z = tr[2]
robot_transform.transform.rotation.x = q[0]
robot_transform.transform.rotation.y = q[1]
robot_transform.transform.rotation.z = q[2]
robot_transform.transform.rotation.w = q[3]
robot_transform.header.stamp = rospy.Time.now()
robot_transform.header.frame_id = "base_frame"
robot_transform.child_frame_id = "robot_frame"
br.sendTransform(robot_transform)
TRobot2Object = numpy.dot(TObject2Base, TBase2Robot)
TObject2Robot = tf.transformations.inverse_matrix(TRobot2Object)
objp = numpy.dot(TObject2Robot, numpy.array([0,0,0,1]))
camp = numpy.array([0,0.1,0.1,1])
TRobot2Camera = tf.transformations.translation_matrix(camp)
TCamera2Base = numpy.dot(TBase2Robot,TRobot2Camera)
TBase2Camera = tf.transformations.inverse_matrix(TCamera2Base)
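    # Orient the camera: rotate its +x axis onto the direction from the
    # camera to the object, expressed in axis-angle form
    # (axis = x_cam x v_target, angle = acos(x_cam . v_target)).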
camv = numpy.array([1,0,0,1])[:3]
targetv = normalize(numpy.subtract(objp, camp))[:3]
axis = normalize(numpy.cross(camv, targetv))
angle = math.acos(numpy.dot(camv, targetv))
tr = tf.transformations.translation_from_matrix(TRobot2Camera)
q = tf.transformations.quaternion_about_axis(angle, axis)
camera_transform = geometry_msgs.msg.TransformStamped()
camera_transform.transform.translation.x = tr[0]
camera_transform.transform.translation.y = tr[1]
camera_transform.transform.translation.z = tr[2]
camera_transform.transform.rotation.x = q[0]
camera_transform.transform.rotation.y = q[1]
camera_transform.transform.rotation.z = q[2]
camera_transform.transform.rotation.w = q[3]
camera_transform.header.stamp = rospy.Time.now()
camera_transform.header.frame_id = "robot_frame"
camera_transform.child_frame_id = "camera_frame"
br.sendTransform(camera_transform)
if __name__ == '__main__':
rospy.init_node('project2_solution')
br = tf2_ros.TransformBroadcaster()
rospy.sleep(0.5)
while not rospy.is_shutdown():
publish_transforms()
rospy.sleep(0.05)
|
{
"content_hash": "bb9ee38300bd665700ce07d46294d846",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 70,
"avg_line_length": 39.693548387096776,
"alnum_prop": 0.6859000406338887,
"repo_name": "xunilrj/sandbox",
"id": "7b7e5f8ad724e6b31605562d6784e992367ce648",
"size": "4945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "courses/columbia-robotic/proj02/src/project2_solution/scripts/solution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "235"
},
{
"name": "ASP.NET",
"bytes": "110"
},
{
"name": "Assembly",
"bytes": "28409"
},
{
"name": "Asymptote",
"bytes": "22978"
},
{
"name": "C",
"bytes": "1022035"
},
{
"name": "C#",
"bytes": "474510"
},
{
"name": "C++",
"bytes": "33387716"
},
{
"name": "CMake",
"bytes": "1288737"
},
{
"name": "CSS",
"bytes": "49690"
},
{
"name": "Common Lisp",
"bytes": "858"
},
{
"name": "Coq",
"bytes": "6200"
},
{
"name": "Dockerfile",
"bytes": "2912"
},
{
"name": "Elixir",
"bytes": "34"
},
{
"name": "Erlang",
"bytes": "8204"
},
{
"name": "F#",
"bytes": "33187"
},
{
"name": "Fortran",
"bytes": "20472"
},
{
"name": "GDB",
"bytes": "701"
},
{
"name": "GLSL",
"bytes": "7478"
},
{
"name": "Go",
"bytes": "8971"
},
{
"name": "HTML",
"bytes": "6469462"
},
{
"name": "Handlebars",
"bytes": "8236"
},
{
"name": "Haskell",
"bytes": "18581"
},
{
"name": "Java",
"bytes": "120539"
},
{
"name": "JavaScript",
"bytes": "5055335"
},
{
"name": "Jupyter Notebook",
"bytes": "1849172"
},
{
"name": "LLVM",
"bytes": "43431"
},
{
"name": "MATLAB",
"bytes": "462980"
},
{
"name": "Makefile",
"bytes": "1622666"
},
{
"name": "Objective-C",
"bytes": "2001"
},
{
"name": "PostScript",
"bytes": "45490"
},
{
"name": "PowerShell",
"bytes": "192867"
},
{
"name": "Python",
"bytes": "726138"
},
{
"name": "R",
"bytes": "31364"
},
{
"name": "Roff",
"bytes": "5700"
},
{
"name": "Ruby",
"bytes": "5865"
},
{
"name": "Rust",
"bytes": "797104"
},
{
"name": "Sage",
"bytes": "654"
},
{
"name": "Scala",
"bytes": "42383"
},
{
"name": "Shell",
"bytes": "154039"
},
{
"name": "TLA",
"bytes": "16779"
},
{
"name": "TSQL",
"bytes": "3412"
},
{
"name": "TeX",
"bytes": "6989202"
},
{
"name": "TypeScript",
"bytes": "8845"
},
{
"name": "Visual Basic .NET",
"bytes": "1090"
},
{
"name": "WebAssembly",
"bytes": "70321"
},
{
"name": "q",
"bytes": "13889"
}
],
"symlink_target": ""
}
|
import json
import os
import argparse
import subprocess
from scripts.mirnas.update_mirnas_helpers import get_rfam_accs, get_mirna_ids
from scripts.mirnas.mirna_config import UPDATE_DIR, SEARCH_DIRS, MEMORY, CPU, LSF_GROUP, NEW_DIR, QUEUE
def add_ref_sequentially(reference, mirna_ids=None, rfam_accessions=None):
"""
Call add_ref.pl sequentially
:param reference: PubMed reference ID for the DESC, by default the latest MiRBase paper 30423142
:param rfam_accessions: list of Rfam accession numbers
:param mirna_ids: list miRNA IDs
"""
cmd = ("add_ref.pl {0}".format(reference))
def call_addref(fam_dir):
if os.path.exists(fam_dir):
os.chdir(fam_dir)
subprocess.call(cmd, shell=True)
if rfam_accessions:
for family in rfam_accessions:
family_dir = os.path.join(UPDATE_DIR, family)
call_addref(family_dir)
elif mirna_ids:
for mirna in mirna_ids:
family_dir = os.path.join(NEW_DIR, mirna)
call_addref(family_dir)
def call_add_ref_cmd(fam_dir, ref):
"""
    Submit the add_ref.pl job
:param fam_dir: family directory, from which to call the command
:param ref: PubMed reference ID for the DESC, by default the latest MiRBase paper 30423142
"""
cmd = ("bsub -o {out_file} -e {err_file} -n {cpu} -g {lsf_group} -q {queue}"
" \"cd {family_dir} && add_ref.pl {ref}\"")
lsf_err_file = os.path.join(fam_dir, "auto_add_ref.err")
lsf_out_file = os.path.join(fam_dir, "auto_add_ref.out")
subprocess.call(
cmd.format(
mem=MEMORY, out_file=lsf_out_file, err_file=lsf_err_file, cpu=CPU, lsf_group=LSF_GROUP, queue=QUEUE,
family_dir=fam_dir, ref=ref), shell=True)
def auto_add_ref(reference, rfam_accessions=None, thresholds_file=None):
"""
Call add_ref.pl
:param reference: PubMed reference ID for the DESC, by default the latest MiRBase paper 30423142
:param rfam_accessions: list of Rfam accession numbers
:param thresholds_file: JSON file with miRNA IDs and the corresponding threshold
"""
if rfam_accessions:
for family in rfam_accessions:
family_dir = os.path.join(UPDATE_DIR, family)
if os.path.exists(family_dir):
call_add_ref_cmd(family_dir, reference)
else:
continue
elif thresholds_file:
with open(thresholds_file, 'r') as fp:
thresholds = json.load(fp)
for family in thresholds.keys():
for searchdir in SEARCH_DIRS:
if family.find("relabelled") == -1:
family_dir = os.path.join(searchdir, family + "_relabelled")
else:
family_dir = os.path.join(searchdir, family)
if os.path.exists(family_dir):
call_add_ref_cmd(family_dir, reference)
else:
continue
def parse_arguments():
parser = argparse.ArgumentParser()
file_input = parser.add_mutually_exclusive_group()
file_input.add_argument("--input",
help="TSV file with miRNA ID, and threshold value of families to update, "
"file will also include Rfam acc number if families to update")
parser.add_argument("--new", help="True if miRNA IDs are new families", action='store_true', default=False)
file_input.add_argument("--thresholds", help="A json file with miRNA : threshold pairs",
action="store")
parser.add_argument("--ref", help="A string indicating the PubMed id to use for reference",
action="store", default="30423142")
parser.add_argument("--sequential", help="Modify DESC files sequentially",
action="store_true", default=False)
return parser.parse_args()
if __name__ == '__main__':
args = parse_arguments()
if args.input and args.new:
mirna_list = get_mirna_ids(args.input)
else:
mirna_list = None
if args.input and not args.new:
rfam_accs = get_rfam_accs(args.input)
else:
rfam_accs = None
if args.sequential is False:
        auto_add_ref(reference=args.ref, rfam_accessions=rfam_accs, thresholds_file=args.thresholds)
else:
add_ref_sequentially(reference=args.ref, rfam_accessions=rfam_accs, mirna_ids=mirna_list)
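# Example invocations (file names are illustrative):
#   python auto_addref.py --input updated_families.tsv
#   python auto_addref.py --input new_mirnas.tsv --new --sequential
#   python auto_addref.py --thresholds thresholds.json --ref 30423142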
|
{
"content_hash": "be7c902eff98194f0702d0a23d1d4772",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 112,
"avg_line_length": 39.13274336283186,
"alnum_prop": 0.61917684305744,
"repo_name": "Rfam/rfam-production",
"id": "450b300c31d6a616f13ed33b8fc54764ec46fb8e",
"size": "4422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/mirnas/auto_addref.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Common Workflow Language",
"bytes": "18759"
},
{
"name": "Dockerfile",
"bytes": "1905"
},
{
"name": "Nextflow",
"bytes": "28485"
},
{
"name": "Perl",
"bytes": "8029"
},
{
"name": "Python",
"bytes": "880144"
},
{
"name": "Shell",
"bytes": "2771"
}
],
"symlink_target": ""
}
|
import django.forms as forms
import magic
from captcha.fields import CaptchaField
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import FileExtensionValidator
from django.db import models
from django.shortcuts import render
from django.template.defaultfilters import filesizeformat
from django_countries.fields import CountryField
from wagtail.wagtailadmin.edit_handlers import FieldPanel
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailcore.models import Page
class PublicationIdea(models.Model):
full_name = models.CharField(max_length=255)
affiliation = models.CharField(max_length=255)
country = CountryField()
email = models.CharField(max_length=255)
website = models.URLField(null=True, blank=True)
publication_title = models.CharField(max_length=255)
scholarly_discipline = models.CharField(max_length=255)
keywords = models.CharField(max_length=255)
summary = models.TextField(
max_length=10000,
        help_text='Please provide a maximum 1000-word description of your '
        'publication idea. Make sure you explain where its innovative and '
        'experimental nature lies.'
)
link = models.URLField(
null=True,
blank=True,
help_text='You can include a link to a website to provide further '
'material to support your idea'
)
attachment = models.FileField(
null=True, blank=True,
upload_to=settings.SUP_URL,
validators=[FileExtensionValidator(['pdf'])],
help_text='You can include 1 attachment (pdf format only; max size: '
'5MB) for example to visualise statically some aspects of '
'your proposal.'
)
future_funding = models.TextField(
max_length=2000,
default='',
help_text='Please explain how this project has the potential to '
'contribute to the possibility of obtaining external funding in '
'the future and what external funding schemes you are '
'considering, if any, to develop a fully-fledged product.'
)
class PublicationIdeaForm(forms.ModelForm):
captcha = CaptchaField(required=True)
full_name = forms.CharField(max_length=255, required=True)
affiliation = forms.CharField(max_length=255, required=True)
email = forms.CharField(max_length=255, required=True)
publication_title = forms.CharField(max_length=255, required=True)
scholarly_discipline = forms.CharField(max_length=255, required=True)
keywords = forms.CharField(max_length=255, required=True)
summary = forms.CharField(
widget=forms.Textarea,
max_length=10000,
        help_text='Please provide a maximum 1000-word description of your '
                  'publication idea. Make sure you explain where its '
                  'innovative and experimental nature lies.'
)
    max_upload_size = 5 * 1024 * 1024  # keep in sync with the 5MB limit stated in the model help_text
# Guidance called for max words not characters but could change
summary_max_words = 1000
allowed_attachment_types = [u'application/pdf', ]
    @staticmethod
    def filename_to_title(filename):
from os.path import splitext
if filename:
result = splitext(filename)[0]
result = result.replace('-', ' ').replace('_', ' ')
return result.title()
def check_image_file_size(self, f):
# Upload size checking can be disabled by setting max upload size to
# None
if self.max_upload_size is None:
return
# Check the filesize
if f.size > self.max_upload_size:
raise ValidationError(self.error_messages['file_too_large'] % (
filesizeformat(f.size),
), code='file_too_large')
def clean_summary(self):
summary = self.cleaned_data.get("summary", False)
if self.summary_max_words is None:
return summary
elif len(summary.split()) > self.summary_max_words:
raise ValidationError(
"Summary is {} words long. Maximum is 1000".format(
len(summary.split())))
return summary
def clean_attachment(self):
attachment = self.cleaned_data.get("attachment", None)
if attachment:
# Check file size
self.check_image_file_size(attachment)
# make sure it's actually a pdf
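            # note: this is the `filemagic` package API (magic.Magic with
            # flags= and id_filename), not the `python-magic` package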
with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as m:
filetype = m.id_filename(attachment.temporary_file_path())
if filetype not in self.allowed_attachment_types:
raise ValidationError(
"File {} is not a valid attachment type.".format(
filetype))
return attachment
class Meta:
model = PublicationIdea
exclude = []
class PublicationIdeaPage(Page):
intro = RichTextField(blank=True)
thankyou_page_title = models.CharField(
max_length=255, help_text="Title text to use for the 'thank you' page")
thankyou_page_message = models.TextField(
help_text="Text to use for the 'thank you' page")
content_panels = Page.content_panels + [
FieldPanel('intro', classname="full"),
FieldPanel('thankyou_page_title'),
FieldPanel('thankyou_page_message'),
]
def serve(self, request):
if request.method == 'POST':
form = PublicationIdeaForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return render(request, 'sup/form_page_landing.html', {
'page': self,
})
else:
form = PublicationIdeaForm()
return render(request, 'sup/form_page.html', {
'page': self,
'form': form,
})
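# Minimal request-level sketch (hypothetical page instance and URL): a GET
# renders sup/form_page.html with an empty form, while a valid POST saves a
# PublicationIdea row and renders sup/form_page_landing.html.
#   from django.test import RequestFactory
#   response = page.serve(RequestFactory().get('/publication-idea/'))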
|
{
"content_hash": "a1e89f3ed99980d178ae56c9843d0d65",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 79,
"avg_line_length": 38.091503267973856,
"alnum_prop": 0.6403568977350721,
"repo_name": "kingsdigitallab/kdl-django",
"id": "bdf9cac9d615804fd476aa1587d2f8e5493bef5c",
"size": "5828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sup/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "69770"
},
{
"name": "HTML",
"bytes": "38338"
},
{
"name": "JavaScript",
"bytes": "15238"
},
{
"name": "Python",
"bytes": "1140999"
},
{
"name": "Shell",
"bytes": "2704"
}
],
"symlink_target": ""
}
|
from urllib import urlencode, quote_plus
from datetime import datetime, date
import urllib2
try:
import functools
partial = functools.partial
except ImportError:
def partial(func, *args, **keywords):
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
raise ImportError('Ape requires either Python >2.6 or simplejson')
class MailChimpError(Exception):
def __init__(self, msg, code):
self.msg = msg
self.code = code
class MailChimp(object):
def __init__(self, api_key, ssl=True, debug=False):
self.data_center = api_key.rsplit('-', 1)[-1]
self.api_key = api_key
self.ssl = ssl
self.debug = debug
def __getattr__(self, name):
return partial(self, method=name)
def __call__(self, **kwargs):
method = kwargs.pop('method')
kwargs.update({
'output': 'json',
'apikey': self.api_key,
})
params = self._serialize(kwargs)
if self.ssl:
protocol = 'https'
else:
protocol = 'http'
url = "%s://%s.api.mailchimp.com/1.2/?method=%s" % (
protocol, self.data_center, method)
if self.debug:
print 'URL:', url
print 'POST data:', params
req = urllib2.Request(url, params)
try:
handle = urllib2.urlopen(req)
response = json.loads(handle.read())
try:
if 'error' in response:
raise MailChimpError(response['error'], response['code'])
except TypeError: # the response was boolean
pass
return response
except urllib2.HTTPError, e:
if (e.code == 304):
return []
else:
                raise MailChimpError(str(e), e.code)
def _serialize(self, params, key=None):
"""Replicates PHP's (incorrect) serialization to query parameters to
accommodate the "array-based" parameters of MailChimp API methods.
"""
pairs = []
try:
items = params.items()
except AttributeError:
items = [(str(i), n) for i, n in enumerate(params)]
for name, value in items:
name = quote_plus(name)
if key is not None:
name = '%s[%s]' % (key, name)
if type(value) in (list, dict):
pairs.append(self._serialize(value, name))
elif value is not None:
if type(value) in (bool, datetime, date, int):
value = str(value).lower()
pairs.append('%s=%s' % (name, quote_plus(value)))
return '&'.join(pairs)
__all__ = ["MailChimp", "MailChimpError"]
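# Usage sketch (hypothetical API key and list id): attribute access maps to
# MailChimp API 1.2 method names via __getattr__ + partial, and nested
# parameters are flattened PHP-style by _serialize, e.g.
#   mc = MailChimp('0123456789abcdef0123456789abcdef-us1')
#   mc.listSubscribe(id='LIST_ID', email_address='ada@example.com',
#                    merge_vars={'FNAME': 'Ada'})
# posts ...&merge_vars[FNAME]=Ada&... to the us1 data center endpoint.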
|
{
"content_hash": "1301bd22c07e698e58d6b02e07f779d7",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 77,
"avg_line_length": 31.050505050505052,
"alnum_prop": 0.5393623942745608,
"repo_name": "bmac/evilsheep",
"id": "6020d11cc4701db24bbb7e445b626bb816ce2850",
"size": "3074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "greatape/__init__.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "23287"
},
{
"name": "Python",
"bytes": "589818"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
from sympy.core import S, Add, Expr, Basic
from sympy.assumptions import Q, ask
from sympy.core.logic import fuzzy_not
def refine(expr, assumptions=True):
"""
Simplify an expression using assumptions.
Gives the form of expr that would be obtained if symbols
in it were replaced by explicit numerical expressions satisfying
the assumptions.
Examples
========
>>> from sympy import refine, sqrt, Q
>>> from sympy.abc import x
>>> refine(sqrt(x**2), Q.real(x))
Abs(x)
>>> refine(sqrt(x**2), Q.positive(x))
x
"""
if not isinstance(expr, Basic):
return expr
if not expr.is_Atom:
args = [refine(arg, assumptions) for arg in expr.args]
# TODO: this will probably not work with Integral or Polynomial
expr = expr.func(*args)
if hasattr(expr, '_eval_refine'):
return expr._eval_refine()
name = expr.__class__.__name__
handler = handlers_dict.get(name, None)
if handler is None:
return expr
new_expr = handler(expr, assumptions)
if (new_expr is None) or (expr == new_expr):
return expr
if not isinstance(new_expr, Expr):
return new_expr
return refine(new_expr, assumptions)
def refine_abs(expr, assumptions):
"""
Handler for the absolute value.
Examples
========
>>> from sympy import Symbol, Q, refine, Abs
>>> from sympy.assumptions.refine import refine_abs
>>> from sympy.abc import x
>>> refine_abs(Abs(x), Q.real(x))
>>> refine_abs(Abs(x), Q.positive(x))
x
>>> refine_abs(Abs(x), Q.negative(x))
-x
"""
arg = expr.args[0]
if ask(Q.real(arg), assumptions) and \
fuzzy_not(ask(Q.negative(arg), assumptions)):
# if it's nonnegative
return arg
if ask(Q.negative(arg), assumptions):
return -arg
def refine_Pow(expr, assumptions):
"""
Handler for instances of Pow.
>>> from sympy import Symbol, Q
>>> from sympy.assumptions.refine import refine_Pow
>>> from sympy.abc import x,y,z
>>> refine_Pow((-1)**x, Q.real(x))
>>> refine_Pow((-1)**x, Q.even(x))
1
>>> refine_Pow((-1)**x, Q.odd(x))
-1
For powers of -1, even parts of the exponent can be simplified:
>>> refine_Pow((-1)**(x+y), Q.even(x))
(-1)**y
>>> refine_Pow((-1)**(x+y+z), Q.odd(x) & Q.odd(z))
(-1)**y
>>> refine_Pow((-1)**(x+y+2), Q.odd(x))
(-1)**(y + 1)
>>> refine_Pow((-1)**(x+3), True)
(-1)**(x + 1)
"""
from sympy.core import Pow, Rational
from sympy.functions.elementary.complexes import Abs
from sympy.functions import sign
if isinstance(expr.base, Abs):
if ask(Q.real(expr.base.args[0]), assumptions) and \
ask(Q.even(expr.exp), assumptions):
return expr.base.args[0] ** expr.exp
if ask(Q.real(expr.base), assumptions):
if expr.base.is_number:
if ask(Q.even(expr.exp), assumptions):
return abs(expr.base) ** expr.exp
if ask(Q.odd(expr.exp), assumptions):
return sign(expr.base) * abs(expr.base) ** expr.exp
if isinstance(expr.exp, Rational):
if type(expr.base) is Pow:
return abs(expr.base.base) ** (expr.base.exp * expr.exp)
if expr.base is S.NegativeOne:
if expr.exp.is_Add:
old = expr
# For powers of (-1) we can remove
# - even terms
# - pairs of odd terms
# - a single odd term + 1
# - A numerical constant N can be replaced with mod(N,2)
coeff, terms = expr.exp.as_coeff_add()
terms = set(terms)
even_terms = set([])
odd_terms = set([])
initial_number_of_terms = len(terms)
for t in terms:
if ask(Q.even(t), assumptions):
even_terms.add(t)
elif ask(Q.odd(t), assumptions):
odd_terms.add(t)
terms -= even_terms
if len(odd_terms) % 2:
terms -= odd_terms
new_coeff = (coeff + S.One) % 2
else:
terms -= odd_terms
new_coeff = coeff % 2
if new_coeff != coeff or len(terms) < initial_number_of_terms:
terms.add(new_coeff)
expr = expr.base**(Add(*terms))
# Handle (-1)**((-1)**n/2 + m/2)
e2 = 2*expr.exp
if ask(Q.even(e2), assumptions):
if e2.could_extract_minus_sign():
e2 *= expr.base
if e2.is_Add:
i, p = e2.as_two_terms()
if p.is_Pow and p.base is S.NegativeOne:
if ask(Q.integer(p.exp), assumptions):
i = (i + 1)/2
if ask(Q.even(i), assumptions):
return expr.base**p.exp
elif ask(Q.odd(i), assumptions):
return expr.base**(p.exp + 1)
else:
return expr.base**(p.exp + i)
if old != expr:
return expr
def refine_exp(expr, assumptions):
"""
Handler for exponential function.
>>> from sympy import Symbol, Q, exp, I, pi
>>> from sympy.assumptions.refine import refine_exp
>>> from sympy.abc import x
>>> refine_exp(exp(pi*I*2*x), Q.real(x))
>>> refine_exp(exp(pi*I*2*x), Q.integer(x))
1
"""
arg = expr.args[0]
if arg.is_Mul:
coeff = arg.as_coefficient(S.Pi*S.ImaginaryUnit)
if coeff:
if ask(Q.integer(2*coeff), assumptions):
if ask(Q.even(coeff), assumptions):
return S.One
elif ask(Q.odd(coeff), assumptions):
return S.NegativeOne
elif ask(Q.even(coeff + S.Half), assumptions):
return -S.ImaginaryUnit
elif ask(Q.odd(coeff + S.Half), assumptions):
return S.ImaginaryUnit
def refine_atan2(expr, assumptions):
"""
Handler for the atan2 function
Examples
========
>>> from sympy import Symbol, Q, refine, atan2
>>> from sympy.assumptions.refine import refine_atan2
>>> from sympy.abc import x, y
>>> refine_atan2(atan2(y,x), Q.real(y) & Q.positive(x))
atan(y/x)
>>> refine_atan2(atan2(y,x), Q.negative(y) & Q.negative(x))
atan(y/x) - pi
>>> refine_atan2(atan2(y,x), Q.positive(y) & Q.negative(x))
atan(y/x) + pi
>>> refine_atan2(atan2(y,x), Q.zero(y) & Q.negative(x))
pi
>>> refine_atan2(atan2(y,x), Q.positive(y) & Q.zero(x))
pi/2
>>> refine_atan2(atan2(y,x), Q.negative(y) & Q.zero(x))
-pi/2
>>> refine_atan2(atan2(y,x), Q.zero(y) & Q.zero(x))
nan
"""
from sympy.functions.elementary.trigonometric import atan
from sympy.core import S
y, x = expr.args
if ask(Q.real(y) & Q.positive(x), assumptions):
return atan(y / x)
elif ask(Q.negative(y) & Q.negative(x), assumptions):
return atan(y / x) - S.Pi
elif ask(Q.positive(y) & Q.negative(x), assumptions):
return atan(y / x) + S.Pi
elif ask(Q.zero(y) & Q.negative(x), assumptions):
return S.Pi
elif ask(Q.positive(y) & Q.zero(x), assumptions):
return S.Pi/2
elif ask(Q.negative(y) & Q.zero(x), assumptions):
return -S.Pi/2
elif ask(Q.zero(y) & Q.zero(x), assumptions):
return S.NaN
else:
return expr
def refine_Relational(expr, assumptions):
"""
Handler for Relational
>>> from sympy.assumptions.refine import refine_Relational
>>> from sympy.assumptions.ask import Q
>>> from sympy.abc import x
>>> refine_Relational(x<0, ~Q.is_true(x<0))
False
"""
return ask(Q.is_true(expr), assumptions)
handlers_dict = {
'Abs': refine_abs,
'Pow': refine_Pow,
'exp': refine_exp,
'atan2': refine_atan2,
'Equality': refine_Relational,
'Unequality': refine_Relational,
'GreaterThan': refine_Relational,
'LessThan': refine_Relational,
'StrictGreaterThan': refine_Relational,
'StrictLessThan': refine_Relational
}
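# A minimal sketch (not part of sympy itself) of plugging a custom handler
# into refine(): handlers_dict is keyed by class name, so user code can do
#   def refine_sign(expr, assumptions):
#       arg = expr.args[0]
#       if ask(Q.positive(arg), assumptions):
#           return S.One
#       if ask(Q.negative(arg), assumptions):
#           return S.NegativeOne
#   handlers_dict['sign'] = refine_sign
# after which refine(sign(x), Q.positive(x)) simplifies to 1.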
|
{
"content_hash": "5d47c3866a39cf58b837b04f3b1e5414",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 78,
"avg_line_length": 31.69776119402985,
"alnum_prop": 0.5300765155974102,
"repo_name": "kumarkrishna/sympy",
"id": "e96ddea614eae867dd16d4beaf7f48e921006a33",
"size": "8495",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sympy/assumptions/refine.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14011496"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
from regioninfo import SDBRegionInfo
def regions():
"""
Get all available regions for the SDB service.
:rtype: list
:return: A list of :class:`boto.sdb.regioninfo.RegionInfo`
"""
return [SDBRegionInfo(name='us-east-1',
endpoint='sdb.amazonaws.com'),
SDBRegionInfo(name='eu-west-1',
endpoint='sdb.eu-west-1.amazonaws.com'),
SDBRegionInfo(name='us-west-1',
endpoint='sdb.us-west-1.amazonaws.com'),
SDBRegionInfo(name='ap-southeast-1',
endpoint='sdb.ap-southeast-1.amazonaws.com')
]
def connect_to_region(region_name):
for region in regions():
if region.name == region_name:
return region.connect()
return None
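# Usage sketch (credentials come from the usual boto config/environment):
#   conn = connect_to_region('eu-west-1')
#   if conn is None:
#       raise ValueError('unknown SDB region')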
|
{
"content_hash": "3e0a5de194e9fe6b66475a7bd820a6d0",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 70,
"avg_line_length": 32.96,
"alnum_prop": 0.5546116504854369,
"repo_name": "woggle/mesos-old",
"id": "58c9794535c59383a1e035f956ff6942f47d3c02",
"size": "1931",
"binary": false,
"copies": "5",
"ref": "refs/heads/trunk",
"path": "third_party/boto-2.0b2/boto/sdb/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "7570539"
},
{
"name": "C++",
"bytes": "33836368"
},
{
"name": "Emacs Lisp",
"bytes": "7798"
},
{
"name": "Java",
"bytes": "14250280"
},
{
"name": "JavaScript",
"bytes": "39087"
},
{
"name": "Objective-C",
"bytes": "119767"
},
{
"name": "PHP",
"bytes": "152555"
},
{
"name": "Perl",
"bytes": "624560"
},
{
"name": "Python",
"bytes": "3246265"
},
{
"name": "Ruby",
"bytes": "67470"
},
{
"name": "Shell",
"bytes": "13471292"
},
{
"name": "Smalltalk",
"bytes": "56562"
},
{
"name": "VimL",
"bytes": "3774"
}
],
"symlink_target": ""
}
|
import random
from django.test import TestCase
from game.forms import NewGameForm
class NewGameFormTest(TestCase):
def test_player_valid(self):
form = NewGameForm({
'player1': 'game.players.AIPlayer',
'player2': 'human'
})
self.assertTrue(form.is_valid())
def test_player_invalid(self):
form = NewGameForm({
'player1': '',
'player2': 'foobar'
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
'player1': ['This field is required.'],
'player2': ['Unknown player type: foobar']
})
def test_create(self):
"Tests that create returns a model with the players assigned."
random.seed(1)
form = NewGameForm({
'player1': 'game.players.AIPlayer',
'player2': 'human'
})
self.assertTrue(form.is_valid())
game = form.create()
self.assertEqual(game.board, " ")
self.assertEqual(game.player_x, "human")
self.assertEqual(game.player_o, "game.players.AIPlayer")
|
{
"content_hash": "fb6e76a8fc54a9d93df0898723a72928",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 70,
"avg_line_length": 29.526315789473685,
"alnum_prop": 0.5650623885918004,
"repo_name": "claudiordgz/tictactoe-django",
"id": "bdd27952437fcf6bae9e5cfd6f67e9f28b6bc0b6",
"size": "1122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/tests/test_forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "808"
},
{
"name": "HTML",
"bytes": "5287"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Python",
"bytes": "24507"
}
],
"symlink_target": ""
}
|
class Indian(object):
def printIndian(self):
print 'I am an Indian'
class Gujarati(Indian):
def printGujarati(self):
print 'I am Gujarati'
def printMarathi(self):
print 'I am Marathi'
def printKathiyawadi(self):
print 'I am Kathiyawadi'
def main():
superObj = Indian()
subObj = Gujarati()
    superObj.printIndian()
    subObj.printGujarati()
if __name__ == '__main__':
main()
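# Expected output:
#   I am an Indian
#   I am Gujarati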
|
{
"content_hash": "03afcc5e0ae263c9e9905240e43a96d6",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 32,
"avg_line_length": 17,
"alnum_prop": 0.6078431372549019,
"repo_name": "dek-odoo/python-samples",
"id": "5ad5b57aa3f940ffc2ce131a4a5cc30d4f58ec6d",
"size": "658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python exercises/dek_program051.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "95972"
}
],
"symlink_target": ""
}
|
import os
import uuid
import t
from restkit import request
from restkit.forms import multipart_form_encode
from _server_test import HOST, PORT
LONG_BODY_PART = """This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client...
This is a relatively long body, that we send to the client..."""
def test_001():
u = "http://%s:%s" % (HOST, PORT)
r = request(u)
t.eq(r.status_int, 200)
t.eq(r.body_string(), "welcome")
def test_002():
u = "http://%s:%s" % (HOST, PORT)
r = request(u, 'POST', body=LONG_BODY_PART)
t.eq(r.status_int, 200)
body = r.body_string()
t.eq(len(body), len(LONG_BODY_PART))
t.eq(body, LONG_BODY_PART)
def test_003():
u = "http://test:test@%s:%s/auth" % (HOST, PORT)
r = request(u)
t.eq(r.status_int, 200)
u = "http://test:test2@%s:%s/auth" % (HOST, PORT)
r = request(u)
t.eq(r.status_int, 403)
def test_004():
u = "http://%s:%s/multipart2" % (HOST, PORT)
fn = os.path.join(os.path.dirname(__file__), "1M")
f = open(fn, 'rb')
l = int(os.fstat(f.fileno())[6])
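    # os.fstat()[6] is st_size, i.e. the length of the 1M fixture in bytes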
b = {'a':'aa','b':['bb','éàù@'], 'f':f}
h = {'content-type':"multipart/form-data"}
body, headers = multipart_form_encode(b, h, uuid.uuid4().hex)
r = request(u, method='POST', body=body, headers=headers)
t.eq(r.status_int, 200)
t.eq(int(r.body_string()), l)
def test_005():
u = "http://%s:%s/multipart3" % (HOST, PORT)
fn = os.path.join(os.path.dirname(__file__), "1M")
f = open(fn, 'rb')
l = int(os.fstat(f.fileno())[6])
b = {'a':'aa','b':'éàù@', 'f':f}
h = {'content-type':"multipart/form-data"}
body, headers = multipart_form_encode(b, h, uuid.uuid4().hex)
r = request(u, method='POST', body=body, headers=headers)
t.eq(r.status_int, 200)
t.eq(int(r.body_string()), l)
def test_006():
u = "http://%s:%s/multipart4" % (HOST, PORT)
fn = os.path.join(os.path.dirname(__file__), "1M")
f = open(fn, 'rb')
content = f.read()
f.seek(0)
b = {'a':'aa','b':'éàù@', 'f':f}
h = {'content-type':"multipart/form-data"}
body, headers = multipart_form_encode(b, h, uuid.uuid4().hex)
r = request(u, method='POST', body=body, headers=headers)
t.eq(r.status_int, 200)
t.eq(r.body_string(), content)
def test_007():
import StringIO
u = "http://%s:%s/multipart4" % (HOST, PORT)
content = 'éàù@'
f = StringIO.StringIO('éàù@')
f.name = 'test.txt'
b = {'a':'aa','b':'éàù@', 'f':f}
h = {'content-type':"multipart/form-data"}
body, headers = multipart_form_encode(b, h, uuid.uuid4().hex)
r = request(u, method='POST', body=body, headers=headers)
t.eq(r.status_int, 200)
t.eq(r.body_string(), content)
|
{
"content_hash": "aa09967701093e5591fe645e21911244",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 81,
"avg_line_length": 44.474074074074075,
"alnum_prop": 0.6640572951365756,
"repo_name": "emidln/django_roa",
"id": "6e083e7b5b0d8ce6c05537f33724cc8548755aab",
"size": "6150",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/tests/008-test-request.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "92014"
},
{
"name": "Python",
"bytes": "5463974"
},
{
"name": "Shell",
"bytes": "1098"
}
],
"symlink_target": ""
}
|
"""
zine
~~~~
    Zine is simple weblog software written in Python.
Get a WSGI Application
======================
To get the WSGI application for Zine you can use the `make_app`
function. This function can either create a dispatcher for one instance
or for multiple application instances where the current active instance
is looked up in the WSGI environment. The latter is useful for mass
hosting via mod_wsgi or similar interfaces.
    Here is a small example `zine.wsgi` for mod_wsgi::
from zine import get_wsgi_app
application = get_wsgi_app('/path/to/instance')
:copyright: (c) 2010 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
__version__ = '0.2-dev'
__url__ = 'http://zine.pocoo.org/'
# implementation detail. Stuff in __all__ and the initial import has to be
# the same. Everything that is not listed in `__all__` or everything that
# does not start with two leading underscores is wiped out on reload and
# the core module is *not* reloaded, thus stuff will get lost if it's not
# properly listed.
from zine._core import setup, get_wsgi_app, override_environ_config
__all__ = ('setup', 'get_wsgi_app', 'override_environ_config')
|
{
"content_hash": "9bb4d22fc27e49dbdb238cf93227d4dd",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 76,
"avg_line_length": 34.77777777777778,
"alnum_prop": 0.6829073482428115,
"repo_name": "mitsuhiko/zine",
"id": "c54317ff52c36b8817dcb79249133848ed1af863",
"size": "1276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zine/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "215269"
},
{
"name": "Python",
"bytes": "1141277"
},
{
"name": "Shell",
"bytes": "231"
}
],
"symlink_target": ""
}
|
import sys
from subprocess import Popen, call, PIPE
from random import randint
from time import strftime
import shlex
try:
from Rappture.tools import getCommandOutput as RapptureExec
except ImportError:
pass
def psd(s, **kwargs):
"""pysimm.apps.poreblazer.psd
Perform pore size distribution calculation using PoreBlazer v2.0
Args:
atoms: file name to contain ff parameters (ff.atoms)
data: file name to write xyz file (data.xyz)
        angles: angles of simulation box (90.0 90.0 90.0)
insertions: number of insertions for calculation (500)
min_probe: minimum probe size (1.0)
probe_dr: step size to increase probe size (0.2)
        max_probe: maximum probe size (25)
psd_save: T/F to save psd points (F)
psd_range: range in which to save psd points (2.5,3.8)
exec_path: path to poreblazer psd executable (psd.exe)
gen_files: if True, only generate input do not execute (None)
Returns:
None
"""
atoms = kwargs.get('atoms', 'ff.atoms')
data = kwargs.get('data', "'data.xyz'")
angles = kwargs.get('angles', '90.0 90.0 90.0')
insertions = kwargs.get('insertions', 500)
min_probe = kwargs.get('min_probe', 1.0)
probe_dr = kwargs.get('probe_dr', 0.2)
max_probe = kwargs.get('max_probe', 25)
psd_save = kwargs.get('psd_save', 'F')
psd_range = kwargs.get('psd_range', '2.5,3.8')
exec_path = kwargs.get('exec_path', 'psd.exe')
nanohub = kwargs.get('nanohub')
gen_files = kwargs.get('gen_files')
with open('psd.in', 'w+') as f:
f.write('%s\n' % atoms)
f.write('%s\n' % data)
f.write('%s\n' % insertions)
f.write('%s\n' % min_probe)
f.write('%s\n' % probe_dr)
f.write('%s\n' % max_probe)
f.write('%s %s %s\n' % (s.dim.dx, s.dim.dy, s.dim.dz))
f.write('%s\n' % angles)
f.write('%s\n' % randint(10000, 99999))
f.write('%s\n' % psd_save)
f.write('%s\n' % psd_range)
with open('ff.atoms', 'w+') as f:
f.write('%s\n\n' % s.particle_types.count)
for pt in s.particle_types:
f.write('%s\t%f\n' % (pt.tag, pt.sigma))
s.write_xyz(elem=False)
if gen_files:
return
print('%s: starting pore size distribution simulation using poreblazer'
% strftime('%H:%M:%S'))
if nanohub:
print('%s: sending pore size distribution simulation to computer cluster' % strftime('%H:%M:%S'))
sys.stdout.flush()
cmd = ('submit -n 1 -w %s -i psd.in -i ff.atoms -i data.xyz '
'poreblazer-2.0.0_psd < psd.in'
% (24*60))
stdo, stde = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True).communicate()
else:
stdin = open('psd.in')
stdout = open('psd.out', 'w+')
call(exec_path, stdin=stdin, stdout=stdout, shell=True)
stdin.close()
stdout.close()
print('%s: pore size distribution simulation using poreblazer successful'
% strftime('%H:%M:%S'))
def surface(s, **kwargs):
"""pysimm.apps.poreblazer.surface
Perform accessible surface area calculation using PoreBlazer v2.0
Args:
atoms: file name to contain ff parameters (ff.atoms)
data: file name to write xyz file (data.xyz)
        angles: angles of simulation box (90.0 90.0 90.0)
insertions: number of insertions for calculation (1000)
probe: probe size (3.681)
probe_type: type of probe (hs)
vis: True to save visual (F)
exec_path: path to poreblazer surface executable (surface.exe)
Returns:
None
"""
atoms = kwargs.get('atoms', 'ff.atoms')
data = kwargs.get('data', "'data.xyz'")
angles = kwargs.get('angles', '90.0 90.0 90.0')
insertions = kwargs.get('insertions', 1000)
probe = kwargs.get('probe', '3.681')
probe_type = kwargs.get('probe_type', 'hs')
vis = kwargs.get('vis') or 'F'
exec_path = kwargs.get('exec_path', 'surface.exe')
with open('surf_area.in', 'w+') as f:
f.write('%s\n' % atoms)
f.write('%s\n' % data)
f.write('%s\n' % probe)
f.write('%s\n' % insertions)
f.write('%s %s %s\n' % (s.dim.dx, s.dim.dy, s.dim.dz))
f.write('%s\n' % angles)
f.write('%s\n' % probe_type)
f.write('%s\n' % randint(10000, 99999))
f.write('%s\n' % vis)
with open('ff.atoms', 'w+') as f:
f.write('%s\n\n' % s.particle_types.count)
for pt in s.particle_types:
f.write('%s\t%f\t%f\n' % (pt.tag, pt.sigma, pt.mass))
s.write_xyz(elem=False)
print('%s: starting surface area simulation using poreblazer'
% strftime('%H:%M:%S'))
stdin = open('surf_area.in')
stdout = open('surf_area.out', 'w+')
call(exec_path, stdin=stdin, stdout=stdout, shell=True)
stdin.close()
stdout.close()
print('%s: surface area simulation using poreblazer successful'
% strftime('%H:%M:%S'))
s.surf_area = float(open('surf_area.out').readlines()[-1].split()[-1])
return s.surf_area
def pore(s, **kwargs):
"""pysimm.apps.poreblazer.pore
Perform pore volume calculation using PoreBlazer v2.0
Args:
atoms: file name to contain ff parameters (ff.atoms)
data: file name to write xyz file (data.xyz)
        angles: angles of simulation box (90.0 90.0 90.0)
insertions: number of insertions for calculation (1000)
temp: temperature at which to perform simulation (300)
pore_probe: sigma, epsilon, cutoff parameters for probe (2.58, 10.22, 12.8)
exec_path: path to poreblazer pore executable (pore_he.exe)
Returns:
None
"""
boltzmann_kcal = 0.001987204
atoms = kwargs.get('atoms', 'ff.atoms')
data = kwargs.get('data', "'data.xyz'")
angles = kwargs.get('angles', '90.0 90.0 90.0')
insertions = kwargs.get('insertions', 1000)
temp = kwargs.get('temp', 300)
pore_probe = kwargs.get('pore_probe', '2.58 10.22 12.8')
exec_path = kwargs.get('exec_path', 'pore_he.exe')
with open('pore_volume.in', 'w+') as f:
f.write('%s\n' % atoms)
f.write('%s\n' % data)
f.write('%s\n' % insertions)
f.write('%s %s %s\n' % (s.dim.dx, s.dim.dy, s.dim.dz))
f.write('%s\n' % angles)
f.write('%s\n' % temp)
f.write('%s\n' % pore_probe)
f.write('%s\n' % randint(10000, 99999))
with open('ff.atoms', 'w+') as f:
f.write('%s\n\n' % s.particle_types.count)
for pt in s.particle_types:
f.write('%s\t%f\t%f\t%f\n' % (pt.tag, pt.sigma,
pt.epsilon/boltzmann_kcal, pt.mass))
s.write_xyz(elem=False)
print('%s: starting pore volume simulation using poreblazer'
% strftime('%H:%M:%S'))
stdin = open('pore_volume.in')
stdout = open('pore_volume.out', 'w+')
call(exec_path, stdin=stdin, stdout=stdout, shell=True)
stdin.close()
stdout.close()
print('%s: pore volume simulation using poreblazer successful'
% strftime('%H:%M:%S'))
s.pore_volume = float(open('pore_volume.out').readlines()[-1].split()[-1])
return s.pore_volume
def void(s, **kwargs):
"""pysimm.apps.poreblazer.void
Perform pore volume calculation using PoreBlazer v2.0 assuming a probe size of 0 to calculate void volume
Args:
atoms: file name to contain ff parameters (ff.atoms)
data: file name to write xyz file (data.xyz)
        angles: angles of simulation box (90.0 90.0 90.0)
insertions: number of insertions for calculation (1000)
temp: temperature at which to perform simulation (300)
pore_probe: sigma, epsilon, cutoff parameters for probe (0.00, 10.22, 12.8)
exec_path: path to poreblazer pore executable (pore_he.exe)
Returns:
None
"""
boltzmann_kcal = 0.001987204
atoms = kwargs.get('atoms', 'ff.atoms')
data = kwargs.get('data', "'data.xyz'")
angles = kwargs.get('angles', '90.0 90.0 90.0')
insertions = kwargs.get('insertions', 1000)
temp = kwargs.get('temp', 300)
pore_probe = kwargs.get('pore_probe', '0.0 10.22 12.8')
exec_path = kwargs.get('exec_path', 'pore_he.exe')
with open('void_volume.in', 'w+') as f:
f.write('%s\n' % atoms)
f.write('%s\n' % data)
f.write('%s\n' % insertions)
f.write('%s %s %s\n' % (s.dim.dx, s.dim.dy, s.dim.dz))
f.write('%s\n' % angles)
f.write('%s\n' % temp)
f.write('%s\n' % pore_probe)
f.write('%s\n' % randint(10000, 99999))
with open('ff.atoms', 'w+') as f:
f.write('%s\n\n' % s.particle_types.count)
for pt in s.particle_types:
f.write('%s\t%f\t%f\t%f\n' % (pt.tag, pt.sigma,
pt.epsilon/boltzmann_kcal, pt.mass))
s.write_xyz(elem=False)
print('%s: starting void volume simulation using poreblazer'
% strftime('%H:%M:%S'))
stdin = open('void_volume.in')
stdout = open('void_volume.out', 'w+')
call(exec_path, stdin=stdin, stdout=stdout, shell=True)
stdin.close()
stdout.close()
print('%s: void volume simulation using poreblazer successful'
% strftime('%H:%M:%S'))
s.void_volume = float(open('void_volume.out').readlines()[-1].split()[-2])
s.set_frac_free_volume()
return s.void_volume, s.frac_free_volume
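# Usage sketch (paths and the input file are illustrative; `s` is a pysimm
# system with box dimensions and typed particles):
#   from pysimm import system
#   s = system.read_lammps('polymer.lmps')
#   surface(s, exec_path='/opt/poreblazer/surface.exe')
#   pore(s, exec_path='/opt/poreblazer/pore_he.exe')
#   print(s.surf_area, s.pore_volume)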
|
{
"content_hash": "ac2aa3d94b0c1c185600e8007b7d62c8",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 109,
"avg_line_length": 34.5970695970696,
"alnum_prop": 0.5818951826363155,
"repo_name": "plin1112/pysimm",
"id": "fe8d1131660c58033a86219e7d8dc22443e8eb44",
"size": "10995",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "pysimm/apps/poreblazer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "503226"
},
{
"name": "Shell",
"bytes": "183"
}
],
"symlink_target": ""
}
|
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_file_name = 'message_2016-6-12_G9_BUF2_FR20.log'
# Composite classes
class Phi_QAgentNN(PhiMixin, QAgentNN):
def __init__(self, **kwargs):
super(Phi_QAgentNN, self).__init__(**kwargs)
# Parameters
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.9, 0.9
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - Phi
phi_length = 5
dim_state = (1, phi_length, 3+2)
range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
range_state = [[range_state_slice]*phi_length]
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size = 2, 200
reward_scaling, reward_scaling_update = 1, 'adaptive'
batch_size, update_period, freeze_period, rs_period = 100, 4, 16, 32
# |- Env model
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, 0.0
beta = 0.5
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- Env
# |- Time
start_time = pd.to_datetime('2014-11-05 09:20:00')
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
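# Worked numbers for the settings above (illustrative): backoff_epochs =
# 2*200 + 5 = 405, so head_datetime sits 405 * 2 s = 810 s before start_time,
# and TOTAL_EPOCHS = 7 days / 2 s = 302400 epochs.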
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df = pd.read_csv(
filepath_or_buffer='../data/trace_dh3.dat',
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
agent = Phi_QAgentNN(
phi_length=phi_length,
dim_state=dim_state, range_state=range_state,
    f_build_net=None,
batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
update_period=update_period, freeze_period=freeze_period,
memory_size=memory_size, num_buffer=num_buffer,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavyliftings
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch < TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
    if emu.epoch % (0.05*TOTAL_EPOCHS) == 0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
|
{
"content_hash": "f95da46dba21970b1ca04d88e169100f",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 103,
"avg_line_length": 33.41538461538462,
"alnum_prop": 0.6673572744014733,
"repo_name": "zaxliu/deepnap",
"id": "42a9eb16a5b795432712c0181bc2d6d1117662ae",
"size": "4370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/kdd-exps/experiment_message_2016-6-12_G9_BUF2_FR20_legacy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "571252"
},
{
"name": "Python",
"bytes": "520535"
}
],
"symlink_target": ""
}
|
import datetime
import decimal
import hashlib
import logging
import operator
import re
import sys
import threading
import uuid
from collections import deque
from collections import namedtuple
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
from copy import deepcopy
from functools import wraps
from inspect import isclass
__version__ = '2.6.4'
__all__ = [
'BareField',
'BigIntegerField',
'BlobField',
'BooleanField',
'CharField',
'Check',
'Clause',
'CompositeKey',
'DatabaseError',
'DataError',
'DateField',
'DateTimeField',
'DecimalField',
'DoesNotExist',
'DoubleField',
'DQ',
'Field',
'FixedCharField',
'FloatField',
'fn',
'ForeignKeyField',
'ImproperlyConfigured',
'IntegerField',
'IntegrityError',
'InterfaceError',
'InternalError',
'JOIN',
'JOIN_FULL',
'JOIN_INNER',
'JOIN_LEFT_OUTER',
'Model',
'MySQLDatabase',
'NotSupportedError',
'OperationalError',
'Param',
'PostgresqlDatabase',
'prefetch',
'PrimaryKeyField',
'ProgrammingError',
'Proxy',
'R',
'SqliteDatabase',
'SQL',
'TextField',
'TimeField',
'Using',
'UUIDField',
'Window',
]
# Set default logging handler to avoid "No handlers could be found for
# logger 'peewee'" warnings.
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# All peewee-generated logs are logged to this namespace.
logger = logging.getLogger('peewee')
logger.addHandler(NullHandler())
# Python 2/3 compatibility helpers. These helpers are used internally and are
# not exported.
def with_metaclass(meta, base=object):
return meta("NewBase", (base,), {})
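# Illustrative sketch (not executed; `SomeMeta` is hypothetical): subclassing
# the throwaway class returned by with_metaclass picks up the metaclass on
# both Python 2 and Python 3.
#
#     class MyBase(with_metaclass(SomeMeta, object)):
#         pass
#
# is roughly equivalent to `class MyBase(object, metaclass=SomeMeta)` on
# Python 3 and to setting `__metaclass__ = SomeMeta` on Python 2.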
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY26 = sys.version_info[:2] == (2, 6)
if PY3:
import builtins
from collections import Callable
from functools import reduce
callable = lambda c: isinstance(c, Callable)
unicode_type = str
string_type = bytes
basestring = str
print_ = getattr(builtins, 'print')
binary_construct = lambda s: bytes(s.encode('raw_unicode_escape'))
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
elif PY2:
unicode_type = unicode
string_type = basestring
binary_construct = buffer
def print_(s):
sys.stdout.write(s)
sys.stdout.write('\n')
exec('def reraise(tp, value, tb=None): raise tp, value, tb')
else:
raise RuntimeError('Unsupported python version.')
# By default, peewee supports Sqlite, MySQL and Postgresql.
try:
import sqlite3
except ImportError:
try:
from pysqlite2 import dbapi2 as sqlite3
except ImportError:
sqlite3 = None
try:
from psycopg2cffi import compat
compat.register()
except ImportError:
pass
try:
import psycopg2
from psycopg2 import extensions as pg_extensions
except ImportError:
psycopg2 = None
try:
import MySQLdb as mysql # prefer the C module.
except ImportError:
try:
import pymysql as mysql
except ImportError:
mysql = None
try:
from playhouse.speedups import strip_parens
except ImportError:
def strip_parens(s):
# Quick sanity check.
if not s or s[0] != '(':
return s
ct = i = 0
l = len(s)
while i < l:
if s[i] == '(' and s[l - 1] == ')':
ct += 1
i += 1
l -= 1
else:
break
if ct:
# If we ever end up with negatively-balanced parentheses, then we
# know that one of the outer parentheses was required.
unbalanced_ct = 0
required = 0
for i in range(ct, l - ct):
if s[i] == '(':
unbalanced_ct += 1
elif s[i] == ')':
unbalanced_ct -= 1
if unbalanced_ct < 0:
required += 1
unbalanced_ct = 0
if required == ct:
break
ct -= required
if ct > 0:
return s[ct:-ct]
return s
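# Worked examples for the fallback above (illustrative, not executed): only
# parentheses that wrap the entire string and are not required for balance
# are stripped.
#
#     strip_parens('((a OR b))')  # -> 'a OR b'
#     strip_parens('(a OR b)')    # -> 'a OR b'
#     strip_parens('(a) OR (b)')  # -> '(a) OR (b)'  (stripping would unbalance)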
if sqlite3:
sqlite3.register_adapter(decimal.Decimal, str)
sqlite3.register_adapter(datetime.date, str)
sqlite3.register_adapter(datetime.time, str)
DATETIME_PARTS = ['year', 'month', 'day', 'hour', 'minute', 'second']
DATETIME_LOOKUPS = set(DATETIME_PARTS)
# Sqlite does not support the `date_part` SQL function, so we will define an
# implementation in python.
SQLITE_DATETIME_FORMATS = (
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d',
'%H:%M:%S',
'%H:%M:%S.%f',
'%H:%M')
def _sqlite_date_part(lookup_type, datetime_string):
assert lookup_type in DATETIME_LOOKUPS
if not datetime_string:
return
dt = format_date_time(datetime_string, SQLITE_DATETIME_FORMATS)
return getattr(dt, lookup_type)
SQLITE_DATE_TRUNC_MAPPING = {
'year': '%Y',
'month': '%Y-%m',
'day': '%Y-%m-%d',
'hour': '%Y-%m-%d %H',
'minute': '%Y-%m-%d %H:%M',
'second': '%Y-%m-%d %H:%M:%S'}
MYSQL_DATE_TRUNC_MAPPING = SQLITE_DATE_TRUNC_MAPPING.copy()
MYSQL_DATE_TRUNC_MAPPING['minute'] = '%Y-%m-%d %H:%i'
MYSQL_DATE_TRUNC_MAPPING['second'] = '%Y-%m-%d %H:%i:%S'
def _sqlite_date_trunc(lookup_type, datetime_string):
assert lookup_type in SQLITE_DATE_TRUNC_MAPPING
if not datetime_string:
return
dt = format_date_time(datetime_string, SQLITE_DATETIME_FORMATS)
return dt.strftime(SQLITE_DATE_TRUNC_MAPPING[lookup_type])
def _sqlite_regexp(regex, value):
return re.search(regex, value, re.I) is not None
class attrdict(dict):
def __getattr__(self, attr):
return self[attr]
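# Illustrative sketch: attrdict just exposes keys as attributes, so for the
# OP table below OP.EQ, OP['EQ'] and the literal '=' are all the same value.
#
#     ops = attrdict(EQ='=')
#     ops.EQ == ops['EQ'] == '='  # -> True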
# Operators used in binary expressions.
OP = attrdict(
AND='and',
OR='or',
ADD='+',
SUB='-',
MUL='*',
DIV='/',
BIN_AND='&',
BIN_OR='|',
XOR='^',
MOD='%',
EQ='=',
LT='<',
LTE='<=',
GT='>',
GTE='>=',
NE='!=',
IN='in',
NOT_IN='not in',
IS='is',
IS_NOT='is not',
LIKE='like',
ILIKE='ilike',
BETWEEN='between',
REGEXP='regexp',
CONCAT='||',
)
JOIN = attrdict(
INNER='INNER',
LEFT_OUTER='LEFT OUTER',
RIGHT_OUTER='RIGHT OUTER',
FULL='FULL',
)
JOIN_INNER = JOIN.INNER
JOIN_LEFT_OUTER = JOIN.LEFT_OUTER
JOIN_FULL = JOIN.FULL
# To support "django-style" double-underscore filters, create a mapping between
# operation name and operation code, e.g. "__eq" == OP.EQ.
DJANGO_MAP = {
'eq': OP.EQ,
'lt': OP.LT,
'lte': OP.LTE,
'gt': OP.GT,
'gte': OP.GTE,
'ne': OP.NE,
'in': OP.IN,
'is': OP.IS,
'like': OP.LIKE,
'ilike': OP.ILIKE,
'regexp': OP.REGEXP,
}
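# Illustrative sketch (hypothetical helper, not part of this module): a
# django-style key such as 'price__gte' splits on the final '__' and resolves
# through DJANGO_MAP to an operation code.
#
#     def _split_dj_key(key):
#         field_name, _, op_name = key.rpartition('__')
#         return field_name, DJANGO_MAP[op_name]
#
#     _split_dj_key('price__gte')  # -> ('price', OP.GTE)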
# Helper functions that are used in various parts of the codebase.
def merge_dict(source, overrides):
merged = source.copy()
merged.update(overrides)
return merged
def returns_clone(func):
"""
Method decorator that will "clone" the object before applying the given
method. This ensures that state is mutated in a more predictable fashion,
and promotes the use of method-chaining.
"""
def inner(self, *args, **kwargs):
clone = self.clone() # Assumes object implements `clone`.
func(clone, *args, **kwargs)
return clone
inner.call_local = func # Provide a way to call without cloning.
return inner
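# Illustrative sketch (not executed): @returns_clone methods leave the
# receiver untouched and hand back a modified copy, which is what makes
# chains like `node.alias('x').desc()` side-effect free.
#
#     base = SQL('1')              # SQL is defined later in this module
#     aliased = base.alias('one')
#     base is aliased              # -> False; `base` still has no alias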
def not_allowed(func):
"""
Method decorator to indicate a method is not allowed to be called. Will
raise a `NotImplementedError`.
"""
def inner(self, *args, **kwargs):
raise NotImplementedError('%s is not allowed on %s instances' % (
func, type(self).__name__))
return inner
class Proxy(object):
"""
Proxy class useful for situations when you wish to defer the initialization
of an object.
"""
__slots__ = ['obj', '_callbacks']
def __init__(self):
self._callbacks = []
self.initialize(None)
def initialize(self, obj):
self.obj = obj
for callback in self._callbacks:
callback(obj)
def attach_callback(self, callback):
self._callbacks.append(callback)
return callback
def __getattr__(self, attr):
if self.obj is None:
raise AttributeError('Cannot use uninitialized Proxy.')
return getattr(self.obj, attr)
def __setattr__(self, attr, value):
if attr not in self.__slots__:
raise AttributeError('Cannot set attribute on proxy.')
return super(Proxy, self).__setattr__(attr, value)
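# Typical deferred-initialization pattern (illustrative sketch): declare a
# Proxy as a placeholder, point models at it, then swap in the real database
# once configuration is known.
#
#     database_proxy = Proxy()
#     # ... Model classes are declared against database_proxy ...
#     database_proxy.initialize(SqliteDatabase('app.db'))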
class _CDescriptor(object):
def __get__(self, instance, instance_type=None):
if instance is not None:
return Entity(instance._alias)
return self
# Classes representing the query tree.
class Node(object):
"""Base-class for any part of a query which shall be composable."""
c = _CDescriptor()
_node_type = 'node'
def __init__(self):
self._negated = False
self._alias = None
self._bind_to = None
self._ordering = None # ASC or DESC.
@classmethod
def extend(cls, name=None, clone=False):
def decorator(method):
method_name = name or method.__name__
if clone:
method = returns_clone(method)
setattr(cls, method_name, method)
return method
return decorator
def clone_base(self):
return type(self)()
def clone(self):
inst = self.clone_base()
inst._negated = self._negated
inst._alias = self._alias
inst._ordering = self._ordering
inst._bind_to = self._bind_to
return inst
@returns_clone
def __invert__(self):
self._negated = not self._negated
@returns_clone
def alias(self, a=None):
self._alias = a
@returns_clone
def bind_to(self, bt):
"""
Bind the results of an expression to a specific model type. Useful
when adding expressions to a select, where the result of the expression
should be placed on a joined instance.
"""
self._bind_to = bt
@returns_clone
def asc(self):
self._ordering = 'ASC'
@returns_clone
def desc(self):
self._ordering = 'DESC'
def __pos__(self):
return self.asc()
def __neg__(self):
return self.desc()
def _e(op, inv=False):
"""
Lightweight factory which returns a method that builds an Expression
consisting of the left-hand and right-hand operands, using `op`.
"""
def inner(self, rhs):
if inv:
return Expression(rhs, op, self)
return Expression(self, op, rhs)
return inner
__and__ = _e(OP.AND)
__or__ = _e(OP.OR)
__add__ = _e(OP.ADD)
__sub__ = _e(OP.SUB)
__mul__ = _e(OP.MUL)
__div__ = __truediv__ = _e(OP.DIV)
__xor__ = _e(OP.XOR)
__radd__ = _e(OP.ADD, inv=True)
__rsub__ = _e(OP.SUB, inv=True)
__rmul__ = _e(OP.MUL, inv=True)
__rdiv__ = __rtruediv__ = _e(OP.DIV, inv=True)
__rand__ = _e(OP.AND, inv=True)
__ror__ = _e(OP.OR, inv=True)
__rxor__ = _e(OP.XOR, inv=True)
def __eq__(self, rhs):
if rhs is None:
return Expression(self, OP.IS, None)
return Expression(self, OP.EQ, rhs)
def __ne__(self, rhs):
if rhs is None:
return Expression(self, OP.IS_NOT, None)
return Expression(self, OP.NE, rhs)
__lt__ = _e(OP.LT)
__le__ = _e(OP.LTE)
__gt__ = _e(OP.GT)
__ge__ = _e(OP.GTE)
__lshift__ = _e(OP.IN)
__rshift__ = _e(OP.IS)
__mod__ = _e(OP.LIKE)
__pow__ = _e(OP.ILIKE)
bin_and = _e(OP.BIN_AND)
bin_or = _e(OP.BIN_OR)
# Special expressions.
def in_(self, rhs):
return Expression(self, OP.IN, rhs)
def not_in(self, rhs):
return Expression(self, OP.NOT_IN, rhs)
def is_null(self, is_null=True):
if is_null:
return Expression(self, OP.IS, None)
return Expression(self, OP.IS_NOT, None)
def contains(self, rhs):
return Expression(self, OP.ILIKE, '%%%s%%' % rhs)
def startswith(self, rhs):
return Expression(self, OP.ILIKE, '%s%%' % rhs)
def endswith(self, rhs):
return Expression(self, OP.ILIKE, '%%%s' % rhs)
def between(self, low, high):
return Expression(self, OP.BETWEEN, Clause(low, R('AND'), high))
def regexp(self, expression):
return Expression(self, OP.REGEXP, expression)
def concat(self, rhs):
return Expression(self, OP.CONCAT, rhs)
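# Illustrative sketch (assumes a hypothetical Field instance `price`): the
# overloads above build Expression trees instead of evaluating anything.
#
#     (price >= 10) & (price < 20)
#     # -> Expression(Expression(price, OP.GTE, 10),
#     #               OP.AND,
#     #               Expression(price, OP.LT, 20))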
class SQL(Node):
"""An unescaped SQL string, with optional parameters."""
_node_type = 'sql'
def __init__(self, value, *params):
self.value = value
self.params = params
super(SQL, self).__init__()
def clone_base(self):
return SQL(self.value, *self.params)
R = SQL # backwards-compat.
class Entity(Node):
"""A quoted-name or entity, e.g. "table"."column"."""
_node_type = 'entity'
def __init__(self, *path):
super(Entity, self).__init__()
self.path = path
def clone_base(self):
return Entity(*self.path)
def __getattr__(self, attr):
return Entity(*filter(None, self.path + (attr,)))
class Func(Node):
"""An arbitrary SQL function call."""
_node_type = 'func'
def __init__(self, name, *arguments):
self.name = name
self.arguments = arguments
self._coerce = True
super(Func, self).__init__()
@returns_clone
def coerce(self, coerce=True):
self._coerce = coerce
def clone_base(self):
res = Func(self.name, *self.arguments)
res._coerce = self._coerce
return res
def over(self, partition_by=None, order_by=None, window=None):
if isinstance(partition_by, Window) and window is None:
window = partition_by
if window is None:
sql = Window(
partition_by=partition_by, order_by=order_by).__sql__()
else:
sql = SQL(window._alias)
return Clause(self, SQL('OVER'), sql)
def __getattr__(self, attr):
def dec(*args, **kwargs):
return Func(attr, *args, **kwargs)
return dec
# fn is a factory for creating `Func` objects and provides a friendlier API:
# instead of `Func("LOWER", param)`, write `fn.LOWER(param)`.
fn = Func(None)
class Expression(Node):
"""A binary expression, e.g `foo + 1` or `bar < 7`."""
_node_type = 'expression'
def __init__(self, lhs, op, rhs, flat=False):
super(Expression, self).__init__()
self.lhs = lhs
self.op = op
self.rhs = rhs
self.flat = flat
def clone_base(self):
return Expression(self.lhs, self.op, self.rhs, self.flat)
class Param(Node):
"""
Arbitrary parameter passed into a query. Instructs the query compiler to
specifically treat this value as a parameter, useful for `list` which is
special-cased for `IN` lookups.
"""
_node_type = 'param'
def __init__(self, value, conv=None):
self.value = value
self.conv = conv
super(Param, self).__init__()
def clone_base(self):
return Param(self.value, self.conv)
class Passthrough(Param):
_node_type = 'passthrough'
class Clause(Node):
"""A SQL clause, one or more Node objects joined by spaces."""
_node_type = 'clause'
glue = ' '
parens = False
def __init__(self, *nodes, **kwargs):
if 'glue' in kwargs:
self.glue = kwargs['glue']
if 'parens' in kwargs:
self.parens = kwargs['parens']
super(Clause, self).__init__()
self.nodes = list(nodes)
def clone_base(self):
clone = Clause(*self.nodes)
clone.glue = self.glue
clone.parens = self.parens
return clone
class CommaClause(Clause):
"""One or more Node objects joined by commas, no parens."""
glue = ', '
class EnclosedClause(CommaClause):
"""One or more Node objects joined by commas and enclosed in parens."""
parens = True
class Window(Node):
def __init__(self, partition_by=None, order_by=None):
super(Window, self).__init__()
self.partition_by = partition_by
self.order_by = order_by
self._alias = self._alias or 'w'
def __sql__(self):
over_clauses = []
if self.partition_by:
over_clauses.append(Clause(
SQL('PARTITION BY'),
CommaClause(*self.partition_by)))
if self.order_by:
over_clauses.append(Clause(
SQL('ORDER BY'),
CommaClause(*self.order_by)))
return EnclosedClause(Clause(*over_clauses))
def clone_base(self):
return Window(self.partition_by, self.order_by)
class Check(SQL):
"""Check constraint, usage: `Check('price > 10')`."""
def __init__(self, value):
super(Check, self).__init__('CHECK (%s)' % value)
class DQ(Node):
"""A "django-style" filter expression, e.g. {'foo__eq': 'x'}."""
def __init__(self, **query):
super(DQ, self).__init__()
self.query = query
def clone_base(self):
return DQ(**self.query)
class _StripParens(Node):
_node_type = 'strip_parens'
def __init__(self, node):
super(_StripParens, self).__init__()
self.node = node
JoinMetadata = namedtuple('JoinMetadata', (
'src_model', # Source Model class.
'dest_model', # Dest Model class.
    'src', # Source, may be Model or ModelAlias.
'dest', # Dest, may be Model, ModelAlias, or SelectQuery.
'attr', # Attribute name joined instance(s) should be assigned to.
'primary_key', # Primary key being joined on.
'foreign_key', # Foreign key being joined from.
'is_backref', # Is this a backref, i.e. 1 -> N.
'alias', # Explicit alias given to join expression.
'is_self_join', # Is this a self-join?
'is_expression', # Is the join ON clause an Expression?
))
class Join(namedtuple('_Join', ('src', 'dest', 'join_type', 'on'))):
def get_foreign_key(self, source, dest, field=None):
if isinstance(source, SelectQuery) or isinstance(dest, SelectQuery):
return None, None
fk_field = source._meta.rel_for_model(dest, field)
if fk_field is not None:
return fk_field, False
reverse_rel = source._meta.reverse_rel_for_model(dest, field)
if reverse_rel is not None:
return reverse_rel, True
return None, None
def get_join_type(self):
return self.join_type or JOIN.INNER
def model_from_alias(self, model_or_alias):
if isinstance(model_or_alias, ModelAlias):
return model_or_alias.model_class
elif isinstance(model_or_alias, SelectQuery):
return model_or_alias.model_class
return model_or_alias
def _join_metadata(self):
# Get the actual tables being joined.
src = self.model_from_alias(self.src)
dest = self.model_from_alias(self.dest)
join_alias = isinstance(self.on, Node) and self.on._alias or None
is_expression = isinstance(self.on, (Expression, Func, SQL))
on_field = isinstance(self.on, (Field, FieldProxy)) and self.on or None
if on_field:
fk_field = on_field
is_backref = on_field.name not in src._meta.fields
else:
fk_field, is_backref = self.get_foreign_key(src, dest, self.on)
if fk_field is None and self.on is not None:
fk_field, is_backref = self.get_foreign_key(src, dest)
if fk_field is not None:
primary_key = fk_field.to_field
else:
primary_key = None
if not join_alias:
if fk_field is not None:
if is_backref:
target_attr = dest._meta.db_table
else:
target_attr = fk_field.name
else:
try:
target_attr = self.on.lhs.name
except AttributeError:
target_attr = dest._meta.db_table
else:
target_attr = None
return JoinMetadata(
src_model=src,
dest_model=dest,
src=self.src,
dest=self.dest,
attr=join_alias or target_attr,
primary_key=primary_key,
foreign_key=fk_field,
is_backref=is_backref,
alias=join_alias,
is_self_join=src is dest,
is_expression=is_expression)
@property
def metadata(self):
if not hasattr(self, '_cached_metadata'):
self._cached_metadata = self._join_metadata()
return self._cached_metadata
class FieldDescriptor(object):
# Fields are exposed as descriptors in order to control access to the
# underlying "raw" data.
def __init__(self, field):
self.field = field
self.att_name = self.field.name
def __get__(self, instance, instance_type=None):
if instance is not None:
return instance._data.get(self.att_name)
return self.field
def __set__(self, instance, value):
instance._data[self.att_name] = value
instance._dirty.add(self.att_name)
class Field(Node):
"""A column on a table."""
_field_counter = 0
_order = 0
_node_type = 'field'
db_field = 'unknown'
def __init__(self, null=False, index=False, unique=False,
verbose_name=None, help_text=None, db_column=None,
default=None, choices=None, primary_key=False, sequence=None,
constraints=None, schema=None):
self.null = null
self.index = index
self.unique = unique
self.verbose_name = verbose_name
self.help_text = help_text
self.db_column = db_column
self.default = default
self.choices = choices # Used for metadata purposes, not enforced.
self.primary_key = primary_key
self.sequence = sequence # Name of sequence, e.g. foo_id_seq.
self.constraints = constraints # List of column constraints.
self.schema = schema # Name of schema, e.g. 'public'.
# Used internally for recovering the order in which Fields were defined
# on the Model class.
Field._field_counter += 1
self._order = Field._field_counter
self._sort_key = (self.primary_key and 1 or 2), self._order
self._is_bound = False # Whether the Field is "bound" to a Model.
super(Field, self).__init__()
def clone_base(self, **kwargs):
inst = type(self)(
null=self.null,
index=self.index,
unique=self.unique,
verbose_name=self.verbose_name,
help_text=self.help_text,
db_column=self.db_column,
default=self.default,
choices=self.choices,
primary_key=self.primary_key,
sequence=self.sequence,
constraints=self.constraints,
schema=self.schema,
**kwargs)
if self._is_bound:
inst.name = self.name
inst.model_class = self.model_class
inst._is_bound = self._is_bound
return inst
def add_to_class(self, model_class, name):
"""
Hook that replaces the `Field` attribute on a class with a named
`FieldDescriptor`. Called by the metaclass during construction of the
`Model`.
"""
self.name = name
self.model_class = model_class
self.db_column = self.db_column or self.name
if not self.verbose_name:
self.verbose_name = re.sub('_+', ' ', name).title()
model_class._meta.fields[self.name] = self
model_class._meta.columns[self.db_column] = self
setattr(model_class, name, FieldDescriptor(self))
self._is_bound = True
def get_database(self):
return self.model_class._meta.database
def get_column_type(self):
field_type = self.get_db_field()
return self.get_database().compiler().get_column_type(field_type)
def get_db_field(self):
return self.db_field
def get_modifiers(self):
return None
def coerce(self, value):
return value
def db_value(self, value):
"""Convert the python value for storage in the database."""
return value if value is None else self.coerce(value)
def python_value(self, value):
"""Convert the database value to a pythonic value."""
return value if value is None else self.coerce(value)
def as_entity(self, with_table=False):
if with_table:
return Entity(self.model_class._meta.db_table, self.db_column)
return Entity(self.db_column)
def __ddl_column__(self, column_type):
"""Return the column type, e.g. VARCHAR(255) or REAL."""
modifiers = self.get_modifiers()
if modifiers:
return SQL(
'%s(%s)' % (column_type, ', '.join(map(str, modifiers))))
return SQL(column_type)
def __ddl__(self, column_type):
"""Return a list of Node instances that defines the column."""
ddl = [self.as_entity(), self.__ddl_column__(column_type)]
if not self.null:
ddl.append(SQL('NOT NULL'))
if self.primary_key:
ddl.append(SQL('PRIMARY KEY'))
if self.sequence:
ddl.append(SQL("DEFAULT NEXTVAL('%s')" % self.sequence))
if self.constraints:
ddl.extend(self.constraints)
return ddl
def __hash__(self):
return hash(self.name + '.' + self.model_class.__name__)
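# Illustrative sketch: for a CharField(max_length=255) bound as 'username'
# with null=False, __ddl_column__ yields SQL('VARCHAR(255)') and __ddl__
# produces roughly [Entity('username'), SQL('VARCHAR(255)'), SQL('NOT NULL')],
# which the compiler renders as: "username" VARCHAR(255) NOT NULL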
class BareField(Field):
db_field = 'bare'
class IntegerField(Field):
db_field = 'int'
coerce = int
class BigIntegerField(IntegerField):
db_field = 'bigint'
class PrimaryKeyField(IntegerField):
db_field = 'primary_key'
def __init__(self, *args, **kwargs):
kwargs['primary_key'] = True
super(PrimaryKeyField, self).__init__(*args, **kwargs)
class FloatField(Field):
db_field = 'float'
coerce = float
class DoubleField(FloatField):
db_field = 'double'
class DecimalField(Field):
db_field = 'decimal'
def __init__(self, max_digits=10, decimal_places=5, auto_round=False,
rounding=None, *args, **kwargs):
self.max_digits = max_digits
self.decimal_places = decimal_places
self.auto_round = auto_round
self.rounding = rounding or decimal.DefaultContext.rounding
super(DecimalField, self).__init__(*args, **kwargs)
def clone_base(self, **kwargs):
return super(DecimalField, self).clone_base(
max_digits=self.max_digits,
decimal_places=self.decimal_places,
auto_round=self.auto_round,
rounding=self.rounding,
**kwargs)
def get_modifiers(self):
return [self.max_digits, self.decimal_places]
def db_value(self, value):
D = decimal.Decimal
if not value:
return value if value is None else D(0)
if self.auto_round:
exp = D(10) ** (-self.decimal_places)
rounding = self.rounding
return D(str(value)).quantize(exp, rounding=rounding)
return value
def python_value(self, value):
if value is not None:
if isinstance(value, decimal.Decimal):
return value
return decimal.Decimal(str(value))
def coerce_to_unicode(s, encoding='utf-8'):
if isinstance(s, unicode_type):
return s
elif isinstance(s, string_type):
return s.decode(encoding)
return unicode_type(s)
class CharField(Field):
db_field = 'string'
def __init__(self, max_length=255, *args, **kwargs):
self.max_length = max_length
super(CharField, self).__init__(*args, **kwargs)
def clone_base(self, **kwargs):
return super(CharField, self).clone_base(
max_length=self.max_length,
**kwargs)
def get_modifiers(self):
return self.max_length and [self.max_length] or None
def coerce(self, value):
return coerce_to_unicode(value or '')
class FixedCharField(CharField):
db_field = 'fixed_char'
def python_value(self, value):
value = super(FixedCharField, self).python_value(value)
if value:
value = value.strip()
return value
class TextField(Field):
db_field = 'text'
def coerce(self, value):
return coerce_to_unicode(value or '')
class BlobField(Field):
db_field = 'blob'
def db_value(self, value):
if isinstance(value, basestring):
return binary_construct(value)
return value
class UUIDField(Field):
db_field = 'uuid'
def db_value(self, value):
return None if value is None else str(value)
def python_value(self, value):
return None if value is None else uuid.UUID(value)
def format_date_time(value, formats, post_process=None):
post_process = post_process or (lambda x: x)
for fmt in formats:
try:
return post_process(datetime.datetime.strptime(value, fmt))
except ValueError:
pass
return value
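# Illustrative sketch: candidate formats are tried in order, and the raw
# value is returned untouched when none of them match.
#
#     format_date_time('2015-01-31 08:30:00', SQLITE_DATETIME_FORMATS)
#     # -> datetime.datetime(2015, 1, 31, 8, 30)
#     format_date_time('not a date', SQLITE_DATETIME_FORMATS)
#     # -> 'not a date'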
def _date_part(date_part):
def dec(self):
return self.model_class._meta.database.extract_date(date_part, self)
return dec
class _BaseFormattedField(Field):
formats = None
def __init__(self, formats=None, *args, **kwargs):
if formats is not None:
self.formats = formats
super(_BaseFormattedField, self).__init__(*args, **kwargs)
def clone_base(self, **kwargs):
return super(_BaseFormattedField, self).clone_base(
formats=self.formats,
**kwargs)
class DateTimeField(_BaseFormattedField):
db_field = 'datetime'
formats = [
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d',
]
def python_value(self, value):
if value and isinstance(value, basestring):
return format_date_time(value, self.formats)
return value
year = property(_date_part('year'))
month = property(_date_part('month'))
day = property(_date_part('day'))
hour = property(_date_part('hour'))
minute = property(_date_part('minute'))
second = property(_date_part('second'))
class DateField(_BaseFormattedField):
db_field = 'date'
formats = [
'%Y-%m-%d',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
]
def python_value(self, value):
if value and isinstance(value, basestring):
pp = lambda x: x.date()
return format_date_time(value, self.formats, pp)
elif value and isinstance(value, datetime.datetime):
return value.date()
return value
year = property(_date_part('year'))
month = property(_date_part('month'))
day = property(_date_part('day'))
class TimeField(_BaseFormattedField):
db_field = 'time'
formats = [
'%H:%M:%S.%f',
'%H:%M:%S',
'%H:%M',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S',
]
def python_value(self, value):
if value:
if isinstance(value, basestring):
pp = lambda x: x.time()
return format_date_time(value, self.formats, pp)
elif isinstance(value, datetime.datetime):
return value.time()
elif value is not None and isinstance(value, datetime.timedelta):
return (datetime.datetime.min + value).time()
return value
hour = property(_date_part('hour'))
minute = property(_date_part('minute'))
second = property(_date_part('second'))
class BooleanField(Field):
db_field = 'bool'
coerce = bool
class RelationDescriptor(FieldDescriptor):
"""Foreign-key abstraction to replace a related PK with a related model."""
def __init__(self, field, rel_model):
self.rel_model = rel_model
super(RelationDescriptor, self).__init__(field)
def get_object_or_id(self, instance):
rel_id = instance._data.get(self.att_name)
if rel_id is not None or self.att_name in instance._obj_cache:
if self.att_name not in instance._obj_cache:
obj = self.rel_model.get(self.field.to_field == rel_id)
instance._obj_cache[self.att_name] = obj
return instance._obj_cache[self.att_name]
elif not self.field.null:
raise self.rel_model.DoesNotExist
return rel_id
def __get__(self, instance, instance_type=None):
if instance is not None:
return self.get_object_or_id(instance)
return self.field
def __set__(self, instance, value):
if isinstance(value, self.rel_model):
instance._data[self.att_name] = getattr(
value, self.field.to_field.name)
instance._obj_cache[self.att_name] = value
else:
orig_value = instance._data.get(self.att_name)
instance._data[self.att_name] = value
if orig_value != value and self.att_name in instance._obj_cache:
del instance._obj_cache[self.att_name]
instance._dirty.add(self.att_name)
class ReverseRelationDescriptor(object):
"""Back-reference to expose related objects as a `SelectQuery`."""
def __init__(self, field):
self.field = field
self.rel_model = field.model_class
def __get__(self, instance, instance_type=None):
if instance is not None:
return self.rel_model.select().where(
self.field == getattr(instance, self.field.to_field.name))
return self
class ObjectIdDescriptor(object):
"""Gives direct access to the underlying id"""
def __init__(self, field):
self.attr_name = field.name
def __get__(self, instance, instance_type=None):
if instance is not None:
return instance._data.get(self.attr_name)
class ForeignKeyField(IntegerField):
def __init__(self, rel_model, related_name=None, on_delete=None,
on_update=None, extra=None, to_field=None, *args, **kwargs):
if rel_model != 'self' and not isinstance(rel_model, Proxy) and not \
issubclass(rel_model, Model):
raise TypeError('Unexpected value for `rel_model`. Expected '
'`Model`, `Proxy` or "self"')
self.rel_model = rel_model
self._related_name = related_name
self.deferred = isinstance(rel_model, Proxy)
self.on_delete = on_delete
self.on_update = on_update
self.extra = extra
self.to_field = to_field
super(ForeignKeyField, self).__init__(*args, **kwargs)
def clone_base(self, **kwargs):
return super(ForeignKeyField, self).clone_base(
rel_model=self.rel_model,
related_name=self.related_name,
on_delete=self.on_delete,
on_update=self.on_update,
extra=self.extra,
to_field=self.to_field,
**kwargs)
def _get_descriptor(self):
return RelationDescriptor(self, self.rel_model)
def _get_id_descriptor(self):
return ObjectIdDescriptor(self)
def _get_backref_descriptor(self):
return ReverseRelationDescriptor(self)
def _get_related_name(self):
return self._related_name or ('%s_set' % self.model_class._meta.name)
def add_to_class(self, model_class, name):
if isinstance(self.rel_model, Proxy):
def callback(rel_model):
self.rel_model = rel_model
self.add_to_class(model_class, name)
self.rel_model.attach_callback(callback)
return
self.name = name
self.model_class = model_class
self.db_column = self.db_column or '%s_id' % self.name
if not self.verbose_name:
self.verbose_name = re.sub('_+', ' ', name).title()
model_class._meta.fields[self.name] = self
model_class._meta.columns[self.db_column] = self
self.related_name = self._get_related_name()
if self.rel_model == 'self':
self.rel_model = self.model_class
if self.to_field is not None:
if not isinstance(self.to_field, Field):
self.to_field = getattr(self.rel_model, self.to_field)
else:
self.to_field = self.rel_model._meta.primary_key
if model_class._meta.validate_backrefs:
if self.related_name in self.rel_model._meta.fields:
error = ('Foreign key: %s.%s related name "%s" collision with '
'model field of the same name.')
raise AttributeError(error % (
self.model_class._meta.name, self.name, self.related_name))
if self.related_name in self.rel_model._meta.reverse_rel:
error = ('Foreign key: %s.%s related name "%s" collision with '
'foreign key using same related_name.')
raise AttributeError(error % (
self.model_class._meta.name, self.name, self.related_name))
setattr(model_class, name, self._get_descriptor())
setattr(model_class, name + '_id', self._get_id_descriptor())
setattr(self.rel_model,
self.related_name,
self._get_backref_descriptor())
self._is_bound = True
model_class._meta.rel[self.name] = self
self.rel_model._meta.reverse_rel[self.related_name] = self
def get_db_field(self):
"""
        Overridden to ensure Foreign Keys use the same column type as the
        primary key they point to.
"""
if not isinstance(self.to_field, PrimaryKeyField):
return self.to_field.get_db_field()
return super(ForeignKeyField, self).get_db_field()
def get_modifiers(self):
if not isinstance(self.to_field, PrimaryKeyField):
return self.to_field.get_modifiers()
return super(ForeignKeyField, self).get_modifiers()
def coerce(self, value):
return self.to_field.coerce(value)
def db_value(self, value):
if isinstance(value, self.rel_model):
value = value._get_pk_value()
return self.to_field.db_value(value)
class CompositeKey(object):
"""A primary key composed of multiple columns."""
sequence = None
def __init__(self, *field_names):
self.field_names = field_names
def add_to_class(self, model_class, name):
self.name = name
self.model_class = model_class
setattr(model_class, name, self)
def __get__(self, instance, instance_type=None):
if instance is not None:
return tuple([getattr(instance, field_name)
for field_name in self.field_names])
return self
def __set__(self, instance, value):
pass
def __eq__(self, other):
expressions = [(self.model_class._meta.fields[field] == value)
for field, value in zip(self.field_names, other)]
return reduce(operator.and_, expressions)
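# Illustrative sketch (hypothetical model whose primary key is
# CompositeKey('first', 'last')): comparing the key to a tuple AND-s the
# per-column expressions together.
#
#     Person._meta.primary_key == ('Huey', 'Zaizee')
#     # -> (first == 'Huey') AND (last == 'Zaizee')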
class AliasMap(object):
prefix = 't'
def __init__(self, start=0):
self._alias_map = {}
self._counter = start
def __repr__(self):
return '<AliasMap: %s>' % self._alias_map
def add(self, obj, alias=None):
if obj in self._alias_map:
return
self._counter += 1
self._alias_map[obj] = alias or '%s%s' % (self.prefix, self._counter)
def __getitem__(self, obj):
if obj not in self._alias_map:
self.add(obj)
return self._alias_map[obj]
def __contains__(self, obj):
return obj in self._alias_map
def update(self, alias_map):
if alias_map:
for obj, alias in alias_map._alias_map.items():
if obj not in self:
self._alias_map[obj] = alias
return self
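# Illustrative sketch: aliases are handed out lazily as 't1', 't2', ... the
# first time an object is looked up, and stay stable on repeat lookups.
#
#     am = AliasMap()
#     am['users']   # -> 't1'
#     am['tweets']  # -> 't2'
#     am['users']   # -> 't1'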
class QueryCompiler(object):
# Mapping of `db_type` to actual column type used by database driver.
# Database classes may provide additional column types or overrides.
field_map = {
'bare': '',
'bigint': 'BIGINT',
'blob': 'BLOB',
'bool': 'SMALLINT',
'date': 'DATE',
'datetime': 'DATETIME',
'decimal': 'DECIMAL',
'double': 'REAL',
'fixed_char': 'CHAR',
'float': 'REAL',
'int': 'INTEGER',
'primary_key': 'INTEGER',
'string': 'VARCHAR',
'text': 'TEXT',
'time': 'TIME',
}
# Mapping of OP. to actual SQL operation. For most databases this will be
# the same, but some column types or databases may support additional ops.
# Like `field_map`, Database classes may extend or override these.
op_map = {
OP.EQ: '=',
OP.LT: '<',
OP.LTE: '<=',
OP.GT: '>',
OP.GTE: '>=',
OP.NE: '!=',
OP.IN: 'IN',
OP.NOT_IN: 'NOT IN',
OP.IS: 'IS',
OP.IS_NOT: 'IS NOT',
OP.BIN_AND: '&',
OP.BIN_OR: '|',
OP.LIKE: 'LIKE',
OP.ILIKE: 'ILIKE',
OP.BETWEEN: 'BETWEEN',
OP.ADD: '+',
OP.SUB: '-',
OP.MUL: '*',
OP.DIV: '/',
OP.XOR: '#',
OP.AND: 'AND',
OP.OR: 'OR',
OP.MOD: '%',
OP.REGEXP: 'REGEXP',
OP.CONCAT: '||',
}
join_map = {
JOIN.INNER: 'INNER JOIN',
JOIN.LEFT_OUTER: 'LEFT OUTER JOIN',
JOIN.RIGHT_OUTER: 'RIGHT OUTER JOIN',
JOIN.FULL: 'FULL JOIN',
}
alias_map_class = AliasMap
def __init__(self, quote_char='"', interpolation='?', field_overrides=None,
op_overrides=None):
self.quote_char = quote_char
self.interpolation = interpolation
self._field_map = merge_dict(self.field_map, field_overrides or {})
self._op_map = merge_dict(self.op_map, op_overrides or {})
self._parse_map = self.get_parse_map()
self._unknown_types = set(['param'])
def get_parse_map(self):
        # To avoid O(n) lookups when parsing nodes, use a constant-time
        # lookup table keyed by the common node types.
return {
'expression': self._parse_expression,
'param': self._parse_param,
'passthrough': self._parse_param,
'func': self._parse_func,
'clause': self._parse_clause,
'entity': self._parse_entity,
'field': self._parse_field,
'sql': self._parse_sql,
'select_query': self._parse_select_query,
'compound_select_query': self._parse_compound_select_query,
'strip_parens': self._parse_strip_parens,
}
def quote(self, s):
return '%s%s%s' % (self.quote_char, s, self.quote_char)
def get_column_type(self, f):
return self._field_map[f] if f in self._field_map else f.upper()
def get_op(self, q):
return self._op_map[q]
def _sorted_fields(self, field_dict):
return sorted(field_dict.items(), key=lambda i: i[0]._sort_key)
def _parse_default(self, node, alias_map, conv):
return self.interpolation, [node]
def _parse_expression(self, node, alias_map, conv):
if isinstance(node.lhs, Field):
conv = node.lhs
lhs, lparams = self.parse_node(node.lhs, alias_map, conv)
rhs, rparams = self.parse_node(node.rhs, alias_map, conv)
template = '%s %s %s' if node.flat else '(%s %s %s)'
sql = template % (lhs, self.get_op(node.op), rhs)
return sql, lparams + rparams
def _parse_param(self, node, alias_map, conv):
if node.conv:
params = [node.conv(node.value)]
else:
params = [node.value]
return self.interpolation, params
def _parse_func(self, node, alias_map, conv):
conv = node._coerce and conv or None
sql, params = self.parse_node_list(node.arguments, alias_map, conv)
return '%s(%s)' % (node.name, strip_parens(sql)), params
def _parse_clause(self, node, alias_map, conv):
sql, params = self.parse_node_list(
node.nodes, alias_map, conv, node.glue)
if node.parens:
sql = '(%s)' % strip_parens(sql)
return sql, params
def _parse_entity(self, node, alias_map, conv):
return '.'.join(map(self.quote, node.path)), []
def _parse_sql(self, node, alias_map, conv):
return node.value, list(node.params)
def _parse_field(self, node, alias_map, conv):
if alias_map:
sql = '.'.join((
self.quote(alias_map[node.model_class]),
self.quote(node.db_column)))
else:
sql = self.quote(node.db_column)
return sql, []
def _parse_compound_select_query(self, node, alias_map, conv):
csq = 'compound_select_query'
if node.rhs._node_type == csq and node.lhs._node_type != csq:
first_q, second_q = node.rhs, node.lhs
inv = True
else:
first_q, second_q = node.lhs, node.rhs
inv = False
new_map = self.alias_map_class()
if first_q._node_type == csq:
new_map._counter = alias_map._counter
first, first_p = self.generate_select(first_q, new_map)
second, second_p = self.generate_select(
second_q,
self.calculate_alias_map(second_q, new_map))
if inv:
l, lp, r, rp = second, second_p, first, first_p
else:
            l, lp, r, rp = first, first_p, second, second_p
# We add outer parentheses in the event the compound query is used in
# the `from_()` clause, in which case we'll need them.
if node.database.compound_select_parentheses:
sql = '((%s) %s (%s))' % (l, node.operator, r)
else:
sql = '(%s %s %s)' % (l, node.operator, r)
return sql, lp + rp
def _parse_select_query(self, node, alias_map, conv):
clone = node.clone()
if not node._explicit_selection:
if conv and isinstance(conv, ForeignKeyField):
select_field = conv.to_field
else:
select_field = clone.model_class._meta.primary_key
clone._select = (select_field,)
sub, params = self.generate_select(clone, alias_map)
return '(%s)' % strip_parens(sub), params
def _parse_strip_parens(self, node, alias_map, conv):
sql, params = self.parse_node(node.node, alias_map, conv)
return strip_parens(sql), params
def _parse(self, node, alias_map, conv):
# By default treat the incoming node as a raw value that should be
# parameterized.
node_type = getattr(node, '_node_type', None)
unknown = False
if node_type in self._parse_map:
sql, params = self._parse_map[node_type](node, alias_map, conv)
unknown = node_type in self._unknown_types
elif isinstance(node, (list, tuple)):
# If you're wondering how to pass a list into your query, simply
# wrap it in Param().
sql, params = self.parse_node_list(node, alias_map, conv)
sql = '(%s)' % sql
elif isinstance(node, Model):
sql = self.interpolation
if conv and isinstance(conv, ForeignKeyField) and \
not isinstance(conv.to_field, ForeignKeyField):
params = [
conv.to_field.db_value(getattr(node, conv.to_field.name))]
else:
params = [node._get_pk_value()]
elif (isclass(node) and issubclass(node, Model)) or \
isinstance(node, ModelAlias):
entity = node.as_entity().alias(alias_map[node])
sql, params = self.parse_node(entity, alias_map, conv)
else:
sql, params = self._parse_default(node, alias_map, conv)
unknown = True
return sql, params, unknown
def parse_node(self, node, alias_map=None, conv=None):
sql, params, unknown = self._parse(node, alias_map, conv)
if unknown and conv and params:
params = [conv.db_value(i) for i in params]
if isinstance(node, Node):
if node._negated:
sql = 'NOT %s' % sql
if node._alias:
sql = ' '.join((sql, 'AS', node._alias))
if node._ordering:
sql = ' '.join((sql, node._ordering))
return sql, params
def parse_node_list(self, nodes, alias_map, conv=None, glue=', '):
sql = []
params = []
for node in nodes:
node_sql, node_params = self.parse_node(node, alias_map, conv)
sql.append(node_sql)
params.extend(node_params)
return glue.join(sql), params
def calculate_alias_map(self, query, alias_map=None):
new_map = self.alias_map_class()
if alias_map is not None:
new_map._counter = alias_map._counter
new_map.add(query.model_class, query.model_class._meta.table_alias)
for src_model, joined_models in query._joins.items():
new_map.add(src_model, src_model._meta.table_alias)
for join_obj in joined_models:
if isinstance(join_obj.dest, Node):
new_map.add(join_obj.dest, join_obj.dest.alias)
else:
new_map.add(join_obj.dest, join_obj.dest._meta.table_alias)
return new_map.update(alias_map)
def build_query(self, clauses, alias_map=None):
return self.parse_node(Clause(*clauses), alias_map)
def generate_joins(self, joins, model_class, alias_map):
        # Joins are implemented as an adjacency-list graph. Perform a
# depth-first search of the graph to generate all the necessary JOINs.
clauses = []
seen = set()
q = [model_class]
while q:
curr = q.pop()
if curr not in joins or curr in seen:
continue
seen.add(curr)
for join in joins[curr]:
src = curr
dest = join.dest
if isinstance(join.on, (Expression, Func, Clause, Entity)):
# Clear any alias on the join expression.
constraint = join.on.clone().alias()
else:
metadata = join.metadata
if metadata.is_backref:
fk_model = join.dest
pk_model = join.src
else:
fk_model = join.src
pk_model = join.dest
fk = metadata.foreign_key
if fk:
lhs = getattr(fk_model, fk.name)
rhs = getattr(pk_model, fk.to_field.name)
if metadata.is_backref:
lhs, rhs = rhs, lhs
constraint = (lhs == rhs)
else:
raise ValueError('Missing required join predicate.')
if isinstance(dest, Node):
# TODO: ensure alias?
dest_n = dest
else:
q.append(dest)
dest_n = dest.as_entity().alias(alias_map[dest])
join_type = join.get_join_type()
if join_type in self.join_map:
join_sql = SQL(self.join_map[join_type])
else:
join_sql = SQL(join_type)
clauses.append(
Clause(join_sql, dest_n, SQL('ON'), constraint))
return clauses
def generate_select(self, query, alias_map=None):
model = query.model_class
db = model._meta.database
alias_map = self.calculate_alias_map(query, alias_map)
if isinstance(query, CompoundSelect):
clauses = [_StripParens(query)]
else:
if not query._distinct:
clauses = [SQL('SELECT')]
else:
clauses = [SQL('SELECT DISTINCT')]
if query._distinct not in (True, False):
clauses += [SQL('ON'), EnclosedClause(*query._distinct)]
select_clause = Clause(*query._select)
select_clause.glue = ', '
clauses.extend((select_clause, SQL('FROM')))
if query._from is None:
clauses.append(model.as_entity().alias(alias_map[model]))
else:
clauses.append(CommaClause(*query._from))
if query._windows is not None:
clauses.append(SQL('WINDOW'))
clauses.append(CommaClause(*[
Clause(
SQL(window._alias),
SQL('AS'),
window.__sql__())
for window in query._windows]))
join_clauses = self.generate_joins(query._joins, model, alias_map)
if join_clauses:
clauses.extend(join_clauses)
if query._where is not None:
clauses.extend([SQL('WHERE'), query._where])
if query._group_by:
clauses.extend([SQL('GROUP BY'), CommaClause(*query._group_by)])
if query._having:
clauses.extend([SQL('HAVING'), query._having])
if query._order_by:
clauses.extend([SQL('ORDER BY'), CommaClause(*query._order_by)])
if query._limit or (query._offset and db.limit_max):
limit = query._limit or db.limit_max
clauses.append(SQL('LIMIT %s' % limit))
if query._offset:
clauses.append(SQL('OFFSET %s' % query._offset))
for_update, no_wait = query._for_update
if for_update:
stmt = 'FOR UPDATE NOWAIT' if no_wait else 'FOR UPDATE'
clauses.append(SQL(stmt))
return self.build_query(clauses, alias_map)
def generate_update(self, query):
model = query.model_class
alias_map = self.alias_map_class()
alias_map.add(model, model._meta.db_table)
if query._on_conflict:
statement = 'UPDATE OR %s' % query._on_conflict
else:
statement = 'UPDATE'
clauses = [SQL(statement), model.as_entity(), SQL('SET')]
update = []
for field, value in self._sorted_fields(query._update):
if not isinstance(value, (Node, Model)):
value = Param(value, conv=field.db_value)
update.append(Expression(
field.as_entity(with_table=False),
OP.EQ,
value,
flat=True)) # No outer parens, no table alias.
clauses.append(CommaClause(*update))
if query._where:
clauses.extend([SQL('WHERE'), query._where])
if query._returning is not None:
returning_clause = Clause(*query._returning)
returning_clause.glue = ', '
clauses.extend([SQL('RETURNING'), returning_clause])
return self.build_query(clauses, alias_map)
def _get_field_clause(self, fields, clause_type=EnclosedClause):
return clause_type(*[
field.as_entity(with_table=False) for field in fields])
def generate_insert(self, query):
model = query.model_class
meta = model._meta
alias_map = self.alias_map_class()
alias_map.add(model, model._meta.db_table)
if query._upsert:
statement = 'INSERT OR REPLACE INTO'
elif query._on_conflict:
statement = 'INSERT OR %s INTO' % query._on_conflict
else:
statement = 'INSERT INTO'
clauses = [SQL(statement), model.as_entity()]
if query._query is not None:
# This INSERT query is of the form INSERT INTO ... SELECT FROM.
if query._fields:
clauses.append(self._get_field_clause(query._fields))
clauses.append(_StripParens(query._query))
elif query._rows is not None:
fields, value_clauses = [], []
have_fields = False
for row_dict in query._iter_rows():
if not have_fields:
fields = sorted(
row_dict.keys(), key=operator.attrgetter('_sort_key'))
have_fields = True
values = []
for field in fields:
value = row_dict[field]
if not isinstance(value, (Node, Model)):
value = Param(value, conv=field.db_value)
values.append(value)
value_clauses.append(EnclosedClause(*values))
if fields:
clauses.extend([
self._get_field_clause(fields),
SQL('VALUES'),
CommaClause(*value_clauses)])
elif query.model_class._meta.auto_increment:
# Bare insert, use default value for primary key.
clauses.append(query.database.default_insert_clause(
query.model_class))
if query.is_insert_returning:
clauses.extend([
SQL('RETURNING'),
self._get_field_clause(
meta.get_primary_key_fields(),
clause_type=CommaClause)])
elif query._returning is not None:
returning_clause = Clause(*query._returning)
returning_clause.glue = ', '
clauses.extend([SQL('RETURNING'), returning_clause])
return self.build_query(clauses, alias_map)
def generate_delete(self, query):
model = query.model_class
clauses = [SQL('DELETE FROM'), model.as_entity()]
if query._where:
clauses.extend([SQL('WHERE'), query._where])
if query._returning is not None:
returning_clause = Clause(*query._returning)
returning_clause.glue = ', '
clauses.extend([SQL('RETURNING'), returning_clause])
return self.build_query(clauses)
def field_definition(self, field):
column_type = self.get_column_type(field.get_db_field())
ddl = field.__ddl__(column_type)
return Clause(*ddl)
def foreign_key_constraint(self, field):
ddl = [
SQL('FOREIGN KEY'),
EnclosedClause(field.as_entity()),
SQL('REFERENCES'),
field.rel_model.as_entity(),
EnclosedClause(field.to_field.as_entity())]
if field.on_delete:
ddl.append(SQL('ON DELETE %s' % field.on_delete))
if field.on_update:
ddl.append(SQL('ON UPDATE %s' % field.on_update))
return Clause(*ddl)
def return_parsed_node(function_name):
# TODO: treat all `generate_` functions as returning clauses, instead
# of SQL/params.
def inner(self, *args, **kwargs):
fn = getattr(self, function_name)
return self.parse_node(fn(*args, **kwargs))
return inner
def _create_foreign_key(self, model_class, field, constraint=None):
constraint = constraint or 'fk_%s_%s_refs_%s' % (
model_class._meta.db_table,
field.db_column,
field.rel_model._meta.db_table)
fk_clause = self.foreign_key_constraint(field)
return Clause(
SQL('ALTER TABLE'),
model_class.as_entity(),
SQL('ADD CONSTRAINT'),
Entity(constraint),
*fk_clause.nodes)
create_foreign_key = return_parsed_node('_create_foreign_key')
def _create_table(self, model_class, safe=False):
statement = 'CREATE TABLE IF NOT EXISTS' if safe else 'CREATE TABLE'
meta = model_class._meta
columns, constraints = [], []
if meta.composite_key:
pk_cols = [meta.fields[f].as_entity()
for f in meta.primary_key.field_names]
constraints.append(Clause(
SQL('PRIMARY KEY'), EnclosedClause(*pk_cols)))
for field in meta.get_fields():
columns.append(self.field_definition(field))
if isinstance(field, ForeignKeyField) and not field.deferred:
constraints.append(self.foreign_key_constraint(field))
return Clause(
SQL(statement),
model_class.as_entity(),
EnclosedClause(*(columns + constraints)))
create_table = return_parsed_node('_create_table')
def _drop_table(self, model_class, fail_silently=False, cascade=False):
statement = 'DROP TABLE IF EXISTS' if fail_silently else 'DROP TABLE'
ddl = [SQL(statement), model_class.as_entity()]
if cascade:
ddl.append(SQL('CASCADE'))
return Clause(*ddl)
drop_table = return_parsed_node('_drop_table')
def index_name(self, table, columns):
index = '%s_%s' % (table, '_'.join(columns))
if len(index) > 64:
index_hash = hashlib.md5(index.encode('utf-8')).hexdigest()
index = '%s_%s' % (table, index_hash)
return index
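    # Illustrative sketch: index names are derived from table and columns,
    # falling back to an md5 digest when the combined name exceeds 64 chars.
    #
    #     compiler = QueryCompiler()
    #     compiler.index_name('tweet', ['user_id'])  # -> 'tweet_user_id'
    #     compiler.index_name('t', ['c' * 70])       # -> 't_<md5 hexdigest>'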
def _create_index(self, model_class, fields, unique, *extra):
tbl_name = model_class._meta.db_table
statement = 'CREATE UNIQUE INDEX' if unique else 'CREATE INDEX'
index_name = self.index_name(tbl_name, [f.db_column for f in fields])
return Clause(
SQL(statement),
Entity(index_name),
SQL('ON'),
model_class.as_entity(),
EnclosedClause(*[field.as_entity() for field in fields]),
*extra)
create_index = return_parsed_node('_create_index')
def _create_sequence(self, sequence_name):
return Clause(SQL('CREATE SEQUENCE'), Entity(sequence_name))
create_sequence = return_parsed_node('_create_sequence')
def _drop_sequence(self, sequence_name):
return Clause(SQL('DROP SEQUENCE'), Entity(sequence_name))
drop_sequence = return_parsed_node('_drop_sequence')
class ResultIterator(object):
def __init__(self, qrw):
self.qrw = qrw
self._idx = 0
def next(self):
if self._idx < self.qrw._ct:
obj = self.qrw._result_cache[self._idx]
elif not self.qrw._populated:
obj = self.qrw.iterate()
self.qrw._result_cache.append(obj)
self.qrw._ct += 1
else:
raise StopIteration
self._idx += 1
return obj
__next__ = next
class QueryResultWrapper(object):
"""
Provides an iterator over the results of a raw Query, additionally doing
two things:
- converts rows from the database into python representations
- ensures that multiple iterations do not result in multiple queries
"""
def __init__(self, model, cursor, meta=None):
self.model = model
self.cursor = cursor
self._ct = 0
self._idx = 0
self._result_cache = []
self._populated = False
self._initialized = False
if meta is not None:
self.column_meta, self.join_meta = meta
else:
self.column_meta = self.join_meta = None
def __iter__(self):
if self._populated:
return iter(self._result_cache)
else:
return ResultIterator(self)
@property
def count(self):
self.fill_cache()
return self._ct
def process_row(self, row):
return row
def iterate(self):
row = self.cursor.fetchone()
if not row:
self._populated = True
if not getattr(self.cursor, 'name', None):
self.cursor.close()
raise StopIteration
elif not self._initialized:
self.initialize(self.cursor.description)
self._initialized = True
return self.process_row(row)
def iterator(self):
while True:
yield self.iterate()
def next(self):
if self._idx < self._ct:
inst = self._result_cache[self._idx]
self._idx += 1
return inst
elif self._populated:
raise StopIteration
obj = self.iterate()
self._result_cache.append(obj)
self._ct += 1
self._idx += 1
return obj
__next__ = next
def fill_cache(self, n=None):
n = n or float('Inf')
if n < 0:
raise ValueError('Negative values are not supported.')
self._idx = self._ct
while not self._populated and (n > self._ct):
try:
self.next()
except StopIteration:
break
class ExtQueryResultWrapper(QueryResultWrapper):
def initialize(self, description):
model = self.model
conv = []
identity = lambda x: x
for i in range(len(description)):
func = identity
column = description[i][0]
found = False
if self.column_meta is not None:
try:
select_column = self.column_meta[i]
except IndexError:
pass
else:
if isinstance(select_column, Field):
func = select_column.python_value
column = select_column._alias or select_column.name
found = True
elif (isinstance(select_column, Func) and
len(select_column.arguments) and
isinstance(select_column.arguments[0], Field)):
if select_column._coerce:
                            # Special-case handling for aggregations.
func = select_column.arguments[0].python_value
found = True
if not found and column in model._meta.columns:
field_obj = model._meta.columns[column]
column = field_obj.name
func = field_obj.python_value
conv.append((i, column, func))
self.conv = conv
class TuplesQueryResultWrapper(ExtQueryResultWrapper):
def process_row(self, row):
return tuple([self.conv[i][2](col) for i, col in enumerate(row)])
class NaiveQueryResultWrapper(ExtQueryResultWrapper):
def process_row(self, row):
instance = self.model()
for i, column, func in self.conv:
setattr(instance, column, func(row[i]))
instance._prepare_instance()
return instance
class DictQueryResultWrapper(ExtQueryResultWrapper):
def process_row(self, row):
res = {}
for i, column, func in self.conv:
res[column] = func(row[i])
return res
class ModelQueryResultWrapper(QueryResultWrapper):
def initialize(self, description):
self.column_map, model_set = self.generate_column_map()
self.join_list = self.generate_join_list(model_set)
def generate_column_map(self):
column_map = []
models = set([self.model])
for i, node in enumerate(self.column_meta):
attr = conv = None
if isinstance(node, Field):
if isinstance(node, FieldProxy):
key = node._model_alias
constructor = node.model
conv = node.field_instance.python_value
else:
key = constructor = node.model_class
conv = node.python_value
attr = node._alias or node.name
else:
if node._bind_to is None:
key = constructor = self.model
else:
key = constructor = node._bind_to
if isinstance(node, Node) and node._alias:
attr = node._alias
elif isinstance(node, Entity):
attr = node.path[-1]
column_map.append((key, constructor, attr, conv))
models.add(key)
return column_map, models
def generate_join_list(self, models):
join_list = []
joins = self.join_meta
stack = [self.model]
while stack:
current = stack.pop()
if current not in joins:
continue
for join in joins[current]:
metadata = join.metadata
if metadata.dest in models or metadata.dest_model in models:
join_list.append(metadata)
stack.append(join.dest)
return join_list
def process_row(self, row):
collected = self.construct_instances(row)
instances = self.follow_joins(collected)
for i in instances:
i._prepare_instance()
return instances[0]
def construct_instances(self, row, keys=None):
collected_models = {}
for i, (key, constructor, attr, conv) in enumerate(self.column_map):
if keys is not None and key not in keys:
continue
value = row[i]
if key not in collected_models:
collected_models[key] = constructor()
instance = collected_models[key]
if attr is None:
attr = self.cursor.description[i][0]
if conv is not None:
value = conv(value)
setattr(instance, attr, value)
return collected_models
def follow_joins(self, collected):
prepared = [collected[self.model]]
for metadata in self.join_list:
inst = collected[metadata.src]
try:
joined_inst = collected[metadata.dest]
except KeyError:
joined_inst = collected[metadata.dest_model]
# Can we populate a value on the joined instance using the current?
mpk = metadata.primary_key is not None
can_populate_joined_pk = (
mpk and
(metadata.attr in inst._data) and
(getattr(joined_inst, metadata.primary_key.name) is None))
if can_populate_joined_pk:
setattr(
joined_inst,
metadata.primary_key.name,
inst._data[metadata.attr])
if metadata.is_backref:
can_populate_joined_fk = (
mpk and
(metadata.foreign_key is not None) and
(getattr(inst, metadata.primary_key.name) is not None) and
(joined_inst._data.get(metadata.foreign_key.name) is None))
if can_populate_joined_fk:
setattr(
joined_inst,
metadata.foreign_key.name,
inst)
setattr(inst, metadata.attr, joined_inst)
prepared.append(joined_inst)
return prepared
JoinCache = namedtuple('JoinCache', ('metadata', 'attr'))
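# AggregateQueryResultWrapper backs SelectQuery.aggregate_rows(): adjacent
# rows that duplicate the "one" side of a one-to-many join are collapsed
# into a single instance whose back-reference attribute accumulates the
# "many" side.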
class AggregateQueryResultWrapper(ModelQueryResultWrapper):
def __init__(self, *args, **kwargs):
self._row = []
super(AggregateQueryResultWrapper, self).__init__(*args, **kwargs)
def initialize(self, description):
super(AggregateQueryResultWrapper, self).initialize(description)
# Collect the set of all models (and ModelAlias objects) queried.
self.all_models = set()
for key, _, _, _ in self.column_map:
self.all_models.add(key)
# Prepare data structures for analyzing unique rows. Also cache
# foreign key and attribute names for joined models.
self.models_with_aggregate = set()
self.back_references = {}
self.source_to_dest = {}
self.dest_to_source = {}
for metadata in self.join_list:
if metadata.is_backref:
att_name = metadata.foreign_key.related_name
else:
att_name = metadata.attr
is_backref = metadata.is_backref or metadata.is_self_join
if is_backref:
self.models_with_aggregate.add(metadata.src)
else:
self.dest_to_source.setdefault(metadata.dest, set())
self.dest_to_source[metadata.dest].add(metadata.src)
self.source_to_dest.setdefault(metadata.src, {})
self.source_to_dest[metadata.src][metadata.dest] = JoinCache(
metadata=metadata,
attr=metadata.alias or att_name)
# Determine which columns could contain "duplicate" data, e.g. if
# getting Users and their Tweets, this would be the User columns.
self.columns_to_compare = {}
key_to_columns = {}
for idx, (key, model_class, col_name, _) in enumerate(self.column_map):
if key in self.models_with_aggregate:
self.columns_to_compare.setdefault(key, [])
self.columns_to_compare[key].append((idx, col_name))
key_to_columns.setdefault(key, [])
key_to_columns[key].append((idx, col_name))
# Also compare columns for joins -> many-related model.
for model_or_alias in self.models_with_aggregate:
if model_or_alias not in self.columns_to_compare:
continue
sources = self.dest_to_source.get(model_or_alias, ())
for joined_model in sources:
self.columns_to_compare[model_or_alias].extend(
key_to_columns[joined_model])
def read_model_data(self, row):
models = {}
for model_class, column_data in self.columns_to_compare.items():
models[model_class] = []
for idx, col_name in column_data:
models[model_class].append(row[idx])
return models
def iterate(self):
if self._row:
row = self._row.pop()
else:
row = self.cursor.fetchone()
if not row:
self._populated = True
if not getattr(self.cursor, 'name', None):
self.cursor.close()
raise StopIteration
elif not self._initialized:
self.initialize(self.cursor.description)
self._initialized = True
def _get_pk(instance):
if instance._meta.composite_key:
return tuple([
instance._data[field_name]
for field_name in instance._meta.primary_key.field_names])
return instance._get_pk_value()
identity_map = {}
_constructed = self.construct_instances(row)
primary_instance = _constructed[self.model]
for model_or_alias, instance in _constructed.items():
identity_map[model_or_alias] = OrderedDict()
identity_map[model_or_alias][_get_pk(instance)] = instance
model_data = self.read_model_data(row)
while True:
cur_row = self.cursor.fetchone()
if cur_row is None:
break
duplicate_models = set()
cur_row_data = self.read_model_data(cur_row)
for model_class, data in cur_row_data.items():
if model_data[model_class] == data:
duplicate_models.add(model_class)
if not duplicate_models:
self._row.append(cur_row)
break
different_models = self.all_models - duplicate_models
new_instances = self.construct_instances(cur_row, different_models)
for model_or_alias, instance in new_instances.items():
                # Skip instances whose columns are all NULL (the row carried
                # no data for that model, e.g. from an outer join).
all_none = True
for value in instance._data.values():
if value is not None:
all_none = False
if not all_none:
identity_map[model_or_alias][_get_pk(instance)] = instance
stack = [self.model]
instances = [primary_instance]
while stack:
current = stack.pop()
if current not in self.join_meta:
continue
for join in self.join_meta[current]:
try:
metadata, attr = self.source_to_dest[current][join.dest]
except KeyError:
continue
if metadata.is_backref or metadata.is_self_join:
for instance in identity_map[current].values():
setattr(instance, attr, [])
if join.dest not in identity_map:
continue
for pk, inst in identity_map[join.dest].items():
if pk is None:
continue
try:
# XXX: if no FK exists, unable to join.
joined_inst = identity_map[current][
inst._data[metadata.foreign_key.name]]
except KeyError:
continue
getattr(joined_inst, attr).append(inst)
instances.append(inst)
elif attr:
if join.dest not in identity_map:
continue
for pk, instance in identity_map[current].items():
# XXX: if no FK exists, unable to join.
joined_inst = identity_map[join.dest][
instance._data[metadata.foreign_key.name]]
setattr(
instance,
metadata.foreign_key.name,
joined_inst)
instances.append(joined_inst)
stack.append(join.dest)
for instance in instances:
instance._prepare_instance()
return primary_instance
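# Usage sketch for aggregate rows (User/Tweet are hypothetical models with
# Tweet.user = ForeignKeyField(User, related_name='tweets')):
#
#     query = (User
#              .select(User, Tweet)
#              .join(Tweet, JOIN_LEFT_OUTER)
#              .order_by(User.username, Tweet.id)
#              .aggregate_rows())
#     for user in query:
#         print(user.username, [tweet.message for tweet in user.tweets])
#
# The ORDER BY matters: rows for the same user must be adjacent for the
# duplicate detection above to collapse them.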
class Query(Node):
"""Base class representing a database query on one or more tables."""
require_commit = True
def __init__(self, model_class):
super(Query, self).__init__()
self.model_class = model_class
self.database = model_class._meta.database
self._dirty = True
self._query_ctx = model_class
self._joins = {self.model_class: []} # Join graph as adjacency list.
self._where = None
def __repr__(self):
sql, params = self.sql()
return '%s %s %s' % (self.model_class, sql, params)
def clone(self):
query = type(self)(self.model_class)
query.database = self.database
return self._clone_attributes(query)
def _clone_attributes(self, query):
if self._where is not None:
query._where = self._where.clone()
query._joins = self._clone_joins()
query._query_ctx = self._query_ctx
return query
def _clone_joins(self):
return dict(
(mc, list(j)) for mc, j in self._joins.items())
def _add_query_clauses(self, initial, expressions, conjunction=None):
reduced = reduce(operator.and_, expressions)
if initial is None:
return reduced
conjunction = conjunction or operator.and_
return conjunction(initial, reduced)
def _model_shorthand(self, args):
accum = []
for arg in args:
if isinstance(arg, Node):
accum.append(arg)
elif isinstance(arg, Query):
accum.append(arg)
elif isinstance(arg, ModelAlias):
accum.extend(arg.get_proxy_fields())
elif isclass(arg) and issubclass(arg, Model):
accum.extend(arg._meta.get_fields())
return accum
@returns_clone
def where(self, *expressions):
self._where = self._add_query_clauses(self._where, expressions)
@returns_clone
def orwhere(self, *expressions):
self._where = self._add_query_clauses(
self._where, expressions, operator.or_)
@returns_clone
def join(self, dest, join_type=None, on=None):
src = self._query_ctx
if not on:
require_join_condition = (
isinstance(dest, SelectQuery) or
(isclass(dest) and not src._meta.rel_exists(dest)))
if require_join_condition:
raise ValueError('A join condition must be specified.')
elif isinstance(on, basestring):
on = src._meta.fields[on]
self._joins.setdefault(src, [])
self._joins[src].append(Join(src, dest, join_type, on))
if not isinstance(dest, SelectQuery):
self._query_ctx = dest
@returns_clone
def switch(self, model_class=None):
"""Change or reset the query context."""
self._query_ctx = model_class or self.model_class
def ensure_join(self, lm, rm, on=None):
ctx = self._query_ctx
for join in self._joins.get(lm, []):
if join.dest == rm:
return self
return self.switch(lm).join(rm, on=on).switch(ctx)
def convert_dict_to_node(self, qdict):
accum = []
joins = []
relationship = (ForeignKeyField, ReverseRelationDescriptor)
for key, value in sorted(qdict.items()):
curr = self.model_class
if '__' in key and key.rsplit('__', 1)[1] in DJANGO_MAP:
key, op = key.rsplit('__', 1)
op = DJANGO_MAP[op]
else:
op = OP.EQ
for piece in key.split('__'):
model_attr = getattr(curr, piece)
if isinstance(model_attr, relationship):
curr = model_attr.rel_model
joins.append(model_attr)
accum.append(Expression(model_attr, op, value))
return accum, joins
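    # filter() below accepts Django-style keyword lookups. Sketch (assuming
    # a Tweet model with a `user` foreign key to User):
    #
    #     Tweet.filter(user__username='charlie', id__gt=100)
    #
    # Double-underscore segments traverse relations; a trailing segment that
    # appears in DJANGO_MAP ('gt' above) selects the comparison operator.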
def filter(self, *args, **kwargs):
# normalize args and kwargs into a new expression
dq_node = Node()
if args:
dq_node &= reduce(operator.and_, [a.clone() for a in args])
if kwargs:
dq_node &= DQ(**kwargs)
# dq_node should now be an Expression, lhs = Node(), rhs = ...
q = deque([dq_node])
dq_joins = set()
while q:
curr = q.popleft()
if not isinstance(curr, Expression):
continue
for side, piece in (('lhs', curr.lhs), ('rhs', curr.rhs)):
if isinstance(piece, DQ):
query, joins = self.convert_dict_to_node(piece.query)
dq_joins.update(joins)
expression = reduce(operator.and_, query)
# Apply values from the DQ object.
expression._negated = piece._negated
expression._alias = piece._alias
setattr(curr, side, expression)
else:
q.append(piece)
dq_node = dq_node.rhs
query = self.clone()
for field in dq_joins:
if isinstance(field, ForeignKeyField):
lm, rm = field.model_class, field.rel_model
field_obj = field
elif isinstance(field, ReverseRelationDescriptor):
lm, rm = field.field.rel_model, field.rel_model
field_obj = field.field
query = query.ensure_join(lm, rm, field_obj)
return query.where(dq_node)
def compiler(self):
return self.database.compiler()
def sql(self):
raise NotImplementedError
def _execute(self):
sql, params = self.sql()
return self.database.execute_sql(sql, params, self.require_commit)
def execute(self):
raise NotImplementedError
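    # scalar() is a convenience for single-value queries. Sketch (User is a
    # hypothetical model):
    #
    #     num_users = User.select(fn.Count(User.id)).scalar()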
def scalar(self, as_tuple=False, convert=False):
if convert:
row = self.tuples().first()
else:
row = self._execute().fetchone()
if row and not as_tuple:
return row[0]
else:
return row
class RawQuery(Query):
"""
Execute a SQL query, returning a standard iterable interface that returns
model instances.
"""
def __init__(self, model, query, *params):
self._sql = query
self._params = list(params)
self._qr = None
self._tuples = False
self._dicts = False
super(RawQuery, self).__init__(model)
def clone(self):
query = RawQuery(self.model_class, self._sql, *self._params)
query._tuples = self._tuples
query._dicts = self._dicts
return query
join = not_allowed('joining')
where = not_allowed('where')
switch = not_allowed('switch')
@returns_clone
def tuples(self, tuples=True):
self._tuples = tuples
@returns_clone
def dicts(self, dicts=True):
self._dicts = dicts
def sql(self):
return self._sql, self._params
def execute(self):
if self._qr is None:
if self._tuples:
ResultWrapper = TuplesQueryResultWrapper
elif self._dicts:
ResultWrapper = DictQueryResultWrapper
else:
ResultWrapper = NaiveQueryResultWrapper
self._qr = ResultWrapper(self.model_class, self._execute(), None)
return self._qr
def __iter__(self):
return iter(self.execute())
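# RawQuery usage sketch (User is a hypothetical model; the placeholder
# character follows the database's `interpolation` attribute):
#
#     for user in User.raw('SELECT * FROM user WHERE username = ?', 'huey'):
#         print(user.username)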
class SelectQuery(Query):
_node_type = 'select_query'
def __init__(self, model_class, *selection):
super(SelectQuery, self).__init__(model_class)
self.require_commit = self.database.commit_select
self.__select(*selection)
self._from = None
self._group_by = None
self._having = None
self._order_by = None
self._windows = None
self._limit = None
self._offset = None
self._distinct = False
self._for_update = (False, False)
self._naive = False
self._tuples = False
self._dicts = False
self._aggregate_rows = False
self._alias = None
self._qr = None
def _clone_attributes(self, query):
query = super(SelectQuery, self)._clone_attributes(query)
query._explicit_selection = self._explicit_selection
query._select = list(self._select)
if self._from is not None:
query._from = []
for f in self._from:
if isinstance(f, Node):
query._from.append(f.clone())
else:
query._from.append(f)
if self._group_by is not None:
query._group_by = list(self._group_by)
if self._having:
query._having = self._having.clone()
if self._order_by is not None:
query._order_by = list(self._order_by)
if self._windows is not None:
query._windows = list(self._windows)
query._limit = self._limit
query._offset = self._offset
query._distinct = self._distinct
query._for_update = self._for_update
query._naive = self._naive
query._tuples = self._tuples
query._dicts = self._dicts
query._aggregate_rows = self._aggregate_rows
query._alias = self._alias
return query
def compound_op(operator):
def inner(self, other):
supported_ops = self.model_class._meta.database.compound_operations
if operator not in supported_ops:
raise ValueError(
'Your database does not support %s' % operator)
return CompoundSelect(self.model_class, self, operator, other)
return inner
_compound_op_static = staticmethod(compound_op)
__or__ = compound_op('UNION')
__and__ = compound_op('INTERSECT')
__sub__ = compound_op('EXCEPT')
def __xor__(self, rhs):
# Symmetric difference, should just be (self | rhs) - (self & rhs)...
wrapped_rhs = self.model_class.select(SQL('*')).from_(
EnclosedClause((self & rhs)).alias('_')).order_by()
return (self | rhs) - wrapped_rhs
def union_all(self, rhs):
return SelectQuery._compound_op_static('UNION ALL')(self, rhs)
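    # Compound selects compose with operators. Sketch:
    #
    #     lhs = User.select().where(User.username << ['huey', 'mickey'])
    #     rhs = User.select().where(User.username << ['mickey', 'zaizee'])
    #     union = lhs | rhs       # UNION
    #     both = lhs & rhs        # INTERSECT
    #
    # Operators missing from the database's `compound_operations` raise
    # ValueError.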
def __select(self, *selection):
self._explicit_selection = len(selection) > 0
selection = selection or self.model_class._meta.get_fields()
self._select = self._model_shorthand(selection)
select = returns_clone(__select)
@returns_clone
def from_(self, *args):
self._from = None
if args:
self._from = list(args)
@returns_clone
def group_by(self, *args):
self._group_by = self._model_shorthand(args)
@returns_clone
def having(self, *expressions):
self._having = self._add_query_clauses(self._having, expressions)
@returns_clone
def order_by(self, *args):
self._order_by = list(args)
@returns_clone
def window(self, *windows):
self._windows = list(windows)
@returns_clone
def limit(self, lim):
self._limit = lim
@returns_clone
def offset(self, off):
self._offset = off
@returns_clone
def paginate(self, page, paginate_by=20):
if page > 0:
page -= 1
self._limit = paginate_by
self._offset = page * paginate_by
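    # paginate() is 1-indexed: .paginate(3, 10) emits LIMIT 10 OFFSET 20.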
@returns_clone
def distinct(self, is_distinct=True):
self._distinct = is_distinct
@returns_clone
def for_update(self, for_update=True, nowait=False):
self._for_update = (for_update, nowait)
@returns_clone
def naive(self, naive=True):
self._naive = naive
@returns_clone
def tuples(self, tuples=True):
self._tuples = tuples
@returns_clone
def dicts(self, dicts=True):
self._dicts = dicts
@returns_clone
def aggregate_rows(self, aggregate_rows=True):
self._aggregate_rows = aggregate_rows
@returns_clone
def alias(self, alias=None):
self._alias = alias
def annotate(self, rel_model, annotation=None):
if annotation is None:
annotation = fn.Count(rel_model._meta.primary_key).alias('count')
if self._query_ctx == rel_model:
query = self.switch(self.model_class)
else:
query = self.clone()
query = query.ensure_join(query._query_ctx, rel_model)
if not query._group_by:
query._group_by = [x.alias() for x in query._select]
query._select = tuple(query._select) + (annotation,)
return query
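    # annotate() usage sketch (Tweet has a foreign key to User):
    #
    #     for user in User.select().annotate(Tweet):
    #         print(user.username, user.count)
    #
    # The default annotation is COUNT() of the related model's primary key,
    # aliased to 'count', with the query grouped by the selected columns.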
def _aggregate(self, aggregation=None):
if aggregation is None:
aggregation = fn.Count(SQL('*'))
query = self.order_by()
query._select = [aggregation]
return query
def aggregate(self, aggregation=None, convert=True):
return self._aggregate(aggregation).scalar(convert=convert)
def count(self, clear_limit=False):
if self._distinct or self._group_by or self._limit or self._offset:
return self.wrapped_count(clear_limit=clear_limit)
# defaults to a count() of the primary key
return self.aggregate(convert=False) or 0
def wrapped_count(self, clear_limit=False):
clone = self.order_by()
if clear_limit:
clone._limit = clone._offset = None
sql, params = clone.sql()
wrapped = 'SELECT COUNT(1) FROM (%s) AS wrapped_select' % sql
rq = self.model_class.raw(wrapped, *params)
return rq.scalar() or 0
def exists(self):
clone = self.paginate(1, 1)
clone._select = [SQL('1')]
return bool(clone.scalar())
def get(self):
clone = self.paginate(1, 1)
try:
return clone.execute().next()
except StopIteration:
raise self.model_class.DoesNotExist(
'Instance matching query does not exist:\nSQL: %s\nPARAMS: %s'
% self.sql())
def first(self):
res = self.execute()
res.fill_cache(1)
try:
return res._result_cache[0]
except IndexError:
pass
def sql(self):
return self.compiler().generate_select(self)
def verify_naive(self):
model_class = self.model_class
for node in self._select:
if isinstance(node, Field) and node.model_class != model_class:
return False
elif isinstance(node, Node) and node._bind_to is not None:
if node._bind_to != model_class:
return False
return True
def get_query_meta(self):
return (self._select, self._joins)
def _get_result_wrapper(self):
if self._tuples:
return TuplesQueryResultWrapper
elif self._dicts:
return DictQueryResultWrapper
elif self._naive or not self._joins or self.verify_naive():
return NaiveQueryResultWrapper
elif self._aggregate_rows:
return AggregateQueryResultWrapper
else:
return ModelQueryResultWrapper
def execute(self):
if self._dirty or self._qr is None:
model_class = self.model_class
query_meta = self.get_query_meta()
ResultWrapper = self._get_result_wrapper()
self._qr = ResultWrapper(model_class, self._execute(), query_meta)
self._dirty = False
return self._qr
else:
return self._qr
def __iter__(self):
return iter(self.execute())
def iterator(self):
return iter(self.execute().iterator())
def __getitem__(self, value):
res = self.execute()
if isinstance(value, slice):
index = value.stop
else:
index = value
if index is not None and index >= 0:
index += 1
res.fill_cache(index)
return res._result_cache[value]
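    # Indexing and slicing fill the result cache on demand. Sketch:
    #
    #     page = User.select()[:10]    # list of up to ten instances
    #     fifth = User.select()[4]     # single instance (IndexError if absent)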
if PY3:
def __hash__(self):
return id(self)
class CompoundSelect(SelectQuery):
_node_type = 'compound_select_query'
def __init__(self, model_class, lhs=None, operator=None, rhs=None):
self.lhs = lhs
self.operator = operator
self.rhs = rhs
super(CompoundSelect, self).__init__(model_class, [])
def _clone_attributes(self, query):
query = super(CompoundSelect, self)._clone_attributes(query)
query.lhs = self.lhs
query.operator = self.operator
query.rhs = self.rhs
return query
def get_query_meta(self):
return self.lhs.get_query_meta()
def verify_naive(self):
return self.lhs.verify_naive() and self.rhs.verify_naive()
def _get_result_wrapper(self):
if self._tuples:
return TuplesQueryResultWrapper
elif self._dicts:
return DictQueryResultWrapper
elif self._aggregate_rows:
return AggregateQueryResultWrapper
has_joins = self.lhs._joins or self.rhs._joins
is_naive = self.lhs._naive or self.rhs._naive or self._naive
if is_naive or not has_joins or self.verify_naive():
return NaiveQueryResultWrapper
else:
return ModelQueryResultWrapper
class _WriteQuery(Query):
def __init__(self, model_class):
self._returning = None
self._tuples = False
self._dicts = False
self._qr = None
super(_WriteQuery, self).__init__(model_class)
def _clone_attributes(self, query):
query = super(_WriteQuery, self)._clone_attributes(query)
if self._returning:
query._returning = list(self._returning)
query._tuples = self._tuples
query._dicts = self._dicts
return query
def requires_returning(method):
def inner(self, *args, **kwargs):
db = self.model_class._meta.database
if not db.returning_clause:
raise ValueError('RETURNING is not supported by your '
'database: %s' % type(db))
return method(self, *args, **kwargs)
return inner
@requires_returning
@returns_clone
def returning(self, *selection):
if len(selection) == 1 and selection[0] is None:
self._returning = None
else:
if not selection:
selection = self.model_class._meta.get_fields()
self._returning = self._model_shorthand(selection)
@requires_returning
@returns_clone
def tuples(self, tuples=True):
self._tuples = tuples
@requires_returning
@returns_clone
def dicts(self, dicts=True):
self._dicts = dicts
def get_result_wrapper(self):
if self._returning is not None:
if self._tuples:
return TuplesQueryResultWrapper
elif self._dicts:
return DictQueryResultWrapper
return NaiveQueryResultWrapper
def _execute_with_result_wrapper(self):
ResultWrapper = self.get_result_wrapper()
meta = (self._returning, {self.model_class: []})
self._qr = ResultWrapper(self.model_class, self._execute(), meta)
return self._qr
class UpdateQuery(_WriteQuery):
def __init__(self, model_class, update=None):
self._update = update
self._on_conflict = None
super(UpdateQuery, self).__init__(model_class)
def _clone_attributes(self, query):
query = super(UpdateQuery, self)._clone_attributes(query)
query._update = dict(self._update)
query._on_conflict = self._on_conflict
return query
@returns_clone
def on_conflict(self, action=None):
self._on_conflict = action
join = not_allowed('joining')
def sql(self):
return self.compiler().generate_update(self)
def execute(self):
if self._returning is not None and self._qr is None:
return self._execute_with_result_wrapper()
elif self._qr is not None:
return self._qr
else:
return self.database.rows_affected(self._execute())
def __iter__(self):
if not self.model_class._meta.database.returning_clause:
raise ValueError('UPDATE queries cannot be iterated over unless '
'they specify a RETURNING clause, which is not '
'supported by your database.')
return iter(self.execute())
def iterator(self):
return iter(self.execute().iterator())
class InsertQuery(_WriteQuery):
def __init__(self, model_class, field_dict=None, rows=None,
fields=None, query=None):
super(InsertQuery, self).__init__(model_class)
self._upsert = False
self._is_multi_row_insert = rows is not None or query is not None
self._return_id_list = False
if rows is not None:
self._rows = rows
else:
self._rows = [field_dict or {}]
self._fields = fields
self._query = query
self._on_conflict = None
def _iter_rows(self):
model_meta = self.model_class._meta
valid_fields = (set(model_meta.fields.keys()) |
set(model_meta.fields.values()))
def validate_field(field):
if field not in valid_fields:
raise KeyError('"%s" is not a recognized field.' % field)
defaults = model_meta._default_dict
callables = model_meta._default_callables
for row_dict in self._rows:
field_row = defaults.copy()
seen = set()
for key in row_dict:
validate_field(key)
if key in model_meta.fields:
field = model_meta.fields[key]
else:
field = key
field_row[field] = row_dict[key]
seen.add(field)
if callables:
for field in callables:
if field not in seen:
field_row[field] = callables[field]()
yield field_row
def _clone_attributes(self, query):
query = super(InsertQuery, self)._clone_attributes(query)
query._rows = self._rows
query._upsert = self._upsert
query._is_multi_row_insert = self._is_multi_row_insert
query._fields = self._fields
query._query = self._query
query._return_id_list = self._return_id_list
query._on_conflict = self._on_conflict
return query
join = not_allowed('joining')
where = not_allowed('where clause')
@returns_clone
def upsert(self, upsert=True):
self._upsert = upsert
@returns_clone
def on_conflict(self, action=None):
self._on_conflict = action
@returns_clone
def return_id_list(self, return_id_list=True):
self._return_id_list = return_id_list
@property
def is_insert_returning(self):
if self.database.insert_returning:
if not self._is_multi_row_insert or self._return_id_list:
return True
return False
def sql(self):
return self.compiler().generate_insert(self)
def _insert_with_loop(self):
id_list = []
last_id = None
return_id_list = self._return_id_list
for row in self._rows:
last_id = (InsertQuery(self.model_class, row)
.upsert(self._upsert)
.execute())
if return_id_list:
id_list.append(last_id)
if return_id_list:
return id_list
else:
return last_id
def execute(self):
insert_with_loop = (
self._is_multi_row_insert and
self._query is None and
self._returning is None and
not self.database.insert_many)
if insert_with_loop:
return self._insert_with_loop()
if self._returning is not None and self._qr is None:
return self._execute_with_result_wrapper()
elif self._qr is not None:
return self._qr
else:
cursor = self._execute()
if not self._is_multi_row_insert:
if self.database.insert_returning:
pk_row = cursor.fetchone()
meta = self.model_class._meta
clean_data = [
field.python_value(column)
for field, column
in zip(meta.get_primary_key_fields(), pk_row)]
                    if meta.composite_key:
return clean_data
return clean_data[0]
return self.database.last_insert_id(cursor, self.model_class)
elif self._return_id_list:
                # Return a real list (map() is lazy on Python 3).
                return [row[0] for row in cursor.fetchall()]
else:
return True
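# Bulk-insert usage sketch (User is a hypothetical model):
#
#     rows = [{'username': 'u%d' % i} for i in range(100)]
#     User.insert_many(rows).execute()
#
# When the database lacks multi-row INSERT support, the rows are written one
# at a time via _insert_with_loop() above.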
class DeleteQuery(_WriteQuery):
join = not_allowed('joining')
def sql(self):
return self.compiler().generate_delete(self)
def execute(self):
if self._returning is not None and self._qr is None:
return self._execute_with_result_wrapper()
elif self._qr is not None:
return self._qr
else:
return self.database.rows_affected(self._execute())
IndexMetadata = namedtuple(
'IndexMetadata',
('name', 'sql', 'columns', 'unique', 'table'))
ColumnMetadata = namedtuple(
'ColumnMetadata',
('name', 'data_type', 'null', 'primary_key', 'table'))
ForeignKeyMetadata = namedtuple(
'ForeignKeyMetadata',
('column', 'dest_table', 'dest_column', 'table'))
class PeeweeException(Exception): pass
class ImproperlyConfigured(PeeweeException): pass
class DatabaseError(PeeweeException): pass
class DataError(DatabaseError): pass
class IntegrityError(DatabaseError): pass
class InterfaceError(PeeweeException): pass
class InternalError(DatabaseError): pass
class NotSupportedError(DatabaseError): pass
class OperationalError(DatabaseError): pass
class ProgrammingError(DatabaseError): pass
class ExceptionWrapper(object):
__slots__ = ['exceptions']
def __init__(self, exceptions):
self.exceptions = exceptions
def __enter__(self): pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
return
if exc_type.__name__ in self.exceptions:
new_type = self.exceptions[exc_type.__name__]
if PY26:
exc_args = exc_value
else:
exc_args = exc_value.args
reraise(new_type, new_type(*exc_args), traceback)
class _BaseConnectionLocal(object):
def __init__(self, **kwargs):
super(_BaseConnectionLocal, self).__init__(**kwargs)
self.autocommit = None
self.closed = True
self.conn = None
self.context_stack = []
self.transactions = []
class _ConnectionLocal(_BaseConnectionLocal, threading.local):
pass
class Database(object):
commit_select = False
compiler_class = QueryCompiler
compound_operations = ['UNION', 'INTERSECT', 'EXCEPT', 'UNION ALL']
compound_select_parentheses = False
distinct_on = False
drop_cascade = False
field_overrides = {}
foreign_keys = True
for_update = False
for_update_nowait = False
insert_many = True
insert_returning = False
interpolation = '?'
limit_max = None
op_overrides = {}
quote_char = '"'
reserved_tables = []
returning_clause = False
savepoints = True
sequences = False
subquery_delete_same_table = True
window_functions = False
exceptions = {
'ConstraintError': IntegrityError,
'DatabaseError': DatabaseError,
'DataError': DataError,
'IntegrityError': IntegrityError,
'InterfaceError': InterfaceError,
'InternalError': InternalError,
'NotSupportedError': NotSupportedError,
'OperationalError': OperationalError,
'ProgrammingError': ProgrammingError}
def __init__(self, database, threadlocals=True, autocommit=True,
fields=None, ops=None, autorollback=False, **connect_kwargs):
self.init(database, **connect_kwargs)
if threadlocals:
self.__local = _ConnectionLocal()
else:
self.__local = _BaseConnectionLocal()
self._conn_lock = threading.Lock()
self.autocommit = autocommit
self.autorollback = autorollback
self.field_overrides = merge_dict(self.field_overrides, fields or {})
self.op_overrides = merge_dict(self.op_overrides, ops or {})
def init(self, database, **connect_kwargs):
self.deferred = database is None
self.database = database
self.connect_kwargs = connect_kwargs
def exception_wrapper(self):
return ExceptionWrapper(self.exceptions)
def connect(self):
with self._conn_lock:
if self.deferred:
                raise Exception('Database must be initialized before '
                                'opening a connection.')
with self.exception_wrapper():
self.__local.conn = self._connect(
self.database,
**self.connect_kwargs)
self.__local.closed = False
self.initialize_connection(self.__local.conn)
def initialize_connection(self, conn):
pass
def close(self):
with self._conn_lock:
if self.deferred:
                raise Exception('Database must be initialized before '
                                'closing a connection.')
with self.exception_wrapper():
self._close(self.__local.conn)
self.__local.closed = True
def get_conn(self):
if self.__local.context_stack:
conn = self.__local.context_stack[-1].connection
if conn is not None:
return conn
if self.__local.closed:
self.connect()
return self.__local.conn
def is_closed(self):
return self.__local.closed
def get_cursor(self):
return self.get_conn().cursor()
def _close(self, conn):
conn.close()
def _connect(self, database, **kwargs):
raise NotImplementedError
@classmethod
def register_fields(cls, fields):
cls.field_overrides = merge_dict(cls.field_overrides, fields)
@classmethod
def register_ops(cls, ops):
cls.op_overrides = merge_dict(cls.op_overrides, ops)
def last_insert_id(self, cursor, model):
if model._meta.auto_increment:
return cursor.lastrowid
def rows_affected(self, cursor):
return cursor.rowcount
def sql_error_handler(self, exception, sql, params, require_commit):
return True
def compiler(self):
return self.compiler_class(
self.quote_char, self.interpolation, self.field_overrides,
self.op_overrides)
def execute_sql(self, sql, params=None, require_commit=True):
logger.debug((sql, params))
with self.exception_wrapper():
cursor = self.get_cursor()
try:
cursor.execute(sql, params or ())
except Exception as exc:
if self.get_autocommit() and self.autorollback:
self.rollback()
if self.sql_error_handler(exc, sql, params, require_commit):
raise
else:
if require_commit and self.get_autocommit():
self.commit()
return cursor
def begin(self):
pass
def commit(self):
self.get_conn().commit()
def rollback(self):
self.get_conn().rollback()
def set_autocommit(self, autocommit):
self.__local.autocommit = autocommit
def get_autocommit(self):
if self.__local.autocommit is None:
self.set_autocommit(self.autocommit)
return self.__local.autocommit
def push_execution_context(self, transaction):
self.__local.context_stack.append(transaction)
def pop_execution_context(self):
self.__local.context_stack.pop()
def execution_context_depth(self):
return len(self.__local.context_stack)
def execution_context(self, with_transaction=True):
return ExecutionContext(self, with_transaction=with_transaction)
def push_transaction(self, transaction):
self.__local.transactions.append(transaction)
def pop_transaction(self):
self.__local.transactions.pop()
def transaction_depth(self):
return len(self.__local.transactions)
def transaction(self):
return transaction(self)
def commit_on_success(self, func):
@wraps(func)
def inner(*args, **kwargs):
with self.transaction():
return func(*args, **kwargs)
return inner
def savepoint(self, sid=None):
if not self.savepoints:
raise NotImplementedError
return savepoint(self, sid)
def atomic(self):
return _atomic(self)
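    # atomic() nests: the outermost block opens a transaction and any inner
    # blocks become savepoints. Sketch:
    #
    #     with db.atomic():
    #         ...
    #         with db.atomic():  # SAVEPOINT inside the outer transaction.
    #             ...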
def get_tables(self, schema=None):
raise NotImplementedError
def get_indexes(self, table, schema=None):
raise NotImplementedError
def get_columns(self, table, schema=None):
raise NotImplementedError
def get_primary_keys(self, table, schema=None):
raise NotImplementedError
def get_foreign_keys(self, table, schema=None):
raise NotImplementedError
def sequence_exists(self, seq):
raise NotImplementedError
def create_table(self, model_class, safe=False):
qc = self.compiler()
return self.execute_sql(*qc.create_table(model_class, safe))
def create_tables(self, models, safe=False):
create_model_tables(models, fail_silently=safe)
def create_index(self, model_class, fields, unique=False):
qc = self.compiler()
if not isinstance(fields, (list, tuple)):
raise ValueError('Fields passed to "create_index" must be a list '
'or tuple: "%s"' % fields)
fobjs = [
model_class._meta.fields[f] if isinstance(f, basestring) else f
for f in fields]
return self.execute_sql(*qc.create_index(model_class, fobjs, unique))
def create_foreign_key(self, model_class, field, constraint=None):
qc = self.compiler()
return self.execute_sql(*qc.create_foreign_key(
model_class, field, constraint))
def create_sequence(self, seq):
if self.sequences:
qc = self.compiler()
return self.execute_sql(*qc.create_sequence(seq))
def drop_table(self, model_class, fail_silently=False, cascade=False):
qc = self.compiler()
return self.execute_sql(*qc.drop_table(
model_class, fail_silently, cascade))
def drop_tables(self, models, safe=False, cascade=False):
drop_model_tables(models, fail_silently=safe, cascade=cascade)
def drop_sequence(self, seq):
if self.sequences:
qc = self.compiler()
return self.execute_sql(*qc.drop_sequence(seq))
def extract_date(self, date_part, date_field):
return fn.EXTRACT(Clause(date_part, R('FROM'), date_field))
def truncate_date(self, date_part, date_field):
return fn.DATE_TRUNC(SQL(date_part), date_field)
def default_insert_clause(self, model_class):
return SQL('DEFAULT VALUES')
class SqliteDatabase(Database):
foreign_keys = False
insert_many = sqlite3 and sqlite3.sqlite_version_info >= (3, 7, 11, 0)
limit_max = -1
op_overrides = {
OP.LIKE: 'GLOB',
OP.ILIKE: 'LIKE',
}
def __init__(self, database, pragmas=None, *args, **kwargs):
self._pragmas = pragmas or []
journal_mode = kwargs.pop('journal_mode', None) # Backwards-compat.
if journal_mode:
self._pragmas.append(('journal_mode', journal_mode))
super(SqliteDatabase, self).__init__(database, *args, **kwargs)
if not self.database:
self.database = ':memory:'
def _connect(self, database, **kwargs):
conn = sqlite3.connect(database, **kwargs)
conn.isolation_level = None
self._add_conn_hooks(conn)
return conn
def _add_conn_hooks(self, conn):
self._set_pragmas(conn)
conn.create_function('date_part', 2, _sqlite_date_part)
conn.create_function('date_trunc', 2, _sqlite_date_trunc)
conn.create_function('regexp', 2, _sqlite_regexp)
def _set_pragmas(self, conn):
if self._pragmas:
cursor = conn.cursor()
for pragma, value in self._pragmas:
cursor.execute('PRAGMA %s = %s;' % (pragma, value))
cursor.close()
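    # Pragmas are (re)applied to every new connection. Sketch:
    #
    #     db = SqliteDatabase('app.db', pragmas=[('journal_mode', 'wal'),
    #                                            ('foreign_keys', 1)])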
def begin(self, lock_type='DEFERRED'):
self.execute_sql('BEGIN %s' % lock_type, require_commit=False)
def get_tables(self, schema=None):
cursor = self.execute_sql('SELECT name FROM sqlite_master WHERE '
'type = ? ORDER BY name;', ('table',))
return [row[0] for row in cursor.fetchall()]
def get_indexes(self, table, schema=None):
query = ('SELECT name, sql FROM sqlite_master '
'WHERE tbl_name = ? AND type = ? ORDER BY name')
cursor = self.execute_sql(query, (table, 'index'))
index_to_sql = dict(cursor.fetchall())
# Determine which indexes have a unique constraint.
unique_indexes = set()
cursor = self.execute_sql('PRAGMA index_list("%s")' % table)
for row in cursor.fetchall():
name = row[1]
is_unique = int(row[2]) == 1
if is_unique:
unique_indexes.add(name)
# Retrieve the indexed columns.
index_columns = {}
for index_name in sorted(index_to_sql):
cursor = self.execute_sql('PRAGMA index_info("%s")' % index_name)
index_columns[index_name] = [row[2] for row in cursor.fetchall()]
return [
IndexMetadata(
name,
index_to_sql[name],
index_columns[name],
name in unique_indexes,
table)
for name in sorted(index_to_sql)]
def get_columns(self, table, schema=None):
cursor = self.execute_sql('PRAGMA table_info("%s")' % table)
return [ColumnMetadata(row[1], row[2], not row[3], bool(row[5]), table)
for row in cursor.fetchall()]
def get_primary_keys(self, table, schema=None):
cursor = self.execute_sql('PRAGMA table_info("%s")' % table)
return [row[1] for row in cursor.fetchall() if row[-1]]
def get_foreign_keys(self, table, schema=None):
cursor = self.execute_sql('PRAGMA foreign_key_list("%s")' % table)
return [ForeignKeyMetadata(row[3], row[2], row[4], table)
for row in cursor.fetchall()]
def savepoint(self, sid=None):
return savepoint_sqlite(self, sid)
def extract_date(self, date_part, date_field):
return fn.date_part(date_part, date_field)
def truncate_date(self, date_part, date_field):
return fn.strftime(SQLITE_DATE_TRUNC_MAPPING[date_part], date_field)
class PostgresqlDatabase(Database):
commit_select = True
compound_select_parentheses = True
distinct_on = True
drop_cascade = True
field_overrides = {
'blob': 'BYTEA',
'bool': 'BOOLEAN',
'datetime': 'TIMESTAMP',
'decimal': 'NUMERIC',
'double': 'DOUBLE PRECISION',
'primary_key': 'SERIAL',
'uuid': 'UUID',
}
for_update = True
for_update_nowait = True
insert_returning = True
interpolation = '%s'
op_overrides = {
OP.REGEXP: '~',
}
reserved_tables = ['user']
returning_clause = True
sequences = True
window_functions = True
register_unicode = True
def _connect(self, database, encoding=None, **kwargs):
if not psycopg2:
raise ImproperlyConfigured('psycopg2 must be installed.')
conn = psycopg2.connect(database=database, **kwargs)
if self.register_unicode:
pg_extensions.register_type(pg_extensions.UNICODE, conn)
pg_extensions.register_type(pg_extensions.UNICODEARRAY, conn)
if encoding:
conn.set_client_encoding(encoding)
return conn
def _get_pk_sequence(self, model):
meta = model._meta
if meta.primary_key.sequence:
return meta.primary_key.sequence
elif meta.auto_increment:
return '%s_%s_seq' % (meta.db_table, meta.primary_key.db_column)
def last_insert_id(self, cursor, model):
sequence = self._get_pk_sequence(model)
if not sequence:
return
meta = model._meta
if meta.schema:
schema = '%s.' % meta.schema
else:
schema = ''
cursor.execute("SELECT CURRVAL('%s\"%s\"')" % (schema, sequence))
result = cursor.fetchone()[0]
if self.get_autocommit():
self.commit()
return result
def get_tables(self, schema='public'):
query = ('SELECT tablename FROM pg_catalog.pg_tables '
'WHERE schemaname = %s ORDER BY tablename')
return [r for r, in self.execute_sql(query, (schema,)).fetchall()]
def get_indexes(self, table, schema='public'):
query = """
SELECT
i.relname, idxs.indexdef, idx.indisunique,
array_to_string(array_agg(cols.attname), ',')
FROM pg_catalog.pg_class AS t
INNER JOIN pg_catalog.pg_index AS idx ON t.oid = idx.indrelid
INNER JOIN pg_catalog.pg_class AS i ON idx.indexrelid = i.oid
INNER JOIN pg_catalog.pg_indexes AS idxs ON
(idxs.tablename = t.relname AND idxs.indexname = i.relname)
LEFT OUTER JOIN pg_catalog.pg_attribute AS cols ON
(cols.attrelid = t.oid AND cols.attnum = ANY(idx.indkey))
WHERE t.relname = %s AND t.relkind = %s AND idxs.schemaname = %s
GROUP BY i.relname, idxs.indexdef, idx.indisunique
ORDER BY idx.indisunique DESC, i.relname;"""
cursor = self.execute_sql(query, (table, 'r', schema))
return [IndexMetadata(row[0], row[1], row[3].split(','), row[2], table)
for row in cursor.fetchall()]
def get_columns(self, table, schema='public'):
query = """
SELECT column_name, is_nullable, data_type
FROM information_schema.columns
WHERE table_name = %s AND table_schema = %s
ORDER BY ordinal_position"""
cursor = self.execute_sql(query, (table, schema))
pks = set(self.get_primary_keys(table, schema))
return [ColumnMetadata(name, dt, null == 'YES', name in pks, table)
for name, null, dt in cursor.fetchall()]
def get_primary_keys(self, table, schema='public'):
query = """
SELECT kc.column_name
FROM information_schema.table_constraints AS tc
INNER JOIN information_schema.key_column_usage AS kc ON (
tc.table_name = kc.table_name AND
tc.table_schema = kc.table_schema AND
tc.constraint_name = kc.constraint_name)
WHERE
tc.constraint_type = %s AND
tc.table_name = %s AND
tc.table_schema = %s"""
cursor = self.execute_sql(query, ('PRIMARY KEY', table, schema))
return [row for row, in cursor.fetchall()]
def get_foreign_keys(self, table, schema='public'):
sql = """
SELECT
kcu.column_name, ccu.table_name, ccu.column_name
FROM information_schema.table_constraints AS tc
JOIN information_schema.key_column_usage AS kcu
ON (tc.constraint_name = kcu.constraint_name AND
tc.constraint_schema = kcu.constraint_schema)
JOIN information_schema.constraint_column_usage AS ccu
ON (ccu.constraint_name = tc.constraint_name AND
ccu.constraint_schema = tc.constraint_schema)
WHERE
tc.constraint_type = 'FOREIGN KEY' AND
tc.table_name = %s AND
tc.table_schema = %s"""
cursor = self.execute_sql(sql, (table, schema))
return [ForeignKeyMetadata(row[0], row[1], row[2], table)
for row in cursor.fetchall()]
def sequence_exists(self, sequence):
res = self.execute_sql("""
SELECT COUNT(*) FROM pg_class, pg_namespace
WHERE relkind='S'
AND pg_class.relnamespace = pg_namespace.oid
AND relname=%s""", (sequence,))
return bool(res.fetchone()[0])
def set_search_path(self, *search_path):
path_params = ','.join(['%s'] * len(search_path))
self.execute_sql('SET search_path TO %s' % path_params, search_path)
class MySQLDatabase(Database):
commit_select = True
compound_operations = ['UNION', 'UNION ALL']
field_overrides = {
'bool': 'BOOL',
'decimal': 'NUMERIC',
'double': 'DOUBLE PRECISION',
'float': 'FLOAT',
'primary_key': 'INTEGER AUTO_INCREMENT',
'text': 'LONGTEXT',
}
for_update = True
interpolation = '%s'
limit_max = 2 ** 64 - 1 # MySQL quirk
op_overrides = {
OP.LIKE: 'LIKE BINARY',
OP.ILIKE: 'LIKE',
OP.XOR: 'XOR',
}
quote_char = '`'
subquery_delete_same_table = False
def _connect(self, database, **kwargs):
if not mysql:
raise ImproperlyConfigured('MySQLdb or PyMySQL must be installed.')
conn_kwargs = {
'charset': 'utf8',
'use_unicode': True,
}
conn_kwargs.update(kwargs)
if 'password' in conn_kwargs:
conn_kwargs['passwd'] = conn_kwargs.pop('password')
return mysql.connect(db=database, **conn_kwargs)
def get_tables(self, schema=None):
return [row for row, in self.execute_sql('SHOW TABLES')]
def get_indexes(self, table, schema=None):
cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table)
unique = set()
indexes = {}
for row in cursor.fetchall():
if not row[1]:
unique.add(row[2])
indexes.setdefault(row[2], [])
indexes[row[2]].append(row[4])
return [IndexMetadata(name, None, indexes[name], name in unique, table)
for name in indexes]
def get_columns(self, table, schema=None):
sql = """
SELECT column_name, is_nullable, data_type
FROM information_schema.columns
WHERE table_name = %s AND table_schema = DATABASE()"""
cursor = self.execute_sql(sql, (table,))
pks = set(self.get_primary_keys(table))
return [ColumnMetadata(name, dt, null == 'YES', name in pks, table)
for name, null, dt in cursor.fetchall()]
def get_primary_keys(self, table, schema=None):
cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table)
return [row[4] for row in cursor.fetchall() if row[2] == 'PRIMARY']
def get_foreign_keys(self, table, schema=None):
query = """
SELECT column_name, referenced_table_name, referenced_column_name
FROM information_schema.key_column_usage
WHERE table_name = %s
AND table_schema = DATABASE()
AND referenced_table_name IS NOT NULL
AND referenced_column_name IS NOT NULL"""
cursor = self.execute_sql(query, (table,))
return [
ForeignKeyMetadata(column, dest_table, dest_column, table)
for column, dest_table, dest_column in cursor.fetchall()]
def extract_date(self, date_part, date_field):
return fn.EXTRACT(Clause(R(date_part), R('FROM'), date_field))
def truncate_date(self, date_part, date_field):
return fn.DATE_FORMAT(date_field, MYSQL_DATE_TRUNC_MAPPING[date_part])
def default_insert_clause(self, model_class):
return Clause(
EnclosedClause(model_class._meta.primary_key),
SQL('VALUES (DEFAULT)'))
class _callable_context_manager(object):
def __call__(self, fn):
@wraps(fn)
def inner(*args, **kwargs):
with self:
return fn(*args, **kwargs)
return inner
class ExecutionContext(_callable_context_manager):
def __init__(self, database, with_transaction=True):
self.database = database
self.with_transaction = with_transaction
self.connection = None
def __enter__(self):
with self.database._conn_lock:
self.database.push_execution_context(self)
self.connection = self.database._connect(
self.database.database,
**self.database.connect_kwargs)
if self.with_transaction:
self.txn = self.database.transaction()
self.txn.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
with self.database._conn_lock:
if self.connection is None:
self.database.pop_execution_context()
else:
try:
if self.with_transaction:
if not exc_type:
self.txn.commit(False)
self.txn.__exit__(exc_type, exc_val, exc_tb)
finally:
self.database.pop_execution_context()
self.database._close(self.connection)
class Using(ExecutionContext):
def __init__(self, database, models, with_transaction=True):
super(Using, self).__init__(database, with_transaction)
self.models = models
def __enter__(self):
self._orig = []
for model in self.models:
self._orig.append(model._meta.database)
model._meta.database = self.database
return super(Using, self).__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
super(Using, self).__exit__(exc_type, exc_val, exc_tb)
for i, model in enumerate(self.models):
model._meta.database = self._orig[i]
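# Using usage sketch: temporarily bind models to another database
# (read_replica is a hypothetical Database instance):
#
#     with Using(read_replica, [User, Tweet]):
#         ...  # queries here run against read_replica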
class _atomic(_callable_context_manager):
def __init__(self, db):
self.db = db
def __enter__(self):
if self.db.transaction_depth() == 0:
self._helper = self.db.transaction()
else:
self._helper = self.db.savepoint()
return self._helper.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
return self._helper.__exit__(exc_type, exc_val, exc_tb)
class transaction(_callable_context_manager):
def __init__(self, db):
self.db = db
def _begin(self):
self.db.begin()
def commit(self, begin=True):
self.db.commit()
if begin:
self._begin()
def rollback(self, begin=True):
self.db.rollback()
if begin:
self._begin()
def __enter__(self):
self._orig = self.db.get_autocommit()
self.db.set_autocommit(False)
if self.db.transaction_depth() == 0:
self._begin()
self.db.push_transaction(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
if exc_type:
self.rollback(False)
elif self.db.transaction_depth() == 1:
try:
self.commit(False)
except:
self.rollback(False)
raise
finally:
self.db.set_autocommit(self._orig)
self.db.pop_transaction()
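# Because of _callable_context_manager, transaction (and savepoint below)
# also works as a decorator. Sketch:
#
#     @db.transaction()
#     def transfer(from_acct, to_acct, amount):
#         ...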
class savepoint(_callable_context_manager):
def __init__(self, db, sid=None):
self.db = db
_compiler = db.compiler()
self.sid = sid or 's' + uuid.uuid4().hex
self.quoted_sid = _compiler.quote(self.sid)
def _execute(self, query):
self.db.execute_sql(query, require_commit=False)
def commit(self):
self._execute('RELEASE SAVEPOINT %s;' % self.quoted_sid)
def rollback(self):
self._execute('ROLLBACK TO SAVEPOINT %s;' % self.quoted_sid)
def __enter__(self):
self._orig_autocommit = self.db.get_autocommit()
self.db.set_autocommit(False)
self._execute('SAVEPOINT %s;' % self.quoted_sid)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
if exc_type:
self.rollback()
else:
try:
self.commit()
except:
self.rollback()
raise
finally:
self.db.set_autocommit(self._orig_autocommit)
class savepoint_sqlite(savepoint):
def __enter__(self):
conn = self.db.get_conn()
# For sqlite, the connection's isolation_level *must* be set to None.
# The act of setting it, though, will break any existing savepoints,
# so only write to it if necessary.
if conn.isolation_level is not None:
self._orig_isolation_level = conn.isolation_level
conn.isolation_level = None
else:
self._orig_isolation_level = None
return super(savepoint_sqlite, self).__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
try:
return super(savepoint_sqlite, self).__exit__(
exc_type, exc_val, exc_tb)
finally:
if self._orig_isolation_level is not None:
self.db.get_conn().isolation_level = self._orig_isolation_level
class FieldProxy(Field):
def __init__(self, alias, field_instance):
self._model_alias = alias
self.model = self._model_alias.model_class
self.field_instance = field_instance
def clone_base(self):
return FieldProxy(self._model_alias, self.field_instance)
def coerce(self, value):
return self.field_instance.coerce(value)
def python_value(self, value):
return self.field_instance.python_value(value)
def db_value(self, value):
return self.field_instance.db_value(value)
def __getattr__(self, attr):
if attr == 'model_class':
return self._model_alias
return getattr(self.field_instance, attr)
class ModelAlias(object):
def __init__(self, model_class):
self.__dict__['model_class'] = model_class
def __getattr__(self, attr):
model_attr = getattr(self.model_class, attr)
if isinstance(model_attr, Field):
return FieldProxy(self, model_attr)
return model_attr
def __setattr__(self, attr, value):
raise AttributeError('Cannot set attributes on ModelAlias instances')
def get_proxy_fields(self):
return [
FieldProxy(self, f) for f in self.model_class._meta.get_fields()]
def select(self, *selection):
if not selection:
selection = self.get_proxy_fields()
query = SelectQuery(self, *selection)
if self._meta.order_by:
query = query.order_by(*self._meta.order_by)
return query
def __call__(self, **kwargs):
return self.model_class(**kwargs)
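# ModelAlias usage sketch for self-joins (Category is a hypothetical model
# with a self-referential `parent` foreign key):
#
#     Parent = Category.alias()
#     query = (Category
#              .select(Category, Parent)
#              .join(Parent, on=(Category.parent == Parent.id)))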
class DoesNotExist(Exception): pass
if sqlite3:
default_database = SqliteDatabase('peewee.db')
else:
default_database = None
class ModelOptions(object):
def __init__(self, cls, database=None, db_table=None, db_table_func=None,
indexes=None, order_by=None, primary_key=None,
table_alias=None, constraints=None, schema=None,
validate_backrefs=True, **kwargs):
self.model_class = cls
self.name = cls.__name__.lower()
self.fields = {}
self.columns = {}
self.defaults = {}
self._default_by_name = {}
self._default_dict = {}
self._default_callables = {}
self.database = database or default_database
self.db_table = db_table
self.db_table_func = db_table_func
self.indexes = list(indexes or [])
self.order_by = order_by
self.primary_key = primary_key
self.table_alias = table_alias
self.constraints = constraints
self.schema = schema
self.validate_backrefs = validate_backrefs
self.auto_increment = None
self.composite_key = False
self.rel = {}
self.reverse_rel = {}
for key, value in kwargs.items():
setattr(self, key, value)
self._additional_keys = set(kwargs.keys())
if self.db_table_func and not self.db_table:
self.db_table = self.db_table_func(cls)
def prepared(self):
for field in self.fields.values():
if field.default is not None:
self.defaults[field] = field.default
if callable(field.default):
self._default_callables[field] = field.default
else:
self._default_dict[field] = field.default
self._default_by_name[field.name] = field.default
if self.order_by:
norm_order_by = []
for item in self.order_by:
if isinstance(item, Field):
prefix = '-' if item._ordering == 'DESC' else ''
item = prefix + item.name
field = self.fields[item.lstrip('-')]
if item.startswith('-'):
norm_order_by.append(field.desc())
else:
norm_order_by.append(field.asc())
self.order_by = norm_order_by
def get_default_dict(self):
dd = self._default_by_name.copy()
if self._default_callables:
for field, default in self._default_callables.items():
dd[field.name] = default()
return dd
def get_sorted_fields(self):
key = lambda i: i[1]._sort_key
return sorted(self.fields.items(), key=key)
def get_field_names(self):
return [f[0] for f in self.get_sorted_fields()]
def get_fields(self):
return [f[1] for f in self.get_sorted_fields()]
def get_field_index(self, field):
for i, (field_name, field_obj) in enumerate(self.get_sorted_fields()):
if field_name == field.name:
return i
return -1
def get_primary_key_fields(self):
if self.composite_key:
return [
self.fields[field_name]
for field_name in self.primary_key.field_names]
return [self.primary_key]
def rel_for_model(self, model, field_obj=None):
is_field = isinstance(field_obj, Field)
is_node = not is_field and isinstance(field_obj, Node)
for field in self.get_fields():
if isinstance(field, ForeignKeyField) and field.rel_model == model:
is_match = (
(field_obj is None) or
(is_field and field_obj.name == field.name) or
(is_node and field_obj._alias == field.name))
if is_match:
return field
def reverse_rel_for_model(self, model, field_obj=None):
return model._meta.rel_for_model(self.model_class, field_obj)
def rel_exists(self, model):
return self.rel_for_model(model) or self.reverse_rel_for_model(model)
def related_models(self, backrefs=False):
models = []
stack = [self.model_class]
while stack:
model = stack.pop()
if model in models:
continue
models.append(model)
for fk in model._meta.rel.values():
stack.append(fk.rel_model)
if backrefs:
for fk in model._meta.reverse_rel.values():
stack.append(fk.model_class)
return models
class BaseModel(type):
inheritable = set([
'constraints', 'database', 'db_table_func', 'indexes', 'order_by',
'primary_key', 'schema', 'validate_backrefs'])
def __new__(cls, name, bases, attrs):
if not bases:
return super(BaseModel, cls).__new__(cls, name, bases, attrs)
meta_options = {}
meta = attrs.pop('Meta', None)
if meta:
for k, v in meta.__dict__.items():
if not k.startswith('_'):
meta_options[k] = v
model_pk = getattr(meta, 'primary_key', None)
parent_pk = None
        # Inherit any field descriptors by deep-copying the underlying field
        # into the attrs of the new model. Additionally, if the bases define
        # inheritable model options, copy those over as well.
for b in bases:
if not hasattr(b, '_meta'):
continue
base_meta = getattr(b, '_meta')
if parent_pk is None:
parent_pk = deepcopy(base_meta.primary_key)
all_inheritable = cls.inheritable | base_meta._additional_keys
for (k, v) in base_meta.__dict__.items():
if k in all_inheritable and k not in meta_options:
meta_options[k] = v
for (k, v) in b.__dict__.items():
if k in attrs:
continue
if isinstance(v, FieldDescriptor):
if not v.field.primary_key:
attrs[k] = deepcopy(v.field)
# initialize the new class and set the magic attributes
cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)
cls._meta = ModelOptions(cls, **meta_options)
cls._data = None
cls._meta.indexes = list(cls._meta.indexes)
if not cls._meta.db_table:
            cls._meta.db_table = re.sub(r'[^\w]+', '_', cls.__name__.lower())
# replace fields with field descriptors, calling the add_to_class hook
fields = []
for name, attr in cls.__dict__.items():
if isinstance(attr, Field):
if attr.primary_key and model_pk:
raise ValueError('primary key is overdetermined.')
elif attr.primary_key:
model_pk, pk_name = attr, name
else:
fields.append((attr, name))
composite_key = False
if model_pk is None:
if parent_pk:
model_pk, pk_name = parent_pk, parent_pk.name
else:
model_pk, pk_name = PrimaryKeyField(primary_key=True), 'id'
elif isinstance(model_pk, CompositeKey):
pk_name = '_composite_key'
composite_key = True
if model_pk is not False:
model_pk.add_to_class(cls, pk_name)
cls._meta.primary_key = model_pk
cls._meta.auto_increment = (
isinstance(model_pk, PrimaryKeyField) or
bool(model_pk.sequence))
cls._meta.composite_key = composite_key
for field, name in fields:
field.add_to_class(cls, name)
# create a repr and error class before finalizing
if hasattr(cls, '__unicode__'):
setattr(cls, '__repr__', lambda self: '<%s: %r>' % (
cls.__name__, self.__unicode__()))
exc_name = '%sDoesNotExist' % cls.__name__
exception_class = type(exc_name, (DoesNotExist,), {})
cls.DoesNotExist = exception_class
cls._meta.prepared()
return cls
def __iter__(self):
return iter(self.select())
class Model(with_metaclass(BaseModel)):
def __init__(self, *args, **kwargs):
self._data = self._meta.get_default_dict()
self._dirty = set()
self._obj_cache = {}
for k, v in kwargs.items():
setattr(self, k, v)
@classmethod
def alias(cls):
return ModelAlias(cls)
@classmethod
def select(cls, *selection):
query = SelectQuery(cls, *selection)
if cls._meta.order_by:
query = query.order_by(*cls._meta.order_by)
return query
@classmethod
def update(cls, **update):
fdict = dict((cls._meta.fields[f], v) for f, v in update.items())
return UpdateQuery(cls, fdict)
@classmethod
def insert(cls, **insert):
return InsertQuery(cls, insert)
@classmethod
def insert_many(cls, rows):
return InsertQuery(cls, rows=rows)
@classmethod
def insert_from(cls, fields, query):
return InsertQuery(cls, fields=fields, query=query)
@classmethod
def delete(cls):
return DeleteQuery(cls)
@classmethod
def raw(cls, sql, *params):
return RawQuery(cls, sql, *params)
@classmethod
def create(cls, **query):
inst = cls(**query)
inst.save(force_insert=True)
inst._prepare_instance()
return inst
@classmethod
def get(cls, *query, **kwargs):
sq = cls.select().naive()
if query:
sq = sq.where(*query)
if kwargs:
sq = sq.filter(**kwargs)
return sq.get()
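    # get() usage sketch:
    #
    #     user = User.get(User.username == 'charlie')
    #
    # Raises User.DoesNotExist when no row matches.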
@classmethod
def get_or_create(cls, **kwargs):
defaults = kwargs.pop('defaults', {})
sq = cls.select().filter(**kwargs)
try:
return sq.get(), False
except cls.DoesNotExist:
try:
params = dict((k, v) for k, v in kwargs.items()
if '__' not in k)
params.update(defaults)
with cls._meta.database.atomic():
return cls.create(**params), True
except IntegrityError as exc:
try:
return sq.get(), False
except cls.DoesNotExist:
raise exc
@classmethod
def create_or_get(cls, **kwargs):
try:
with cls._meta.database.atomic():
return cls.create(**kwargs), True
except IntegrityError:
query = [] # TODO: multi-column unique constraints.
for field_name, value in kwargs.items():
field = cls._meta.fields[field_name]
if field.unique or field.primary_key:
query.append(field == value)
return cls.get(*query), False
@classmethod
def filter(cls, *dq, **query):
return cls.select().filter(*dq, **query)
@classmethod
def table_exists(cls):
kwargs = {}
if cls._meta.schema:
kwargs['schema'] = cls._meta.schema
return cls._meta.db_table in cls._meta.database.get_tables(**kwargs)
@classmethod
def create_table(cls, fail_silently=False):
if fail_silently and cls.table_exists():
return
db = cls._meta.database
pk = cls._meta.primary_key
if db.sequences and pk.sequence:
if not db.sequence_exists(pk.sequence):
db.create_sequence(pk.sequence)
db.create_table(cls)
cls._create_indexes()
@classmethod
def _fields_to_index(cls):
fields = []
for field in cls._meta.fields.values():
if field.primary_key:
continue
requires_index = any((
field.index,
field.unique,
isinstance(field, ForeignKeyField)))
if requires_index:
fields.append(field)
return fields
@classmethod
def _create_indexes(cls):
db = cls._meta.database
for field in cls._fields_to_index():
db.create_index(cls, [field], field.unique)
if cls._meta.indexes:
for fields, unique in cls._meta.indexes:
db.create_index(cls, fields, unique)
@classmethod
def sqlall(cls):
queries = []
compiler = cls._meta.database.compiler()
pk = cls._meta.primary_key
if cls._meta.database.sequences and pk.sequence:
queries.append(compiler.create_sequence(pk.sequence))
queries.append(compiler.create_table(cls))
for field in cls._fields_to_index():
queries.append(compiler.create_index(cls, [field], field.unique))
if cls._meta.indexes:
for field_names, unique in cls._meta.indexes:
fields = [cls._meta.fields[f] for f in field_names]
queries.append(compiler.create_index(cls, fields, unique))
return [sql for sql, _ in queries]
@classmethod
def drop_table(cls, fail_silently=False, cascade=False):
cls._meta.database.drop_table(cls, fail_silently, cascade)
@classmethod
def as_entity(cls):
if cls._meta.schema:
return Entity(cls._meta.schema, cls._meta.db_table)
return Entity(cls._meta.db_table)
def _get_pk_value(self):
return getattr(self, self._meta.primary_key.name)
get_id = _get_pk_value # Backwards-compatibility.
def _set_pk_value(self, value):
if not self._meta.composite_key:
setattr(self, self._meta.primary_key.name, value)
set_id = _set_pk_value # Backwards-compatibility.
def _pk_expr(self):
return self._meta.primary_key == self._get_pk_value()
def _prepare_instance(self):
self._dirty.clear()
self.prepared()
def prepared(self):
pass
def _prune_fields(self, field_dict, only):
new_data = {}
for field in only:
if field.name in field_dict:
new_data[field.name] = field_dict[field.name]
return new_data
def _populate_unsaved_relations(self, field_dict):
for key in self._meta.rel:
conditions = (
key in self._dirty and
key in field_dict and
field_dict[key] is None and
self._obj_cache.get(key) is not None)
if conditions:
setattr(self, key, getattr(self, key))
field_dict[key] = self._data[key]
def save(self, force_insert=False, only=None):
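# Persistence strategy: UPDATE when a primary key value is already set
# (unless force_insert is requested), otherwise INSERT and adopt the
# primary key returned by the cursor.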
field_dict = dict(self._data)
pk_field = self._meta.primary_key
pk_value = self._get_pk_value()
if only:
field_dict = self._prune_fields(field_dict, only)
self._populate_unsaved_relations(field_dict)
if pk_value is not None and not force_insert:
if self._meta.composite_key:
for pk_part_name in pk_field.field_names:
field_dict.pop(pk_part_name, None)
else:
field_dict.pop(pk_field.name, None)
rows = self.update(**field_dict).where(self._pk_expr()).execute()
else:
pk_from_cursor = self.insert(**field_dict).execute()
if pk_from_cursor is not None:
pk_value = pk_from_cursor
self._set_pk_value(pk_value)
rows = 1
self._dirty.clear()
return rows
def is_dirty(self):
return bool(self._dirty)
@property
def dirty_fields(self):
return [f for f in self._meta.get_fields() if f.name in self._dirty]
def dependencies(self, search_nullable=False):
model_class = type(self)
query = self.select().where(self._pk_expr())
stack = [(type(self), query)]
seen = set()
while stack:
klass, query = stack.pop()
if klass in seen:
continue
seen.add(klass)
for rel_name, fk in klass._meta.reverse_rel.items():
rel_model = fk.model_class
if fk.rel_model is model_class:
node = (fk == self._data[fk.to_field.name])
subquery = rel_model.select().where(node)
else:
node = fk << query
subquery = rel_model.select().where(node)
if not fk.null or search_nullable:
stack.append((rel_model, subquery))
yield (node, fk)
def delete_instance(self, recursive=False, delete_nullable=False):
if recursive:
dependencies = self.dependencies(delete_nullable)
for query, fk in reversed(list(dependencies)):
model = fk.model_class
if fk.null and not delete_nullable:
model.update(**{fk.name: None}).where(query).execute()
else:
model.delete().where(query).execute()
return self.delete().where(self._pk_expr()).execute()
def __eq__(self, other):
return (
other.__class__ == self.__class__ and
self._get_pk_value() is not None and
other._get_pk_value() == self._get_pk_value())
def __ne__(self, other):
return not self == other
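# Illustrative sketch, not part of peewee itself: typical use of the Model
# API defined above. The Person model and its field are hypothetical.
def _example_model_usage():  # documentation only, never called at import
    db = SqliteDatabase(':memory:')
    class Person(Model):
        name = CharField(unique=True)
        class Meta:
            database = db
    Person.create_table()
    alice, created = Person.get_or_create(name='Alice')  # created is True
    again, created = Person.get_or_create(name='Alice')  # created is False
    assert alice.id == again.id
    Person.update(name='Alicia').where(Person.name == 'Alice').execute()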
def prefetch_add_subquery(sq, subqueries):
fixed_queries = [PrefetchResult(sq)]
for i, subquery in enumerate(subqueries):
if not isinstance(subquery, Query) and issubclass(subquery, Model):
subquery = subquery.select()
subquery_model = subquery.model_class
fkf = backref = None
for j in reversed(range(i + 1)):
last_query = fixed_queries[j][0]
last_model = last_query.model_class
foreign_key = subquery_model._meta.rel_for_model(last_model)
if foreign_key:
fkf = getattr(subquery_model, foreign_key.name)
to_field = getattr(last_model, foreign_key.to_field.name)
else:
backref = last_model._meta.rel_for_model(subquery_model)
if fkf or backref:
break
if not (fkf or backref):
raise AttributeError('Error: unable to find foreign key for '
'query: %s' % subquery)
if fkf:
inner_query = last_query.select(to_field)
fixed_queries.append(
PrefetchResult(subquery.where(fkf << inner_query), fkf, False))
elif backref:
q = subquery.where(backref.to_field << last_query.select(backref))
fixed_queries.append(PrefetchResult(q, backref, True))
return fixed_queries
__prefetched = namedtuple('__prefetched', (
'query', 'field', 'backref', 'rel_model', 'foreign_key_attr', 'model'))
class PrefetchResult(__prefetched):
def __new__(cls, query, field=None, backref=None, rel_model=None,
foreign_key_attr=None, model=None):
if field:
if backref:
rel_model = field.model_class
foreign_key_attr = field.to_field.name
else:
rel_model = field.rel_model
foreign_key_attr = field.name
model = query.model_class
return super(PrefetchResult, cls).__new__(
cls, query, field, backref, rel_model, foreign_key_attr, model)
def populate_instance(self, instance, id_map):
if self.backref:
identifier = instance._data[self.field.name]
if identifier in id_map:
setattr(instance, self.field.name, id_map[identifier])
else:
identifier = instance._data[self.field.to_field.name]
rel_instances = id_map.get(identifier, [])
attname = self.foreign_key_attr
dest = '%s_prefetch' % self.field.related_name
for inst in rel_instances:
setattr(inst, attname, instance)
setattr(instance, dest, rel_instances)
def store_instance(self, instance, id_map):
identity = self.field.to_field.python_value(
instance._data[self.foreign_key_attr])
if self.backref:
id_map[identity] = instance
else:
id_map.setdefault(identity, [])
id_map[identity].append(instance)
def prefetch(sq, *subqueries):
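# Walk the fixed queries from the most deeply nested subquery back to the
# root: store each instance in an id_map keyed by its foreign-key value,
# then attach previously collected related instances onto it.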
if not subqueries:
return sq
fixed_queries = prefetch_add_subquery(sq, subqueries)
deps = {}
rel_map = {}
for prefetch_result in reversed(fixed_queries):
query_model = prefetch_result.model
if prefetch_result.field:
rel_map.setdefault(prefetch_result.rel_model, [])
rel_map[prefetch_result.rel_model].append(prefetch_result)
deps[query_model] = {}
id_map = deps[query_model]
has_relations = bool(rel_map.get(query_model))
for instance in prefetch_result.query:
if prefetch_result.field:
prefetch_result.store_instance(instance, id_map)
if has_relations:
for rel in rel_map[query_model]:
rel.populate_instance(instance, deps[rel.model])
return prefetch_result.query
def create_model_tables(models, **create_table_kwargs):
"""Create tables for all given models (in the right order)."""
for m in sort_models_topologically(models):
m.create_table(**create_table_kwargs)
def drop_model_tables(models, **drop_table_kwargs):
"""Drop tables for all given models (in the right order)."""
for m in reversed(sort_models_topologically(models)):
m.drop_table(**drop_table_kwargs)
def sort_models_topologically(models):
"""Sort models topologically so that parents will precede children."""
models = set(models)
seen = set()
ordering = []
def dfs(model):
if model in models and model not in seen:
seen.add(model)
for foreign_key in model._meta.reverse_rel.values():
dfs(foreign_key.model_class)
ordering.append(model) # parent will follow descendants
# order models by name and table initially to guarantee a total ordering
names = lambda m: (m._meta.name, m._meta.db_table)
for m in sorted(models, key=names, reverse=True):
dfs(m)
return list(reversed(ordering)) # want parents first in output ordering
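# Illustrative sketch with hypothetical models: sort_models_topologically()
# guarantees a model precedes every model that references it via a foreign
# key, so create_model_tables() never creates a child before its parent.
def _example_topological_order():  # documentation only
    db = SqliteDatabase(':memory:')
    class Author(Model):
        class Meta:
            database = db
    class Book(Model):
        author = ForeignKeyField(Author)
        class Meta:
            database = db
    ordered = sort_models_topologically([Book, Author])
    assert ordered.index(Author) < ordered.index(Book)
    create_model_tables([Book, Author], fail_silently=True)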
|
{
"content_hash": "4a09b35b39f6f0e06646db4dcfcbc1d1",
"timestamp": "",
"source": "github",
"line_count": 4629,
"max_line_length": 79,
"avg_line_length": 33.77403326852452,
"alnum_prop": 0.5637073045925547,
"repo_name": "py4a/peewee",
"id": "d1a1c42c966173003e04a9442a28f2309b9da2a1",
"size": "156866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "peewee.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "941115"
}
],
"symlink_target": ""
}
|
import sys
import os
import __builtin__
from ctypes import *
from cStringIO import StringIO
from termcolor import cprint
import colorama
here = os.getcwd()
test_functions = set([])
# Constant format output strings
success_string = '[OK] {}'
fail_string = '[KO] {}'
problem_string = '[KO] {}: Process exited with error code {}'
success_color = 'green'
fail_color = 'red'
print_args = []
print_success = lambda *args: cprint(success_string.format(*args),
success_color)
print_fail = lambda *args: cprint(fail_string.format(*args),
fail_color)
print_problem = lambda *args: cprint(problem_string.format(*args),
fail_color)
# Init rift with lib name
def init(lib_name):
"""lib_name -- name of the .so to load
"""
colorama.init()
__builtin__.lib = cdll.LoadLibrary(here + "/" + lib_name)
# Decorator marking functions to be tested
def Test(func):
"""Decorator registering test functions.
Test functions have to return a boolean and can modify the output by
changing rift.success_string, rift.fail_string, rift.print_args values.
"""
test_functions.add(func)
return func
# Stdout catching
class _Capturing(list):
"""Capturing capture printed content and add it in itself.
Use guide:
with Capturing() as stdout, stderr:
do_something(my_object)
"""
def __enter__(self):
self._stdout = sys.stdout
self._stderr = sys.stderr
sys.stdout = self._stdoutio = StringIO()
sys.stderr = self._stderrio = StringIO()
return self
def __exit__(self, *args):
self.extend((self._stdoutio.getvalue().splitlines(),
self._stderrio.getvalue().splitlines()))
sys.stdout = self._stdout
sys.stderr = self._stderr
# Call method that capture return value, stdout and stderr
def call(func, ret_type, *args):
"""call(func, ret_type, *args) -> (ret_value, stdout, stderr)
Call a function and return its value cast to ret_type, along with the
captured stdout and stderr.
func -- function ptr accessible through `lib` global variable.
ret_type -- return type of the c function (ctypes type)
"""
ret = None
stdout = None
stderr = None
func.restype = ret_type
with _Capturing() as streams:
ret = func(*args)
return (ret, streams[0], streams[1])
def run_tests(handle_fail=False):
"""Run all tests."""
for test in test_functions:
print_args = [test.__name__]
if handle_fail:
child_pid = os.fork()
if child_pid == 0:
# run the test in the child so a crashing C call cannot take down
# the whole run; exit explicitly so the child does not fall through
# and re-run the remaining tests
if test():
print_success(*print_args)
else:
print_fail(*print_args)
os._exit(0)
else:
_, ret_code = os.waitpid(child_pid, 0)
if ret_code != 0:
print_args.append(ret_code)
print_problem(*print_args)
else:
if test():
print_success(*print_args)
else:
print_fail(*print_args)
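# Illustrative usage sketch (library name and test body are hypothetical):
#
#     import rift
#     from ctypes import c_int
#     rift.init('libmylib.so')
#
#     @rift.Test
#     def test_add():
#         ret, out, err = rift.call(lib.add, c_int, 1, 2)
#         return ret == 3
#
#     rift.run_tests(handle_fail=True)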
|
{
"content_hash": "86a2e57ffb851a6ffda9796bfd4e879a",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 75,
"avg_line_length": 31.62,
"alnum_prop": 0.5815939278937381,
"repo_name": "Yayg/rift",
"id": "e53d3bcfc65605cd97562da6b0b77304d6328e3c",
"size": "3162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rift/rift.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "666"
},
{
"name": "Makefile",
"bytes": "750"
},
{
"name": "Python",
"bytes": "5386"
},
{
"name": "Shell",
"bytes": "135"
}
],
"symlink_target": ""
}
|
import sys, os, math
import matplotlib.pyplot as plt
#-Configuration ----------------------------------------------------------------
sizes = [ 2**n for n in range(6, 13) ]
modes = ['Naive', 'SMB', 'CRB', 'CRB-T', 'CRB-TR']
#-Script Body ------------------------------------------------------------------
if __name__ == "__main__":
#-Build the OpenCL project--------------------------------------------------
if not 'build' in os.listdir('.'):
os.system('mkdir build')
os.chdir('build')
print '# Building the OpenCL project ...'
if (os.system('cmake ..') != 0):
sys.exit('# Please check your OpenCL installation.')
os.system('make')
#-Run the Benchmark---------------------------------------------------------
print '# Benchmark starts here ...'
results = [[] for i in range(len(modes))]
for i, size in enumerate(sizes):
path = 'bench.log'
with open(path, 'w+') as fo:
# run the 'cpp' executable ...
n_iteration = max(1, int((2 ** 14 / size)))
print ('[step %s/%s] size : %4s | iter : %3s'
% (i+1, len(sizes), size, n_iteration))
cmd_line = ('./GEMM -s %s -i %s -r > %s'
% (size, n_iteration, path))
status = os.system(cmd_line)
scores = fo.read()[:-1]
# process the output ...
if (status != 0):
print '# Iteration failed :\n', scores
sys.exit()
else:
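# GEMM needs 2*M*N*K floating point operations (one multiply and one
# add per inner-product term); with M=N=K=size, and assuming the
# executable reports per-iteration times in milliseconds, this yields
# effective TFLOPS.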
for j, time in enumerate(scores.split('\n')):
tflops = (size ** 3 / float(time)) * 2 * 1e-9
results[j].append(tflops)
print '# Benchmark completed !'
#-Display the Results-------------------------------------------------------
fig,axes = plt.subplots()
# size and name
fig.set_size_inches(8, 4, forward=True)
fig.subplots_adjust(bottom=0.15)
fig.canvas.set_window_title('GEMM - benchmark')
# axes
axes.set_xlim([0, len(sizes)-1])
axes.set_ylim([0, 1.1 * max([max(l) for l in results])])
axes.xaxis.set_ticks(range(0, len(sizes)))
axes.xaxis.set_ticklabels(sizes)
# plotting
def plot_entry(n):
markers = ['o', '^', 's', 'D', 'v']
colors = ['purple', 'royalblue', 'r', 'orange', 'k']
return plt.plot(results[n], color=colors[n],
linestyle='-', marker=markers[n],
markeredgewidth=1, markeredgecolor=colors[n],
markerfacecolor=colors[n], markersize=6)[0]
print '# Plotting the results ...'
plots = [ plot_entry(n) for n in range(0, len(modes)) ]
# legend
plt.legend(plots, modes, loc='upper left', fontsize = '14')
# background grid
plt.grid(True, which="major", linestyle=':')
plt.grid(True, which="minor", linestyle=':', alpha=0.25)
plt.minorticks_on()
# labels
plt.xlabel('matrix size (M=N=K)', fontsize=14)
plt.ylabel('effective performance (TFLOPS)', fontsize=14)
plt.title('Performance Comparison of the Proposed Kernels', size=16)
# and that's it!
plt.show()
print '# Exiting ...'
|
{
"content_hash": "e7d892343520e604dab9bc463b497111",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 80,
"avg_line_length": 34.86813186813187,
"alnum_prop": 0.4853450992751339,
"repo_name": "Cryst4L/Blaze",
"id": "762726e125dd253658b56731550ccb3475bc9d2d",
"size": "3195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "15039"
},
{
"name": "C++",
"bytes": "16234"
},
{
"name": "CMake",
"bytes": "1020"
},
{
"name": "Python",
"bytes": "3195"
}
],
"symlink_target": ""
}
|
"""ABCs."""
# Authors: Guillaume Favelier <guillaume.favelier@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
from abc import ABC, abstractmethod, abstractclassmethod
from contextlib import nullcontext
import warnings
from ..utils import tight_layout
class _AbstractRenderer(ABC):
@abstractclassmethod
def __init__(self, fig=None, size=(600, 600), bgcolor=(0., 0., 0.),
name=None, show=False, shape=(1, 1)):
"""Set up the scene."""
pass
@abstractclassmethod
def subplot(self, x, y):
"""Set the active subplot."""
pass
@abstractclassmethod
def scene(self):
"""Return scene handle."""
pass
@abstractclassmethod
def set_interaction(self, interaction):
"""Set interaction mode."""
pass
@abstractclassmethod
def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False,
backface_culling=False, scalars=None, colormap=None,
vmin=None, vmax=None, interpolate_before_map=True,
representation='surface', line_width=1., normals=None,
polygon_offset=None, **kwargs):
"""Add a mesh in the scene.
Parameters
----------
x : array, shape (n_vertices,)
The array containing the X component of the vertices.
y : array, shape (n_vertices,)
The array containing the Y component of the vertices.
z : array, shape (n_vertices,)
The array containing the Z component of the vertices.
triangles : array, shape (n_polygons, 3)
The array containing the indices of the polygons.
color : tuple | str
The color of the mesh as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
opacity : float
The opacity of the mesh.
shading : bool
If True, enable the mesh shading.
backface_culling : bool
If True, enable backface culling on the mesh.
scalars : ndarray, shape (n_vertices,)
The scalar values associated with the vertices.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap :
The colormap to use.
interpolate_before_map :
Enabling makes for a smoother scalars display. Default is True.
When False, OpenGL will interpolate the mapped colors which can
result in showing colors that are not present in the color map.
representation : str
The representation of the mesh: either 'surface' or 'wireframe'.
line_width : int
The width of the lines when representation='wireframe'.
normals : array, shape (n_vertices, 3)
The array containing the normal of each vertex.
polygon_offset : float
If not None, the factor used to resolve coincident topology.
kwargs : args
The arguments to pass to triangular_mesh
Returns
-------
surface :
Handle of the mesh in the scene.
"""
pass
@abstractclassmethod
def contour(self, surface, scalars, contours, width=1.0, opacity=1.0,
vmin=None, vmax=None, colormap=None,
normalized_colormap=False, kind='line', color=None):
"""Add a contour in the scene.
Parameters
----------
surface : surface object
The mesh to use as support for contour.
scalars : ndarray, shape (n_vertices,)
The scalar values associated with the vertices.
contours : int | list
Specifying a list of values will only give the requested contours.
width : float
The width of the lines or radius of the tubes.
opacity : float
The opacity of the contour.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap :
The colormap to use.
normalized_colormap : bool
Specify if the values of the colormap are between 0 and 1.
kind : 'line' | 'tube'
The type of the primitives to use to display the contours.
color :
The color of the mesh as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
"""
pass
@abstractclassmethod
def surface(self, surface, color=None, opacity=1.0,
vmin=None, vmax=None, colormap=None,
normalized_colormap=False, scalars=None,
backface_culling=False, polygon_offset=None):
"""Add a surface in the scene.
Parameters
----------
surface : surface object
The information describing the surface.
color : tuple | str
The color of the surface as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
opacity : float
The opacity of the surface.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap :
The colormap to use.
scalars : ndarray, shape (n_vertices,)
The scalar values associated with the vertices.
backface_culling : bool
If True, enable backface culling on the surface.
polygon_offset : float
If not None, the factor used to resolve coincident topology.
"""
pass
@abstractclassmethod
def sphere(self, center, color, scale, opacity=1.0,
resolution=8, backface_culling=False,
radius=None):
"""Add sphere in the scene.
Parameters
----------
center : ndarray, shape(n_center, 3)
The list of centers to use for the sphere(s).
color : tuple | str
The color of the sphere as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
scale : float
The scaling applied to the spheres. The given value specifies
the maximum size in drawing units.
opacity : float
The opacity of the sphere(s).
resolution : int
The resolution of the sphere created. This is the number
of divisions along theta and phi.
backface_culling : bool
If True, enable backface culling on the sphere(s).
radius : float | None
Replace the glyph scaling by a fixed radius value for each
sphere (not supported by mayavi).
"""
pass
@abstractclassmethod
def tube(self, origin, destination, radius=0.001, color='white',
scalars=None, vmin=None, vmax=None, colormap='RdBu',
normalized_colormap=False, reverse_lut=False):
"""Add tube in the scene.
Parameters
----------
origin : array, shape(n_lines, 3)
The coordinates of the first end of the tube(s).
destination : array, shape(n_lines, 3)
The coordinates of the other end of the tube(s).
radius : float
The radius of the tube(s).
color : tuple | str
The color of the tube as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
scalars : array, shape (n_quivers,) | None
The optional scalar data to use.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap :
The colormap to use.
normalized_colormap : bool
Specify if the values of the colormap are between 0 and 1.
reverse_lut : bool
If True, reverse the lookup table.
Returns
-------
surface :
Handle of the tube in the scene.
"""
pass
@abstractclassmethod
def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8,
glyph_height=None, glyph_center=None, glyph_resolution=None,
opacity=1.0, scale_mode='none', scalars=None,
backface_culling=False, colormap=None, vmin=None, vmax=None,
line_width=2., name=None):
"""Add quiver3d in the scene.
Parameters
----------
x : array, shape (n_quivers,)
The X component of the position of the quiver.
y : array, shape (n_quivers,)
The Y component of the position of the quiver.
z : array, shape (n_quivers,)
The Z component of the position of the quiver.
u : array, shape (n_quivers,)
The last X component of the quiver.
v : array, shape (n_quivers,)
The last Y component of the quiver.
w : array, shape (n_quivers,)
The last Z component of the quiver.
color : tuple | str
The color of the quiver as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
scale : float
The scaling applied to the glyphs. The size of the glyph
is by default calculated from the inter-glyph spacing.
The given value specifies the maximum glyph size in drawing units.
mode : 'arrow', 'cone' or 'cylinder'
The type of the quiver.
resolution : int
The resolution of the glyph created. Depending on the type of
glyph, it represents the number of divisions in its geometric
representation.
glyph_height : float
The height of the glyph used with the quiver.
glyph_center : tuple
The center of the glyph used with the quiver: (x, y, z).
glyph_resolution : float
The resolution of the glyph used with the quiver.
opacity : float
The opacity of the quiver.
scale_mode : 'vector', 'scalar' or 'none'
The scaling mode for the glyph.
scalars : array, shape (n_quivers,) | None
The optional scalar data to use.
backface_culling : bool
If True, enable backface culling on the quiver.
colormap :
The colormap to use.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
line_width : float
The width of the 2d arrows.
"""
pass
@abstractclassmethod
def text2d(self, x_window, y_window, text, size=14, color='white'):
"""Add 2d text in the scene.
Parameters
----------
x_window : float
The X component to use as position of the text in the
window coordinates system (window_width, window_height).
y_window : float
The Y component to use as position of the text in the
window coordinates system (window_width, window_height).
text : str
The content of the text.
size : int
The size of the font.
color : tuple | str
The color of the text as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
"""
pass
@abstractclassmethod
def text3d(self, x, y, z, text, width, color='white'):
"""Add 2d text in the scene.
Parameters
----------
x : float
The X component to use as position of the text.
y : float
The Y component to use as position of the text.
z : float
The Z component to use as position of the text.
text : str
The content of the text.
width : float
The width of the text.
color : tuple | str
The color of the text as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
"""
pass
@abstractclassmethod
def scalarbar(self, source, color="white", title=None, n_labels=4,
bgcolor=None):
"""Add a scalar bar in the scene.
Parameters
----------
source :
The object of the scene used for the colormap.
color :
The color of the label text.
title : str | None
The title of the scalar bar.
n_labels : int | None
The number of labels to display on the scalar bar.
bgcolor :
The color of the background when there is transparency.
"""
pass
@abstractclassmethod
def show(self):
"""Render the scene."""
pass
@abstractclassmethod
def close(self):
"""Close the scene."""
pass
@abstractclassmethod
def set_camera(self, azimuth=None, elevation=None, distance=None,
focalpoint=None, roll=None, reset_camera=True):
"""Configure the camera of the scene.
Parameters
----------
azimuth : float
The azimuthal angle of the camera.
elevation : float
The zenith angle of the camera.
distance : float
The distance to the focal point.
focalpoint : tuple
The focal point of the camera: (x, y, z).
roll : float
The rotation of the camera along its axis.
reset_camera : bool
If True, reset the camera properties beforehand.
"""
pass
@abstractclassmethod
def reset_camera(self):
"""Reset the camera properties."""
pass
@abstractclassmethod
def screenshot(self, mode='rgb', filename=None):
"""Take a screenshot of the scene.
Parameters
----------
mode : str
Either 'rgb' or 'rgba' for values to return.
Default is 'rgb'.
filename : str | None
If not None, save the figure to the disk.
"""
pass
@abstractclassmethod
def project(self, xyz, ch_names):
"""Convert 3d points to a 2d perspective.
Parameters
----------
xyz : array, shape(n_points, 3)
The points to project.
ch_names : array, shape(n_points,)
Names of the channels.
"""
pass
@abstractclassmethod
def enable_depth_peeling(self):
"""Enable depth peeling."""
pass
@abstractclassmethod
def remove_mesh(self, mesh_data):
"""Remove the given mesh from the scene.
Parameters
----------
mesh_data : tuple | Surface
The mesh to remove.
"""
pass
class _AbstractToolBar(ABC):
@abstractmethod
def _tool_bar_load_icons(self):
pass
@abstractmethod
def _tool_bar_initialize(self, name="default", window=None):
pass
@abstractmethod
def _tool_bar_add_button(self, name, desc, func, icon_name=None,
shortcut=None):
pass
@abstractmethod
def _tool_bar_update_button_icon(self, name, icon_name):
pass
@abstractmethod
def _tool_bar_add_text(self, name, value, placeholder):
pass
@abstractmethod
def _tool_bar_add_spacer(self):
pass
@abstractmethod
def _tool_bar_add_file_button(self, name, desc, func, shortcut=None):
pass
@abstractmethod
def _tool_bar_add_play_button(self, name, desc, func, shortcut=None):
pass
@abstractmethod
def _tool_bar_set_theme(self, theme):
pass
class _AbstractDock(ABC):
@abstractmethod
def _dock_initialize(self, window=None):
pass
@abstractmethod
def _dock_finalize(self):
pass
@abstractmethod
def _dock_show(self):
pass
@abstractmethod
def _dock_hide(self):
pass
@abstractmethod
def _dock_add_stretch(self, layout):
pass
@abstractmethod
def _dock_add_layout(self, vertical=True):
pass
@abstractmethod
def _dock_add_label(self, value, align=False, layout=None):
pass
@abstractmethod
def _dock_add_button(self, name, callback, layout=None):
pass
@abstractmethod
def _dock_named_layout(self, name, layout, compact):
pass
@abstractmethod
def _dock_add_slider(self, name, value, rng, callback,
compact=True, double=False, layout=None):
pass
@abstractmethod
def _dock_add_spin_box(self, name, value, rng, callback,
compact=True, double=True, layout=None):
pass
@abstractmethod
def _dock_add_combo_box(self, name, value, rng,
callback, compact=True, layout=None):
pass
@abstractmethod
def _dock_add_group_box(self, name, layout=None):
pass
class _AbstractMenuBar(ABC):
@abstractmethod
def _menu_initialize(self, window=None):
pass
@abstractmethod
def _menu_add_submenu(self, name, desc):
pass
@abstractmethod
def _menu_add_button(self, menu_name, name, desc, func):
pass
class _AbstractStatusBar(ABC):
@abstractmethod
def _status_bar_initialize(self, window=None):
pass
@abstractmethod
def _status_bar_add_label(self, value, stretch=0):
pass
@abstractmethod
def _status_bar_add_progress_bar(self, stretch=0):
pass
@abstractmethod
def _status_bar_update(self):
pass
class _AbstractPlayback(ABC):
@abstractmethod
def _playback_initialize(self, func, timeout, value, rng,
time_widget, play_widget):
pass
class _AbstractLayout(ABC):
@abstractmethod
def _layout_initialize(self, max_width):
pass
@abstractmethod
def _layout_add_widget(self, layout, widget, stretch=0):
pass
class _AbstractWidget(ABC):
def __init__(self, widget):
self._widget = widget
@property
def widget(self):
return self._widget
@abstractmethod
def set_value(self, value):
pass
@abstractmethod
def get_value(self):
pass
@abstractmethod
def set_range(self, rng):
pass
@abstractmethod
def show(self):
pass
@abstractmethod
def hide(self):
pass
@abstractmethod
def update(self, repaint=True):
pass
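# Illustrative sketch, not an actual MNE backend: a minimal concrete class
# satisfying the _AbstractWidget contract (all names below are hypothetical).
class _DictWidget(_AbstractWidget):
    """Toy widget storing its state in a dict, for illustration only."""
    def __init__(self):
        super().__init__(widget={'value': None, 'range': None,
                                 'visible': True})
    def set_value(self, value):
        self._widget['value'] = value
    def get_value(self):
        return self._widget['value']
    def set_range(self, rng):
        self._widget['range'] = rng
    def show(self):
        self._widget['visible'] = True
    def hide(self):
        self._widget['visible'] = False
    def update(self, repaint=True):
        pass  # nothing to repaint in this toy backend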
class _AbstractMplInterface(ABC):
@abstractmethod
def _mpl_initialize():
pass
class _AbstractMplCanvas(ABC):
def __init__(self, width, height, dpi):
"""Initialize the MplCanvas."""
from matplotlib import rc_context
from matplotlib.figure import Figure
# prefer constrained layout here but live with tight_layout otherwise
context = nullcontext
self._extra_events = ('resize',)
try:
context = rc_context({'figure.constrained_layout.use': True})
self._extra_events = ()
except KeyError:
pass
with context:
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.fig.add_subplot(111)
self.axes.set(xlabel='Time (sec)', ylabel='Activation (AU)')
self.manager = None
def _connect(self):
for event in ('button_press', 'motion_notify') + self._extra_events:
self.canvas.mpl_connect(
event + '_event', getattr(self, 'on_' + event))
def plot(self, x, y, label, update=True, **kwargs):
"""Plot a curve."""
line, = self.axes.plot(
x, y, label=label, **kwargs)
if update:
self.update_plot()
return line
def plot_time_line(self, x, label, update=True, **kwargs):
"""Plot the vertical line."""
line = self.axes.axvline(x, label=label, **kwargs)
if update:
self.update_plot()
return line
def update_plot(self):
"""Update the plot."""
with warnings.catch_warnings(record=True):
warnings.filterwarnings('ignore', 'constrained_layout')
self.canvas.draw()
def set_color(self, bg_color, fg_color):
"""Set the widget colors."""
self.axes.set_facecolor(bg_color)
self.axes.xaxis.label.set_color(fg_color)
self.axes.yaxis.label.set_color(fg_color)
self.axes.spines['top'].set_color(fg_color)
self.axes.spines['bottom'].set_color(fg_color)
self.axes.spines['left'].set_color(fg_color)
self.axes.spines['right'].set_color(fg_color)
self.axes.tick_params(axis='x', colors=fg_color)
self.axes.tick_params(axis='y', colors=fg_color)
self.fig.patch.set_facecolor(bg_color)
def show(self):
"""Show the canvas."""
if self.manager is None:
self.canvas.show()
else:
self.manager.show()
def close(self):
"""Close the canvas."""
self.canvas.close()
def clear(self):
"""Clear internal variables."""
self.close()
self.axes.clear()
self.fig.clear()
self.canvas = None
self.manager = None
def on_resize(self, event):
"""Handle resize events."""
tight_layout(fig=self.axes.figure)
class _AbstractBrainMplCanvas(_AbstractMplCanvas):
def __init__(self, brain, width, height, dpi):
"""Initialize the MplCanvas."""
super().__init__(width, height, dpi)
self.brain = brain
self.time_func = brain.callbacks["time"]
def update_plot(self):
"""Update the plot."""
leg = self.axes.legend(
prop={'family': 'monospace', 'size': 'small'},
framealpha=0.5, handlelength=1.,
facecolor=self.brain._bg_color)
for text in leg.get_texts():
text.set_color(self.brain._fg_color)
super().update_plot()
def on_button_press(self, event):
"""Handle button presses."""
# left click (and maybe drag) in progress in axes
if (event.inaxes != self.axes or
event.button != 1):
return
self.time_func(
event.xdata, update_widget=True, time_as_index=False)
on_motion_notify = on_button_press # for now they can be the same
def clear(self):
"""Clear internal variables."""
super().clear()
self.brain = None
class _AbstractWindow(ABC):
def _window_initialize(self):
self._window = None
self._interactor = None
self._mplcanvas = None
self._show_traces = None
self._separate_canvas = None
self._interactor_fraction = None
@abstractmethod
def _window_close_connect(self, func):
pass
@abstractmethod
def _window_get_dpi(self):
pass
@abstractmethod
def _window_get_size(self):
pass
def _window_get_mplcanvas_size(self, fraction):
ratio = (1 - fraction) / fraction
dpi = self._window_get_dpi()
w, h = self._window_get_size()
h /= ratio
return (w / dpi, h / dpi)
@abstractmethod
def _window_get_simple_canvas(self, width, height, dpi):
pass
@abstractmethod
def _window_get_mplcanvas(self, brain, interactor_fraction, show_traces,
separate_canvas):
pass
@abstractmethod
def _window_adjust_mplcanvas_layout(self):
pass
@abstractmethod
def _window_get_cursor(self):
pass
@abstractmethod
def _window_set_cursor(self, cursor):
pass
@abstractmethod
def _window_new_cursor(self, name):
pass
@abstractmethod
def _window_ensure_minimum_sizes(self):
pass
@abstractmethod
def _window_set_theme(self, theme):
pass
|
{
"content_hash": "d0fe24f0db78f777450602a71b1d1f2d",
"timestamp": "",
"source": "github",
"line_count": 809,
"max_line_length": 79,
"avg_line_length": 30.826946847960446,
"alnum_prop": 0.5697902883034605,
"repo_name": "rkmaddox/mne-python",
"id": "11f4c79ecf1e03eabae180587b978c7b84c7c3fd",
"size": "24939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/viz/backends/_abstract.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3114"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4400215"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from inspect import signature
def actual_kwargs(function):
"""
Decorator that provides the wrapped function with an attribute
'actual_kwargs'
containing just those keyword arguments actually passed in to the function.
References
----------
http://stackoverflow.com/questions/1408818/getting-the-the-keyword-arguments-actually-passed-to-a-python-method
Notes
-----
We override the signature of the decorated function to ensure it will be
displayed correctly in sphinx
"""
original_signature = signature(function)
def inner(*args, **kwargs):
inner.actual_kwargs = kwargs
return function(*args, **kwargs)
inner.__signature__ = original_signature
return inner
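# Illustrative usage sketch: only keyword arguments the caller actually
# passed are recorded; defaulted parameters never appear.
@actual_kwargs
def _fit(model, tol=1e-4, max_iter=100):
    return _fit.actual_kwargs
# _fit('ols', tol=1e-2) returns {'tol': 0.01}; max_iter is absent because
# its default value was used rather than passed explicitly.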
|
{
"content_hash": "966a91c62a9c3ffebe2bfdb38fd0860e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 25.82758620689655,
"alnum_prop": 0.6875834445927904,
"repo_name": "Dekken/tick",
"id": "46a02fe416ff50232ed6baf53db2c33e1df55299",
"size": "774",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tick/base/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6660"
},
{
"name": "C++",
"bytes": "1181742"
},
{
"name": "CMake",
"bytes": "22073"
},
{
"name": "Dockerfile",
"bytes": "2017"
},
{
"name": "Python",
"bytes": "1450866"
},
{
"name": "Shell",
"bytes": "33446"
}
],
"symlink_target": ""
}
|
from iris_sdk.models.maps.base_map import BaseMap
class SipPeerTelephoneNumbersMap(BaseMap):
sip_peer_telephone_number = None
|
{
"content_hash": "ab6ba7ac1c9e227a91bc1c3e2e52862d",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 49,
"avg_line_length": 26.2,
"alnum_prop": 0.8015267175572519,
"repo_name": "bandwidthcom/python-bandwidth-iris",
"id": "a9b1e917803d73c738d17d5c7d1d146999743b3d",
"size": "154",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "iris_sdk/models/maps/sip_peer_telephone_numbers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "308732"
}
],
"symlink_target": ""
}
|
import imp
import sys
import os
from conans.model.ref import ConanFileReference
from conans.client.recorder.action_recorder import ActionRecorder
from conans.model.requires import Requirement
class ConanPythonRequire(object):
def __init__(self, proxy, range_resolver):
self._modules = {}
self._proxy = proxy
self._range_resolver = range_resolver
self._references = []
@property
def references(self):
result = self._references
self._references = []
return result
def __call__(self, require):
try:
m, reference = self._modules[require]
self._references.append(reference)
return m
except KeyError:
r = ConanFileReference.loads(require)
requirement = Requirement(r)
self._range_resolver.resolve(requirement, "python_require", update=False,
remote_name=None)
r = requirement.conan_reference
result = self._proxy.get_recipe(r, False, False, remote_name=None,
recorder=ActionRecorder())
path, _, _, reference = result
self._references.append(reference)
try:
sys.path.append(os.path.dirname(path))
module = imp.load_source("python_require", path)
finally:
sys.path.pop()
self._modules[require] = module, reference
return module
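# Illustrative sketch (reference and module contents are hypothetical):
# in legacy conan, recipes consume this helper roughly as
#
#     from conans import python_requires
#     base = python_requires("common-utils/0.1@user/channel")
#     class MyPackage(base.BaseConanFile):
#         pass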
|
{
"content_hash": "8d11b8b26bae535836f6c3d31efff214",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 85,
"avg_line_length": 34.31818181818182,
"alnum_prop": 0.5735099337748344,
"repo_name": "luckielordie/conan",
"id": "9929a5430a23c0433592bcdecf56a6a6e9379dbc",
"size": "1510",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "conans/client/graph/python_requires.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1100"
},
{
"name": "Dockerfile",
"bytes": "3392"
},
{
"name": "Groovy",
"bytes": "7992"
},
{
"name": "Python",
"bytes": "3232431"
},
{
"name": "Shell",
"bytes": "1864"
}
],
"symlink_target": ""
}
|
"""
pyClanSphere.utils.account
~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements various functions used by the account interface.
:copyright: (c) 2009 - 2010 by the pyClanSphere Team,
see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from pyClanSphere.privileges import ENTER_ACCOUNT_PANEL, require_privilege
from pyClanSphere.utils import local
from pyClanSphere.i18n import _
def flash(msg, type='info'):
"""Add a message to the message flash buffer.
The default message type is "info", other possible values are
"add", "remove", "error", "ok" and "configure". The message type affects
the icon and visual appearance.
The flashed messages appear only in the account interface!
"""
assert type in \
('info', 'add', 'remove', 'error', 'ok', 'configure', 'warning')
if type == 'error':
msg = (u'<strong>%s:</strong> ' % _('Error')) + msg
if type == 'warning':
msg = (u'<strong>%s:</strong> ' % _('Warning')) + msg
local.request.session.setdefault('account/flashed_messages', []).\
append((type, msg))
def require_account_privilege(expr=None):
"""Works like `require_privilege` but checks if the rule for
`ENTER_ADMIN_PANEL` exists as well.
"""
if expr:
expr = ENTER_ACCOUNT_PANEL & expr
else:
expr = ENTER_ACCOUNT_PANEL
return require_privilege(expr)
def add_account_urls(app, path, idname, listview, editview=None, deleteview=None):
"""Generic admin backend routing function
As it has become pretty common to use
/account/foo/ and
/account/foo/page/2 for listings
/account/foo/1 for editing
/account/foo/new for creating with None to edit function and
/account/foo/1/delete for removals
just generate the matching urls and endpoints
editview and/or deleteview may be omitted if they're not needed
"""
app.add_url_rule('/%s/' % path, prefix='account', defaults={'page': 1}, endpoint='account/%s' % path,
view=listview)
app.add_url_rule('/%s/page/<int:page>' % path, prefix='account', endpoint='account/%s' % path)
if editview:
app.add_url_rule('/%s/new' % path, prefix='account', endpoint='account/%s/new' % path,
view=editview)
app.add_url_rule('/%s/<int:%s>' % (path, idname), prefix='account', endpoint='account/%s/edit' % path,
view=editview)
if deleteview:
app.add_url_rule('/%s/<int:%s>/delete' % (path, idname), prefix='account', endpoint='account/%s/delete' % path,
view=deleteview)
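# Illustrative sketch (view names are hypothetical): wiring a "members"
# section yields /account/members/, /account/members/page/2,
# /account/members/new, /account/members/5 and /account/members/5/delete:
#
#     add_account_urls(app, 'members', 'member_id', listview=member_list,
#                      editview=member_edit, deleteview=member_delete)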
|
{
"content_hash": "36ed888e0174e9290912e89fee5ac311",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 119,
"avg_line_length": 36.78082191780822,
"alnum_prop": 0.6171322160148975,
"repo_name": "jokey2k/pyClanSphere",
"id": "85421492fc4aac22784cefa6272e68fe6a772a7f",
"size": "2709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyClanSphere/utils/account.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "188174"
},
{
"name": "Python",
"bytes": "891594"
}
],
"symlink_target": ""
}
|
'''
Fibonacci numbers
The Fibonacci numbers form a sequence of integers defined recursively
in the following way. The first two numbers in the Fibonacci sequence are 0 and 1,
and each subsequent number is the sum of the previous two.
'''
def fibonacci(N):
# Bottom-up table: fib(0) = 0, fib(1) = 1
if N == 0:
return 0  # guard: the table below needs at least two entries
L = [0] * (N + 1)
L[1] = 1
for i in range(2, N+1):
L[i] = L[i-1] + L[i-2]
return L[N]
print(fibonacci(10))
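# A constant-space variant of the same recurrence: only the last two values
# are ever needed, so the full table above is not strictly required.
def fibonacci_iterative(N):
    a, b = 0, 1
    for _ in range(N):
        a, b = b, a + b
    return a
print(fibonacci_iterative(10))  # 55, same as above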
|
{
"content_hash": "981e7a5ee96fa241248206aa7009aec8",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 82,
"avg_line_length": 20.761904761904763,
"alnum_prop": 0.6697247706422018,
"repo_name": "barissimsek/gopython",
"id": "d6b5ed26bd07e2b16c0cf26367dbaf083dce7595",
"size": "436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cs/fib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23623"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Machinecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import MachinecoinTestFramework
from test_framework.util import assert_raises_rpc_error
class DeprecatedRpcTest(MachinecoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[], ["-deprecatedrpc=validateaddress", "-deprecatedrpc=accounts"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# This test should be used to verify correct behaviour of deprecated
# RPC methods with and without the -deprecatedrpc flags. For example:
#
# self.log.info("Make sure that -deprecatedrpc=createmultisig allows it to take addresses")
# assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, [self.nodes[0].getnewaddress()])
# self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
self.log.info("Test validateaddress deprecation")
SOME_ADDRESS = "mnvGjUy3NMj67yJ6gkK5o9e5RS33Z2Vqcu" # This is just some random address to pass as a parameter to validateaddress
dep_validate_address = self.nodes[0].validateaddress(SOME_ADDRESS)
assert "ismine" not in dep_validate_address
not_dep_val = self.nodes[1].validateaddress(SOME_ADDRESS)
assert "ismine" in not_dep_val
self.log.info("Test accounts deprecation")
# The following account RPC methods are deprecated:
# - getaccount
# - getaccountaddress
# - getaddressesbyaccount
# - getreceivedbyaccount
# - listaccounts
# - listreceivedbyaccount
# - move
# - setaccount
#
# The following 'label' RPC methods are usable both with and without the
# -deprecatedrpc=accounts switch enabled.
# - getaddressesbylabel
# - getreceivedbylabel
# - listlabels
# - listreceivedbylabel
# - setlabel
#
address0 = self.nodes[0].getnewaddress()
self.nodes[0].generatetoaddress(101, address0)
self.sync_all()
address1 = self.nodes[1].getnewaddress()
self.nodes[1].generatetoaddress(101, address1)
self.log.info("- getaccount")
assert_raises_rpc_error(-32, "getaccount is deprecated", self.nodes[0].getaccount, address0)
self.nodes[1].getaccount(address1)
self.log.info("- setaccount")
assert_raises_rpc_error(-32, "setaccount is deprecated", self.nodes[0].setaccount, address0, "label0")
self.nodes[1].setaccount(address1, "label1")
self.log.info("- setlabel")
self.nodes[0].setlabel(address0, "label0")
self.nodes[1].setlabel(address1, "label1")
self.log.info("- getaccountaddress")
assert_raises_rpc_error(-32, "getaccountaddress is deprecated", self.nodes[0].getaccountaddress, "label0")
self.nodes[1].getaccountaddress("label1")
self.log.info("- getaddressesbyaccount")
assert_raises_rpc_error(-32, "getaddressesbyaccount is deprecated", self.nodes[0].getaddressesbyaccount, "label0")
self.nodes[1].getaddressesbyaccount("label1")
self.log.info("- getaddressesbylabel")
self.nodes[0].getaddressesbylabel("label0")
self.nodes[1].getaddressesbylabel("label1")
self.log.info("- getreceivedbyaccount")
assert_raises_rpc_error(-32, "getreceivedbyaccount is deprecated", self.nodes[0].getreceivedbyaccount, "label0")
self.nodes[1].getreceivedbyaccount("label1")
self.log.info("- getreceivedbylabel")
self.nodes[0].getreceivedbylabel("label0")
self.nodes[1].getreceivedbylabel("label1")
self.log.info("- listaccounts")
assert_raises_rpc_error(-32, "listaccounts is deprecated", self.nodes[0].listaccounts)
self.nodes[1].listaccounts()
self.log.info("- listlabels")
self.nodes[0].listlabels()
self.nodes[1].listlabels()
self.log.info("- listreceivedbyaccount")
assert_raises_rpc_error(-32, "listreceivedbyaccount is deprecated", self.nodes[0].listreceivedbyaccount)
self.nodes[1].listreceivedbyaccount()
self.log.info("- listreceivedbylabel")
self.nodes[0].listreceivedbylabel()
self.nodes[1].listreceivedbylabel()
self.log.info("- move")
assert_raises_rpc_error(-32, "move is deprecated", self.nodes[0].move, "label0", "label0b", 10)
self.nodes[1].move("label1", "label1b", 10)
if __name__ == '__main__':
DeprecatedRpcTest().main()
|
{
"content_hash": "8ef22623091c228b4439d845e2b6868b",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 137,
"avg_line_length": 43.531531531531535,
"alnum_prop": 0.6632864238410596,
"repo_name": "machinecoin-project/machinecoin",
"id": "d2d835163c315f79fdd23fee58fbd1e293c7f339",
"size": "4834",
"binary": false,
"copies": "2",
"ref": "refs/heads/0.17",
"path": "test/functional/rpc_deprecated.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "C",
"bytes": "342684"
},
{
"name": "C++",
"bytes": "3521961"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18048"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "Makefile",
"bytes": "66797"
},
{
"name": "Objective-C",
"bytes": "2023"
},
{
"name": "Objective-C++",
"bytes": "7246"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "211880"
},
{
"name": "QMake",
"bytes": "2019"
},
{
"name": "Shell",
"bytes": "40513"
}
],
"symlink_target": ""
}
|
class ZellenKultur:
def __init__(self, listeDerZellen1=None):
# avoid a shared mutable default argument between instances
self.listeDerZellen = listeDerZellen1 if listeDerZellen1 is not None else []
def anzahlLebenderZellen(self):
anzahlZellenDieLeben = 2
return anzahlZellenDieLeben
def main(self):
return
if __name__ == '__main__':
pass
|
{
"content_hash": "bb381b818877a39ab8470625f8dd9852",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 45,
"avg_line_length": 19.0625,
"alnum_prop": 0.5934426229508196,
"repo_name": "hemmerling/codingdojo",
"id": "f5d9115cf6c3d09b4e8623a5b5d82c1a714cee42",
"size": "661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/game_of_life/python_coderetreat_socramob/cr_socramob07/zellenkultur.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "8159"
},
{
"name": "ASP",
"bytes": "4306"
},
{
"name": "Batchfile",
"bytes": "180"
},
{
"name": "C#",
"bytes": "48393"
},
{
"name": "Gherkin",
"bytes": "197"
},
{
"name": "HTML",
"bytes": "122607"
},
{
"name": "Haskell",
"bytes": "597"
},
{
"name": "Java",
"bytes": "77787"
},
{
"name": "JavaScript",
"bytes": "114736"
},
{
"name": "PHP",
"bytes": "2347"
},
{
"name": "PowerShell",
"bytes": "50481"
},
{
"name": "Python",
"bytes": "109650"
},
{
"name": "Ruby",
"bytes": "5256"
},
{
"name": "Swift",
"bytes": "2587"
},
{
"name": "Tcl",
"bytes": "13019"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from furl import furl
from django.views.generic import FormView, DeleteView
from django.core.mail import send_mail
from django.shortcuts import redirect
from django.http import Http404, HttpResponse
from website.settings import SUPPORT_EMAIL, DOMAIN
from website.security import random_string
from framework.auth import get_user
from website.project.model import User
from website.mailchimp_utils import subscribe_on_confirm
from admin.base.views import GuidFormView, GuidView
from admin.users.templatetags.user_extras import reverse_user
from admin.base.utils import OSFAdmin
from admin.common_auth.logs import (
update_admin_log,
USER_2_FACTOR,
USER_EMAILED,
USER_REMOVED,
USER_RESTORED,
)
from admin.users.serializers import serialize_user
from admin.users.forms import EmailResetForm
class UserDeleteView(OSFAdmin, DeleteView):
""" Allow authorised admin user to remove/restore user
Interface with OSF database. No admin models.
"""
template_name = 'users/remove_user.html'
context_object_name = 'user'
object = None
def delete(self, request, *args, **kwargs):
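# Toggle semantics: a first delete disables the account; deleting an
# already-disabled account re-enables it instead.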
try:
user = self.get_object()
if user.date_disabled is None:
user.disable_account()
user.is_registered = False
flag = USER_REMOVED
message = 'User account {} disabled'.format(user.pk)
else:
user.date_disabled = None
subscribe_on_confirm(user)
user.is_registered = True
flag = USER_RESTORED
message = 'User account {} reenabled'.format(user.pk)
user.save()
except AttributeError:
raise Http404(
'{} with id "{}" not found.'.format(
self.context_object_name.title(),
self.kwargs.get('guid')
))
update_admin_log(
user_id=self.request.user.id,
object_id=user.pk,
object_repr='User',
message=message,
action_flag=flag
)
return redirect(reverse_user(self.kwargs.get('guid')))
def get_context_data(self, **kwargs):
context = {}
context.setdefault('guid', kwargs.get('object').pk)
return super(UserDeleteView, self).get_context_data(**context)
def get_object(self, queryset=None):
return User.load(self.kwargs.get('guid'))
class User2FactorDeleteView(UserDeleteView):
""" Allow authorised admin user to remove 2 factor authentication.
Interface with OSF database. No admin models.
"""
template_name = 'users/remove_2_factor.html'
def delete(self, request, *args, **kwargs):
user = self.get_object()
try:
user.delete_addon('twofactor')
except AttributeError:
raise Http404(
'{} with id "{}" not found.'.format(
self.context_object_name.title(),
self.kwargs.get('guid')
))
update_admin_log(
user_id=self.request.user.id,
object_id=user.pk,
object_repr='User',
message='Removed 2 factor auth for user {}'.format(user.pk),
action_flag=USER_2_FACTOR
)
return redirect(reverse_user(self.kwargs.get('guid')))
class UserFormView(OSFAdmin, GuidFormView):
template_name = 'users/search.html'
object_type = 'user'
@property
def success_url(self):
return reverse_user(self.guid)
class UserView(OSFAdmin, GuidView):
template_name = 'users/user.html'
context_object_name = 'user'
def get_object(self, queryset=None):
return serialize_user(User.load(self.kwargs.get('guid')))
class ResetPasswordView(OSFAdmin, FormView):
form_class = EmailResetForm
template_name = 'users/reset.html'
context_object_name = 'user'
def get_context_data(self, **kwargs):
user = User.load(self.kwargs.get('guid'))
try:
self.initial.setdefault('emails', [(r, r) for r in user.emails])
except AttributeError:
raise Http404(
'{} with id "{}" not found.'.format(
self.context_object_name.title(),
self.kwargs.get('guid')
))
kwargs.setdefault('guid', user.pk)
return super(ResetPasswordView, self).get_context_data(**kwargs)
def form_valid(self, form):
email = form.cleaned_data.get('emails')
user = get_user(email)
if user is None or user.pk != self.kwargs.get('guid'):
return HttpResponse(
'{} with id "{}" and email "{}" not found.'.format(
self.context_object_name.title(),
self.kwargs.get('guid'),
email
),
status=409
)
reset_abs_url = furl(DOMAIN)
user.verification_key = random_string(20)
user.save()
reset_abs_url.path.add(('resetpassword/{}'.format(user.verification_key)))
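# resulting link has the form <DOMAIN>/resetpassword/<20-character key>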
send_mail(
subject='Reset OSF Password',
message='Follow this link to reset your password: {}'.format(
reset_abs_url.url
),
from_email=SUPPORT_EMAIL,
recipient_list=[email]
)
update_admin_log(
user_id=self.request.user.id,
object_id=user.pk,
object_repr='User',
message='Emailed user {} a reset link.'.format(user.pk),
action_flag=USER_EMAILED
)
return super(ResetPasswordView, self).form_valid(form)
@property
def success_url(self):
return reverse_user(self.kwargs.get('guid'))
|
{
"content_hash": "ab89060d8bff8c239850a13adf2f7d1c",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 82,
"avg_line_length": 32.84180790960452,
"alnum_prop": 0.5890246000344056,
"repo_name": "amyshi188/osf.io",
"id": "33b4aabf9b8d0f0bbd99b4dc27f82037c2da96f1",
"size": "5813",
"binary": false,
"copies": "8",
"ref": "refs/heads/develop",
"path": "admin/users/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "159639"
},
{
"name": "HTML",
"bytes": "110361"
},
{
"name": "JavaScript",
"bytes": "1649322"
},
{
"name": "Mako",
"bytes": "645108"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "5839557"
}
],
"symlink_target": ""
}
|
import os
from distutils.core import setup
def find_packages(root):
# so we don't depend on setuptools; from the Storm ORM setup.py
packages = []
for directory, subdirectories, files in os.walk(root):
if '__init__.py' in files:
packages.append(directory.replace(os.sep, '.'))
return packages
setup(
name = 'django-tables',
version = '0.1',
description = 'Render QuerySets as tabular data in Django.',
author = 'Michael Elsdoerfer',
author_email = 'michael@elsdoerfer.info',
license = 'BSD',
url = 'http://launchpad.net/django-tables',
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries',
],
packages = find_packages('django_tables'),
)
|
{
"content_hash": "5db13e5033a9c7ceba3ec80ff328fba5",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 67,
"avg_line_length": 34.53125,
"alnum_prop": 0.5945701357466063,
"repo_name": "dantium/django-tables",
"id": "72ce82a9c252d17841ce388d6845702c04f428fa",
"size": "1105",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "80259"
}
],
"symlink_target": ""
}
|
"""Redis pool using one module in different files"""
import json
import redis
import time
import os
from functools import wraps
from kits.log import get_logger
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
REDIS_LOGGER = get_logger('redis')
def redis_excepts(orig_func):
"""try excepts around each query"""
@wraps(orig_func)
def wrapper(*args, **kwargs):
try:
return orig_func(*args, **kwargs)
except redis.exceptions.ConnectionError as err:
REDIS_LOGGER.error(err)
except redis.exceptions.TimeoutError as err:
REDIS_LOGGER.error(err)
except Exception:
REDIS_LOGGER.critical("Exception", exc_info=True)
raise Exception("Error on redis. Check the log file")
return wrapper
def get_connection():
"""Expose api to context"""
if not Redispool.redis_pool:
try:
with open(ROOT_PATH + '/config/redis.conf') as config:
conf = json.load(config)
Redispool.redis_pool = redis.ConnectionPool(
host=conf['redis']['host'],
port=conf['redis']['port'],
db=conf['redis']['db'],
password=conf['redis']['password']
)
except IOError:
raise Exception(ROOT_PATH + '/config/redis.conf does not exist')
return redis.StrictRedis(connection_pool=Redispool.redis_pool)
class Redispool(object):
"""Full function of Redis Beckend
"""
redis_pool = None
def __init__(self, *args, **kwargs):
self.queue = None
if 'queue' in kwargs:
self.queue = kwargs['queue']
#################################################
# Commands in key-value
#################################################
@redis_excepts
def set(self, key, value):
"""Set key and value"""
REDIS_LOGGER.info("set %s %s", key, value)
return get_connection().set(key, value)
@redis_excepts
def get(self, key):
"""Get from redis using key"""
REDIS_LOGGER.info("get %s", key)
return get_connection().get(key)
@redis_excepts
def setnx(self, key, value):
"""set value into redis if key does not exist
"""
REDIS_LOGGER.info("setnx %s %s", key, value)
return get_connection().setnx(key, value)
@redis_excepts
def setex(self, key, ttl, value):
"""set key-value into redis with ttl
"""
REDIS_LOGGER.info("setex %s, %s, %s", key, ttl, value)
return get_connection().setex(key, ttl, value)
@redis_excepts
def delete(self, key):
"""Del key
"""
REDIS_LOGGER.info("del %s", key)
return get_connection().delete(key)
@redis_excepts
def incr(self, key):
"""increase key by one
"""
REDIS_LOGGER.info("incr %s", key)
return get_connection().incr(key)
@redis_excepts
def expire(self, key, ttl):
"""indicate expire for a certain key
"""
REDIS_LOGGER.info("expire %s %s", key, ttl)
return get_connection().expire(key, ttl)
@redis_excepts
def exists(self, key):
"""determine if given key exist
"""
REDIS_LOGGER.info("exists %s", key)
return get_connection().exists(key)
@redis_excepts
def mset(self, **kwargs):
"""multiple set keys and values
example: r.mset(hello="h", world="w")
"""
REDIS_LOGGER.info("mset %s", kwargs)
return get_connection().mset(kwargs)
@redis_excepts
def getset(self, key, value):
"""get value of key and update value to prameter value
If key has no value before. return None then
"""
REDIS_LOGGER.info("getset %s %s", key, value)
return get_connection().getset(key, value)
@redis_excepts
def mget(self, *keys):
"""Get the value of all the given keys
"""
if isinstance(keys[0], list):
keys = keys[0]
keys = [str(i) for i in keys]
REDIS_LOGGER.info("mget %s", ' '.join(keys))
return get_connection().mget(keys)
@redis_excepts
def append(self, key, value):
"""Append a value to a key
"""
REDIS_LOGGER.info("append %s %s", key, value)
return get_connection().append(key, value)
@redis_excepts
def substr(self, key, start, stop):
"""get sub string of value of key
"""
REDIS_LOGGER.info("substr %s %d %d", key, start, stop)
return get_connection().substr(key, start, stop)
@redis_excepts
def incrby(self, key, number):
"""increase value of gievn key by number
"""
REDIS_LOGGER.info("incrby %s %d", key, number)
return get_connection().incrby(key, number)
############################################################
# Commands in list
############################################################
@redis_excepts
def push(self, value, queue=None):
"""Push item into the queue
"""
if not queue:
queue = self.queue
if not queue:
raise Exception("queue does not exist")
REDIS_LOGGER.info("rpush %s %s", queue, value)
return get_connection().rpush(queue, value)
@redis_excepts
def pop(self, queue=None):
"""Pop from queue"""
if not queue:
queue = self.queue
if not queue:
raise Exception("queue does not exist")
REDIS_LOGGER.info("blpop:%s", self.queue)
item = get_connection().blpop(queue, timeout=None)
return item[1] if item else None
@redis_excepts
def lrange(self, key, start, stop):
"""decrease value of given key by number
"""
REDIS_LOGGER.info("lrange %s %d %d", key, start, stop)
return get_connection().lrange(key, start, stop)
@redis_excepts
def lindex(self, key, pos):
"""fetch individual items from the list with LINDEX.
"""
REDIS_LOGGER.info("lindex %s %d", key, pos)
return get_connection().lindex(key, pos)
################################
# Commands used on SET values
################################
@redis_excepts
def sadd(self, name, member):
"""add a item into a set
"""
REDIS_LOGGER.info("sadd %s %s", name, member)
return get_connection().sadd(name, member)
@redis_excepts
def smembers(self, name):
"""list members of a set
"""
REDIS_LOGGER.info("smembers %s", name)
return get_connection().smembers(name)
@redis_excepts
def sismembers(self, name, member):
"""determine if item in set collection
"""
REDIS_LOGGER.info("sismembers %s, %s", name, member)
return get_connection().sismember(name, member)
@redis_excepts
def srem(self, name, member):
"""remove a item from set
"""
REDIS_LOGGER.info("srem %s %s", name, member)
return get_connection().srem(name, member)
#########################################
# Hashes in Redis
#########################################
@redis_excepts
def hset(self, name, key, value):
"""Store the value at the key in the hash
"""
REDIS_LOGGER.info("hset %s %s %s", name, key, value)
return get_connection().hset(name, key, value)
@redis_excepts
def hmset(self, name, mapping):
"""multiple hset
example: redispool.hmset(key, {'a':1, 'b':2})
"""
REDIS_LOGGER.info("hmset %s %s", name, str(mapping))
return get_connection().hmset(name, mapping)
@redis_excepts
def hget(self, name, key):
"""Fetche the value at the given hash key
"""
REDIS_LOGGER.info("hget %s %s", name, key)
return get_connection().hget(name, key)
@redis_excepts
def hgetall(self, name):
"""Fetche the entire hash
"""
REDIS_LOGGER.info("hgetall %s", name)
return get_connection().hgetall(name)
@redis_excepts
def hdel(self, name, key):
"""Remove a key from the hash, if it exists
"""
REDIS_LOGGER.info("hdel %s %s", name, key)
return get_connection().hdel(name, key)
@redis_excepts
def hincrby(self, name, key, increment):
"""add increment into filed of key
"""
REDIS_LOGGER.info("hincrby %s %s %d", name, key, increment)
return get_connection().hincrby(name, key, increment)
#########################################
# ZSET in Redis
#########################################
@redis_excepts
def zadd(self, key, score, member):
"""Add member with the given score to the ZSET
"""
REDIS_LOGGER.info("zadd %s %s %s", key, score, member)
return get_connection().zadd(key, score, member)
@redis_excepts
def zrem(self, key, member):
"""Remove the item from the ZSET, if it exists
"""
REDIS_LOGGER.info("zrem %s %s", key, member)
return get_connection().zrem(key, member)
@redis_excepts
def zrange(self, key, start, stop, withscores=False):
"""Fetche the items in the ZSET from their positions in sorted order
"""
REDIS_LOGGER.info("zrange %s %s %s %s", key, start, stop, withscores)
return get_connection().zrange(key, start, stop, withscores=withscores)
@redis_excepts
def zrevrange(self, key, start, stop, withscores=False):
"""reverse range function
"""
REDIS_LOGGER.info("zrevrange: %s %s %s %s", key, start, stop, withscores)
return get_connection().zrevrange(key, start, stop, withscores=withscores)
@redis_excepts
def zrangebyscore(self, key, start, stop, withscores=False):
"""Fetche items in the ZSET based on a range of scores. can you sort yourself?
"""
REDIS_LOGGER.info("zrangebyscore %s %s %s %s", key, start, stop, withscores)
return get_connection().zrangebyscore(key, start, stop, withscores=withscores)
@redis_excepts
def zrevrangebyscore(self, key, start, stop, withscores=False):
"""pass
"""
REDIS_LOGGER.info("zrevrangebyscore: %s %s %s", key, start, stop)
return get_connection().zrevrangebyscore(key, start, stop, withscores=withscores)
@redis_excepts
def zscore(self, key, member):
"""return the ordered collection.
"""
REDIS_LOGGER.info("zscore: %s %s", key, member)
return get_connection().zscore(key, member)
@redis_excepts
def zincrby(self, key, member, increment):
"""Increment the score of a member in a sorted set
"""
REDIS_LOGGER.info("zincrby: %s %s %f", key, member, increment)
return get_connection().zincrby(key, member, increment)
# @redis_excepts
# def zinterstore(self, dest_zsets, sets_num, *args, aggregation='max'):
# """find those entries that are in all of the SETs and ZSETs, combining their scores
# """
# REDIS_LOGGER.info("zinterstore: dest_zets:%s" % (dest_zsets))
# return get_connection().zinterstore(dest, keys)
@redis_excepts
def zrank(self, key, member):
"""return the position of the given member in the ZSET.
        Returns None if the member does not exist
"""
REDIS_LOGGER.info("zrank: %s %s", key, member)
return get_connection().zrank(key, member)
def test():
"""Just test
"""
r = Redispool()
key = 'test'
value = "value"
# test key-value ################################
def test_key_value():
print("Test starts. key:%s ........." % key)
r.delete(key)
assert(r.exists(key) is False)
r.set(key, 234)
assert(r.exists(key) is True)
r.set(key, 1)
r.incr(key)
assert(int(r.get(key)) == 2)
r.incrby(key, 3)
assert(int(r.get(key)) == 5)
r.expire(key, 3)
assert(r.exists(key) is True)
time.sleep(3)
assert(r.exists(key) is False)
r.mset(hello="h", world="w")
assert(r.mget("hello", "world")[0] in "h")
assert(r.mget("hello", "world")[1] in "w")
r.append("hello", "eworld")
assert(r.get("hello") in "heworld")
assert(r.substr("hello", 0, 3) in "hewo")
r.delete(key)
assert(r.getset(key, "value") is None)
assert(r.get(key) in "value")
print("All key-value functions pass the test......\n")
# test_key_value()
# test List ######################################
def test_list():
r.delete(key)
r.push(value, key)
assert(r.lrange(key, 0, 0)[0] == value)
assert(r.lindex(key, 0) == value)
assert(r.pop(key) == value)
r2 = Redispool(queue=key)
r2.push(value, key)
assert(r2.pop(key) == value)
print("All list functions pass the test......\n")
test_list()
# test Set ######################################
def test_set():
r.delete(key)
r.sadd(key, value)
assert(value in r.smembers(key))
assert(r.sismembers(key, value) is True)
r.srem(key, value)
assert(value not in r.smembers(key))
assert(r.sismembers(key, value) is False)
print("All set functions pass the test......\n")
test_set()
# test Hash ######################################
def test_hash():
r.delete(key)
r.hset(key, "q", "q")
r.hset(key, "w", "w")
r.hset(key, "e", 1)
assert(r.hget(key, "q") == "q")
assert(r.hget(key, "w") == "w")
assert(r.hget(key, "e") == "1")
r.hincrby(key, "e", 3)
assert(r.hget(key, "e") == "4")
print(r.hgetall(key))
r.hdel(key, "e")
print(r.hgetall(key))
r.delete(key)
assert(r.exists(key) is False)
r.hmset(key, {"a": "a", "b": "b", "c": 1})
print(r.hgetall(key))
# assert(r.exists(key) is False)
print("All hash functions pass the test......\n")
test_hash()
# test Zset ######################################
def test_zset():
r.delete(key)
        r.zadd(key, 100, value)
        r.zadd(key, 200, value + "2")
        r.zadd(key, 300, value + "3")
        r.zadd(key, 150, value + "4")
# ==============================================
assert("value4" in r.zrange(key, 0, -1))
r.zrem(key, value + "4")
assert("value4" not in r.zrange(key, 0, -1))
print(r.zrange(key, 0, -1))
print(r.zrange(key, 0, -1, withscores=True))
print(r.zrevrange(key, 0, -1))
print(r.zrevrange(key, 0, -1, withscores=True))
assert(r.zscore(key, "value3") == 300.0)
r.zincrby(key, "value3", 100.5)
assert(r.zscore(key, "value3") == 400.5)
print(r.zrangebyscore(key, 0, 200))
print(r.zrangebyscore(key, 0, 200, withscores=True))
print(r.zrevrangebyscore(key, 1000, 100))
print(r.zrevrangebyscore(key, 1000, 100, withscores=True))
print("All zset functions pass the test..........\n")
test_zset()
print("*******Congratulations!!! All the test have been passed ~ ")
# test()
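# A minimal usage sketch (assumes config/redis.conf exists and a Redis server
# is reachable; the key and queue names below are illustrative only):
#
#     pool = Redispool(queue="jobs")
#     pool.set("greeting", "hello")
#     print(pool.get("greeting"))  # -> "hello"
#     pool.push("task-1")          # RPUSH onto the "jobs" queue
#     print(pool.pop())            # BLPOP -> "task-1"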
|
{
"content_hash": "dbac4c23bc0262c032c058eac62c831c",
"timestamp": "",
"source": "github",
"line_count": 465,
"max_line_length": 93,
"avg_line_length": 32.806451612903224,
"alnum_prop": 0.541265158964274,
"repo_name": "Elaoed/iplives",
"id": "57f34f2d72ff80ae1554976660bf488a8bd5a3a0",
"size": "15271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kits/redispool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "50490"
},
{
"name": "Shell",
"bytes": "59"
}
],
"symlink_target": ""
}
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ARB_texture_multisample'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_ARB_texture_multisample',error_checker=_errors._error_checker)
GL_INT_SAMPLER_2D_MULTISAMPLE=_C('GL_INT_SAMPLER_2D_MULTISAMPLE',0x9109)
GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY=_C('GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY',0x910C)
GL_MAX_COLOR_TEXTURE_SAMPLES=_C('GL_MAX_COLOR_TEXTURE_SAMPLES',0x910E)
GL_MAX_DEPTH_TEXTURE_SAMPLES=_C('GL_MAX_DEPTH_TEXTURE_SAMPLES',0x910F)
GL_MAX_INTEGER_SAMPLES=_C('GL_MAX_INTEGER_SAMPLES',0x9110)
GL_MAX_SAMPLE_MASK_WORDS=_C('GL_MAX_SAMPLE_MASK_WORDS',0x8E59)
GL_PROXY_TEXTURE_2D_MULTISAMPLE=_C('GL_PROXY_TEXTURE_2D_MULTISAMPLE',0x9101)
GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY=_C('GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY',0x9103)
GL_SAMPLER_2D_MULTISAMPLE=_C('GL_SAMPLER_2D_MULTISAMPLE',0x9108)
GL_SAMPLER_2D_MULTISAMPLE_ARRAY=_C('GL_SAMPLER_2D_MULTISAMPLE_ARRAY',0x910B)
GL_SAMPLE_MASK=_C('GL_SAMPLE_MASK',0x8E51)
GL_SAMPLE_MASK_VALUE=_C('GL_SAMPLE_MASK_VALUE',0x8E52)
GL_SAMPLE_POSITION=_C('GL_SAMPLE_POSITION',0x8E50)
GL_TEXTURE_2D_MULTISAMPLE=_C('GL_TEXTURE_2D_MULTISAMPLE',0x9100)
GL_TEXTURE_2D_MULTISAMPLE_ARRAY=_C('GL_TEXTURE_2D_MULTISAMPLE_ARRAY',0x9102)
GL_TEXTURE_BINDING_2D_MULTISAMPLE=_C('GL_TEXTURE_BINDING_2D_MULTISAMPLE',0x9104)
GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY=_C('GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY',0x9105)
GL_TEXTURE_FIXED_SAMPLE_LOCATIONS=_C('GL_TEXTURE_FIXED_SAMPLE_LOCATIONS',0x9107)
GL_TEXTURE_SAMPLES=_C('GL_TEXTURE_SAMPLES',0x9106)
GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE=_C('GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE',0x910A)
GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY=_C('GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY',0x910D)
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,arrays.GLfloatArray)
def glGetMultisamplefv(pname,index,val):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLbitfield)
def glSampleMaski(maskNumber,mask):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,_cs.GLenum,_cs.GLsizei,_cs.GLsizei,_cs.GLboolean)
def glTexImage2DMultisample(target,samples,internalformat,width,height,fixedsamplelocations):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,_cs.GLenum,_cs.GLsizei,_cs.GLsizei,_cs.GLsizei,_cs.GLboolean)
def glTexImage3DMultisample(target,samples,internalformat,width,height,depth,fixedsamplelocations):pass
|
{
"content_hash": "23cb0158c453b8411fa885bbd0b16e0d",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 120,
"avg_line_length": 56.93478260869565,
"alnum_prop": 0.7804505536464299,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "fd23d6aaab258d2ce9a12b3e0ff75742caa95b8d",
"size": "2619",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/raw/GL/ARB/texture_multisample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
}
|
import sys
from getpass import getpass
from BioSQL import BioSeqDatabase
from common import standard_options, generate_placeholders, chunks, extract_feature_sql
def get_seqfeature_for_db(server, biodb):
    ''' find all seqfeatures that belong to the given biodatabase namespace
        returns a list of seqfeature_id rows
    '''
sql = "SELECT s.seqfeature_id FROM seqfeature s join bioentry b using(bioentry_id) join biodatabase bd using(biodatabase_id) WHERE bd.name = %s"
return server.adaptor.execute_and_fetchall(sql, (biodb,))
def main(args):
server = BioSeqDatabase.open_database(driver=args.driver, db=args.database, user=args.user, host=args.host, passwd=args.password)
if args.output_format == 'fasta':
from Bio import SeqIO
db = server[args.database_name]
for rec in db.values():
SeqIO.write(rec, sys.stdout, args.output_format)
else:
seqfeature_ids = get_seqfeature_for_db(server, args.database_name)
if args.output_format == 'feat-prot':
extract_feature_sql(server, seqfeature_ids, type=['CDS'], translate=True )
elif args.output_format == 'feat-nucl':
extract_feature_sql(server, seqfeature_ids )
if __name__ == "__main__":
parser = standard_options()
parser.add_argument('-D', '--database-name', help='namespace of the database that you want to output genes from', dest='database_name', required=True)
parser.add_argument('-o', '--output_format', help='output format of the selected sequences', choices=['feat-prot', 'feat-nucl', 'fasta'], default='feat-prot')
args = parser.parse_args()
if args.password is None:
args.password = getpass("Please enter the password for user " + \
args.user + " on database " + args.database)
main(args)
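# Example invocation (illustrative namespace and output redirection; the
# connection flags come from common.standard_options and are omitted here):
#   python dump_biodatabase.py -D my_namespace -o feat-prot > proteins.faa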
|
{
"content_hash": "b845f124b0c79cde371c002c1e8b9caf",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 162,
"avg_line_length": 45.125,
"alnum_prop": 0.6770083102493075,
"repo_name": "ctSkennerton/BioSQL-Extensions",
"id": "608aa2a17c029e81a982bbb54b0e89ab7265bee5",
"size": "1827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/dump_biodatabase.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2262"
},
{
"name": "Python",
"bytes": "206610"
},
{
"name": "TSQL",
"bytes": "21266"
}
],
"symlink_target": ""
}
|
""" This module provides the project version info and comparison utility.
"""
__version_info__ = (0, 1, 3)
__version__ = '.'.join(str(x) for x in __version_info__)
def compare_version(value):
""" Determines if the provided version value compares with program version.
`value`
Version comparison string (e.g. ==1.0, <=1.0, >1.1)
Supported operators:
<, <=, ==, >, >=
"""
# extract parts from value
import re
    # longer operators first: with '<|<=', the '<=' branch could never match
    res = re.match(r'(<=|<|==|>=|>)(\d{1,2}\.\d{1,2}(\.\d{1,2})?)$',
                   str(value).strip())
if not res:
return False
operator, value, _ = res.groups()
# break into pieces
value = tuple(int(x) for x in str(value).split('.'))
if len(value) < 3:
value += (0,)
version = __version_info__
if operator in ('<', '<='):
if version < value:
return True
if operator != '<=':
return False
elif operator in ('>=', '>'):
if version > value:
return True
if operator != '>=':
return False
return value == version
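# A few illustrative checks against __version_info__ == (0, 1, 3):
#   compare_version('==0.1.3')  # True
#   compare_version('>=0.1')    # True  ('0.1' is padded to (0, 1, 0))
#   compare_version('<0.1.3')   # False (0.1.3 is not strictly less)
#   compare_version('0.1.3')    # False (no operator -> regex does not match)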
|
{
"content_hash": "b3a4e180730892f98561f3940667da99",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 79,
"avg_line_length": 24.891304347826086,
"alnum_prop": 0.4908296943231441,
"repo_name": "xtrementl/focus",
"id": "e11afe8bca7f179e286e38bac574757d9d6943f9",
"size": "1145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "focus/version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "341755"
}
],
"symlink_target": ""
}
|
HOST = "/dev/rfcomm0"
PORT = 1
DB_PORT = 5984
CONFSTRING = '{"enableRawOutput": false, "format": "Json"}\n'
DB_USERNAME = "encima"
DB_PWD = "Cocaine5Unicorn_Hiatus"
DB_NAME = 'mindwave_logs'
DB_DELETE = True
FG_CMD = {
'Darwin': "osascript /Users/encima/development/neurosky/neurosocket/osx/foreground_app.scpt",
'Windows':"windows\\appfocus.exe",
'Linux':"xprop -id $(xprop -root _NET_ACTIVE_WINDOW | cut -d ' ' -f 5) _NET_WM_NAME WM_CLASS"
}
READING_BUFFER = 2
BUFFER = False
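# A minimal sketch of how FG_CMD might be consumed (platform.system() returns
# 'Darwin', 'Windows' or 'Linux', matching the keys above; error handling
# omitted):
#   import platform, subprocess
#   output = subprocess.check_output(FG_CMD[platform.system()], shell=True)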
|
{
"content_hash": "65a83549ecf463ee5f5333501b8037b4",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 101,
"avg_line_length": 31.4375,
"alnum_prop": 0.658051689860835,
"repo_name": "encima/NeuroSocket",
"id": "542d5b448897e555db4ead12a9da0ec9bf0306f7",
"size": "503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "4896"
},
{
"name": "JavaScript",
"bytes": "4968"
},
{
"name": "Python",
"bytes": "65312"
}
],
"symlink_target": ""
}
|
from semcplogic.model import ModelBuilder
from semcplogic.cpmodel import NonLinearCPLogicGenerator
from semcplogic.cpcompiler import CPCompiler
from semcplogic.problogresult import GnuplotDrawer
b = ModelBuilder()
b.addNode("f")
b.addNode("v1",0,1)
b.addNode("v2",0,1)
b.setInfluence("f","v1",5)
b.setInfluence("f","v2",5)
b.setLatent("f")
m = b.consume()
d = m.sample(100)
d2 = d.discretise({"v1":["hoog","laag"],"v2":["hoog","laag"]})
cm = NonLinearCPLogicGenerator()
cpcode = cm.generate(m)
cc = CPCompiler()
runmodel = cc.compileCode(cpcode,d2)
runmodel.iterations = 250
result = runmodel.run()
import pprint; pprint.pprint(result.probs)
g = GnuplotDrawer()
g.draw(result)
|
{
"content_hash": "65ac08d811ac6dd84ef673b671e348ef",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 62,
"avg_line_length": 25.22222222222222,
"alnum_prop": 0.737151248164464,
"repo_name": "verhoevenv/semcplogic",
"id": "d94ed3052b090d8c855352e8f74e83e6ae64d171",
"size": "724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/latent.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Perl",
"bytes": "18271"
},
{
"name": "Python",
"bytes": "68680"
},
{
"name": "R",
"bytes": "528"
}
],
"symlink_target": ""
}
|
"""Conversion tool from Brain Vision EEG to FIF"""
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
import time
import re
import warnings
import numpy as np
from ...utils import verbose, logger
from ..constants import FIFF
from ..meas_info import _empty_info
from ..base import _BaseRaw, _check_update_montage
from ..reference import add_reference_channels
from ...externals.six import StringIO, u
from ...externals.six.moves import configparser
class RawBrainVision(_BaseRaw):
"""Raw object from Brain Vision EEG file
Parameters
----------
vhdr_fname : str
Path to the EEG header file.
montage : str | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0).
eog : list or tuple
Names of channels or list of indices that should be designated
EOG channels. Values should correspond to the vhdr file.
Default is ('HEOGL', 'HEOGR', 'VEOGb').
misc : list or tuple
Names of channels or list of indices that should be designated
MISC channels. Values should correspond to the electrodes
in the vhdr file. Default is None.
reference : None | str
Name of the electrode which served as the reference in the recording.
If a name is provided, a corresponding channel is added and its data
is set to 0. This is useful for later re-referencing. The name should
correspond to a name in elp_names. Data must be preloaded.
scale : float
The scaling factor for EEG data. Units are in volts. Default scale
factor is 1. For microvolts, the scale factor would be 1e-6. This is
used when the header file does not specify the scale factor.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, vhdr_fname, montage=None,
eog=('HEOGL', 'HEOGR', 'VEOGb'), misc=None, reference=None,
scale=1., preload=False, verbose=None):
if reference is not None and preload is False:
raise ValueError("Preload must be set to True if reference is "
"specified.")
# Preliminary Raw attributes
self._events = np.empty((0, 3))
self.preload = False
# Channel info and events
logger.info('Extracting eeg Parameters from %s...' % vhdr_fname)
vhdr_fname = os.path.abspath(vhdr_fname)
if not isinstance(scale, (int, float)):
raise TypeError('Scale factor must be an int or float. '
'%s provided' % type(scale))
info, self._eeg_info, events = _get_eeg_info(vhdr_fname, eog, misc)
self._eeg_info['scale'] = float(scale)
logger.info('Creating Raw.info structure...')
_check_update_montage(info, montage)
with open(info['filename'], 'rb') as f:
f.seek(0, os.SEEK_END)
n_samples = f.tell()
dtype = int(self._eeg_info['dtype'][-1])
last_samps = [(n_samples //
(dtype * self._eeg_info['n_eeg_chan'])) - 1]
super(RawBrainVision, self).__init__(
info, last_samps=last_samps, filenames=[vhdr_fname],
verbose=verbose)
self.set_brainvision_events(events)
# load data
if preload:
self.preload = preload
logger.info('Reading raw data from %s...' % vhdr_fname)
self._data, _ = self._read_segment()
if reference is not None:
add_reference_channels(self, reference, copy=False)
assert len(self._data) == self.info['nchan']
logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs'
% (self.first_samp, self.last_samp,
float(self.first_samp) / self.info['sfreq'],
float(self.last_samp) / self.info['sfreq']))
logger.info('Ready.')
def _read_segment(self, start=0, stop=None, sel=None, verbose=None,
projector=None):
"""Read a chunk of raw data
Parameters
----------
start : int, (optional)
first sample to include (first is 0). If omitted, defaults to the
first sample in data.
stop : int, (optional)
First sample to not include.
If omitted, data is included to the end.
sel : array, optional
Indices of channels to select.
projector : array
SSP operator to apply to the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
data : array, shape (n_channels, n_samples)
The data.
times : array, shape (n_samples,)
returns the time values corresponding to the samples.
"""
if sel is not None:
if len(sel) == 1 and sel[0] == 0 and start == 0 and stop == 1:
return (666, 666)
if projector is not None:
raise NotImplementedError('Currently does not handle projections.')
if stop is None:
stop = self.last_samp + 1
elif stop > self.last_samp + 1:
stop = self.last_samp + 1
# Initial checks
start = int(start)
stop = int(stop)
if start >= stop:
raise ValueError('No data in this range')
# assemble channel information
eeg_info = self._eeg_info
sfreq = self.info['sfreq']
chs = self.info['chs']
units = eeg_info['units']
if len(self._events):
chs = chs[:-1]
n_eeg = len(chs)
cals = np.atleast_2d([chan_info['cal'] for chan_info in chs])
cals *= eeg_info['scale'] * units
logger.info('Reading %d ... %d = %9.3f ... %9.3f secs...' %
(start, stop - 1, start / float(sfreq),
(stop - 1) / float(sfreq)))
# read data
dtype = np.dtype(eeg_info['dtype'])
buffer_size = (stop - start)
pointer = start * n_eeg * dtype.itemsize
with open(self.info['filename'], 'rb') as f:
f.seek(pointer)
# extract data
data_buffer = np.fromfile(f, dtype=dtype,
count=buffer_size * n_eeg)
if eeg_info['data_orientation'] == 'MULTIPLEXED':
data_buffer = data_buffer.reshape((n_eeg, -1), order='F')
elif eeg_info['data_orientation'] == 'VECTORIZED':
data_buffer = data_buffer.reshape((n_eeg, -1), order='C')
n_channels, n_times = data_buffer.shape
# Total number of channels
n_channels += int(len(self._events) > 0)
# Preallocate data array
data = np.empty((n_channels, n_times), dtype=np.float64)
data[:len(data_buffer)] = data_buffer # cast to float64
data[:len(data_buffer)] *= cals.T
ch_idx = len(data_buffer)
del data_buffer
# stim channel (if applicable)
if len(self._events):
data[ch_idx] = _synthesize_stim_channel(self._events, start, stop)
ch_idx += 1
if sel is not None:
data = data.take(sel, axis=0)
logger.info('[done]')
times = np.arange(start, stop, dtype=float) / sfreq
return data, times
def get_brainvision_events(self):
"""Retrieve the events associated with the Brain Vision Raw object
Returns
-------
events : array, shape (n_events, 3)
Events, each row consisting of an (onset, duration, trigger)
sequence.
"""
return self._events.copy()
def set_brainvision_events(self, events):
"""Set the events (automatically updates the synthesized stim channel)
Parameters
----------
events : array, shape (n_events, 3)
Events, each row consisting of an (onset, duration, trigger)
sequence.
"""
events = np.copy(events)
        if not (events.ndim == 2 and events.shape[1] == 3):
raise ValueError("[n_events x 3] shaped array required")
# update info based on presence of stim channel
had_events = bool(len(self._events))
has_events = bool(len(events))
if had_events and not has_events: # remove stim channel
if self.info['ch_names'][-1] != 'STI 014':
err = "Last channel is not stim channel; info was modified"
raise RuntimeError(err)
self.info['nchan'] -= 1
del self.info['ch_names'][-1]
del self.info['chs'][-1]
if self.preload:
self._data = self._data[:-1]
elif has_events and not had_events: # add stim channel
idx = len(self.info['chs']) + 1
chan_info = {'ch_name': 'STI 014',
'kind': FIFF.FIFFV_STIM_CH,
'coil_type': FIFF.FIFFV_COIL_NONE,
'logno': idx,
'scanno': idx,
'cal': 1,
'range': 1,
'unit_mul': 0,
'unit': FIFF.FIFF_UNIT_NONE,
'eeg_loc': np.zeros(3),
'loc': np.zeros(12)}
self.info['nchan'] += 1
self.info['ch_names'].append(chan_info['ch_name'])
self.info['chs'].append(chan_info)
if self.preload:
shape = (1, self._data.shape[1])
self._data = np.vstack((self._data, np.empty(shape)))
# update events
self._events = events
if has_events and self.preload:
start = self.first_samp
stop = self.last_samp + 1
self._data[-1] = _synthesize_stim_channel(events, start, stop)
def _read_vmrk_events(fname):
"""Read events from a vmrk file
Parameters
----------
fname : str
vmrk file to be read.
Returns
-------
events : array, shape (n_events, 3)
An array containing the whole recording's events, each row representing
an event as (onset, duration, trigger) sequence.
"""
# read vmrk file
with open(fname) as fid:
txt = fid.read()
header = txt.split('\n')[0].strip()
start_tag = 'Brain Vision Data Exchange Marker File'
if not header.startswith(start_tag):
raise ValueError("vmrk file should start with %r" % start_tag)
end_tag = 'Version 1.0'
if not header.endswith(end_tag):
raise ValueError("vmrk file should be %r" % end_tag)
# extract Marker Infos block
m = re.search("\[Marker Infos\]", txt)
if not m:
return np.zeros(0)
mk_txt = txt[m.end():]
m = re.search("\[.*\]", mk_txt)
if m:
mk_txt = mk_txt[:m.start()]
# extract event information
items = re.findall("^Mk\d+=(.*)", mk_txt, re.MULTILINE)
events = []
for info in items:
mtype, mdesc, onset, duration = info.split(',')[:4]
try:
            trigger = int(re.findall(r'[A-Za-z]*\s*?(\d+)', mdesc)[0])
onset = int(onset)
duration = int(duration)
events.append((onset, duration, trigger))
except IndexError:
pass
events = np.array(events).reshape(-1, 3)
return events
def _synthesize_stim_channel(events, start, stop):
"""Synthesize a stim channel from events read from a vmrk file
Parameters
----------
events : array, shape (n_events, 3)
Each row representing an event as (onset, duration, trigger) sequence
(the format returned by _read_vmrk_events).
start : int
First sample to return.
stop : int
Last sample to return.
Returns
-------
stim_channel : array, shape (n_samples,)
An array containing the whole recording's event marking
"""
# select events overlapping buffer
onset = events[:, 0]
offset = onset + events[:, 1]
idx = np.logical_and(onset < stop, offset > start)
events = events[idx]
# make onset relative to buffer
events[:, 0] -= start
# fix onsets before buffer start
idx = events[:, 0] < 0
events[idx, 0] = 0
# create output buffer
stim_channel = np.zeros(stop - start)
for onset, duration, trigger in events:
stim_channel[onset:onset + duration] = trigger
return stim_channel
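# A tiny worked example (values are illustrative): with
#   events = np.array([[2, 3, 5]]), start=0, stop=8
# the returned channel is [0, 0, 5, 5, 5, 0, 0, 0] -- the trigger value 5
# held for `duration` samples beginning at `onset`.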
def _get_eeg_info(vhdr_fname, eog, misc):
"""Extracts all the information from the header file.
Parameters
----------
vhdr_fname : str
Raw EEG header to be read.
eog : list of str
Names of channels that should be designated EOG channels. Names should
correspond to the vhdr file.
misc : list of str
Names of channels that should be designated MISC channels. Names
should correspond to the electrodes in the vhdr file.
Returns
-------
info : Info
The measurement info.
edf_info : dict
A dict containing Brain Vision specific parameters.
events : array, shape (n_events, 3)
Events from the corresponding vmrk file.
"""
if eog is None:
eog = []
if misc is None:
misc = []
info = _empty_info()
info['buffer_size_sec'] = 10.
info['filename'] = vhdr_fname
eeg_info = {}
ext = os.path.splitext(vhdr_fname)[-1]
if ext != '.vhdr':
raise IOError("The header file must be given to read the data, "
"not the '%s' file." % ext)
with open(vhdr_fname, 'r') as f:
# extract the first section to resemble a cfg
l = f.readline().strip()
assert l == 'Brain Vision Data Exchange Header File Version 1.0'
settings = f.read()
if settings.find('[Comment]') != -1:
params, settings = settings.split('[Comment]')
else:
params, settings = settings, str()
cfg = configparser.ConfigParser()
if hasattr(cfg, 'read_file'): # newer API
cfg.read_file(StringIO(params))
else:
cfg.readfp(StringIO(params))
# get sampling info
# Sampling interval is given in microsec
sfreq = 1e6 / cfg.getfloat('Common Infos', 'SamplingInterval')
sfreq = int(sfreq)
eeg_info['n_eeg_chan'] = n_eeg_chan = cfg.getint('Common Infos',
'NumberOfChannels')
# check binary format
assert cfg.get('Common Infos', 'DataFormat') == 'BINARY'
eeg_info['data_orientation'] = cfg.get('Common Infos', 'DataOrientation')
if not (eeg_info['data_orientation'] == 'MULTIPLEXED' or
eeg_info['data_orientation'] == 'VECTORIZED'):
raise NotImplementedError('Data Orientation %s is not supported'
% eeg_info['data_orientation'])
binary_format = cfg.get('Binary Infos', 'BinaryFormat')
if binary_format == 'INT_16':
eeg_info['dtype'] = '<i2'
elif binary_format == 'INT_32':
eeg_info['dtype'] = '<i4'
elif binary_format == 'IEEE_FLOAT_32':
eeg_info['dtype'] = '<f4'
else:
raise NotImplementedError('Datatype %s is not supported'
% binary_format)
# load channel labels
ch_names = ['UNKNOWN'] * n_eeg_chan
cals = np.empty(n_eeg_chan)
cals[:] = np.nan
units = ['UNKNOWN'] * n_eeg_chan
for chan, props in cfg.items('Channel Infos'):
n = int(re.findall(r'ch(\d+)', chan)[0])
props = props.split(',')
if len(props) < 4:
name, _, resolution = props
unit = 'V'
else:
name, _, resolution, unit = props[:4]
ch_names[n - 1] = name
if resolution == "": # For truncated vhdrs (e.g. EEGLAB export)
cals[n - 1] = 0.000001 # Fill in a default
else:
cals[n - 1] = float(resolution)
unit = unit.replace('\xc2', '') # Remove unwanted control characters
if u(unit) == u('\xb5V'):
units[n - 1] = 1e-6
elif unit == 'V':
units[n - 1] = 1.
else:
units[n - 1] = unit
eeg_info['units'] = np.asarray(units, dtype=float)
# Attempts to extract filtering info from header. If not found, both are
# set to zero.
settings = settings.splitlines()
idx = None
if 'Channels' in settings:
idx = settings.index('Channels')
settings = settings[idx + 1:]
for idx, setting in enumerate(settings):
        if re.match(r'#\s+Name', setting):
break
else:
idx = None
if idx:
lowpass = []
highpass = []
for i, ch in enumerate(ch_names, 1):
line = settings[idx + i].split()
assert ch in line
highpass.append(line[5])
lowpass.append(line[6])
if len(highpass) == 0:
info['highpass'] = None
elif all(highpass):
if highpass[0] == 'NaN':
info['highpass'] = None
elif highpass[0] == 'DC':
info['highpass'] = 0.
else:
info['highpass'] = float(highpass[0])
else:
info['highpass'] = np.min(np.array(highpass, dtype=np.float))
warnings.warn('%s' % ('Channels contain different highpass '
'filters. Highest filter setting will '
'be stored.'))
if len(lowpass) == 0:
info['lowpass'] = None
elif all(lowpass):
if lowpass[0] == 'NaN':
info['lowpass'] = None
else:
info['lowpass'] = float(lowpass[0])
else:
info['lowpass'] = np.min(np.array(lowpass, dtype=np.float))
warnings.warn('%s' % ('Channels contain different lowpass filters.'
' Lowest filter setting will be stored.'))
# Post process highpass and lowpass to take into account units
header = settings[idx].split(' ')
header = [h for h in header if len(h)]
if '[s]' in header[4] and info['highpass'] is not None \
and (info['highpass'] > 0):
info['highpass'] = 1. / info['highpass']
if '[s]' in header[5] and info['lowpass'] is not None:
info['lowpass'] = 1. / info['lowpass']
else:
info['highpass'] = None
info['lowpass'] = None
# locate EEG and marker files
path = os.path.dirname(vhdr_fname)
info['filename'] = os.path.join(path, cfg.get('Common Infos', 'DataFile'))
eeg_info['marker_id'] = os.path.join(path, cfg.get('Common Infos',
'MarkerFile'))
info['meas_date'] = int(time.time())
# Creates a list of dicts of eeg channels for raw.info
logger.info('Setting channel info structure...')
info['chs'] = []
info['nchan'] = nchan = n_eeg_chan
info['ch_names'] = ch_names
info['sfreq'] = sfreq
idxs = range(len(ch_names))
for idx, ch_name, cal in zip(idxs, ch_names, cals):
if ch_name in eog or idx in eog or idx - nchan in eog:
kind = FIFF.FIFFV_EOG_CH
coil_type = FIFF.FIFFV_COIL_NONE
elif ch_name in misc or idx in misc or idx - nchan in misc:
kind = FIFF.FIFFV_MISC_CH
coil_type = FIFF.FIFFV_COIL_NONE
else:
kind = FIFF.FIFFV_EEG_CH
coil_type = FIFF.FIFFV_COIL_EEG
chan_info = {'ch_name': ch_name,
'coil_type': coil_type,
'kind': kind,
'logno': idx + 1,
'scanno': idx + 1,
'cal': cal,
'range': 1.,
'unit_mul': 0., # always zero- mne manual pg. 273
'unit': FIFF.FIFF_UNIT_V,
'coord_frame': FIFF.FIFFV_COORD_HEAD,
'eeg_loc': np.zeros(3),
'loc': np.zeros(12)}
info['chs'].append(chan_info)
# for stim channel
events = _read_vmrk_events(eeg_info['marker_id'])
return info, eeg_info, events
def read_raw_brainvision(vhdr_fname, montage=None,
eog=('HEOGL', 'HEOGR', 'VEOGb'), misc=None,
reference=None, scale=1., preload=False,
verbose=None):
"""Reader for Brain Vision EEG file
Parameters
----------
vhdr_fname : str
Path to the EEG header file.
montage : str | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0).
eog : list or tuple of str
Names of channels or list of indices that should be designated
EOG channels. Values should correspond to the vhdr file
Default is ('HEOGL', 'HEOGR', 'VEOGb').
misc : list or tuple of str
Names of channels or list of indices that should be designated
MISC channels. Values should correspond to the electrodes
in the vhdr file. Default is None.
reference : None | str
Name of the electrode which served as the reference in the recording.
If a name is provided, a corresponding channel is added and its data
is set to 0. This is useful for later re-referencing. The name should
correspond to a name in elp_names. Data must be preloaded.
scale : float
The scaling factor for EEG data. Units are in volts. Default scale
factor is 1. For microvolts, the scale factor would be 1e-6. This is
used when the header file does not specify the scale factor.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
raw : Instance of RawBrainVision
A Raw object containing BrainVision data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
raw = RawBrainVision(vhdr_fname=vhdr_fname, montage=montage, eog=eog,
misc=misc, reference=reference, scale=scale,
preload=preload, verbose=verbose)
return raw
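# A minimal usage sketch (the .vhdr path is hypothetical):
#   raw = read_raw_brainvision('recording.vhdr', preload=True)
#   events = raw.get_brainvision_events()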
|
{
"content_hash": "53bce5527bd000000fb34b5cb4d0e75b",
"timestamp": "",
"source": "github",
"line_count": 619,
"max_line_length": 79,
"avg_line_length": 36.79806138933764,
"alnum_prop": 0.5525946088330845,
"repo_name": "Odingod/mne-python",
"id": "f2a17325e85f72acdead02ddf21f1fcc2e0f8a0d",
"size": "22778",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "mne/io/brainvision/brainvision.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3403"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "3741370"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._data_collection_endpoints_operations import (
build_create_request,
build_delete_request,
build_get_request,
build_list_by_resource_group_request,
build_list_by_subscription_request,
build_update_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DataCollectionEndpointsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~$(python-base-namespace).v2021_04_01.aio.MonitorManagementClient`'s
:attr:`data_collection_endpoints` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_resource_group(
self, resource_group_name: str, **kwargs: Any
) -> AsyncIterable["_models.DataCollectionEndpointResource"]:
"""Lists all data collection endpoints in the specified resource group.
Lists all data collection endpoints in the specified resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataCollectionEndpointResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~$(python-base-namespace).v2021_04_01.models.DataCollectionEndpointResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.DataCollectionEndpointResourceListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DataCollectionEndpointResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints"} # type: ignore
@distributed_trace
def list_by_subscription(self, **kwargs: Any) -> AsyncIterable["_models.DataCollectionEndpointResource"]:
"""Lists all data collection endpoints in the specified subscription.
Lists all data collection endpoints in the specified subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataCollectionEndpointResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~$(python-base-namespace).v2021_04_01.models.DataCollectionEndpointResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.DataCollectionEndpointResourceListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_subscription.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DataCollectionEndpointResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_subscription.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Insights/dataCollectionEndpoints"} # type: ignore
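    # A minimal consumption sketch (client construction omitted; names are
    # illustrative). The list operations return AsyncItemPaged, so iterate
    # with `async for`:
    #   async for endpoint in client.data_collection_endpoints.list_by_subscription():
    #       print(endpoint.name)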
@distributed_trace_async
async def get(
self, resource_group_name: str, data_collection_endpoint_name: str, **kwargs: Any
) -> _models.DataCollectionEndpointResource:
"""Returns the specified data collection endpoint.
Returns the specified data collection endpoint.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_endpoint_name: The name of the data collection endpoint. The name is
case insensitive. Required.
:type data_collection_endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionEndpointResource or the result of cls(response)
:rtype: ~$(python-base-namespace).v2021_04_01.models.DataCollectionEndpointResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.DataCollectionEndpointResource]
request = build_get_request(
resource_group_name=resource_group_name,
data_collection_endpoint_name=data_collection_endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("DataCollectionEndpointResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints/{dataCollectionEndpointName}"} # type: ignore
@overload
async def create(
self,
resource_group_name: str,
data_collection_endpoint_name: str,
body: Optional[_models.DataCollectionEndpointResource] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DataCollectionEndpointResource:
"""Creates or updates a data collection endpoint.
Creates or updates a data collection endpoint.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_endpoint_name: The name of the data collection endpoint. The name is
case insensitive. Required.
:type data_collection_endpoint_name: str
:param body: The payload. Default value is None.
:type body: ~$(python-base-namespace).v2021_04_01.models.DataCollectionEndpointResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionEndpointResource or the result of cls(response)
:rtype: ~$(python-base-namespace).v2021_04_01.models.DataCollectionEndpointResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create(
self,
resource_group_name: str,
data_collection_endpoint_name: str,
body: Optional[IO] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DataCollectionEndpointResource:
"""Creates or updates a data collection endpoint.
Creates or updates a data collection endpoint.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_endpoint_name: The name of the data collection endpoint. The name is
case insensitive. Required.
:type data_collection_endpoint_name: str
:param body: The payload. Default value is None.
:type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionEndpointResource or the result of cls(response)
:rtype: ~$(python-base-namespace).v2021_04_01.models.DataCollectionEndpointResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create(
self,
resource_group_name: str,
data_collection_endpoint_name: str,
body: Optional[Union[_models.DataCollectionEndpointResource, IO]] = None,
**kwargs: Any
) -> _models.DataCollectionEndpointResource:
"""Creates or updates a data collection endpoint.
Creates or updates a data collection endpoint.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_endpoint_name: The name of the data collection endpoint. The name is
case insensitive. Required.
:type data_collection_endpoint_name: str
:param body: The payload. Is either a model type or a IO type. Default value is None.
:type body: ~$(python-base-namespace).v2021_04_01.models.DataCollectionEndpointResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionEndpointResource or the result of cls(response)
:rtype: ~$(python-base-namespace).v2021_04_01.models.DataCollectionEndpointResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.DataCollectionEndpointResource]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(body, (IO, bytes)):
_content = body
else:
if body is not None:
_json = self._serialize.body(body, "DataCollectionEndpointResource")
else:
_json = None
request = build_create_request(
resource_group_name=resource_group_name,
data_collection_endpoint_name=data_collection_endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DataCollectionEndpointResource", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("DataCollectionEndpointResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints/{dataCollectionEndpointName}"} # type: ignore
@overload
async def update(
self,
resource_group_name: str,
data_collection_endpoint_name: str,
body: Optional[_models.ResourceForUpdate] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DataCollectionEndpointResource:
"""Updates part of a data collection endpoint.
Updates part of a data collection endpoint.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_endpoint_name: The name of the data collection endpoint. The name is
case insensitive. Required.
:type data_collection_endpoint_name: str
:param body: The payload. Default value is None.
:type body: ~$(python-base-namespace).v2021_04_01.models.ResourceForUpdate
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionEndpointResource or the result of cls(response)
:rtype: ~$(python-base-namespace).v2021_04_01.models.DataCollectionEndpointResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update(
self,
resource_group_name: str,
data_collection_endpoint_name: str,
body: Optional[IO] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DataCollectionEndpointResource:
"""Updates part of a data collection endpoint.
Updates part of a data collection endpoint.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_endpoint_name: The name of the data collection endpoint. The name is
case insensitive. Required.
:type data_collection_endpoint_name: str
:param body: The payload. Default value is None.
:type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionEndpointResource or the result of cls(response)
:rtype: ~$(python-base-namespace).v2021_04_01.models.DataCollectionEndpointResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update(
self,
resource_group_name: str,
data_collection_endpoint_name: str,
body: Optional[Union[_models.ResourceForUpdate, IO]] = None,
**kwargs: Any
) -> _models.DataCollectionEndpointResource:
"""Updates part of a data collection endpoint.
Updates part of a data collection endpoint.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_endpoint_name: The name of the data collection endpoint. The name is
case insensitive. Required.
:type data_collection_endpoint_name: str
        :param body: The payload. Is either a model type or an IO type. Default value is None.
:type body: ~$(python-base-namespace).v2021_04_01.models.ResourceForUpdate or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionEndpointResource or the result of cls(response)
:rtype: ~$(python-base-namespace).v2021_04_01.models.DataCollectionEndpointResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.DataCollectionEndpointResource]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(body, (IO, bytes)):
_content = body
else:
if body is not None:
_json = self._serialize.body(body, "ResourceForUpdate")
else:
_json = None
request = build_update_request(
resource_group_name=resource_group_name,
data_collection_endpoint_name=data_collection_endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("DataCollectionEndpointResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints/{dataCollectionEndpointName}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, data_collection_endpoint_name: str, **kwargs: Any
) -> None:
"""Deletes a data collection endpoint.
Deletes a data collection endpoint.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_endpoint_name: The name of the data collection endpoint. The name is
case insensitive. Required.
:type data_collection_endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
data_collection_endpoint_name=data_collection_endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints/{dataCollectionEndpointName}"} # type: ignore
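# Hedged usage sketch, not part of the generated client: it assumes the async
# MonitorManagementClient from azure-mgmt-monitor plus azure-identity's
# DefaultAzureCredential, and that the multiapi client routes these calls to
# the operations defined above. Subscription, resource group, endpoint name
# and the models import path are placeholders/assumptions.
#
#     import asyncio
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.monitor.aio import MonitorManagementClient
#     from azure.mgmt.monitor.models import DataCollectionEndpointResource
#
#     async def main():
#         async with DefaultAzureCredential() as credential:
#             async with MonitorManagementClient(credential, "<subscription-id>") as client:
#                 dce = await client.data_collection_endpoints.create(
#                     resource_group_name="<resource-group>",
#                     data_collection_endpoint_name="<endpoint-name>",
#                     body=DataCollectionEndpointResource(location="eastus"),
#                 )
#                 print(dce.name)
#
#     asyncio.run(main())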
|
{
"content_hash": "8b62dc0d055342244005f6eeecdef2bd",
"timestamp": "",
"source": "github",
"line_count": 645,
"max_line_length": 197,
"avg_line_length": 45.88837209302326,
"alnum_prop": 0.651429150618285,
"repo_name": "Azure/azure-sdk-for-python",
"id": "0e9e8468ff4bb466ccc3ea363b8f2cd44b64d23e",
"size": "30098",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2021_04_01/aio/operations/_data_collection_endpoints_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""Lighting channels module for Zigbee Home Automation."""
from typing import Optional
import zigpy.zcl.clusters.lighting as lighting
from .. import registries, typing as zha_typing
from ..const import REPORT_CONFIG_DEFAULT
from .base import ClientChannel, ZigbeeChannel
@registries.ZIGBEE_CHANNEL_REGISTRY.register(lighting.Ballast.cluster_id)
class Ballast(ZigbeeChannel):
"""Ballast channel."""
@registries.CLIENT_CHANNELS_REGISTRY.register(lighting.Color.cluster_id)
class ColorClientChannel(ClientChannel):
"""Color client channel."""
@registries.BINDABLE_CLUSTERS.register(lighting.Color.cluster_id)
@registries.LIGHT_CLUSTERS.register(lighting.Color.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(lighting.Color.cluster_id)
class ColorChannel(ZigbeeChannel):
"""Color channel."""
CAPABILITIES_COLOR_XY = 0x08
CAPABILITIES_COLOR_TEMP = 0x10
UNSUPPORTED_ATTRIBUTE = 0x86
REPORT_CONFIG = (
{"attr": "current_x", "config": REPORT_CONFIG_DEFAULT},
{"attr": "current_y", "config": REPORT_CONFIG_DEFAULT},
{"attr": "color_temperature", "config": REPORT_CONFIG_DEFAULT},
)
def __init__(
self, cluster: zha_typing.ZigpyClusterType, ch_pool: zha_typing.ChannelPoolType
) -> None:
"""Initialize ColorChannel."""
super().__init__(cluster, ch_pool)
self._color_capabilities = None
self._min_mireds = 153
self._max_mireds = 500
@property
def color_loop_active(self) -> Optional[int]:
"""Return cached value of the color_loop_active attribute."""
return self.cluster.get("color_loop_active")
@property
def color_temperature(self) -> Optional[int]:
"""Return cached value of color temperature."""
return self.cluster.get("color_temperature")
@property
def current_x(self) -> Optional[int]:
"""Return cached value of the current_x attribute."""
return self.cluster.get("current_x")
@property
def current_y(self) -> Optional[int]:
"""Return cached value of the current_y attribute."""
return self.cluster.get("current_y")
@property
def min_mireds(self) -> int:
"""Return the coldest color_temp that this channel supports."""
return self.cluster.get("color_temp_physical_min", self._min_mireds)
@property
def max_mireds(self) -> int:
"""Return the warmest color_temp that this channel supports."""
return self.cluster.get("color_temp_physical_max", self._max_mireds)
def get_color_capabilities(self):
"""Return the color capabilities."""
return self._color_capabilities
async def async_configure(self):
"""Configure channel."""
await self.fetch_color_capabilities(False)
await super().async_configure()
async def async_initialize(self, from_cache):
"""Initialize channel."""
await self.fetch_color_capabilities(True)
attributes = ["color_temperature", "current_x", "current_y"]
await self.get_attributes(attributes, from_cache=from_cache)
async def fetch_color_capabilities(self, from_cache):
"""Get the color configuration."""
attributes = [
"color_temp_physical_min",
"color_temp_physical_max",
"color_capabilities",
]
results = await self.get_attributes(attributes, from_cache=from_cache)
capabilities = results.get("color_capabilities")
if capabilities is None:
# ZCL Version 4 devices don't support the color_capabilities
# attribute. In this version XY support is mandatory, but we
# need to probe to determine if the device supports color
# temperature.
capabilities = self.CAPABILITIES_COLOR_XY
result = await self.get_attribute_value(
"color_temperature", from_cache=from_cache
)
if result is not None and result is not self.UNSUPPORTED_ATTRIBUTE:
capabilities |= self.CAPABILITIES_COLOR_TEMP
self._color_capabilities = capabilities
await super().async_initialize(from_cache)
|
{
"content_hash": "877df746bcc4946a609cb2b49b90dfb7",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 87,
"avg_line_length": 36.9646017699115,
"alnum_prop": 0.6581278429494852,
"repo_name": "soldag/home-assistant",
"id": "9d52ff12d379954147b8aa05f39e13a071be0106",
"size": "4177",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zha/core/channels/lighting.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19025087"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
import unittest
from mock import patch
import simplejson as json
from .context import app
app.config['TWILIO_ACCOUNT_SID'] = 'ACxxxxxx'
app.config['TWILIO_AUTH_TOKEN'] = 'yyyyyyyyy'
app.config['TWILIO_CALLER_ID'] = '+15558675309'
app.config['NYTIMES_API_KEY'] = '###'
class TwiMLTest(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
def assertTwiML(self, response):
self.assertTrue("<Response>" in response.data, "Did not find " \
"<Response>: %s" % response.data)
self.assertTrue("</Response>" in response.data, "Did not find " \
"</Response>: %s" % response.data)
self.assertEqual("200 OK", response.status)
def sms(self, body, url='/sms', to=app.config['TWILIO_CALLER_ID'],
from_='+15558675309', extra_params=None):
params = {
'SmsSid': 'SMtesting',
'AccountSid': app.config['TWILIO_ACCOUNT_SID'],
'To': to,
'From': from_,
'Body': body,
'FromCity': 'BROOKLYN',
'FromState': 'NY',
'FromCountry': 'US',
'FromZip': '55555'}
if extra_params:
params = dict(params.items() + extra_params.items())
return self.app.post(url, data=params)
def call(self, url='/voice', to=app.config['TWILIO_CALLER_ID'],
from_='+15558675309', digits=None, extra_params=None):
params = {
'CallSid': 'CAtesting',
'AccountSid': app.config['TWILIO_ACCOUNT_SID'],
'To': to,
'From': from_,
'CallStatus': 'ringing',
'Direction': 'inbound',
'FromCity': 'BROOKLYN',
'FromState': 'NY',
'FromCountry': 'US',
'FromZip': '55555'}
if digits:
params['Digits'] = digits
if extra_params:
params = dict(params.items() + extra_params.items())
return self.app.post(url, data=params)
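    # Illustrative pattern (not an actual test in this suite): subclasses
    # drive a route with these helpers and assert on the returned TwiML, e.g.
    #     response = self.sms("hello", url='/sms')
    #     self.assertTwiML(response)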
class ConferenceTests(TwiMLTest):
def test_voice(self):
response = self.call(url="/conference/conference_test")
self.assertTwiML(response)
self.assertTrue('<Conference' in response.data, "Could not find " \
"<Conference> in response: %s" % response.data)
class WaitTests(TwiMLTest):
@patch('requests.get')
def test_waitUrl(self, MockRequests):
mock_request = MockRequests()
mock_request.status_code = 200
mock_good_json = json.loads(
open('./tests/test_assets/good.json').read())
mock_request.json.return_value = mock_good_json
response = self.call(url="/wait")
self.assertTwiML(response)
self.assertTrue("<Say voice=\"alice\">" in response.data,
"Could not find <Say> verb in response: %s" % response.data)
@patch('requests.get')
def test_waitUrlAuthError(self, MockRequests):
mock_request = MockRequests()
mock_request.status_code = 403
response = self.call(url="/wait")
self.assertTwiML(response)
self.assertTrue("<Redirect>" in response.data, "Could not find hold " \
"music in failed response: %s" % response.data)
@patch('requests.get')
def test_waitUrlBadJson(self, MockRequests):
mock_request = MockRequests()
mock_request.status_code = 200
mock_request.json.return_value = None
response = self.call(url="/wait")
self.assertTwiML(response)
self.assertTrue("<Redirect>" in response.data, "Could not find hold " \
"music in failed response: %s" % response.data)
def test_waitUrlNoKey(self):
app.config['NYTIMES_API_KEY'] = None
response = self.call(url="/wait")
self.assertTwiML(response)
self.assertTrue("Configuration error" in response.data, "Could not " \
"find configuration error message in response: %s" %
response.data)
self.assertTrue("<Redirect>" in response.data, "Could not find hold " \
"music in failed response: %s" % response.data)
def test_music(self):
response = self.call(url="/music")
self.assertTwiML(response)
self.assertTrue("<Play>" in response.data, "Could not find music in " \
"response: %s" % response.data)
|
{
"content_hash": "bd6cdc00abf563afdb9900597e0c7190",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 79,
"avg_line_length": 35.153225806451616,
"alnum_prop": 0.5797201192934159,
"repo_name": "RobSpectre/New-York-Times-Conference-Room",
"id": "fddff13c97322ef9ea51e09d5d629058c4692148",
"size": "4359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_twilio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7786"
}
],
"symlink_target": ""
}
|
__author__ = 'termie@google.com (Andy Smith)'
import re
import time, datetime
try:
import cPickle as pickle
except ImportError:
import pickle
from google.appengine.ext import db
from google.appengine.api.datastore_types import Blob
DJANGO_DATE = "%Y-%m-%d"
DJANGO_TIME = "%H:%M:%S"
class DateTimeProperty(db.DateTimeProperty):
def validate(self, value):
"""Validate a datetime, attempt to convert from string
Returns:
A valid datetime object
"""
# XXX termie: pretty naive at this point, ask for forgiveness
try:
us = 0
m_fractional = re.search('(.*)\.(\d+)$', value)
if (m_fractional):
value = m_fractional.group(1)
fractional_s = m_fractional.group(2)
scaled_to_us = fractional_s + '0' * (6 - len(fractional_s))
truncated_to_us = scaled_to_us[0:6]
us = int(truncated_to_us)
t = time.strptime(value, "%s %s" % (DJANGO_DATE, DJANGO_TIME))
t = (t)[0:6] + (us,)
d = datetime.datetime(*t)
value = d
except ValueError, e:
# eat the error
pass
except TypeError, e:
      # we passed it a datetime, probably, let the original handle this
pass
value = super(DateTimeProperty, self).validate(value)
return value
class DictProperty(db.Property):
def validate(self, value):
value = super(DictProperty, self).validate(value)
if not isinstance(value, dict):
raise Exception("NOT A DICT %s" % value)
return value
def default_value(self):
return {}
def datastore_type(self):
return Blob
def get_value_for_datastore(self, model_instance):
value = super(DictProperty, self).get_value_for_datastore(model_instance)
    return Blob(pickle.dumps(value, protocol=-1))
def make_value_from_datastore(self, model_instance):
value = super(DictProperty, self).make_value_from_datastore(model_instance)
return pickle.loads(str(value))
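# Illustrative model, not part of this module (name and field are assumptions):
# DictProperty pickles the dict into a datastore Blob, so arbitrary nested
# settings survive a put()/get() round trip as plain dicts.
class ExampleSettings(db.Model):
  data = DictProperty()
# e.g. ExampleSettings(data={'theme': 'dark'}).put() stores the pickled dict;
# fetching the entity back yields a regular dict again.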
|
{
"content_hash": "816b1cbda18d7f8fef4d34ddc40a7e2e",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 79,
"avg_line_length": 28.611940298507463,
"alnum_prop": 0.6541471048513302,
"repo_name": "AloneRoad/Inforlearn",
"id": "df0ff00316d55a67397eedbb7f97fe579a58ea47",
"size": "1938",
"binary": false,
"copies": "1",
"ref": "refs/heads/1.0-rc3",
"path": "common/properties.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "321037"
},
{
"name": "C",
"bytes": "3015"
},
{
"name": "JavaScript",
"bytes": "439358"
},
{
"name": "PHP",
"bytes": "7956"
},
{
"name": "Python",
"bytes": "5804536"
},
{
"name": "R",
"bytes": "1277"
},
{
"name": "Shell",
"bytes": "6290"
}
],
"symlink_target": ""
}
|
"""Helper utilities and decorators."""
from flask import flash
from sqlalchemy.orm import exc
from werkzeug.exceptions import abort
from werkzeug.routing import BaseConverter
def flash_errors(form, category='warning'):
"""Flash all errors for a form."""
for field, errors in form.errors.items():
for error in errors:
flash(
'{0} - {1}'.format(
getattr(form, field).label.text, error), category
)
def get_object_or_404(model, *criterion):
"""Get object from db or abort."""
try:
return model.query.filter(*criterion).one()
    except (exc.NoResultFound, exc.MultipleResultsFound):
abort(404)
def get_or_create(model, **kwargs):
"""Django get_or_create function."""
instance = model.query.filter_by(**kwargs).first()
if instance:
return instance, False
else:
instance = model(**kwargs)
instance.save()
return instance, True
class RegexConverter(BaseConverter):
"""Regex for routes."""
def __init__(self, url_map, *items):
"""Init object."""
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
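# Hypothetical wiring sketch; the app object and route below are assumptions,
# not part of this module. RegexConverter only takes effect once registered
# on the url_map, after which routes can constrain parameters with it:
#
#     app.url_map.converters['regex'] = RegexConverter
#
#     @app.route('/chapter/<regex("[0-9]+"):number>')
#     def chapter(number):
#         ...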
|
{
"content_hash": "2e40d18cc6fb9287d1b8219ee3f52c02",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 69,
"avg_line_length": 27.86046511627907,
"alnum_prop": 0.6143572621035058,
"repo_name": "dyzajash/scanlation_cms",
"id": "11713afe3a10269d2fcf43f3a5bb61c999e39030",
"size": "1222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scanlation_cms/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "21555"
},
{
"name": "HTML",
"bytes": "46348"
},
{
"name": "JavaScript",
"bytes": "289576"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "78104"
},
{
"name": "Shell",
"bytes": "283"
}
],
"symlink_target": ""
}
|
from scriptine import path, log
import tarfile
import warnings
warnings.warn('scriptine.file module is still in development and will be changed')
class file_collection(object):
def __init__(self):
self.files = []
self.base = path('.')
def include(self, patterns, recursive=False):
if isinstance(patterns, basestring): patterns = (patterns,)
for pattern in patterns:
if recursive:
for dirname in self.base.dirs(pattern):
self.files.extend((f for f in dirname.walk()))
else:
self.files.extend((f for f in self.base.listdir(pattern)))
def exclude(self, patterns):
if isinstance(patterns, basestring): patterns = (patterns,)
for pattern in patterns:
self.files = [f for f in self.files if not f.fnmatch(pattern)]
def __iter__(self):
return iter(self.files)
def tar(self, dest, archive_base=None):
dest = path(dest)
if archive_base is None:
            archive_base = path(dest.basename()).splitext()[0]
tar = tarfile.open(dest, 'w:gz')
for f in self:
log.info('adding %s', f)
tar.add(f, arcname=archive_base/f, recursive=False)
tar.close()
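# Hedged usage sketch (patterns and paths are placeholders): collect sources,
# drop compiled files, and pack what is left into a gzipped tarball.
#
#     coll = file_collection()
#     coll.include('*.py')
#     coll.exclude('*.pyc')
#     coll.tar('dist/sources.tar.gz', archive_base='sources')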
|
{
"content_hash": "5e91394298811d7ed9a2d6bb4719feed",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 82,
"avg_line_length": 33.15384615384615,
"alnum_prop": 0.5870069605568445,
"repo_name": "olt/scriptine",
"id": "c78cafa26ac34893d76526b2c79ecd1b4e10718d",
"size": "1293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scriptine/files.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75342"
}
],
"symlink_target": ""
}
|
try:
import blurdev
aboutToClearPaths = blurdev.core.aboutToClearPaths
except ImportError:
aboutToClearPaths = None
# This is used by Studiomax's rescaletime module. It basically provides a QMainWindow
# that is parented to 3ds Max to tell the user not to do anything while it's processing.
# rescaletime is basically a hack that allows us to change the frameRate of a max scene
# and preserve the key frame locations. It uses win32 to interact with the max interface.
try:
from blurdev.gui import Window
except ImportError:
Window = None
#--------------------------------------------------------------------------------
# These methods were marked as @pendingdeprecation. They have been removed from
# cross3d, but code outside of cross3d may still attempt to use these functions.
# We need to search the code base to make sure those calls are removed.
"""
SceneCamera.isVrayCam()
Scene._nativeCameras()
Scene._nativeModels()
Scene._nativeRefresh()
Scene.application()
Scene.cameras()
Scene.models()
Scene.exportModel()
Scene.update()
ValueRange.multiply()
FrameRange.offset(...)
FrameRange.merge(...)
SceneModel.addAnimationClip(...)
Timecode.fromSeconds(...)
Timecode.toFrames()
FileSequence.sequenceForPath(...)
FileSequence.paddingCode()
FileSequence.codeName()
FileSequence.codePath()
SceneWrapper.setName(...)
SceneViewport.restoreState()
SceneViewport.storeState()
SceneAnimationController.controllerType()
Container.isVisible()
Container.setVisible(...)
"""
|
{
"content_hash": "149eb2b84cb01924dc06740b123934b4",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 89,
"avg_line_length": 30.040816326530614,
"alnum_prop": 0.7438858695652174,
"repo_name": "blurstudio/cross3d",
"id": "527549db8866ead43f3cc5157cfab078c57fe0f3",
"size": "2013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cross3d/migrate/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "46"
},
{
"name": "MAXScript",
"bytes": "17640"
},
{
"name": "Python",
"bytes": "1023336"
}
],
"symlink_target": ""
}
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from lbworkflow.views.helper import user_wf_info_as_dict
from .leave.models import Leave
from .test_base import BaseTests
User = get_user_model()
class HelperTests(BaseTests):
def test_user_wf_info_as_dict(self):
leave = self.leave
leave.submit_process()
info = user_wf_info_as_dict(leave, self.users["tom"])
self.assertIsNotNone(info["task"])
self.assertIsNotNone(info["object"])
self.assertFalse(info["can_give_up"])
self.assertEqual(info["wf_code"], "leave")
info = user_wf_info_as_dict(leave, self.users["owner"])
self.assertIsNone(info["task"])
self.assertTrue(info["can_give_up"])
info = user_wf_info_as_dict(leave, self.users["vicalloy"])
self.assertIsNone(info["task"])
class ViewTests(BaseTests):
def setUp(self):
super().setUp()
self.client.login(username="owner", password="password")
def test_start_wf(self):
resp = self.client.get(reverse("wf_start_wf"))
self.assertEqual(resp.status_code, 200)
def test_wf_list(self):
resp = self.client.get(reverse("wf_list", args=("leave",)))
self.assertEqual(resp.status_code, 200)
def test_wf_report_list(self):
resp = self.client.get(reverse("wf_report_list"))
self.assertEqual(resp.status_code, 200)
def test_wf_list_export(self):
resp = self.client.get(
reverse("wf_list", args=("leave",)), {"export": 1}
)
self.assertEqual(resp.status_code, 200)
def test_detail(self):
resp = self.client.get(reverse("wf_detail", args=("1",)))
self.assertEqual(resp.status_code, 200)
def test_submit(self):
self.client.login(username="owner", password="password")
url = reverse("wf_new", args=("leave",))
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
data = {
"start_on": "2017-04-19 09:01",
"end_on": "2017-04-20 09:01",
"leave_days": "1",
"reason": "test save",
}
resp = self.client.post(url, data)
leave = Leave.objects.get(reason="test save")
self.assertRedirects(resp, "/wf/%s/" % leave.pinstance.pk)
self.assertEqual("Draft", leave.pinstance.cur_node.name)
data["act_submit"] = "Submit"
data["reason"] = "test submit"
resp = self.client.post(url, data)
leave = Leave.objects.get(reason="test submit")
self.assertRedirects(resp, "/wf/%s/" % leave.pinstance.pk)
self.assertEqual("A2", leave.pinstance.cur_node.name)
def test_edit(self):
self.client.login(username="owner", password="password")
data = {
"start_on": "2017-04-19 09:01",
"end_on": "2017-04-20 09:01",
"leave_days": "1",
"reason": "test save",
}
url = reverse("wf_new", args=("leave",))
resp = self.client.post(url, data)
leave = Leave.objects.get(reason="test save")
self.assertRedirects(resp, "/wf/%s/" % leave.pinstance.pk)
self.assertEqual("Draft", leave.pinstance.cur_node.name)
url = reverse("wf_edit", args=(leave.pinstance.pk,))
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
data["act_submit"] = "Submit"
data["reason"] = "test submit"
resp = self.client.post(url, data)
leave = Leave.objects.get(reason="test submit")
self.assertRedirects(resp, "/wf/%s/" % leave.pinstance.pk)
self.assertEqual("A2", leave.pinstance.cur_node.name)
def test_delete(self):
self.client.login(username="admin", password="password")
# POST
url = reverse("wf_delete")
leave = self.create_leave("to delete")
data = {"pk": leave.pinstance.pk}
resp = self.client.post(url, data)
self.assertRedirects(resp, "/wf/list/")
self.assertIsNone(self.get_leave("to delete"))
# GET
leave = self.create_leave("to delete")
data = {"pk": leave.pinstance.pk}
resp = self.client.get(url, data)
self.assertRedirects(resp, "/wf/list/")
self.assertIsNone(self.get_leave("to delete"))
|
{
"content_hash": "a8a42718121c4eb2aa8184a098347267",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 67,
"avg_line_length": 34.88709677419355,
"alnum_prop": 0.5945446139620897,
"repo_name": "vicalloy/django-lb-workflow",
"id": "b240ce6698e4e924b2db977e6e864de02f843cb1",
"size": "4326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lbworkflow/tests/test_process.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "Dockerfile",
"bytes": "452"
},
{
"name": "HTML",
"bytes": "32992"
},
{
"name": "JavaScript",
"bytes": "18"
},
{
"name": "Makefile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "194839"
}
],
"symlink_target": ""
}
|
import switchlight_api
import time
import sys
import threading
from bottle import run, get, post, request, template, static_file, redirect, response
class Main(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.addr = sys.argv[1].split(':')
if len(self.addr) < 2: self.addr.append(25500)
self.sl = switchlight_api.Client(self.addr[0], int(self.addr[1]))
self.active = True
def run(self):
while self.active:
self.sl.update()
time.sleep(0.1)
main = None
@get('/')
def webui():
if main.sl.get_locked(): return template('locked.tpl', incorrect = False)
return template(
'main.tpl',
switches = [sw for sw in main.sl.get_switches().values()],
locked = main.sl.get_locked(),
timers = [[time.strftime('%I:%M:%S %p', time.localtime(t.time)),
t.action.items(), t.lock, t.id] for t in main.sl.get_timers().values()]
)
@get('/set/<switch>/<statename>')
def set_switch(switch, statename):
if main.sl.get_locked(): redirect('/'); return
sw = main.sl.get_switches()[switch]
sw.set(statename)
time.sleep(0.2)
redirect('/')
@get('/lock')
def lock():
if main.sl.get_locked(): redirect('/'); return
main.sl.lock()
time.sleep(0.2)
redirect('/')
@post('/unlock')
def unlock():
if main.sl.unlock(request.forms.get('code')):
time.sleep(0.2)
redirect('/')
else:
return template('locked.tpl', incorrect = True)
@get('/settimer')
def set_timer_page():
if main.sl.get_locked(): redirect('/'); return
return template('settimer.tpl', switches = main.sl.get_switches().values())
@post('/settimer')
def set_timer():
if main.sl.get_locked(): redirect('/'); return
action = {}
for switchname in request.forms.getall('switches'):
switch = main.sl.get_switch(switchname)
if request.forms.get('switchmode') == 'on':
action[switch.name] = switch.states[-1]
else:
action[switch.name] = switch.states[0]
acttime = time.time() + ((int(request.forms.get('hours')) * 60) + int(request.forms.get('minutes'))) * 60
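    # e.g. hours=1, minutes=30 gives ((1 * 60) + 30) * 60 = 5400 seconds,
    # so the timer fires an hour and a half from now.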
main.sl.set_timer(acttime, action, bool(request.forms.get('lock')))
time.sleep(0.2)
redirect('/')
@get('/cancel/<id>')
def cancel_timer(id):
if main.sl.get_locked(): redirect('/'); return
timer = main.sl.get_timers().get(int(id))
if timer:
main.sl.cancel_timer(timer)
time.sleep(0.2)
redirect('/')
@get('/images/<filename>')
def get_static_image(filename):
return static_file(filename, root='images')
@get('/favicon')
def get_favicon():
return static_file('switch-small.png', root='images')
try:
main = Main()
main.start()
try: addr = sys.argv[2].split(':')
except: print('Usage: switchlight-web.py server_address[:port] listen_address[:port]'); quit()
if len(addr) < 2: addr.append(8080)
run(host=addr[0], port=int(addr[1]), debug=True)
except:
main.active = False
main.sl.disconnect()
raise
|
{
"content_hash": "4d41cf18fcbf43b08b4f00041218b560",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 109,
"avg_line_length": 29.858490566037737,
"alnum_prop": 0.5876777251184834,
"repo_name": "hunternet93/switchlight",
"id": "babbac29a9bf3ffe4981d890963edbbf8d3d1d45",
"size": "3187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "switchlight-web.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37232"
}
],
"symlink_target": ""
}
|
"""
slurpECL.py
Honestly there's not much practical use for this one but it was fun to
experiment with.
It's like slurp.py, but sends a midi START message followed by
midi CLOCK messages for the designated tempo, and stops when a midi STOP
message is received (or KeyboardInterrupt).
Note that for this to work properly the DGX-505 needs to be set
with EXTERNAL CLOCK = ON. It also won't capture everything needed for
proper playback, because some of the channel settings are output
when the song/style is selected, not when playback starts, and of course
the tempo cannot change.
"""
import argparse
import mido
import time
import threading
import sys
import logging
from commons import mido_util
from commons.timers import offsetTimer
argparser = argparse.ArgumentParser(
description="Dumps midi messages as mido text with line breaks to stdout")
argparser.add_argument(
'-p', '--port', type=str, metavar='PORT',
help="Input port to read from (run 'mido-ports' to list available ports)")
inportargs = argparser.add_mutually_exclusive_group()
inportargs.add_argument(
'-g', '--guessport', action='store_true',
help="Guess which ports to use (partial name match on PORT)")
inportargs.add_argument(
'-V', '--virtual', action='store_true',
help='Use virtual ports')
argparser.add_argument(
'-o', '--outport', type=str, metavar='PORT',
nargs='?', const=None, default=False, # a bit hacky but whatever
help="Use this different output port")
argparser.add_argument(
'-t', '--tempo', type=float, default='92',
help="tempo, quarter-notes-per-minute, default 92 qnpm")
argparser.add_argument(
'-c', '--clocktime', action='store_true',
help="Use the clock (since epoch) time instead of elapsed time")
args = argparser.parse_args()
input_port = args.port
if args.outport is False:
output_port = args.port
else:
output_port = args.outport
CLOCK = mido.Message('clock')
START = mido.Message('start')
STOP = mido.Message('stop')
# midi beat clock is 24 pulses per QN.
# tempo is given in QN per minute
# so the pulse duration in seconds is:
pulse_duration = 60 / (24 * args.tempo)
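# e.g. at the default 92 qnpm: 60 / (24 * 92) = ~0.0272 s per pulse,
# i.e. roughly 36.8 clock messages per second.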
def new_callback(stopper, clocktime=False):
# this is a bit of an overcomplicated way to do it but whatever
if clocktime:
timer = time.time
else:
timer = offsetTimer()
def msg_callback(message):
# mutate the message!
message.time = timer()
# write text to stdout. Also add a line break
sys.stdout.write(str(message)+'\n')
sys.stdout.flush()
# just a simple conditional
if message.type == 'stop':
stopper.set()
return msg_callback
logger = logging.getLogger('slurpECL')
handler = logging.StreamHandler()
logger.addHandler(handler)
logger.setLevel(logging.INFO)
with mido_util.open_output(
output_port, args.guessport, args.virtual,
autoreset=True) as outport:
logger.info('Sending to port %r', outport.name)
stopper = threading.Event()
with mido_util.open_input(
input_port, args.guessport, args.virtual,
callback=new_callback(stopper, args.clocktime)) as inport:
logger.info('Reading from port %r', inport.name)
# send the start message.
try:
outport.send(START)
while (not stopper.is_set()):
# MIDI beat clock
outport.send(CLOCK)
stopper.wait(pulse_duration)
except KeyboardInterrupt:
pass
finally:
outport.send(STOP)
|
{
"content_hash": "0435b3774a9a0f29e001d3c55b909e03",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 78,
"avg_line_length": 30.25423728813559,
"alnum_prop": 0.6756302521008404,
"repo_name": "hschh86/usersong-extractor",
"id": "99371f3ed07e7fd0fcfee6959df582479910ab69",
"size": "3570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slurpECL.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "109178"
},
{
"name": "Python",
"bytes": "44818"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, with_statement
import tornado.escape
from tornado.escape import utf8, xhtml_escape, xhtml_unescape, url_escape, url_unescape, to_unicode, json_decode, json_encode, squeeze, recursive_unicode
from tornado.util import u, unicode_type
from tornado.test.util import unittest
linkify_tests = [
# (input, linkify_kwargs, expected_output)
("hello http://world.com/!", {},
u('hello <a href="http://world.com/">http://world.com/</a>!')),
("hello http://world.com/with?param=true&stuff=yes", {},
u('hello <a href="http://world.com/with?param=true&stuff=yes">http://world.com/with?param=true&stuff=yes</a>')),
# an opened paren followed by many chars killed Gruber's regex
("http://url.com/w(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", {},
u('<a href="http://url.com/w">http://url.com/w</a>(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')),
# as did too many dots at the end
("http://url.com/withmany.......................................", {},
u('<a href="http://url.com/withmany">http://url.com/withmany</a>.......................................')),
("http://url.com/withmany((((((((((((((((((((((((((((((((((a)", {},
u('<a href="http://url.com/withmany">http://url.com/withmany</a>((((((((((((((((((((((((((((((((((a)')),
# some examples from http://daringfireball.net/2009/11/liberal_regex_for_matching_urls
    # plus a few extras (such as multiple parentheses).
("http://foo.com/blah_blah", {},
u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>')),
("http://foo.com/blah_blah/", {},
u('<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>')),
("(Something like http://foo.com/blah_blah)", {},
u('(Something like <a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>)')),
("http://foo.com/blah_blah_(wikipedia)", {},
u('<a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>')),
("http://foo.com/blah_(blah)_(wikipedia)_blah", {},
u('<a href="http://foo.com/blah_(blah)_(wikipedia)_blah">http://foo.com/blah_(blah)_(wikipedia)_blah</a>')),
("(Something like http://foo.com/blah_blah_(wikipedia))", {},
u('(Something like <a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>)')),
("http://foo.com/blah_blah.", {},
u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>.')),
("http://foo.com/blah_blah/.", {},
u('<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>.')),
("<http://foo.com/blah_blah>", {},
u('<<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>>')),
("<http://foo.com/blah_blah/>", {},
u('<<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>>')),
("http://foo.com/blah_blah,", {},
u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>,')),
("http://www.example.com/wpstyle/?p=364.", {},
u('<a href="http://www.example.com/wpstyle/?p=364">http://www.example.com/wpstyle/?p=364</a>.')),
("rdar://1234",
{"permitted_protocols": ["http", "rdar"]},
u('<a href="rdar://1234">rdar://1234</a>')),
("rdar:/1234",
{"permitted_protocols": ["rdar"]},
u('<a href="rdar:/1234">rdar:/1234</a>')),
("http://userid:password@example.com:8080", {},
u('<a href="http://userid:password@example.com:8080">http://userid:password@example.com:8080</a>')),
("http://userid@example.com", {},
u('<a href="http://userid@example.com">http://userid@example.com</a>')),
("http://userid@example.com:8080", {},
u('<a href="http://userid@example.com:8080">http://userid@example.com:8080</a>')),
("http://userid:password@example.com", {},
u('<a href="http://userid:password@example.com">http://userid:password@example.com</a>')),
("message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e",
{"permitted_protocols": ["http", "message"]},
u('<a href="message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e">message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e</a>')),
(u("http://\u27a1.ws/\u4a39"), {},
u('<a href="http://\u27a1.ws/\u4a39">http://\u27a1.ws/\u4a39</a>')),
("<tag>http://example.com</tag>", {},
u('<tag><a href="http://example.com">http://example.com</a></tag>')),
("Just a www.example.com link.", {},
u('Just a <a href="http://www.example.com">www.example.com</a> link.')),
("Just a www.example.com link.",
{"require_protocol": True},
u('Just a www.example.com link.')),
("A http://reallylong.com/link/that/exceedsthelenglimit.html",
{"require_protocol": True, "shorten": True},
u('A <a href="http://reallylong.com/link/that/exceedsthelenglimit.html" title="http://reallylong.com/link/that/exceedsthelenglimit.html">http://reallylong.com/link...</a>')),
("A http://reallylongdomainnamethatwillbetoolong.com/hi!",
{"shorten": True},
u('A <a href="http://reallylongdomainnamethatwillbetoolong.com/hi" title="http://reallylongdomainnamethatwillbetoolong.com/hi">http://reallylongdomainnametha...</a>!')),
("A file:///passwords.txt and http://web.com link", {},
u('A file:///passwords.txt and <a href="http://web.com">http://web.com</a> link')),
("A file:///passwords.txt and http://web.com link",
{"permitted_protocols": ["file"]},
u('A <a href="file:///passwords.txt">file:///passwords.txt</a> and http://web.com link')),
("www.external-link.com",
{"extra_params": 'rel="nofollow" class="external"'},
u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>')),
("www.external-link.com and www.internal-link.com/blogs extra",
{"extra_params": lambda href: 'class="internal"' if href.startswith("http://www.internal-link.com") else 'rel="nofollow" class="external"'},
u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a> and <a href="http://www.internal-link.com/blogs" class="internal">www.internal-link.com/blogs</a> extra')),
("www.external-link.com",
{"extra_params": lambda href: ' rel="nofollow" class="external" '},
u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>')),
]
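# A concrete call matching the table above (taken from the www.example.com
# row): tornado.escape.linkify("Just a www.example.com link.") returns
# 'Just a <a href="http://www.example.com">www.example.com</a> link.'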
class EscapeTestCase(unittest.TestCase):
def test_linkify(self):
for text, kwargs, html in linkify_tests:
linked = tornado.escape.linkify(text, **kwargs)
self.assertEqual(linked, html)
def test_xhtml_escape(self):
tests = [
("<foo>", "<foo>"),
(u("<foo>"), u("<foo>")),
(b"<foo>", b"<foo>"),
("<>&\"'", "<>&"'"),
("&", "&amp;"),
(u("<\u00e9>"), u("<\u00e9>")),
(b"<\xc3\xa9>", b"<\xc3\xa9>"),
]
for unescaped, escaped in tests:
self.assertEqual(utf8(xhtml_escape(unescaped)), utf8(escaped))
self.assertEqual(utf8(unescaped), utf8(xhtml_unescape(escaped)))
def test_url_escape_unicode(self):
tests = [
# byte strings are passed through as-is
(u('\u00e9').encode('utf8'), '%C3%A9'),
(u('\u00e9').encode('latin1'), '%E9'),
# unicode strings become utf8
(u('\u00e9'), '%C3%A9'),
]
for unescaped, escaped in tests:
self.assertEqual(url_escape(unescaped), escaped)
def test_url_unescape_unicode(self):
tests = [
('%C3%A9', u('\u00e9'), 'utf8'),
('%C3%A9', u('\u00c3\u00a9'), 'latin1'),
('%C3%A9', utf8(u('\u00e9')), None),
]
for escaped, unescaped, encoding in tests:
# input strings to url_unescape should only contain ascii
# characters, but make sure the function accepts both byte
# and unicode strings.
self.assertEqual(url_unescape(to_unicode(escaped), encoding), unescaped)
self.assertEqual(url_unescape(utf8(escaped), encoding), unescaped)
def test_url_escape_quote_plus(self):
unescaped = '+ #%'
plus_escaped = '%2B+%23%25'
escaped = '%2B%20%23%25'
self.assertEqual(url_escape(unescaped), plus_escaped)
self.assertEqual(url_escape(unescaped, plus=False), escaped)
self.assertEqual(url_unescape(plus_escaped), unescaped)
self.assertEqual(url_unescape(escaped, plus=False), unescaped)
self.assertEqual(url_unescape(plus_escaped, encoding=None),
utf8(unescaped))
self.assertEqual(url_unescape(escaped, encoding=None, plus=False),
utf8(unescaped))
def test_escape_return_types(self):
# On python2 the escape methods should generally return the same
# type as their argument
self.assertEqual(type(xhtml_escape("foo")), str)
self.assertEqual(type(xhtml_escape(u("foo"))), unicode_type)
def test_json_decode(self):
# json_decode accepts both bytes and unicode, but strings it returns
# are always unicode.
self.assertEqual(json_decode(b'"foo"'), u("foo"))
self.assertEqual(json_decode(u('"foo"')), u("foo"))
# Non-ascii bytes are interpreted as utf8
self.assertEqual(json_decode(utf8(u('"\u00e9"'))), u("\u00e9"))
def test_json_encode(self):
# json deals with strings, not bytes. On python 2 byte strings will
# convert automatically if they are utf8; on python 3 byte strings
# are not allowed.
self.assertEqual(json_decode(json_encode(u("\u00e9"))), u("\u00e9"))
if bytes is str:
self.assertEqual(json_decode(json_encode(utf8(u("\u00e9")))), u("\u00e9"))
self.assertRaises(UnicodeDecodeError, json_encode, b"\xe9")
def test_squeeze(self):
        self.assertEqual(squeeze(u('sequences     of    whitespace   chars')),
                         u('sequences of whitespace chars'))
def test_recursive_unicode(self):
tests = {
'dict': {b"foo": b"bar"},
'list': [b"foo", b"bar"],
'tuple': (b"foo", b"bar"),
'bytes': b"foo"
}
self.assertEqual(recursive_unicode(tests['dict']), {u("foo"): u("bar")})
self.assertEqual(recursive_unicode(tests['list']), [u("foo"), u("bar")])
self.assertEqual(recursive_unicode(tests['tuple']), (u("foo"), u("bar")))
self.assertEqual(recursive_unicode(tests['bytes']), u("foo"))
|
{
"content_hash": "c9891082d4eac3086bd29fe9da4a32a7",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 212,
"avg_line_length": 46.633187772925766,
"alnum_prop": 0.5920966382620095,
"repo_name": "xinyu7/tornado",
"id": "98a23463890c0b8633bc966075253196df8ecadd",
"size": "10703",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tornado/test/escape_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1070"
},
{
"name": "CSS",
"bytes": "7736"
},
{
"name": "HTML",
"bytes": "13318"
},
{
"name": "JavaScript",
"bytes": "6073"
},
{
"name": "Python",
"bytes": "1404549"
},
{
"name": "Ruby",
"bytes": "1733"
},
{
"name": "Shell",
"bytes": "5045"
}
],
"symlink_target": ""
}
|
"""Light for Shelly."""
from __future__ import annotations
from aioshelly import Block
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.core import callback
from homeassistant.util.color import (
color_hs_to_RGB,
color_RGB_to_hs,
color_temperature_kelvin_to_mired,
color_temperature_mired_to_kelvin,
)
from . import ShellyDeviceWrapper
from .const import (
COAP,
DATA_CONFIG_ENTRY,
DOMAIN,
KELVIN_MAX_VALUE,
KELVIN_MIN_VALUE_COLOR,
KELVIN_MIN_VALUE_WHITE,
)
from .entity import ShellyBlockEntity
from .utils import async_remove_shelly_entity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up lights for device."""
wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][config_entry.entry_id][COAP]
blocks = []
for block in wrapper.device.blocks:
if block.type == "light":
blocks.append(block)
elif block.type == "relay":
appliance_type = wrapper.device.settings["relays"][int(block.channel)].get(
"appliance_type"
)
if appliance_type and appliance_type.lower() == "light":
blocks.append(block)
unique_id = (
f'{wrapper.device.shelly["mac"]}-{block.type}_{block.channel}'
)
await async_remove_shelly_entity(hass, "switch", unique_id)
if not blocks:
return
async_add_entities(ShellyLight(wrapper, block) for block in blocks)
class ShellyLight(ShellyBlockEntity, LightEntity):
"""Switch that controls a relay block on Shelly devices."""
def __init__(self, wrapper: ShellyDeviceWrapper, block: Block) -> None:
"""Initialize light."""
super().__init__(wrapper, block)
self.control_result = None
self.mode_result = None
self._supported_features = 0
self._min_kelvin = KELVIN_MIN_VALUE_WHITE
self._max_kelvin = KELVIN_MAX_VALUE
if hasattr(block, "brightness") or hasattr(block, "gain"):
self._supported_features |= SUPPORT_BRIGHTNESS
if hasattr(block, "colorTemp"):
self._supported_features |= SUPPORT_COLOR_TEMP
if hasattr(block, "white"):
self._supported_features |= SUPPORT_WHITE_VALUE
if hasattr(block, "red") and hasattr(block, "green") and hasattr(block, "blue"):
self._supported_features |= SUPPORT_COLOR
self._min_kelvin = KELVIN_MIN_VALUE_COLOR
@property
def supported_features(self) -> int:
"""Supported features."""
return self._supported_features
@property
def is_on(self) -> bool:
"""If light is on."""
if self.control_result:
return self.control_result["ison"]
return self.block.output
@property
def mode(self) -> str | None:
"""Return the color mode of the light."""
if self.mode_result:
return self.mode_result["mode"]
if hasattr(self.block, "mode"):
return self.block.mode
if (
hasattr(self.block, "red")
and hasattr(self.block, "green")
and hasattr(self.block, "blue")
):
return "color"
return "white"
@property
def brightness(self) -> int:
"""Brightness of light."""
if self.mode == "color":
if self.control_result:
brightness = self.control_result["gain"]
else:
brightness = self.block.gain
else:
if self.control_result:
brightness = self.control_result["brightness"]
else:
brightness = self.block.brightness
return int(brightness / 100 * 255)
@property
def white_value(self) -> int:
"""White value of light."""
if self.control_result:
white = self.control_result["white"]
else:
white = self.block.white
return int(white)
@property
def hs_color(self) -> tuple[float, float]:
"""Return the hue and saturation color value of light."""
if self.mode == "white":
return color_RGB_to_hs(255, 255, 255)
if self.control_result:
red = self.control_result["red"]
green = self.control_result["green"]
blue = self.control_result["blue"]
else:
red = self.block.red
green = self.block.green
blue = self.block.blue
return color_RGB_to_hs(red, green, blue)
@property
def color_temp(self) -> int | None:
"""Return the CT color value in mireds."""
if self.mode == "color":
return None
if self.control_result:
color_temp = self.control_result["temp"]
else:
color_temp = self.block.colorTemp
color_temp = min(self._max_kelvin, max(self._min_kelvin, color_temp))
return int(color_temperature_kelvin_to_mired(color_temp))
@property
def min_mireds(self) -> int:
"""Return the coldest color_temp that this light supports."""
return int(color_temperature_kelvin_to_mired(self._max_kelvin))
@property
def max_mireds(self) -> int:
"""Return the warmest color_temp that this light supports."""
return int(color_temperature_kelvin_to_mired(self._min_kelvin))
async def async_turn_on(self, **kwargs) -> None:
"""Turn on light."""
if self.block.type == "relay":
self.control_result = await self.block.set_state(turn="on")
self.async_write_ha_state()
return
set_mode = None
params = {"turn": "on"}
if ATTR_BRIGHTNESS in kwargs:
tmp_brightness = int(kwargs[ATTR_BRIGHTNESS] / 255 * 100)
if hasattr(self.block, "gain"):
params["gain"] = tmp_brightness
if hasattr(self.block, "brightness"):
params["brightness"] = tmp_brightness
if ATTR_COLOR_TEMP in kwargs:
color_temp = color_temperature_mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])
color_temp = min(self._max_kelvin, max(self._min_kelvin, color_temp))
# Color temperature change - used only in white mode, switch device mode to white
set_mode = "white"
params["red"] = params["green"] = params["blue"] = 255
params["temp"] = int(color_temp)
if ATTR_HS_COLOR in kwargs:
red, green, blue = color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
# Color channels change - used only in color mode, switch device mode to color
set_mode = "color"
params["red"] = red
params["green"] = green
params["blue"] = blue
if ATTR_WHITE_VALUE in kwargs:
            # White channel change - used only in color mode, switch device mode to color
set_mode = "color"
params["white"] = int(kwargs[ATTR_WHITE_VALUE])
if set_mode and self.mode != set_mode:
self.mode_result = await self.wrapper.device.switch_light_mode(set_mode)
self.control_result = await self.block.set_state(**params)
self.async_write_ha_state()
async def async_turn_off(self, **kwargs) -> None:
"""Turn off light."""
self.control_result = await self.block.set_state(turn="off")
self.async_write_ha_state()
@callback
def _update_callback(self):
"""When device updates, clear control & mode result that overrides state."""
self.control_result = None
self.mode_result = None
super()._update_callback()
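# Worked numbers for the mired conversion used above (the constant values are
# assumptions; only the mireds = 1e6 / kelvin relationship is fixed): with
# KELVIN_MAX_VALUE = 6500 and KELVIN_MIN_VALUE_WHITE = 2700, min_mireds is
# int(1e6 / 6500) = 153 and max_mireds is int(1e6 / 2700) = 370.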
|
{
"content_hash": "39f7fe3613ffee8b85d4d00a423093df",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 96,
"avg_line_length": 33.99134199134199,
"alnum_prop": 0.586602139582272,
"repo_name": "adrienbrault/home-assistant",
"id": "a9e137968758a91fa46fb19b8a5dc272cf01bec4",
"size": "7852",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/shelly/light.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
from .bitstream import BitStream
__all__ = ['BitStream', 'bitmasks']
|
{
"content_hash": "d3fa6781e83fb170a2f8cab48ab5eff9",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 35,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.6857142857142857,
"repo_name": "mudaltsov/bitstream_iter",
"id": "e844734cf7d0462de45d31a79fdfa4ceed922200",
"size": "70",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bitstream_iter/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12323"
}
],
"symlink_target": ""
}
|
from tkinter import Tk
import pytest
from pyDEA.main_gui import MainFrame
import tests.utils_for_tests as utils_for_tests
@pytest.fixture
def main_frame(request):
parent = Tk()
main_frame = MainFrame(parent)
request.addfinalizer(parent.destroy)
return main_frame
def test_run_gui(main_frame):
filename = 'tests/params_new_format.txt'
def get_params():
return filename
main_frame.params_frame._get_filename_for_load = get_params
def get_sheet_name(names):
return 'Sheet1'
main_frame.data_frame.data_tab.call_dialog = get_sheet_name
main_frame.params_frame.load_file()
main_frame.run()
model_solutions = main_frame.data_frame.solution_tab.model_solutions
assert len(model_solutions) == 1
model_solution = model_solutions[0]
model_input = model_solution._input_data
utils_for_tests.check_optimal_solution_status_and_sizes(model_solution,
model_input)
dmus = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']
utils_for_tests.check_efficiency_scores(dmus, [1, 0.86998617, 1, 0.95561335,
0.85,
1, 1, 0.84507042, 1,
0.524, 0.89058524],
model_solution, model_input)
|
{
"content_hash": "b523547522791ae05efac39a43398b2c",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 80,
"avg_line_length": 35.425,
"alnum_prop": 0.5589273112208892,
"repo_name": "olga-perederieieva/pyDEA",
"id": "b0644629565064abbb126b62763b84339bda9306",
"size": "1417",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_run_routine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "699661"
}
],
"symlink_target": ""
}
|
import numpy as np
import quantmod as qm
import pandas as pd
import pandas_datareader as web
# In[]:
ch = qm.get_symbol('TSLA')
ch.adjust(inplace=True)
ch.add_RSI()
ch.add_MACD()
ch.add_EMA()
ch.add_BBANDS()
ch.plot(kind='candlestick', theme='light', log=True, volume=True)
ch.plot(kind='candlestick', theme='dark', log=True, volume=True)
ch.ind.tail(100)
|
{
"content_hash": "166ec1dcc9fc0ec61679d4482c0c5bbe",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 65,
"avg_line_length": 20,
"alnum_prop": 0.7138888888888889,
"repo_name": "jackwluo/py-quantmod",
"id": "fe77284c2a6b63a212f923bbac7cf7e5ef95edb1",
"size": "369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests_themes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4607630"
},
{
"name": "Python",
"bytes": "148917"
}
],
"symlink_target": ""
}
|
import datetime
from django.conf import settings
from freezegun import freeze_time
from testil import assert_raises, eq
from corehq.apps.celery import task
from corehq.celery_monitoring.heartbeat import (
HEARTBEAT_FREQUENCY,
Heartbeat,
HeartbeatNeverRecorded,
)
from corehq.celery_monitoring.signals import (
TimeToStartTimer,
TimingNotAvailable,
get_domain_from_task,
)
def test_heartbeat():
hb = Heartbeat('celery_periodic')
hb.clear_last_seen()
with assert_raises(HeartbeatNeverRecorded):
hb.get_last_seen()
with assert_raises(HeartbeatNeverRecorded):
hb.get_blockage_duration()
seen_time = datetime.datetime.utcnow()
with freeze_time(seen_time):
hb.mark_seen()
eq(hb.get_last_seen(), seen_time)
eq(hb.get_blockage_duration(), datetime.timedelta(seconds=0))
with freeze_time(seen_time + datetime.timedelta(minutes=10)):
eq(hb.get_last_seen(), seen_time)
eq(hb.get_blockage_duration(), datetime.timedelta(minutes=10) - HEARTBEAT_FREQUENCY)
def test_get_and_report_blockage_duration():
hb = Heartbeat('celery_periodic')
hb.mark_seen()
# just assert that this doesn't error
hb.get_and_report_blockage_duration()
def test_time_to_start_timer():
task_id = 'abc123'
delay = datetime.timedelta(seconds=6)
start_time = datetime.datetime.utcnow()
# starts empty
with assert_raises(TimingNotAvailable):
TimeToStartTimer(task_id).stop_and_pop_timing()
with freeze_time(start_time):
TimeToStartTimer(task_id).start_timing(datetime.datetime.utcnow())
with freeze_time(start_time + delay):
time_to_start = TimeToStartTimer(task_id).stop_and_pop_timing()
eq(time_to_start, delay)
# can only pop once, second time empty
with assert_raises(TimingNotAvailable):
TimeToStartTimer(task_id).stop_and_pop_timing()
def test_time_to_start_timer_with_eta():
task_id = 'abc1234'
delay = datetime.timedelta(seconds=6)
start_time = datetime.datetime.utcnow()
eta = start_time + datetime.timedelta(minutes=5)
with freeze_time(start_time):
TimeToStartTimer(task_id).start_timing(eta)
with freeze_time(eta + delay):
time_to_start = TimeToStartTimer(task_id).stop_and_pop_timing()
eq(time_to_start, delay)
def test_parse_iso8601():
eq(TimeToStartTimer.parse_iso8601('2009-11-17T12:30:56.527191'),
datetime.datetime(2009, 11, 17, 12, 30, 56, 527191))
def test_import_tasks():
from . import tasks
for queue in settings.CELERY_HEARTBEAT_THRESHOLDS:
# assert each heartbeat task is there
getattr(tasks, Heartbeat(queue).periodic_task_name)
def test_get_domain_from_task():
@task()
def example_task_1(domain, var1, var2):
pass
@task()
def example_task_2(var1, domain, var2):
pass
@task()
def example_task_3(var1, var2, domain=None):
pass
@task()
def example_task_4(domain_name):
pass
@task()
def example_task_5(var1, var2):
pass
eq('example', get_domain_from_task(example_task_1, (), {'domain': 'example', 'var1': 1, 'var2': 2}))
eq('example', get_domain_from_task(example_task_1, ('example', 1, 2), {}))
eq('example', get_domain_from_task(example_task_2, (), {'domain': 'example', 'var1': 1, 'var2': 2}))
eq('example', get_domain_from_task(example_task_2, (1,), {'domain': 'example', 'var2': 2}))
eq('example', get_domain_from_task(example_task_2, (1, 'example', 2), {}))
eq('example', get_domain_from_task(example_task_3, (), {'domain': 'example', 'var1': 1, 'var2': 2}))
eq('example', get_domain_from_task(example_task_3, (1, 2), {'domain': 'example'}))
eq('example', get_domain_from_task(example_task_3, (1, 2, 'example'), {}))
eq('example', get_domain_from_task(example_task_4, ('example',), {}))
eq(None, get_domain_from_task(example_task_5, (1, 2), {}))
|
{
"content_hash": "dd1261ed754ac1330e0c1bafa0101790",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 104,
"avg_line_length": 29.574626865671643,
"alnum_prop": 0.6540499621498864,
"repo_name": "dimagi/commcare-hq",
"id": "0a85aec3ba8b10af0a817c99bfc48eba46298283",
"size": "3963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/celery_monitoring/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
"""Define tests for the Dune HD config flow."""
from unittest.mock import patch
from homeassistant import data_entry_flow
from homeassistant.components.dunehd.const import DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import CONF_HOST
from tests.common import MockConfigEntry
CONFIG_HOSTNAME = {CONF_HOST: "dunehd-host"}
CONFIG_IP = {CONF_HOST: "10.10.10.12"}
DUNEHD_STATE = {"protocol_version": "4", "player_state": "navigator"}
async def test_import(hass):
"""Test that the import works."""
with patch("pdunehd.DuneHDPlayer.update_state", return_value=DUNEHD_STATE):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=CONFIG_HOSTNAME
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "dunehd-host"
assert result["data"] == {CONF_HOST: "dunehd-host"}
async def test_import_cannot_connect(hass):
"""Test that errors are shown when cannot connect to the host during import."""
with patch("pdunehd.DuneHDPlayer.update_state", return_value={}):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=CONFIG_HOSTNAME
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
async def test_import_duplicate_error(hass):
"""Test that errors are shown when duplicates are added during import."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "dunehd-host"},
title="dunehd-host",
)
config_entry.add_to_hass(hass)
with patch("pdunehd.DuneHDPlayer.update_state", return_value=DUNEHD_STATE):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=CONFIG_HOSTNAME
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_user_invalid_host(hass):
"""Test that errors are shown when the host is invalid."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data={CONF_HOST: "invalid/host"}
)
assert result["errors"] == {CONF_HOST: "invalid_host"}
async def test_user_very_long_host(hass):
"""Test that errors are shown when the host is longer than 253 chars."""
long_host = (
"very_long_host_very_long_host_very_long_host_very_long_host_very_long_"
"host_very_long_host_very_long_host_very_long_host_very_long_host_very_long_"
"host_very_long_host_very_long_host_very_long_host_very_long_host_very_long_"
"host_very_long_host_very_long_host"
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data={CONF_HOST: long_host}
)
assert result["errors"] == {CONF_HOST: "invalid_host"}
async def test_user_cannot_connect(hass):
"""Test that errors are shown when cannot connect to the host."""
with patch("pdunehd.DuneHDPlayer.update_state", return_value={}):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONFIG_IP
)
assert result["errors"] == {CONF_HOST: "cannot_connect"}
async def test_duplicate_error(hass):
"""Test that errors are shown when duplicates are added."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data=CONFIG_HOSTNAME,
title="dunehd-host",
)
config_entry.add_to_hass(hass)
with patch("pdunehd.DuneHDPlayer.update_state", return_value=DUNEHD_STATE):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONFIG_HOSTNAME
)
assert result["errors"] == {CONF_HOST: "already_configured"}
async def test_create_entry(hass):
"""Test that the user step works."""
with patch("pdunehd.DuneHDPlayer.update_state", return_value=DUNEHD_STATE):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONFIG_HOSTNAME
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "dunehd-host"
assert result["data"] == {CONF_HOST: "dunehd-host"}
async def test_create_entry_with_ipv6_address(hass):
"""Test that the user step works with device IPv6 address.."""
with patch("pdunehd.DuneHDPlayer.update_state", return_value=DUNEHD_STATE):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: "2001:db8::1428:57ab"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "2001:db8::1428:57ab"
assert result["data"] == {CONF_HOST: "2001:db8::1428:57ab"}
|
{
"content_hash": "979126c04641a6f1cc6942194eeb1aac",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 85,
"avg_line_length": 38.25,
"alnum_prop": 0.6601307189542484,
"repo_name": "FreekingDean/home-assistant",
"id": "c8e7b0f7f3e5eaf1976e0e7193c42ee2685accd2",
"size": "5049",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "tests/components/dunehd/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2335"
},
{
"name": "Python",
"bytes": "36746639"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
"""
Glance Image Cache Invalid Cache Entry and Stalled Image cleaner
This is meant to be run as a periodic task from cron.
If something goes wrong while we're caching an image (for example the fetch
times out, or an exception is raised), we create an 'invalid' entry. These
entries are left around for debugging purposes. However, after some period of
time, we want to clean these up.
Also, if an incomplete image hangs around past the image_cache_stall_time
period, we automatically sweep it up.
"""
import os
import sys
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from glance.common import config
from glance.image_cache import cleaner
from glance.openstack.common import log
def main():
try:
config.parse_cache_args()
log.setup('glance')
app = cleaner.Cleaner()
app.run()
except RuntimeError as e:
sys.exit("ERROR: %s" % e)
|
{
"content_hash": "2c1bac7fd403c784b19b862f36de0a3d",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 77,
"avg_line_length": 32.76923076923077,
"alnum_prop": 0.6744913928012519,
"repo_name": "tanglei528/glance",
"id": "42a5c84494531d8047564298d9bea1bd8f36e788",
"size": "2048",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "glance/cmd/cache_cleaner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3193082"
},
{
"name": "Shell",
"bytes": "7168"
}
],
"symlink_target": ""
}
|
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models import (BoxSelectTool, Circle, Column, ColumnDataSource,
DataTable, Grid, HoverTool, IntEditor, LinearAxis,
NumberEditor, NumberFormatter, Plot, SelectEditor,
StringEditor, StringFormatter, TableColumn,)
from bokeh.resources import INLINE
from bokeh.sampledata.autompg2 import autompg2 as mpg
from bokeh.util.browser import view
source = ColumnDataSource(mpg)
manufacturers = sorted(mpg["manufacturer"].unique())
models = sorted(mpg["model"].unique())
transmissions = sorted(mpg["trans"].unique())
drives = sorted(mpg["drv"].unique())
classes = sorted(mpg["class"].unique())
columns = [
TableColumn(field="manufacturer", title="Manufacturer", editor=SelectEditor(options=manufacturers), formatter=StringFormatter(font_style="bold")),
TableColumn(field="model", title="Model", editor=StringEditor(completions=models)),
TableColumn(field="displ", title="Displacement", editor=NumberEditor(step=0.1), formatter=NumberFormatter(format="0.0")),
TableColumn(field="year", title="Year", editor=IntEditor()),
TableColumn(field="cyl", title="Cylinders", editor=IntEditor()),
TableColumn(field="trans", title="Transmission", editor=SelectEditor(options=transmissions)),
TableColumn(field="drv", title="Drive", editor=SelectEditor(options=drives)),
TableColumn(field="class", title="Class", editor=SelectEditor(options=classes)),
TableColumn(field="cty", title="City MPG", editor=IntEditor()),
TableColumn(field="hwy", title="Highway MPG", editor=IntEditor()),
]
data_table = DataTable(source=source, columns=columns, editable=True, width=1000,
index_position=-1, index_header="row index", index_width=60)
plot = Plot(title=None, plot_width=1000, plot_height=300)
# Set up x & y axis
plot.add_layout(LinearAxis(), 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
# Add Glyphs
cty_glyph = Circle(x="index", y="cty", fill_color="#396285", size=8, fill_alpha=0.5, line_alpha=0.5)
hwy_glyph = Circle(x="index", y="hwy", fill_color="#CE603D", size=8, fill_alpha=0.5, line_alpha=0.5)
cty = plot.add_glyph(source, cty_glyph)
hwy = plot.add_glyph(source, hwy_glyph)
# Add the tools
tooltips = [
("Manufacturer", "@manufacturer"),
("Model", "@model"),
("Displacement", "@displ"),
("Year", "@year"),
("Cylinders", "@cyl"),
("Transmission", "@trans"),
("Drive", "@drv"),
("Class", "@class"),
]
cty_hover_tool = HoverTool(renderers=[cty], tooltips=tooltips + [("City MPG", "@cty")])
hwy_hover_tool = HoverTool(renderers=[hwy], tooltips=tooltips + [("Highway MPG", "@hwy")])
select_tool = BoxSelectTool(renderers=[cty, hwy], dimensions='width')
plot.add_tools(cty_hover_tool, hwy_hover_tool, select_tool)
layout = Column(plot, data_table)
doc = Document()
doc.add_root(layout)
if __name__ == "__main__":
doc.validate()
filename = "data_tables.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Data Tables"))
print("Wrote %s" % filename)
view(filename)
|
{
"content_hash": "7425a4541320e1cbe465c1837aef3943",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 150,
"avg_line_length": 44.48,
"alnum_prop": 0.6573741007194245,
"repo_name": "ericmjl/bokeh",
"id": "8c209f64f9e9853be0cde3fad9ccbc806c8fe93f",
"size": "3336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/models/file/data_tables.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102094"
},
{
"name": "CoffeeScript",
"bytes": "462899"
},
{
"name": "HTML",
"bytes": "46193"
},
{
"name": "JavaScript",
"bytes": "24563"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2705341"
},
{
"name": "Shell",
"bytes": "8995"
},
{
"name": "TypeScript",
"bytes": "1468288"
}
],
"symlink_target": ""
}
|
subreddit = 'buildapcsales'
t_channel = '@buildapcsalesusa'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
{
"content_hash": "422badc4c373412f140968228799d1e6",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 38,
"avg_line_length": 22.166666666666668,
"alnum_prop": 0.7443609022556391,
"repo_name": "Fillll/reddit2telegram",
"id": "02cc6b80eeb0234f981c78126d762e7333bdeaf6",
"size": "150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reddit2telegram/channels/~inactive/buildapcsalesusa/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "301463"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
}
|
"""Scheduling Utilities."""
from __future__ import absolute_import, unicode_literals
from itertools import count
from kombu.five import python_2_unicode_compatible
from .imports import symbol_by_name
__all__ = (
'FairCycle', 'priority_cycle', 'round_robin_cycle', 'sorted_cycle',
)
CYCLE_ALIASES = {
'priority': 'kombu.utils.scheduling:priority_cycle',
'round_robin': 'kombu.utils.scheduling:round_robin_cycle',
'sorted': 'kombu.utils.scheduling:sorted_cycle',
}
@python_2_unicode_compatible
class FairCycle(object):
"""Cycle between resources.
Consume from a set of resources, where each resource gets
an equal chance to be consumed from.
Arguments:
fun (Callable): Callback to call.
resources (Sequence[Any]): List of resources.
predicate (type): Exception predicate.
"""
def __init__(self, fun, resources, predicate=Exception):
self.fun = fun
self.resources = resources
self.predicate = predicate
self.pos = 0
def _next(self):
while 1:
try:
resource = self.resources[self.pos]
self.pos += 1
return resource
except IndexError:
self.pos = 0
if not self.resources:
raise self.predicate()
def get(self, callback, **kwargs):
"""Get from next resource."""
for tried in count(0): # for infinity
resource = self._next()
try:
return self.fun(resource, callback, **kwargs)
except self.predicate:
                # reraise when retries are exhausted.
if tried >= len(self.resources) - 1:
raise
def close(self):
"""Close cycle."""
pass
def __repr__(self):
"""``repr(cycle)``."""
return '<FairCycle: {self.pos}/{size} {self.resources}>'.format(
self=self, size=len(self.resources))
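# A minimal usage sketch (illustrative; ``fetch``, ``queue_a``/``queue_b`` and
# ``Empty`` are assumptions, not part of this module):
#
#   def fetch(resource, callback):
#       return resource.get(callback)
#
#   cycle = FairCycle(fetch, [queue_a, queue_b], predicate=Empty)
#   cycle.get(on_message)  # tries queue_a first, moves on to queue_b on Empty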
class round_robin_cycle(object):
"""Iterator that cycles between items in round-robin."""
def __init__(self, it=None):
self.items = it if it is not None else []
def update(self, it):
"""Update items from iterable."""
self.items[:] = it
def consume(self, n):
"""Consume n items."""
return self.items[:n]
def rotate(self, last_used):
"""Move most recently used item to end of list."""
items = self.items
try:
items.append(items.pop(items.index(last_used)))
except ValueError:
pass
return last_used
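# Example (illustrative): ``rotate`` moves the last-used item to the end, so
# later ``consume`` calls favour the least recently used resources:
#
#   c = round_robin_cycle(['a', 'b', 'c'])
#   c.rotate('a')   # items are now ['b', 'c', 'a']
#   c.consume(2)    # -> ['b', 'c']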
class priority_cycle(round_robin_cycle):
"""Cycle that repeats items in order."""
def rotate(self, last_used):
"""Unused in this implementation."""
pass
class sorted_cycle(priority_cycle):
"""Cycle in sorted order."""
def consume(self, n):
"""Consume n items."""
return sorted(self.items[:n])
def cycle_by_name(name):
"""Get cycle class by name."""
return symbol_by_name(name, CYCLE_ALIASES)
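# Example (illustrative): both an alias and a fully qualified dotted path
# resolve to a cycle class:
#
#   cycle_by_name('round_robin')                          # -> round_robin_cycle
#   cycle_by_name('kombu.utils.scheduling:sorted_cycle')  # -> sorted_cycle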
|
{
"content_hash": "766010fdd2ea2528f91ec2db17248df0",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 72,
"avg_line_length": 26.82456140350877,
"alnum_prop": 0.5784826684107259,
"repo_name": "mdworks2016/work_development",
"id": "9bc4d93625df5c1d7f661377a2fe1d0a32c78374",
"size": "3058",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Python/20_Third_Certification/venv/lib/python3.7/site-packages/kombu/utils/scheduling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "142"
},
{
"name": "Kotlin",
"bytes": "68744"
},
{
"name": "Python",
"bytes": "1080"
}
],
"symlink_target": ""
}
|
"""Unit tests for organization application view."""
from django import http
from google.appengine.ext import ndb
from melange.models import connection as connection_model
from melange.models import contact as contact_model
from melange.models import organization as org_model
from melange.models import survey as survey_model
from soc.models import licenses
from soc.modules.gsoc.views.helper import request_data
from summerofcode.models import organization as soc_org_model
from summerofcode.templates import tabs
from summerofcode.views import org_app as org_app_view
from tests import org_utils
from tests import profile_utils
from tests import test_utils
TEST_ORG_ID = 'test_org_id'
TEST_ORG_NAME = u'Test Org Name'
TEST_BLOG = 'http://www.test.blog.com/'
TEST_DESCRIPTION = u'Test Organization Description'
TEST_TAGS = u'tag one,tag_two,tag 3'
TEST_LICENSE = licenses.LICENSES[0]
TEST_FACEBOOK = u'http://www.test.facebook.com/'
TEST_FEED_URL = u'http://www.test.feed.com/'
TEST_GOOGLE_PLUS = u'http://www.test.google.plus.com/'
TEST_IDEAS_PAGE = 'http://www.test.ideas.com/'
TEST_IRC_CHANNEL = 'irc://irc.freenode.net/test'
TEST_LOGO_URL = u'http://www.test.logo.url.com/'
TEST_MAILING_LIST = 'mailinglist@example.com'
TEST_TWITTER = u'http://www.test.twitter.com/'
TEST_WEB_PAGE = u'http://www.web.page.com/'
TEST_IS_VETERAN = True
TEST_ELIGIBLE_COUNTRY = True
def _getOrgPreferencesEditUrl(org):
"""Returns URL to Edit Organization Preferences page.
Args:
org: Organization entity.
Returns:
A string containing the URL to Edit Organization Preferences page.
"""
return '/gsoc/org/preferences/edit/%s' % org.key.id()
def _getOrgProfileCreateUrl(program):
"""Returns URL to Create Organization Profile page.
Args:
program: Program entity.
Returns:
A string containing the URL to Create Organization Profile page.
"""
return '/gsoc/org/profile/create/%s' % program.key().name()
def _getOrgProfileEditUrl(org):
"""Returns URL to Edit Organization Profile page.
Args:
org: Organization entity.
Returns:
A string containing the URL to Edit Organization Profile page.
"""
return '/gsoc/org/profile/edit/%s' % org.key.id()
def _getOrgApplicationSubmitUrl(org):
"""Returns URL to Submit Organization Application page.
Args:
org: Organization entity.
Returns:
A string containing the URL to Submit Organization Application page.
"""
return '/gsoc/org/application/submit/%s' % org.key.id()
def _getOrgAppShowUrl(org):
"""Returns URL to Organization Application Show page.
Args:
org: Organization entity.
Returns:
A string containing the URL to Organization Application Show page.
"""
return '/gsoc/org/application/show2/%s' % org.key.id()
def _getOrgSurveyResponseShowUrl(org):
"""Returns URL to Organization Survey Response Show page.
Args:
org: Organization entity.
Returns:
A string containing the URL to Organization Survey Response Show page.
"""
return '/gsoc/org/survey_response/show/%s' % org.key.id()
def _getPublicOrgListUrl(program):
"""Returns URL to Public Organization List page.
Args:
program: Program entity.
Returns:
A string containing the URL to Public Organization List page.
"""
return '/gsoc/org/list/public/%s' % program.key().name()
def _getOrgApplicationListPageListUrl(program):
"""Returns URL to Organization Application List page.
Args:
program: Program entity.
Returns:
A string containing the URL to Organization Application List page.
"""
return '/gsoc/org/application/list/%s' % program.key().name()
class OrgProfileCreatePageTest(test_utils.GSoCDjangoTestCase):
"""Unit tests for OrgProfileCreatePage class."""
def setUp(self):
"""See unittest.TestCase.setUp for specification."""
self.init()
def testPageLoads(self):
"""Tests that page loads properly."""
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(self.program.key(), user=user)
response = self.get(_getOrgProfileCreateUrl(self.program))
self.assertResponseOK(response)
def testOrgProfileCreated(self):
"""Tests that organization entity is created properly."""
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile = profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
backup_admin = profile_utils.seedNDBProfile(self.program.key())
postdata = {
'org_id': TEST_ORG_ID,
'name': TEST_ORG_NAME,
'blog': TEST_BLOG,
'description': TEST_DESCRIPTION,
'facebook': TEST_FACEBOOK,
'feed_url': TEST_FEED_URL,
'google_plus': TEST_GOOGLE_PLUS,
'license': TEST_LICENSE,
'logo_url': TEST_LOGO_URL,
'ideas_page': TEST_IDEAS_PAGE,
'irc_channel': TEST_IRC_CHANNEL,
'backup_admin': backup_admin.profile_id,
'mailing_list': TEST_MAILING_LIST,
'tags': TEST_TAGS,
'twitter': TEST_TWITTER,
'web_page': TEST_WEB_PAGE,
'is_veteran': TEST_IS_VETERAN,
'eligible_country': TEST_ELIGIBLE_COUNTRY,
}
response = self.post(
_getOrgProfileCreateUrl(self.program), postdata=postdata)
# check that organization entity has been created
org = ndb.Key(
soc_org_model.SOCOrganization._get_kind(),
'%s/%s' % (self.program.key().name(), TEST_ORG_ID)).get()
self.assertIsNotNone(org)
self.assertEqual(org.contact.blog, TEST_BLOG)
self.assertEqual(org.contact.facebook, TEST_FACEBOOK)
self.assertEqual(org.contact.feed_url, TEST_FEED_URL)
self.assertEqual(org.contact.google_plus, TEST_GOOGLE_PLUS)
self.assertEqual(org.contact.irc_channel, TEST_IRC_CHANNEL)
self.assertEqual(org.contact.mailing_list, TEST_MAILING_LIST)
self.assertEqual(org.contact.twitter, TEST_TWITTER)
self.assertEqual(org.contact.web_page, TEST_WEB_PAGE)
self.assertEqual(org.description, TEST_DESCRIPTION)
self.assertEqual(org.ideas_page, TEST_IDEAS_PAGE)
self.assertEqual(org.license, TEST_LICENSE)
self.assertEqual(org.logo_url, TEST_LOGO_URL)
self.assertEqual(org.name, TEST_ORG_NAME)
self.assertEqual(org.org_id, TEST_ORG_ID)
self.assertEqual(org.tags, TEST_TAGS.split(','))
self.assertTrue(org.is_veteran)
# check that the client is redirected to update page
self.assertResponseRedirect(response, url=_getOrgApplicationSubmitUrl(org))
# check that a connection with the current user has been started
profile = profile.key.get()
self.assertIn(org.key, profile.admin_for)
connection = connection_model.Connection.query(
connection_model.Connection.organization == org.key,
ancestor=profile.key).get()
self.assertIsNotNone(connection)
self.assertEqual(connection.org_role, connection_model.ORG_ADMIN_ROLE)
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertTrue(connection.seen_by_org)
self.assertTrue(connection.seen_by_user)
# check that a connection with backup admin has been started
backup_admin = backup_admin.key.get()
self.assertIn(org.key, backup_admin.admin_for)
connection = connection_model.Connection.query(
connection_model.Connection.organization == org.key,
ancestor=backup_admin.key).get()
self.assertIsNotNone(connection)
self.assertEqual(connection.org_role, connection_model.ORG_ADMIN_ROLE)
self.assertEqual(connection.user_role, connection_model.ROLE)
self.assertTrue(connection.seen_by_org)
self.assertTrue(connection.seen_by_user)
def testInvalidData(self):
"""Tests that organization is not created if data is not valid."""
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
backup_admin = profile_utils.seedNDBProfile(self.program.key())
# valid set of data
valid_postdata = {
'org_id': TEST_ORG_ID,
'name': TEST_ORG_NAME,
'blog': TEST_BLOG,
'description': TEST_DESCRIPTION,
'facebook': TEST_FACEBOOK,
'feed_url': TEST_FEED_URL,
'google_plus': TEST_GOOGLE_PLUS,
'license': TEST_LICENSE,
'logo_url': TEST_LOGO_URL,
'ideas_page': TEST_IDEAS_PAGE,
'irc_channel': TEST_IRC_CHANNEL,
'backup_admin': backup_admin.profile_id,
'mailing_list': TEST_MAILING_LIST,
'tags': TEST_TAGS,
'twitter': TEST_TWITTER,
'web_page': TEST_WEB_PAGE,
'eligible_country': True
}
# the organization is not from the eligible countries
postdata = valid_postdata.copy()
postdata['eligible_country'] = False
response = self.post(
_getOrgProfileCreateUrl(self.program), postdata=postdata)
self.assertTrue(response.context['error'])
# the organization with the same org_id already exists
org_utils.seedSOCOrganization(self.program.key(), org_id='duplicate')
postdata = valid_postdata.copy()
postdata['org_id'] = 'duplicate'
response = self.post(
_getOrgProfileCreateUrl(self.program), postdata=postdata)
self.assertResponseBadRequest(response)
OTHER_TEST_BLOG = 'http://www.other.test.blog.com/'
OTHER_TEST_DESCRIPTION = u'Other Organization Description'
OTHER_TEST_FACEBOOK = u'http://www.other.test.facebook.com/'
OTHER_TEST_NAME = 'Other Org Name'
OTHER_TEST_FEED_URL = u'http://www.other.test.feed.com/'
OTHER_TEST_GOOGLE_PLUS = 'http://www.other.test.google.plus.com/'
OTHER_TEST_IDEAS_PAGE = 'http://www.other.ideas.page.com/'
OTHER_TEST_IRC_CHANNEL = 'irc://irc.freenode.net/other'
OTHER_TEST_LICENSE = licenses.LICENSES[-1]
OTHER_TEST_LOGO_URL = 'http://www.other.test.logo.url.com/'
OTHER_TEST_MAILING_LIST = 'othermailinglist@example.com'
OTHER_TEST_TWITTER = u'http://www.other.test.twitter.com/'
OTHER_TEST_TAGS = u'other tag one,other_tag_two,other tag 3'
OTHER_TEST_WEB_PAGE = u'http://www.other.web.page.com/'
OTHER_TEST_IS_VETERAN = False
class OrgProfileEditPageTest(test_utils.GSoCDjangoTestCase):
"""Unit tests for OrgProfileEditPage class."""
def setUp(self):
"""See unittest.TestCase.setUp for specification."""
self.init()
contact = contact_model.Contact(mailing_list=TEST_MAILING_LIST)
self.org = org_utils.seedSOCOrganization(
self.program.key(), org_id=TEST_ORG_ID, name=TEST_ORG_NAME,
ideas_page=TEST_IDEAS_PAGE, tags=TEST_TAGS.split(','), contact=contact,
is_veteran=not OTHER_TEST_IS_VETERAN)
self.app_response = survey_model.SurveyResponse(parent=self.org.key)
self.app_response.put()
def testPageLoads(self):
"""Tests that page loads properly."""
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
response = self.get(_getOrgProfileEditUrl(self.org))
self.assertResponseOK(response)
def testOrgProfileUpdated(self):
"""Tests that organization entity is updated correctly."""
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
# check that mutable properties are updated
postdata = {
'blog': OTHER_TEST_BLOG,
'description': OTHER_TEST_DESCRIPTION,
'facebook': OTHER_TEST_FACEBOOK,
'feed_url': OTHER_TEST_FEED_URL,
'google_plus': OTHER_TEST_GOOGLE_PLUS,
'ideas_page': OTHER_TEST_IDEAS_PAGE,
'irc_channel': OTHER_TEST_IRC_CHANNEL,
'license': OTHER_TEST_LICENSE,
'logo_url': OTHER_TEST_LOGO_URL,
'mailing_list': OTHER_TEST_MAILING_LIST,
'name': OTHER_TEST_NAME,
'tags': OTHER_TEST_TAGS,
'twitter': OTHER_TEST_TWITTER,
'web_page': OTHER_TEST_WEB_PAGE,
'is_veteran': OTHER_TEST_IS_VETERAN,
}
response = self.post(_getOrgProfileEditUrl(self.org), postdata=postdata)
self.assertResponseRedirect(response, url=_getOrgProfileEditUrl(self.org))
# check that organization entity has been updated
org = ndb.Key(
soc_org_model.SOCOrganization._get_kind(),
'%s/%s' % (self.program.key().name(), TEST_ORG_ID)).get()
self.assertEqual(org.contact.blog, OTHER_TEST_BLOG)
self.assertEqual(org.contact.mailing_list, OTHER_TEST_MAILING_LIST)
self.assertEqual(org.description, OTHER_TEST_DESCRIPTION)
self.assertEqual(org.contact.facebook, OTHER_TEST_FACEBOOK)
self.assertEqual(org.contact.google_plus, OTHER_TEST_GOOGLE_PLUS)
self.assertEqual(org.ideas_page, OTHER_TEST_IDEAS_PAGE)
self.assertEqual(org.contact.irc_channel, OTHER_TEST_IRC_CHANNEL)
self.assertEqual(org.license, OTHER_TEST_LICENSE)
self.assertEqual(org.logo_url, OTHER_TEST_LOGO_URL)
self.assertEqual(org.name, OTHER_TEST_NAME)
self.assertEqual(org.tags, OTHER_TEST_TAGS.split(','))
self.assertEqual(org.contact.twitter, OTHER_TEST_TWITTER)
self.assertEqual(org.contact.web_page, OTHER_TEST_WEB_PAGE)
# check that organization ID is not updated even if it is in POST data
postdata = {
'blog': OTHER_TEST_BLOG,
'description': OTHER_TEST_DESCRIPTION,
'facebook': OTHER_TEST_FACEBOOK,
'feed_url': OTHER_TEST_FEED_URL,
'google_plus': OTHER_TEST_GOOGLE_PLUS,
'ideas_page': OTHER_TEST_IDEAS_PAGE,
'license': OTHER_TEST_LICENSE,
'logo_url': OTHER_TEST_LOGO_URL,
'org_id': 'other_org_id',
'name': TEST_ORG_NAME,
'twitter': OTHER_TEST_TWITTER,
'web_page': OTHER_TEST_WEB_PAGE,
}
response = self.post(_getOrgProfileEditUrl(self.org), postdata=postdata)
self.assertResponseRedirect(response, url=_getOrgProfileEditUrl(self.org))
# check that organization entity has been updated
org = ndb.Key(
soc_org_model.SOCOrganization._get_kind(),
'%s/%s' % (self.program.key().name(), TEST_ORG_ID)).get()
self.assertEqual(org.org_id, TEST_ORG_ID)
def testOrgsTabs(self):
"""Tests that correct organization related tabs are present in context."""
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
response = self.get(_getOrgProfileEditUrl(self.org))
# check that tabs are present in context
self.assertIn('tabs', response.context)
# check that tab to "Edit Profile" page is the selected one
self.assertEqual(response.context['tabs'].selected_tab_id,
tabs.ORG_PROFILE_TAB_ID)
class OrgApplicationSubmitPageTest(test_utils.GSoCDjangoTestCase):
"""Unit tests for OrgApplicationSubmitPage class."""
def setUp(self):
"""See unittest.TestCase.setUp for specification."""
self.init()
self.org = org_utils.seedSOCOrganization(self.program.key())
def testPageLoads(self):
"""Tests that page loads properly."""
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
response = self.get(_getOrgApplicationSubmitUrl(self.org))
self.assertResponseOK(response)
def testApplicationCreated(self):
"""Tests that organization application is created properly."""
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
# TODO(daniel): submit actual responses in POST data
response = self.post(_getOrgApplicationSubmitUrl(self.org))
self.assertResponseRedirect(response)
# check that application has been created
application = survey_model.SurveyResponse.query(ancestor=self.org.key).get()
self.assertIsNotNone(application)
def testOrgsTabs(self):
"""Tests that correct organization related tabs are present in context."""
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
response = self.get(_getOrgApplicationSubmitUrl(self.org))
# check that tabs are present in context
self.assertIn('tabs', response.context)
# check that tab to "Edit Profile" page is the selected one
self.assertEqual(response.context['tabs'].selected_tab_id,
tabs.ORG_APP_RESPONSE_TAB_ID)
class OrgAppShowPageTest(test_utils.GSoCDjangoTestCase):
"""Unit tests for OrgAppShowPage class."""
def setUp(self):
"""See unittest.TestCase.setUp for specification."""
self.init()
self.org = org_utils.seedSOCOrganization(
self.program.key(), name=TEST_ORG_NAME)
self.app_response = survey_model.SurveyResponse(parent=self.org.key)
self.app_response.put()
def testPageLoads(self):
"""Tests that page loads properly."""
user = profile_utils.seedNDBUser(host_for=[self.program])
profile_utils.loginNDB(user)
response = self.get(_getOrgAppShowUrl(self.org))
self.assertResponseOK(response)
def testPostMethodNotAllowed(self):
"""Tests that POST method is not permitted."""
user = profile_utils.seedNDBUser(host_for=[self.program])
profile_utils.loginNDB(user)
response = self.post(_getOrgAppShowUrl(self.org))
self.assertResponseMethodNotAllowed(response)
class SurveyResponseShowPageTest(test_utils.GSoCDjangoTestCase):
"""Unit tests for SurveyResponseShowPage class."""
def setUp(self):
"""See unittest.TestCase.setUp for specification."""
self.init()
self.org = org_utils.seedSOCOrganization(
self.program.key(), name=TEST_ORG_NAME)
self.app_response = survey_model.SurveyResponse(parent=self.org.key)
self.app_response.put()
def testPageLoads(self):
"""Tests that page loads properly."""
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
response = self.get(_getOrgSurveyResponseShowUrl(self.org))
self.assertResponseOK(response)
def testPostMethodNotAllowed(self):
"""Tests that POST method is not permitted."""
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
response = self.post(_getOrgSurveyResponseShowUrl(self.org))
self.assertResponseMethodNotAllowed(response)
class PublicOrganizationListPageTest(test_utils.GSoCDjangoTestCase):
"""Unit tests for PublicOrganizationListPage class."""
def setUp(self):
"""See unittest.TestCase.setUp for specification."""
self.init()
def testPageLoads(self):
"""Tests that page loads properly."""
response = self.get(_getPublicOrgListUrl(self.program))
self.assertResponseOK(response)
class OrgApplicationListPageTest(test_utils.GSoCDjangoTestCase):
"""Unit tests for OrgApplicationListPage class."""
def setUp(self):
"""See unittest.TestCase.setUp for specification."""
self.init()
def testPageLoads(self):
"""Tests that page loads properly."""
user = profile_utils.seedNDBUser(host_for=[self.program])
profile_utils.loginNDB(user)
response = self.get(_getOrgApplicationListPageListUrl(self.program))
self.assertResponseOK(response)
class _MockView(object):
"""Simple request handler to be used as a callback for other handlers."""
def get(self, data, access, mutator):
"""See base.RequestHandler.get for specification."""
pass
_NUMBER_OF_ORG_ADMINS = 2
class ApplyOrgAdmissionDecisionHandlerTest(test_utils.GSoCDjangoTestCase):
"""Unit tests for ApplyOrgAdmissionDecisionHandler class."""
def setUp(self):
"""See unittest.TestCase.setUp for specification."""
self.init()
self.pre_accepted_org = org_utils.seedSOCOrganization(
self.program.key(), status=org_model.Status.PRE_ACCEPTED)
self.pre_rejected_org = org_utils.seedSOCOrganization(
self.program.key(), status=org_model.Status.PRE_REJECTED)
self.accepted_org = org_utils.seedSOCOrganization(
self.program.key(), status=org_model.Status.ACCEPTED)
self.rejected_org = org_utils.seedSOCOrganization(
self.program.key(), status=org_model.Status.REJECTED)
self.applying_org = org_utils.seedSOCOrganization(
self.program.key(), status=org_model.Status.APPLYING)
def testOrganizationStatusIsUpdated(self):
"""Tests that organization admission decisions are applied correctly."""
kwargs = {
'sponsor': self.sponsor.key().name(),
'program': self.program.program_id,
}
request = http.HttpRequest()
data = request_data.RequestData(request, None, kwargs)
handler = org_app_view.ApplyOrgAdmissionDecisionHandler(_MockView())
handler.handle(data, None, None)
self.executeMapReduceJobs()
    # check that statuses have been changed for the pre_accepted and pre_rejected orgs
self.assertEqual(
self.pre_accepted_org.key.get().status, org_model.Status.ACCEPTED)
self.assertEqual(
self.pre_rejected_org.key.get().status, org_model.Status.REJECTED)
    # check that the status of the other orgs has not changed
self.assertEqual(
self.accepted_org.key.get().status, org_model.Status.ACCEPTED)
self.assertEqual(
self.rejected_org.key.get().status, org_model.Status.REJECTED)
self.assertEqual(
self.applying_org.key.get().status, org_model.Status.APPLYING)
def testEmailIsSent(self):
"""Tests that acceptance and rejection emails are sent."""
# seed a couple of organization administrators for both organizations
addresses_for_accept_email = []
addresses_for_reject_email = []
for _ in range(_NUMBER_OF_ORG_ADMINS):
profile = profile_utils.seedNDBProfile(
self.program.key(), admin_for=[self.pre_accepted_org.key])
addresses_for_accept_email.append(profile.contact.email)
profile = profile_utils.seedNDBProfile(
self.program.key(), admin_for=[self.pre_rejected_org.key])
addresses_for_reject_email.append(profile.contact.email)
# finalize decision and execute all MapReduce jobs
kwargs = {
'sponsor': self.sponsor.key().name(),
'program': self.program.program_id,
}
request = http.HttpRequest()
data = request_data.RequestData(request, None, kwargs)
handler = org_app_view.ApplyOrgAdmissionDecisionHandler(_MockView())
handler.handle(data, None, None)
self.executeMapReduceJobs()
# check that emails have been sent
for email_address in addresses_for_accept_email:
self.assertEmailSent(bcc=email_address)
for email_address in addresses_for_reject_email:
self.assertEmailSent(bcc=email_address)
TEST_MAX_SCORE = 7
TEST_SLOT_REQUEST_MIN = 3
TEST_SLOT_REQUEST_MAX = 10
TEST_CONTRIB_TEMPLATE = u'Test Application Template'
class OrgPreferencesEditPageTest(test_utils.GSoCDjangoTestCase):
"""Unit tests for OrgPreferencesEditPage class."""
def setUp(self):
"""See unittest.TestCase.testUp for specification."""
self.init()
self.org = org_utils.seedSOCOrganization(
self.program.key(), name=TEST_ORG_NAME,
status=org_model.Status.ACCEPTED)
def testPageLoads(self):
"""Tests that page loads properly."""
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
response = self.get(_getOrgPreferencesEditUrl(self.org))
self.assertResponseOK(response)
def testOrgPreferencesUpdated(self):
"""Tests that organization entity is updated correctly."""
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
postdata = {
'slot_request_min': unicode(TEST_SLOT_REQUEST_MIN),
'slot_request_max': unicode(TEST_SLOT_REQUEST_MAX),
'max_score': unicode(TEST_MAX_SCORE),
'contrib_template': TEST_CONTRIB_TEMPLATE
}
response = self.post(_getOrgPreferencesEditUrl(self.org), postdata=postdata)
self.assertResponseRedirect(
response, url=_getOrgPreferencesEditUrl(self.org))
org = self.org.key.get()
self.assertEqual(org.max_score, TEST_MAX_SCORE)
self.assertEqual(org.slot_request_min, TEST_SLOT_REQUEST_MIN)
self.assertEqual(org.slot_request_max, TEST_SLOT_REQUEST_MAX)
self.assertEqual(org.contrib_template, TEST_CONTRIB_TEMPLATE)
def testOrgsTabs(self):
"""Tests that correct organization related tabs are present in context."""
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
response = self.get(_getOrgPreferencesEditUrl(self.org))
# check that tabs are present in context
self.assertIn('tabs', response.context)
# check that tab to "Edit Profile" page is the selected one
self.assertEqual(response.context['tabs'].selected_tab_id,
tabs.ORG_PREFERENCES_TAB_ID)
|
{
"content_hash": "77789b1e36564fdb3320b2d2f10d75b8",
"timestamp": "",
"source": "github",
"line_count": 699,
"max_line_length": 80,
"avg_line_length": 36.15736766809728,
"alnum_prop": 0.7018279654981404,
"repo_name": "rhyolight/nupic.son",
"id": "dbfd3ec2bbd22a53574e808fd76f542a699fca87",
"size": "25857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/app/summerofcode/views/test_org_app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "681301"
},
{
"name": "JavaScript",
"bytes": "392600"
},
{
"name": "PHP",
"bytes": "217376"
},
{
"name": "Python",
"bytes": "5162564"
}
],
"symlink_target": ""
}
|
import pickle
from gzip import open as gzip_open
from collections import defaultdict
# ------------------------------------------------------------------------------
# Openers
#
file_openers = {
"vert": open,
"txt": open,
"gz": gzip_open,
}
def get_opener(file_path):
""" Gets a proper opening function for certain file types. """
return file_openers[file_path.split(".")[-1]]
def use_opener(file_path, mode="r"):
return get_opener(file_path)(file_path, mode)
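# Example (illustrative): the opener is chosen from the file extension, so
# plain-text and gzipped corpora are handled transparently:
#
#   with use_opener("corpus.txt") as f:     # plain open()
#       ...
#   with use_opener("corpus.txt.gz") as f:  # gzip.open()
#       ...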
# ------------------------------------------------------------------------------
class LineCorpus(object):
def __init__(self, file_name):
self.file_name = file_name
def __iter__(self):
with use_opener(self.file_name) as f:
for line in f:
sentence = line.strip("\n\r\t ").split()
if len(sentence) > 0:
yield sentence
def corpus2vocab(corpus):
vocab = defaultdict(lambda: 0)
for sentence in corpus:
for word in sentence:
vocab[word] += 1
return vocab
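# Example (illustrative): counting word frequencies over a file with one
# sentence per line:
#
#   vocab = corpus2vocab(LineCorpus("corpus.txt"))  # e.g. {"the": 1042, ...}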
def dump_sentences(sentences, output_file):
with use_opener(output_file, "w") as f:
for sentence in sentences:
f.write(" ".join(sentence) + "\n")
# ------------------------------------------------------------------------------
def save_report(report, dataset_name, model_name, formula, directory="reports/"):
parts = [dataset_name, model_name, formula]
name = ".".join(parts)
    with open(directory + "/" + name + ".pickle", "wb") as f:  # pickle needs binary mode
        pickle.dump(report, f)
def load_report(name):
    with open(name, "rb") as f:  # pickle needs binary mode
        return pickle.load(f)
|
{
"content_hash": "5030a6bcab458bbfec186751d1761dd2",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 81,
"avg_line_length": 23.23611111111111,
"alnum_prop": 0.5086670651524208,
"repo_name": "nimcho/dimo",
"id": "0c29a452d3bf2ed38c641b6a602ea108accbbc3e",
"size": "1673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27917"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
"""
Enclosing chart object. The various chart types are actually child objects.
Will probably need to call this indirectly
"""
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
String,
Alias,
)
from openpyxl.descriptors.excel import (
ExtensionList,
Relation
)
from openpyxl.descriptors.nested import (
NestedBool,
NestedNoneSet,
NestedString,
NestedMinMax,
)
from openpyxl.descriptors.sequence import NestedSequence
from openpyxl.xml.constants import CHART_NS
from openpyxl.drawing.colors import ColorMapping
from .text import RichText
from .shapes import GraphicalProperties
from .legend import Legend
from ._3d import _3DBase
from .plotarea import PlotArea
from .title import Title
from .pivot import (
PivotFormat,
PivotSource,
)
from .print_settings import PrintSettings
class ChartContainer(Serialisable):
tagname = "chart"
title = Typed(expected_type=Title, allow_none=True)
autoTitleDeleted = NestedBool(allow_none=True)
pivotFmts = NestedSequence(expected_type=PivotFormat)
view3D = _3DBase.view3D
floor = _3DBase.floor
sideWall = _3DBase.sideWall
backWall = _3DBase.backWall
plotArea = Typed(expected_type=PlotArea, )
legend = Typed(expected_type=Legend, allow_none=True)
plotVisOnly = NestedBool()
dispBlanksAs = NestedNoneSet(values=(['span', 'gap', 'zero']))
showDLblsOverMax = NestedBool(allow_none=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('title', 'autoTitleDeleted', 'pivotFmts', 'view3D',
'floor', 'sideWall', 'backWall', 'plotArea', 'legend', 'plotVisOnly',
'dispBlanksAs', 'showDLblsOverMax')
def __init__(self,
title=None,
autoTitleDeleted=None,
pivotFmts=(),
view3D=None,
floor=None,
sideWall=None,
backWall=None,
plotArea=None,
legend=None,
plotVisOnly=True,
dispBlanksAs="gap",
showDLblsOverMax=None,
extLst=None,
):
self.title = title
self.autoTitleDeleted = autoTitleDeleted
self.pivotFmts = pivotFmts
self.view3D = view3D
self.floor = floor
self.sideWall = sideWall
self.backWall = backWall
if plotArea is None:
plotArea = PlotArea()
self.plotArea = plotArea
self.legend = legend
self.plotVisOnly = plotVisOnly
self.dispBlanksAs = dispBlanksAs
self.showDLblsOverMax = showDLblsOverMax
class Protection(Serialisable):
tagname = "protection"
chartObject = NestedBool(allow_none=True)
data = NestedBool(allow_none=True)
formatting = NestedBool(allow_none=True)
selection = NestedBool(allow_none=True)
userInterface = NestedBool(allow_none=True)
__elements__ = ("chartObject", "data", "formatting", "selection", "userInterface")
def __init__(self,
chartObject=None,
data=None,
formatting=None,
selection=None,
userInterface=None,
):
self.chartObject = chartObject
self.data = data
self.formatting = formatting
self.selection = selection
self.userInterface = userInterface
class ExternalData(Serialisable):
tagname = "externalData"
autoUpdate = NestedBool(allow_none=True)
id = String() # Needs namespace
def __init__(self,
autoUpdate=None,
id=None
):
self.autoUpdate = autoUpdate
self.id = id
class ChartSpace(Serialisable):
tagname = "chartSpace"
date1904 = NestedBool(allow_none=True)
lang = NestedString(allow_none=True)
roundedCorners = NestedBool(allow_none=True)
style = NestedMinMax(allow_none=True, min=1, max=48)
clrMapOvr = Typed(expected_type=ColorMapping, allow_none=True)
pivotSource = Typed(expected_type=PivotSource, allow_none=True)
protection = Typed(expected_type=Protection, allow_none=True)
chart = Typed(expected_type=ChartContainer)
spPr = Typed(expected_type=GraphicalProperties, allow_none=True)
graphicalProperties = Alias("spPr")
txPr = Typed(expected_type=RichText, allow_none=True)
textProperties = Alias("txPr")
externalData = Typed(expected_type=ExternalData, allow_none=True)
printSettings = Typed(expected_type=PrintSettings, allow_none=True)
userShapes = Relation()
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('date1904', 'lang', 'roundedCorners', 'style',
'clrMapOvr', 'pivotSource', 'protection', 'chart', 'spPr', 'txPr',
'externalData', 'printSettings', 'userShapes')
def __init__(self,
date1904=None,
lang=None,
roundedCorners=None,
style=None,
clrMapOvr=None,
pivotSource=None,
protection=None,
chart=None,
spPr=None,
txPr=None,
externalData=None,
printSettings=None,
userShapes=None,
extLst=None,
):
self.date1904 = date1904
self.lang = lang
self.roundedCorners = roundedCorners
self.style = style
self.clrMapOvr = clrMapOvr
self.pivotSource = pivotSource
self.protection = protection
self.chart = chart
self.spPr = spPr
self.txPr = txPr
self.externalData = externalData
self.printSettings = printSettings
self.userShapes = userShapes
def to_tree(self, tagname=None, idx=None, namespace=None):
tree = super(ChartSpace, self).to_tree()
tree.set("xmlns", CHART_NS)
return tree
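# Note (illustrative): serialisation stamps the chart namespace on the root
# element, e.g. ChartSpace(chart=ChartContainer()).to_tree().get("xmlns")
# returns CHART_NS.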
|
{
"content_hash": "33ca4018d918e99f95355656672cda24",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 89,
"avg_line_length": 31.394871794871793,
"alnum_prop": 0.6179353152564522,
"repo_name": "cloudera/hue",
"id": "c2bd9b685c1b911befdc38542140c002140882e8",
"size": "6122",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/openpyxl-2.6.4/openpyxl/chart/chartspace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.test import TestCase
from django.utils.encoding import python_2_unicode_compatible
from rest_framework import serializers
@python_2_unicode_compatible
class Tag(models.Model):
"""
Tags have a descriptive slug, and are attached to an arbitrary object.
"""
tag = models.SlugField()
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
tagged_item = GenericForeignKey('content_type', 'object_id')
def __str__(self):
return self.tag
@python_2_unicode_compatible
class Bookmark(models.Model):
"""
A URL bookmark that may have multiple tags attached.
"""
url = models.URLField()
tags = GenericRelation(Tag)
def __str__(self):
return 'Bookmark: %s' % self.url
@python_2_unicode_compatible
class Note(models.Model):
"""
A textual note that may have multiple tags attached.
"""
text = models.TextField()
tags = GenericRelation(Tag)
def __str__(self):
return 'Note: %s' % self.text
class TestGenericRelations(TestCase):
def setUp(self):
self.bookmark = Bookmark.objects.create(url='https://www.djangoproject.com/')
Tag.objects.create(tagged_item=self.bookmark, tag='django')
Tag.objects.create(tagged_item=self.bookmark, tag='python')
self.note = Note.objects.create(text='Remember the milk')
Tag.objects.create(tagged_item=self.note, tag='reminder')
def test_generic_relation(self):
"""
Test a relationship that spans a GenericRelation field.
        I.e. a reverse generic relationship.
"""
class BookmarkSerializer(serializers.ModelSerializer):
tags = serializers.StringRelatedField(many=True)
class Meta:
model = Bookmark
fields = ('tags', 'url')
serializer = BookmarkSerializer(self.bookmark)
expected = {
'tags': ['django', 'python'],
'url': 'https://www.djangoproject.com/'
}
assert serializer.data == expected
def test_generic_fk(self):
"""
Test a relationship that spans a GenericForeignKey field.
        I.e. a forward generic relationship.
"""
class TagSerializer(serializers.ModelSerializer):
tagged_item = serializers.StringRelatedField()
class Meta:
model = Tag
fields = ('tag', 'tagged_item')
serializer = TagSerializer(Tag.objects.all(), many=True)
expected = [
{
'tag': 'django',
'tagged_item': 'Bookmark: https://www.djangoproject.com/'
},
{
'tag': 'python',
'tagged_item': 'Bookmark: https://www.djangoproject.com/'
},
{
'tag': 'reminder',
'tagged_item': 'Note: Remember the milk'
}
]
assert serializer.data == expected
|
{
"content_hash": "585e56786471dce6081e5180c21ced54",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 85,
"avg_line_length": 29.88888888888889,
"alnum_prop": 0.6093556381660471,
"repo_name": "linovia/django-rest-framework",
"id": "a3798b0a39741578dcd39ef39f8dc3626934a0f8",
"size": "3228",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "tests/test_relations_generic.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "39327"
},
{
"name": "HTML",
"bytes": "81231"
},
{
"name": "JavaScript",
"bytes": "17284"
},
{
"name": "Python",
"bytes": "1124994"
}
],
"symlink_target": ""
}
|
import unittest
def char_counter(string):
counter = dict()
for char in string:
if char in counter:
counter[char] += 1
else:
counter[char] = 1
result= ""
for key in counter:
result += key + ": " + str(counter[key]) + ", "
return result[0: len(result) - 2]
def run():
print(char_counter("abacca"))
print(char_counter("aadfafadsffabacca"))
print(char_counter("abaadfbfjdhjffgndsgfgsfdgscca"))
class Test(unittest.TestCase):
def test_one(self):
self.assertEqual(char_counter("abacca"), "a: 3, b: 1, c: 2")
if __name__ == '__main__':
#unittest.main()
run()
|
{
"content_hash": "0a8ef3b8821acf27e400a1608c971406",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 68,
"avg_line_length": 20.02857142857143,
"alnum_prop": 0.5378031383737518,
"repo_name": "adrianbeloqui/Python",
"id": "d55df40503726ed9b55334e0e855896d3c72c359",
"size": "701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "character_counter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19877"
}
],
"symlink_target": ""
}
|
import lcm
def stripLog(inFile, outFile):
channels = {'CAMERA':True, 'SCAN':True, 'PRE_SPINDLE_TO_POST_SPINDLE':True}
inLog = lcm.EventLog(inFile, 'r')
outLog = lcm.EventLog(outFile, 'w', overwrite=True)
for event in inLog:
if event.channel in channels:
outLog.write_event(event.timestamp, event.channel, event.data)
print 'done'
if __name__=='__main__':
inFile = '/home/antone/data/2015-02-11_multisense-02-calib/lcmlog-2015-02-11.00'
outFile = '/home/antone/data/2015-02-11_multisense-02-calib/lcmlog-2015-02-11.00.stripped'
stripLog(inFile, outFile)
|
{
"content_hash": "8170179c927b43213cd75af3374e059c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 94,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.6601941747572816,
"repo_name": "openhumanoids/oh-distro",
"id": "1ed1c100c71c1200e2bcd344e1128f60bb2918e4",
"size": "618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "software/perception/matt_sandbox/matlab/multisense_calib/strip_log.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "131738"
},
{
"name": "C++",
"bytes": "2773796"
},
{
"name": "CMake",
"bytes": "1099155"
},
{
"name": "GLSL",
"bytes": "5320"
},
{
"name": "Java",
"bytes": "233603"
},
{
"name": "JavaScript",
"bytes": "232"
},
{
"name": "M",
"bytes": "3971"
},
{
"name": "Makefile",
"bytes": "82095"
},
{
"name": "Matlab",
"bytes": "1946915"
},
{
"name": "Mercury",
"bytes": "1487"
},
{
"name": "Objective-C",
"bytes": "10657"
},
{
"name": "Pascal",
"bytes": "3353"
},
{
"name": "Perl",
"bytes": "18915"
},
{
"name": "Python",
"bytes": "378988"
},
{
"name": "Shell",
"bytes": "35631"
},
{
"name": "XSLT",
"bytes": "73426"
}
],
"symlink_target": ""
}
|
import re
from pathlib import Path
regex = r"(?:.*/)?(?P<firstname>[^/]*) (?P<lastname>[^/]*)_(?P<moodleid>\d*)_\w*/?.*"
# Compiling the regex is optional; it is a performance optimization.
cmp_regex = re.compile(regex)
def parseSub(file_path):
file_path = Path(file_path).as_posix()
m = re.match(cmp_regex, file_path)
if m:
return {
'last_name' : m.group('lastname'),
'first_name' : m.group('firstname'),
'moodle_id' : m.group('moodleid')
}
else:
return {
'last_name' : '',
'first_name' : '',
'moodle_id' : ''
}
def test():
test1 = "firstname lastname_1128269_HW2/"
test2 = "firstname(alternatefirstname) lastname_1093811_HW2/"
test3 = "stuff/directories/firstname lastname_1128269_HW2/"
test4 = "stuff/directories/firstname lastname_1128269_HW2/stuffagain/file.py"
test5 = "stuff/directories/firstname(alternatefirstname) lastname_1093811_HW2/moredire/file.py"
test6 = ""
for test in [test1, test2, test3, test4, test5, test6]:
print(parseSub(test))
|
{
"content_hash": "d2302ea3e862345a231e74a69c6ea216",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 99,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.588702559576346,
"repo_name": "ludvi025/graid",
"id": "2028120f82b9ae28a38375b892ce283f765829e7",
"size": "1133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/tools/sub_parser.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42836"
}
],
"symlink_target": ""
}
|
import unittest
def suite():
from babel.messages.tests import catalog, extract, frontend, mofile, \
plurals, pofile, checkers
suite = unittest.TestSuite()
suite.addTest(catalog.suite())
suite.addTest(extract.suite())
suite.addTest(frontend.suite())
suite.addTest(mofile.suite())
suite.addTest(plurals.suite())
suite.addTest(pofile.suite())
suite.addTest(checkers.suite())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
{
"content_hash": "0e0aace98a349716552bdac6af10195b",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 74,
"avg_line_length": 31.176470588235293,
"alnum_prop": 0.6377358490566037,
"repo_name": "miracle2k/babel",
"id": "2fb1b2ed245e0d38544fe5e978b5e494e697bf7e",
"size": "1025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "babel/messages/tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "4747"
},
{
"name": "Python",
"bytes": "441517"
}
],
"symlink_target": ""
}
|
import uuid
from keystone.common.validation import parameter_types
from keystone.common.validation import validators
from keystone import exception
from keystone import tests
# Test schema to validate create requests against
_CREATE = {
'type': 'object',
'properties': {
'name': parameter_types.name,
'description': parameter_types.description,
'enabled': parameter_types.boolean,
'url': parameter_types.url,
'email': parameter_types.email
},
'required': ['name'],
'additionalProperties': True,
}
class ValidationTestCase(tests.TestCase):
def setUp(self):
super(ValidationTestCase, self).setUp()
self.resource_name = 'some resource name'
self.description = 'Some valid description'
self.valid_enabled = True
self.valid_url = 'http://example.com'
self.valid_email = 'joe@example.com'
self.create_schema_validator = validators.SchemaValidator(_CREATE)
def test_create_schema_with_all_valid_parameters(self):
"""Validate proper values against test schema."""
request_to_validate = {'name': self.resource_name,
'some_uuid': uuid.uuid4().hex,
'description': self.description,
'enabled': self.valid_enabled,
'url': self.valid_url}
self.create_schema_validator.validate(request_to_validate)
def test_create_schema_with_name_too_long_raises_exception(self):
"""Validate long names.
Validate that an exception is raised when validating a string of 255+
characters passed in as a name.
"""
invalid_name = ''
for i in range(255):
invalid_name = invalid_name + str(i)
request_to_validate = {'name': invalid_name}
self.assertRaises(exception.SchemaValidationError,
self.create_schema_validator.validate,
request_to_validate)
def test_create_schema_with_name_too_short_raises_exception(self):
"""Validate short names.
Test that an exception is raised when passing a string of length
zero as a name parameter.
"""
request_to_validate = {'name': ''}
self.assertRaises(exception.SchemaValidationError,
self.create_schema_validator.validate,
request_to_validate)
def test_create_schema_with_unicode_name_is_successful(self):
"""Test that we successfully validate a unicode string."""
request_to_validate = {'name': u'αβγδ'}
self.create_schema_validator.validate(request_to_validate)
def test_create_schema_with_invalid_enabled_format_raises_exception(self):
"""Validate invalid enabled formats.
Test that an exception is raised when passing invalid boolean-like
values as `enabled`.
"""
invalid_enabled_formats = 'some string'
request_to_validate = {'name': self.resource_name,
'enabled': invalid_enabled_formats}
self.assertRaises(exception.SchemaValidationError,
self.create_schema_validator.validate,
request_to_validate)
def test_create_schema_with_valid_enabled_formats(self):
"""Validate valid enabled formats.
Test that we have successful validation on boolean values for
`enabled`.
"""
valid_enabled_formats = [True, False]
for valid_enabled in valid_enabled_formats:
request_to_validate = {'name': self.resource_name,
'enabled': valid_enabled}
# Make sure validation doesn't raise a validation exception
self.create_schema_validator.validate(request_to_validate)
def test_create_schema_with_valid_urls(self):
"""Test that proper urls are successfully validated."""
valid_urls = ['https://169.254.0.1', 'https://example.com',
'https://EXAMPLE.com', 'https://127.0.0.1:35357',
'https://localhost']
for valid_url in valid_urls:
request_to_validate = {'name': self.resource_name,
'url': valid_url}
self.create_schema_validator.validate(request_to_validate)
def test_create_schema_with_invalid_urls(self):
"""Test that an exception is raised when validating improper urls."""
invalid_urls = ['http//something.com',
'https//something.com',
'https://9.9.9']
for invalid_url in invalid_urls:
request_to_validate = {'name': self.resource_name,
'url': invalid_url}
self.assertRaises(exception.SchemaValidationError,
self.create_schema_validator.validate,
request_to_validate)
def test_create_schema_with_valid_email(self):
"""Validate email address
Test that we successfully validate properly formatted email
addresses.
"""
request_to_validate = {'name': self.resource_name,
'email': self.valid_email}
self.create_schema_validator.validate(request_to_validate)
def test_create_schema_with_invalid_email(self):
"""Validate invalid email address
Test that an exception is raised when validating improperly
formatted email addresses.
"""
request_to_validate = {'name': self.resource_name,
'email': 'some invalid email value'}
self.assertRaises(exception.SchemaValidationError,
self.create_schema_validator.validate,
request_to_validate)
|
{
"content_hash": "5b1d4f5c013e40ae25ddbf20aec3ee35",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 78,
"avg_line_length": 40.054421768707485,
"alnum_prop": 0.5944293478260869,
"repo_name": "reeshupatel/demo",
"id": "76793c4993a58fbef5c5f43121d0f1e19143c63e",
"size": "6457",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "keystone/tests/test_validation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "2886403"
},
{
"name": "Shell",
"bytes": "10635"
}
],
"symlink_target": ""
}
|
"""
Unit testing module for pytest-pylint plugin
"""
pytest_plugins = 'pytester', # pylint: disable=invalid-name
def test_basic(testdir):
"""Verify basic pylint checks"""
testdir.makepyfile("""import sys""")
result = testdir.runpytest('--pylint')
assert 'Missing module docstring' in result.stdout.str()
assert 'Unused import sys' in result.stdout.str()
assert 'Final newline missing' in result.stdout.str()
assert 'passed' not in result.stdout.str()
def test_error_control(testdir):
"""Verify that error types are configurable"""
testdir.makepyfile("""import sys""")
result = testdir.runpytest('--pylint', '--pylint-error-types=EF')
assert '1 passed' in result.stdout.str()
def test_pylintrc_file(testdir):
"""Verify that a specified pylint rc file will work."""
rcfile = testdir.makefile('rc', """
[FORMAT]
max-line-length=3
""")
testdir.makepyfile("""import sys""")
result = testdir.runpytest(
'--pylint', '--pylint-rcfile={0}'.format(rcfile.strpath)
)
assert 'Line too long (10/3)' in result.stdout.str()
def test_pylintrc_ignore(testdir):
"""Verify that a pylintrc file with ignores will work."""
rcfile = testdir.makefile('rc', """
[MASTER]
ignore = test_pylintrc_ignore.py
""")
testdir.makepyfile("""import sys""")
result = testdir.runpytest(
'--pylint', '--pylint-rcfile={0}'.format(rcfile.strpath)
)
assert 'collected 0 items' in result.stdout.str()
|
{
"content_hash": "ee317f6746b25e8279a0ea1487b5ac10",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 69,
"avg_line_length": 29.56,
"alnum_prop": 0.6623815967523681,
"repo_name": "rutsky/pytest-pylint",
"id": "2a00618cb06b84c5e19cf169252af701fe4dc218",
"size": "1502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_pytest_pylint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6608"
}
],
"symlink_target": ""
}
|
class StemmerI(object):
"""
A processing interface for removing morphological affixes from
words. This process is known as stemming.
"""
def stem(self, token):
"""
Strip affixes from the token and return the stem.
:param token: The token that should be stemmed.
:type token: str
"""
raise NotImplementedError()
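# Illustrative sketch (added; not part of the original interface): a minimal
# concrete stemmer satisfying StemmerI. The suffix list is a hypothetical
# example, not NLTK's actual algorithm.
class SuffixStemmer(StemmerI):
    """Strip a small fixed set of English suffixes from a token."""
    SUFFIXES = ('ing', 'ly', 'ed', 's')
    def stem(self, token):
        for suffix in self.SUFFIXES:
            # only strip when a reasonably long stem remains
            if token.endswith(suffix) and len(token) > len(suffix) + 2:
                return token[:-len(suffix)]
        return token
# e.g. SuffixStemmer().stem('running') -> 'runn'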
|
{
"content_hash": "fbc1b95b6ae1472124f3b335e26e8f0e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 66,
"avg_line_length": 24,
"alnum_prop": 0.6145833333333334,
"repo_name": "MiniPlayer/log-island",
"id": "5866a0cc7c180eff040d73d821932676f79c3134",
"size": "673",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "logisland-plugins/logisland-scripting-processors-plugin/src/main/resources/nltk/stem/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3163"
},
{
"name": "HTML",
"bytes": "32441"
},
{
"name": "Java",
"bytes": "2489666"
},
{
"name": "JavaScript",
"bytes": "32790"
},
{
"name": "Makefile",
"bytes": "6774"
},
{
"name": "Python",
"bytes": "4205508"
},
{
"name": "Roff",
"bytes": "3242333"
},
{
"name": "Scala",
"bytes": "274678"
},
{
"name": "Shell",
"bytes": "31034"
}
],
"symlink_target": ""
}
|
def genSubsets(L):
'''
L: list
Returns: all subsets of L
'''
# base case
if len(L) == 0:
return [[]]
# recursive block
# all the subsets of smaller + all the subsets of smaller combined with extra = all subsets of L
extra = L[0:1]
smaller = genSubsets(L[1:])
combine = []
for i in smaller:
combine.append(extra + i)
return smaller + combine
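# Worked example (added for illustration): tracing the recursion on a short
# list shows how each level doubles the number of subsets.
#   genSubsets([])     -> [[]]
#   genSubsets([2])    -> [[]] + [[2]]              = [[], [2]]
#   genSubsets([1, 2]) -> [[], [2]] + [[1], [1, 2]] = [[], [2], [1], [1, 2]]
assert genSubsets([1, 2]) == [[], [2], [1], [1, 2]]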
|
{
"content_hash": "b672e5cddc5e02f26a09e2050fb21bc3",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 101,
"avg_line_length": 25.5625,
"alnum_prop": 0.5721271393643031,
"repo_name": "medifle/python_6.00.1x",
"id": "a98b7b44feddf0456ba25200a194c7a38cc6abff",
"size": "409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "defRecurGenSubsets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "264711"
}
],
"symlink_target": ""
}
|
import six
RECON_RELINKER_FILE = 'relinker.recon'
RECON_OBJECT_FILE = 'object.recon'
RECON_CONTAINER_FILE = 'container.recon'
RECON_ACCOUNT_FILE = 'account.recon'
RECON_DRIVE_FILE = 'drive.recon'
DEFAULT_RECON_CACHE_PATH = '/var/cache/swift'
def server_type_to_recon_file(server_type):
if not isinstance(server_type, six.string_types) or \
server_type.lower() not in ('account', 'container', 'object'):
raise ValueError('Invalid server_type')
return "%s.recon" % server_type.lower()
|
{
"content_hash": "790242285fd706f47024479e99c931c9",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 74,
"avg_line_length": 34.266666666666666,
"alnum_prop": 0.6984435797665369,
"repo_name": "openstack/swift",
"id": "44c8fb8ef159921ae842fd2aefa9d9c9e99bcca2",
"size": "1109",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "swift/common/recon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3072"
},
{
"name": "HTML",
"bytes": "2147"
},
{
"name": "JavaScript",
"bytes": "40376"
},
{
"name": "Jinja",
"bytes": "4305"
},
{
"name": "Python",
"bytes": "14278715"
},
{
"name": "Shell",
"bytes": "10106"
}
],
"symlink_target": ""
}
|
from paddle import _C_ops
from paddle.fluid.framework import dygraph_only
__all__ = []
@dygraph_only
def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
"""
Note:
This API is only supported from ``CUDA 11.0`` .
Applies matrix multiplication for `x` and `y` , `input` is added to
the final result. The equation is:
.. math::
out = alpha * x * y + beta * input
The supported input/output Tensor layout are as follows:
Note:
input[SparseCsrTensor] + x[SparseCsrTensor] @ y[SparseCsrTensor] -> out[SparseCsrTensor]
input[DenseTensor] + x[SparseCsrTensor] @ y[DenseTensor] -> out[DenseTensor]
input[SparseCooTensor] + x[SparseCooTensor] @ y[SparseCooTensor] -> out[SparseCooTensor]
input[DenseTensor] + x[SparseCooTensor] @ y[DenseTensor] -> out[DenseTensor]
It supports backward propagation.
    Dimensions of `input`, `x` and `y` must be the same and >= 2D. Automatic broadcasting of Tensor is not supported.
Args:
input (SparseTensor|DenseTensor): The input tensor. Shape is [*, M, N]. The data type can be float32 or float64.
x (SparseTensor): The input SparseTensor. Shape is [*, M, K]. The data type can be float32 or float64.
y (SparseTensor|DenseTensor): The input tensor. Shape is [*, K, N]. The data type can be float32 or float64.
beta (float, optional): Coefficient of `input` . Default: 1.0
alpha (float, optional): Coefficient of `x * y` . Default: 1.0
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
        SparseTensor|DenseTensor: Tensor type, data type and shape are the same as `input`.
Examples:
.. code-block:: python
# required: gpu
import paddle
# dense + csr @ dense -> dense
input = paddle.rand([3, 2])
crows = [0, 1, 2, 3]
cols = [1, 2, 0]
values = [1., 2., 3.]
x = paddle.sparse.sparse_csr_tensor(crows, cols, values, [3, 3])
y = paddle.rand([3, 2])
out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
# dense + coo @ dense -> dense
input = paddle.rand([3, 2])
indices = [[0, 1, 2], [1, 2, 0]]
values = [1., 2., 3.]
x = paddle.sparse.sparse_coo_tensor(indices, values, [3, 3])
y = paddle.rand([3, 2])
out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
"""
return _C_ops.sparse_addmm(input, x, y, beta, alpha)
|
{
"content_hash": "829d28873a7e9b6b10826245bd9c6463",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 142,
"avg_line_length": 38.23529411764706,
"alnum_prop": 0.5903846153846154,
"repo_name": "luotao1/Paddle",
"id": "a09611d2d0f5178685c5b5488de30e96d8b489e9",
"size": "3213",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/sparse/multiary.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('smartshark', '0016_auto_20160610_1438'),
]
operations = [
migrations.AlterField(
model_name='plugin',
name='active',
field=models.BooleanField(default=False),
),
]
|
{
"content_hash": "a20f156fcfd060fc44ded1251cbc29c2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 53,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.5989583333333334,
"repo_name": "smartshark/serverSHARK",
"id": "d982f3f8714e235a019ef4a6b0c8772b691096ab",
"size": "456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smartshark/migrations/0017_auto_20160613_0914.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "66094"
},
{
"name": "HTML",
"bytes": "40562"
},
{
"name": "JavaScript",
"bytes": "1056"
},
{
"name": "Jinja",
"bytes": "4515"
},
{
"name": "Less",
"bytes": "78481"
},
{
"name": "Python",
"bytes": "267732"
},
{
"name": "Ruby",
"bytes": "5037"
},
{
"name": "SCSS",
"bytes": "79489"
}
],
"symlink_target": ""
}
|
import os
import sys
import shutil
import sqlite3
class Logger(object):
def __init__(self, log_file):
self.terminal = sys.stdout
self.log = open(log_file, "w")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
#this flush method is needed for python 3 compatibility.
#this handles the flush command by doing nothing.
#you might want to specify some extra behavior here.
pass
def ensureDirectoryExists(path):
"""Create the directory named by path and any necessary parents if it
doesn't exist.
"""
if not os.path.exists(path):
os.makedirs(path)
# Copy infile to outFile and create dirs if not present
def copy(inFile, outFile):
    # create path if it doesn't exist
out_path = os.path.dirname(outFile)
ensureDirectoryExists(out_path)
# copy file
shutil.copyfile(inFile, outFile)
def open_sqlite3_db(dir):
"""Open the sqlite3 database contained in dir. We use "data.sqlite3"."""
return sqlite3.connect(os.path.join(dir, 'data.sqlite3'))
def scale_net_input_data(data):
"""Rescale data, presumably obtained from an 8-bit grayscale image, to
the range [0, 1] for feeding into the network.
"""
return data / 255.
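# Usage sketch (added for illustration; the filename is hypothetical): tee
# stdout to a log file while still printing to the terminal.
#   sys.stdout = Logger('run.log')
#   print('this line goes to both the terminal and run.log')
#   print(scale_net_input_data(255.))  # -> 1.0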
|
{
"content_hash": "2086a99be4d7df7569963f06fa67349f",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 77,
"avg_line_length": 25.647058823529413,
"alnum_prop": 0.6666666666666666,
"repo_name": "aylward/ITKTubeTK",
"id": "47f65129041140fa598701e37c06d3aca0862e48",
"size": "1308",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/archive/SegmentVesselsUsingNeuralNetworks/scripts/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "13419"
},
{
"name": "C++",
"bytes": "3271086"
},
{
"name": "CMake",
"bytes": "96467"
},
{
"name": "Python",
"bytes": "72225"
},
{
"name": "Shell",
"bytes": "23057"
}
],
"symlink_target": ""
}
|
from bigml.api import BigML
api = BigML()
source1_file = "iris.csv"
args = \
{'fields': {'000000': {'name': 'sepal length', 'optype': 'numeric'},
'000001': {'name': 'sepal width', 'optype': 'numeric'},
'000002': {'name': 'petal length', 'optype': 'numeric'},
'000003': {'name': 'petal width', 'optype': 'numeric'},
'000004': {'name': 'species',
'optype': 'categorical',
'term_analysis': {'enabled': True}}},
}
source2 = api.create_source(source1_file, args)
api.ok(source2)
args = \
{'objective_field': {'id': '000004'},
}
dataset1 = api.create_dataset(source2, args)
api.ok(dataset1)
|
{
"content_hash": "ab56d661af03226425f6301a6d7c422e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 68,
"avg_line_length": 30.736842105263158,
"alnum_prop": 0.6386986301369864,
"repo_name": "jaor/bigmler",
"id": "16827017e4b1dd0f70bdc098ad28d0a6a29eed5c",
"size": "584",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "check_files/reify_dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "26465"
},
{
"name": "JavaScript",
"bytes": "73784"
},
{
"name": "Jupyter Notebook",
"bytes": "802"
},
{
"name": "Python",
"bytes": "2081730"
},
{
"name": "R",
"bytes": "71763"
}
],
"symlink_target": ""
}
|
"""
flask.ext.security.decorators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Flask-Security decorators module
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
from collections import namedtuple
from functools import wraps
from flask import current_app, Response, request, redirect, _request_ctx_stack
from flask.ext.login import current_user, login_required
from flask.ext.principal import RoleNeed, Permission, Identity, identity_changed
from werkzeug.local import LocalProxy
from . import utils
# Convenient references
_security = LocalProxy(lambda: current_app.extensions['security'])
_default_unauthorized_html = """
<h1>Unauthorized</h1>
<p>The server could not verify that you are authorized to access the URL
requested. You either supplied the wrong credentials (e.g. a bad password),
or your browser doesn't understand how to supply the credentials required.</p>
"""
BasicAuth = namedtuple('BasicAuth', 'username, password')
def _get_unauthorized_response(text=None, headers=None):
text = text or _default_unauthorized_html
headers = headers or {}
return Response(text, 401, headers)
def _get_unauthorized_view():
cv = utils.get_url(utils.config_value('UNAUTHORIZED_VIEW'))
utils.do_flash(*utils.get_message('UNAUTHORIZED'))
return redirect(cv or request.referrer or '/')
def _check_token():
header_key = _security.token_authentication_header
args_key = _security.token_authentication_key
header_token = request.headers.get(header_key, None)
token = request.args.get(args_key, header_token)
if request.get_json(silent=True):
token = request.json.get(args_key, token)
user = _security.login_manager.token_callback(token)
if user and user.is_authenticated():
app = current_app._get_current_object()
_request_ctx_stack.top.user = user
identity_changed.send(app, identity=Identity(user.id))
return True
return False
def _check_http_auth():
auth = request.authorization or BasicAuth(username=None, password=None)
user = _security.datastore.find_user(email=auth.username)
if user and utils.verify_and_update_password(auth.password, user):
_security.datastore.commit()
app = current_app._get_current_object()
_request_ctx_stack.top.user = user
identity_changed.send(app, identity=Identity(user.id))
return True
return False
def http_auth_required(realm):
"""Decorator that protects endpoints using Basic HTTP authentication.
The username should be set to the user's email address.
:param realm: optional realm name"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if _check_http_auth():
return fn(*args, **kwargs)
r = _security.default_http_auth_realm if callable(realm) else realm
h = {'WWW-Authenticate': 'Basic realm="%s"' % r}
return _get_unauthorized_response(headers=h)
return wrapper
if callable(realm):
return decorator(realm)
return decorator
def auth_token_required(fn):
"""Decorator that protects endpoints using token authentication. The token
should be added to the request by the client by using a query string
variable with a name equal to the configuration value of
`SECURITY_TOKEN_AUTHENTICATION_KEY` or in a request header named that of
the configuration value of `SECURITY_TOKEN_AUTHENTICATION_HEADER`
"""
@wraps(fn)
def decorated(*args, **kwargs):
if _check_token():
return fn(*args, **kwargs)
return _get_unauthorized_response()
return decorated
def auth_required(*auth_methods):
"""
    Decorator that protects endpoints through multiple mechanisms.
Example::
@app.route('/dashboard')
@auth_required('token', 'session')
def dashboard():
return 'Dashboard'
:param auth_methods: Specified mechanisms.
"""
login_mechanisms = {
'token': lambda: _check_token(),
'basic': lambda: _check_http_auth(),
'session': lambda: current_user.is_authenticated()
}
def wrapper(fn):
@wraps(fn)
def decorated_view(*args, **kwargs):
mechanisms = [login_mechanisms.get(method) for method in auth_methods]
for mechanism in mechanisms:
if mechanism and mechanism():
return fn(*args, **kwargs)
return _get_unauthorized_response()
return decorated_view
return wrapper
def roles_required(*roles):
"""Decorator which specifies that a user must have all the specified roles.
Example::
@app.route('/dashboard')
@roles_required('admin', 'editor')
def dashboard():
return 'Dashboard'
The current user must have both the `admin` role and `editor` role in order
to view the page.
:param args: The required roles.
"""
def wrapper(fn):
@wraps(fn)
def decorated_view(*args, **kwargs):
perms = [Permission(RoleNeed(role)) for role in roles]
for perm in perms:
if not perm.can():
return _get_unauthorized_view()
return fn(*args, **kwargs)
return decorated_view
return wrapper
def roles_accepted(*roles):
"""Decorator which specifies that a user must have at least one of the
specified roles. Example::
@app.route('/create_post')
@roles_accepted('editor', 'author')
def create_post():
return 'Create Post'
The current user must have either the `editor` role or `author` role in
order to view the page.
:param args: The possible roles.
"""
def wrapper(fn):
@wraps(fn)
def decorated_view(*args, **kwargs):
perm = Permission(*[RoleNeed(role) for role in roles])
if perm.can():
return fn(*args, **kwargs)
return _get_unauthorized_view()
return decorated_view
return wrapper
def anonymous_user_required(f):
@wraps(f)
def wrapper(*args, **kwargs):
if current_user.is_authenticated():
return redirect(utils.get_url(_security.post_login_view))
return f(*args, **kwargs)
return wrapper
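# Minimal usage sketch (added for illustration; the Flask app and view are
# hypothetical, not part of this module):
#   @app.route('/api/report')
#   @auth_required('token', 'basic', 'session')
#   @roles_required('admin')
#   def report():
#       return 'admin-only report'
# auth_required is the outer decorator, so one of the login mechanisms runs
# (and loads an identity) before roles_required checks the permission.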
|
{
"content_hash": "9bce3ce20b742c08c4f272e54a27c154",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 82,
"avg_line_length": 31.004878048780487,
"alnum_prop": 0.6428571428571429,
"repo_name": "maxziv/SEApp",
"id": "40b5ba58b9d00219621b2c5f923629b24d767ae0",
"size": "6380",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "server/lib/flask_security/decorators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1449"
},
{
"name": "CSS",
"bytes": "438732"
},
{
"name": "JavaScript",
"bytes": "190797"
},
{
"name": "PHP",
"bytes": "232"
},
{
"name": "Perl",
"bytes": "36"
},
{
"name": "Python",
"bytes": "4621804"
},
{
"name": "Shell",
"bytes": "4561"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('imager_images', '0004_auto_20170206_1601'),
]
operations = [
migrations.AlterField(
model_name='photo',
name='photo',
field=models.ImageField(default=3, upload_to=''),
preserve_default=False,
),
]
|
{
"content_hash": "3eb462f50a114f236e4913d6b440282d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 61,
"avg_line_length": 22.57894736842105,
"alnum_prop": 0.5874125874125874,
"repo_name": "midfies/django-imager",
"id": "da0d23ec593d2c369d8e74bb701c6cee9bf037f5",
"size": "502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagersite/imager_images/migrations/0005_auto_20170206_1658.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4562"
},
{
"name": "HTML",
"bytes": "140212"
},
{
"name": "JavaScript",
"bytes": "12316"
},
{
"name": "Python",
"bytes": "58058"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('leaderboard', '0002_auto_20161030_2333'),
('activities', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='activity',
name='run',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='notification',
name='run',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='leaderboard.Algorithm'),
),
migrations.AlterField(
model_name='notification',
name='notification_type',
field=models.CharField(choices=[(b'L', b'Liked'), (b'C', b'Commented'), (b'F', b'Favorited'), (b'A', b'Answered'), (b'W', b'Accepted Answer'), (b'E', b'Edited Article'), (b'S', b'Also Commented'), (b'R', b'Run is done')], max_length=1),
),
]
|
{
"content_hash": "c12688949781093017be7edbaba1efe9",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 248,
"avg_line_length": 35.56666666666667,
"alnum_prop": 0.5876288659793815,
"repo_name": "JeromeRisselin/PRJ-medtec_sigproc",
"id": "9cbf41d33ec56c841a239cd6fcc1dc2eb39edf3b",
"size": "1139",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "echopen-leaderboard/bootcamp/activities/migrations/0002_auto_20161108_0226.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "70570"
},
{
"name": "CSS",
"bytes": "341808"
},
{
"name": "HTML",
"bytes": "139971"
},
{
"name": "JavaScript",
"bytes": "1264215"
},
{
"name": "Jupyter Notebook",
"bytes": "2986408"
},
{
"name": "Makefile",
"bytes": "748"
},
{
"name": "Matlab",
"bytes": "1777"
},
{
"name": "PowerShell",
"bytes": "468"
},
{
"name": "Python",
"bytes": "240794"
},
{
"name": "Roff",
"bytes": "10996"
},
{
"name": "Shell",
"bytes": "3483"
}
],
"symlink_target": ""
}
|
from girder.exceptions import ValidationException
from girder.plugin import GirderPlugin
from girder.utility import setting_utilities
from . import constants, rest
@setting_utilities.validator(constants.PluginSettings.GOOGLE_ANALYTICS_TRACKING_ID)
def validateTrackingId(doc):
if not doc['value']:
raise ValidationException('Google Analytics Tracking ID must not be empty.', 'value')
class GoogleAnalyticsPlugin(GirderPlugin):
DISPLAY_NAME = 'Google Analytics'
CLIENT_SOURCE_PATH = 'web_client'
def load(self, info):
info['apiRoot'].google_analytics = rest.GoogleAnalytics()
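# Hypothetical sketch (added for illustration): the same decorator pattern
# validates any other setting key a plugin registers.
#   @setting_utilities.validator('my_plugin.some_setting')
#   def validateSomeSetting(doc):
#       if not doc['value']:
#           raise ValidationException('Value must not be empty.', 'value')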
|
{
"content_hash": "4e73da28e4512183862eb38c6093673f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 93,
"avg_line_length": 32.26315789473684,
"alnum_prop": 0.763458401305057,
"repo_name": "kotfic/girder",
"id": "91036786f606ec7020a0193600b3b8506c8a2570",
"size": "1402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/google_analytics/girder_google_analytics/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "38260"
},
{
"name": "CSS",
"bytes": "54843"
},
{
"name": "Dockerfile",
"bytes": "2482"
},
{
"name": "HCL",
"bytes": "1424"
},
{
"name": "HTML",
"bytes": "139763"
},
{
"name": "JavaScript",
"bytes": "1129529"
},
{
"name": "Mako",
"bytes": "7873"
},
{
"name": "Python",
"bytes": "2117090"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Ruby",
"bytes": "9921"
},
{
"name": "Shell",
"bytes": "2177"
}
],
"symlink_target": ""
}
|
from annotations import Annotations, ExpansionError
from jstruct_generator import CGenerator
from preprocess import preprocess
from pycparser import c_parser, c_ast, plyparser
import re
GENERATED1NL = '// Generated automatically by libjstruct. Do Not Modify.\n'
GENERATED = GENERATED1NL + '\n'
INIT_INSTRUCTIONS = '// This file must be included directly in a single c file.\n\n'
INCLUDE_H = '#include "{0}"\n'.format
# these prepended headers get parsed, then output.
# TODO: figure out how to add a comment after?
PREPEND_HEADERS = '#include {JSTRUCT_H}\n#include {JSON_OBJECT_H}\n'.format
GUARD_HEADERS_EXPR = re.compile(
r'^\s*#ifndef\s+[A-Z_]+\s+#define\s+[A-Z_]+\s*\n',
flags=re.IGNORECASE
)
# TODO: allow more than 2 dots?
FILENAME_EXPR = re.compile(
r'^(?P<basename>[^.]+)\.(?:(?P<ext2>[^.]+)\.)?(?P<ext>[^.]+)$'
)
def parse_jstruct(filename, include_paths=[], defines=[]):
parser = c_parser.CParser()
with open(filename, 'r') as infile:
text = infile.read()
define_map = {
'JSTRUCT_H': '<jstruct/jstruct.h>',
'JSON_OBJECT_H': '<json-c/json_object.h>',
'ARRAYLIST_H': '<json-c/arraylist.h>'
}
define_map.update({ds[0]: ds[1] for ds in (d.split('=') for d in defines)})
defines = ['{0}={1}'.format(*kv) for kv in define_map.iteritems()]
# insert some header includes and a 'do not modify'
text = re.sub(
GUARD_HEADERS_EXPR,
r'\g<0>' + GENERATED + PREPEND_HEADERS(**define_map),
text, count=1
)
pptext, err = preprocess(text,
include_paths=include_paths,
defines=['__attribute__(x)='] + defines
)
if err:
import os
rel_filename = os.path.relpath(filename)
err = err.replace('<stdin>', rel_filename)
print(repr(defines))
raise Exception('C Preprocessor: ' + err)
try:
ast = parser.parse(pptext, filename=filename)
return (ast, text)
except plyparser.ParseError as ex:
import os
rel_filename = os.path.relpath(filename)
message = ex.message.replace('<stdin>', rel_filename)
raise plyparser.ParseError(message)
def prune_ast(ast, filename):
"""
    Prune the AST, which would otherwise contain all the definitions from all the included headers, too.
"""
ast.ext = [e for e in ast.ext if e.coord is None or e.coord.file == filename]
def split_ast(ast):
"""
    Split the AST into header and initializer parts.
initializers cannot be included in more than one translation unit (c file)
"""
init_decls = []
def extern_inits(decl):
# c_ast.ID is used for directives. pass them straight through to both files
if isinstance(decl, c_ast.ID):
init_decls.append(decl)
return decl
elif decl.init:
init_decls.append(c_ast.Decl(
decl.name,
decl.quals,
decl.storage,
decl.funcspec,
decl.type,
decl.init,
decl.bitsize
))
decl.storage = ['extern']
decl.init = None
return decl
else:
return decl
out_ast = c_ast.FileAST([extern_inits(e) for e in ast.ext])
init_ast = c_ast.FileAST(init_decls)
return (out_ast, init_ast)
def parse_and_generate(filename, out_filename=None, init_filename=None, include_paths=[], defines=[]):
"""
    Parse the file at filename.
    If out_filename and init_filename are None, return a tuple containing the generated source code for each.
    Otherwise write the generated files and return a tuple of their names.
"""
from os import path
if out_filename:
out_filename = re.sub(FILENAME_EXPR, out_filename, filename)
if init_filename:
init_filename = re.sub(FILENAME_EXPR, init_filename, filename)
rel_filename = ''
if out_filename is None and init_filename is None:
rel_filename = re.sub(FILENAME_EXPR, r'\g<basename>.h', path.basename(filename))
else:
init_dir = path.dirname(init_filename)
rel_filename = path.relpath(out_filename, init_dir)
ast, text = parse_jstruct(filename, include_paths=include_paths, defines=defines)
annotations = Annotations(text)
try:
annotations.expand(ast, '<stdin>')
except ExpansionError as ex:
ex.filename = filename
raise
prune_ast(ast, '<stdin>')
out_ast, init_ast = split_ast(ast)
generator = CGenerator()
out_result = generator.visit(out_ast)
init_result = generator.visit(init_ast)
if GUARD_HEADERS_EXPR.search(out_result):
out_result = re.sub(
GUARD_HEADERS_EXPR,
r'\g<0>' + GENERATED,
out_result, count=1
) + '\n#endif\n'
else:
out_result = GENERATED + out_result
init_result = re.sub(GUARD_HEADERS_EXPR, '', init_result)
init_instructions = INIT_INSTRUCTIONS if init_filename and init_filename.endswith('.h') else ''
init_result = GENERATED1NL + init_instructions + INCLUDE_H(rel_filename) + init_result
if out_filename:
with open(out_filename, 'w') as out_file:
out_file.write(out_result)
if init_filename:
with open(init_filename, 'w') as init_file:
init_file.write(init_result)
if out_filename is None and init_filename is None:
return (out_result, init_result)
else:
return (out_filename, init_filename)
if __name__ == '__main__':
import argparse
argparser = argparse.ArgumentParser(
description='Parse x.jstruct.h file and generate x.h and x.init.h ')
argparser.add_argument('infile',
metavar='jstruct_header_filename',
type=str,
help='x.jstruct.h file to be parsed'
)
argparser.add_argument('-o', '--out', dest='outfile', type=str,
default=r'\g<basename>.h',
help='output header file name. (python re.sub() repl syntax)')
argparser.add_argument('-i', '--init', dest='initfile', type=str,
default=r'\g<basename>.init.c',
help='initializer code file name. (python re.sub() repl syntax)')
argparser.add_argument('-s', '--silent', action='store_true',
help='silent mode')
argparser.add_argument('-D', '--define', action='append', default=[])
argparser.add_argument('includedir', type=str, nargs='*',
help='override/extra directories to pass to the c preprocessor with -I. ' +
'(Suggested: util/fake_libc_include)'
)
args = argparser.parse_args()
outfile, initfile = parse_and_generate(
args.infile,
args.outfile,
args.initfile,
args.includedir,
args.define
)
if not args.silent:
remember = '\nRemember to include {1} directly in a single .c file' \
if args.initfile.endswith('.h') else ''
print(
('Success: {0} and {1} generated successfully.' +
remember)
.format(outfile, initfile)
)
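# Illustrative note (added): the default --out/--init patterns are re.sub()
# replacement strings applied to FILENAME_EXPR, so for an input named
# 'point.jstruct.h':
#   re.sub(FILENAME_EXPR, r'\g<basename>.h', 'point.jstruct.h')      -> 'point.h'
#   re.sub(FILENAME_EXPR, r'\g<basename>.init.c', 'point.jstruct.h') -> 'point.init.c'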
|
{
"content_hash": "39365ce63799abe61cab167f1858189f",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 102,
"avg_line_length": 35.25757575757576,
"alnum_prop": 0.6110872367855608,
"repo_name": "jamie-pate/jstruct",
"id": "f3670adf0193dc86fc38cecbf22bd1fe6410f278",
"size": "7004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parse/jstruct_parse.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "59093"
},
{
"name": "Python",
"bytes": "40179"
}
],
"symlink_target": ""
}
|
"""
Examples of using the API from datastax to query Cassandra
https://datastax.github.io/python-driver/api/index.html
"""
import pprint
import uuid
from cassandra.cluster import Cluster
from cassandra.cqlengine.connection import setup
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.functions import MinTimeUUID, MaxTimeUUID
#from cassandra.metadata import KeyspaceMetadata, TableMetadata
from datetime import datetime
from models import Blog, Post, User
pp = pprint.PrettyPrinter(indent=4)
class CassObj(object):
def __init__(self):
self.cluster, self.session = self.connect_cluster()
self.connection = self.connect_cqlengine()
def connect_cluster(self, ip=['localhost']):
"""
Connect to a cluster and create a session to explore the cluster
looking at keyspaces, token_map, and tables
"""
cluster = Cluster(ip)
session = cluster.connect()
return (cluster, session)
def connect_cqlengine(self):
"""
        Set up a cqlengine connection so we can do queries like query_blogs.
"""
setup(['localhost'], 'cassdb')
def get_cluster_metadata(self):
""" Example of how to get metadata about the Cluster """
pp.pprint("Cluster Name is: " + self.cluster.metadata.cluster_name) # 'Test Cluster'
pp.pprint(self.session.hosts) # [<Host: 127.0.0.1 datacenter1>]
pp.pprint(self.cluster.metadata.keyspaces) # { 'cassdb': ...,
#pp.pprint(self.cluster.metadata.token_map.token_to_host_owner) # <Murmur3Token: -6809172061991742977>: <Host: 127.0.0.1 datacenter1>
def get_keyspace_metadata(self):
""" Example of how to get metadata about specific Keyspaces """
my_keyspace = self.cluster.metadata.keyspaces['cassdb']
pp.pprint("Keyspace name is: " + my_keyspace.name) # 'cassdb'
pp.pprint(my_keyspace.tables) # { 'blog': ...,
pp.pprint(my_keyspace.user_types) # { 'address': ...
pp.pprint(my_keyspace.indexes) # { 'blog_user_idx': ...
pp.pprint(my_keyspace.views)
def get_table_metadata(self):
""" Example of how to get metadata about Tables in a specific Keyspace """
my_keyspace = self.cluster.metadata.keyspaces['cassdb']
my_table = my_keyspace.tables['blog']
pp.pprint(my_table)
pp.pprint(my_table.columns) # { 'blog_id': }
def query_blogs(self):
""" Examples of some simple querying """
print "Queryset for All Blogs"
for blog in Blog.objects.all():
print(blog)
print "Queryset to Filter Blogs with user: Will"
queryset_will = Blog.objects.filter(user='will')
for blog in queryset_will:
print(blog)
def create_user(self):
""" Examples of creating a User """
sync_table(User)
self.user1 = User.create(
user_id=uuid.uuid1(),
first_name='Will',
last_name='Liu',
todo_list=['Laundry', 'Dishes', 'Cook'],
favorite_restaurant={'American': 'Sweet Afton', 'Indian': 'Seva'},
friends={'Wayne': 30, 'John': 33},
favorite_numbers=set([1, 1, 2, 3, 4, 6])
)
def create_posts(self):
sync_table(Post)
self.post1 = Post.create(
post_id=uuid.uuid1(),
blog_id='fdd0ba00-13b2-11e6-88a9-0002a5d5c51c',
created_at=datetime.now(),
post_title='I did it!',
content='Stuff goes in here',
tags=['Books', 'Movies', 'Audio']
)
self.post2 = Post.create(
post_id=uuid.uuid1(),
blog_id='fdd0ba00-13b2-11e6-88a9-0002a5d5c51c',
created_at=datetime.now(),
post_title='You did it!',
content='More stuff goes in here',
tags=set(['a', 'b', 'c', 'c'])
)
def query_posts(self):
""" Examples of some slightly more advanced querying """
min_time = datetime(2016, 1, 1)
max_time = datetime(2017, 1, 1)
queryset_filter = Post.objects.filter(
id='312756e2-0ad6-465f-be8c-7d9a35c0fa20',
post_id__gt=MinTimeUUID(min_time),
post_id__lt=MaxTimeUUID(max_time))
for post in queryset_filter:
print "SUCCESS"
print(post)
if __name__ == '__main__':
cass1 = CassObj()
# cass1.get_cluster_metadata()
# cass1.get_keyspace_metadata()
# cass1.get_table_metadata()
#cass1.query_blogs()
#cass1.create_posts()
#cass1.query_posts()
cass1.create_user()
|
{
"content_hash": "eaf6ab1477cba4b77c74f7357d442b82",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 142,
"avg_line_length": 35.50769230769231,
"alnum_prop": 0.6009532062391681,
"repo_name": "WilliamQLiu/django-cassandra-prototype",
"id": "c92469d6506246b8741ec1177be7f4486dbf066e",
"size": "4616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cass-prototype/reddit/queries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10"
},
{
"name": "Python",
"bytes": "17588"
}
],
"symlink_target": ""
}
|
import inspect
import six
from .core import Locust, TaskSet
from .log import console_logger
def print_task_ratio(locusts, total=False, level=0, parent_ratio=1.0):
d = get_task_ratio_dict(locusts, total=total, parent_ratio=parent_ratio)
_print_task_ratio(d)
def _print_task_ratio(x, level=0):
for k, v in six.iteritems(x):
padding = 2*" "*level
ratio = v.get('ratio', 1)
console_logger.info(" %-10s %-50s" % (padding + "%-6.1f" % (ratio*100), padding + k))
if 'tasks' in v:
_print_task_ratio(v['tasks'], level + 1)
def get_task_ratio_dict(tasks, total=False, parent_ratio=1.0):
"""
Return a dict containing task execution ratio info
"""
if hasattr(tasks[0], 'weight'):
divisor = sum(t.weight for t in tasks)
else:
divisor = len(tasks) / parent_ratio
ratio = {}
for task in tasks:
ratio.setdefault(task, 0)
ratio[task] += task.weight if hasattr(task, 'weight') else 1
# get percentage
ratio_percent = dict((k, float(v) / divisor) for k, v in six.iteritems(ratio))
task_dict = {}
for locust, ratio in six.iteritems(ratio_percent):
d = {"ratio":ratio}
if inspect.isclass(locust):
if issubclass(locust, Locust):
T = locust.task_set.tasks
elif issubclass(locust, TaskSet):
T = locust.tasks
if total:
d["tasks"] = get_task_ratio_dict(T, total, ratio)
else:
d["tasks"] = get_task_ratio_dict(T, total)
task_dict[locust.__name__] = d
return task_dict
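# Illustrative sketch (added; class names are hypothetical): for two Locust
# classes weighted 3 and 1, get_task_ratio_dict returns a nested dict like
#   {'BrowsingUser': {'ratio': 0.75, 'tasks': {...}},
#    'CheckoutUser': {'ratio': 0.25, 'tasks': {...}}}
# and print_task_ratio renders it as an indented percentage table:
#   75.0   BrowsingUser
#   25.0   CheckoutUser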
|
{
"content_hash": "b446aacad349fab2ad1110a3c40e0d6e",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 93,
"avg_line_length": 31.96078431372549,
"alnum_prop": 0.5779141104294478,
"repo_name": "locustrelease/locust",
"id": "15555341e817771aa3793fcce1fef1e1a669b99d",
"size": "1630",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "locust/inspectlocust.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8303"
},
{
"name": "HTML",
"bytes": "12597"
},
{
"name": "JavaScript",
"bytes": "9689"
},
{
"name": "Makefile",
"bytes": "145"
},
{
"name": "Python",
"bytes": "155987"
},
{
"name": "Shell",
"bytes": "121"
}
],
"symlink_target": ""
}
|
import os
import sys
import time
import inspect
import traceback
saved_path = sys.path[:]
sys.path.append(os.path.dirname(os.path.abspath(inspect.getsourcefile(lambda:0))))
from internal.memcached_connection import MemcachedBinaryConnection
from internal.memcached_connection import STATUS, COMMANDS
mc = MemcachedBinaryConnection("127.0.0.1", iproto.py_con.port)
def iequal(left, right, level = 1):
if (left != right):
tb = traceback.extract_stack()[-(level + 1)]
print("Error on line %s:%d: %s not equal %s" % (tb[0], tb[1],
repr(left), repr(right)))
return False
return True
def issert(stmt, level = 1):
if not bool(stmt):
tb = traceback.extract_stack()[-(level + 1)]
print("Error on line %s:%d: result is False" % (tb[0], tb[1]))
return False
return True
def __check(res, flags, val, level = 0):
return iequal(res.get('flags', -1), flags, level + 1) and \
iequal(res.get('val', val), val, level + 1)
def check(key, flags, val, level = 0):
res = mc.get(key)
__check(res[0], flags, val, level + 1)
print("""#-----------------------------# test expiration #-----------------------------#""")
server.admin("box.space.__mc_memcached:truncate()", silent=True)
stat = mc.stat("reset")
for i in range(10000):
mc.set('key-%d' % i, 'value-%d' % i, expire=1)
stat = mc.stat()
while int(stat.get('evictions', '0')) < 10000:
time.sleep(0.01)
stat = mc.stat()
issert('evictions' in stat)
iequal(int(mc.stat().get('evictions', 0)), 10000)
sys.path = saved_path
|
{
"content_hash": "7bce0ad05d602cfad2e2935b6455f039",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 92,
"avg_line_length": 28.727272727272727,
"alnum_prop": 0.6050632911392405,
"repo_name": "tarantool/memcached",
"id": "50c2698762f13ce3b212a253ce787cdcc5146830",
"size": "1580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/binary/binary-expire.test.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "115401"
},
{
"name": "CMake",
"bytes": "13318"
},
{
"name": "Lua",
"bytes": "36201"
},
{
"name": "Python",
"bytes": "62474"
},
{
"name": "Ragel",
"bytes": "45163"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django import forms
from django.forms import widgets
from django.template import engines
from django.template.loader import select_template
from django.utils.translation import ugettext_lazy as _, ugettext
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.fields import GlossaryField
from shop.conf import app_settings
from .plugin_base import ShopPluginBase
class ShopSearchResultsForm(forms.ModelForm):
def clean(self):
cleaned_data = super(ShopSearchResultsForm, self).clean()
page = self.instance.placeholder.page if self.instance.placeholder_id else None
if page and page.application_urls != 'CatalogSearchApp':
raise ValidationError("This plugin can only be used on a CMS page with an application of type 'Search'.")
return cleaned_data
class ShopSearchResultsPlugin(ShopPluginBase):
name = _("Search Results")
require_parent = True
parent_classes = ('BootstrapColumnPlugin',)
form = ShopSearchResultsForm
cache = False
infinite_scroll = GlossaryField(
widgets.CheckboxInput(),
label=_("Infinite Scroll"),
initial=True,
help_text=_("Shall the search results scroll infinitely?"),
)
def get_render_template(self, context, instance, placeholder):
if instance.placeholder.page.application_urls == 'CatalogSearchApp':
return select_template([
'{}/search/results.html'.format(app_settings.APP_LABEL),
'shop/search/results.html',
])
msg = '<pre class="bg-danger">This {} plugin is used on a CMS page without an application of type "Search".</pre>'
return engines['django'].from_string(msg.format(self.name))
def render(self, context, instance, placeholder):
super(ShopSearchResultsPlugin, self).render(context, instance, placeholder)
context['infinite_scroll'] = bool(instance.glossary.get('infinite_scroll', True))
try:
if context['edit_mode']:
# prevent scrolling while editing
context['data']['next'] = None
finally:
return context
@classmethod
def get_identifier(cls, obj):
if obj.glossary.get('infinite_scroll', True):
return ugettext("Infinite Scroll")
return ugettext("Manual Pagination")
plugin_pool.register_plugin(ShopSearchResultsPlugin)
|
{
"content_hash": "1d9168229f8d82eeaeb276406ef4ad2a",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 122,
"avg_line_length": 38.43076923076923,
"alnum_prop": 0.6837469975980784,
"repo_name": "khchine5/django-shop",
"id": "00f9abba029ebc3dbf833345734a4ca9d0ffda5d",
"size": "2522",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "shop/cascade/search.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7850"
},
{
"name": "HTML",
"bytes": "103955"
},
{
"name": "JavaScript",
"bytes": "59776"
},
{
"name": "Python",
"bytes": "751464"
},
{
"name": "Shell",
"bytes": "583"
}
],
"symlink_target": ""
}
|
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-allauth'
copyright = u'2014, Raymond Penners'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.16.0-dev'
# The full version, including alpha/beta/rc tags.
release = '0.16.0-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-allauthdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-allauth.tex', u'django-allauth Documentation',
u'Raymond Penners', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-allauth', u'django-allauth Documentation',
[u'Raymond Penners'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-allauth', u'django-allauth Documentation',
u'Raymond Penners', 'django-allauth', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
{
"content_hash": "1f94acfb4230a9d43ed02b34433f8638",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 80,
"avg_line_length": 31.506976744186048,
"alnum_prop": 0.7044582226158843,
"repo_name": "grue/django-allauth",
"id": "1feae52de60edbe2f0f8aa3beb341dac2223675a",
"size": "7802",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3578"
},
{
"name": "Python",
"bytes": "478145"
}
],
"symlink_target": ""
}
|
import vim
from os import path
import urllib2
import urllib
import json
import subprocess
import time
import re
server_addr = vim.eval('g:padawan#server_addr')
server_command = vim.eval('g:padawan#server_command')
cli = vim.eval('g:padawan#cli')
composer = vim.eval('g:padawan#composer_command')
timeout = float(vim.eval('g:padawan#timeout'))
padawanPath = path.join(path.dirname(__file__), '..')
class Server:
def start(self):
command = '{0} > {1}/logs/server.log'.format(
server_command,
padawanPath
)
subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
def stop(self):
try:
self.sendRequest('kill', {})
return True
except Exception:
return False
def restart(self):
if self.stop():
self.start()
def sendRequest(self, command, params, data=''):
addr = server_addr + "/"+command+"?" + urllib.urlencode(params)
response = urllib2.urlopen(
addr,
urllib.quote_plus(data),
timeout
)
data = json.load(response)
if "error" in data:
raise ValueError(data["error"])
return data
class Editor:
def prepare(self, message):
return message.replace("'", "''")
def log(self, message):
vim.command("echo '%s'" % self.prepare(message))
def notify(self, message):
vim.command("echom '%s'" % self.prepare(message))
def progress(self, progress):
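        # e.g. progress == 50 renders as: Progress [==========          ] 50%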
bars = int(progress / 5)
barsStr = ''
for i in range(20):
if i < bars:
barsStr += '='
else:
barsStr += ' '
barsStr = '[' + barsStr + ']'
vim.command(
"redraw | echo 'Progress "+barsStr+' '+str(progress)+"%'"
)
return
def error(self, error):
self.notify(error)
def callAfter(self, timeout, callback):
time.sleep(timeout)
while callback():
time.sleep(timeout)
server = Server()
editor = Editor()
pathError = '''The padawan command was not found in your $PATH. Please\
 make sure you have installed the padawan.php package and\
 configured your $PATH'''
class PadawanClient:
def GetCompletion(self, filepath, line_num, column_num, contents):
curPath = self.GetProjectRoot(filepath)
params = {
'filepath': filepath.replace(curPath, ""),
'line': line_num,
'column': column_num,
'path': curPath
}
result = self.DoRequest('complete', params, contents)
if not result:
return {"completion": []}
return result
def SaveIndex(self, filepath):
return self.DoRequest('save', {'filepath': filepath})
def DoRequest(self, command, params, data=''):
try:
return server.sendRequest(command, params, data)
except urllib2.URLError:
editor.error("Padawan.php is not running")
except Exception as e:
            editor.error("Error occurred: {0}".format(e))
return False
def AddPlugin(self, plugin):
composerCommand = composer + ' global require '
command = '{0} {2} && {1} plugin add {2}'.format(
composerCommand,
cli,
plugin
)
stream = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
def OnAdd(retcode):
if not retcode:
server.restart()
editor.notify("Plugin installed")
else:
if retcode == 127:
editor.error(pathError)
editor.error("Plugin installation failed")
def LogAdding():
retcode = stream.poll()
if retcode is not None:
return OnAdd(retcode)
line = stream.stdout.readline()
editor.log(line)
return True
editor.callAfter(1e-4, LogAdding)
def RemovePlugin(self, plugin):
composerCommand = composer + ' global remove'
command = '{0} {1}'.format(
composerCommand,
plugin
)
stream = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
def onRemoved():
subprocess.Popen(
'{0}'.format(
cli + ' plugin remove ' + plugin
),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
).wait()
self.RestartServer()
return editor.notify("Plugin removed")
def LogRemoving():
retcode = stream.poll()
if retcode is not None:
return onRemoved()
line = stream.stdout.readline()
editor.log(line)
return True
editor.callAfter(1e-4, LogRemoving)
def Generate(self, filepath):
curPath = self.GetProjectRoot(filepath)
stream = subprocess.Popen(
'cd ' + curPath + ' && ' + cli + ' generate',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
def onGenerationEnd(retcode):
if retcode > 0:
if retcode == 127:
editor.error(pathError)
else:
                    editor.error("Error occurred, code: {0}".format(str(retcode)))
return
server.restart()
editor.progress(100)
editor.notify("Index generated")
def ProcessGenerationPoll():
retcode = stream.poll()
if retcode is not None:
onGenerationEnd(retcode)
return
line = stream.stdout.readline()
errorMatch = re.search('Error: (.*)', line)
if errorMatch is not None:
retcode = 1
editor.error("{0}".format(
errorMatch.group(1).replace("'", "''")
))
return
match = re.search('Progress: ([0-9]+)', line)
if match is None:
return True
progress = int(match.group(1))
editor.progress(progress)
return True
editor.callAfter(1e-4, ProcessGenerationPoll)
def StartServer(self):
server.start()
def StopServer(self):
server.stop()
def RestartServer(self):
server.restart()
def GetProjectRoot(self, filepath):
curPath = path.dirname(filepath)
while curPath != '/' and not path.exists(
path.join(curPath, 'composer.json')
):
curPath = path.dirname(curPath)
if curPath == '/':
curPath = path.dirname(filepath)
return curPath
client = PadawanClient()
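# Minimal usage sketch (illustrative only: a running padawan server and the
# vim module are assumed, so the position values below are hypothetical):
#   contents = "\n".join(vim.current.buffer)
#   result = client.GetCompletion(vim.current.buffer.name, 10, 4, contents)
#   for entry in result['completion']:
#       editor.log(str(entry))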
|
{
"content_hash": "303890a26d70b3f3a0a7db9e49c9bb0e",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 81,
"avg_line_length": 28.766798418972332,
"alnum_prop": 0.5103050288540808,
"repo_name": "xintron/padawan.vim",
"id": "287a246e151550c9374b881dea1deaa5bf415f44",
"size": "7278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/padawan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7278"
},
{
"name": "VimL",
"bytes": "4472"
}
],
"symlink_target": ""
}
|
"""All the test files for API renderers plugins are imported here."""
# These need to register plugins so, pylint: disable=unused-import
from grr.gui.api_plugins import aff4_test
from grr.gui.api_plugins import artifact_test
from grr.gui.api_plugins import client_test
from grr.gui.api_plugins import config_test
from grr.gui.api_plugins import cron_test
from grr.gui.api_plugins import flow_test
from grr.gui.api_plugins import hunt_test
from grr.gui.api_plugins import output_plugin_test
from grr.gui.api_plugins import reflection_test
from grr.gui.api_plugins import stats_test
from grr.gui.api_plugins import user_test
|
{
"content_hash": "b969ec56647fd3e9298957bd642778f0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 69,
"avg_line_length": 39.125,
"alnum_prop": 0.8019169329073482,
"repo_name": "pombredanne/grr",
"id": "8518601e9ecdd2fdf1d5a93ea4cbf0ba2bae0ac0",
"size": "648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/api_plugins/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "15671"
},
{
"name": "C",
"bytes": "10598"
},
{
"name": "C++",
"bytes": "304580"
},
{
"name": "CMake",
"bytes": "3228"
},
{
"name": "CSS",
"bytes": "13093"
},
{
"name": "Groff",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "103301"
},
{
"name": "JavaScript",
"bytes": "224916"
},
{
"name": "Makefile",
"bytes": "4304"
},
{
"name": "Protocol Buffer",
"bytes": "219063"
},
{
"name": "Python",
"bytes": "5356619"
},
{
"name": "Ruby",
"bytes": "5103"
},
{
"name": "Shell",
"bytes": "48368"
},
{
"name": "Standard ML",
"bytes": "8172"
}
],
"symlink_target": ""
}
|
import time
import json
import re
import os
from datetime import datetime, timedelta
from six.moves.urllib.parse import urlparse # pylint: disable=import-error
from msrest.paging import Paged
from azure.mgmt.recoveryservices.models import Vault, VaultProperties, Sku, SkuName, BackupStorageConfig
from azure.mgmt.recoveryservicesbackup.models import ProtectedItemResource, AzureIaaSComputeVMProtectedItem, \
AzureIaaSClassicComputeVMProtectedItem, ProtectionState, IaasVMBackupRequest, BackupRequestResource, \
IaasVMRestoreRequest, RestoreRequestResource, BackupManagementType, WorkloadType, OperationStatusValues, \
JobStatus, ILRRequestResource, IaasVMILRRegistrationRequest
import azure.cli.core.azlogging as azlogging
from azure.cli.core.util import CLIError
from azure.cli.core.commands.arm import parse_resource_id, is_valid_resource_id
from azure.cli.command_modules.backup._client_factory import vaults_cf, backup_protected_items_cf, \
protection_policies_cf, virtual_machines_cf, recovery_points_cf, protection_containers_cf, \
backup_protectable_items_cf, resources_cf, backup_operation_statuses_cf, job_details_cf, \
protection_container_refresh_operation_results_cf, backup_protection_containers_cf
logger = azlogging.get_az_logger(__name__)
fabric_name = "Azure"
default_policy_name = "DefaultPolicy"
os_windows = 'Windows'
os_linux = 'Linux'
password_offset = 33
password_length = 15
def create_vault(client, vault_name, region, resource_group_name):
vault_sku = Sku(SkuName.standard)
vault_properties = VaultProperties()
vault = Vault(region, sku=vault_sku, properties=vault_properties)
return client.create_or_update(resource_group_name, vault_name, vault, custom_headers=_get_custom_headers())
def list_vaults(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name, custom_headers=_get_custom_headers())
return client.list_by_subscription_id(custom_headers=_get_custom_headers())
def set_backup_properties(client, vault_name, resource_group_name, backup_storage_redundancy):
backup_storage_config = BackupStorageConfig(storage_model_type=backup_storage_redundancy)
client.update(resource_group_name, vault_name, backup_storage_config, custom_headers=_get_custom_headers())
def get_default_policy_for_vm(client, resource_group_name, vault_name):
return show_policy(client, resource_group_name, vault_name, default_policy_name)
def show_policy(client, resource_group_name, vault_name, name):
return client.get(vault_name, resource_group_name, name, custom_headers=_get_custom_headers())
def list_policies(client, resource_group_name, vault_name):
policies = client.list(vault_name, resource_group_name, custom_headers=_get_custom_headers())
return _get_list_from_paged_response(policies)
def list_associated_items_for_policy(client, resource_group_name, vault_name, name):
filter_string = _get_filter_string({
'policyName': name})
items = client.list(vault_name, resource_group_name, filter_string, custom_headers=_get_custom_headers())
return _get_list_from_paged_response(items)
def set_policy(client, resource_group_name, vault_name, policy):
policy_object = _get_policy_from_json(client, policy)
return client.create_or_update(vault_name, resource_group_name, policy_object.name, policy_object,
custom_headers=_get_custom_headers())
def delete_policy(client, resource_group_name, vault_name, name):
client.delete(vault_name, resource_group_name, name, custom_headers=_get_custom_headers())
def show_container(client, name, resource_group_name, vault_name, container_type="AzureIaasVM", status="Registered"):
return _get_one_or_many(_get_containers(client, container_type, status, resource_group_name, vault_name, name))
def list_containers(client, resource_group_name, vault_name, container_type="AzureIaasVM", status="Registered"):
return _get_containers(client, container_type, status, resource_group_name, vault_name)
def enable_protection_for_vm(client, resource_group_name, vault_name, vm, policy_name):
vm_name, vm_rg = _get_resource_name_and_rg(resource_group_name, vm)
vm = virtual_machines_cf().get(vm_rg, vm_name)
vault = vaults_cf(None).get(resource_group_name, vault_name)
policy = show_policy(protection_policies_cf(None), resource_group_name, vault_name, policy_name)
if vm.location != vault.location:
raise CLIError(
"""
            The VM should be in the same location as the Recovery Services vault to enable protection.
""")
if policy.properties.backup_management_type != BackupManagementType.azure_iaas_vm.value:
raise CLIError(
"""
            The policy type should match the workload being protected.
Use the relevant get-default policy command and use it to protect the workload.
""")
# Get protectable item.
protectable_item = _get_protectable_item_for_vm(vault_name, resource_group_name, vm_name, vm_rg)
if protectable_item is None:
raise CLIError(
"""
            The specified Azure virtual machine was not found. Possible causes are:
            1. The VM does not exist.
            2. The VM name or the service name was given with incorrect casing
               (names are case sensitive).
            3. The VM is already protected by this or another vault. Please
               unprotect the VM first and then try to protect it again.
            Please contact Microsoft for further assistance.
""")
# Construct enable protection request object
container_uri = _get_protection_container_uri_from_id(protectable_item.id)
item_uri = _get_protectable_item_uri_from_id(protectable_item.id)
vm_item_properties = _get_vm_item_properties_from_vm_type(vm.type)
vm_item_properties.policy_id = policy.id
vm_item_properties.source_resource_id = protectable_item.properties.virtual_machine_id
vm_item = ProtectedItemResource(properties=vm_item_properties)
# Trigger enable protection and wait for completion
result = client.create_or_update(vault_name, resource_group_name, fabric_name, container_uri, item_uri, vm_item,
raw=True, custom_headers=_get_custom_headers())
return _track_backup_job(result, vault_name, resource_group_name)
def show_item(client, resource_group_name, vault_name, container_name, name, container_type="AzureIaasVM",
item_type="VM"):
items = list_items(client, resource_group_name, vault_name, container_name, container_type, item_type)
return _get_one_or_many([item for item in items if item.properties.friendly_name == name])
def list_items(client, resource_group_name, vault_name, container_name, container_type="AzureIaasVM", item_type="VM"):
filter_string = _get_filter_string({
'backupManagementType': container_type,
'itemType': item_type})
items = client.list(vault_name, resource_group_name, filter_string, custom_headers=_get_custom_headers())
paged_items = _get_list_from_paged_response(items)
container = show_container(backup_protection_containers_cf(None), container_name, resource_group_name, vault_name,
container_type)
return [item for item in paged_items if item.properties.container_name.lower() in container.name.lower()]
def update_policy_for_item(client, resource_group_name, vault_name, container_name, item_name, policy_name,
container_type="AzureIaasVM", item_type="VM"):
# Client factories
backup_protected_items_client = backup_protected_items_cf(None)
# Get objects from JSON files
item = show_item(backup_protected_items_client, resource_group_name, vault_name, container_name, item_name,
container_type, item_type)
policy = show_policy(protection_policies_cf(None), resource_group_name, vault_name, policy_name)
if item.properties.backup_management_type != policy.properties.backup_management_type:
raise CLIError(
"""
            The policy type should match the workload being protected.
Use the relevant get-default policy command and use it to update the policy for the workload.
""")
# Get container and item URIs
container_uri = _get_protection_container_uri_from_id(item.id)
item_uri = _get_protected_item_uri_from_id(item.id)
# Update policy request
vm_item_properties = _get_vm_item_properties_from_vm_id(item.properties.virtual_machine_id)
vm_item_properties.policy_id = policy.id
vm_item_properties.source_resource_id = item.properties.source_resource_id
vm_item = ProtectedItemResource(properties=vm_item_properties)
# Update policy
result = client.create_or_update(vault_name, resource_group_name, fabric_name, container_uri, item_uri, vm_item,
raw=True, custom_headers=_get_custom_headers())
return _track_backup_job(result, vault_name, resource_group_name)
def backup_now(client, resource_group_name, vault_name, container_name, item_name, retain_until,
container_type="AzureIaasVM", item_type="VM"):
item = show_item(backup_protected_items_cf(None), resource_group_name, vault_name, container_name, item_name,
container_type, item_type)
# Get container and item URIs
container_uri = _get_protection_container_uri_from_id(item.id)
item_uri = _get_protected_item_uri_from_id(item.id)
trigger_backup_request = _get_backup_request(item.properties.workload_type, retain_until)
# Trigger backup
result = client.trigger(vault_name, resource_group_name, fabric_name, container_uri, item_uri,
trigger_backup_request, raw=True, custom_headers=_get_custom_headers())
return _track_backup_job(result, vault_name, resource_group_name)
def show_recovery_point(client, resource_group_name, vault_name, container_name, item_name, name, # pylint: disable=redefined-builtin
container_type="AzureIaasVM", item_type="VM"):
item = show_item(backup_protected_items_cf(None), resource_group_name, vault_name, container_name, item_name,
container_type, item_type)
# Get container and item URIs
container_uri = _get_protection_container_uri_from_id(item.id)
item_uri = _get_protected_item_uri_from_id(item.id)
return client.get(vault_name, resource_group_name, fabric_name, container_uri, item_uri, name,
custom_headers=_get_custom_headers())
def list_recovery_points(client, resource_group_name, vault_name, container_name, item_name,
container_type="AzureIaasVM", item_type="VM", start_date=None, end_date=None):
item = show_item(backup_protected_items_cf(None), resource_group_name, vault_name, container_name, item_name,
container_type, item_type)
# Get container and item URIs
container_uri = _get_protection_container_uri_from_id(item.id)
item_uri = _get_protected_item_uri_from_id(item.id)
query_end_date, query_start_date = _get_query_dates(end_date, start_date)
filter_string = _get_filter_string({
'startDate': query_start_date,
'endDate': query_end_date})
# Get recovery points
recovery_points = client.list(vault_name, resource_group_name, fabric_name, container_uri, item_uri, filter_string,
custom_headers=_get_custom_headers())
paged_recovery_points = _get_list_from_paged_response(recovery_points)
return paged_recovery_points
def restore_disks(client, resource_group_name, vault_name, container_name, item_name, rp_name, storage_account):
item = show_item(backup_protected_items_cf(None), resource_group_name, vault_name, container_name, item_name,
"AzureIaasVM", "VM")
vault = vaults_cf(None).get(resource_group_name, vault_name, custom_headers=_get_custom_headers())
vault_location = vault.location
# Get container and item URIs
container_uri = _get_protection_container_uri_from_id(item.id)
item_uri = _get_protected_item_uri_from_id(item.id)
# Construct trigger restore request object
sa_name, sa_rg = _get_resource_name_and_rg(resource_group_name, storage_account)
_storage_account_id = _get_storage_account_id(sa_name, sa_rg)
_source_resource_id = item.properties.source_resource_id
trigger_restore_properties = IaasVMRestoreRequest(create_new_cloud_service=True,
recovery_point_id=rp_name,
recovery_type='RestoreDisks',
region=vault_location,
storage_account_id=_storage_account_id,
source_resource_id=_source_resource_id)
trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)
# Trigger restore
result = client.trigger(vault_name, resource_group_name, fabric_name, container_uri, item_uri, rp_name,
trigger_restore_request, raw=True, custom_headers=_get_custom_headers())
return _track_backup_job(result, vault_name, resource_group_name)
def restore_files_mount_rp(client, resource_group_name, vault_name, container_name, item_name, rp_name):
item = show_item(backup_protected_items_cf(None), resource_group_name, vault_name, container_name, item_name,
"AzureIaasVM", "VM")
# Get container and item URIs
container_uri = _get_protection_container_uri_from_id(item.id)
item_uri = _get_protected_item_uri_from_id(item.id)
# file restore request
_virtual_machine_id = item.properties.virtual_machine_id
file_restore_request_properties = IaasVMILRRegistrationRequest(recovery_point_id=rp_name,
virtual_machine_id=_virtual_machine_id)
file_restore_request = ILRRequestResource(properties=file_restore_request_properties)
recovery_point = recovery_points_cf(None).get(vault_name, resource_group_name, fabric_name, container_uri,
item_uri, rp_name, custom_headers=_get_custom_headers())
if recovery_point.properties.is_instant_ilr_session_active:
recovery_point.properties.renew_existing_registration = True
result = client.provision(vault_name, resource_group_name, fabric_name, container_uri, item_uri, rp_name,
file_restore_request, raw=True, custom_headers=_get_custom_headers())
client_scripts = _track_backup_ilr(result, vault_name, resource_group_name)
if client_scripts[0].os_type == os_windows:
_run_client_script_for_windows(client_scripts)
elif client_scripts[0].os_type == os_linux:
_run_client_script_for_linux(client_scripts)
def restore_files_unmount_rp(client, resource_group_name, vault_name, container_name, item_name, rp_name):
item = show_item(backup_protected_items_cf(None), resource_group_name, vault_name, container_name, item_name,
"AzureIaasVM", "VM")
# Get container and item URIs
container_uri = _get_protection_container_uri_from_id(item.id)
item_uri = _get_protected_item_uri_from_id(item.id)
recovery_point = recovery_points_cf(None).get(vault_name, resource_group_name, fabric_name, container_uri,
item_uri, rp_name, custom_headers=_get_custom_headers())
if recovery_point.properties.is_instant_ilr_session_active:
result = client.revoke(vault_name, resource_group_name, fabric_name, container_uri, item_uri, rp_name,
raw=True, custom_headers=_get_custom_headers())
_track_backup_operation(resource_group_name, result, vault_name)
def disable_protection(client, resource_group_name, vault_name, container_name, item_name, # pylint: disable=unused-argument
container_type="AzureIaasVM", item_type="VM", delete_backup_data=False, **kwargs):
item = show_item(backup_protected_items_cf(None), resource_group_name, vault_name, container_name, item_name,
container_type, item_type)
# Get container and item URIs
container_uri = _get_protection_container_uri_from_id(item.id)
item_uri = _get_protected_item_uri_from_id(item.id)
# Trigger disable protection and wait for completion
if delete_backup_data:
result = client.delete(vault_name, resource_group_name, fabric_name, container_uri, item_uri, raw=True,
custom_headers=_get_custom_headers())
return _track_backup_job(result, vault_name, resource_group_name)
vm_item = _get_disable_protection_request(item)
result = client.create_or_update(vault_name, resource_group_name, fabric_name, container_uri, item_uri, vm_item,
raw=True, custom_headers=_get_custom_headers())
return _track_backup_job(result, vault_name, resource_group_name)
def list_jobs(client, resource_group_name, vault_name, status=None, operation=None, start_date=None, end_date=None):
query_end_date, query_start_date = _get_query_dates(end_date, start_date)
filter_string = _get_filter_string({
'status': status,
'operation': operation,
'startTime': query_start_date,
'endTime': query_end_date})
return _get_list_from_paged_response(client.list(vault_name, resource_group_name, filter_string,
custom_headers=_get_custom_headers()))
def show_job(client, resource_group_name, vault_name, name):
return client.get(vault_name, resource_group_name, name, custom_headers=_get_custom_headers())
def stop_job(client, resource_group_name, vault_name, name):
client.trigger(vault_name, resource_group_name, name, custom_headers=_get_custom_headers())
def wait_for_job(client, resource_group_name, vault_name, name, timeout=None):
logger.warning("Waiting for job '{}' ...".format(name))
start_timestamp = datetime.utcnow()
job_details = client.get(vault_name, resource_group_name, name, custom_headers=_get_custom_headers())
while _job_in_progress(job_details.properties.status):
if timeout:
elapsed_time = datetime.utcnow() - start_timestamp
if elapsed_time.seconds > timeout:
logger.warning("Command timed out while waiting for job '{}'".format(name))
break
job_details = client.get(vault_name, resource_group_name, name, custom_headers=_get_custom_headers())
time.sleep(30)
return job_details
# Client Utilities
def _get_containers(client, container_type, status, resource_group_name, vault_name, container_name=None):
filter_dict = {
'backupManagementType': container_type,
'status': status
}
if container_name:
filter_dict['friendlyName'] = container_name
filter_string = _get_filter_string(filter_dict)
containers = client.list(vault_name, resource_group_name, filter_string, custom_headers=_get_custom_headers())
return _get_list_from_paged_response(containers)
def _get_protectable_item_for_vm(vault_name, vault_rg, vm_name, vm_rg):
protection_containers_client = protection_containers_cf()
protectable_item = _try_get_protectable_item_for_vm(vault_name, vault_rg, vm_name, vm_rg)
if protectable_item is None:
# Protectable item not found. Trigger discovery.
refresh_result = protection_containers_client.refresh(vault_name, vault_rg, fabric_name, raw=True,
custom_headers=_get_custom_headers())
_track_refresh_operation(refresh_result, vault_name, vault_rg)
protectable_item = _try_get_protectable_item_for_vm(vault_name, vault_rg, vm_name, vm_rg)
return protectable_item
def _try_get_protectable_item_for_vm(vault_name, vault_rg, vm_name, vm_rg):
backup_protectable_items_client = backup_protectable_items_cf()
filter_string = _get_filter_string({
'backupManagementType': 'AzureIaasVM'})
protectable_items_paged = backup_protectable_items_client.list(vault_name, vault_rg, filter_string,
custom_headers=_get_custom_headers())
protectable_items = _get_list_from_paged_response(protectable_items_paged)
for protectable_item in protectable_items:
item_vm_name = _get_vm_name_from_vm_id(protectable_item.properties.virtual_machine_id)
item_vm_rg = _get_resource_group_from_id(protectable_item.properties.virtual_machine_id)
if item_vm_name.lower() == vm_name.lower() and item_vm_rg.lower() == vm_rg.lower():
return protectable_item
return None
def _get_backup_request(workload_type, retain_until):
if workload_type == WorkloadType.vm.value:
trigger_backup_properties = IaasVMBackupRequest(recovery_point_expiry_time_in_utc=retain_until)
trigger_backup_request = BackupRequestResource(properties=trigger_backup_properties)
return trigger_backup_request
def _get_storage_account_id(storage_account_name, storage_account_rg):
resources_client = resources_cf()
classic_storage_resource_namespace = 'Microsoft.ClassicStorage'
storage_resource_namespace = 'Microsoft.Storage'
parent_resource_path = 'storageAccounts'
resource_type = ''
classic_api_version = '2015-12-01'
api_version = '2016-01-01'
storage_account = None
try:
storage_account = resources_client.get(storage_account_rg, classic_storage_resource_namespace,
parent_resource_path, resource_type, storage_account_name,
classic_api_version)
except: # pylint: disable=bare-except
storage_account = resources_client.get(storage_account_rg, storage_resource_namespace, parent_resource_path,
resource_type, storage_account_name, api_version)
return storage_account.id
def _get_disable_protection_request(item):
if item.properties.workload_type == WorkloadType.vm.value:
vm_item_properties = _get_vm_item_properties_from_vm_id(item.properties.virtual_machine_id)
vm_item_properties.policy_id = ''
vm_item_properties.protection_state = ProtectionState.protection_stopped
vm_item_properties.source_resource_id = item.properties.source_resource_id
vm_item = ProtectedItemResource(properties=vm_item_properties)
return vm_item
def _get_vm_item_properties_from_vm_type(vm_type):
if vm_type == 'Microsoft.Compute/virtualMachines':
return AzureIaaSComputeVMProtectedItem()
elif vm_type == 'Microsoft.ClassicCompute/virtualMachines':
return AzureIaaSClassicComputeVMProtectedItem()
def _get_vm_item_properties_from_vm_id(vm_id):
if 'Microsoft.Compute/virtualMachines' in vm_id:
return AzureIaaSComputeVMProtectedItem()
elif 'Microsoft.ClassicCompute/virtualMachines' in vm_id:
return AzureIaaSClassicComputeVMProtectedItem()
def _get_associated_vm_item(container_uri, item_uri, resource_group, vault_name):
container_name = container_uri.split(';')[-1]
item_name = item_uri.split(';')[-1]
filter_string = _get_filter_string({
'backupManagementType': BackupManagementType.azure_iaas_vm.value,
'itemType': WorkloadType.vm.value})
items = backup_protected_items_cf(None).list(vault_name, resource_group, filter_string,
custom_headers=_get_custom_headers())
paged_items = _get_list_from_paged_response(items)
filtered_items = [item for item in paged_items
if container_name.lower() in item.properties.container_name.lower() and
item.properties.friendly_name.lower() == item_name.lower()]
item = filtered_items[0]
return item
def _run_executable(file_name):
try:
os.system('{}'.format(file_name))
except: # pylint: disable=bare-except
pass
def _get_host_os():
import platform
return platform.system()
def _remove_password_from_suffix(suffix):
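    # The password occupies the 15 characters that start 33 characters after
    # the last '_' in the suffix (see password_offset/password_length above);
    # it is cut out and returned alongside the remaining suffix.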
password_segment_index = suffix.rfind('_')
password_start_index = password_segment_index + password_offset
password_end_index = password_segment_index + password_offset + password_length
password = suffix[password_start_index: password_end_index]
suffix = suffix[:password_start_index] + suffix[password_end_index:]
return suffix, password
def _get_script_file_name_and_password(script):
suffix, password = _remove_password_from_suffix(script.script_name_suffix)
return suffix + script.script_extension, password
def _run_client_script_for_windows(client_scripts):
windows_script = client_scripts[1]
file_name, password = _get_script_file_name_and_password(windows_script)
# Create File
from six.moves.urllib.request import urlopen # pylint: disable=import-error
import shutil
with urlopen(windows_script.url) as response, open(file_name, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
logger.warning('File downloaded: {}. Use password {}'.format(file_name, password))
def _run_client_script_for_linux(client_scripts):
linux_script = client_scripts[0]
file_name, password = _get_script_file_name_and_password(linux_script)
# Create File
import base64
script_content = base64.b64decode(linux_script.script_content).decode('utf-8')
script_content = script_content.replace('TargetPassword="{}"'.format(password),
'TargetPassword="UserInput012345"') # This is a hack due to bug in script
if _get_host_os() == os_windows:
with open(file_name, 'w', newline='\n') as out_file:
out_file.write(script_content)
elif _get_host_os() == os_linux:
with open(file_name, 'wb') as out_file:
            out_file.write(script_content.encode('utf-8'))  # bytes required for 'wb'
logger.warning('File downloaded: {}. Use password {}'.format(file_name, password))
def _get_custom_headers():
import uuid
return {'x-ms-client-request-id': str(uuid.uuid1()) + '-Cli'}
def _get_resource_name_and_rg(resource_group_name, name_or_id):
if is_valid_resource_id(name_or_id):
id_parts = parse_resource_id(name_or_id)
name = id_parts['name']
resource_group = id_parts['resource_group']
else:
name = name_or_id
resource_group = resource_group_name
return name, resource_group
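# Illustrative behaviour (hypothetical resource ID):
#   _get_resource_name_and_rg('rg1', 'myvm') -> ('myvm', 'rg1')
#   _get_resource_name_and_rg('rg1', '/subscriptions/<sub>/resourceGroups/rg2'
#                                    '/providers/Microsoft.Compute/virtualMachines/myvm')
#   -> ('myvm', 'rg2')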
# Tracking Utilities
def _track_backup_ilr(result, vault_name, resource_group):
operation_status = _track_backup_operation(resource_group, result, vault_name)
if operation_status.properties:
recovery_target = operation_status.properties.recovery_target
return recovery_target.client_scripts
def _track_backup_job(result, vault_name, resource_group):
job_details_client = job_details_cf(None)
operation_status = _track_backup_operation(resource_group, result, vault_name)
if operation_status.properties:
job_id = operation_status.properties.job_id
job_details = job_details_client.get(vault_name, resource_group, job_id, custom_headers=_get_custom_headers())
return job_details
def _track_backup_operation(resource_group, result, vault_name):
backup_operation_statuses_client = backup_operation_statuses_cf()
operation_id = _get_operation_id_from_header(result.response.headers['Azure-AsyncOperation'])
operation_status = backup_operation_statuses_client.get(vault_name, resource_group, operation_id,
custom_headers=_get_custom_headers())
while operation_status.status == OperationStatusValues.in_progress.value:
time.sleep(1)
operation_status = backup_operation_statuses_client.get(vault_name, resource_group, operation_id,
custom_headers=_get_custom_headers())
return operation_status
def _track_refresh_operation(result, vault_name, resource_group):
protection_container_refresh_operation_results_client = protection_container_refresh_operation_results_cf()
operation_id = _get_operation_id_from_header(result.response.headers['Location'])
result = protection_container_refresh_operation_results_client.get(vault_name, resource_group, fabric_name,
operation_id, raw=True,
custom_headers=_get_custom_headers())
while result.response.status_code == 202:
time.sleep(1)
result = protection_container_refresh_operation_results_client.get(vault_name, resource_group, fabric_name,
operation_id, raw=True,
custom_headers=_get_custom_headers())
def _job_in_progress(job_status):
return job_status == JobStatus.in_progress.value or job_status == JobStatus.cancelling.value
# List Utilities
def _get_list_from_paged_response(obj_list):
return list(obj_list) if isinstance(obj_list, Paged) else obj_list
def _get_one_or_many(obj_list):
return obj_list[0] if len(obj_list) == 1 else obj_list
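# e.g. _get_one_or_many([x]) -> x, while [] and [x, y] are returned unchanged.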
def _get_filter_string(filter_dict):
filter_list = []
for k, v in sorted(filter_dict.items()):
filter_segment = None
if isinstance(v, str):
filter_segment = "{} eq '{}'".format(k, v)
elif isinstance(v, datetime):
filter_segment = "{} eq '{}'".format(k, v.strftime('%Y-%m-%d %I:%M:%S %p')) # yyyy-MM-dd hh:mm:ss tt
if filter_segment is not None:
filter_list.append(filter_segment)
filter_string = " and ".join(filter_list)
return None if not filter_string else filter_string
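# Illustrative output:
#   _get_filter_string({'backupManagementType': 'AzureIaasVM', 'status': 'Registered'})
#   -> "backupManagementType eq 'AzureIaasVM' and status eq 'Registered'"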
def _get_query_dates(end_date, start_date):
query_start_date = None
query_end_date = None
if start_date and end_date:
query_start_date = start_date
query_end_date = end_date
elif not start_date and end_date:
query_end_date = end_date
query_start_date = query_end_date - timedelta(days=30)
elif start_date and not end_date:
query_start_date = start_date
query_end_date = query_start_date + timedelta(days=30)
return query_end_date, query_start_date
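# e.g. given only end_date, the window is the 30 days ending at end_date;
# given only start_date, it is the 30 days starting at start_date.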
# JSON Utilities
def _get_container_from_json(client, container):
return _get_object_from_json(client, container, 'ProtectionContainerResource')
def _get_vault_from_json(client, vault):
return _get_object_from_json(client, vault, 'Vault')
def _get_vm_from_json(client, vm):
return _get_object_from_json(client, vm, 'VirtualMachine')
def _get_policy_from_json(client, policy):
return _get_object_from_json(client, policy, 'ProtectionPolicyResource')
def _get_item_from_json(client, item):
return _get_object_from_json(client, item, 'ProtectedItemResource')
def _get_job_from_json(client, job):
return _get_object_from_json(client, job, 'JobResource')
def _get_recovery_point_from_json(client, recovery_point):
return _get_object_from_json(client, recovery_point, 'RecoveryPointResource')
def _get_or_read_json(json_or_file):
json_obj = None
if is_json(json_or_file):
json_obj = json.loads(json_or_file)
elif os.path.exists(json_or_file):
with open(json_or_file) as f:
json_obj = json.load(f)
if json_obj is None:
raise ValueError(
"""
            The value passed should be valid JSON, as produced by the az backup CLI commands.
            Make sure that you use the output of the relevant 'az backup show' command with JSON
            output enabled (pass -o json explicitly) when assigning a value to this variable.
            Take care to edit only the values, and not the keys, within the JSON file or string.
""")
return json_obj
def _get_object_from_json(client, json_or_file, class_name):
# Determine if input is json or file
json_obj = _get_or_read_json(json_or_file)
# Deserialize json to object
param = client._deserialize(class_name, json_obj) # pylint: disable=protected-access
if param is None:
raise ValueError(
"""
            The value passed should be valid JSON, as produced by the az backup CLI commands.
            Make sure that you use the output of the relevant 'az backup show' command with JSON
            output enabled (pass -o json explicitly) when assigning a value to this variable.
            Take care to edit only the values, and not the keys, within the JSON file or string.
""")
return param
def is_json(content):
try:
json.loads(content)
except ValueError:
return False
return True
# ID Utilities
def _get_protection_container_uri_from_id(arm_id):
m = re.search('(?<=protectionContainers/)[^/]+', arm_id)
return m.group(0)
def _get_protectable_item_uri_from_id(arm_id):
m = re.search('(?<=protectableItems/)[^/]+', arm_id)
return m.group(0)
def _get_protected_item_uri_from_id(arm_id):
m = re.search('(?<=protectedItems/)[^/]+', arm_id)
return m.group(0)
def _get_vm_name_from_vm_id(arm_id):
m = re.search('(?<=virtualMachines/)[^/]+', arm_id)
return m.group(0)
def _get_resource_group_from_id(arm_id):
m = re.search('(?<=resourceGroups/)[^/]+', arm_id)
return m.group(0)
def _get_operation_id_from_header(header):
parse_object = urlparse(header)
return parse_object.path.split("/")[-1]
def _get_vault_from_arm_id(arm_id):
m = re.search('(?<=vaults/)[^/]+', arm_id)
return m.group(0)
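# Illustrative behaviour of the regex helpers above (hypothetical ARM ID):
#   arm_id = ('/subscriptions/<sub>/resourceGroups/rg1/providers/'
#             'Microsoft.RecoveryServices/vaults/vault1')
#   _get_vault_from_arm_id(arm_id)      -> 'vault1'
#   _get_resource_group_from_id(arm_id) -> 'rg1'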
|
{
"content_hash": "147f91b8466e7ad45fad3a4883a00cc6",
"timestamp": "",
"source": "github",
"line_count": 766,
"max_line_length": 134,
"avg_line_length": 44.608355091383814,
"alnum_prop": 0.6719344454199591,
"repo_name": "QingChenmsft/azure-cli",
"id": "f38bd46514b3806f0725ef8b495bb9d37a8b3e7f",
"size": "34516",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/command_modules/azure-cli-backup/azure/cli/command_modules/backup/custom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11279"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "JavaScript",
"bytes": "380"
},
{
"name": "Python",
"bytes": "5372365"
},
{
"name": "Shell",
"bytes": "25445"
}
],
"symlink_target": ""
}
|
from robot.libraries.BuiltIn import BuiltIn
class LogLevels(object):
ROBOT_LISTENER_API_VERSION = 2
def __init__(self):
self.ROBOT_LIBRARY_LISTENER = self
self.messages = []
def _log_message(self, msg):
self.messages.append('%s: %s' % (msg['level'], msg['message']))
def logged_messages_should_be(self, *expected):
BuiltIn().should_be_equal('\n'.join(self.messages), '\n'.join(expected))
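# Illustrative use in a Robot Framework suite (this class registers itself as
# its own listener via ROBOT_LIBRARY_LISTENER when imported as a library):
#   *** Settings ***
#   Library    LogLevels.py
#   *** Test Cases ***
#   Example
#       Log    hello    WARN
#       Logged Messages Should Be    WARN: hello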
|
{
"content_hash": "d3c8534abfbb30cc65b6324efa5d3fe6",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 80,
"avg_line_length": 29.4,
"alnum_prop": 0.6303854875283447,
"repo_name": "snyderr/robotframework",
"id": "3d367158dcc9a1e07fce4e52e4cc35b46e6f5ca8",
"size": "441",
"binary": false,
"copies": "6",
"ref": "refs/heads/Robotframework_SkipExecution",
"path": "atest/testdata/test_libraries/as_listener/LogLevels.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "245"
},
{
"name": "CSS",
"bytes": "23646"
},
{
"name": "HTML",
"bytes": "988253"
},
{
"name": "Java",
"bytes": "57542"
},
{
"name": "JavaScript",
"bytes": "163896"
},
{
"name": "Python",
"bytes": "2232719"
},
{
"name": "RobotFramework",
"bytes": "2061354"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
}
|
"""Writes WMT training data from tfds to a TSV file, and generates spm model."""
import csv
import os
import tempfile
from absl import app
from absl import flags
from sentencepiece import SentencePieceTrainer
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
FLAGS = flags.FLAGS
flags.DEFINE_string('tfds_name', 'wmt19_translate/de-en', 'TFDS dataset name.')
flags.DEFINE_string('output_dir', '/tmp/tsv_data', 'Path to the output TSV.')
flags.DEFINE_string('source_language', 'de', 'Source language identifier.')
flags.DEFINE_string('target_language', 'en', 'Target language identifier.')
flags.DEFINE_float(
'split_for_guidance_data',
0.01,
    help='Proportion of training data to set aside for the guidance dataset. Defaults to 1%.'
)
LOGGING_STEPS = 100000
def _read_string_tensor(tensor):
raw_text = tensor.numpy().decode('utf-8')
return ' '.join(raw_text.strip().split())
def write_data_to_tsv(
output_dir,
source_language,
target_language,
tfds_name,
split_for_guidance_data,
):
"""Download data and write it to plain tsv for train, dev, and guide datasets.
Args:
output_dir: The dir to which the data will be written. Dirs will be
recursively created if not already present.
source_language: Source language of the translation data.
target_language: Target language of the translation data.
tfds_name: The name of the desired dataset in tfds. (ie.
wmt19_translate/de-en). See
https://www.tensorflow.org/datasets/catalog/wmt19_translate for more
details.
split_for_guidance_data: How much of the training data to set aside for
guidance dataset. Defaults to 1%. Set to 0 to produce a full training
split.
"""
lang_pair = source_language + target_language
  if not 0 <= split_for_guidance_data < 1:
    raise ValueError('split_for_guidance_data must be in [0, 1): (%s)' %
                     split_for_guidance_data)
output_file_train = os.path.join(
output_dir, '{}_train_{:.0f}percent.tsv'.format(
lang_pair, 100 * (1.0 - split_for_guidance_data)))
output_file_guide = os.path.join(
output_dir,
'{}_guide_{:.0f}percent.tsv'.format(lang_pair,
100 * (split_for_guidance_data)))
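  # e.g. for de->en with the default 0.01 split this yields
  # 'deen_train_99percent.tsv' and 'deen_guide_1percent.tsv'.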
tf.io.gfile.makedirs(os.path.dirname(output_dir))
guide_example_count = 0
train_example_count = 0
print('Writing train output to: %s' % output_file_train)
print('Writing guide output to: %s' % output_file_guide)
with open(output_file_train, 'w') as outfile_train:
with open(output_file_guide, 'w') as outfile_guide:
csv_writer_train = csv.writer(outfile_train, delimiter='\t')
csv_writer_guide = csv.writer(outfile_guide, delimiter='\t')
for num_done_examples, example in enumerate(
tfds.load(tfds_name, split='train')):
if num_done_examples % LOGGING_STEPS == 0:
print('%d train examples done.' % num_done_examples)
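        # With the default split of 0.01, 1 / split_for_guidance_data == 100,
        # so every 100th example (indices 0, 100, 200, ...) goes to the
        # guidance file and the rest go to the training file.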
if split_for_guidance_data > 0 and num_done_examples % (
1 / split_for_guidance_data) == 0:
csv_writer_guide.writerow([
_read_string_tensor(example[source_language]),
_read_string_tensor(example[target_language])
])
guide_example_count += 1
else:
csv_writer_train.writerow([
_read_string_tensor(example[source_language]),
_read_string_tensor(example[target_language])
])
train_example_count += 1
print('Num train examples: %d' % train_example_count)
print('Num guide examples: %d' % guide_example_count)
output_file_dev = os.path.join(output_dir, '{}_dev.tsv'.format(lang_pair))
with open(output_file_dev, 'w') as outfile_dev:
csv_writer_dev = csv.writer(outfile_dev, delimiter='\t')
for num_done_examples, example in enumerate(
tfds.load(tfds_name, split='validation')):
csv_writer_dev.writerow([
_read_string_tensor(example[source_language]),
_read_string_tensor(example[target_language])
])
  print('Num validation examples: %d' % (num_done_examples + 1))
def generate_vocab(
output_dir,
source_language,
target_language,
tfds_name,
):
"""Train a sentencepiece vocab on a portion of the data.
Args:
output_dir: The dir to which the data will be written. Dirs will be
recursively created if not already present.
source_language: Source language of the translation data.
target_language: Target language of the translation data.
tfds_name: The name of the desired dataset in tfds. (ie.
wmt19_translate/de-en). See
https://www.tensorflow.org/datasets/catalog/wmt19_translate for more
details.
"""
tf.io.gfile.makedirs(os.path.dirname(output_dir))
train_ds = tfds.load(tfds_name, split='train')
vocab_file = os.path.join(
output_dir, '{}.32k.spm.model'.format(source_language + target_language))
print('vocab_file %s' % vocab_file)
_train_sentencepiece(
dataset=train_ds,
model_path=vocab_file,
vocab_size=2**15,
data_keys=(source_language, target_language))
def _dump_chars_to_textfile(dataset,
maxchars=int(1e7),
data_keys=('inputs', 'targets')):
"""Write part of a TFDS sentence dataset to lines in a text file.
Args:
dataset: tf.dataset containing string-data.
maxchars: int: approximate number of characters to save from dataset.
data_keys: Tuple[str]: what keys in dataset to dump from.
Returns:
name of temp file with dataset bytes, exact number of characters dumped.
"""
char_count = 0
ds_iter = dataset.as_numpy_iterator()
with tempfile.NamedTemporaryFile(
delete=False, prefix='/tmp/ds_chars') as outfp:
while char_count < maxchars:
example = next(ds_iter)
for k in data_keys:
line = example[k] + b'\n'
char_count += len(line)
outfp.write(line)
return outfp.name, char_count
def _train_sentencepiece(dataset,
model_path,
vocab_size=2**15,
maxchars=int(1e7),
model_type='unigram',
character_coverage=1.0,
data_keys=('inputs', 'targets')):
"""Train SentencePiece tokenizer from subset of tf dataset.
Args:
dataset: tf.dataset
model_path: str: path of model file to save vocab model to.
vocab_size: int: size of vocab tokens to train.
maxchars: int: number of characters to use for sentencepiece training.
model_type: str: type of sentencepiece vocab to train.
character_coverage: amount of characters covered by the model, good defaults
are 0.9995 for languages with rich character set like Japanese or Chinese
and 1.0 for other languages with small character set.
data_keys: Tuple[str]: keys of dataset to use for training.
Returns:
path to the trained sentencepiece vocabulary model.
"""
if model_path.startswith('gs://'):
abs_model_path = model_path
else:
abs_model_path = os.path.abspath(os.path.expanduser(model_path))
fname, _ = _dump_chars_to_textfile(
dataset, maxchars=maxchars, data_keys=data_keys)
with tempfile.NamedTemporaryFile(
delete=False, prefix='/tmp/sp_tmp') as model_fp:
pass # we just want a prefix'd tmp-filename
argstr = ' '.join([
f'--input={fname}', f'--vocab_size={vocab_size}',
f'--character_coverage={character_coverage}',
f'--model_prefix={model_fp.name}', f'--model_type={model_type}'
])
SentencePieceTrainer.Train(argstr)
# Use an intermediate filename that is renamed to the target name to address
# create and fill delays.
copy_rename_path = abs_model_path + '.rntmp'
tf.io.gfile.copy(model_fp.name + '.model', copy_rename_path, overwrite=True)
tf.io.gfile.rename(copy_rename_path, abs_model_path, overwrite=True)
print('copied %s to %s' % (model_fp.name + '.model', abs_model_path))
return abs_model_path
def main(unused_args):
tf.io.gfile.makedirs(FLAGS.output_dir)
print('Generating vocab.')
generate_vocab(
output_dir=FLAGS.output_dir,
source_language=FLAGS.source_language,
target_language=FLAGS.target_language,
tfds_name=FLAGS.tfds_name,
)
print('Saving data.')
write_data_to_tsv(
output_dir=FLAGS.output_dir,
source_language=FLAGS.source_language,
target_language=FLAGS.target_language,
tfds_name=FLAGS.tfds_name,
split_for_guidance_data=FLAGS.split_for_guidance_data,
)
if __name__ == '__main__':
app.run(main)
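# Example invocation (assuming this file is saved as data_tfds_to_tsv.py):
#   python data_tfds_to_tsv.py --tfds_name=wmt19_translate/de-en \
#       --output_dir=/tmp/tsv_data --source_language=de --target_language=en \
#       --split_for_guidance_data=0.01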
|
{
"content_hash": "4c3f372471bb470dae07169745770438",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 88,
"avg_line_length": 36.84615384615385,
"alnum_prop": 0.6565762004175365,
"repo_name": "google-research/google-research",
"id": "51c64ac9c5136b886e1d4c189629eea01bd19511",
"size": "9837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gradient_based_tuning/data_tfds_to_tsv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
from social.app.models.author import Author
class UnknownAuthorSerializer(serializers.Serializer):
"""
Used in input cases where we don't necessarily know about a remote Author yet, so it doesn't make sense
to use a ModelSerializer
"""
id = serializers.URLField()
host = serializers.URLField()
url = serializers.URLField()
displayName = serializers.CharField(
required=False,
allow_blank=True
)
github = serializers.URLField(
required=False,
allow_blank=True
)
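# Illustrative payload this serializer accepts (hypothetical values):
#   s = UnknownAuthorSerializer(data={
#       'id': 'http://remote.example/service/author/123',
#       'host': 'http://remote.example/service/',
#       'url': 'http://remote.example/service/author/123',
#   })
#   s.is_valid()  # -> True; displayName and github are optional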
class AuthorURLSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='service:author-detail', read_only=True, lookup_field='pk')
class Meta:
model = Author
fields = ('url',)
class SimpleAuthorSerializer(serializers.ModelSerializer):
id = serializers.HyperlinkedIdentityField(
view_name='service:author-detail', read_only=True, lookup_field='pk')
host = serializers.CharField(source='node.service_url')
url = serializers.HyperlinkedIdentityField(
view_name='service:author-detail', read_only=True, lookup_field='pk')
class Meta:
model = Author
fields = ('id', 'host', 'displayName', 'url', 'github')
class AuthorSerializer(serializers.ModelSerializer):
id = serializers.HyperlinkedIdentityField(
view_name='service:author-detail', read_only=True, lookup_field='pk')
host = serializers.URLField(source='node.service_url')
url = serializers.HyperlinkedIdentityField(
view_name='service:author-detail', read_only=True, lookup_field='pk')
friends = AuthorURLSerializer(many=True, read_only=True)
firstName = serializers.CharField(source='user.first_name')
lastName = serializers.CharField(source='user.last_name')
email = serializers.CharField(source='user.email')
class Meta:
model = Author
fields = ('id', 'host', 'displayName', 'url', 'friends', 'github', 'firstName', 'lastName', 'email', 'bio',)
|
{
"content_hash": "59795f7ab5df655a18e0b902ad230d40",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 116,
"avg_line_length": 35.08474576271186,
"alnum_prop": 0.6903381642512078,
"repo_name": "TeamAADGT/CMPUT404-project-socialdistribution",
"id": "249f70b6b4c873404ec7c4f0a3f50ee1665bf39e",
"size": "2070",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "service/authors/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2706"
},
{
"name": "HTML",
"bytes": "42308"
},
{
"name": "JavaScript",
"bytes": "15525"
},
{
"name": "Python",
"bytes": "241725"
}
],
"symlink_target": ""
}
|
from contextlib import contextmanager
import os
from unittest import mock
from mako.cmd import cmdline
from mako.testing.assertions import eq_
from mako.testing.assertions import expect_raises
from mako.testing.assertions import expect_raises_message
from mako.testing.config import config
from mako.testing.fixtures import TemplateTest
class CmdTest(TemplateTest):
@contextmanager
def _capture_output_fixture(self, stream="stdout"):
with mock.patch("sys.%s" % stream) as stdout:
yield stdout
def test_stdin_success(self):
with self._capture_output_fixture() as stdout:
with mock.patch(
"sys.stdin",
mock.Mock(read=mock.Mock(return_value="hello world ${x}")),
):
cmdline(["--var", "x=5", "-"])
eq_(stdout.write.mock_calls[0][1][0], "hello world 5")
def test_stdin_syntax_err(self):
with mock.patch(
"sys.stdin", mock.Mock(read=mock.Mock(return_value="${x"))
):
with self._capture_output_fixture("stderr") as stderr:
with expect_raises(SystemExit):
cmdline(["--var", "x=5", "-"])
assert (
"SyntaxException: Expected" in stderr.write.mock_calls[0][1][0]
)
assert "Traceback" in stderr.write.mock_calls[0][1][0]
def test_stdin_rt_err(self):
with mock.patch(
"sys.stdin", mock.Mock(read=mock.Mock(return_value="${q}"))
):
with self._capture_output_fixture("stderr") as stderr:
with expect_raises(SystemExit):
cmdline(["--var", "x=5", "-"])
assert "NameError: Undefined" in stderr.write.mock_calls[0][1][0]
assert "Traceback" in stderr.write.mock_calls[0][1][0]
def test_file_success(self):
with self._capture_output_fixture() as stdout:
cmdline(
[
"--var",
"x=5",
os.path.join(config.template_base, "cmd_good.mako"),
]
)
eq_(stdout.write.mock_calls[0][1][0], "hello world 5")
def test_file_syntax_err(self):
with self._capture_output_fixture("stderr") as stderr:
with expect_raises(SystemExit):
cmdline(
[
"--var",
"x=5",
os.path.join(config.template_base, "cmd_syntax.mako"),
]
)
assert "SyntaxException: Expected" in stderr.write.mock_calls[0][1][0]
assert "Traceback" in stderr.write.mock_calls[0][1][0]
def test_file_rt_err(self):
with self._capture_output_fixture("stderr") as stderr:
with expect_raises(SystemExit):
cmdline(
[
"--var",
"x=5",
os.path.join(config.template_base, "cmd_runtime.mako"),
]
)
assert "NameError: Undefined" in stderr.write.mock_calls[0][1][0]
assert "Traceback" in stderr.write.mock_calls[0][1][0]
def test_file_notfound(self):
with expect_raises_message(
SystemExit, "error: can't find fake.lalala"
):
cmdline(["--var", "x=5", "fake.lalala"])
|
{
"content_hash": "940035677ec6e151c1393d0de11a5847",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 79,
"avg_line_length": 34.96907216494845,
"alnum_prop": 0.527122641509434,
"repo_name": "chromium/chromium",
"id": "785c652304a5f2adc4decfe4e00d10482b97f270",
"size": "3392",
"binary": false,
"copies": "9",
"ref": "refs/heads/main",
"path": "third_party/mako/mako/test/test_cmd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""This module contains classes used for decks."""
import random
from .cards import Card
class Deck(object):
"""
A class which specifies the details of the deck of playing cards.
"""
def __init__(self, packs):
"""
The method used to construct a deck using a given number of packs of playing cards.
:param int packs: The number of packs of 52 playing cards to place in the deck.
        :raises InvalidDeckSize: if the number of packs given is less than 1.
"""
# The cards contained in the deck
self.cards = []
# Check to make sure that the number of packs given is valid
if packs < 1:
raise InvalidDeckSize(packs)
# Fill the deck with cards
suits = [
"Clubs",
"Diamonds",
"Hearts",
"Spades",
]
faces = [
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"Jack",
"Queen",
"King",
"Ace",
]
for x in range(packs):
for suit in suits:
for face in faces:
self.cards.append(Card(suit, face))
def __len__(self):
"""
The method used to get the number of cards in the deck. Overrides the len function output.
:returns int: The number of cards in the deck.
"""
return len(self.cards)
def draw(self):
"""
The method used to draw a random card from the deck.
:returns Card: The card drawn from the deck.
:raises InvalidDeckDraw: if the deck is empty.
"""
# Make sure that the deck is not empty
if len(self.cards) == 0:
raise InvalidDeckDraw
# Draw the card from the deck
card_number = random.randrange(len(self.cards))
return self.cards.pop(card_number)
class InvalidDeckSize(Exception):
"""
    An exception which is raised whenever an invalid number of packs (less than 1) is given
    when constructing a deck.
"""
class InvalidDeckDraw(Exception):
"""
An exception which is raised whenever a card is attempted to be drawn from the deck, but the
deck is empty.
"""
|
{
"content_hash": "ecbe23ff4bb4ad0e74e23e3508555423",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 98,
"avg_line_length": 26.904761904761905,
"alnum_prop": 0.5323008849557522,
"repo_name": "ExcaliburZero/blackjack",
"id": "9c1a09d893756a918260f3fdbcfe816000d65a36",
"size": "2260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blackjack/decks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "331"
},
{
"name": "Python",
"bytes": "50746"
}
],
"symlink_target": ""
}
|
"""
For a set of SWC neurons (subset of dendrites), load them in the model [1] and
calculate stats which can later be used for parameter estimation.
"""
import statistics
import glob
import textwrap
from pprint import pformat
from plucky import merge, plucks
from swc import LineSegment, read_neuron
from model import Segment, DendriticTree, InvalidModelParam
def map_with_stats(fn, argset, verbose=False):
"""Run `fn` over a list of parameters (positional arguments) in `argset`,
calculating stats of all values present in response of each run.
"""
def add_listified(x, y):
if not isinstance(x, list):
x = [x]
if not isinstance(y, list):
y = [y]
return x + y
sums = {}
for args in argset:
measures = fn(*args)
if verbose:
print("Run with args %r measures:" % args, measures)
sums = merge(sums, measures, add_listified, recurse_list=False)
stats = {'meta': {'n_samples': len(argset)}}
for k, v in sums.items():
try:
stats[k] = dict(total=sum(v),
mean=statistics.mean(v),
median=statistics.median(v),
stdev=statistics.stdev(v))
        except Exception:
print('failed for params:', argset[0])
raise InvalidModelParam
return stats
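# Illustrative sketch (not part of the original module): aggregating a single
# measure over three runs.
#
#     stats = map_with_stats(lambda n: {'square': n * n}, [[1], [2], [3]])
#     stats['meta']['n_samples']   # -> 3
#     stats['square']['total']     # -> 1 + 4 + 9 == 14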
def get_apical_linesegments(neuron):
# get the one and only apical dendrite
for c in neuron.root.children:
if c.type == LineSegment.APICAL:
return c
def get_basal_linesegments_set(neuron):
# get all basal dendrites generator (of root line segments)
for c in neuron.root.children:
if c.type == LineSegment.BASAL:
yield c
def build_dendrite_from_linesegments(root_linesegment):
dendrite = DendriticTree()
dendrite.empty()
def trace(ls, parent_segment, parent_order, length=0):
length += ls.length
n_children = len(ls.children)
if n_children == 0:
# this is a terminal segment
segment = Segment(dendrite, parent_order+1, parent_segment)
# TODO: how to split total length we have into initial and elongated?
segment.initial_len = length
dendrite.terminal_segments.add(segment)
return segment
elif n_children == 1:
# intermediate line-segment without branches (invalid in our model),
# is still a segment growing...
return trace(ls.children[0], parent_segment, parent_order, length)
elif n_children == 2:
# branching; finish tracing this segment and fork
# (this is an intermediate segment)
segment = Segment(dendrite, parent_order+1, parent_segment)
segment.initial_len = length
segment.children = [
trace(ls.children[0], segment, segment.order, 0),
trace(ls.children[1], segment, segment.order, 0)
]
dendrite.intermediate_segments.add(segment)
return segment
else:
raise Exception("Invalid LineSegment tree (3-way branch)")
dendrite.root = trace(root_linesegment, parent_segment=None, parent_order=0)
for segment in dendrite.terminal_segments:
segment.update_degree()
return dendrite
def load_dendrite_from_swc(path):
neuron = read_neuron(path)
apical = get_apical_linesegments(neuron)
dendrite = build_dendrite_from_linesegments(apical)
return dendrite
def test_load_dendrite():
# dendrite = load_dendrite_from_swc('../data/smit-rigter-mouse/92-1631.CNG.swc')
dendrite = load_dendrite_from_swc('../data/smit-rigter-mouse/201411.CNG.swc')
print(dendrite.root.pformat())
print("Degree at root:", dendrite.root.degree)
print("Tree asymmetry index:", dendrite.asymmetry_index)
print("Total length:", dendrite.total_length)
print("Stats:", dendrite.stats())
def apical_dendrites_iter(paths):
for path in paths:
neuron = read_neuron(path)
apical_linesegments = get_apical_linesegments(neuron)
dendrite = build_dendrite_from_linesegments(apical_linesegments)
yield dendrite
def neuronset_apical_stats(paths):
dendrites_argset = [[d] for d in apical_dendrites_iter(paths)]
stats = map_with_stats(lambda d: d.stats(), dendrites_argset)
return stats
def basal_dendrites_iter(paths):
for path in paths:
neuron = read_neuron(path)
basal_linesegments = get_basal_linesegments_set(neuron)
for ls in basal_linesegments:
dendrite = build_dendrite_from_linesegments(ls)
yield dendrite
def neuronset_basal_stats(paths):
dendrites_argset = [[d] for d in basal_dendrites_iter(paths)]
stats = map_with_stats(lambda d: d.stats(), dendrites_argset)
return stats
neuronset_paths = {
    # youngest neurons ws* (9 days, 15 neurons)
'young': glob.glob('../data/smit-rigter-mouse/ws*.CNG.swc'),
# middle-aged neurons 20* (60 days, 17 neurons)
'middleage': glob.glob('../data/smit-rigter-mouse/20*.CNG.swc'),
# oldest neurons 92-* (365 days, 19 neurons)
'old': glob.glob('../data/smit-rigter-mouse/92-*.CNG.swc')
}
if __name__ == '__main__':
def print_stats_for_neuronset(paths):
stats = neuronset_apical_stats(paths)
print("Apical dendrites stats ({} neurons, {} dendrites):".format(
len(paths), plucks(stats, 'meta.n_samples')))
print(textwrap.indent(pformat(stats), ' '*4), "\n")
stats = neuronset_basal_stats(paths)
print("Basal dendrites stats ({} neurons, {} dendrites):".format(
len(paths), plucks(stats, 'meta.n_samples')))
print(textwrap.indent(pformat(stats), ' '*4))
print("\n### Youngest neurons (9 days old) ###\n")
print_stats_for_neuronset(neuronset_paths['young'])
print("\n### Middle-aged neurons (60 days old) ###\n")
print_stats_for_neuronset(neuronset_paths['middleage'])
print("\n### Oldest neurons (365 days old) ###\n")
print_stats_for_neuronset(neuronset_paths['old'])
|
{
"content_hash": "5aa337248eeb6c6c37f1df06d327d983",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 84,
"avg_line_length": 35.252873563218394,
"alnum_prop": 0.628953374633192,
"repo_name": "randomir/dendritic-growth-model",
"id": "d3055926ab8d78b826a586fe05952821e0118e7a",
"size": "6188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stats.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26517"
}
],
"symlink_target": ""
}
|
"""
:Author: Juti Noppornpitak <jnopporn@shiroyuki.com>
:Stability: Stable
"""
from bson import ObjectId
from tori.data.serializer import ArraySerializer
from tori.db.exception import ReadOnlyProxyException
from tori.db.metadata.helper import EntityMetadataHelper
class Serializer(ArraySerializer):
""" Object Serializer for Entity
"""
def extra_associations(self, data, stack_depth=0):
if not isinstance(data, object):
raise TypeError('The provided data must be an object')
relational_map = EntityMetadataHelper.extract(data).relational_map if self._is_entity(data) else {}
extra_associations = {}
for name in dir(data):
# Skip all protected/private/reserved properties.
if self._is_preserved_property(name):
continue
guide = self._retrieve_guide(relational_map, name)
# Skip all properties without an associative guide or with reverse mapping or without pseudo association class.
if not guide or guide.inverted_by or not guide.association_class:
continue
property_reference = data.__getattribute__(name)
# Skip all callable properties and non-list properties
if callable(property_reference) or not isinstance(property_reference, list):
continue
# With a valid association class, this property has the many-to-many relationship with the other entity.
extra_associations[name] = []
for destination in property_reference:
extra_associations[name].append(destination.id)
return extra_associations
def encode(self, data, stack_depth=0, convert_object_id_to_str=False):
""" Encode data into dictionary and list.
:param data: the data to encode
:param stack_depth: traversal depth limit
:param convert_object_id_to_str: flag to convert object ID into string
"""
if not isinstance(data, object):
raise TypeError('The provided data must be an object')
returnee = {}
relational_map = EntityMetadataHelper.extract(data).relational_map if self._is_entity(data) else {}
for name in dir(data):
# Skip all protected/private/reserved properties.
if self._is_preserved_property(name):
continue
guide = self._retrieve_guide(relational_map, name)
# Skip all pseudo properties used for reverse mapping.
if guide and guide.inverted_by:
continue
property_reference = data.__getattribute__(name)
is_list = isinstance(property_reference, list)
value = None
# Skip all callable properties
if callable(property_reference):
continue
# For one-to-many relationship, this property relies on the built-in list type.
if is_list:
value = []
for item in property_reference:
value.append(self._process_value(data, item, stack_depth, convert_object_id_to_str))
else:
value = self._process_value(data, property_reference, stack_depth, convert_object_id_to_str)
returnee[name] = value
        # If this is not a pseudo object ID, add the reserved key '_id'
        # holding the value of the 'id' property.
if data.id and not isinstance(data.id, PseudoObjectId):
returnee['_id'] = self._process_value(data, data, stack_depth, convert_object_id_to_str)
return returnee
def _retrieve_guide(self, relational_map, name):
return relational_map[name] if name in relational_map else None
def _is_preserved_property(self, name):
return name[0] == '_' or name == 'id'
def _is_entity(self, data):
return EntityMetadataHelper.hasMetadata(data)
def _process_value(self, data, value, stack_depth, convert_object_id_to_str):
is_proxy = isinstance(value, ProxyObject)
is_document = isinstance(data, object) and self._is_entity(data)
processed_data = value
if value and not self._is_primitive_type(value):
if self._max_depth and stack_depth >= self._max_depth:
processed_data = value.encode('utf-8', 'replace') if self._is_string(value) else value
elif is_proxy or is_document:
processed_data = value.id
if isinstance(processed_data, ObjectId) and convert_object_id_to_str:
processed_data = str(processed_data)
else:
processed_data = self.encode(value, stack_depth + 1)
elif isinstance(value, ObjectId) and convert_object_id_to_str:
processed_data = str(value)
return processed_data
def default_primitive_types(self):
return super(Serializer, self).default_primitive_types() + [PseudoObjectId, ObjectId]
class PseudoObjectId(ObjectId):
""" Pseudo Object ID
This class extends from :class:`bson.objectid.ObjectId`.
This is used to differentiate stored entities and new entities.
"""
def __str__(self):
return 'P-{}'.format(super(PseudoObjectId, self).__str__())
def __repr__(self):
return "PseudoObjectId('%s')" % (str(self),)
class ProxyObject(object):
""" Proxy Collection
This class is designed to only load the entity whenever the data access
is required.
:param session: the managed session
:type session: tori.db.session.Session
:param cls: the class to map the data
:type cls: type
:param object_id: the object ID
:param read_only: the read-only flag
:type read_only: bool
:param cascading_options: the cascading options
:type cascading_options: list or tuple
:param is_reverse_proxy: the reverse proxy flag
:type is_reverse_proxy: bool
"""
def __init__(self, session, cls, object_id, read_only, cascading_options, is_reverse_proxy):
if isinstance(cls, ProxyObject) or not object_id:
raise RuntimeError('Cannot initiate a proxy')
self.__dict__['_class'] = cls
self.__dict__['_collection'] = session.collection(cls)
self.__dict__['_object_id'] = object_id
self.__dict__['_object'] = None
self.__dict__['_read_only'] = read_only
self.__dict__['_cascading_options'] = cascading_options
self.__dict__['_is_reverse_proxy'] = is_reverse_proxy
def __get_object(self):
if not self.__dict__['_object_id']:
raise RuntimeError('Cannot load the proxy')
if not self.__dict__['_object']:
entity = self._collection.get(self.__dict__['_object_id'])
if entity:
self.__dict__['_object'] = entity
return self.__dict__['_object']
def __getattr__(self, item):
if item == '_actual':
return self.__get_object()
#elif item == 'id':
# return self.__dict__['_object_id']
elif item[0] == '_':
return self.__dict__[item]
elif not self.__dict__['_object_id'] or not self.__get_object():
return None
return self.__get_object().__getattribute__(item)
def __setattr__(self, key, value):
if self._read_only:
raise ReadOnlyProxyException('The proxy is read only.')
self.__get_object().__setattr__(key, value)
class ProxyCollection(list):
""" Proxy Collection
This collection is extended from the built-in class :class:`list`,
designed to only load the associated data whenever is required.
:param session: the managed session
:type session: tori.db.session.Session
:param origin: the origin of the association
:type origin: object
:param guide: the relational guide
:type guide: tori.db.mapper.RelatingGuide
.. note:: To replace with criteria and driver
"""
def __init__(self, session, origin, guide):
self._session = session
self._origin = origin
self._guide = guide
self._loaded = False
def reload(self):
""" Reload the data list
.. warning::
This method is **not recommended** to be called directly. Use
:meth:`tori.db.session.Session.refresh` on the owned object
instead.
"""
while len(self):
self.pop(0)
self._loaded = False
self._prepare_list()
def _prepare_list(self):
if self._loaded:
return
self._loaded = True
association_class = self._guide.association_class.cls
collection = self._session.collection(association_class)
if self._guide.inverted_by:
criteria = collection.new_criteria()
criteria.where('destination', self._origin.id)
mapping_list = collection.find(criteria)
self.extend([
ProxyFactory.make(self._session, association.origin, self._guide)
for association in mapping_list
])
return
criteria = {'origin': self._origin.id}
mapping_list = collection.filter(criteria)
self.extend([
ProxyFactory.make(self._session, association.destination, self._guide)
for association in mapping_list
])
def __iter__(self):
self._prepare_list()
return super(ProxyCollection, self).__iter__()
def __len__(self):
self._prepare_list()
return super(ProxyCollection, self).__len__()
def __contains__(self, item):
self._prepare_list()
return super(ProxyCollection, self).__contains__(item)
def __delitem__(self, key):
self._prepare_list()
return super(ProxyCollection, self).__delitem__(key)
def __getitem__(self, item):
self._prepare_list()
return super(ProxyCollection, self).__getitem__(item)
def __getslice__(self, i, j):
self._prepare_list()
return super(ProxyCollection, self).__getslice__(i, j)
def __setitem__(self, key, value):
self._prepare_list()
super(ProxyCollection, self).__setitem__(key, value)
class ProxyFactory(object):
""" Proxy Factory
This factory is to create a proxy object.
:param session: the managed session
:type session: tori.db.session.Session
:param id: the object ID
:param mapping_guide: the relational guide
:type mapping_guide: tori.db.mapper.RelatingGuide
"""
@staticmethod
def make(session, id, mapping_guide):
        is_reverse_proxy = mapping_guide.inverted_by is not None
return ProxyObject(
session,
mapping_guide.target_class,
id,
mapping_guide.read_only or is_reverse_proxy,
mapping_guide.cascading_options,
is_reverse_proxy
)
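# Illustrative sketch (not part of the original module): lazily resolving an
# entity reference. `session` and `guide` are assumed to be a managed
# tori.db.session.Session and a tori.db.mapper.RelatingGuide, respectively.
#
#     proxy = ProxyFactory.make(session, object_id, guide)
#     proxy._object_id   # available immediately, without a database hit
#     proxy.name         # first real attribute access triggers the load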
|
{
"content_hash": "ba4870b7a836a349577d62d7bc136bcb",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 123,
"avg_line_length": 33.73030303030303,
"alnum_prop": 0.5964423681609918,
"repo_name": "shiroyuki/Tori",
"id": "4e628a874cf69702cd3a9f27d78465a6d3ea4387",
"size": "11155",
"binary": false,
"copies": "1",
"ref": "refs/heads/v3",
"path": "tori/db/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "92628"
},
{
"name": "HTML",
"bytes": "571"
},
{
"name": "Makefile",
"bytes": "994"
},
{
"name": "Python",
"bytes": "261193"
},
{
"name": "Shell",
"bytes": "72"
}
],
"symlink_target": ""
}
|
from sqlalchemy.testing import assert_raises, assert_raises_message
import sqlalchemy as sa
from sqlalchemy import Integer, PickleType, String, ForeignKey
import operator
from sqlalchemy import testing
from sqlalchemy.util import OrderedSet
from sqlalchemy.orm import mapper, relationship, create_session, \
PropComparator, synonym, comparable_property, sessionmaker, \
attributes, Session, backref, configure_mappers
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.orm.interfaces import MapperOption
from sqlalchemy.testing import eq_, ne_
from sqlalchemy.testing import fixtures
from test.orm import _fixtures
from sqlalchemy import event, and_, case
from sqlalchemy.testing.schema import Table, Column
class MergeTest(_fixtures.FixtureTest):
"""Session.merge() functionality"""
run_inserts = None
def load_tracker(self, cls, canary=None):
if canary is None:
def canary(instance, *args):
canary.called += 1
canary.called = 0
event.listen(cls, 'load', canary)
return canary
def test_transient_to_pending(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = create_session()
load = self.load_tracker(User)
u = User(id=7, name='fred')
eq_(load.called, 0)
u2 = sess.merge(u)
eq_(load.called, 1)
assert u2 in sess
eq_(u2, User(id=7, name='fred'))
sess.flush()
sess.expunge_all()
eq_(sess.query(User).first(), User(id=7, name='fred'))
def test_transient_to_pending_no_pk(self):
"""test that a transient object with no PK attribute
doesn't trigger a needless load."""
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = create_session()
u = User(name='fred')
def go():
sess.merge(u)
self.assert_sql_count(testing.db, go, 0)
def test_transient_to_pending_collection(self):
User, Address, addresses, users = (self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users)
mapper(User, users, properties={
'addresses': relationship(Address, backref='user',
collection_class=OrderedSet)})
mapper(Address, addresses)
load = self.load_tracker(User)
self.load_tracker(Address, load)
u = User(id=7, name='fred', addresses=OrderedSet([
Address(id=1, email_address='fred1'),
Address(id=2, email_address='fred2'),
]))
eq_(load.called, 0)
sess = create_session()
sess.merge(u)
eq_(load.called, 3)
merged_users = [e for e in sess if isinstance(e, User)]
eq_(len(merged_users), 1)
assert merged_users[0] is not u
sess.flush()
sess.expunge_all()
eq_(sess.query(User).one(),
User(id=7, name='fred', addresses=OrderedSet([
Address(id=1, email_address='fred1'),
Address(id=2, email_address='fred2'),
]))
)
def test_transient_to_persistent(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
load = self.load_tracker(User)
sess = create_session()
u = User(id=7, name='fred')
sess.add(u)
sess.flush()
sess.expunge_all()
eq_(load.called, 0)
_u2 = u2 = User(id=7, name='fred jones')
eq_(load.called, 0)
u2 = sess.merge(u2)
assert u2 is not _u2
eq_(load.called, 1)
sess.flush()
sess.expunge_all()
eq_(sess.query(User).first(), User(id=7, name='fred jones'))
eq_(load.called, 2)
def test_transient_to_persistent_collection(self):
User, Address, addresses, users = (self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users)
mapper(User, users, properties={
'addresses':relationship(Address,
backref='user',
collection_class=OrderedSet,
order_by=addresses.c.id,
cascade="all, delete-orphan")
})
mapper(Address, addresses)
load = self.load_tracker(User)
self.load_tracker(Address, load)
u = User(id=7, name='fred', addresses=OrderedSet([
Address(id=1, email_address='fred1'),
Address(id=2, email_address='fred2'),
]))
sess = create_session()
sess.add(u)
sess.flush()
sess.expunge_all()
eq_(load.called, 0)
u = User(id=7, name='fred', addresses=OrderedSet([
Address(id=3, email_address='fred3'),
Address(id=4, email_address='fred4'),
]))
u = sess.merge(u)
# 1. merges User object. updates into session.
# 2.,3. merges Address ids 3 & 4, saves into session.
# 4.,5. loads pre-existing elements in "addresses" collection,
# marks as deleted, Address ids 1 and 2.
eq_(load.called, 5)
eq_(u,
User(id=7, name='fred', addresses=OrderedSet([
Address(id=3, email_address='fred3'),
Address(id=4, email_address='fred4'),
]))
)
sess.flush()
sess.expunge_all()
eq_(sess.query(User).one(),
User(id=7, name='fred', addresses=OrderedSet([
Address(id=3, email_address='fred3'),
Address(id=4, email_address='fred4'),
]))
)
def test_detached_to_persistent_collection(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address,
backref='user',
order_by=addresses.c.id,
collection_class=OrderedSet)})
mapper(Address, addresses)
load = self.load_tracker(User)
self.load_tracker(Address, load)
a = Address(id=1, email_address='fred1')
u = User(id=7, name='fred', addresses=OrderedSet([
a,
Address(id=2, email_address='fred2'),
]))
sess = create_session()
sess.add(u)
sess.flush()
sess.expunge_all()
u.name='fred jones'
u.addresses.add(Address(id=3, email_address='fred3'))
u.addresses.remove(a)
eq_(load.called, 0)
u = sess.merge(u)
eq_(load.called, 4)
sess.flush()
sess.expunge_all()
eq_(sess.query(User).first(),
User(id=7, name='fred jones', addresses=OrderedSet([
Address(id=2, email_address='fred2'),
Address(id=3, email_address='fred3')])))
def test_unsaved_cascade(self):
"""Merge of a transient entity with two child transient
entities, with a bidirectional relationship."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses),
cascade="all", backref="user")
})
load = self.load_tracker(User)
self.load_tracker(Address, load)
sess = create_session()
u = User(id=7, name='fred')
a1 = Address(email_address='foo@bar.com')
a2 = Address(email_address='hoho@bar.com')
u.addresses.append(a1)
u.addresses.append(a2)
u2 = sess.merge(u)
eq_(load.called, 3)
eq_(u,
User(id=7, name='fred', addresses=[
Address(email_address='foo@bar.com'),
Address(email_address='hoho@bar.com')]))
eq_(u2,
User(id=7, name='fred', addresses=[
Address(email_address='foo@bar.com'),
Address(email_address='hoho@bar.com')]))
sess.flush()
sess.expunge_all()
u2 = sess.query(User).get(7)
eq_(u2, User(id=7, name='fred', addresses=[
Address(email_address='foo@bar.com'),
Address(email_address='hoho@bar.com')]))
eq_(load.called, 6)
def test_merge_empty_attributes(self):
User, dingalings = self.classes.User, self.tables.dingalings
mapper(User, dingalings)
sess = create_session()
# merge empty stuff. goes in as NULL.
# not sure what this was originally trying to
# test.
u1 = sess.merge(User(id=1))
sess.flush()
assert u1.data is None
# save another user with "data"
u2 = User(id=2, data="foo")
sess.add(u2)
sess.flush()
# merge User on u2's pk with
# no "data".
# value isn't whacked from the destination
# dict.
u3 = sess.merge(User(id=2))
eq_(u3.__dict__['data'], "foo")
# make a change.
u3.data = 'bar'
# merge another no-"data" user.
# attribute maintains modified state.
# (usually autoflush would have happened
# here anyway).
u4 = sess.merge(User(id=2))
eq_(u3.__dict__['data'], "bar")
sess.flush()
# and after the flush.
eq_(u3.data, "bar")
# new row.
u5 = User(id=3, data="foo")
sess.add(u5)
sess.flush()
# blow it away from u5, but don't
# mark as expired. so it would just
# be blank.
del u5.data
# the merge adds expiry to the
# attribute so that it loads.
# not sure if I like this - it currently is needed
# for test_pickled:PickleTest.test_instance_deferred_cols
u6 = sess.merge(User(id=3))
assert 'data' not in u6.__dict__
assert u6.data == "foo"
# set it to None. this is actually
# a change so gets preserved.
u6.data = None
u7 = sess.merge(User(id=3))
assert u6.__dict__['data'] is None
def test_merge_irregular_collection(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses': relationship(
mapper(Address, addresses),
backref='user',
collection_class=
attribute_mapped_collection('email_address')),
})
u1 = User(id=7, name='fred')
u1.addresses['foo@bar.com'] = Address(email_address='foo@bar.com')
sess = create_session()
sess.merge(u1)
sess.flush()
assert u1.addresses.keys() == ['foo@bar.com']
def test_attribute_cascade(self):
"""Merge of a persistent entity with two child
persistent entities."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses),
backref='user')
})
load = self.load_tracker(User)
self.load_tracker(Address, load)
sess = create_session()
# set up data and save
u = User(id=7, name='fred', addresses=[
Address(email_address='foo@bar.com'),
Address(email_address = 'hoho@la.com')])
sess.add(u)
sess.flush()
# assert data was saved
sess2 = create_session()
u2 = sess2.query(User).get(7)
eq_(u2,
User(id=7, name='fred', addresses=[
Address(email_address='foo@bar.com'),
Address(email_address='hoho@la.com')]))
# make local changes to data
u.name = 'fred2'
u.addresses[1].email_address = 'hoho@lalala.com'
eq_(load.called, 3)
# new session, merge modified data into session
sess3 = create_session()
u3 = sess3.merge(u)
eq_(load.called, 6)
# ensure local changes are pending
eq_(u3, User(id=7, name='fred2', addresses=[
Address(email_address='foo@bar.com'),
Address(email_address='hoho@lalala.com')]))
# save merged data
sess3.flush()
# assert modified/merged data was saved
sess.expunge_all()
u = sess.query(User).get(7)
eq_(u, User(id=7, name='fred2', addresses=[
Address(email_address='foo@bar.com'),
Address(email_address='hoho@lalala.com')]))
eq_(load.called, 9)
# merge persistent object into another session
sess4 = create_session()
u = sess4.merge(u)
assert len(u.addresses)
for a in u.addresses:
assert a.user is u
def go():
sess4.flush()
# no changes; therefore flush should do nothing
self.assert_sql_count(testing.db, go, 0)
eq_(load.called, 12)
# test with "dontload" merge
sess5 = create_session()
u = sess5.merge(u, load=False)
assert len(u.addresses)
for a in u.addresses:
assert a.user is u
def go():
sess5.flush()
# no changes; therefore flush should do nothing
# but also, load=False wipes out any difference in committed state,
# so no flush at all
self.assert_sql_count(testing.db, go, 0)
eq_(load.called, 15)
sess4 = create_session()
u = sess4.merge(u, load=False)
# post merge change
u.addresses[1].email_address='afafds'
def go():
sess4.flush()
# afafds change flushes
self.assert_sql_count(testing.db, go, 1)
eq_(load.called, 18)
sess5 = create_session()
u2 = sess5.query(User).get(u.id)
eq_(u2.name, 'fred2')
eq_(u2.addresses[1].email_address, 'afafds')
eq_(load.called, 21)
def test_no_relationship_cascade(self):
"""test that merge doesn't interfere with a relationship()
target that specifically doesn't include 'merge' cascade.
"""
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
mapper(Address, addresses, properties={
'user':relationship(User, cascade="save-update")
})
mapper(User, users)
sess = create_session()
u1 = User(name="fred")
a1 = Address(email_address="asdf", user=u1)
sess.add(a1)
sess.flush()
a2 = Address(id=a1.id, email_address="bar", user=User(name="hoho"))
a2 = sess.merge(a2)
sess.flush()
# no expire of the attribute
assert a2.__dict__['user'] is u1
# merge succeeded
eq_(
sess.query(Address).all(),
[Address(id=a1.id, email_address="bar")]
)
# didn't touch user
eq_(
sess.query(User).all(),
[User(name="fred")]
)
def test_one_to_many_cascade(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses))})
load = self.load_tracker(User)
self.load_tracker(Address, load)
sess = create_session()
u = User(name='fred')
a1 = Address(email_address='foo@bar')
a2 = Address(email_address='foo@quux')
u.addresses.extend([a1, a2])
sess.add(u)
sess.flush()
eq_(load.called, 0)
sess2 = create_session()
u2 = sess2.query(User).get(u.id)
eq_(load.called, 1)
u.addresses[1].email_address = 'addr 2 modified'
sess2.merge(u)
eq_(u2.addresses[1].email_address, 'addr 2 modified')
eq_(load.called, 3)
sess3 = create_session()
u3 = sess3.query(User).get(u.id)
eq_(load.called, 4)
u.name = 'also fred'
sess3.merge(u)
eq_(load.called, 6)
eq_(u3.name, 'also fred')
def test_many_to_one_cascade(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
mapper(Address, addresses, properties={
'user':relationship(User)
})
mapper(User, users)
u1 = User(id=1, name="u1")
        a1 = Address(id=1, email_address="a1", user=u1)
u2 = User(id=2, name="u2")
sess = create_session()
sess.add_all([a1, u2])
sess.flush()
a1.user = u2
sess2 = create_session()
a2 = sess2.merge(a1)
eq_(
attributes.get_history(a2, 'user'),
([u2], (), ())
)
assert a2 in sess2.dirty
sess.refresh(a1)
sess2 = create_session()
a2 = sess2.merge(a1, load=False)
eq_(
attributes.get_history(a2, 'user'),
((), [u1], ())
)
assert a2 not in sess2.dirty
def test_many_to_many_cascade(self):
items, Order, orders, order_items, Item = (self.tables.items,
self.classes.Order,
self.tables.orders,
self.tables.order_items,
self.classes.Item)
mapper(Order, orders, properties={
'items':relationship(mapper(Item, items),
secondary=order_items)})
load = self.load_tracker(Order)
self.load_tracker(Item, load)
sess = create_session()
i1 = Item()
i1.description='item 1'
i2 = Item()
i2.description = 'item 2'
o = Order()
o.description = 'order description'
o.items.append(i1)
o.items.append(i2)
sess.add(o)
sess.flush()
eq_(load.called, 0)
sess2 = create_session()
o2 = sess2.query(Order).get(o.id)
eq_(load.called, 1)
o.items[1].description = 'item 2 modified'
sess2.merge(o)
eq_(o2.items[1].description, 'item 2 modified')
eq_(load.called, 3)
sess3 = create_session()
o3 = sess3.query(Order).get(o.id)
        eq_(load.called, 4)
o.description = 'desc modified'
sess3.merge(o)
eq_(load.called, 6)
eq_(o3.description, 'desc modified')
def test_one_to_one_cascade(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'address':relationship(mapper(Address, addresses),
uselist = False)
})
load = self.load_tracker(User)
self.load_tracker(Address, load)
sess = create_session()
u = User()
u.id = 7
u.name = "fred"
a1 = Address()
a1.email_address='foo@bar.com'
u.address = a1
sess.add(u)
sess.flush()
eq_(load.called, 0)
sess2 = create_session()
u2 = sess2.query(User).get(7)
eq_(load.called, 1)
u2.name = 'fred2'
u2.address.email_address = 'hoho@lalala.com'
eq_(load.called, 2)
u3 = sess.merge(u2)
eq_(load.called, 2)
assert u3 is u
def test_value_to_none(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'address':relationship(mapper(Address, addresses),
uselist = False, backref='user')
})
sess = sessionmaker()()
u = User(id=7, name="fred",
address=Address(id=1, email_address='foo@bar.com'))
sess.add(u)
sess.commit()
sess.close()
u2 = User(id=7, name=None, address=None)
u3 = sess.merge(u2)
assert u3.name is None
assert u3.address is None
sess.close()
a1 = Address(id=1, user=None)
a2 = sess.merge(a1)
assert a2.user is None
def test_transient_no_load(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session()
u = User()
assert_raises_message(sa.exc.InvalidRequestError,
"load=False option does not support",
sess.merge, u, load=False)
def test_no_load_with_backrefs(self):
"""load=False populates relationships in both
directions without requiring a load"""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses),
backref='user')
})
u = User(id=7, name='fred', addresses=[
Address(email_address='ad1'),
Address(email_address='ad2')])
sess = create_session()
sess.add(u)
sess.flush()
sess.close()
assert 'user' in u.addresses[1].__dict__
sess = create_session()
u2 = sess.merge(u, load=False)
assert 'user' in u2.addresses[1].__dict__
eq_(u2.addresses[1].user, User(id=7, name='fred'))
sess.expire(u2.addresses[1], ['user'])
assert 'user' not in u2.addresses[1].__dict__
sess.close()
sess = create_session()
u = sess.merge(u2, load=False)
assert 'user' not in u.addresses[1].__dict__
eq_(u.addresses[1].user, User(id=7, name='fred'))
def test_dontload_with_eager(self):
"""
This test illustrates that with load=False, we can't just copy
the committed_state of the merged instance over; since it
references collection objects which themselves are to be merged.
This committed_state would instead need to be piecemeal
'converted' to represent the correct objects. However, at the
moment I'd rather not support this use case; if you are merging
with load=False, you're typically dealing with caching and the
        merged objects shouldn't be 'dirty'.
"""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses))
})
sess = create_session()
u = User()
u.id = 7
u.name = "fred"
a1 = Address()
a1.email_address='foo@bar.com'
u.addresses.append(a1)
sess.add(u)
sess.flush()
sess2 = create_session()
u2 = sess2.query(User).\
options(sa.orm.joinedload('addresses')).get(7)
sess3 = create_session()
u3 = sess3.merge(u2, load=False)
def go():
sess3.flush()
self.assert_sql_count(testing.db, go, 0)
def test_no_load_disallows_dirty(self):
"""load=False doesnt support 'dirty' objects right now
(see test_no_load_with_eager()). Therefore lets assert it.
"""
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session()
u = User()
u.id = 7
u.name = "fred"
sess.add(u)
sess.flush()
u.name = 'ed'
sess2 = create_session()
try:
sess2.merge(u, load=False)
assert False
except sa.exc.InvalidRequestError, e:
assert "merge() with load=False option does not support "\
"objects marked as 'dirty'. flush() all changes on "\
"mapped instances before merging with load=False." \
in str(e)
u2 = sess2.query(User).get(7)
sess3 = create_session()
u3 = sess3.merge(u2, load=False)
assert not sess3.dirty
def go():
sess3.flush()
self.assert_sql_count(testing.db, go, 0)
def test_no_load_sets_backrefs(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses),
backref='user')})
sess = create_session()
u = User()
u.id = 7
u.name = "fred"
a1 = Address()
a1.email_address='foo@bar.com'
u.addresses.append(a1)
sess.add(u)
sess.flush()
assert u.addresses[0].user is u
sess2 = create_session()
u2 = sess2.merge(u, load=False)
assert not sess2.dirty
def go():
assert u2.addresses[0].user is u2
self.assert_sql_count(testing.db, go, 0)
def test_no_load_preserves_parents(self):
"""Merge with load=False does not trigger a 'delete-orphan'
operation.
merge with load=False sets attributes without using events.
this means the 'hasparent' flag is not propagated to the newly
merged instance. in fact this works out OK, because the
'_state.parents' collection on the newly merged instance is
empty; since the mapper doesn't see an active 'False' setting in
this collection when _is_orphan() is called, it does not count
as an orphan (i.e. this is the 'optimistic' logic in
mapper._is_orphan().)
"""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses),
backref='user',
cascade="all, delete-orphan")})
sess = create_session()
u = User()
u.id = 7
u.name = "fred"
a1 = Address()
a1.email_address='foo@bar.com'
u.addresses.append(a1)
sess.add(u)
sess.flush()
assert u.addresses[0].user is u
sess2 = create_session()
u2 = sess2.merge(u, load=False)
assert not sess2.dirty
a2 = u2.addresses[0]
a2.email_address='somenewaddress'
assert not sa.orm.object_mapper(a2)._is_orphan(
sa.orm.attributes.instance_state(a2))
sess2.flush()
sess2.expunge_all()
eq_(sess2.query(User).get(u2.id).addresses[0].email_address,
'somenewaddress')
# this use case is not supported; this is with a pending Address
        # on the pre-merged object, and we currently don't support
# 'dirty' objects being merged with load=False. in this case,
# the empty '_state.parents' collection would be an issue, since
# the optimistic flag is False in _is_orphan() for pending
# instances. so if we start supporting 'dirty' with load=False,
# this test will need to pass
sess = create_session()
u = sess.query(User).get(7)
u.addresses.append(Address())
sess2 = create_session()
try:
u2 = sess2.merge(u, load=False)
assert False
# if load=False is changed to support dirty objects, this code
# needs to pass
a2 = u2.addresses[0]
a2.email_address='somenewaddress'
assert not sa.orm.object_mapper(a2)._is_orphan(
sa.orm.attributes.instance_state(a2))
sess2.flush()
sess2.expunge_all()
eq_(sess2.query(User).get(u2.id).addresses[0].email_address,
'somenewaddress')
except sa.exc.InvalidRequestError, e:
assert "load=False option does not support" in str(e)
def test_synonym_comparable(self):
users = self.tables.users
class User(object):
class Comparator(PropComparator):
pass
def _getValue(self):
return self._value
def _setValue(self, value):
setattr(self, '_value', value)
value = property(_getValue, _setValue)
mapper(User, users, properties={
'uid':synonym('id'),
'foobar':comparable_property(User.Comparator,User.value),
})
sess = create_session()
u = User()
u.name = 'ed'
sess.add(u)
sess.flush()
sess.expunge(u)
sess.merge(u)
def test_cascade_doesnt_blowaway_manytoone(self):
"""a merge test that was fixed by [ticket:1202]"""
User, Address, addresses, users = (self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users)
s = create_session(autoflush=True, autocommit=False)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses),
backref='user')})
a1 = Address(user=s.merge(User(id=1, name='ed')), email_address='x')
before_id = id(a1.user)
a2 = Address(user=s.merge(User(id=1, name='jack')),
email_address='x')
after_id = id(a1.user)
other_id = id(a2.user)
eq_(before_id, other_id)
eq_(after_id, other_id)
eq_(before_id, after_id)
eq_(a1.user, a2.user)
def test_cascades_dont_autoflush(self):
User, Address, addresses, users = (self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users)
sess = create_session(autoflush=True, autocommit=False)
m = mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses),
backref='user')})
user = User(id=8, name='fred',
addresses=[Address(email_address='user')])
merged_user = sess.merge(user)
assert merged_user in sess.new
sess.flush()
assert merged_user not in sess.new
def test_cascades_dont_autoflush_2(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address,
backref='user',
cascade="all, delete-orphan")
})
mapper(Address, addresses)
u = User(id=7, name='fred', addresses=[
Address(id=1, email_address='fred1'),
])
sess = create_session(autoflush=True, autocommit=False)
sess.add(u)
sess.commit()
sess.expunge_all()
u = User(id=7, name='fred', addresses=[
Address(id=1, email_address='fred1'),
Address(id=2, email_address='fred2'),
])
sess.merge(u)
assert sess.autoflush
sess.commit()
def test_dont_expire_pending(self):
"""test that pending instances aren't expired during a merge."""
users, User = self.tables.users, self.classes.User
mapper(User, users)
u = User(id=7)
sess = create_session(autoflush=True, autocommit=False)
u = sess.merge(u)
assert not bool(attributes.instance_state(u).expired_attributes)
def go():
eq_(u.name, None)
self.assert_sql_count(testing.db, go, 0)
def test_option_state(self):
"""test that the merged takes on the MapperOption characteristics
of that which is merged.
"""
users, User = self.tables.users, self.classes.User
class Option(MapperOption):
propagate_to_loaders = True
opt1, opt2 = Option(), Option()
sess = sessionmaker()()
umapper = mapper(User, users)
sess.add_all([
User(id=1, name='u1'),
User(id=2, name='u2'),
])
sess.commit()
sess2 = sessionmaker()()
s2_users = sess2.query(User).options(opt2).all()
# test 1. no options are replaced by merge options
sess = sessionmaker()()
s1_users = sess.query(User).all()
for u in s1_users:
ustate = attributes.instance_state(u)
eq_(ustate.load_path, ())
eq_(ustate.load_options, set())
for u in s2_users:
sess.merge(u)
for u in s1_users:
ustate = attributes.instance_state(u)
eq_(ustate.load_path.path, (umapper, ))
eq_(ustate.load_options, set([opt2]))
# test 2. present options are replaced by merge options
sess = sessionmaker()()
s1_users = sess.query(User).options(opt1).all()
for u in s1_users:
ustate = attributes.instance_state(u)
eq_(ustate.load_path.path, (umapper, ))
eq_(ustate.load_options, set([opt1]))
for u in s2_users:
sess.merge(u)
for u in s1_users:
ustate = attributes.instance_state(u)
eq_(ustate.load_path.path, (umapper, ))
eq_(ustate.load_options, set([opt2]))
class M2ONoUseGetLoadingTest(fixtures.MappedTest):
"""Merge a one-to-many. The many-to-one on the other side is set up
so that use_get is False. See if skipping the "m2o" merge
vs. doing it saves on SQL calls.
"""
@classmethod
def define_tables(cls, metadata):
Table('user', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
)
Table('address', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('user_id', Integer, ForeignKey('user.id')),
Column('email', String(50)),
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Address(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
User, Address = cls.classes.User, cls.classes.Address
user, address = cls.tables.user, cls.tables.address
mapper(User, user, properties={
'addresses':relationship(Address, backref=
backref('user',
# needlessly complex primaryjoin so that the
# use_get flag is False
primaryjoin=and_(
user.c.id==address.c.user_id,
user.c.id==user.c.id
)
)
)
})
mapper(Address, address)
configure_mappers()
assert Address.user.property._use_get is False
@classmethod
def insert_data(cls):
User, Address = cls.classes.User, cls.classes.Address
s = Session()
s.add_all([
User(id=1, name='u1', addresses=[Address(id=1, email='a1'),
Address(id=2, email='a2')])
])
s.commit()
# "persistent" - we get at an Address that was already present.
# With the "skip bidirectional" check removed, the "set" emits SQL
# for the "previous" version in any case,
# address.user_id is 1, you get a load.
def test_persistent_access_none(self):
User, Address = self.classes.User, self.classes.Address
s = Session()
def go():
u1 = User(id=1,
                addresses=[Address(id=1), Address(id=2)]
)
u2 = s.merge(u1)
self.assert_sql_count(testing.db, go, 2)
def test_persistent_access_one(self):
User, Address = self.classes.User, self.classes.Address
s = Session()
def go():
u1 = User(id=1,
                addresses=[Address(id=1), Address(id=2)]
)
u2 = s.merge(u1)
a1 = u2.addresses[0]
assert a1.user is u2
self.assert_sql_count(testing.db, go, 3)
def test_persistent_access_two(self):
User, Address = self.classes.User, self.classes.Address
s = Session()
def go():
u1 = User(id=1,
                addresses=[Address(id=1), Address(id=2)]
)
u2 = s.merge(u1)
a1 = u2.addresses[0]
assert a1.user is u2
a2 = u2.addresses[1]
assert a2.user is u2
self.assert_sql_count(testing.db, go, 4)
# "pending" - we get at an Address that is new- user_id should be
# None. But in this case the set attribute on the forward side
# already sets the backref. commenting out the "skip bidirectional"
# check emits SQL again for the other two Address objects already
# persistent.
def test_pending_access_one(self):
User, Address = self.classes.User, self.classes.Address
s = Session()
def go():
u1 = User(id=1,
                addresses=[Address(id=1), Address(id=2),
Address(id=3, email='a3')]
)
u2 = s.merge(u1)
a3 = u2.addresses[2]
assert a3.user is u2
self.assert_sql_count(testing.db, go, 3)
def test_pending_access_two(self):
User, Address = self.classes.User, self.classes.Address
s = Session()
def go():
u1 = User(id=1,
                addresses=[Address(id=1), Address(id=2),
Address(id=3, email='a3')]
)
u2 = s.merge(u1)
a3 = u2.addresses[2]
assert a3.user is u2
a2 = u2.addresses[1]
assert a2.user is u2
self.assert_sql_count(testing.db, go, 5)
class MutableMergeTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table("data", metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', PickleType(comparator=operator.eq))
)
@classmethod
def setup_classes(cls):
class Data(cls.Basic):
pass
def test_list(self):
Data, data = self.classes.Data, self.tables.data
mapper(Data, data)
sess = sessionmaker()()
d = Data(data=["this", "is", "a", "list"])
sess.add(d)
sess.commit()
d2 = Data(id=d.id, data=["this", "is", "another", "list"])
d3 = sess.merge(d2)
eq_(d3.data, ["this", "is", "another", "list"])
class CompositeNullPksTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table("data", metadata,
Column('pk1', String(10), primary_key=True),
Column('pk2', String(10), primary_key=True),
)
@classmethod
def setup_classes(cls):
class Data(cls.Basic):
pass
def test_merge_allow_partial(self):
Data, data = self.classes.Data, self.tables.data
mapper(Data, data)
sess = sessionmaker()()
d1 = Data(pk1="someval", pk2=None)
def go():
return sess.merge(d1)
self.assert_sql_count(testing.db, go, 1)
def test_merge_disallow_partial(self):
Data, data = self.classes.Data, self.tables.data
mapper(Data, data, allow_partial_pks=False)
sess = sessionmaker()()
d1 = Data(pk1="someval", pk2=None)
def go():
return sess.merge(d1)
self.assert_sql_count(testing.db, go, 0)
class LoadOnPendingTest(fixtures.MappedTest):
"""Test interaction of merge() with load_on_pending relationships"""
@classmethod
def define_tables(cls, metadata):
rocks_table = Table("rocks", metadata,
Column("id", Integer, primary_key=True),
Column("description", String(10)),
)
bugs_table = Table("bugs", metadata,
Column("id", Integer, primary_key=True),
Column("rockid", Integer, ForeignKey('rocks.id')),
)
@classmethod
def setup_classes(cls):
class Rock(cls.Basic, fixtures.ComparableEntity):
pass
class Bug(cls.Basic, fixtures.ComparableEntity):
pass
def _setup_delete_orphan_o2o(self):
mapper(self.classes.Rock, self.tables.rocks,
properties={'bug': relationship(self.classes.Bug,
cascade='all,delete-orphan',
load_on_pending=True,
uselist=False)
})
mapper(self.classes.Bug, self.tables.bugs)
self.sess = sessionmaker()()
def _merge_delete_orphan_o2o_with(self, bug):
# create a transient rock with passed bug
r = self.classes.Rock(id=0, description='moldy')
r.bug = bug
m = self.sess.merge(r)
# we've already passed ticket #2374 problem since merge() returned,
# but for good measure:
assert m is not r
        eq_(m, r)
def test_merge_delete_orphan_o2o_none(self):
"""one to one delete_orphan relationships marked load_on_pending
should be able to merge() with attribute None"""
self._setup_delete_orphan_o2o()
self._merge_delete_orphan_o2o_with(None)
def test_merge_delete_orphan_o2o(self):
"""one to one delete_orphan relationships marked load_on_pending
should be able to merge()"""
self._setup_delete_orphan_o2o()
self._merge_delete_orphan_o2o_with(self.classes.Bug(id=1))
class PolymorphicOnTest(fixtures.MappedTest):
"""Test merge() of polymorphic object when polymorphic_on
isn't a Column"""
@classmethod
def define_tables(cls, metadata):
Table('employees', metadata,
Column('employee_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(1), nullable=False),
Column('data', String(50)),
)
@classmethod
def setup_classes(cls):
class Employee(cls.Basic, fixtures.ComparableEntity):
pass
class Manager(Employee):
pass
class Engineer(Employee):
pass
def _setup_polymorphic_on_mappers(self):
employee_mapper = mapper(self.classes.Employee,
self.tables.employees,
polymorphic_on=case(value=self.tables.employees.c.type,
whens={
'E': 'employee',
'M': 'manager',
'G': 'engineer',
'R': 'engineer',
}),
polymorphic_identity='employee')
mapper(self.classes.Manager, inherits=employee_mapper,
polymorphic_identity='manager')
mapper(self.classes.Engineer, inherits=employee_mapper,
polymorphic_identity='engineer')
self.sess = sessionmaker()()
def test_merge_polymorphic_on(self):
"""merge() should succeed with a polymorphic object even when
polymorphic_on is not a Column
"""
self._setup_polymorphic_on_mappers()
m = self.classes.Manager(employee_id=55, type='M',
data='original data')
self.sess.add(m)
self.sess.commit()
self.sess.expunge_all()
m = self.classes.Manager(employee_id=55, data='updated data')
merged = self.sess.merge(m)
# we've already passed ticket #2449 problem since
# merge() returned, but for good measure:
assert m is not merged
        eq_(m, merged)
|
{
"content_hash": "7601b8485a05a1eabb3a34f6e01cc7fe",
"timestamp": "",
"source": "github",
"line_count": 1413,
"max_line_length": 76,
"avg_line_length": 32.35527246992215,
"alnum_prop": 0.5323067500765563,
"repo_name": "rclmenezes/sqlalchemy",
"id": "e1474f39b1284d789dd1678161c15cf482a2097e",
"size": "45718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/orm/test_merge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "38103"
},
{
"name": "CSS",
"bytes": "7760"
},
{
"name": "JavaScript",
"bytes": "244"
},
{
"name": "Makefile",
"bytes": "7072"
},
{
"name": "Python",
"bytes": "7243712"
},
{
"name": "TeX",
"bytes": "13927"
}
],
"symlink_target": ""
}
|
__author__ = 'Lynch Lee'
|
{
"content_hash": "21975379da77228490cc62c9c3453f50",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 24,
"avg_line_length": 24,
"alnum_prop": 0.5833333333333334,
"repo_name": "easemob/emchat-server-examples",
"id": "986bbd123a94b15338a5c564776f9e98ed467c60",
"size": "66",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emchat-server-python/emchat/entities/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "4751"
},
{
"name": "C#",
"bytes": "9970"
},
{
"name": "Go",
"bytes": "5895"
},
{
"name": "Java",
"bytes": "67522"
},
{
"name": "JavaScript",
"bytes": "41473"
},
{
"name": "PHP",
"bytes": "69444"
},
{
"name": "Python",
"bytes": "20324"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import os
import click
from zhihudaily.crawler import Crawler
from zhihudaily.app import create_app
from zhihudaily.configs import DevelopConfig, ProductionConfig
CONFIG = (ProductionConfig if os.environ.get('FLASK_APP_ENV') == 'production'
else DevelopConfig)
app = create_app(CONFIG)
crawler = Crawler()
@app.cli.command()
@click.option('--num', '-n', default=10)
@click.option('--all', is_flag=True)
def init_db(num, all):
"""init database.
\b
:param num: int, the number of daily news to fetch.
:param all: boolean, fetch all the news or not.
"""
if all:
crawler.init_database('all')
else:
crawler.init_database(num)
@app.cli.command()
def update_news():
"""Fetch today's latest news and save to database."""
crawler.daily_update()
@app.cli.command()
@click.option('--range', '-r', default=10)
@click.option('--all', is_flag=True)
def check_news(range, all):
"""check data integrity.
\b
:param range: int, the range of days to check
:param all: boolean, check all the data integrity or not.
"""
if all:
crawler.check_integrity('all')
else:
crawler.check_integrity(range)
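# Example invocations (a sketch, assuming FLASK_APP points at this module;
# depending on the installed Click version the command names may be exposed
# with dashes instead of underscores):
#
#     flask init_db --num 20
#     flask update_news
#     flask check_news --all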
|
{
"content_hash": "3cc6c5989a1f4ac8be61771f82ab55ae",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 23.245283018867923,
"alnum_prop": 0.6599025974025974,
"repo_name": "lord63/zhihudaily",
"id": "793a02fde9f570725dcfe389acbe9ea1030926b0",
"size": "1279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20269"
},
{
"name": "HTML",
"bytes": "7425"
},
{
"name": "JavaScript",
"bytes": "2188"
},
{
"name": "Python",
"bytes": "19685"
}
],
"symlink_target": ""
}
|
import cv2
import sys
# Get user supplied values
imagePath = sys.argv[1]
cascPath = sys.argv[2]
# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascPath)
# Read the image
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect faces in the image
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.01,
minNeighbors=3,
minSize=(15, 15),
maxSize=(30, 30),
    flags=cv2.CASCADE_SCALE_IMAGE
)
print "Found {0} faces!".format(len(faces))
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow("Faces found", image)
cv2.waitKey(0)
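# Example invocation (a sketch; the cascade XML ships with OpenCV, e.g.
# haarcascade_frontalface_default.xml -- adjust the path to your install):
#
#     python harrcascadeface_detector.py group_photo.jpg haarcascade_frontalface_default.xml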
|
{
"content_hash": "1e7e839fcc023f47a6efb2271f064ece",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 60,
"avg_line_length": 21.40625,
"alnum_prop": 0.6846715328467153,
"repo_name": "eugenekolo/EC500",
"id": "08189a183b33f9a8a06230b5a031a23a888409f4",
"size": "685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/harrcascadeface_detector.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7817"
},
{
"name": "Shell",
"bytes": "1293"
}
],
"symlink_target": ""
}
|
from SDSURLS import app
import config
if __name__ == "__main__":
from wsgiref import simple_server
httpd = simple_server.make_server(config.host, config.port, app)
httpd.serve_forever()
|
{
"content_hash": "64cd1612bb97981f7fbb13db263c147f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 68,
"avg_line_length": 28.285714285714285,
"alnum_prop": 0.696969696969697,
"repo_name": "EpicDavi/SDSURLS",
"id": "4e8a30aa82b63239131822ce88244557c2c602dc",
"size": "198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "880"
},
{
"name": "Python",
"bytes": "3680"
}
],
"symlink_target": ""
}
|
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
projectPath = os.path.join(os.getcwd(), '..', 'raxas')
sys.path.insert(0, os.path.abspath(projectPath))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RAX-AutoScaler'
copyright = u'2016, Simon Mirco, Simone Soldateschi, Suraj Thapa, Teddy Schmitz, Jon Walton'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'RAX-AutoScalerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'RAX-AutoScaler.tex', u'RAX-AutoScaler Documentation',
u'Simon Mirco, Simone Soldateschi, Suraj Thapa, Teddy Schmitz, Jon Walton', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rax-autoscaler', u'RAX-AutoScaler Documentation',
[u'Simon Mirco, Simone Soldateschi, Suraj Thapa, Teddy Schmitz, Jon Walton'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'RAX-AutoScaler', u'RAX-AutoScaler Documentation',
u'Simon Mirco, Simone Soldateschi, Suraj Thapa, Teddy Schmitz, Jon Walton', 'RAX-AutoScaler', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'RAX-AutoScaler'
epub_author = u'Simon Mirco, Simone Soldateschi, Suraj Thapa, Teddy Schmitz, Jon Walton'
epub_publisher = u'Simon Mirco, Simone Soldateschi, Suraj Thapa, Teddy Schmitz, Jon Walton'
epub_copyright = u'2016, Simon Mirco, Simone Soldateschi, Suraj Thapa, Teddy Schmitz, Jon Walton'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'RAX-AutoScaler'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
{
"content_hash": "b21bd3f1687c255eff0e00e2b9768d1c",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 132,
"avg_line_length": 32.62305295950156,
"alnum_prop": 0.7091291061879297,
"repo_name": "rackerlabs/rax-autoscaler",
"id": "d3c33fbb607e3fc240d1cedaa437c71d9944d516",
"size": "10899",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1262"
},
{
"name": "Python",
"bytes": "134223"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from mezzanine.core.models import Displayable, Ownable
from mezzanine.generic.models import Rating
from mezzanine.generic.fields import RatingField, CommentsField
class Link(Displayable, Ownable):
rating = RatingField()
comments = CommentsField()
link = models.URLField()
@models.permalink
def get_absolute_url(self):
return ("link_detail", (), {"slug": self.slug})
class Profile(models.Model):
user = models.OneToOneField("auth.User")
karma = models.IntegerField(default=0, editable=False)
website = models.URLField(blank=True)
bio = models.TextField(blank=True)
def __unicode__(self):
return "%s" % (self.user)
@receiver(post_save, sender=Rating)
def karma(sender, **kwargs):
"""
Each time a rating is saved, check its value and modify the
profile karma for the related object's user accordingly.
Since ratings are either +1/-1, if a rating is being edited,
we can assume that the existing rating is in the other direction,
so we multiply the karma modifier by 2.
"""
rating = kwargs["instance"]
value = int(rating.value)
if not kwargs["created"]:
value *= 2
content_object = rating.content_object
if rating.user != content_object.user:
queryset = Profile.objects.filter(user=content_object.user)
queryset.update(karma=models.F("karma") + value)
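# A standalone illustration (hypothetical helper, not part of the original
# module) of the race-safe pattern the signal relies on: models.F("karma")
# turns the increment into a single SQL UPDATE, so concurrent ratings never
# overwrite each other with stale Python-side values.
def adjust_karma(user, delta):
    Profile.objects.filter(user=user).update(karma=models.F("karma") + delta)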
|
{
"content_hash": "61c8b20331e731ff2ca118df3a65f786",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 69,
"avg_line_length": 28.814814814814813,
"alnum_prop": 0.7011568123393316,
"repo_name": "baturay/ne-istiyoruz",
"id": "871fcaf39465fc3e231ba4a71ddcc90307ac1f12",
"size": "1557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "3993"
},
{
"name": "Python",
"bytes": "51289"
}
],
"symlink_target": ""
}
|